diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..434d70caf31e9ab11f453f5d7dae69954c4fefc4
--- /dev/null
+++ b/.github/CONTRIBUTING.md
@@ -0,0 +1,103 @@
+# Contributing Guidelines
+
+Welcome to the project! 🎉
+We move fast, but we keep code quality and stability in check.
+
+---
+
+## 🏗️ Branching Model
+
+- **main** → Always stable and demo-ready.
+- **dev** → Integration branch.
+- **feature/** → Active development branches.
+
+**Flow:**
+1. Branch off `dev` for your work.
+2. Open a Pull Request (PR) into `dev`.
+3. Once `dev` is stable, it’s merged into `main` via a PR + review.
+
+Example:
+main ← dev ← feature/email-agent
+
+---
+
+## ✅ Pull Request Rules
+
+- All merges into `main` or `dev` **require at least one approving review**.
+- Keep PRs **small and focused** (<300 lines if possible).
+- Use **clear titles** and **short descriptions** (what + why).
+- You can **open draft PRs early** for feedback.
+- Squash merge when possible to keep history clean.
+
+**Naming Convention:**
+- `feat`: add Gmail OAuth flow
+- `fix`: handle missing image extraction
+- `docs`: update setup instructions
+
+---
+
+## ⚙️ Branch Protection
+
+**main**
+- Requires PR review before merging
+- Requires status checks to pass (CI, tests, lint)
+- Must be up-to-date with base before merging
+- No direct commits or force pushes
+
+**dev**
+- Requires PR review before merging
+- Requires at least 1 approval
+- Conversations must be resolved
+- Status checks optional (for fast iteration)
+- No direct commits or force pushes
+- Squash or rebase merges only (no merge commits)
+
+---
+
+## 🚀 Quick Workflow
+
+1. `git checkout dev`
+2. `git pull`
+3. `git checkout -b feature/my-new-feature`
+4. Make your changes.
+5. Push & open a PR → base: `dev`
+6. Request a review.
+7. Merge when approved and tests pass.
+8. Once stable, open a PR from `dev → main`.
+
+---
+
+## 🧹 Hygiene
+
+- Delete merged branches.
+- Use Conventional Commits for clarity.
+- Keep `main` always deployable/demo-ready.
+- If you break something, fix it fast 😉
+
+---
+
+## 💬 Reviews
+
+- At least **one reviewer** per PR.
+- Anyone can review — small teams move faster.
+- **Pair reviews** encouraged for big changes.
+- Minor changes (docs, comments) may be self-approved if trivial.
+
+---
+
+## ⚙️ Branch Protection Setup (GitHub Rulesets)
+
+You can configure these under
+➡️ **Settings → Code and automation → Rulesets**
+
+| Branch | Require PR | Required Approvals | Require Status Checks | Require Conversation Resolution | Allow Merge Commit | Force Push | Delete Branch |
+| :------ | :----------- | :----------------- | :-------------------- | :------------------------------ | :----------------- | :---------- | :------------- |
+| `main` | ✅ | 1 | ✅ | ✅ | ❌ | ❌ | ❌ |
+| `dev` | ✅ | 1 | optional | ✅ | ❌ | ❌ | ❌ |
+
+---
+
+Thanks for contributing — keep it fast, clean, and collaborative! 🚀
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..cb7e5a3e1d1982498148606f51d3e50dfdb582df
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,70 @@
+
+# Ignore secret JSONs, not the folder
+secrets/**/credentials.json
+secrets/**/token.json
+
+secrets/**/calendar_credentials.json
+secrets/**/calendar_token.json
+
+# (Optional) ignore any backup or tmp credentials
+secrets/**/*_backup.json
+
+# any log files
+*.log
+
+__pycache__/
+*.pyc
+
+.env
+
+# __pycache__ files everywhere
+**/__pycache__/
+**/*.pyc
+**/.DS_Store
+
+
+# data
+src/database/cvs/uploads
+src/database/cvs/parsed
+src/database/voice_recordings
+
+
+
+# Local credentials / tokens
+.gcloud/
+.env
+.env.*
+**/credentials.json
+/results/
+
+# Terraform
+.terraform/
+*.tfstate
+*.tfstate.*
+crash.log
+terraform.tfvars
+*.auto.tfvars
+.override.tf
+override.tf
+
+
+# Ignore only .pdf and .txt files in uploads and parsed folders
+src/database/cvs/uploads/*.pdf
+src/database/cvs/uploads/*.txt
+src/database/cvs/parsed/*.pdf
+src/database/cvs/parsed/*.txt
+
+src/database/cvs/tests/*.pdf
+src/database/cvs/tests/*.txt
+
+# Keep these files
+!src/database/cvs/uploads/.gitkeep
+!src/database/cvs/parsed/.gitkeep
+!src/database/cvs/uploads/info.md
+!src/database/cvs/parsed/info.md
+
+# langgraph CLI cache
+.lgcache/
+.langgraph_api/
+
+.idea/
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..4eaa398c843c5f3ad0dfef2ce48852e577153435
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,61 @@
+FROM python:3.12-slim
+
+WORKDIR /app
+
+# System dependencies
+RUN apt-get update && apt-get install -y gcc libpq-dev && rm -rf /var/lib/apt/lists/*
+
+# Copy requirement files
+COPY requirements/base.txt requirements/base.txt
+COPY requirements/db.txt requirements/db.txt
+COPY requirements/agent.txt requirements/agent.txt
+COPY requirements/supervisor.txt requirements/supervisor.txt
+COPY requirements/api.txt requirements/api.txt
+COPY requirements/cv_ui.txt requirements/cv_ui.txt
+COPY requirements/mcp_calendar.txt requirements/mcp_calendar.txt
+COPY requirements/mcp_gmail.txt requirements/mcp_gmail.txt
+COPY src/frontend/gradio/requirements.txt requirements/gradio.txt
+
+# Install Python dependencies
+RUN pip install --no-cache-dir -r requirements/base.txt \
+    && pip install --no-cache-dir -r requirements/db.txt \
+    && pip install --no-cache-dir -r requirements/agent.txt \
+    && pip install --no-cache-dir -r requirements/supervisor.txt \
+    && pip install --no-cache-dir -r requirements/api.txt \
+    && pip install --no-cache-dir -r requirements/cv_ui.txt \
+    && pip install --no-cache-dir -r requirements/mcp_calendar.txt \
+    && pip install --no-cache-dir -r requirements/mcp_gmail.txt \
+    && pip install --no-cache-dir -r requirements/gradio.txt
+
+# Copy application code
+COPY src/ /app/src/
+COPY secrets/ /app/secrets/
+
+ENV PYTHONPATH=/app
+EXPOSE 7860
+
+# Create entry script inside the image (avoids missing file in build context).
+# Note: the strings below are single-quoted, so the double quotes need no
+# backslash escaping; escaping them would write literal quote characters
+# into the exported values.
+RUN printf '%s\n' \
+    '#!/usr/bin/env bash' \
+    'set -e' \
+    '' \
+    '# Hugging Face provides PORT; default to 7860 locally' \
+    'export PORT="${PORT:-7860}"' \
+    '' \
+    '# Defaults for local in-container routing; can be overridden via env' \
+    'export SUPERVISOR_API_URL="${SUPERVISOR_API_URL:-http://127.0.0.1:8080/api/v1/supervisor}"' \
+    'export DATABASE_API_URL="${DATABASE_API_URL:-http://127.0.0.1:8080/api/v1/db}"' \
+    'export CV_UPLOAD_API_URL="${CV_UPLOAD_API_URL:-http://127.0.0.1:8080/api/v1/cv}"' \
+    '' \
+    '# Start FastAPI backend' \
+    'uvicorn src.api.app:app --host 0.0.0.0 --port 8080 &' \
+    '' \
+    '# Give the API a moment to come up' \
+    'sleep 2' \
+    '' \
+    '# Run Gradio frontend' \
+    'python src/frontend/gradio/app.py' \
+    > /app/start.sh \
+    && chmod +x /app/start.sh
+
+CMD ["/app/start.sh"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..598d5a193e31bfcc6e1958e92336ad07cb1de8bf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,403 @@
+# ***`Recruitment Agent`***
+
+> This project was developed as part of the **[MCP 1st Birthday Hackathon](https://huggingface.co/MCP-1st-Birthday)** — submitted under
+> **Track 2: MCP in Action (Enterprise)**, showcasing a real-world multi-agent application built on top of the Model Context Protocol.
+
+<details>
+<summary>📚 Table of Contents (click to expand)</summary>
+
+- [Problem Statement](#problem-statement)
+- [Ethical & Regulatory Considerations](#ethical--regulatory-considerations)
+- [Quick Start: Run Application](#quick-start-run-application)
+  - [Services & Ports](#services--ports)
+  - [Infrastructure & Secrets](#infrastructure--secrets)
+  - [Run Command](#run-command)
+  - [Resetting the Environment](#resetting-the-environment)
+- [Application Flow & Entry Points](#application-flow--entry-points)
+  - [The Recruitment Lifecycle](#1-the-recruitment-lifecycle)
+  - [User Entry Points](#2-user-entry-points)
+- [AI Engineering Principles](#ai-engineering-principles)
+  - [Prompt Engineering](#prompt-engineering)
+  - [Context Engineering](#context-engineering)
+- [Model & Agent Registry](#model--agent-registry)
+  - [Integrated MCP Servers](#integrated-mcp-servers)
+- [License & Acknowledgments](#license--acknowledgments)
+- [Team](#team)
+
+</details>
+
+## **Problem Statement**
+
+Modern recruitment processes remain **slow**, **resource-intensive**, and increasingly **unsustainable** for HR teams amid persistent talent shortages and evolving skill demands. Recent industry reports underscore structural bottlenecks that hinder efficient hiring.
+
+High **applicant volumes overwhelm recruiters**, with a *typical job posting attracting hundreds of applications*, many *unqualified*, leading to administrative burdens and rushed evaluations. At the same time, *only about **5%** of viewers complete applications*, while teams waste time sifting through low-quality submissions. [`1`]
+
+Screening and early-stage evaluation consume excessive recruiter time, with **35%** of their efforts dedicated to tasks like interview scheduling alone, exacerbating workload pressures. Talent acquisition leaders report unmanageable demands, with **27%** citing overload as a key issue, up from prior years. [`2`]
+
+**Hiring timelines average 44 days across industries**, driven by skills mismatches and manual processes that delay filling critical roles. Globally, **76%** of employers struggle to fill positions due to talent gaps, particularly in tech and healthcare sectors. [`1`, `3`]
+
+The financial toll is significant, with **average cost-per-hire reaching $4,700**, fueled by prolonged cycles, high turnover in recruitment teams (projected at **51%** as a top 2025 challenge), and inefficiencies in sourcing. [`1`, `2`]
+
+HR professionals **face rising burnout** from these pressures, compounded by competition for diverse talent and the **need for more touchpoints in hiring**, which **45%** of leaders say adds complexity. Skills shortages, cited by **63%** of employers as the primary barrier to growth, further strain teams. [`2`, `4`]
+
+These challenges reveal that **traditional manual recruitment fails to scale** in a competitive 2025 landscape. An AI-driven recruitment agent can alleviate bottlenecks by automating screening, accelerating timelines, enhancing consistency, and allowing HR to prioritize strategic decisions over repetitive tasks.
+
+
+### *`References`*
+
+1. [HR Cloud — 25 Recruitment Challenges & Solutions in 2025](https://www.hrcloud.com/blog/recruitment-challenges)
+
+2. [Select Software Reviews — 100+ Recruitment Statistics Every HR Should Know in 2025](https://www.selectsoftwarereviews.com/blog/recruiting-statistics)
+
+3. [Social Talent — The 2025 Hiring Reality Check](https://www.socialtalent.com/leadership/the-2025-hiring-reality-check-data-driven-answers-to-ta-leaders-top-questions)
+
+4. [World Economic Forum — The Future of Jobs Report 2025](https://www.weforum.org/publications/the-future-of-jobs-report-2025/digest/)
+
+## **Ethical & Regulatory Considerations**
+
+This project was developed as an **experimental prototype for a hackathon**, designed to showcase how language-model agents can automate structured workflows. It is **not intended for production deployment** as an autonomous hiring system. Because it touches on the automated assessment of humans, it must be approached with caution and interpreted within the correct ethical and regulatory context.
+
+The risks of algorithmic profiling have been widely documented, most notably during the **Cambridge Analytica scandal**, where data from millions of users was harvested and used for psychographic targeting without consent.
This episode demonstrated how data-driven models can be leveraged to manipulate individuals when used irresponsibly, and it significantly shaped today’s regulatory landscape. [`5`] + +Given this history, any system that evaluates or ranks people—particularly in employment—must uphold **strict transparency, human oversight, and narrow scope**. In this prototype, all AI outputs are intended purely as **assistive signals**. The system must **never** be used to autonomously approve, reject, or shortlist candidates. + +The **EU AI Act** classifies AI systems used for recruitment, CV screening, candidate ranking, promotion decisions, or termination as **High-Risk AI Systems** (Annex III). Such systems are permitted in the EU but must meet stringent requirements, including: + +- **Human oversight** with the ability to override AI suggestions +- **Transparency** about the model’s role and limitations +- **Detailed logging and traceability** of system behavior +- **Bias monitoring and risk management** +- **High-quality and relevant training data** +- **Clear separation** between AI scoring and final human judgment + +The Act also **prohibits** certain practices in hiring, such as emotion recognition in workplace settings, biometric inference of personality traits, and social-scoring-style ranking systems. [`6`, `7`, `8`] + +This prototype **does not** conduct emotion recognition, sensitive-trait inference, biometric profiling, or psychographic prediction. It is a technical experiment focused on agent orchestration, workflow automation, and context management—not an end-to-end HR decision engine. + +### **Human-in-the-Loop by Design** +To remain aligned with ethical expectations and regulatory requirements, this system must always operate with: + +- **Human-in-the-Loop (HITL):** Recruiters make all decisions. +- **Explainability:** Agents produce structured rationales, not black-box judgments. +- **Data minimization:** Only job-relevant information is processed. +- **No profiling of protected traits:** No biometric, psychographic, or emotional inference. + +### **Project Status** +This project remains a **research and demonstration artifact**, created to explore the technical viability of LLM-powered coordination between agents. It highlights what is technologically possible, but is **not a deployable HR solution** under the EU AI Act. Any real-world implementation would require extensive risk assessment, compliance measures, and human oversight to avoid replicating the harms demonstrated in past profiling scandals. + +--- + +### *`References`* + +5. [The Guardian — Cambridge Analytica: A Year On, Lesson in Institutional Failure](https://www.theguardian.com/uk-news/2019/mar/17/cambridge-analytica-year-on-lesson-in-institutional-failure-christopher-wylie) + +6. [High-level summary of the EU AI Act](https://artificialintelligenceact.eu/high-level-summary/) + +7. [EU Digital Strategy — Regulatory Framework for AI](https://digital-strategy.ec.europa.eu/en/policies/regulatory-framework-ai) + +8. [Clifford Chance — What Does the EU AI Act Mean for Employers?](https://www.cliffordchance.com/content/dam/cliffordchance/briefings/2024/08/what-does-the-eu-ai-act-mean-for-employers.pdf) + + + +## ***`Quick Start: Run Application`*** +To spin up the entire platform including the database, agents, and UI dashboards, we use **Docker Compose**. 
+ +### ***Services & Ports*** +| Service | Description | Host Port | Container Port | +|---------|-------------|-----------|----------------| +| `db` | PostgreSQL 15 database with persistent storage | **5433** | 5432 | +| `cv_upload_streamlit` | UI for uploading CVs | **8501** | 8501 | +| `voice_screening_streamlit` | UI for voice screening candidates | **8502** | 8501 | +| `supervisor_ui` | Main Chat UI for the Supervisor Agent | **8503** | 8501 | +| `websocket_proxy` | Proxy for OpenAI Realtime API | **8000** | 8000 | + +### ***Infrastructure & Secrets*** +This project requires Google Cloud credentials for the Gmail and Calendar agents. + +- **Secrets:** Google tokens and credentials must be present in the `secrets/` directory. +- **Infrastructure:** You can provision the necessary GCP infrastructure using the code in `terraform/` or the scripts in `scripts/infra/`. +- **Documentation:** For detailed setup instructions, refer to the [MCP Docs](docs/mcp/). + +### ***Run Command*** +1. **Configure Environment:** + Copy the example environment file and fill in your API keys: + ```bash + cp .env.example .env + ``` + +2. **Start Services:** + ```bash + docker compose --env-file .env -f docker/docker-compose.yml up --build + ``` + +### 🧹 Resetting the Environment +If you need a clean slate (e.g., after modifying DB models): +```bash +# 1. Stop containers +docker compose -f docker/docker-compose.yml down + +# 2. Remove persistent DB volume +docker volume rm docker_postgres_data + +# 3. Rebuild & Start +docker compose --env-file .env -f docker/docker-compose.yml up --build +``` + +--- + +## ***`Application Flow & Entry Points`*** + +The platform orchestrates a complete recruitment pipeline, interacting with both Candidates and the HR Supervisor. + +### 1. The Recruitment Lifecycle +The system tracks candidates through a defined state machine (see `src/state/candidate.py` for the `CandidateStatus` enum). + +```mermaid +graph TD + %% Actors + Candidate((Candidate)) + HR((HR Supervisor)) + + %% System Components (Nodes) + CV_UI[CV Portal UI] + CV_Screen{CV Screening AI} + Voice_UI[Voice Portal UI] + Voice_Judge{Voice Judge AI} + Interview[Person-to-Person Interview] + Decision{Final Decision} + + %% Flow & Actions (Edges) + Candidate -->|1. Uploads CV| CV_UI + CV_UI -->|2. Triggers Analysis| CV_Screen + + CV_Screen -->|Pass: Sends Invite| Voice_UI + CV_Screen -->|Fail: Notify| Rejected((Rejected)) + + Voice_UI -->|3. Conducts Interview| Candidate + Candidate -->|4. Completes Session| Voice_Judge + + Voice_Judge -->|Pass: Schedule| Interview + Voice_Judge -->|Fail: Notify| Rejected + + Interview -->|5. Feedback| HR + HR -->|6. Updates Status| Decision + + Decision -->|Hire| Hired((Hired)) + Decision -->|Reject| Rejected + + %% Styling + style CV_UI fill:#e3f2fd,stroke:#1565c0 + style Voice_UI fill:#e3f2fd,stroke:#1565c0 + style CV_Screen fill:#fff3e0,stroke:#ef6c00 + style Voice_Judge fill:#fff3e0,stroke:#ef6c00 + style Interview fill:#e8f5e9,stroke:#2e7d32 + style Decision fill:#f3e5f5,stroke:#7b1fa2 +``` + +### 2. User Entry Points + +| User | Interface | Port | Description | +| :--- | :--- | :--- | :--- | +| **HR Manager** | **Supervisor UI** | `8503` | **The Command Center.** Chat with the Supervisor Agent to manage the pipeline, review candidates, query the DB, and schedule interviews. | +| **Candidate** | **CV Portal** | `8501` | Public-facing portal for candidates to register and upload their resumes to the system. 
+| **Candidate** | **Voice Portal** | `8502` | AI-conducted voice interview interface. Candidates access this only after passing CV screening and receiving an invite. |
+
+---
+
+## ***`AI Engineering Principles`***
+
+### ***Prompt Engineering***
+
+To improve the reliability of complex evaluations (such as CV scoring and Voice Interview judging), we enforce **Chain-of-Thought (CoT)** reasoning within our structured outputs, inspired by [Wei et al. (2022)](https://arxiv.org/abs/2201.11903).
+
+By requiring the model to generate a textual explanation *before* assigning numerical scores, we ensure the model "thinks" through the evidence before committing to a decision. This is implemented directly in our Pydantic schemas (e.g., `src/agents/cv_screening/schemas/output_schema.py`), where field order matters:
+
+```mermaid
+flowchart LR
+    %% Nodes
+    Input[Input Data]
+    subgraph "Structured Output Schema"
+        Feedback["1. Generate Feedback (CoT)"]
+        Score["2. Assign Scores"]
+    end
+    Output[Overall Score]
+
+    %% Flow
+    Input --> Feedback
+    Feedback --> Score
+    Score --> Output
+
+    %% Styling
+    style Feedback fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px
+    style Score fill:#fff3e0,stroke:#ef6c00,stroke-width:2px
+```
+
+This simple structural constraint leads to significantly better calibration and reduced hallucination in scoring.
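+
+As a minimal sketch of this pattern (the field names below are illustrative assumptions; the actual schema lives in `src/agents/cv_screening/schemas/output_schema.py`):
+
+```python
+from pydantic import BaseModel, Field
+
+
+class CVEvaluation(BaseModel):
+    # 1. Free-text reasoning is declared FIRST: structured outputs are
+    #    generated field-by-field in declaration order, so the model writes
+    #    down its evidence before committing to any number.
+    feedback: str = Field(
+        description="Step-by-step assessment of the CV against the job requirements."
+    )
+    # 2. Scores come last, conditioned on the feedback generated above.
+    skills_score: int = Field(ge=0, le=10, description="Skill match for the role.")
+    experience_score: int = Field(ge=0, le=10, description="Relevance of prior experience.")
+    overall_score: float = Field(ge=0, le=10, description="Aggregate suitability score.")
+```
+
+Reversing the field order would let the model commit to scores first and rationalize them afterwards, which is exactly what the CoT ordering is meant to prevent.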
Sub-agents are **stateless** from the Supervisor's perspective—each trigger starts a fresh thread, preventing error accumulation in the workers. + + +#### 2. Context Offloading & Loading (RAG-lite) +We treat the database not just as storage, but as **offloaded context**. +- **Offloading:** Candidate data, screening results, and interview states are persisted immediately to a structured SQL/JSON database. +- **Loading:** The Supervisor does not keep all candidate data in memory. Instead, it utilizes the `db_executor` agent to **retrieve (load)** only the specific data points needed for the current planning step. +- **Benefit:** Keeps the active context window lean and focused on *reasoning* rather than *storage*. + +#### 3. Adaptive Context Compaction +For the **stateful Supervisor** (which manages the long-running user conversation), we implement **Compactive Summarization**. +- **Mechanism:** As the conversation history exceeds a token threshold, older interactions are summarized into a concise narrative while recent messages are kept verbatim. +- **Result:** The agent retains "long-term memory" of the conversation arc without hitting context window limits, keeping the Supervisor "forever young." + +```mermaid +graph TD + User[User / API] -->|Long-running Thread| Supervisor + + subgraph "Stateful & Compacted" + Supervisor[Supervisor Agent] + Memory[Context Compaction Module] -.->|Summarizes History| Supervisor + end + + subgraph "Stateless & Isolated" + CV[CV Screener] + Voice[Voice Screener] + end + + subgraph "Context Offloading" + DB[(Postgres DB)] + end + + Supervisor -->|Delegates Task| CV + Supervisor -->|Delegates Task| Voice + Supervisor -->|Queries/Updates| DB + + CV -.->|1. New Thread| CV + Voice -.->|1. New Thread| Voice +``` + +## ***`Model & Agent Registry`*** + +A breakdown of the various LLMs, Agents, and Workflows powering the system. + +- 🤖 **Agent:** Autonomous entity that can use tools, plan multiple steps, and maintain reasoning loops. +- ⚙️ **Workflow:** Deterministic, fixed sequence of operations (Pipeline). It may use LLMs for specific steps but the flow is hardcoded. +- 🧠 **Simple LLM:** A direct "one-shot" call to a Language Model for a specific transformation (e.g., summarization, extraction) without tools or loops. + +| Component | Type | Model | Description | Location | +| :--- | :--- | :--- | :--- | :--- | +| **Supervisor Agent** | 🤖 **Agent** | `gpt-4o` | Orchestrates delegation, planning, and context management. | `src/agents/supervisor/supervisor_v2.py` | +| **Gmail Agent** | 🤖 **Agent** | `gpt-4o` | Autonomous email management via MCP (read/send/label). | `src/agents/gmail/gmail_agent.py` | +| **GCalendar Agent** | 🤖 **Agent** | `gpt-4o` | Autonomous calendar scheduling via MCP. | `src/agents/gcalendar/gcalendar_agent.py` | +| **DB Executor** | 🤖 **Agent** | `gpt-4o` | Writes SQL/Python to query the database (CodeAct). | `src/agents/db_executor/db_executor.py` | +| **CV Screening** | ⚙️ **Workflow** | `gpt-4o` | Deterministic pipeline: Fetch → Read → Evaluate → Save. | `src/agents/cv_screening/cv_screening_workflow.py` | +| **Voice Judge** | 🧠 **Simple LLM** | `gpt-4o-audio` | Evaluates audio/transcripts for sentiment & confidence. | `src/agents/voice_screening/judge.py` | +| **Doc Parser** | 🧠 **Simple LLM** | `gpt-4o-mini` | Vision-based PDF-to-Markdown conversion. | `src/doc_parser/pdf_to_markdown.py` | +| **History Manager** | 🧠 **Simple LLM** | `gpt-4o-mini` | Summarizes conversation history for context compaction. 
| `src/context_eng/history_manager.py` |
+
+### 🔌 ***`Integrated MCP Servers`***
+The system integrates **Model Context Protocol (MCP)** servers to connect agents to external tools in a secure, standardized way.
+
+| MCP Server | Purpose | Used By |
+| :--- | :--- | :--- |
+| **Gmail MCP** | Provides tools to `list`, `read`, `send`, and `label` emails. | `Gmail Agent` |
+| **Google Calendar MCP** | Provides tools to `list_events`, `create_event`, and `update_event`. | `GCalendar Agent` |
+
+> **Note:** Each MCP server runs as a standalone process that exposes a standardized tool interface, which the respective agent consumes dynamically.
+
+---
+
+## ***`License & Acknowledgments`***
+This project utilizes code from:
+- [gmail-mcp](https://github.com/theposch/gmail-mcp) by **theposch** (GPLv3)
+  *Integrated at:* `src/mcp_servers/gmail-mcp/`
+- [calendar-mcp](https://github.com/deciduus/calendar-mcp) by **deciduus** (AGPL-3.0)
+  *Integrated at:* `src/mcp_servers/calendar-mcp/`
+
+We gratefully acknowledge these original works and the wider AI and Data Science community that makes such collaboration possible. We distribute our modifications under compatible license terms.
+
+---
+
+## 👥 ***`Team`***
+| Member |
+| -------- |
+| [Sebastian Wefers](https://github.com/Ocean-code-1995) |
+| [Dmitri Moscoglo](https://github.com/DimiM99) |
+| [Owen Kaplinsky](https://github.com/owenkaplinsky) |
+| [SrikarMK](https://github.com/Srikarmk) |
diff --git a/docker/Dockerfile.candidates_db_init b/docker/Dockerfile.candidates_db_init new file mode 100644 index 0000000000000000000000000000000000000000..abb855859a0205d225c16bc41ea5cfe5d5dc7282 --- /dev/null +++ b/docker/Dockerfile.candidates_db_init @@ -0,0 +1,22 @@
+# --- Dockerfile.candidates_db_init ---
+FROM python:3.12-slim
+
+# Set working directory inside container
+WORKDIR /app
+
+# Install system dependencies needed for psycopg2
+RUN apt-get update && apt-get install -y \
+    libpq-dev gcc && \
+    rm -rf /var/lib/apt/lists/*
+
+# Copy requirements files and install dependencies (paths are relative to the build context, i.e. the project root)
+
+COPY requirements/base.txt ./requirements/base.txt
+COPY requirements/db.txt ./requirements/db.txt
+RUN pip install --no-cache-dir -r requirements/db.txt
+
+# Copy *only* the candidate database module
+COPY src/database/candidates ./src/database/candidates
+
+# Default command - use dedicated init script to avoid circular import
+CMD ["python", "-m", "src.database.candidates.init_db"]
diff --git a/docker/Dockerfile.cv_upload b/docker/Dockerfile.cv_upload new file mode 100644 index 0000000000000000000000000000000000000000..6fb70fe0444a314fb8660658651043fc267e5ab7 --- /dev/null +++ b/docker/Dockerfile.cv_upload @@ -0,0 +1,29 @@
+# Use Python slim base
+FROM python:3.12-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    libpq-dev gcc && \
+    rm -rf /var/lib/apt/lists/*
+
+# Copy only requirements first (for build caching); paths are relative to the build context (project root)
+COPY requirements/base.txt ./requirements/base.txt
+COPY requirements/db.txt ./requirements/db.txt
+COPY requirements/cv_ui.txt ./requirements/cv_ui.txt
+# Install Streamlit + base deps
+RUN pip install --no-cache-dir -r requirements/cv_ui.txt
+
+# Copy project source code
+COPY src ./src
+
+ENV PYTHONPATH=/app
+
+
+# Expose Streamlit port
+EXPOSE 8501
+
+# Default command to run Streamlit app
+CMD ["streamlit", "run", "src/frontend/streamlit/cv_ui/app.py", "--server.port=8501", "--server.address=0.0.0.0"] \ No newline at end of file
diff --git
a/docker/Dockerfile.supervisor b/docker/Dockerfile.supervisor new file mode 100644 index 0000000000000000000000000000000000000000..b4828c41833db2d2edcc85b9a3f21cd3cb6ccb0d --- /dev/null +++ b/docker/Dockerfile.supervisor @@ -0,0 +1,36 @@
+FROM python:3.12-slim
+
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    gcc \
+    libpq-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy requirement files
+COPY requirements/base.txt requirements/base.txt
+COPY requirements/db.txt requirements/db.txt
+COPY requirements/agent.txt requirements/agent.txt
+COPY requirements/supervisor.txt requirements/supervisor.txt
+COPY requirements/mcp_calendar.txt requirements/mcp_calendar.txt
+
+# Install dependencies directly (no venv needed in container)
+# **NOTE** uv installation is needed for the gmail mcp server to work.
+# >>> it is installed in the base image /usr/local/bin/uv
+RUN pip install --no-cache-dir uv && \
+    pip install --no-cache-dir -r requirements/supervisor.txt && \
+    pip install --no-cache-dir -r requirements/db.txt && \
+    pip install --no-cache-dir -r requirements/agent.txt && \
+    pip install --no-cache-dir -r requirements/mcp_calendar.txt
+
+# Copy application code
+COPY src/ /app/src/
+COPY secrets/ /app/secrets/
+COPY .env /app/.env
+
+# Expose Streamlit port
+EXPOSE 8501
+
+# Run Streamlit
+CMD ["streamlit", "run", "src/frontend/streamlit/supervisor_ui/app.py", "--server.port=8501", "--server.address=0.0.0.0"]
diff --git a/docker/Dockerfile.supervisor_api b/docker/Dockerfile.supervisor_api new file mode 100644 index 0000000000000000000000000000000000000000..e69cae0ec9df0ba3bf0a4c86225ce66648bec468 --- /dev/null +++ b/docker/Dockerfile.supervisor_api @@ -0,0 +1,43 @@
+FROM python:3.12-slim
+
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    gcc \
+    libpq-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy requirement files
+COPY requirements/base.txt requirements/base.txt
+COPY requirements/db.txt requirements/db.txt
+COPY requirements/agent.txt requirements/agent.txt
+COPY requirements/supervisor.txt requirements/supervisor.txt
+COPY requirements/api.txt requirements/api.txt
+COPY requirements/cv_ui.txt requirements/cv_ui.txt
+COPY requirements/mcp_calendar.txt requirements/mcp_calendar.txt
+COPY requirements/mcp_gmail.txt requirements/mcp_gmail.txt
+
+# Install dependencies directly (no venv needed in container)
+# **NOTE** uv installation is needed for the gmail mcp server to work.
+# >>> it is installed in the base image /usr/local/bin/uv
+RUN pip install --no-cache-dir uv && \
+    pip install --no-cache-dir -r requirements/supervisor.txt && \
+    pip install --no-cache-dir -r requirements/db.txt && \
+    pip install --no-cache-dir -r requirements/agent.txt && \
+    pip install --no-cache-dir -r requirements/api.txt && \
+    pip install --no-cache-dir -r requirements/cv_ui.txt && \
+    pip install --no-cache-dir -r requirements/mcp_calendar.txt && \
+    pip install --no-cache-dir -r requirements/mcp_gmail.txt
+
+# Copy application code
+COPY src/ /app/src/
+COPY secrets/ /app/secrets/
+COPY .env /app/.env
+
+# Expose API port
+EXPOSE 8080
+
+# Run FastAPI with uvicorn
+CMD ["uvicorn", "src.api.app:app", "--host", "0.0.0.0", "--port", "8080"]
+
diff --git a/docker/Dockerfile.voice_proxy b/docker/Dockerfile.voice_proxy new file mode 100644 index 0000000000000000000000000000000000000000..bf7ff4e4b12b49c1dd69b0f978d59d2146b20882 --- /dev/null +++ b/docker/Dockerfile.voice_proxy @@ -0,0 +1,29 @@
+# Use Python slim base
+FROM python:3.12-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    libpq-dev gcc && \
+    rm -rf /var/lib/apt/lists/*
+
+# Copy only requirements first (for build caching); paths are relative to the build context (project root)
+COPY requirements/base.txt ./requirements/base.txt
+COPY requirements/db.txt ./requirements/db.txt
+COPY requirements/voice_proxy.txt ./requirements/voice_proxy.txt
+# Install FastAPI proxy + base deps
+RUN pip install --no-cache-dir -r requirements/voice_proxy.txt
+
+# Copy project source code
+COPY src ./src
+
+ENV PYTHONPATH=/app
+
+
+# Expose FastAPI port
+EXPOSE 8000
+
+# Default command to run FastAPI proxy
+CMD ["python", "-m", "uvicorn", "src.frontend.streamlit.voice_screening_ui.proxy:app", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/docker/Dockerfile.voice_screening b/docker/Dockerfile.voice_screening new file mode 100644 index 0000000000000000000000000000000000000000..a934f471cf2f57f7fca681abde78d945c1f4ba45 --- /dev/null +++ b/docker/Dockerfile.voice_screening @@ -0,0 +1,28 @@
+# Use Python slim base
+FROM python:3.12-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    libpq-dev gcc && \
+    rm -rf /var/lib/apt/lists/*
+
+# Copy only requirements first (for build caching); paths are relative to the build context (project root)
+COPY requirements/base.txt ./requirements/base.txt
+COPY requirements/voice_screening_ui.txt ./requirements/voice_screening_ui.txt
+# Install Streamlit + base deps
+RUN pip install --no-cache-dir -r requirements/voice_screening_ui.txt
+
+# Copy project source code
+COPY src ./src
+
+ENV PYTHONPATH=/app
+
+
+# Expose Streamlit port
+EXPOSE 8501
+
+# Default command to run Streamlit app
+CMD ["streamlit", "run", "src/frontend/streamlit/voice_screening_ui/app.py", "--server.port=8501", "--server.address=0.0.0.0"]
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..aaee673c9cdf4776777c113ffe29be3b8262b1b3 --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,236 @@
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+# Run as follows from root:
+# >>> docker compose --env-file .env -f docker/docker-compose.yml up --build
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+
+services:
+  # --- Database Service ---
+  # Runs a PostgreSQL 15 instance with persistent storage.
+ db: + image: postgres:15 + container_name: agentic_hr_db + restart: always + ports: + - "5433:5432" + volumes: + - postgres_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U agentic_user -d agentic_hr"] + interval: 3s + timeout: 3s + retries: 5 + environment: + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: ${POSTGRES_DB} + networks: + - hrnet + + candidates_db_init: + # --- Application Service --- + # Runs your Python backend inside Docker. + # Initializes the database or starts the API (depending on command). + container_name: candidates_db_init + build: + context: .. # build from the project root + dockerfile: docker/Dockerfile.candidates_db_init + depends_on: + db: + condition: service_healthy + environment: + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: ${POSTGRES_DB} + # command: ["python", "-m", "src.database.candidates.init_db"] + + volumes: + # --- Local code mount (for development only) --- + # Mounts your entire project source from the host (../) + # into the container at /app. + # ✅ Enables live code changes without rebuilding the image. + # ⚠️ Do NOT use in production – overrides the built image code. + - ../:/app # optional: live reload for local dev + + networks: + - hrnet + + # --- CV Upload --- + cv_upload_streamlit: + container_name: cv_upload_streamlit + build: + context: .. + dockerfile: docker/Dockerfile.cv_upload + ports: + - "8501:8501" + depends_on: + - db + - supervisor_api + environment: + # Database connection + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: ${POSTGRES_DB} + DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + CV_UPLOAD_PATH: /app/src/database/cvs/uploads + # App specific + CV_UPLOAD_API_URL: http://supervisor_api:8080/api/v1/cv + PYTHONPATH: /app + volumes: + # Mount local code for live updates + - ../:/app + # Shared volume for CV uploads (persistent) + - ../src/database/cvs:/app/src/database/cvs + command: + [ + "streamlit", + "run", + "src/frontend/streamlit/cv_ui/app.py", + "--server.port=8501", + "--server.address=0.0.0.0", + ] + networks: + - hrnet + + # --- WebSocket Proxy for OpenAI Realtime API --- + websocket_proxy: + container_name: websocket_proxy + build: + context: .. + dockerfile: docker/Dockerfile.voice_proxy + ports: + - "8000:8000" + depends_on: + - db + - candidates_db_init + environment: + PYTHONPATH: /app + OPENAI_API_KEY: ${OPENAI_API_KEY} + BACKEND_API_URL: http://supervisor_api:8080 + volumes: + # Mount local code for live updates + - ../:/app + command: + [ + "python", + "-m", + "uvicorn", + "src.frontend.streamlit.voice_screening_ui.proxy:app", + "--host", + "0.0.0.0", + "--port", + "8000", + ] + networks: + - hrnet + + # --- Voice Screening UI --- + voice_screening_ui: + container_name: voice_screening_ui + build: + context: .. 
+      dockerfile: docker/Dockerfile.voice_screening
+    ports:
+      - "8502:8501" # Map host port 8502 to container port 8501
+    depends_on:
+      - db
+      - websocket_proxy
+    environment:
+      # Use the shared Postgres settings from .env instead of hardcoded credentials
+      DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
+      PYTHONPATH: /app
+      WEBSOCKET_PROXY_URL: ws://websocket_proxy:8000/ws/realtime
+      BACKEND_API_URL: http://supervisor_api:8080
+    volumes:
+      # Mount local code for live updates
+      - ../:/app
+    command:
+      [
+        "streamlit",
+        "run",
+        "src/frontend/streamlit/voice_screening_ui/app.py",
+        "--server.port=8501",
+        "--server.address=0.0.0.0",
+      ]
+    networks:
+      - hrnet
+
+  # --- Supervisor Agent API ---
+  supervisor_api:
+    container_name: supervisor_api
+    build:
+      context: ..
+      dockerfile: docker/Dockerfile.supervisor_api
+    ports:
+      - "8080:8080" # Map host port 8080 to container port 8080
+    depends_on:
+      - db
+    environment:
+      # We set POSTGRES_HOST to 'db' so the agent connects to the container internal network
+      POSTGRES_HOST: ${POSTGRES_HOST}
+      POSTGRES_PORT: ${POSTGRES_PORT}
+      POSTGRES_USER: ${POSTGRES_USER}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+      POSTGRES_DB: ${POSTGRES_DB}
+      PYTHONPATH: /app
+      PROMPTLAYER_API_KEY: ${PROMPTLAYER_API_KEY}
+      OPENAI_API_KEY: ${OPENAI_API_KEY}
+      WEBSOCKET_PROXY_URL: ws://websocket_proxy:8000/ws/realtime
+    volumes:
+      # Mount local code for live updates
+      - ../:/app
+    command:
+      [
+        "uvicorn",
+        "src.api.app:app",
+        "--host",
+        "0.0.0.0",
+        "--port",
+        "8080",
+        "--reload",
+      ]
+    networks:
+      - hrnet
+
+  # --- Supervisor Agent UI ---
+  supervisor_ui:
+    container_name: supervisor_ui
+    build:
+      context: ..
+      dockerfile: docker/Dockerfile.supervisor
+    ports:
+      - "8503:8501" # Map host port 8503 to container port 8501
+    depends_on:
+      - db
+      - supervisor_api
+    environment:
+      # The UI needs no direct DB access; it talks to the backend via the API URL below
+      PYTHONPATH: /app
+      # API URL for the Streamlit UI to connect to
+      SUPERVISOR_API_URL: http://supervisor_api:8080/api/v1/supervisor
+    volumes:
+      # Mount local code for live updates
+      - ../:/app
+    command:
+      [
+        "streamlit",
+        "run",
+        "src/frontend/streamlit/supervisor_ui/app.py",
+        "--server.port=8501",
+        "--server.address=0.0.0.0",
+      ]
+    networks:
+      - hrnet
+
+volumes:
+  postgres_data:
+  cvs_data:
+    driver: local
+
+networks:
+  hrnet:
+    driver: bridge
diff --git a/docker/info.md b/docker/info.md new file mode 100644 index 0000000000000000000000000000000000000000..4cfe876bad0fcd1c5a783d9bdc195b2d37052a84 --- /dev/null +++ b/docker/info.md @@ -0,0 +1,40 @@
+## 🐳 Docker Services
+
+### Services and Ports
+
+| Service | Description | Host Port | Container Port |
+|---------|-------------|-----------|----------------|
+| `db` | PostgreSQL 15 database with persistent storage | 5433 | 5432 |
+| `candidates_db_init` | Python backend container — initializes DB schema | N/A | N/A |
+| `cv_upload_streamlit` | Streamlit app for CV uploads | 8501 | 8501 |
+| `websocket_proxy` | WebSocket proxy for OpenAI Realtime API | 8000 | 8000 |
+| `voice_screening_ui` | Streamlit app for voice screening | 8502 | 8501 |
+| `supervisor_ui` | Streamlit app for Supervisor Agent | 8503 | 8501 |
+| `supervisor_api` | FastAPI backend for Supervisor Agent | 8080 | 8080 |
+
+---
+
+### Run Command
+
+```bash
+docker compose --env-file .env -f docker/docker-compose.yml up --build
+```
+
+---
+
+### Resetting the Environment
+
+When making structural changes to the database (e.g., modifying models, updating Enums) or when you simply want a clean slate for testing, the persistent Docker volumes may cause
conflicts with new code. + +To completely reset the environment and database: + +```bash +# 1. Stop running containers +docker compose -f docker/docker-compose.yml down + +# 2. Remove the persistent database volume +docker volume rm docker_postgres_data + +# 3. Rebuild and start fresh +docker compose --env-file .env -f docker/docker-compose.yml up --build +``` diff --git a/docs/agents/agent_ochestrator.md b/docs/agents/agent_ochestrator.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/agents/cv_screening.md b/docs/agents/cv_screening.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/agents/google_mcp_agent.md b/docs/agents/google_mcp_agent.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/agents/judging_agent.md b/docs/agents/judging_agent.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/agents/supervisor/mvps/mvp1/mvp_v1.md b/docs/agents/supervisor/mvps/mvp1/mvp_v1.md new file mode 100644 index 0000000000000000000000000000000000000000..9af15ac444148c06d36730617a7218e04ae1543b --- /dev/null +++ b/docs/agents/supervisor/mvps/mvp1/mvp_v1.md @@ -0,0 +1 @@ +Owens \ No newline at end of file diff --git a/docs/agents/supervisor/mvps/mvp2/mvp_v2.md b/docs/agents/supervisor/mvps/mvp2/mvp_v2.md new file mode 100644 index 0000000000000000000000000000000000000000..006d42606977b477b519ba75ebb72264deed0c27 --- /dev/null +++ b/docs/agents/supervisor/mvps/mvp2/mvp_v2.md @@ -0,0 +1,320 @@ +# ***MVP version #2*** +--- + + +## HR-Triggered Autonomous Workflow Concept + +The system is designed to operate **autonomously** while still allowing HR to initiate workflows and request status insights. +This ensures maximum automation without losing control or clarity in the process. + +--- + +### **HR Interaction Trigger** + +When HR opens the UI, they can interact with the supervisor agent by asking questions such as: + +> **“Hey recruitment agent, what is the current status quo? Any new applicants? How many have passed CV screening?”** + +The supervisor agent then: + +1. Queries the database using predefined tools +2. Generates a clear, human-friendly status report +3. 
Waits for HR instructions on how to proceed + +HR can then issue high-level commands like: + +- **“Process all new applicants.”** +- **“Do not process new applicants further — notify them instead.”** +- **“Continue processing only applicants who already passed screening.”** + +--- + +### **Supervisor Executes Fully Autonomous Actions** + +Once HR gives the high-level command, the supervisor performs all actions autonomously: + +- **Process new applicants** + - Parse CVs + - Run CV screening + - Update DB + - Notify or proceed depending on results + +- **Process screened applicants further** + - Notify candidate + - Request available time slots + - Match HR availability + - Schedule interview + - Send confirmation emails + +--- + +### **Concurrency and Isolation** + +To avoid mixing contexts across candidates: + +- **Only one supervisor agent instance runs at a time** +- Supervisor processes candidates **sequentially** +- Each candidate is handled **individually and deterministically** + +This avoids: +- Context bleed +- Duplicate actions +- Race conditions +- Mixed reasoning across candidates + +--- + +### **Per-Candidate Deterministic State Machine** + +Each candidate has a small state object: + +```json +{ + "candidate_id": 123, + "state": "cv_uploaded", + "checklist_path": "users/123/checklist.md" +} +``` +This keeps the workflow predictable, restartable, and isolated. + +--- + +### ***Per-Candidate Checklist File*** +Each candidate has a personal Markdown checklist: +```text +# Candidate Checklist — ID 123 + +- [x] CV uploaded +- [x] CV parsed and stored +- [x] CV screening started +- [x] CV screening completed +- [ ] Screening results notified to candidate +- [ ] Asked candidate for available time slots +- [ ] Received candidate availability +- [ ] Checked HR availability +- [ ] Scheduled interview +- [ ] Final confirmation email sent +``` +The supervisor uses this checklist to determine the next atomic action. +It loads only this candidate’s context, performs exactly one update, writes back, and moves on. + +--- +## **Hybrid Progress Tracking — DB Status + Checklist** + +The HR agent maintains two synchronized layers of workflow state: + +- **Database `status` field:** + Captures the **coarse-grained milestone** in the candidate’s lifecycle + (e.g., `applied`, `cv_screened`, `interview_scheduled`, `decision_made`). + → This is the **authoritative system state** used for HR dashboards, analytics, and reporting. + +- **Per-candidate Markdown checklist:** + Tracks **fine-grained atomic actions** that occur within each milestone + (e.g., CV parsed, CV screened, email sent, candidate replied). + → This serves as the **agent’s operational log**, enabling deterministic reasoning, auditing, and safe restarts. + +--- + +### **Checklist and Milestone Boundaries** + +The **checklist** is composed of multiple **substeps**, each representing one small, deterministic action. +When all substeps belonging to a stage are completed, the system reaches a **milestone boundary**. +That boundary marks a safe point to update the candidate’s `status` field in the database. 
+
+| Milestone (`status` in DB) | Meaning | Checklist Substeps Leading to Boundary |
+|-----------------------------|----------|---------------------------------------|
+| `applied` | Candidate record created | `[x] CV uploaded`, `[x] CV parsed` |
+| `cv_screened` | Screening phase finished | `[x] Screening started`, `[x] Screening completed`, `[x] Result stored` |
+| `interview_scheduled` | Interview arranged | `[x] Candidate notified`, `[x] Availability received`, `[x] Interview scheduled` |
+| `decision_made` | Final decision delivered | `[x] Interview completed`, `[x] Decision logged`, `[x] Notification email sent` |
+
+---
+
+### **Sync Rule**
+
+1. After **each atomic substep**, the supervisor updates the checklist file.
+2. When a **milestone boundary** is reached (all substeps for a phase checked off),
+   the supervisor updates the corresponding `status` field in the database.
+3. The checklist remains the **fine-grained operational truth**,
+   while the database holds the **coarse-grained canonical truth**.
+
+---
+
+### **Summary**
+
+- **Checklist = micro-level progress tracker** (agent reasoning + recovery)
+- **Milestone boundaries = transition triggers** (define when to sync with DB)
+- **Database `status` = macro-level lifecycle state** (system-wide reference)
+
+This hybrid approach combines **LLM-friendly transparency** with **system-level consistency**, ensuring the agent can reason, recover, and scale safely.
+
+
+---
+
+### ***Result***
+This approach provides:
+- High autonomy
+- Strong safety boundaries
+- No context mixing
+- Clear state tracking
+- Reliable execution
+- HR keeps high-level control
+- LLM handles reasoning, routing, and next steps autonomously
+
+This structure is scalable, maintainable, and production-friendly while still pushing agent autonomy very far.
+
+
+## ⚡ ***Handling Everything Concurrently — The Async Supervisor Layer***
+---
+> *The system must support concurrent processing of multiple candidate groups, each representing a different stage in the application pipeline (e.g., CV screening, voice screening, decision). Within each group, it should be able to process batches of candidates simultaneously while preserving per-candidate isolation and state consistency.*
+
+### **Thread-Based Per-Candidate Isolation to the Rescue**
+
+To ensure deterministic, fault-tolerant, and concurrent execution, the system leverages **LangGraph thread IDs** for per-candidate isolation:
+
+1. **Supervisor Delegation**
+   The Supervisor Agent queries all candidates, groups them by their current `status` (e.g., CV screening, voice screening, decision), and passes the **list of candidate IDs** to the appropriate subagent tool.
+   Each subagent handles its own data loading, ensuring the Supervisor remains lightweight and purely orchestration-focused.
+
+2. **Subagent Execution (Thread-per-Candidate)**
+   Inside each subagent (e.g., `screen_cv`), the system iterates over all received candidate IDs.
+   For each candidate:
+   - The **candidate ID serves as the `thread_id`**, providing a unique persistent context in LangGraph.
+   - The subagent loads candidate data from the database (CV path, JD path, etc.).
+   - The CV or voice screening logic runs **within that thread’s context**.
+   - On completion, the results are written back to the database, and the per-candidate checklist and state are updated.
+
+3. **Parallel and Safe Processing**
+   Subagents can process multiple candidates concurrently by spawning asynchronous executions per `thread_id`.
+   Each candidate’s context remains isolated, preventing race conditions or context mixing.
+
+**Result:**
+- Supervisor coordinates and dispatches candidate groups
+- Subagents handle per-candidate logic using thread-based persistence
+- Each candidate’s run is self-contained, recoverable, and writes its final results back to the database
+
+> **Note:**
+> During a “Process All” operation, the Supervisor Agent executes **multiple reasoning loops**, invoking each subagent tool in sequence (e.g., `screen_cv`, `voice_screening`, `schedule_hr_interview`).
+> After each tool call, it observes the result, reasons about the next step, and continues until all candidate groups are processed.
+
+
+
+Latest chat: https://chatgpt.com/share/6920d318-3f64-8012-8fca-b17316093131
+
+---
+
+> The section below must be adapted based on the section above:
+
+...
+```mermaid
+flowchart TD
+
+HR_UI[UI: HR opens dashboard
and requests candidate status] + --> REPORT[System returns report
showing new and screened candidates] + +REPORT --> PARALLEL[Async Supervisor
launches concurrent group tasks] + +%% --- New candidate path --- +PARALLEL --> NEW_FLOW[Process new candidates
CV screening pipeline] +NEW_FLOW --> A[Delegate screening to subagent] +A --> B[Subagent screens CV] +B --> C[Write screening results to DB] +C --> D[Supervisor receives results] +D --> E{Did candidate pass screening?} +E -- No --> REJECT[Notify candidate and HR
application rejected] +E -- Yes --> PASSED[Notify candidate and HR
passed screening] + +%% --- Screened candidate path --- +PARALLEL --> SCREENED_FLOW[Process screened candidates
interview scheduling pipeline] +SCREENED_FLOW --> I[Request candidate time slots] +I --> J[Check HR calendar availability] +J --> K[Schedule interview] +K --> L[Notify HR and candidate
interview confirmed]
+
+
+
+```
+
+To support concurrent processing across groups and candidates, the supervisor now operates as an asynchronous orchestrator.
+It remains a single agent context — responsible for reasoning, reporting, and orchestration — but leverages asyncio to execute multiple workflows concurrently.
+This allows the system to:
+- Process multiple groups (e.g., new vs. screened candidates) in parallel
+- Process multiple candidates per group concurrently
+- Maintain isolation and determinism per candidate through separate state/checklist files
+
+---
+
+Conceptual Overview
+1. HR issues a high-level command (e.g., “Process all candidates”).
+2. The supervisor queries the database and identifies candidate groups.
+3. It launches async tasks for each group simultaneously.
+4. Within each group, candidates are processed concurrently — each running the deterministic checklist logic described above.
+5. The supervisor awaits completion of all group tasks and reports progress and results.
+
+This preserves:
+- ✅ Single supervisor reasoning context
+- ✅ Concurrent group + per-candidate execution
+- ✅ Isolated per-candidate state and file I/O
+- ✅ High throughput without context bleed
+
+---
+
+***Conceptual Async Code Example***
+
+```python
+import asyncio
+from typing import List
+
+# NOTE: load_candidate_state, parse_and_screen_cv, schedule_interview,
+# save_candidate_state, and get_candidate_report are assumed helpers.
+
+# --- Candidate-level deterministic flow ---
+async def process_candidate(candidate):
+    """Execute the per-candidate checklist and state transitions."""
+    state = await load_candidate_state(candidate.id)
+
+    if state == "cv_uploaded":
+        await parse_and_screen_cv(candidate)
+        state = "screened"  # advance the state after the atomic action
+    elif state == "screened":
+        await schedule_interview(candidate)
+        state = "interview_scheduled"
+    # ... additional states here
+
+    await save_candidate_state(candidate.id, state)
+    print(f"✅ Candidate {candidate.id} processed ({state})")
+
+
+# --- Group-level concurrent handler ---
+async def process_group(candidates: List, group_name: str):
+    """Handle all candidates in one group concurrently."""
+    print(f"⚙️ Processing group: {group_name} ({len(candidates)} candidates)")
+    tasks = [process_candidate(c) for c in candidates]
+    await asyncio.gather(*tasks)
+    print(f"✅ Group {group_name} completed")
+
+
+# --- Main supervisor orchestration ---
+async def supervisor_run():
+    """Supervisor orchestrates all concurrent candidate workflows."""
+    print("🧠 Supervisor initialized")
+
+    # Query database and classify candidates
+    report = await get_candidate_report()
+    new_candidates = report["new"]
+    screened_candidates = report["screened"]
+
+    # Launch group workflows concurrently
+    await asyncio.gather(
+        process_group(new_candidates, "new_candidates"),
+        process_group(screened_candidates, "screened_candidates")
+    )
+
+    print("🎯 All candidate groups processed successfully")
+
+
+# --- Entry point ---
+if __name__ == "__main__":
+    asyncio.run(supervisor_run())
+```
+
+---
+***Key Properties***
+- **Async orchestration, single agent:** The supervisor coordinates all tasks without duplicating reasoning contexts.
+- **Per-candidate determinism:** Each checklist/state file is loaded, updated, and written atomically.
+- **Parallel group execution:** New and screened candidates can be processed simultaneously.
+- **Scalability path:** The same async structure can later integrate with LangGraph’s parallel nodes or distributed queues.
\ No newline at end of file diff --git a/docs/agents/supervisor/mvps/mvp2/single_candidate_mvp.md b/docs/agents/supervisor/mvps/mvp2/single_candidate_mvp.md new file mode 100644 index 0000000000000000000000000000000000000000..754f601d90534371ce026222a627ba71d29ff37d --- /dev/null +++ b/docs/agents/supervisor/mvps/mvp2/single_candidate_mvp.md @@ -0,0 +1,257 @@ + +# ***Single-Candidate MVP — Tick Box Milestones*** +--- + +This V1 map outlines the simplest complete workflow for the HR agent system: HR enters the UI, receives a clear overview of all candidates and their screening status, and then chooses one of two actions—process new applicants or continue with those who are already screened. The supervisor then carries out the chosen workflow step-by-step, invoking the appropriate subagents (screening, Gmail, calendar) and updating each candidate’s state accordingly. + + +## **V2 Workflow Diagram — HR → Supervisor → Actions** +```mermaid +flowchart TD + + HR[HR opens UI] --> Q[Supervisor generates status report: applicants, screened, passed, failed] + + Q --> HR_Decision{HR chooses action} + + HR_Decision --> A[Option A: Process new applicant] + HR_Decision --> B[Option B: Process screened applicant] + + %% Option A path + A --> A1[Supervisor triggers screening] + A1 --> A2[Subagent screens candidate] + A2 --> A3[Screening result stored in DB] + A3 --> A4[Candidate status updated] + + %% Option B path + B --> B_DECISION{Candidate accepted or declined?} + + B_DECISION --> B1A[If accepted: Gmail + Calendar workflow] + B_DECISION --> B1B[If declined: Gmail rejection email] + + %% End nodes + A4 --> END((Done)) + B1A --> END + B1B --> END + +``` + +# Single-Candidate MVP — General Tick Box Milestones + +## **1) CV Upload + Candidate Storage** +- [ ] Candidate record created in DB (manual trigger OK) +- [ ] CV file uploaded (manual OK for MVP) +- [ ] CV parsed into structured format +- [ ] Parsed CV stored in DB linked to candidate +- [ ] Candidate status set to `uploaded` + +--- + +## **2) Supervisor + Basic UI (Single Entry Point)** + +### **2.1 HR Status Query & Reporting** +- [ ] Supervisor responds to: “What is the current status quo?” +- [ ] Supervisor reports total number of applicants +- [ ] Supervisor reports how many have been screened +- [ ] Supervisor reports how many passed vs failed screening +- [ ] Supervisor presents results as a clear HR-friendly summary report + +### **2.2 Supervisor Actions** +- [ ] “Process new applicant” option available +- [ ] “Process screened applicant” option available + +### **2.3 Action Logic** +- [ ] Choosing “process new applicant” triggers initial screening workflow +- [ ] Choosing “process screened applicant” triggers: + - [ ] Gmail + calendar workflow for accepted candidates + - [ ] Gmail rejection workflow for declined candidates + +### **2.4 State Updates** +- [ ] Supervisor updates candidate status after each action +- [ ] Supervisor logs each action outcome into DB + +--- + +## **3) CV Screening Subagent** +- [ ] Subagent loads candidate record +- [ ] Subagent loads stored CV text +- [ ] Subagent performs CV screening +- [ ] Structured screening result saved to DB +- [ ] Candidate status updated to `screened` +- [ ] Supervisor UI reflects updated screening status + +--- + +## **4) Per-Candidate Deterministic State Machine** +- [ ] Each candidate has a dedicated state object (e.g., `state: "cv_uploaded"`) +- [ ] State object persisted in DB or file +- [ ] Supervisor reads this state before taking actions +- [ ] Supervisor updates the state after 
actions +- [ ] Workflow remains predictable and restartable per candidate +- [ ] Each candidate’s context is isolated (no cross-candidate bleed) + +--- + +## **5) Per-Candidate Checklist File** +- [ ] A Markdown checklist file is created per candidate +- [ ] Checklist is updated at each atomic step +- [ ] Supervisor loads checklist to determine next required action +- [ ] Checklist mirrors real workflow steps (upload → screening → notification → scheduling) +- [ ] Checklist allows deterministic progress tracking +- [ ] Checklist helps ensure one atomic action per supervisor step + + + +--- + + +# 🧭 Incremental Implementation Roadmap — From Skeleton to MVP + +This roadmap breaks the HR Agent MVP into incremental, testable phases. +Each phase builds on the previous one — keeping the system functional at every step while increasing autonomy, determinism, and traceability. + +A key invariant across all phases: +> 🧩 **Each atomic action must update the per-candidate Markdown checklist** +> The checklist is the single source of truth for progress, state recovery, and auditability. + +***Checklist:*** +```text +# Candidate Checklist — ID 123 + +- [ ] CV uploaded, parsed and stored +- [ ] CV screening (pass / fail) +- [ ] Screening results notified to candidate & HR +- [ ] Asked candidate for available time slots +- [ ] Received candidate availability +- [ ] Checked HR availability +- [ ] Scheduled interview +- [ ] Final confirmation email sent +``` +--- + +## **Phase 1 — Foundation Layer: Candidate I/O + Static State** + +Goal: Establish basic data persistence and candidate representation. +Outcome: You can manually create a candidate, attach a CV, and store structured data. + +- [ ] DB schema or JSON store for candidates + - `candidate_id`, `name`, `cv_path`, `state`, `screening_result` +- [ ] Manual script or simple UI form to create a candidate +- [ ] Upload CV file (manual or API endpoint) +- [ ] Parse CV into structured format +- [ ] Store parsed CV in DB +- [ ] Candidate state set to `"cv_uploaded"` +- [ ] ✅ **Checklist updated after each atomic step:** + - `[x] CV uploaded` + - `[x] CV parsed` + - `[x] Candidate stored` + +--- + +## **Phase 2 — Supervisor Core Logic (CLI or Barebones UI)** + +Goal: Implement the supervisor agent’s reasoning + reporting loop for one candidate. +Outcome: HR can ask status questions and issue simple actions. + +- [ ] Simple command interface or Streamlit-like dashboard +- [ ] Supervisor responds to: “What’s the current status quo?” +- [ ] Supervisor reports: + - [ ] Total number of applicants + - [ ] Number screened + - [ ] Number passed vs failed +- [ ] HR can issue commands: + - [ ] `process_new_applicant` + - [ ] `process_screened_applicant` +- [ ] Supervisor executes one step → updates candidate state +- [ ] State transitions logged +- [ ] ✅ **Checklist updated automatically after each supervisor action** + +--- + +## **Phase 3 — Screening Subagent Integration** + +Goal: Automate CV evaluation and store results. +Outcome: Supervisor delegates screening to subagent and records outcome. 
+ +- [ ] Subagent loads candidate record + parsed CV +- [ ] Runs screening model (mock or real) +- [ ] Produces structured result (e.g., `{passed: True, score: 85}`) +- [ ] Saves result in DB +- [ ] Candidate state transitions: + - `"cv_uploaded"` → `"screened_passed"` or `"screened_failed"` +- [ ] Supervisor report reflects updated screening counts +- [ ] ✅ **Checklist updated:** + - `[x] Screening started` + - `[x] Screening completed` + - `[x] Result stored` + +--- + +## **Phase 4 — Candidate Communication Layer (Gmail Integration)** + +Goal: Automate candidate notifications based on screening results. +Outcome: Supervisor triggers Gmail subagent to send emails. + +- [ ] Gmail subagent setup +- [ ] For `screened_failed`: send rejection email +- [ ] For `screened_passed`: send congratulations + request time slots +- [ ] Email status stored in DB +- [ ] Candidate state transitions: + - `"screened_failed"` → `"notified_rejection"` + - `"screened_passed"` → `"awaiting_time_slots"` +- [ ] ✅ **Checklist updated:** + - `[x] Candidate notified of result` + - `[x] Email status recorded` + +--- + +## **Phase 5 — Interview Scheduling Layer (Calendar Integration)** + +Goal: Automate scheduling for passed candidates. +Outcome: Supervisor matches availability and books meetings. + +- [ ] Calendar subagent setup +- [ ] Candidate time slot parsing (from email reply or mock) +- [ ] Check HR calendar availability +- [ ] Schedule meeting and send confirmation email +- [ ] Candidate state transitions: + - `"awaiting_time_slots"` → `"interview_scheduled"` +- [ ] ✅ **Checklist updated:** + - `[x] Candidate provided time slots` + - `[x] HR availability checked` + - `[x] Interview scheduled` + - `[x] Confirmation sent` + +--- + +# Advanced System Features - Future Directions for Next MVP + +## **Phase 6 — Determinism + Persistence Layer** + +Goal: Make the system predictable, restartable, and auditable. +Outcome: Each candidate runs in isolation with explicit state and checklist. + +- [ ] Per-candidate state object stored in DB or JSON +- [ ] Per-candidate Markdown checklist file created: + + +## **Phase 7 — Async Supervisor Orchestration (Scaling Up)** + +Goal: Extend single-candidate flow to multiple candidates concurrently. +Outcome: Async orchestration across candidate groups. + +- [ ] Async supervisor runs concurrently across groups: + - [ ] New candidates → screening path + - [ ] Screened candidates → scheduling path +- [ ] Each candidate processed in an isolated coroutine +- [ ] Checklist + state ensure deterministic behavior per candidate +- [ ] Supervisor reports real-time progress +- [ ] Single reasoning context preserved +- [ ] ✅ **Each candidate’s checklist still governs progression, ensuring isolation and safe concurrency** +- [ ] Supervisor awaits all async tasks and compiles final HR summary report +- [ ] System supports safe restarts (resume from last known checklist state) +- [ ] Scalable to LangGraph or distributed queue orchestration later on + +This roadmap breaks the HR Agent MVP into incremental, testable phases. +Each phase builds logically on the previous one — so the system remains functional at every step while increasing in autonomy and complexity. 
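+
+To make the checklist invariant concrete, here is a minimal sketch of the kind of helper the phases above assume. It is illustrative only: the `ChecklistManager` name, the `users/<id>/checklist.md` layout, and the method names are assumptions, not the project's actual API.
+
+```python
+from pathlib import Path
+
+class ChecklistManager:
+    """Minimal per-candidate Markdown checklist helper (illustrative sketch)."""
+
+    def __init__(self, root: Path = Path("users")):  # assumed storage root
+        self.root = root
+
+    def _path(self, candidate_id: int) -> Path:
+        return self.root / str(candidate_id) / "checklist.md"
+
+    def next_open_item(self, candidate_id: int) -> str | None:
+        """Return the first unchecked step, i.e. the next atomic action."""
+        for line in self._path(candidate_id).read_text(encoding="utf-8").splitlines():
+            if line.startswith("- [ ] "):
+                return line[len("- [ ] "):]
+        return None
+
+    def mark_done(self, candidate_id: int, item: str) -> None:
+        """Flip '- [ ] <item>' to '- [x] <item>' and persist atomically."""
+        path = self._path(candidate_id)
+        updated = path.read_text(encoding="utf-8").replace(
+            f"- [ ] {item}", f"- [x] {item}", 1
+        )
+        tmp = path.with_name(path.name + ".tmp")
+        tmp.write_text(updated, encoding="utf-8")
+        tmp.replace(path)  # atomic rename: readers never see a half-written file
+```
+
+Because every atomic action funnels through `mark_done`, a crashed run can always resume by calling `next_open_item` and continuing from the first unchecked box.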
+ +--- diff --git a/docs/agents/supervisor/mvps/mvp2/tools_needed.md b/docs/agents/supervisor/mvps/mvp2/tools_needed.md new file mode 100644 index 0000000000000000000000000000000000000000..794c8cca59ebf30f2c8dab0f882e2268e06c125f --- /dev/null +++ b/docs/agents/supervisor/mvps/mvp2/tools_needed.md @@ -0,0 +1,110 @@ +# ⚙️ Tools Required for MVP (Phases 1–5) + +The system consists of a **Supervisor Agent** (central orchestrator) and several **specialized subagents**. +Each tool corresponds to a specific capability needed for the single-candidate MVP flow — from CV upload to interview scheduling. + +--- + +## 🧠 **Supervisor Agent (Core Orchestrator)** + +### **Role** +Acts as the **central controller**: +- Interfaces with HR (via UI or chat) +- Handles reasoning, status reporting, and command interpretation +- Orchestrates subagents (screening, Gmail, calendar) +- Updates both the database and checklist after each atomic action + +### **Tools Required** + +| Tool | Purpose | Used in Phase | +|------|----------|---------------| +| 🗃️ **Database Tool / ORM Adapter** | Read, create, and update `Candidate` records (status, CV paths, results). | 1–5 | +| 📄 **File I/O Tool** | Read/write per-candidate checklist and parsed CV files. | 1–5 | +| 📊 **Reporting Helper / Aggregation Utility** | Query DB and summarize candidate counts (new, screened, passed, failed). | 2 | +| 🧩 **Subagent Dispatch Interface** | Send structured tasks to subagents (screening, Gmail, calendar). | 3–5 | +| 🧱 **State Manager** | Load and update candidate’s deterministic state object (`state`, `checklist`, `status`). | 4–5 | +| 🕵️ **HR Command Parser / Intent Handler** | Interpret HR’s natural-language instructions (e.g., “process new applicants”). | 2 | + +--- + +## 🤖 **Subagents & Their Tools** + +### **1. CV Screening Subagent** +**Phase:** 3 +**Purpose:** Automatically screen and score CVs. + +| Tool | Purpose | +|------|----------| +| 📄 **CV Parser** | Extract structured text from uploaded CV (PDF/DOCX). | +| 🧮 **Screening Model / Classifier** | Evaluate parsed CV using rules or an ML/LLM-based model. | +| 🗃️ **DB Access Tool** | Save screening results and update `Candidate.status = cv_screened`. | +| 📄 **Checklist Writer** | Mark `[x] Screening started` and `[x] Screening completed` in the checklist. | + +--- + +### **2. Gmail Subagent** +**Phase:** 4 +**Purpose:** Send automated emails to candidates based on screening results. + +| Tool | Purpose | +|------|----------| +| 📬 **Gmail API Wrapper** | Send templated emails (rejection or invitation). | +| 🧠 **Template Manager** | Store and select appropriate email templates. | +| 🗃️ **DB Access Tool** | Record email activity and update candidate state. | +| 📄 **Checklist Writer** | Mark `[x] Candidate notified` and `[x] Email status recorded`. | + +--- + +### **3. Calendar / Scheduling Subagent** +**Phase:** 5 +**Purpose:** Automate interview scheduling for passed candidates. + +| Tool | Purpose | +|------|----------| +| 🗓️ **Google Calendar API Wrapper** | Retrieve HR availability and create interview events. | +| 📬 **Gmail API (reuse)** | Send scheduling confirmations and time slot requests. | +| 🧠 **Availability Matcher** | Compare candidate-provided slots vs HR calendar availability. | +| 🗃️ **DB Access Tool** | Update `Candidate.status = interview_scheduled`. | +| 📄 **Checklist Writer** | Mark `[x] HR availability checked`, `[x] Interview scheduled`, `[x] Confirmation sent`. 
| + +--- + +## 🧾 **Cross-Cutting Utilities (Shared by All Agents)** + +| Utility | Purpose | Used by | +|----------|----------|----------| +| 🧩 **Checklist Manager** | CRUD operations on per-candidate Markdown checklist files (load, mark, persist). | All | +| 🧱 **State Sync Layer** | Sync checklist milestone boundaries with DB `status` updates. | Supervisor | +| ⏱️ **Logging & Audit Utility** | Record all actions, errors, and state transitions. | All | +| 🧮 **Config / Environment Loader** | Manage API keys, paths, and credentials for Gmail & Calendar. | All networked agents | + +--- + +## 🧭 **Tool Overview by Phase** + +| Phase | Supervisor Tools | Subagents / Tools | +|-------|-------------------|------------------| +| **1 — Candidate I/O + Storage** | DB Adapter, File I/O, Checklist Manager | CV Parser (manual for MVP) | +| **2 — Supervisor + UI** | HR Command Parser, DB Reporter, Checklist Manager | — | +| **3 — CV Screening** | Subagent Dispatcher, Checklist Manager | CV Parser, Screening Model, DB Writer | +| **4 — Candidate Communication** | Subagent Dispatcher, Checklist Manager | Gmail API, Template Manager | +| **5 — Interview Scheduling** | Subagent Dispatcher, Checklist Manager | Calendar API, Availability Matcher, Gmail API | + +--- + +## ✅ **Summary** + +- **Supervisor Agent Tools:** + - DB Adapter + - Checklist Manager + - HR Interface (UI or CLI) + - Subagent Dispatcher + - State Sync Logic + +- **Subagents:** + - **Screening Subagent:** CV Parser + Screening Model + - **Gmail Subagent:** Email Templating + Send API + - **Calendar Subagent:** Scheduling + Availability Matching + +Together, these tools form the complete single-candidate MVP pipeline — +from candidate intake → CV screening → communication → interview scheduling. diff --git a/docs/agents/supervisor/mvps/overview.md b/docs/agents/supervisor/mvps/overview.md new file mode 100644 index 0000000000000000000000000000000000000000..1d12531e9919ddbf666a1431c59af3c33ee538e4 --- /dev/null +++ b/docs/agents/supervisor/mvps/overview.md @@ -0,0 +1,64 @@ +# **Application Flow** + +## **MVP Version #2** + + +```mermaid +flowchart TD + +A[CV Upload] --> B[Store & Parse CV] + +B --> C[Trigger: CV Screening] + +C --> D[Supervisor Delegates
Screening to Subagent] + +D --> E[Write Results to DB] + +E --> F[Supervisor Receives Screening Results] + +F --> G{Did Candidate Pass
CV Screening?} + +G -- No --> H[Notify Candidate & HR:
Application Rejected] + +G -- Yes --> I[Notify Candidate & HR:
Passed Screening] + +I --> J[Request Candidate's
Available Time Slots] + +J --> K[Check HR Calendar
Availability] + +K --> L[Schedule Person-Person
Interview] + +L --> M[Notify HR & Candidate
Interview Confirmed]
+```
+
+The first goal is to ensure the application flow works autonomously as follows:
+
+### **1) CV Submission**
+- Applicant submits CV
+- CV is parsed and stored
+
+### **2) CV Screening**
+- Supervisor agent becomes aware that a CV was uploaded
+- A **“CV was uploaded” signal** triggers the supervisor to delegate **CV screening** to a sub-agent
+- Results are written to the database & candidate status is updated
+- A **“CV was screened” signal** notifies the supervisor that results are available, so it can read them from the DB
+
+### **3) Success / Failure Notification**
+
+Based on CV screening results that the supervisor injected into its context, it decides:
+
+#### **a) Not Passed**
+- Trigger sub-agent to notify candidate *and* HR via email that the candidate did not meet requirements
+
+#### **b) Passed**
+- Trigger sub-agent to:
+  - Notify candidate and HR of the successful CV screening
+  - Ask candidate to provide several available time slots
+  - Check HR’s available time slots in their calendar
+  - Schedule a meeting based on overlapping availability
+  - Notify HR that candidate X passed CV screening and that an interview was scheduled on **`dd-mm-yyyy`**
+
+
+
+## **Final**
+*(To be defined)*
diff --git a/docs/agents/supervisor/supervisor_general.md b/docs/agents/supervisor/supervisor_general.md new file mode 100644 index 0000000000000000000000000000000000000000..a233d3d3fb5a8ebf4d5a23ae0b2c96ab2eb70fcb --- /dev/null +++ b/docs/agents/supervisor/supervisor_general.md @@ -0,0 +1,120 @@
+# Supervisor Agent Overview
+This document explains the role, behavior, and context-engineering strategy of the Supervisor Agent used in the agentic HR recruitment system. It describes how the supervisor plans, coordinates, delegates, and adapts the multi-agent workflow, and how context is compressed, summarized, and managed to maintain robustness across long-running interactions.
+
+It also explains the context-engineering strategies used to reduce token usage while increasing the agents' reliability and task-completion rate.
+
+---
+
+## 1. Purpose of the Supervisor Agent
+The Supervisor Agent serves as the centralized orchestrator responsible for maintaining global workflow control. Its role is to ensure that each subagent operates in the correct order, under the correct conditions, with the right context, and with full visibility into progress and failures.
+
+At a high level, the supervisor is responsible for:
+- Generating and maintaining the end-to-end hiring plan.
+- Determining which subagent should execute next.
+- Providing stateful context to each subagent.
+- Performing adaptive re-planning when outputs or conditions change.
+- Managing memory summaries and preventing context pollution.
+- Producing explainable logs and reasoning traces for the dashboard.
+
+This agent ensures that the entire HR pipeline behaves autonomously while remaining transparent, safe, and resilient.
+
+---
+
+## 2. High-Level Workflow
+The supervisor follows a structured, plan-driven execution model. The default plan is:
+
+1. CV Screening
+2. Voice Screening
+3. HR Interview Scheduling
+4. Final Decision Report
+
+However, execution is not strictly linear. The supervisor can skip, re-order, repeat, or halt steps based on subagent outputs.
+
+### Workflow Model
+1. **Initialize Plan**
+   - Build an initial sequence of workflow stages based on available candidate data.
+   - Load any relevant memory summaries or past runs from the database.
+
+2.
**Select Next Stage**
+   - Evaluate current state.
+   - Choose the next subagent based on plan progress and real-time results.
+
+3. **Construct Subagent Context Package**
+   - Provide the subagent with the minimal context it needs:
+     - Candidate details
+     - Results from prior stages
+     - Relevant memory summaries
+     - Tool call history (when helpful)
+     - Current workflow goal
+
+4. **Invoke the Subagent**
+   - The selected agent executes a task using LangGraph tool calls or MCP integrations.
+   - Outputs are validated and stored in state and database.
+
+5. **Reflect and Update Plan**
+   - The supervisor generates a lightweight reflection summary:
+     "What happened?", "Is the result valid?", "What is needed next?"
+   - If conditions changed, the supervisor updates the plan accordingly.
+     Examples:
+     - Skip voice screening if CV screening fails.
+     - Retry scheduling if the calendar shows no available slots.
+     - Pause and request HR confirmation when required.
+
+6. **Persist Memory**
+   - Summaries, transcripts, evaluations, and structured results are stored to prevent context bloating.
+   - Only relevant compact memory is injected back into future steps.
+
+7. **Repeat Until Complete**
+
+---
+
+## 3. Task Delegation Strategy
+The supervisor delegates tasks to subagents based on rule-based planning combined with lightweight LLM reasoning.
+
+### Delegation Logic
+- **CV Screening Agent** is invoked when a new applicant is added or a CV is updated.
+- **Voice Screening Agent** is invoked when the candidate passes CV screening and HR or the plan flags them as suitable for a phone screen.
+- **Scheduler Agent** is invoked once voice screening produces a valid transcript and evaluation.
+- **Decision Agent** is invoked after all stages complete, or when early rejection is clear.
+
+Each subagent returns structured outputs that the supervisor uses to drive the next step.
+
+---
+
+## 4. Adaptive Re-Planning
+One of the supervisor’s core responsibilities is to respond dynamically to real-world conditions.
+
+Examples of adaptive behaviors:
+
+- **Tool failure**: If Gmail or Calendar returns an MCP error, the supervisor retries, selects an alternative interaction path, or defers the step.
+- **Calendar constraints**: If no timeslots are available, the supervisor generates an alternate plan.
+- **Candidate status shifts**: If a candidate responds late or provides new documents, the supervisor reevaluates the plan.
+- **Voice call failure**: If the candidate does not pick up, the supervisor schedules a retry or sends an email follow-up.
+
+This makes the workflow robust, autonomous, and consistent with agentic paradigms.
+
+In unique situations, such as exceptional applicants, the agent can accelerate the timeline, for example by skipping stages. This is entirely at the agent's discretion.
+
+---
+
+## 5. Context Engineering Strategy
+We apply context engineering at many points in the workflow. Each subagent instance is stateless, meaning that it doesn't retain any memory or context from previous turns. This reduces token usage, since information from previous applicants is not relevant for new tasks. A subagent's state is reset for each new task, but retained until the supervisor determines the subagent has fully completed all necessary steps.
+
+Each time a subagent completes a task, the supervisor is given the full response. After the supervisor performs its next action, the subagent response is compacted into a high-level summary.
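+
+A minimal sketch of that compaction step, assuming the OpenAI chat completions API and a small summarizer model such as `gpt-4o-mini`; the function name and prompt are illustrative, not the actual implementation:
+
+```python
+from openai import OpenAI
+
+client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
+
+def compact_subagent_response(full_response: str, max_words: int = 80) -> str:
+    """Compress a verbose subagent result into the high-level summary
+    that replaces the full text in the supervisor's context."""
+    completion = client.chat.completions.create(
+        model="gpt-4o-mini",  # assumed summarizer model
+        messages=[
+            {
+                "role": "system",
+                "content": (
+                    f"Summarize the following agent output in at most {max_words} "
+                    "words. Preserve candidate IDs, statuses, and decisions."
+                ),
+            },
+            {"role": "user", "content": full_response},
+        ],
+    )
+    return completion.choices[0].message.content
+```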
+
+The supervisor agent itself is also stateless in a sense: its context is applicant-dependent. Real-life timelines are messy, and multiple applicants will have overlapping steps. Each time the supervisor agent needs to perform a new task, it loads the context for the applicant it is working on, and unloads it when it is done. This way, the supervisor stays focused on a specific applicant and does not get confused or distracted by managing multiple applicants at the same time. The state for this agent is saved in temporary files, which are deleted once a final decision (hire or discard) has been made for the applicant.
+
+---
+
+## 6. Explainability and Dashboard Integration
+The supervisor produces the trace outputs that power the Gradio dashboard.
+
+This includes:
+- Plan state (past, current, future)
+- Tool call logs
+- Reasoning and reflection summaries
+- Active memory excerpts
+- Subagent outputs
+- Error messages and fallback paths
+
+Because a single agent manages the plan, dashboard integration remains consistent and interpretable.
\ No newline at end of file
diff --git a/docs/agents/voice_screening.md b/docs/agents/voice_screening.md
new file mode 100644
index 0000000000000000000000000000000000000000..beb362390793b36587754bd943958114942382c1
--- /dev/null
+++ b/docs/agents/voice_screening.md
@@ -0,0 +1,336 @@
+# Voice Screening MVP
+
+## Overview
+
+The **Voice Screening MVP** provides a simple browser-based voice interview interface using Streamlit and the OpenAI Realtime API. This is a simplified implementation that removes the complexity of LangGraph agents, Twilio telephony, and FastAPI servers.
+
+## Architecture
+
+**Simple MVP Architecture:**
+- **Streamlit UI**: Web interface with toggle recording button
+- **WebSocket Proxy**: FastAPI proxy for browser WebSocket authentication
+- **OpenAI Realtime API**: Real-time speech-to-speech via WebSocket
+- **Real-time transcription**: Live transcript display
+- **Real-time TTS**: Audio playback in browser with sequential queue
+- **Simple backend**: Post-session analysis and database storage
+
+## Components
+
+| Component | Purpose |
+|------------|----------|
+| **Streamlit UI** | Main interface with interview controls and transcript display |
+| **HTML/JavaScript Component** | WebSocket connection via proxy, audio recording/playback with queue |
+| **WebSocket Proxy** | FastAPI service to handle OpenAI authentication (browsers can't set custom headers) |
+| **OpenAI Realtime API** | Handles speech-to-text and text-to-speech in real-time (gpt-4o-mini-realtime-preview) |
+| **Analysis Function** | Simple GPT-4 analysis of transcript (no LangGraph) |
+| **Database Utilities** | Save results to database |
+
+## Flow
+
+```text
+User enters email and requests authentication code
+↓
+Proxy generates 6-digit code (MVP: displayed directly; production: sent via email/SMS)
+↓
+User enters email and code to verify
+↓
+Proxy validates code and returns session token
+↓
+User clicks "Start Interview"
+↓
+Browser opens WebSocket to WebSocket Proxy (with session token in query param)
+↓
+Proxy validates session token
+↓
+Proxy forwards connection to OpenAI Realtime API with API key authentication
+↓
+Proxy configures OpenAI session (modalities, instructions, voice, etc.)
+↓ +Proxy sends greeting request to OpenAI +↓ +Agent greets candidate (first TTS response) +↓ +User clicks mic button to start recording (toggle on) +↓ +Browser streams audio chunks to OpenAI via proxy +↓ +OpenAI returns transcriptions + TTS audio in real-time +↓ +Audio chunks queued and played sequentially in browser +↓ +Transcript shown live in Streamlit UI +↓ +User clicks mic button again to stop and send (toggle off) +↓ +Audio buffer committed to OpenAI +↓ +User clicks "End Interview" +↓ +Send transcript to backend for analysis +↓ +GPT-4 analyzes transcript (sentiment, confidence, communication) +↓ +Results saved to database +``` + +## Implementation Details + +### Streamlit UI (`src/voice_screening_ui/app.py`) + +**Features:** +- **Authentication screen**: Email and code input fields +- "Start Interview" button to initialize session +- Toggle microphone button (click to start, click again to stop and send) +- Live transcript display area +- Session controls (end interview, logout) +- Analysis and results display +- Database integration +- Debug panel for connection and audio troubleshooting + +**Session State:** +- `session_token`: Authentication token from proxy +- `user_email`: Authenticated user's email +- `session_id`: Unique interview session identifier +- `transcript`: List of transcript entries +- `is_interview_active`: Boolean flag for active session +- `candidate_id`: Candidate UUID + +### HTML/JavaScript Component (`src/voice_screening_ui/components/voice_interface.html`) + +**Features:** +- WebSocket connection to WebSocket Proxy with session token authentication +- Audio recording via browser ScriptProcessor API (PCM16) +- Audio playback via Web Audio API with sequential queue +- Real-time transcript updates +- Toggle recording (click to start/stop) +- Audio resampling from 24kHz to browser sample rate +- Debug panel for troubleshooting + +**Key Functions:** +- `connectWebSocket()`: Establishes connection to WebSocket Proxy (with session token) +- `toggleRecording()`: Toggles recording state (start/stop) +- `startRecording()`: Captures microphone audio and streams to API +- `stopRecording()`: Stops recording and commits audio buffer +- `handleRealtimeMessage()`: Processes responses from OpenAI +- `queueAudioChunk()`: Queues audio chunks for sequential playback +- `processAudioQueue()`: Plays audio chunks one at a time +- `playAudioChunk()`: Decodes and plays individual TTS audio chunks + +**Note:** Session configuration and greeting are now handled by the proxy, not the client. 
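+
+For reference, the `input_audio_buffer.append` payload the component streams is just base64-encoded little-endian PCM16 wrapped in a JSON event. A minimal Python sketch of the same packing (illustrative only; the real logic lives in the JS component):
+
+```python
+import base64
+import json
+import struct
+
+def pcm16_append_event(samples: list[float]) -> str:
+    """Pack float samples in [-1, 1] into little-endian PCM16 and wrap them in a Realtime API event."""
+    clamped = [max(-1.0, min(1.0, s)) for s in samples]
+    pcm = struct.pack("<%dh" % len(clamped), *(int(s * 32767) for s in clamped))
+    return json.dumps({
+        "type": "input_audio_buffer.append",
+        "audio": base64.b64encode(pcm).decode("ascii"),
+    })
+```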
+
+### Analysis Function (`src/voice_screening_ui/analysis.py`)
+
+**Simple function** (no LangGraph):
+- Receives transcript text
+- Uses OpenAI GPT-4 with structured output
+- Returns `VoiceScreeningOutput` with scores and summary
+- No agent nodes or graph execution
+
+### Database Integration (`src/voice_screening_ui/utils/db.py`)
+
+**Function:**
+- `write_voice_results_to_db()`: Saves results to database
+- Updates candidate status to `voice_done`
+- Uses existing `VoiceScreeningResult` model
+
+### WebSocket Proxy (`src/voice_screening_ui/proxy.py`)
+
+**Features:**
+- **Authentication endpoints**: `/auth/login` and `/auth/verify`
+- **Session management**: In-memory token storage (MVP; use Redis/DB in production)
+- **WebSocket proxy**: `/ws/realtime` endpoint with session token validation
+- **Session configuration**: Handles OpenAI session setup server-side
+- **Greeting**: Automatically sends greeting after session configuration
+- **Health check**: `/health` endpoint for monitoring
+
+**Authentication Flow:**
+1. User requests code via `POST /auth/login` with email
+2. Proxy generates 6-digit code (MVP: returns directly; production: send via email/SMS)
+3. User verifies via `POST /auth/verify` with email and code
+4. Proxy validates and returns session token (valid for 1 hour)
+5. WebSocket connection requires `token` query parameter
+
+**Session Configuration:**
+- Moved from frontend to proxy for better security and control
+- Configured automatically when WebSocket connects
+- Includes modalities, instructions, voice, audio format, turn detection
+
+## Environment Variables
+
+```bash
+OPENAI_API_KEY=your_openai_api_key  # Required for Realtime API (stored in proxy only)
+```
+
+**Security:**
+- API key stored in proxy environment variables (never exposed to browser)
+- User authentication via email/code before WebSocket access
+- Session tokens expire after 1 hour
+- Auth codes expire after 10 minutes
+- Proxy handles all OpenAI authentication server-side
+
+## Usage
+
+### Running the Application
+
+```bash
+# Using Streamlit directly
+streamlit run src/voice_screening_ui/app.py
+
+# Or via Docker (Streamlit service)
+docker compose up voice_screening
+```
+
+#### Troubleshooting tips
+
+- If you see a warning about an env variable not being set, pass the `.env` file manually and rebuild after taking the stack down (subsequent builds will be faster due to Docker layer caching):
+```bash
+cd docker
+docker-compose --env-file "../.env" up voice_screening -d --build
+```
+
+- Run Streamlit with the Python path set:
+```bash
+PYTHONPATH=. streamlit run src/voice_screening_ui/app.py
+```
+
+### User Flow
+
+1. Start WebSocket proxy: `docker compose up websocket_proxy` (or run `python src/voice_screening_ui/proxy.py`)
+2. Open Streamlit UI at `http://localhost:8502` (or configured port)
+3. **Authentication:**
+   - Enter your email address
+   - Click "Request Code" to get authentication code
+   - Enter the code and click "Verify & Login"
+4. Enter candidate email (optional for MVP)
+5. Click "Start Interview"
+6. Browser requests microphone permission
+7. WebSocket connects to proxy with session token (proxy connects to OpenAI Realtime API)
+8. Proxy configures session and sends greeting
+9. Agent greets candidate
+10. User clicks mic button to start recording
+11. User speaks, audio streams to OpenAI
+12. Transcript appears in real-time
+13. Agent responds with audio (played sequentially)
+14. User clicks mic button again to stop and send
+15. User clicks "End Interview"
+16. 
Click "Analyze Interview" to get results +17. Optionally save results to database +18. Click "Logout" to end session + +## Technical Details + +### OpenAI Realtime API + +**WebSocket Connection:** +- Model: `gpt-realtime-mini` +- URL: `wss://api.openai.com/v1/realtime?model=gpt-realtime-mini` +- Headers: `Authorization: Bearer {API_KEY}`, `OpenAI-Beta: realtime=v1` +- Format: PCM16 audio at 24kHz, JSON messages +- Turn Detection: Server-side VAD with 10s silence duration (prevents auto-commit during recording) + +**Key Message Types:** +- `session.update`: Configure session (modalities, voice, instructions) +- `input_audio_buffer.append`: Send audio chunks +- `input_audio_buffer.commit`: Commit audio for processing +- `response.audio_transcript.done`: Receive transcriptions +- `response.audio.delta`: Receive TTS audio chunks +- `response.text.done`: Receive text responses + +### Audio Processing + +**Recording:** +- Uses browser `ScriptProcessor` API (deprecated but functional) +- Captures audio at browser sample rate (typically 44.1kHz or 48kHz) +- Converts to PCM16 format +- Encodes to base64 for WebSocket transmission +- Streams chunks via `input_audio_buffer.append` +- Commits buffer via `input_audio_buffer.commit` when recording stops + +**Playback:** +- Receives base64 PCM16 audio at 24kHz +- Decodes using `DataView` for proper byte order (little-endian) +- Converts PCM16 to Float32Array +- Resamples from 24kHz to browser sample rate using `OfflineAudioContext` +- Queues chunks for sequential playback (prevents overlapping audio) +- Plays through browser audio context + +## Simplifications from Original Design + +**Removed:** +- LangGraph agent complexity +- Twilio telephony integration +- FastAPI server +- Media Streams handling +- Complex state management +- Supervisor agent integration + +**Kept:** +- Database models and utilities +- Analysis logic (simplified) +- Streamlit UI pattern +- OpenAI Realtime API integration + +## File Structure + +``` +src/voice_screening_ui/ +├── app.py # Main Streamlit UI (with authentication screen) +├── proxy.py # WebSocket proxy with auth endpoints and session management +├── analysis.py # Simple analysis function +├── components/ +│ ├── voice_interface.html # HTML/JS for WebSocket and audio (no API key handling) +│ └── __init__.py +└── utils/ + ├── db.py # Database utilities + └── __init__.py +``` + +## Testing + +**Manual Testing:** +1. Start Streamlit app (tested and works) +2. Test WebSocket connection (tested and works) +3. Test microphone access (tested and works) +4. Test audio recording and playback (tested and works) +5. Test transcript display (not tested) +6. Test analysis function (doesn't work, need work, a lot of work) +7. Test database saving (doesn't work, need work, a lot, a lot of work) + +### Verification Script +To verify the integration of the voice screener with the candidate database and static questions, you can run the provided verification script. + +**Option 1: Run via Docker (Recommended)** +This uses the containerized environment which already has all dependencies and network access to the database. 
+```bash
+docker compose -f docker/docker-compose.yml run --rm -e POSTGRES_HOST=db websocket_proxy python tests/verify_voice_integration.py
+```
+
+**Option 2: Run Locally**
+If you prefer to run it locally, you need to install the database requirements first:
+```bash
+pip install -r requirements/db.txt
+python tests/verify_voice_integration.py
+```
+
+**Known Limitations:**
+- Uses deprecated `ScriptProcessor` API (should migrate to `AudioWorklet`)
+- Authentication codes displayed directly in MVP (should be sent via email/SMS in production)
+- Session tokens stored in-memory (should use Redis/database in production)
+- Simple error handling
+- Limited session management
+- Audio resampling may introduce slight latency
+
+## Future Enhancements
+
+- Migrate from `ScriptProcessor` to `AudioWorklet` API
+- Send authentication codes via email/SMS (instead of displaying directly)
+- Use Redis or database for session token storage (instead of in-memory)
+- Add session persistence across page refreshes
+- Improve error handling and reconnection logic
+- Add recording playback
+- Add interview question templates
+- Optimize audio resampling performance
+- Add audio level visualization
+- Add rate limiting for authentication endpoints
+- Add session refresh mechanism
+- Integrate with supervisor agent (if needed)
diff --git a/docs/context_engineering/ideas.md b/docs/context_engineering/ideas.md
new file mode 100644
index 0000000000000000000000000000000000000000..98f2d5bbe2c88324c85770490bf85a25bdfb6ca0
--- /dev/null
+++ b/docs/context_engineering/ideas.md
@@ -0,0 +1,33 @@
+# ***Context Engineering***
+---
+
+This file serves as inspiration for context engineering techniques that are currently being used by experts in the industry.
+
+***Inspired by:***
+- [Langchain](https://www.notion.so/Context-Engineering-for-Agents-2a1808527b17803ba221c2ced7eef508)
+- [Video](https://www.youtube.com/watch?v=XFCkrYHHfpQ&t=217s)
+
+
+## ***`What is context engineering?`***
+`The art and science of filling the context with just the right information for the next step.`
+
+[Chroma research](https://research.trychroma.com/context-rot) shows that increasing input tokens dramatically impacts LLM performance.
+
+
+## Notes:
+
+### 1) Tool calls
+- tool calls bloat context as they are added to the messages list, with the call itself + the tool results
+- hence flush or summarise tool results to reduce context length and keep context dense with just the important info
+- furthermore, tool definitions are usually injected at the system-prompt level, which bloats context as well and leads to confusion about which tool to use.
+
+
+### Context Offloading
+...
+
+
+### Context Compaction & Offloading
+
+
+
+
diff --git a/docs/entrypoint_patterns.md b/docs/entrypoint_patterns.md
new file mode 100644
index 0000000000000000000000000000000000000000..0b617c598ba28c8875e9ddd1bcb1dd12268a2064
--- /dev/null
+++ b/docs/entrypoint_patterns.md
@@ -0,0 +1,114 @@
+## 📄 How CV Parsing & LLM Evaluation Are Triggered — Summary
+
+Below is a clean overview of the three architectural patterns for triggering **CV parsing** and **LLM-based CV evaluation** inside an agentic HR pipeline.
+
+---
+
+## 🧩 End-to-End Flow
+1. Candidate uploads CV
+2. System stores candidate entry in DB
+3. CV parser runs automatically
+4. Parsed CV JSON is stored in DB
+5. Orchestrator detects that parsing is done
+6. Orchestrator triggers the CV Screening Agent
+7. LLM evaluates CV and stores results
+8. 
Pipeline continues (voice → scheduling → final decision) + +--- +```sql +[User (Streamlit UI)] + ↓ + Upload CV + metadata (HTTP POST) + ↓ +[Orchestrator API] + ↓ + Save CV file (local or cloud) + ↓ + Write candidate entry to DB + ↓ + Trigger parsing pipeline + ↓ + Update parsed_cv_json + status='parsed' + ↓ + Orchestrator runs CV Screening Agent + ↓ + Write results to DB + status='cv_screened' + ↓ +[Streamlit polls /api/status/] + ↓ + Display updated status + scores +``` +--- + +## 🧠 Pattern A — Orchestrator-Driven State Machine (Recommended) + +The orchestrator continuously monitors the candidate’s status in the database and decides the next action based on that state. + +**Flow:** +- After parsing finishes, the system sets `status = "parsed"` +- The orchestrator checks the state and sees that the next step is CV screening +- It triggers the CV Screening Agent +- Once evaluation completes, the system updates status to `status = "cv_screened"` +- The orchestrator then moves to the next stage (voice screening, etc.) + +**Why this is the best choice:** +- Most “agentic” (planning + reasoning) +- Clean separation between deterministic parsing and cognitive reasoning +- Perfect fit for LangGraph orchestration +- Easy to visualize reasoning and workflow progress +- Ideal for hackathon judges (transparency + intentionality) + +--- + +## 🧠 Pattern B — Event-Based Trigger (Webhook, Queue, Pub/Sub) + +The parsing component emits an event like “cv_parsed” when finished. +A listener or orchestrator receives that event and immediately triggers the CV Screening Agent. + +**Pros:** +- Scales well +- Good for microservice architectures + +**Cons:** +- Less agentic +- Harder to show planning logic and state transitions +- More infrastructure complexity + +--- + +## 🧠 Pattern C — Orchestrator Polling the Database + +A loop runs every few seconds, searching for candidates whose status is “parsed” and triggering CV evaluation when found. + +**Pros:** +- Very simple to implement +- Works well for demos and prototypes + +**Cons:** +- Not reactive +- Less elegant +- Not as agentic or clean as Pattern A + +--- + +## 🏆 Recommendation + +Use **Pattern A (Orchestrator-Driven State Machine)** for the hackathon submission. + +**Benefits:** +- Natural agentic behavior +- Works directly with LangGraph’s planning style +- Provides clear reasoning transparency +- Fits well with your multi-agent architecture +- Easy to show on the Gradio dashboard +- Minimal complexity while still highly principled + +--- + +## 📝 TL;DR + +- CV parsing should run automatically after upload +- Parsed data should be saved to the DB +- **LLM CV evaluation should NOT be triggered by upload** +- Instead, the **orchestrator detects the new state and triggers evaluation** +- Pattern A (state machine) is the cleanest and most agentic solution \ No newline at end of file diff --git a/docs/how_langgraph_works.md b/docs/how_langgraph_works.md new file mode 100644 index 0000000000000000000000000000000000000000..1e92282f24c6d192065191b141d3db5a7a578e95 --- /dev/null +++ b/docs/how_langgraph_works.md @@ -0,0 +1,262 @@ +# 🧭 LangGraph Overview: Message Flow, Tool Execution & GPT-OSS Integration +LangGraph is a workflow engine for building agentic systems on top of LangChain. +It models the reasoning–action loop between models and tools using a transparent graph of nodes. + +This document explains: +1. How LangGraph message flow works +2. How tools and tool calls are represented +3. 
How your custom GPT-OSS (OpenRouter) wrapper integrates via bind_tools() + +--- + +## ⚙️ Core Concept +LangGraph passes a `state` object between nodes, usually defined like this: +```python +from typing import Annotated, List, TypedDict, Any +from langgraph.graph.message import add_messages + +class State(TypedDict): + messages: Annotated[List[Any], add_messages] +``` + +The `messages` list holds the entire conversation: user inputs, model responses, and tool outputs. +Each node reads this list, adds new messages, and returns an updated state. + +**LangGraph uses LangChain message objects:** +- 🧑‍💼 HumanMessage — from the user +- 🤖 AIMessage — from the model (may include tool_calls) +- 🧰 ToolMessage — from a tool +- ⚙️ SystemMessage — optional context + +## 🧩 The Typical Agent Flow +```sql +HumanMessage ─► LLMNode ─► ToolNode ─► LLMNode ─► Final Answer +``` + +### 1️⃣ Human input +```python +input_state = { + "messages": [ + HumanMessage( + content="Compute 8 * 12 using calculator tool" + ) + ] +} +``` +LangGraph starts from `START` and passes this to the first node (the model). + +### 2️⃣ Model response: tool call +Your model (`ChatOpenRouter` running GPT-OSS) examines the conversation and returns an `AIMessage`: +```json +{ + "content": "", + "tool_calls": [ + { + "id": "call_1", + "function": {"name": "calculator", "arguments": "{\"a\":8,\"b\":12,\"op\":\"mul\"}"} + } + ], + "finish_reason": "tool_calls" +} +``` +✅ LangGraph detects `.tool_calls` and automatically routes the next step to the ToolNode. + +### 3️⃣ Tool execution +The ***ToolNode*** executes the requested tool and adds a `ToolMessage` to the state: +```python +ToolMessage( + content='96.0', + name='calculator', + tool_call_id='call_1' +) +``` + +### 4️⃣ LLM continuation +The LLM now sees: +``` +[ + HumanMessage(...), + AIMessage(..., tool_calls=[...]), + ToolMessage(name="calculator", content="96.0") +] +``` + +It generates a final summary message: +```text +"The result of 8 × 12 is **96**." +``` +Since this new message has no further `tool_calls`, LangGraph ends the workflow. + +## 🧠 Internal Message Logic +| Step | Message Type | Produced By | Purpose | +| ---- | -------------- | ----------- | ------------------- | +| 1 | `HumanMessage` | user | Input | +| 2 | `AIMessage` | model | Requests tool | +| 3 | `ToolMessage` | ToolNode | Returns tool output | +| 4 | `AIMessage` | model | Final answer | + + +LangGraph uses conditional edges to decide whether to continue looping: +```python +workflow.add_conditional_edges( + "agent", + lambda state: "tools" if state["messages"][-1].tool_calls else END +) +``` +This keeps running until no more tool calls are made. 
+ +## ⚙️ Example Graph Definition +```python +from langgraph.graph import StateGraph, START, END +from langgraph.prebuilt import ToolNode +from langchain_core.messages import HumanMessage +from langchain_core.tools import tool +from src.core.llm_providers.openrouter_llm import ChatOpenRouter + +@tool +def calculator(a: float, b: float, op: str) -> float: + """Perform a basic arithmetic operation.""" + if op == "add": return a + b + if op == "sub": return a - b + if op == "mul": return a * b + if op == "div": return a / b + +tools = [calculator] + +# Initialize GPT-OSS LLM +llm = ChatOpenRouter(model_name="openai/gpt-oss-120b", temperature=0.0) + +# ✅ Bind tools — this injects tool schemas into the model's context +llm_with_tools = llm.bind_tools(tools) + +def call_model(state: State) -> State: + response = llm_with_tools.invoke(state["messages"]) + return {"messages": [response]} + +workflow = StateGraph(State) +workflow.add_node("agent", call_model) +workflow.add_node("tools", ToolNode(tools)) +workflow.add_edge(START, "agent") +workflow.add_conditional_edges( + "agent", lambda s: "tools" if s["messages"][-1].tool_calls else END +) +workflow.add_edge("tools", "agent") + +agent = workflow.compile() +input_state = {"messages": [HumanMessage(content="Compute 8 * 12 using calculator tool")]} +print(agent.invoke(input_state)) +``` + +***>>> Check `notebooks/playground.ipynb` to see it in action!*** + +```mermaid +graph TD; + __start__(
__start__
+)
+	agent(agent)
+	tools(tools)
+	__end__(
+__end__
) + __start__ --> agent; + agent -->|tool_calls| tools; + tools --> agent; + agent -->|no tool_calls| __end__; + classDef default fill:#f2f0ff,line-height:1.2 + classDef first fill-opacity:0 + classDef last fill:#bfb6fc +``` + +## 🧩 How bind_tools() Works Internally +`bind_tools()` is the bridge between LangGraph and your LLM. +When you call: + +```python +llm_with_tools = llm.bind_tools(tools) +``` + +LangChain: +1. Extracts each tool's name, description, and argument schema. +2. Converts them into an OpenAI function-calling schema JSON block (like `tools=[{"type":"function","function":{"name":...}}]`). +3. Attaches that schema to the model's context before each inference call. +So GPT-OSS sees an augmented prompt like this: +> “You have access to the following tools: +> calculator(a: float, b: float, op: str) — Perform a basic arithmetic operation.” + + +During inference, the model can reason (internally) about which tool to call and output structured JSON like: +```json +{ + "tool_calls": [{ + "function": { + "name": "calculator", + "arguments": "{\"a\":8,\"b\":12,\"op\":\"mul\"}" + } + }] +} +``` + + +--- + +## 🧱 GPT-OSS Integration via ChatOpenRouter +To connect GPT-OSS via OpenRouter, use your wrapper: +```python +# src/core/llm_providers/openrouter_llm.py +from langchain_openai import ChatOpenAI +from pydantic_settings import BaseSettings, SettingsConfigDict + +class OpenRouterSettings(BaseSettings): + OPENROUTER_API_KEY: str + model_config = SettingsConfigDict(env_file=".env", extra="ignore") + +class ChatOpenRouter(ChatOpenAI): + """OpenRouter wrapper for GPT-OSS and other open models.""" + + def __init__( + self, + model_name: str = "openai/gpt-oss-120b", + base_url: str = "https://openrouter.ai/api/v1", + temperature: float = 0.2, + **kwargs, + ): + settings = OpenRouterSettings() + super().__init__( + model_name=model_name, + openai_api_base=base_url, + openai_api_key=settings.OPENROUTER_API_KEY, + temperature=temperature, + **kwargs, + ) +``` +Then simply: +```python +llm = ChatOpenRouter() +llm_with_tools = llm.bind_tools(tools) +``` +✅ This passes your validated API key to the OpenRouter endpoint. + +✅ bind_tools() adds your tool schemas to the model input so GPT-OSS knows which functions exist. + +✅ LangGraph handles execution and looping automatically. + + +--- + +## 🔁 Full Flow Recap + +| Step | Component | What Happens | +| ---- | ---------------------- | ---------------------------------------------------------- | +| 1 | `ChatOpenRouter` | Sends messages to GPT-OSS via OpenRouter API | +| 2 | `bind_tools()` | Injects tool schema into model context | +| 3 | `AIMessage.tool_calls` | Model outputs structured tool call | +| 4 | `ToolNode` | Executes the requested function | +| 5 | `ToolMessage` | Returns tool result to model | +| 6 | `AIMessage` | Model produces natural-language final answer | +| ✅ | LangGraph | Orchestrates routing and maintains full conversation state | + + +## ✅ TL;DR +- LangGraph represents an agent loop as a message-passing graph. +- Messages include `HumanMessage`, `AIMessage`, and `ToolMessage`. +- `bind_tools()` injects your tool schemas into the LLM's context so it can call them. +- The ToolNode executes the functions and feeds results back into the loop. +- Your `ChatOpenRouter` wrapper lets GPT-OSS models participate in this system seamlessly. 
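+
+To see the exact schema `bind_tools()` attaches, you can convert a tool yourself. A quick, illustrative check (assuming `langchain_core` is installed):
+
+```python
+from langchain_core.tools import tool
+from langchain_core.utils.function_calling import convert_to_openai_tool
+
+@tool
+def calculator(a: float, b: float, op: str) -> float:
+    """Perform a basic arithmetic operation."""
+    return {"add": a + b, "sub": a - b, "mul": a * b, "div": a / b}[op]
+
+# Prints the {"type": "function", "function": {...}} block described above
+print(convert_to_openai_tool(calculator))
+```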
+ diff --git a/docs/mcp/gmail_mcp_gcp_setup.md b/docs/mcp/gmail_mcp_gcp_setup.md new file mode 100644 index 0000000000000000000000000000000000000000..f7068088d6bf303e61c2aa8c04819541a749d971 --- /dev/null +++ b/docs/mcp/gmail_mcp_gcp_setup.md @@ -0,0 +1,305 @@ +# ***`Gmail MCP – GCP Setup (Terraform + Bash)`*** + +This folder provisions the minimum GCP infrastructure to run the [Gmail MCP server](https://github.com/theposch/gmail-mcp/tree/main) without requiring billing: +- Creates (or adopts) a ***GCP project*** +- Enables ***gmail.googleapis.com*** +- Grants your user: +- `roles/editor` +- `roles/serviceusage.serviceUsageAdmin` +Prints console links to finish OAuth (consent screen + Desktop client) +> Billing is **not required** for Gmail API or OAuth Desktop client. + +## ***Prerequisites*** +- **Terraform** ≥ 1.6 +- **gcloud** (optional but useful for verifying/importing projects) +- A Google account (you'll also add it as a **Test user** on the OAuth consent screen) + +## Files +- `versions.tf` – provider & Terraform version pins +- `providers.tf` – Google provider config (uses project_id and region) +- `variables.tf` – input variables +- `main.tf` – project, Gmail API enablement, IAM bindings +- `outputs.tf` – project IDs and console URLs +- `terraform.tfvars` – team defaults (simple `key = "value"` pairs) + +Example `terraform.tfvars`: +```python +project_id = "gradio-hackathon-25" +project_name = "Gradio Agent MCP Hackathon 25" +user_email = "hr.cjordan.agent.hack.winter25@gmail.com" +# region = "europe-west3" # optional +``` + +## ***Quick Start (recommended: use the scripts)** +From the ***repo root***: +1. **Authenticate gcloud + ADC** +```bash +chmod +x scripts/gcp_setup.sh +./scripts/gcp_setup.sh +``` + +2. **Apply Terraform with smart defaults + auto-import** +```bash +chmod +x scripts/terraform_apply.sh +./scripts/terraform_apply.sh +``` + +- The script **prompts** for `project_id`, `project_name`, `user_email`. +- Press ***Enter*** to use defaults from `terraform/terraform.tfvars`. +- If the project already exists, it is **auto-imported** to avoid `409 alreadyExists`. + +--- + +## ***Manual Usage (alternative)*** +Run these from this `terraform/` directory: +```bash +terraform init + +# If the project already exists, import it so Terraform manages it: +# terraform import google_project.project + +terraform apply +``` + +Override values: + +```bash +terraform apply \ + -var="project_id=my-mcp-project" \ + -var="project_name=My MCP Project" \ + -var="user_email=you@example.com" +``` + +Or via env vars: +```bash +export TF_VAR_project_id="my-mcp-project" +export TF_VAR_project_name="My MCP Project" +export TF_VAR_user_email="you@example.com" +terraform apply +``` + +## **Outputs** +- `project_id` / `project_number` +- `gmail_api_service` — `"gmail.googleapis.com"` (resource present ⇒ enabled) +- `console_oauth_consent_screen_url` — configure consent (External, Test user, add scope) +- `console_oauth_credentials_url` — create ***OAuth 2.0 Client ID*** (Desktop app) + +## **Final OAuth Setup (one-time, in Console)** + +Terraform cannot create the **OAuth consent screen** or **Desktop OAuth client**, so you'll do these two steps once in the Google Cloud Console. +This setup allows your **local Gmail MCP server** to access Gmail via OAuth securely. + +> 💡 **Tip:** In the new Google Cloud UI, the old “Scopes” and “Test users” tabs are now under **Data access** and **Audience** in the left sidebar. 
+
+---
+
+### 1️⃣ **Configure the OAuth Consent Screen**
+
+**Purpose:** Identify your app to Google and specify who can use it during testing.
+
+1. Open the link printed in Terraform outputs:
+   → `console_oauth_consent_screen_url`
+
+2. If prompted, choose **User type: External**, then click **Create**.
+
+3. Fill out **App info**:
+   - **App name:** `Gmail MCP Local`
+   - **User support email:** your Gmail address
+   - **Developer contact email:** your Gmail address
+   - Click **Save and Continue**
+
+4. In the left sidebar, go to **Data access**
+   - Click **Add or remove scopes**
+   - Add the following scopes:
+     ```text
+     https://www.googleapis.com/auth/gmail.modify
+     openid
+     https://www.googleapis.com/auth/userinfo.email
+     ```
+   ✅ *These provide read, send, and modify access — no extra Gmail scopes required.*
+
+5. Go to **Audience** (left sidebar)
+   - Under **Test users**, click **Add users**
+   - Add your Gmail account address
+   - Click **Save**
+
+6. Go to **Summary** and confirm:
+   - User type → **External**
+   - Publishing status → **Testing**
+   - Test users → your Gmail account
+   - Scopes → shows Gmail modify, openid, userinfo.email
+
+---
+
+### 2️⃣ **Create a Desktop OAuth Client**
+
+**Purpose:** This provides the credentials your **local MCP server** uses to initiate the OAuth flow.
+
+1. Open the second Terraform output link:
+   → `console_oauth_credentials_url`
+
+2. Click **Create credentials → OAuth client ID**
+
+3. Choose:
+   - **Application type:** `Desktop app`
+   - **Name:** `gmail-mcp-desktop`
+
+4. Click **Create**, then **Download JSON**, and move it into the expected directory:
+```bash
+mkdir -p ~/.gmail-mcp
+mv ~/Downloads/client_secret_*.json ~/.gmail-mcp/credentials.json
+```
+
+### ⚠️ ***Important: Two Different JSONs***
+
+- The file from:
+```bash
+gcloud auth application-default login
+```
+is your ***Application Default Credentials (ADC)*** — used by Terraform and `gcloud`.
+It is ***not*** the same as the Desktop OAuth client JSON.
+The Gmail MCP server requires the ***Desktop OAuth client JSON*** you downloaded.
+→ Place it at `~/.gmail-mcp/credentials.json`.
+
+### 🚀 ***When You Run the MCP Server***
+The server will open a browser window asking you to sign in and approve access.
+You'll see your app name (`Gmail MCP Local`) and the Gmail modify scope.
+After approving, tokens are cached locally (usually `~/.gmail-mcp/token.json`), so you won't need to approve again.
+
+
+---
+
+### ⚙️ **Install Required Tools**
+
+Before testing or running the Gmail MCP server, make sure the following tools are installed:
+
+#### 🟣 1. Install `uv`
+`uv` is a fast Python package manager used to run the Gmail MCP server.
+
+Check if it's already installed:
+```bash
+uv --version
+```
+If not, install it:
+
+```bash
+curl -LsSf https://astral.sh/uv/install.sh | sh
+```
+
+Verify:
+```bash
+uv --version
+```
+#### 🟠 2. Install Node.js (includes `npm` and `npx`)
+`npx` is used to run the MCP Inspector, which lets you test your Gmail MCP server locally.
+```bash
+brew install node
+```
+
+### 🧪 ***Testing Locally with MCP Inspector***
+Before connecting to Claude Desktop or a LangGraph agent, you can visually inspect and test your Gmail MCP server using the ***MCP Inspector*** web UI.
+ +#### 1️⃣ **Run with simple path (direct server entry)** +Use this if you're already inside the ``gmail-mcp directory or the script path resolves cleanly: +```bash +npx @modelcontextprotocol/inspector uv run /Users/sebastianwefers/Desktop/development/recruitment-agent-mcp-hackathon-winter25/src/mcp_servers/gmail-mcp/src/gmail/server.py \ + --creds-file-path ~/.gmail-mcp/credentials.json \ + --token-path ~/.gmail-mcp/token.json +``` + +#### 2️⃣ **Run with full project context (recommended)** +This variant is more robust and works regardless of your working directory, because it explicitly tells `uv` which project directory to use and where your binaries are. + +```bash +npx @modelcontextprotocol/inspector \ + /Users/sebastianwefers/.local/bin/uv \ + --directory /Users/sebastianwefers/Desktop/development/recruitment-agent-mcp-hackathon-winter25/src/mcp_servers/gmail-mcp \ + run gmail \ + --creds-file-path ~/.gmail-mcp/credentials.json \ + --token-path ~/.gmail-mcp/token.json +``` + +#### 🔍 What Happens +When you run either command, you should see output similar to: +```bash +Starting MCP inspector... +⚙️ Proxy server listening on localhost:6277 +🔑 Session token: 8498939effc01e03c1b879efa72768e45608056ef1ad45e5c80344a7d9362a72 + Use this token to authenticate requests or set DANGEROUSLY_OMIT_AUTH=true to disable auth + +🚀 MCP Inspector is up and running at: + http://localhost:6274/?MCP_PROXY_AUTH_TOKEN=8498939effc01e03c1b879efa72768e45608056ef1ad45e5c80344a7d9362a72 + +🌐 Opening browser... + +``` + +This automatically opens a local browser window to the MCP Inspector UI, connected to your Gmail MCP server. + +✅ Expected behavior: +- On first run, a browser window will prompt you to log in and approve access. +- After successful OAuth, a token file will be created: +```bash +~/.gmail-mcp/token.json +``` +- Subsequent runs reuse this token — no re-auth required. +- You can now explore, invoke, and inspect your Gmail MCP tools visually (e.g., `listEmails`, `sendEmail`, `modifyLabel`, etc.) right from the web UI. + +### 💻 ***Connecting the Gmail MCP Server to Claude Desktop*** +1. Open your Claude Desktop configuration file: +```bash +nano ~/Library/Application Support/Claude/claude_desktop_config.json +``` +2. Add this block (update paths if necessary): +```json +{ + "mcpServers": { + "gmail": { + "command": "uv", + "args": [ + "--directory", + "/Users/sebastianwefers/Desktop/development/recruitment-agent-mcp-hackathon-winter25/src/mcp_servers/gmail-mcp", + "run", + "gmail", + "--creds-file-path", + "/Users/sebastianwefers/.gmail-mcp/credentials.json", + "--token-path", + "/Users/sebastianwefers/.gmail-mcp/token.json" + ] + } + } +} +``` +3. Save the file and restart Claude Desktop. + +4. Open ***Settings → Model Context Protocol → Add Server***, then connect to gmail. + +Claude will now be able to: +- 📥 Read emails +- ✉️ Compose drafts +- 🏷 Send and modify Gmail messages directly from your account. + +— all directly via your Gmail MCP server. + + +## 🧩 Why Deleting token.json Fixes the “invalid_grant” Error +The error occurs because the stored refresh token in token.json is expired or revoked, so Google rejects all refresh attempts. +Deleting the file forces the app to start a new OAuth flow, prompting you to log in again and generating a new, valid refresh token — which restores access to the Gmail API. + +```bash +# 1. Remove the invalid cached token +rm /Users/sebastianwefers/Desktop/projects/recruitment-agent-mcp-hackathon-winter25/secrets/gmail-mcp/token.json + + +# 2. 
Re-run the Gmail MCP server (which triggers OAuth again) +python -m src.mcp_servers.gmail_mcp +``` + +Then, when the script printed a Google sign-in URL, you: +1. Opened it in your browser, +2. Logged in to your Google account, +3. Approved the Gmail API access, +4. And the new valid token.json was automatically recreated at: +```bash +~/.gmail-mcp/token.json +``` \ No newline at end of file diff --git a/docs/mcp/google_tools_mcp.md b/docs/mcp/google_tools_mcp.md new file mode 100644 index 0000000000000000000000000000000000000000..afba0a9ce53ba9c8cf4b3ba4ed6e016dc3885133 --- /dev/null +++ b/docs/mcp/google_tools_mcp.md @@ -0,0 +1,180 @@ +# Google Calendar and GMail Tools / MCP + +## 1) ***`Base setup`*** +### 1.1) ***GMail Account*** + +### 1.2) ***Google Cloud*** + +#### Terraform Modifications (Minimal) +You can extend your existing Gmail Terraform to include Calendar support. +Add these to your `main.tf`: +```bash +# Enable the Google Calendar API +resource "google_project_service" "calendar_api" { + project = google_project.project.project_id + service = "calendar.googleapis.com" + disable_on_destroy = false +} +``` +And if you want, you can add an output for convenience: +```bash +output "console_calendar_api_url" { + value = "https://console.cloud.google.com/apis/library/calendar.googleapis.com?project=${google_project.project.project_id}" +} +``` +After adding, re-run your scripts: +```bash +cd terraform +terraform apply +``` +This enables the Calendar API in the same project your Gmail MCP is using — so you don't have to create a second one. + +Terraform will: +1. Detect that you already have a project and Gmail API from before. +2. Notice the new Calendar API resource in ``main.tf. +3. Apply only that new change (plus any small diff in IAM roles if needed). + +💡 What Happens Internally +When you run `terraform apply`, Terraform will: +- Read your current state file (`terraform.tfstate`). +- Query GCP to check what's already deployed. +- Compute a plan (the difference between your state and the `.tf` files). + +#### 🔑 OAuth Setup — Shared Consent Screen +You do not need a new consent screen — just reuse your existing one (Gmail MCP Local) and add the Calendar scope. + +Go to: +👉 [Google Cloud Console → APIs & Services → OAuth consent screen → Edit app → Data access → Add scopes] +Add this scope: +```arduino +https://www.googleapis.com/auth/calendar +``` +You'll now have Gmail + Calendar under one consent. +Then, create a ***second OAuth client***: +Application type: Desktop app +Name: `calendar-mcp-desktop` +Download the credentials JSON → save it to: +```bash +~/.calendar-mcp/credentials.json +``` +The Calendar MCP server will then use this credentials file when it first authenticates. + +#### 🧩 MCP Client Config (Claude or LangGraph) +Just add a new block alongside your Gmail entry. 
+For Claude Desktop (`claude_desktop_config.json`)
+```json
+{
+  "mcpServers": {
+    "gmail": {
+      "command": "uv",
+      "args": [
+        "--directory",
+        "/Users/sebastianwefers/Desktop/development/recruitment-agent-mcp-hackathon-winter25/src/mcp_servers/gmail-mcp",
+        "run",
+        "gmail",
+        "--creds-file-path",
+        "/Users/sebastianwefers/.gmail-mcp/credentials.json",
+        "--token-path",
+        "/Users/sebastianwefers/.gmail-mcp/token.json"
+      ]
+    },
+    "google_calendar": {
+      "command": "uv",
+      "args": [
+        "--directory",
+        "/Users/sebastianwefers/Desktop/development/recruitment-agent-mcp-hackathon-winter25/src/mcp_servers/calendar-mcp",
+        "run",
+        "calendar"
+      ]
+    }
+  }
+}
+```
+
+For LangGraph:
+```python
+client = MultiServerMCPClient({
+    "gmail": {
+        "command": "uv",
+        "args": [
+            "--directory", "/Users/sebastianwefers/Desktop/development/recruitment-agent-mcp-hackathon-winter25/src/mcp_servers/gmail-mcp",
+            "run", "gmail",
+            "--creds-file-path", "/Users/sebastianwefers/.gmail-mcp/credentials.json",
+            "--token-path", "/Users/sebastianwefers/.gmail-mcp/token.json"
+        ],
+        "transport": "stdio"
+    },
+    "google_calendar": {
+        "command": "uv",
+        "args": [
+            "--directory", "/Users/sebastianwefers/Desktop/development/recruitment-agent-mcp-hackathon-winter25/src/mcp_servers/calendar-mcp",
+            "run", "calendar"
+        ],
+        "transport": "stdio"
+    }
+})
+```
+#### Environment Variables (.env)
+Create the `.env` in your calendar-mcp repo root, as described in its README:
+```bash
+GOOGLE_CLIENT_ID='YOUR_CLIENT_ID'
+GOOGLE_CLIENT_SECRET='YOUR_CLIENT_SECRET'
+TOKEN_FILE_PATH='.gcp-saved-tokens.json'
+OAUTH_CALLBACK_PORT=8080
+CALENDAR_SCOPES='https://www.googleapis.com/auth/calendar'
+```
+⚠️ Make sure the redirect URI matches:
+```bash
+http://localhost:8080/oauth2callback
+```
+You'll go through one browser OAuth login on first run, and then `.gcp-saved-tokens.json` will be created — no need to repeat.
+
+
+## 2) ***`Model Context Protocol`***
+**References**
+- [Official MCP Docs](https://modelcontextprotocol.io/docs/getting-started/intro)
+- [MCP Crash Course by YouTuber & AI Engineer Dave Ebbelaar](https://www.youtube.com/watch?v=5xqFjh56AwM&t=761s)
+- *Existing Repos*
+  - [Curated list of MCP servers](https://github.com/modelcontextprotocol/servers) hosted by `MCP` themselves.
+  - [Google Calendar](https://github.com/deciduus/calendar-mcp/blob/main/README.md)
+    - calendar repo alternatives:
+      - https://github.com/nspady/google-calendar-mcp/tree/main/src/tools
+```text
+# Calendar MCP (Dual Layer)
+LLM Agent
+ │
+ │ JSON-RPC over STDIO
+ ▼
+MCP Bridge (mcp_bridge.py)
+ │ HTTP requests to localhost:8000
+ ▼
+FastAPI Server (server.py)
+ │
+ └── Google Calendar API (OAuth + REST)
+```
+
+  - [GMail](https://github.com/theposch/gmail-mcp/blob/main/README.md)
+```text
+# Gmail MCP (Pure MCP)
+LLM Agent
+ │
+ │ JSON-RPC over STDIO
+ ▼
+Gmail MCP Server
+ │
+ └── Gmail API (OAuth + REST)
+```
+  - [Gmail](https://github.com/MCP-Mirror/Samarth2001_gmail-mcp)
+  - [Gmail](https://github.com/jasonsum/gmail-mcp-server)
+
+
+## 🧱 1️⃣ Compatibility Breakdown
+| Area | Gmail MCP | Calendar MCP | Compatible? | Notes |
+| ------------------- | ---------------------------------------------- | ----------------------------------------------- | ----------- | --------------------------------------------------------------------------- |
+| **Transport** | MCP via STDIO | MCP via STDIO (through FastAPI bridge) | ✅ | Works out of the box with same client setup. 
|
+| **Auth Type** | OAuth 2.0 Desktop Client | OAuth 2.0 Desktop Client | ✅ | Identical flow; can reuse same consent screen + test users. |
+| **Scopes** | `https://www.googleapis.com/auth/gmail.modify` | `https://www.googleapis.com/auth/calendar` | ✅ | Different scopes, but both can live under one consent screen. |
+| **Terraform** | Creates project, enables Gmail API, sets roles | Just needs Calendar API enabled too | ✅ | Add one more API + scope to Terraform config. |
+| **Token Storage** | `~/.gmail-mcp/token.json` | `.gcp-saved-tokens.json` | ✅ | Each uses its own token file; keep separate to avoid refresh token mix-ups. |
+| **Runtime** | `uv` stdio server | `python run_server.py` (auto-switches to stdio) | ✅ | You can use `uv` for both, if you prefer consistency. |
+| **MCP Integration** | Claude / LangGraph via config | Same | ✅ | Just add another entry under `mcpServers`. |
diff --git a/intro.md b/intro.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa85cc6996f102df7482ffe6511cb8fa3eae6cb4
--- /dev/null
+++ b/intro.md
@@ -0,0 +1,457 @@
+# ***`Gradio Agents & MCP Hackathon Winter Edition 2025`***
+
+## 🏁 Overview
+This repository hosts our team's submission for **Track 2: MCP in Action** in the [MCP's 1st Birthday Hackathon](https://huggingface.co/MCP-1st-Birthday).
+
+Our goal is to build an **autonomous agentic system** that demonstrates:
+- **Planning, reasoning, and execution**
+- Integration of **custom tools, MCP tools, or external APIs**
+- Effective **context engineering**
+- Clear, practical **user value**
+
+We'll use **LangGraph** as our orchestration backbone for building multi-turn, tool-using, and context-aware agents.
+
+> ***`Check hackathon README for detailed requirements.`***
+
+## 🧠 ***`Tools & Frameworks`***
+
+- 🧩 [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview): for multi-agent orchestration and planning
+  - Why & how they built [LangGraph for production agents](https://blog.langchain.com/building-langgraph/)
+- 🧠 **LLM Engines:** [OpenAI](https://openai.com) / [Anthropic](https://www.anthropic.com) — reasoning and planning models
+  - gpt-oss inference providers
+    - [Open Router](https://openrouter.ai/openai/gpt-oss-20b):
+      - LangChain Wrapper: https://github.com/langchain-ai/langchain/discussions/27964
+    - [TogetherAI](https://www.together.ai/openai)
+- 💬 [Gradio](https://www.gradio.app/): for the UI and context-engineering demos
+- ⚙️ [MCP](https://modelcontextprotocol.io/docs/getting-started/intro) Tools: standardized interfaces for Gmail, Google Calendar, Voice technologies and other APIs
+- ☁️ [Google Cloud Platform](https://cloud.google.com): optional backend for hosting MCP servers and integrated services
+- 📞 [Twilio](https://www.twilio.com/en-us): enables automated voice calls and candidate interactions
+- 🔊 [ElevenLabs](https://elevenlabs.io): (optional) natural text-to-speech for realistic voice screenings
+- 🎙️ [Whisper-based Transcription API](https://whisperapi.com) (or [OpenAI Whisper API](https://platform.openai.com/docs/guides/speech-to-text)) — for speech-to-text functionality in voice interviews
+- 🧭 [Langfuse](https://langfuse.com) or [LangSmith](https://docs.langchain.com/langsmith/quick-start-studio): debugging, observability, and trace visualization
+- 📄 [Docling](https://www.docling.ai): for parsing and analyzing uploaded CV documents
+- 🧱 [Pydantic](https://docs.pydantic.dev/latest/): for structured outputs and data validation
+- 🔀 [Parlant](https://github.com/emcie-co/parlant): enables
agents to handle multi-intent, free-form conversations by dynamically activating relevant guidelines instead of rigidly routing to a single sub-agent — solving the context fragmentation problem inherent in traditional LangGraph supervisor patterns. + +## 📚 ***`References for Context Engineering`*** + +- [**Context Engineering for AI Agents — Manus Blog**](https://manus.im/blog/Context-Engineering-for-AI-Agents-Lessons-from-Building-Manus) +- [**YouTube Talk Manus**](https://www.youtube.com/watch?v=6_BcCthVvb8&start=2525) +- [**LangGraph Overview**](https://docs.langchain.com/oss/python/langgraph/overview) +- https://www.anthropic.com/engineering/effective-context-engineering-for-ai-agents +- https://medium.com/fundamentals-of-artificial-intelligence/mitigate-context-poisoning-in-ai-agents-using-context-engineering-96cf40dbb38d +- https://blog.langchain.com/context-engineering-for-agents/ +- **langgraph implementations** + - [video]((https://www.youtube.com/watch?v=nyKvyRrpbyY)) + - [good notebooks](https://github.com/langchain-ai/how_to_fix_your_context/blob/main/notebooks/utils.py) +- [Langgraph summary of what frontier labs and firms apply](https://www.youtube.com/watch?v=XFCkrYHHfpQ) + +These resources guide our approach to **memory management, planning transparency, and tool orchestration** in autonomous agents. + +## 🧾 ***`HR Candidate Screening Multi-Agent System`*** +An autonomous HR assistant that streamlines early recruitment through five steps: +1. **CV Upload (Application)** — candidate applications uploaded and parsed +2. **CV Screening** — rank and shortlist candidates using LLM reasoning +3. **Voice Screening** — invite and coordinate interviews using a voice agent. +4. **Person-to-Person Screening** — schedule HR interviews via Google Calendar integration +5. **Decision** — generate a concise summary and notify HR + +> **`NOTE`** +> - Final decision of whether candidate will be hired is made by human. +> - Just automate the boring, tedious stuff while keeping human final decision in the loop. + +**Architecture:** +1. **Main Planner Agent**: orchestrates the workflow +2. **Subagents**: + - CV Screening Agent + - Voice Screening Agent + - Meeting Scheduler Agent +3. **Tools (via MCP)** connect to Gmail, Calendar, and Voice APIs. +4. **Database** stores both candidate info and persistent agent memory. +5. **Gradio UI** visualizes workflow, reasoning, and results. +```mermaid +flowchart TD + subgraph MainAgent["🧠 Main Planner Agent"] + A1["Plans • Reasons • Executes"] + end + + subgraph Subagents["🤖 Subagents"] + S1["📄 CV Screening"] + S2["🎙️ Voice Screening"] + S3["📅 Scheduling"] + S4["🧾 Decision Summary"] + end + + subgraph Tools["⚙️ MCP & External Tools"] + T1["📧 Gmail"] + T2["🗓️ Google Calendar"] + T3["🗣️ Voice API"] + end + + subgraph Data["🗄️ Database"] + D1["Candidate Data"] + D2["Context Memory (Cognitive Offloading)"] + end + + subgraph UI["💬 Gradio Dashboard"] + U1["HR View & Interaction"] + end + + %% Connections + MainAgent --> Subagents + Subagents --> Tools + Subagents --> Data + MainAgent --> Data + MainAgent --> UI +``` + +**GCP Setup for Judges:** +A single demo Gmail/Calendar account (`scionhire.demo@gmail.com`) is pre-authorized via OAuth, with stored credentials in `.env`. +Judges can run or view the live demo without any credential setup, experiencing real Gmail + Calendar automation safely. 
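+
+For illustration, the pre-authorized credentials could be rebuilt at runtime roughly like this (a sketch with hypothetical env var names; the real values live in the demo `.env`):
+
+```python
+import os
+
+from google.oauth2.credentials import Credentials
+
+# Assumed variable names for the demo account's stored OAuth grant
+creds = Credentials(
+    token=None,  # access token is refreshed on first use
+    refresh_token=os.environ["DEMO_REFRESH_TOKEN"],
+    client_id=os.environ["DEMO_CLIENT_ID"],
+    client_secret=os.environ["DEMO_CLIENT_SECRET"],
+    token_uri="https://oauth2.googleapis.com/token",
+    scopes=[
+        "https://www.googleapis.com/auth/gmail.modify",
+        "https://www.googleapis.com/auth/calendar",
+    ],
+)
+```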
+ +We use **hierarchical planning**: +- **Main Agent:** decides next step in the workflow (plan, adapt, replan) +- **Subagents:** specialized executors (screening, scheduling, summarization) +- **Memory State:** tracks plan progress and tool results +- **Dashboard Visualization:** shows active plan steps and reasoning traces for transparency + +🧠 Why This Is an Agent (Not Just a Workflow) + +| Criterion | Workflow | Our System | +|------------|-----------|-------------| +| **Autonomy** | Executes fixed sequence of steps | Main agent decides next actions without manual triggers | +| **Planning** | Predefined order (A → B → C) | Main agent generates and adapts a plan (e.g., skip, retry, re-order) | +| **Reasoning** | No decision logic | Uses LLM reasoning to evaluate outputs and choose next subagent | +| **Context Awareness** | Stateless | Maintains shared memory of candidates, progress, and outcomes | +| **Adaptation** | Fails or stops on error | Re-plans (e.g., if calendar slots full or candidate unresponsive) | + +✅ **Therefore:** it qualifies as an *agentic system* because it **plans, reasons, and executes** autonomously rather than following a static workflow. + +## ***`Project Structure`*** +``` +agentic-hr/ +│ +├── 📁 src/ +│ │ +│ ├── 📁 core/ +│ │ │ ├── base_agent.py # Abstract BaseAgent (LangGraph-compatible) +│ │ │ ├── supervisor.py # Supervisor agent (LangGraph graph assembly) +│ │ │ ├── state.py # Shared AgentState + context window +│ │ │ ├── planner.py # High-level planning logic +│ │ │ └── executor.py # Graph executor / runner +│ │ +│ ├── 📁 agents/ +│ │ │ +│ │ ├── 📁 cv_screening/ +│ │ │ │ ├── agent.py # CVScreeningAgent implementation +│ │ │ │ ├── 📁 tools/ +│ │ │ │ │ ├── doc_parser.py +│ │ │ │ │ ├── normalize_skills.py +│ │ │ │ │ ├── rank_candidates.py +│ │ │ │ │ └── match_to_jd.py +│ │ │ │ └── 📁 schemas/ +│ │ │ │ ├── cv_schema.py # Parsed CV Pydantic schema +│ │ │ │ └── jd_schema.py # Job description schema +│ │ │ +│ │ ├── 📁 voice_screening/ +│ │ │ │ ├── agent.py # VoiceScreeningAgent +│ │ │ │ ├── 📁 tools/ +│ │ │ │ │ ├── twilio_client.py +│ │ │ │ │ ├── whisper_transcribe.py +│ │ │ │ │ └── tts_service.py +│ │ │ │ └── 📁 schemas/ +│ │ │ │ ├── call_result.py +│ │ │ │ └── transcript.py +│ │ │ +│ │ ├── 📁 scheduler/ +│ │ │ │ ├── agent.py # SchedulerAgent +│ │ │ │ ├── 📁 tools/ +│ │ │ │ │ ├── calendar_tool.py +│ │ │ │ │ ├── gmail_tool.py +│ │ │ │ │ └── slot_optimizer.py +│ │ │ │ └── 📁 schemas/ +│ │ │ │ └── meeting_schema.py +│ │ │ +│ │ └── 📁 decision/ +│ │ ├── agent.py # DecisionAgent (final summarizer/Reporter) +│ │ └── 📁 schemas/ +│ │ └── decision_report.py +│ │ +│ ├── 📁 mcp_server/ +│ │ ├── main.py +│ │ ├── 📁 endpoints/ +│ │ ├── auth.py +│ │ └── schemas.py +│ │ +│ ├── 📁 gradio/ +│ │ ├── app.py # Main Gradio app (Hugging Face Space entry) +│ │ ├── dashboard.py # Live agent graph & logs view +│ │ ├── candidate_portal.py # Candidate upload / screening status +│ │ ├── hr_portal.py # HR review + interview approval +│ │ ├── components.py # Shared Gradio components +│ │ └── 📁 assets/ # Logos, CSS, etc. 
+│ │
+│ ├── 📁 cv_ui/
+│ │ ├── app.py
+│ │
+│ ├── 📁 voice_screening_ui/
+│ │ ├── app.py
+│ │
+│ │
+│ ├── 📁 prompts/
+│ │ ├── prompt_manager.py # Centralized prompt versioning
+│ │ ├── cv_prompts.py
+│ │ ├── voice_prompts.py
+│ │ └── scheduler_prompts.py
+│ │
+│ ├── 📁 database/
+│ │ ├── models.py # SQLAlchemy models
+│ │ ├── db_client.py # Connection & CRUD
+│ │ └── context_sync.py # Cognitive offloading (context ⇄ DB)
+│ │
+│ ├── main.py # CLI runner / local orchestrator entry
+│ └── config.py # Environment configuration
+│
+├── 📁 tests/
+│ │ ├── test_cv_agent.py
+│ │ ├── test_voice_agent.py
+│ │ ├── test_scheduler_agent.py
+│ │ ├── test_mcp_server.py
+│ │ └── test_integration.py
+│
+├── .env.example
+├── requirements.txt
+├── Dockerfile
+├── app.py # Shortcut to src/ui/app.py
+├── README.md
+└── LICENSE
+```
+
+## ***`Multi Agent System Architecture`***
+Below you will find an overview of the subagent components that make up the entire system. More detailed information and brainstorming can be found in the `docs/agents/..` directory.
+
+### 1) ***`Orchestrator`***
+#### Overview
+
+The orchestrator agent is responsible for **supervising** and **triggering** the ***tasks of the subagents***.
+
+> For more planning and info, go to `docs/agents/agent_orchestrator.md`
+
+### 2) ***`CV Screener`***
+#### Overview
+The CV screening agent scans applicants' CVs and decides which candidates are promising and which are not, as a first filtering step.
+
+> For more planning and info, go to `docs/agents/cv_screening.md`
+
+### 3) 🎙️ ***`Voice Screening Agent`***
+
+#### Overview
+The **Voice Screening Agent** conducts automated phone interviews and integrates with the **LangGraph HR Orchestrator**.
+It uses **Twilio** for phone calls, **Whisper/ASR** for speech-to-text, **ElevenLabs** for natural voice output, and **LangGraph** for dialogue logic.
+
+> For more planning and info, go to `docs/agents/voice_screening.md`
+
+### 4) ***`Google MCP Agents`***
+#### Overview
+The Google MCP agents will be responsible for:
+a) writing emails
+b) scheduling and managing Google Calendar events
+
+It is advisable to break this up into two subagents, to avoid `context poisoning`.
+
+> For more planning and info, go to `docs/agents/google_mcp_agent.md`
+
+### 5) ***`LLM as a Judge`***
+#### Overview
+LLM-as-a-judge will be leveraged to judge call screening results.
+
+> For more planning and info, go to `docs/agents/judging_agent.md`
+
+## 🗄️ ***`Data Layer`***
+
+The system uses a unified **SQLAlchemy-based database** for both **candidate data management** and **context engineering**.
+
+### 📦 Purpose
+| Data Type | Description |
+|------------|--------------|
+| 🧾 **Candidates** | Stores CVs, parsed data, and screening results |
+| 🎙️ **Voice Results** | Saves transcripts, evaluations, and tone analysis |
+| 🗓️ **Scheduling** | Tracks HR availability and confirmed interviews |
+| 🧠 **Agent Context Memory** | Enables **cognitive offloading** — storing reasoning traces and summaries so the active context stays uncluttered and information can be recalled when needed |
+| 📚 **Logs / Tool History** | Archives tool interactions and results for transparency and reuse |
+
+We use [**SQLAlchemy**](https://www.sqlalchemy.org) as the ORM layer to manage both structured candidate data and **persistent agent memory**, allowing the system to offload, summarize, and retrieve context efficiently across sessions. A minimal model sketch follows below.
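+
+As an illustration, the context-memory table might be declared like this (a minimal sketch with assumed column names, not the actual `models.py`):
+
+```python
+from datetime import datetime
+
+from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
+
+
+class Base(DeclarativeBase):
+    pass
+
+
+class ContextMemory(Base):
+    """Compact reasoning summaries offloaded from the live context window."""
+
+    __tablename__ = "agent_context_memory"
+
+    id: Mapped[int] = mapped_column(primary_key=True)
+    candidate_id: Mapped[int] = mapped_column(index=True)
+    stage: Mapped[str] = mapped_column()      # e.g. "cv_screening", "voice_screening"
+    summary: Mapped[str] = mapped_column()    # compacted trace, re-injected on demand
+    created_at: Mapped[datetime] = mapped_column(default=datetime.utcnow)
+```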
+
+## 🗃️ ***`Prompt Archive`***
+
+To ensure consistent behavior and easy experimentation across subagents, the system includes a **centralized prompt management layer**.
+
+### 📦 Purpose
+| Component | Description |
+|------------|--------------|
+| 🧠 **Prompt Templates** | Stores standardized prompts for each subagent (CV screening, voice screening, scheduling) |
+| 🔄 **Prompt Versioning** | Allows tracking and updating of prompt iterations without changing agent code |
+| 🧩 **Dynamic Injection** | Enables context-dependent prompt construction using retrieved memory or database summaries |
+| 📚 **Archive** | Keeps older prompt variants for reproducibility and ablation testing |
+
+## 📺 ***`Gradio Interface`***
+
+We use **Gradio** to demonstrate our agent's reasoning, planning, and tool use interactively — fully aligned with the **Agents & MCP Hackathon** focus on **context engineering** and **user value**.
+
+### 🧩 Key Features
+| Section | Purpose |
+|----------|----------|
+| 🧍 **Candidate Portal** | Upload CVs, submit applications, and view screening results |
+| 🧑‍💼 **HR Portal** | Review shortlisted candidates, trigger voice screenings, and schedule interviews |
+| 🧠 **Agent Dashboard** | Visualizes the current plan, tool calls, and reasoning traces in real time |
+| ⚙️ **Tool Integration** | Shows live MCP actions (Gmail send, Calendar scheduling) with status updates |
+| 📊 **Context View** | Displays agent memory, current workflow stage, and adaptive plan updates |
+
+#### Context Engineering Visualization
+This is what judges really care about — it must show that the system is agentic (reasoning, memory, planning).
+
+🧠 **Agent Plan Viewer**
+- `gr.JSON()` or a custom visual showing the current plan state, e.g.:
+```json
+{
+  "plan": [
+    "1. Screen CVs ✅",
+    "2. Invite for voice screening 🔄",
+    "3. Schedule HR interview ⬜",
+    "4. Await HR decision ⬜"
+  ]
+}
+```
+
+🗺️ **Live Plan Progress**
+- Use a progress bar or color-coded status list of steps.
+- Judges must see autonomous transitions (from one step to another).
+
+💬 **Reasoning Log / Memory**
+- Stream or text box showing LLM thought traces or context summary:
+  - “Detected strong match for Data Scientist role.”
+  - “Candidate completed voice interview; confidence: 8.4/10.”
+  - “Next step: scheduling HR interview.”
+
+⚙️ **Tool Call Trace**
+- Small table showing live tool activity, as sketched below:
+
+| Time | Tool | Action | Result |
+| ----- | -------- | ---------------- | --------- |
+| 12:05 | Gmail | `send_invite()` | Sent |
+| 12:06 | Calendar | `create_event()` | Confirmed |
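+
+A minimal sketch of how these views could be wired up in Gradio (the layout and hard-coded state below are illustrative; the real dashboard lives in `src/gradio/dashboard.py` and would refresh from the shared agent state):
+
+```python
+import gradio as gr
+
+# Illustrative snapshot of agent state; the live app would pull this
+# from the shared memory state / database instead of hard-coding it.
+PLAN = {"plan": ["1. Screen CVs ✅", "2. Invite for voice screening 🔄",
+                 "3. Schedule HR interview ⬜", "4. Await HR decision ⬜"]}
+TOOL_TRACE = [["12:05", "Gmail", "send_invite()", "Sent"],
+              ["12:06", "Calendar", "create_event()", "Confirmed"]]
+
+with gr.Blocks(title="Agent Dashboard") as demo:
+    gr.Markdown("## 🧠 Agent Plan Viewer")
+    gr.JSON(value=PLAN)
+    gr.Markdown("## ⚙️ Tool Call Trace")
+    gr.Dataframe(headers=["Time", "Tool", "Action", "Result"], value=TOOL_TRACE)
+
+if __name__ == "__main__":
+    demo.launch()
+```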
+
+## 🔗 ***`MCP Integration (Best Practice Setup)`***
+
+To align fully with the **Agents & MCP Hackathon** standards, our system will use or extend a **standardized MCP server** for integrations such as **Gmail** and **Google Calendar** — and potentially **Scion Voice** in later stages.
+
+**`Inspired by`** the [Huggingface MCP Course](https://huggingface.co/learn/mcp-course/en/unit2/introduction), which shows how to build an MCP app.
+
+### 🧩 Why MCP?
+| Benefit | Description |
+|----------|--------------|
+| ✅ **Standardized** | Exposes Gmail & Calendar as reusable MCP tools with a consistent schema |
+| 🔐 **Secure** | OAuth handled once server-side — no tokens or secrets stored in the agent |
+| 🧱 **Modular** | Clean separation between the agent's reasoning logic and the integration layer |
+| 🔄 **Reusable** | Same MCP server can serve multiple projects or agents |
+| 🚀 **Hackathon-Ready** | Directly fulfills the “use MCP tools or external APIs” requirement |
+
+---
+
+### ⚙️ Why Use MCP Instead of Just Defining Tools
+| Approach | Limitation / Risk | MCP Advantage |
+|-----------|-------------------|----------------|
+| **Custom-defined tools** (e.g., direct Gmail API calls in code) | Each project must re-implement auth, rate limits, and API logic | MCP provides a *shared, pre-authorized* interface any agent can use |
+| **Embedded credentials** in `.env` | Security risk, harder for judges to test | Credentials handled server-side — no secrets in the repo |
+| **Tight coupling** between agent and tool | Hard to swap or extend integrations | MCP creates a plug-and-play API boundary between reasoning and execution |
+| **Limited reuse** | Tools only exist in one codebase | MCP servers can expose many tools to multiple agents dynamically |
+
+MCP turns these one-off integrations into **standardized, composable building blocks** that work across agents, organizations, or platforms — the same philosophy used by **Anthropic**, **LangChain**, and **Hugging Face** in 2025 agent ecosystems.
+
+We will build or extend the open-source [**mcp-gsuite**](https://github.com/MarkusPfundstein/mcp-gsuite) server and host it securely on **Google Cloud Run**.
+This server manages authentication, token refresh, and rate limiting — while exposing standardized MCP actions like:
+```json
+{
+  "action": "gmail.send",
+  "parameters": { "to": "candidate@example.com", "subject": "Interview Invite", "body": "..." }
+}
+```
+
+and
+
+```json
+{
+  "action": "calendar.create_event",
+  "parameters": { "summary": "HR Interview", "start": "...", "end": "..." }
+}
+```
+This architecture lets our HR agent (and future projects) perform real email and scheduling actions via secure MCP endpoints — giving judges a safe, live demo of true agentic behavior with no local credential setup required.
+
+## 🧠 ***`Agent Supervisor — Why Parlant + LangGraph`***
+
+LangGraph provides a powerful orchestration backbone for planning, reasoning, and executing multi-agent workflows.
+However, its common **supervisor pattern** has a key limitation: the supervisor routes each user query to **only one sub-agent** at a time.
+
+### ⚠️ Example Problem
+> “I uploaded my CV yesterday. Can I also reschedule my interview — and how long is the voice call?”
+
+A standard LangGraph supervisor would forward this entire message to, say, the **CV Screening Agent**,
+missing the **scheduling** and **voice screening** parts — causing incomplete or fragmented responses.
+
+### 💡 Parlant as the Fix
+**[Parlant](https://github.com/emcie-co/parlant)** solves this by replacing single-route logic with **dynamic guideline activation**.
+Instead of rigid routing, it loads multiple relevant *guidelines* into context simultaneously, allowing coherent handling of mixed intents. For example:
+ +```python +agent.create_guideline( + condition="User asks about rescheduling", + action="Call SchedulerAgent via LangGraph tool" +) + +agent.create_guideline( + condition="User asks about voice screening duration", + action="Query VoiceScreeningAgent" +) +``` + +If a user blends both topics, ***both guidelines trigger***, producing a unified, context-aware response. + +### ⚙️ Why Combine Them +| Layer | Framework | Role | +| ----------------------------- | ------------- | ----------------------------------------------------------------------- | +| 🧠 **Workflow Orchestration** | **LangGraph** | Executes structured agent workflows (CV → Voice → Schedule → Decision). | +| 💬 **Conversational Layer** | **Parlant** | Dynamically manages mixed intents using guideline-based reasoning. | +| 🔧 **Integration Layer** | **MCP Tools** | Provides standardized access to Gmail, Calendar, and Voice APIs. | + + +Together, ***Parlant + LangGraph*** merge structured planning with conversational adaptability — +enabling our HR agent to reason, plan, and respond naturally to complex, multi-topic interactions. + +## ✨ ***`Agentic Enhancements [BONUS]`*** + +To make the system more **autonomous, interpretable, and resilient**, we integrated a few lightweight yet powerful improvements: + +- 🧠 **Self-Reflection** – before executing a step, the agent briefly states *why* it's taking that action, improving reasoning transparency. +- 🔄 **Adaptive Re-Planning** – if a subagent or tool call fails (e.g., no calendar slot, missing response, or API timeout), the main planner automatically updates its plan — skipping, retrying, or re-ordering steps instead of stopping. +- 🧮 **LLM Self-Evaluation** – after each stage (CV, voice, scheduling), a lightweight judge model rates the result and adds feedback for the next step. +- 🗂️ **Context Summary** – the dashboard displays a live summary of all candidates, their current stage, and key outcomes. +- 🤝 **Human-in-the-Loop Checkpoint** – HR receives a short confirmation prompt before final scheduling to ensure responsible autonomy. + +These enhancements demonstrate **true agentic behavior** — autonomous planning, adaptive execution, and transparent reasoning — in a simple, explainable way. + +## 👥 ***`Team`*** +| Member | +| -------- | +| [Sebastian Wefers](https://github.com/Ocean-code-1995) | +| [Owen Kaplinsky](https://github.com/owenkaplinsky) | +| [SrikarMK](https://github.com/Srikarmk) | +| [Dmitri Moscoglo](https://github.com/DimiM99) | + +# ***`License`*** + +This project includes and builds upon [gmail-mcp](https://github.com/theposch/gmail-mcp), +which is licensed under the [GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html). + +This repository extends gmail-mcp for experimental integration and automation with Claude Desktop. +All modifications are distributed under the same GPLv3 license. + +> **Note:** The original gmail-mcp code has not been modified at this stage. 
diff --git a/langgraph.json b/langgraph.json new file mode 100644 index 0000000000000000000000000000000000000000..1d9272ab2000126af1542bef1b7019f81b2af672 --- /dev/null +++ b/langgraph.json @@ -0,0 +1,7 @@ +{ + "dependencies": ["./src"], + "graphs": { + "supervisor": "src.agents.supervisor.supervisor_v2:supervisor_agent" + } +} + diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000000000000000000000000000000000000..84e48a31df8d60c2c703846bea1841c83a38bb4d --- /dev/null +++ b/package-lock.json @@ -0,0 +1,6 @@ +{ + "name": "recruitment-agent3", + "lockfileVersion": 3, + "requires": true, + "packages": {} +} diff --git a/requirements/agent.txt b/requirements/agent.txt new file mode 100644 index 0000000000000000000000000000000000000000..eaf29769c53bb70724f730ff89301b6f725a704f --- /dev/null +++ b/requirements/agent.txt @@ -0,0 +1,3 @@ +langchain +langchain-openai +langgraph diff --git a/requirements/all.txt b/requirements/all.txt new file mode 100644 index 0000000000000000000000000000000000000000..a00ad88f7fabe6b302720bc3bd2b4e173d93f31d --- /dev/null +++ b/requirements/all.txt @@ -0,0 +1,64 @@ +# Consolidated requirements with all dependencies +# Deduplicated from all individual requirement files + +# Base dependencies +python-dotenv +pydantic>=2.0 +pydantic-settings +promptlayer + +# LangChain & LangGraph +langchain +langchain-openai +langchain-community +langchain-mcp-adapters +langgraph +langgraph-cli + +# API Framework +fastapi +uvicorn[standard] + +# Web & HTTP +websockets>=12.0 +requests +aiohttp +httpx>=0.28.1 + +# Database +sqlalchemy +psycopg2-binary + +# Google APIs +google-auth +google-auth-oauthlib +google-auth-httplib2 +google-api-python-client + +# MCP Server +mcp + +# OpenAI +openai + +# UI & Visualization +streamlit +rich + +# PDF & Image Processing +pypdfium2 +pillow +ftfy + +# Search & Tools +duckduckgo-search +langchain-tavily + +# Email validation +email-validator +pydantic[email] + +# Date utilities +python-dateutil + +gradio==6.0.1 \ No newline at end of file diff --git a/requirements/api.txt b/requirements/api.txt new file mode 100644 index 0000000000000000000000000000000000000000..02bf2c31689e18e97c940e32cabd38f392b4076c --- /dev/null +++ b/requirements/api.txt @@ -0,0 +1,7 @@ +# API Layer dependencies +-r base.txt + +fastapi +uvicorn[standard] +pydantic>=2.0 + diff --git a/requirements/base.txt b/requirements/base.txt new file mode 100644 index 0000000000000000000000000000000000000000..4c0808ce70d043db5dfd2129f2a5e0683a01bc5e --- /dev/null +++ b/requirements/base.txt @@ -0,0 +1,4 @@ +python-dotenv +pydantic +pydantic-settings +promptlayer \ No newline at end of file diff --git a/requirements/cv_ui.txt b/requirements/cv_ui.txt new file mode 100644 index 0000000000000000000000000000000000000000..49a3845f4575e289d59ef5e3b6f0a29806d4cf2d --- /dev/null +++ b/requirements/cv_ui.txt @@ -0,0 +1,15 @@ +# CV Upload UI requirements +# Includes base requirements +-r base.txt +-r db.txt + +openai + +# Streamlit for UI +streamlit + +# PDF processing +pypdfium2 +pillow +ftfy + diff --git a/requirements/db.txt b/requirements/db.txt new file mode 100644 index 0000000000000000000000000000000000000000..8a34a0a8dffbf695147dbab634325515cf791088 --- /dev/null +++ b/requirements/db.txt @@ -0,0 +1,4 @@ +# data base requirements +-r base.txt +sqlalchemy +psycopg2-binary \ No newline at end of file diff --git a/requirements/mcp_calendar.txt b/requirements/mcp_calendar.txt new file mode 100644 index 
0000000000000000000000000000000000000000..42abebfa9cee65735aa2bd6183b6f43c4642a6d3
--- /dev/null
+++ b/requirements/mcp_calendar.txt
@@ -0,0 +1,15 @@
+# Google Calendar MCP Server requirements
+# Includes base requirements
+-r base.txt
+
+# MCP server framework
+mcp
+
+# Google Calendar API
+google-auth
+google-auth-oauthlib
+google-api-python-client
+
+# Date utilities
+python-dateutil
+
diff --git a/requirements/mcp_gmail.txt b/requirements/mcp_gmail.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c5596905094599698f9961f7aaa289b6d4f5e502
--- /dev/null
+++ b/requirements/mcp_gmail.txt
@@ -0,0 +1,16 @@
+# Gmail MCP Server requirements
+# Includes base requirements
+-r base.txt
+
+# MCP server framework
+mcp
+
+# Gmail API
+google-auth
+google-auth-oauthlib
+google-api-python-client
+google-auth-httplib2
+
+# HTTP client
+httpx>=0.28.1
+
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..146d6a465b798b2a8ae379b7281e8b598a75c3e1
--- /dev/null
+++ b/requirements/requirements.txt
@@ -0,0 +1,21 @@
+google-auth
+langgraph
+langchain
+langchain-openai
+langchain-mcp-adapters
+duckduckgo-search
+langchain-tavily
+langchain-community
+langgraph-cli
+promptlayer
+
+mcp
+pypdfium2
+pillow
+ftfy
+sqlalchemy
+psycopg2-binary
+pydantic
+pydantic-settings
+python-dotenv
+openai
diff --git a/requirements/supervisor.txt b/requirements/supervisor.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2903298d76f271b45b75c05121ba0e87026b2d26
--- /dev/null
+++ b/requirements/supervisor.txt
@@ -0,0 +1,12 @@
+# Supervisor Agent requirements
+# Includes base requirements
+-r base.txt
+
+# LangGraph for agent orchestration
+langgraph
+langchain-community
+langchain-mcp-adapters
+streamlit
+rich
+requests
+
diff --git a/requirements/voice_proxy.txt b/requirements/voice_proxy.txt
new file mode 100644
index 0000000000000000000000000000000000000000..66d8e3866fb714b5e073746cc56f43e04d36cb7c
--- /dev/null
+++ b/requirements/voice_proxy.txt
@@ -0,0 +1,9 @@
+-r base.txt
+-r db.txt
+fastapi
+uvicorn[standard]
+websockets>=12.0
+requests
+aiohttp
+email-validator
+pydantic[email]
\ No newline at end of file
diff --git a/requirements/voice_screening_ui.txt b/requirements/voice_screening_ui.txt
new file mode 100644
index 0000000000000000000000000000000000000000..22ba058f78a29f250f9704c936d9be790f8607b7
--- /dev/null
+++ b/requirements/voice_screening_ui.txt
@@ -0,0 +1,6 @@
+-r base.txt
+streamlit
+uvicorn[standard]
+requests
+
+
diff --git a/scripts/__init__.py b/scripts/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fd0138db1063d6d1395ad06dbabfd6d1ab88134
--- /dev/null
+++ b/scripts/__init__.py
@@ -0,0 +1,2 @@
+"""CLI scripts package."""
+
diff --git a/scripts/db/__init__.py b/scripts/db/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..38f917382a09b54a016a8ea1cef535f760f8d7ed
--- /dev/null
+++ b/scripts/db/__init__.py
@@ -0,0 +1,10 @@
+"""Database CLI utilities."""
+
+import sys
+import os
+
+# Add project root to sys.path for all db scripts
+project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
+if project_root not in sys.path:
+    sys.path.insert(0, project_root)
+
diff --git a/scripts/db/debug_all.py b/scripts/db/debug_all.py
new file mode 100644
index 0000000000000000000000000000000000000000..cebb8a379e7d0979ffca52dcbe229ab2e5d56c9a
--- /dev/null
+++
b/scripts/db/debug_all.py @@ -0,0 +1,60 @@ +""" +Run all database debug checks. + +Run as follows: +>>> POSTGRES_HOST=localhost POSTGRES_PORT=5433 POSTGRES_PASSWORD=password123 python -m scripts.db.debug_all + +This runs: +1. Connection test +2. Session query test +3. List existing candidates +""" + +import sys +import os + +# Add project root to sys.path +project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')) +if project_root not in sys.path: + sys.path.insert(0, project_root) + +from scripts.db.test_connection import test_connection +from scripts.db.test_session import test_session_query +from scripts.db.list_candidates import list_candidates + + +def run_all_checks() -> None: + """Run all database diagnostic checks.""" + print("=" * 50) + print("🔍 DATABASE DIAGNOSTICS") + print("=" * 50) + + # 1. Test connection + conn_ok = test_connection() + + if not conn_ok: + print("\n⛔ Stopping - connection failed") + return + + print() + + # 2. Test session + session_ok = test_session_query() + + if not session_ok: + print("\n⛔ Stopping - session failed") + return + + print() + + # 3. List candidates + list_candidates() + + print() + print("=" * 50) + print("✅ All checks completed") + print("=" * 50) + + +if __name__ == "__main__": + run_all_checks() diff --git a/scripts/db/list_candidates.py b/scripts/db/list_candidates.py new file mode 100644 index 0000000000000000000000000000000000000000..43662c9a4162515e1e967c16f49e4c7b37727dfe --- /dev/null +++ b/scripts/db/list_candidates.py @@ -0,0 +1,63 @@ +""" +List candidates in the database. + +Run standalone: +>>> POSTGRES_HOST=localhost POSTGRES_PORT=5433 python scripts/db/list_candidates.py +""" + +from sqlalchemy.exc import ProgrammingError + +# Ensure project root is in path +import scripts.db # noqa: F401 + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate + + +def list_candidates(limit: int = 10) -> bool: + """ + Check and list existing candidates in the database. + + Args: + limit: Maximum number of candidates to display. + + Returns: + True if query successful, False otherwise. + """ + print("--- 🧾 Checking Existing Candidates ---") + session = SessionLocal() + try: + count = session.query(Candidate).count() + print(f"📊 Found {count} candidate(s) in the database.") + + if count == 0: + print("⚠️ No candidates found.") + else: + print(f"\n👀 Listing candidates (up to {limit}):") + candidates = ( + session.query(Candidate) + .order_by(Candidate.full_name) + .limit(limit) + .all() + ) + for c in candidates: + print(f" - {c.full_name} | {c.email} | Status: {c.status}") + + return True + + except ProgrammingError as e: + print("❌ Table 'candidates' does not exist or schema not initialized.") + print("ℹ️ Try running your DB initialization script or migrations.") + print(f"Error: {e}") + return False + except Exception as e: + print("❌ Error during candidate check.") + print(f"Error: {e}") + return False + finally: + session.close() + + +if __name__ == "__main__": + list_candidates() + diff --git a/scripts/db/test_connection.py b/scripts/db/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..263abdecdbfe2bbfb866a5d0cf2950e32b7540b1 --- /dev/null +++ b/scripts/db/test_connection.py @@ -0,0 +1,51 @@ +""" +Test basic database connection. 
+ +Run standalone: +>>> POSTGRES_HOST=localhost POSTGRES_PORT=5433 python scripts/db/test_connection.py +""" + +import os +from sqlalchemy import text + +# Ensure project root is in path +import scripts.db # noqa: F401 + +from src.database.candidates.client import get_engine + + +def test_connection() -> bool: + """ + Test basic database connectivity. + + Returns: + True if connection successful, False otherwise. + """ + print("--- Testing Database Connection ---") + + # Print environment info + print(f"POSTGRES_HOST (env): {os.environ.get('POSTGRES_HOST')}") + print(f"POSTGRES_PORT (env): {os.environ.get('POSTGRES_PORT')}") + + try: + engine = get_engine() + print(f"Engine URL: {engine.url}") + + with engine.connect() as connection: + print("✅ Connection successful!") + result = connection.execute(text("SELECT 1")) + print(f"✅ SELECT 1 result: {result.fetchone()}") + return True + + except Exception as e: + print("\n❌ Connection FAILED") + print(f"Error type: {type(e).__name__}") + print(f"Error message: {str(e)}") + import traceback + traceback.print_exc() + return False + + +if __name__ == "__main__": + test_connection() + diff --git a/scripts/db/test_session.py b/scripts/db/test_session.py new file mode 100644 index 0000000000000000000000000000000000000000..fe277cd6c5445474e0cd300f0d2acf15e452700a --- /dev/null +++ b/scripts/db/test_session.py @@ -0,0 +1,40 @@ +""" +Test database session and query execution. + +Run standalone: +>>> POSTGRES_HOST=localhost POSTGRES_PORT=5433 python scripts/db/test_session.py +""" + +from sqlalchemy import text + +# Ensure project root is in path +import scripts.db # noqa: F401 + +from src.database.candidates.client import SessionLocal + + +def test_session_query() -> bool: + """ + Test session creation and basic query execution. + + Returns: + True if session works, False otherwise. 
+ """ + print("--- Testing Session Query ---") + session = SessionLocal() + try: + result = session.execute(text("SELECT now()")) + print(f"✅ Session execute successful: {result.fetchone()[0]}") + return True + + except Exception as e: + print("\n❌ Session Query FAILED") + print(f"Error: {e}") + return False + finally: + session.close() + + +if __name__ == "__main__": + test_session_query() + diff --git a/scripts/db/wipe.py b/scripts/db/wipe.py new file mode 100644 index 0000000000000000000000000000000000000000..d6208e3682513b84d0df0cce40e16b4340389253 --- /dev/null +++ b/scripts/db/wipe.py @@ -0,0 +1,44 @@ +""" +Run as follows: +>>> POSTGRES_HOST=localhost POSTGRES_PORT=5433 POSTGRES_PASSWORD=password123 python -m scripts.db.wipe +""" + +import sys +import os +from sqlalchemy import text + +# Add project root to sys.path +project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')) +sys.path.append(project_root) + +from src.database.candidates.client import get_engine + +def wipe_database(): + print("⚠️ WARNING: This will PERMANENTLY DELETE ALL RECORDS from the 'candidates' table and all related tables (CASCADE).") + confirm = input("Type 'yes' to confirm: ") + + if confirm.lower() != 'yes': + print("Operation cancelled.") + return + + engine = get_engine() + + try: + with engine.connect() as connection: + print("Connecting to database...") + + # Using TRUNCATE with CASCADE is faster and cleaner for Postgres + print("Truncating candidates table with CASCADE...") + connection.execute(text("TRUNCATE TABLE candidates CASCADE;")) + connection.commit() + + print("✅ Database entries wiped successfully.") + + except Exception as e: + print(f"❌ Error wiping database: {e}") + import traceback + traceback.print_exc() + +if __name__ == "__main__": + wipe_database() + diff --git a/scripts/infra/__init__.py b/scripts/infra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9ff4712fee87ab6eb0f4ca5ec1c2e71d4b2ec94b --- /dev/null +++ b/scripts/infra/__init__.py @@ -0,0 +1,2 @@ +"""Infrastructure scripts package.""" + diff --git a/scripts/infra/gcp_setup.sh b/scripts/infra/gcp_setup.sh new file mode 100644 index 0000000000000000000000000000000000000000..8f19a40ccbc922d50b272984801e001c24094cd2 --- /dev/null +++ b/scripts/infra/gcp_setup.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Interactive gcloud + ADC setup for Gmail MCP. + +# --- sanity: require gcloud --- +if ! command -v gcloud >/dev/null 2>&1; then + echo "Error: gcloud CLI not found. Install Google Cloud SDK and retry." + exit 1 +fi + +read -r -p "Your Google account email: " ACCOUNT_EMAIL +read -r -p "Target GCP Project ID (e.g., gradio-hackathon-25): " PROJECT_ID + +# --- minimal validation --- +# Project ID: 6–30 chars, starts with letter, lowercase letters/digits/hyphens. +if [[ ! "$PROJECT_ID" =~ ^[a-z][a-z0-9-]{5,29}$ ]]; then + echo "Error: PROJECT_ID must start with a letter, be 6-30 chars, and use only lowercase letters, digits, and hyphens." + exit 1 +fi +# Basic email sanity check +if [[ ! "$ACCOUNT_EMAIL" =~ ^[^@]+@[^@]+\.[^@]+$ ]]; then + echo "Error: That doesn't look like a valid email." + exit 1 +fi + +echo "" +echo "---------------- Confirmation ----------------" +echo " Account Email : ${ACCOUNT_EMAIL}" +echo " Project ID : ${PROJECT_ID}" +echo "------------------------------------------------" +read -r -p "Proceed to authenticate and set config? [y/N]: " CONFIRM +CONFIRM=${CONFIRM:-N} +if [[ ! "$CONFIRM" =~ ^[Yy]$ ]]; then + echo "Aborted." 
+ exit 0 +fi + +# Auth for gcloud and ADC (opens browser windows) +echo "🔑 Logging into GCP..." +gcloud auth login "${ACCOUNT_EMAIL}" --update-adc +echo "🔐 Setting up Application Default Credentials (ADC)..." +gcloud auth application-default login + +# Set defaults (safe even if project doesn't exist yet) +echo "📂 Setting gcloud config for account and project..." +gcloud config set core/account "${ACCOUNT_EMAIL}" +echo "📦 Setting project: $PROJECT_ID" +gcloud config set project "${PROJECT_ID}" + +echo "✅ gcloud & ADC configured for ${ACCOUNT_EMAIL} / project ${PROJECT_ID}" + diff --git a/scripts/infra/terraform_apply.sh b/scripts/infra/terraform_apply.sh new file mode 100644 index 0000000000000000000000000000000000000000..b352b5f5871da3c6a4eaa92e5be577fe1bc159c9 --- /dev/null +++ b/scripts/infra/terraform_apply.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Interactive Terraform apply for Gmail MCP (no-billing setup). +# - Prompts for project_id, project_name, user_email +# - Falls back to terraform/terraform.tfvars if inputs are blank +# - Auto-imports an existing GCP project to avoid 409 alreadyExists +# - Applies the stack + +# --- resolve paths --- +script_dir="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +TF_DIR="${TF_DIR:-${script_dir}/../../terraform}" +TFVARS_PATH="${TFVARS_PATH:-${TF_DIR}/terraform.tfvars}" + +# --- helpers --- +read_tfvar() { + # Extract a simple "key = \"value\"" from terraform.tfvars (no HCL expressions) + local key="$1" + local file="$2" + [[ -f "$file" ]] || { echo ""; return 0; } + sed -nE "s/^[[:space:]]*${key}[[:space:]]*=[[:space:]]*\"([^\"]+)\"[[:space:]]*$/\1/p" "$file" | head -n1 +} + +# load defaults from tfvars (if present) +DEFAULT_PROJECT_ID="$(read_tfvar project_id "$TFVARS_PATH")" +DEFAULT_PROJECT_NAME="$(read_tfvar project_name "$TFVARS_PATH")" +DEFAULT_USER_EMAIL="$(read_tfvar user_email "$TFVARS_PATH")" + +# prompt with defaults +read -r -p "GCP Project ID [${DEFAULT_PROJECT_ID:-none}]: " PROJECT_ID +read -r -p "Project Name [${DEFAULT_PROJECT_NAME:-none}]: " PROJECT_NAME +read -r -p "Your Google Account Email [${DEFAULT_USER_EMAIL:-none}]: " USER_EMAIL + +# use defaults if blank +PROJECT_ID="${PROJECT_ID:-$DEFAULT_PROJECT_ID}" +PROJECT_NAME="${PROJECT_NAME:-$DEFAULT_PROJECT_NAME}" +USER_EMAIL="${USER_EMAIL:-$DEFAULT_USER_EMAIL}" + +# validate presence +if [[ -z "${PROJECT_ID}" || -z "${PROJECT_NAME}" || -z "${USER_EMAIL}" ]]; then + echo "Error: project_id, project_name, and user_email are required (either input or terraform.tfvars defaults)." + exit 1 +fi + +# minimal validation +if [[ ! "$PROJECT_ID" =~ ^[a-z][a-z0-9-]{5,29}$ ]]; then + echo "Error: PROJECT_ID must start with a letter, be 6-30 chars, and use only lowercase letters, digits, and hyphens." + exit 1 +fi +if [[ ! "$USER_EMAIL" =~ ^[^@]+@[^@]+\.[^@]+$ ]]; then + echo "Error: USER_EMAIL doesn't look like a valid email." + exit 1 +fi + +# verify TF dir & files +if [[ ! -d "$TF_DIR" ]]; then + echo "Error: Terraform directory not found: $TF_DIR" + exit 1 +fi +if ! ls "$TF_DIR"/*.tf >/dev/null 2>&1; then + echo "Error: No .tf files found in $TF_DIR" + exit 1 +fi + +# summary +echo "" +echo "──────────────── Terraform Apply ────────────────" +echo " Terraform dir : ${TF_DIR}" +echo " tfvars path : ${TFVARS_PATH}" +echo " Project ID : ${PROJECT_ID}" +echo " Project Name : ${PROJECT_NAME}" +echo " User Email : ${USER_EMAIL}" +echo "─────────────────────────────────────────────────" +read -r -p "Proceed with terraform apply? 
[y/N]: " CONFIRM +CONFIRM=${CONFIRM:-N} +if [[ ! "$CONFIRM" =~ ^[Yy]$ ]]; then + echo "Aborted." + exit 0 +fi + +cd "$TF_DIR" + +echo "Initializing Terraform…" +terraform init -input=false + +# If project already exists, import into state to avoid 409 alreadyExists +if command -v gcloud >/dev/null 2>&1; then + if gcloud projects describe "$PROJECT_ID" >/dev/null 2>&1; then + echo "Project '${PROJECT_ID}' exists. Importing into Terraform state (idempotent)…" + terraform import -input=false google_project.project "$PROJECT_ID" || true + else + echo "Project '${PROJECT_ID}' does not exist yet. Terraform will create it." + fi +else + echo "Warning: gcloud not found; skipping existence check. If the project exists, apply may fail with 409." +fi + +echo "Applying Terraform…" +terraform apply -auto-approve \ + -var="project_id=${PROJECT_ID}" \ + -var="project_name=${PROJECT_NAME}" \ + -var="user_email=${USER_EMAIL}" + +echo "✅ Successfully applied Terraform changes." +echo "📝 Remaining manual setup steps for Gmail and Calendar MCP servers:" +echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" +echo " → Open OAuth consent screen: https://console.cloud.google.com/apis/credentials/consent?project=${PROJECT_ID}" +echo " → Create Desktop OAuth client(s): https://console.cloud.google.com/apis/credentials?project=${PROJECT_ID}" +echo " - ~/.gmail-mcp/credentials.json" +echo " - ~/.calendar-mcp/credentials.json" + diff --git a/secrets/auth_flow.md b/secrets/auth_flow.md new file mode 100644 index 0000000000000000000000000000000000000000..558cf6c3655070c4a79e0bea2c0390f3ad964f66 --- /dev/null +++ b/secrets/auth_flow.md @@ -0,0 +1,27 @@ +### 🔐 Google MCP Auth Flow Summary + +- **`credentials.json`** → App credentials (downloaded once from Google Cloud Console). +- **`token.json`** → User-specific OAuth token (auto-created on first run). + +--- + +### ⚙️ When & How It Happens + +1. **At MCP startup:** + The script loads `credentials.json` (OAuth client info). +2. **If no valid `token.json`:** + - Browser-based OAuth flow starts. + - You log in and grant access. + - A new `token.json` is saved locally. +3. **On later runs:** + - The MCP reads the existing `token.json`. + - Automatically refreshes it if expired. + - No browser prompt needed. + +--- + +### 🚫 Git Hygiene + +- ❌ Never commit `token.json` to Git. +- 👤 Each developer generates their own token. +- 🖥️ For servers or CI → use a **service account** instead. diff --git a/secrets/gcalendar-mcp/info.md b/secrets/gcalendar-mcp/info.md new file mode 100644 index 0000000000000000000000000000000000000000..367235b63b7d7f83c5a069bfc4ca7c72d09a3fd7 --- /dev/null +++ b/secrets/gcalendar-mcp/info.md @@ -0,0 +1 @@ +Put tokens and credentials here. \ No newline at end of file diff --git a/secrets/gmail-mcp/info.md b/secrets/gmail-mcp/info.md new file mode 100644 index 0000000000000000000000000000000000000000..367235b63b7d7f83c5a069bfc4ca7c72d09a3fd7 --- /dev/null +++ b/secrets/gmail-mcp/info.md @@ -0,0 +1 @@ +Put tokens and credentials here. 
\ No newline at end of file diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/agents/__init__.py b/src/agents/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ae4be88d899e84028d5526e02bb6ce06c82dd11 --- /dev/null +++ b/src/agents/__init__.py @@ -0,0 +1,14 @@ +from .db_executor import db_executor +from .cv_screening import screen_cv, cv_screening_workflow +from .gcalendar import gcalendar_agent +from .gmail import gmail_agent +from .voice_screening import voice_judge + +__all__ = [ + "db_executor", + "screen_cv", + "cv_screening_workflow", + "gcalendar_agent", + "gmail_agent", + "voice_judge", +] diff --git a/src/agents/cv_screening/__init__.py b/src/agents/cv_screening/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..486b27dcdaec1d0947c1aec73433d3092dd15ba3 --- /dev/null +++ b/src/agents/cv_screening/__init__.py @@ -0,0 +1,4 @@ +from .cv_screener import screen_cv +from .cv_screening_workflow import cv_screening_workflow + +__all__ = ["screen_cv", "cv_screening_workflow"] diff --git a/src/agents/cv_screening/cv_screener.py b/src/agents/cv_screening/cv_screener.py new file mode 100644 index 0000000000000000000000000000000000000000..eb1e5469e6d6d28a211354782ef0873765a61f0b --- /dev/null +++ b/src/agents/cv_screening/cv_screener.py @@ -0,0 +1,88 @@ +"""CV Screening Agent Module + +Run as follows: +>>> docker compose up --build +>>> docker compose run --rm candidates_db_init python -m src.agents.cv_screening.screener +""" +import json +from langchain_openai import ChatOpenAI +from langchain.messages import SystemMessage, HumanMessage + +from dotenv import load_dotenv +from src.agents.cv_screening.schemas.output_schema import CVScreeningOutput +from src.agents.cv_screening.utils import read_file +from src.database.candidates import write_cv_results_to_db +from src.prompts import get_prompt + +load_dotenv() + +SYSTEM_PROMPT = get_prompt( + template_name="CV_Screener", + latest_version=True +) + +# --- The evaluator function --- +def screen_cv(cv_text: str, jd_text: str) -> CVScreeningOutput: + """ + Evaluate a candidate's CV against a job description using an LLM. + + Args: + cv_text (str): The text content of the candidate's CV. + jd_text (str): The text content of the Job Description. + + Returns: + CVScreeningOutput: The structured screening result. + Makes model write feedback before scoring, leading to better calibration + and genuine reasoning that leads to more balanced scores. + + **NOTE**: + >>> The model generates feedback first (Chain-of-Thought) + >>> to ensure calibrated scores. 
+ + """ + llm = ( + ChatOpenAI( + model="gpt-4o-mini", + temperature=0, + max_tokens=1500, + ) + .with_structured_output(CVScreeningOutput) + ) + # payload + messages = [ + # Instruction + SystemMessage( + content=SYSTEM_PROMPT + ), + # Payload + HumanMessage( + content=( + f"Job Description:\n{jd_text}\n\n" + f"Candidate CV:\n{cv_text}\n" + ) + ), + ] + + return llm.invoke(messages) + + + +# --- Main execution for testing --- +if __name__ == "__main__": + from pathlib import Path + #BASE_PATH = Path("/Users/sebastianwefers/Desktop/projects/recruitment-agent/src/database") + BASE_PATH = Path(__file__).resolve().parents[2] / "database" + + cv_text = read_file(BASE_PATH / "cvs/parsed/c762271c-af8f-49db-acbb-e37e5f0f0f98_SWefers_CV-sections.txt") + jd_text = read_file(BASE_PATH / "cvs/job_postings/ai_engineer.txt") + + # trigger evaluation + result = screen_cv(cv_text, jd_text) + print(json.dumps(result.model_dump(), indent=2)) + + # optionally write to DB + write_cv_results_to_db( + candidate_email="sebastianwefersnz@gmail.com", + result=result, + job_title="AI Engineer" + ) \ No newline at end of file diff --git a/src/agents/cv_screening/cv_screening_workflow.py b/src/agents/cv_screening/cv_screening_workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..a2c65d9dc316d2c407bda69f53b45a0399be5b0f --- /dev/null +++ b/src/agents/cv_screening/cv_screening_workflow.py @@ -0,0 +1,103 @@ +from pathlib import Path +from langchain_core.tools import tool + + +from src.agents.cv_screening.cv_screener import screen_cv +from src.agents.cv_screening.utils import read_file +from src.database.candidates import ( + write_cv_results_to_db, + get_candidate_by_name, +) + +@tool +def cv_screening_workflow(candidate_full_name: str = "") -> str: + """ + Runs the deterministic CV screening workflow for a candidate. + This is a fixed sequential process, not a reasoning agent. + + Steps: + 1. Retrieve candidate info from DB + 2. Read files (CV & Job Description) + 3. Evaluate CV + 4. Store results in DB & update status + + Args: + candidate_full_name (str): The full name of the candidate to screen. + + Returns: + str: A message indicating the outcome of the workflow. (✅ or ❌) + """ + if not candidate_full_name: + return "❌ Candidate name is required." + + # 1️⃣ Retrieve candidate info from DB + print(f"🔍 Looking up candidate: {candidate_full_name}") + candidate = get_candidate_by_name(candidate_full_name) + + if not candidate: + return f"❌ Candidate '{candidate_full_name}' not found in database." + + candidate_email = candidate["email"] + cv_path_str = candidate["parsed_cv_file_path"] + + if not cv_path_str: + return f"❌ No parsed CV path recorded for '{candidate_full_name}'." + + # Resolve paths + # Assuming the parsed path in DB is relative to project root (e.g., src/database/cvs/parsed/...) + # We need to ensure we can find it. 
+ + # Calculate project root from this file location + # src/agents/cv_screening/cv_screening_workflow.py -> 3 levels up to src -> 4 to root + root_dir = Path(__file__).resolve().parents[3] + + cv_path = root_dir / cv_path_str + if not cv_path.exists(): + # Try treating it as absolute or check if the path in DB was absolute + cv_path = Path(cv_path_str) + if not cv_path.exists(): + return f"❌ CV file not found at: {cv_path}" + + # JD path is constant for this MVP + jd_path = root_dir / "src/database/job_postings/ai_engineer.txt" + + if not jd_path.exists(): + return f"❌ Job description not found at: {jd_path}" + + # 2️⃣ Read files + print(f"📄 Reading Job Description from: {jd_path}") + jd_text = read_file(jd_path) + + print(f"📄 Reading CV from: {cv_path}") + cv_text = read_file(cv_path) + + + # 3️⃣ Evaluate CV + print("🧠 Running LLM screening...") + try: + result = screen_cv(cv_text, jd_text) + except Exception as e: + return f"❌ Error during LLM screening: {str(e)}" + + # 4️⃣ Store results in DB & update status + print("💾 Saving results to database...") + try: + write_cv_results_to_db( + candidate_email=candidate_email, + result=result, + job_title="AI Engineer" + ) + except Exception as e: + return f"❌ Error saving results to DB: {str(e)}" + + return f"✅ CV Screening Workflow completed successfully for {candidate_full_name}. Scores and feedback have been saved to the database." + + + + +if __name__ == "__main__": + # Example usage for testing + # You can run this directly if you have a candidate in the DB + import sys + name = sys.argv[1] if len(sys.argv) > 1 else "Ada Lovelace" + cv_screening_workflow(name) diff --git a/src/agents/cv_screening/schemas/__init__.py b/src/agents/cv_screening/schemas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/agents/cv_screening/schemas/output_schema.py b/src/agents/cv_screening/schemas/output_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..ea3fb659a95898f5a1743503addce73cc17d0961 --- /dev/null +++ b/src/agents/cv_screening/schemas/output_schema.py @@ -0,0 +1,12 @@ +from pydantic import BaseModel, Field +from typing import Optional, Dict, Any + +class CVScreeningOutput(BaseModel): + # CRITICAL: Keep llm_feedback as the first field. + # This enforces Chain-of-Thought reasoning: the model must explain its assessment + # BEFORE assigning scores, leading to better calibration. DO NOT REORDER. 
+    llm_feedback: str
+    skills_match_score: float = Field(..., ge=0, le=1)
+    experience_match_score: float = Field(..., ge=0, le=1)
+    education_match_score: float = Field(..., ge=0, le=1)
+    overall_fit_score: float = Field(..., ge=0, le=1)
diff --git a/src/agents/cv_screening/tools/__init__.py b/src/agents/cv_screening/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/agents/cv_screening/utils/__init__.py b/src/agents/cv_screening/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e07eaff522a748f7de9dcc023cc87e813b2a289d
--- /dev/null
+++ b/src/agents/cv_screening/utils/__init__.py
@@ -0,0 +1,5 @@
+from .read_file import read_file
+
+__all__ = [
+    "read_file",
+]
diff --git a/src/agents/cv_screening/utils/read_file.py b/src/agents/cv_screening/utils/read_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..9666bc79f32f7eee9917a6b9c6be4ff10e16ddba
--- /dev/null
+++ b/src/agents/cv_screening/utils/read_file.py
@@ -0,0 +1,7 @@
+from pathlib import Path
+
+def read_file(path: Path) -> str:
+    """Read the contents of a file and return as a string.
+    """
+    with open(path, "r", encoding="utf-8") as f:
+        return f.read()
\ No newline at end of file
diff --git a/src/agents/db_executor/__init__.py b/src/agents/db_executor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ab096c0839e81bf576b541809dd33c1d95e48c1
--- /dev/null
+++ b/src/agents/db_executor/__init__.py
@@ -0,0 +1,5 @@
+from .db_executor import db_executor
+
+__all__ = [
+    "db_executor",
+]
diff --git a/src/agents/db_executor/codeact/__init__.py b/src/agents/db_executor/codeact/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..481ba62765fb1ab95aca5002fcb76fd7b6e97fc0
--- /dev/null
+++ b/src/agents/db_executor/codeact/__init__.py
@@ -0,0 +1,6 @@
+"""
+This coding agent is based on the `CodeAct` agent pattern, see:
+- https://arxiv.org/abs/2408.02193
+- https://huggingface.co/collections/DataImaginations/codeact-interaction-framework-for-llm-agents
+- https://github.com/langchain-ai/langgraph-codeact
+"""
\ No newline at end of file
diff --git a/src/agents/db_executor/codeact/core/codeact.py b/src/agents/db_executor/codeact/core/codeact.py
new file mode 100644
index 0000000000000000000000000000000000000000..acf6b26292f57f80c26195a439bdd6e3358478a2
--- /dev/null
+++ b/src/agents/db_executor/codeact/core/codeact.py
@@ -0,0 +1,541 @@
+import re
+import io
+import builtins
+import contextlib
+from collections.abc import Generator
+import inspect
+from pathlib import Path
+from typing import Any, Awaitable, Callable, Optional, Sequence, Type, TypeVar, Union, Literal
+import types
+import json
+
+import tiktoken
+from langchain.chat_models import init_chat_model
+from langchain_core.language_models import BaseChatModel
+from langchain_core.tools import StructuredTool
+from langchain_core.tools import tool as create_tool
+from langchain_core.messages import AIMessageChunk, AIMessage
+from langgraph.graph import END, START, StateGraph, MessagesState
+from langgraph.types import Command
+from langgraph.checkpoint.memory import MemorySaver
+
+from ..schemas import TokenStream
+from ..schemas.openai_key import OpenAIApiKey
+from ..utils import pretty_print_state
+
+
+class CodeActState(MessagesState):
+    """State
for CodeAct agent."""
+
+    script: Optional[str]
+    """The Python code script to be executed."""
+    context: dict[str, Any]
+    """Dictionary containing the execution context with available tools and variables."""
+
+EvalFunction = Callable[[str, dict[str, Any]], tuple[str, dict[str, Any]]]
+EvalCoroutine = Callable[[str, dict[str, Any]], Awaitable[tuple[str, dict[str, Any]]]]
+
+StateSchema = TypeVar("StateSchema", bound=CodeActState)
+StateSchemaType = Type[StateSchema]
+
+
+class CodeActAgent:
+    def __init__(
+        self,
+        model_name: str,
+        model_provider: str,
+        tools: Optional[Sequence] = None,
+        eval_fn=None,
+        system_prompt: Optional[Union[str, Path]] = None,
+        bind_tools: bool = False,
+        memory: bool = True,
+    ) -> None:
+        """
+        Parameters
+        ----------
+        - model_name : str
+            The name of the chat model to use (e.g., "gpt-4o").
+        - model_provider : str
+            The model provider (e.g., "openai").
+        - tools : Optional[Sequence], optional
+            A list of tools (functions or StructuredTool) available to the agent.
+        - eval_fn : Optional[EvalFunction or EvalCoroutine], optional
+            The function or coroutine to evaluate generated code. If None, uses default_eval.
+        - system_prompt : Union[str, Path], optional
+            The system prompt as a file path or raw string.
+        - bind_tools : bool, optional
+            Whether to bind tool signatures and docstrings into the system prompt.
+        - memory : bool, optional
+            Whether to enable memory checkpointing.
+        """
+        self.model_name = model_name
+        self.model_provider = model_provider
+        self.tools = tools or []
+        self.eval_fn = eval_fn or self.default_eval
+        self.system_prompt = system_prompt
+        self.bind_tools = bind_tools
+        self.memory = memory
+
+        # Initialize components
+        self.model = init_chat_model(model_name, model_provider=model_provider)
+        self.prompt = self._create_system_prompt()
+        self.agent = self._create_codeact(self.model, self.tools, self.eval_fn)
+
+        checkpointer = MemorySaver() if memory else None
+        self.compiled_agent = self.agent.compile(checkpointer=checkpointer)
+
+
+    def _create_system_prompt(self) -> str:
+        """Build the final system prompt and compute token counts.
+        """
+        system_text = self._load_prompt(self.system_prompt)
+        if not system_text:
+            raise ValueError("`system_prompt` must be provided as a file path or string.")
+
+        system_text = system_text.strip()
+
+        # Base version (without tools)
+        prompt_text = system_text
+
+        # If bind_tools enabled, build and append
+        if self.bind_tools:
+            if not self.tools:
+                print("[⚠️] bind_tools=True but no tools provided. Skipping tool injection.")
+            else:
+                tools_text = self._build_tool_context()
+                prompt_text = f"{system_text.strip()}\n\n{tools_text.strip()}"
+
+        # Compute token counts
+        tokens_without_tools = self._count_tokens(system_text)
+        tokens_with_tools = self._count_tokens(prompt_text)
+
+        # Print summary neatly
+        print(
+            f"🧮 System prompt token count:\n"
+            f" - Without tools: {tokens_without_tools}\n"
+            f" - With tools: {tokens_with_tools}"
+        )
+
+        return prompt_text
+
+
+    def _build_tool_context(self) -> str:
+        """Constructs the tool context block with docstrings and signatures.
+        """
+        tool_strings = []
+        for t in self.tools:
+            func = t.func if isinstance(t, StructuredTool) else t
+            sig = inspect.signature(func)
+            doc = (func.__doc__ or "").strip()
+            tool_strings.append(
+                f"def {func.__name__}{sig}:\n    \"\"\"{doc}\"\"\"\n    ..."
+ ) + + joined_tools = "\n\n".join(tool_strings) + return ( + "\n\nNote that you have access to the following predefined tools:\n\n" + f"{joined_tools}" + ) + + @staticmethod + def _load_prompt(p: Optional[Union[str, Path]]) -> Optional[str]: + """Load a prompt from file path or treat as raw string.""" + if p is None: + return None + + # If it's already multiline or contains newlines, it's almost certainly a literal string + if isinstance(p, str) and ("\n" in p or len(p) > 200): + return p + + # Otherwise, check if it's an actual file path + path = Path(p) + if path.exists() and path.is_file(): + return path.read_text(encoding="utf-8") + + # Fallback: just return as string + return str(p) + + + def _count_tokens(self, text: str) -> int: + """Count tokens for a given text. + """ + try: + enc = tiktoken.encoding_for_model(self.model_name) + except Exception: + enc = tiktoken.get_encoding("cl100k_base") + return len(enc.encode(text)) + + + + def _extract_and_combine_codeblocks(self, text: str) -> str: + """ + Extract and combine code blocks from the model completion. + Helper function to execute extracted code in sandbox environment. + """ + pattern = r"(?:^|\n)```(.*?)(?:```(?:\n|$))" #r"(?:^|\n)```(.*?)(?:```(?:\n|$))" + code_blocks = re.findall(pattern, text, re.DOTALL) + if not code_blocks: + return "" + processed = [] + for block in code_blocks: + lines = block.strip().split("\n") + if lines and (not lines[0].strip() or " " not in lines[0].strip()): + block = "\n".join(lines[1:]) + processed.append(block) + return "\n\n".join(processed) + + + @staticmethod + def default_eval(code: str, _locals: dict[str, Any]) -> tuple[str, dict[str, Any]]: + """Evaluate the code in the sandbox. + """ + original_keys = set(_locals.keys()) + try: + with contextlib.redirect_stdout(io.StringIO()) as f: + exec(code, builtins.__dict__, _locals) + result = f.getvalue() or "" + except Exception as e: + result = f"Error during execution: {repr(e)}" + new_keys = set(_locals.keys()) - original_keys + new_vars = {key: _locals[key] for key in new_keys} + return result, new_vars + + @staticmethod + def _filter_serializable(d: dict[str, Any]) -> dict[str, Any]: + """Keep only JSON/msgpack-serializable values (basic Python types). + """ + serializable_types = ( + str, int, float, bool, list, dict, type(None) + ) + return { + k: v for k, v in d.items() if isinstance(v, serializable_types) + } + + + def _create_codeact( + self, + model: BaseChatModel, + tools: Sequence[Union[StructuredTool, Callable]], + eval_fn: Union[EvalFunction, EvalCoroutine], + *, + state_schema: StateSchemaType = CodeActState, + ) -> StateGraph: + """Create a LangGraph state graph for the CodeAct agent. 
+ """ + tools = [ + t if isinstance(t, StructuredTool) else create_tool(t) + for t in tools + ] + self.tools_context = {tool.name: tool.func for tool in tools} + + def call_model_stream(state: StateSchema): + messages = [{"role": "system", "content": self.prompt}] + state["messages"] + + # Accumulate into one combined chunk + accumulated: AIMessageChunk | None = None + + # stream partial tokens as AIMessagesChunks wioth .content = "Hel", + for delta in self.model.stream(messages): + if accumulated is None: + accumulated = delta + else: + accumulated = accumulated + delta # merge chunks + + # yield partial update immediately (for streaming UI) + yield Command(update={"messages": [delta], "script": None}) + + # after streaming completes + if accumulated is None: + yield Command(update={"messages": [], "script": None}) + return # nothing came back + + # Convert merged chunks into a final message + full_text = accumulated.content or "" + + # Check for code blocks + code = self._extract_and_combine_codeblocks(full_text) + + if code: + # Create a fake tool call entry + tool_call_id = "sandbox" + fake_tool_call = { + "id": tool_call_id, + "type": "function", + "function": { + "name": "sandbox", + "arguments": code + } + } + # Patch the assistant message with tool_calls + accumulated.additional_kwargs = {"tool_calls": [fake_tool_call]} + + # Pass both the patched assistant message and code to sandbox + yield Command( + goto="sandbox", + update={ + "messages": [accumulated], + "script": code + } + ) + else: + yield Command( + update={ + "messages": [accumulated], + "script": None + } + ) + + + if inspect.iscoroutinefunction(eval_fn): + + async def sandbox(state: StateSchema): + """Run the code in the sandbox and return a proper OpenAI tool message. + """ + existing_context = state.get("context", {}) + + # Combine persistent context with runtime-only tools + exec_context = {**existing_context, **self.tools_context} + + # Get tool_call_id for traceability + prev_msgs = state.get("messages", []) + tool_call_id = "sandbox" + for msg in reversed(prev_msgs): + if hasattr(msg, "additional_kwargs") and msg.additional_kwargs.get("tool_calls"): + tool_call_id = msg.additional_kwargs["tool_calls"][0]["id"] + break + + # Execute user code + output, new_vars = await eval_fn(state["script"], exec_context) + + # Only persist serializable data + serializable_new_vars = self._filter_serializable(new_vars) + new_context = {**existing_context, **serializable_new_vars} + + # Return OpenAI-compliant tool result + return { + "messages": [ + { + "role": "tool", + "tool_call_id": tool_call_id, + "name": "sandbox", + "content": ( + f"Sandbox result of your executed code:\n{json.dumps(output)}" + if not isinstance(output, str) + else f"Sandbox result of your executed code:\n{output}" + # Keep as string if already string else JSON serialize + ), + + } + ], + "context": new_context, + } + + + else: + def sandbox(state: StateSchema): + """Run the code in the sandbox and return a proper OpenAI tool message. 
+ """ + existing_context = state.get("context", {}) + + # Combine persistent context with runtime-only tools + exec_context = {**existing_context, **self.tools_context} + + # Get tool_call_id for traceability + prev_msgs = state.get("messages", []) + tool_call_id = "sandbox" + for msg in reversed(prev_msgs): + if hasattr(msg, "additional_kwargs") and msg.additional_kwargs.get("tool_calls"): + tool_call_id = msg.additional_kwargs["tool_calls"][0]["id"] + break + + # Execute user code + output, new_vars = eval_fn(state["script"], exec_context) + + # Only persist serializable data + serializable_new_vars = self._filter_serializable(new_vars) + new_context = {**existing_context, **serializable_new_vars} + + # Return OpenAI-compliant tool result + return { + "messages": [ + { + "role": "tool", + "tool_call_id": tool_call_id, + "name": "sandbox", + "content": ( + f"Sandbox result of your executed code:\n{json.dumps(output)}" + if not isinstance(output, str) + else f"Sandbox result of your executed code:\n{output}" + ), + # Keep as string if already string else JSON serialize + } + ], + "context": new_context, + } + + # --- Build the state graph --- + agent = StateGraph(state_schema) + agent.add_node(call_model_stream, destinations=(END, "sandbox")) + agent.add_node(sandbox) + agent.add_edge(START, "call_model_stream") + agent.add_edge("sandbox", "call_model_stream") + return agent + + + def stream( + self, + messages: list[dict], + thread_id: int = 1 + ) -> Generator[ + TokenStream, + None, + None + ]: + """ + Generator yielding agent outputs during execution. + + Yields + ------ + tuple[str, Any] + - "messages": list of chat message objects (e.g. AIMessage) + - "values": dict of current agent state (messages, script, context) + + Example + ------- + messages [AIMessage(content="```python\nresult = 3*7+5\nprint(result)\n```")] + values {"messages": [...], "script": "result = 3*7+5\nprint(result)", "context": {}} + messages [AIMessage(content="26")] + values {"messages": [...], "script": None, "context": {"result": 26}} + """ + + config = { + "configurable": { + "thread_id": thread_id + } + } + for typ, chunk in self.compiled_agent.stream( + {"messages": messages}, + stream_mode=["values", "messages"], + config=config, + ): + yield TokenStream(type=typ, data=chunk) + + #------- BEFORE DB AGENT EXECUTOR -------# + #def generate( + # self, + # messages: list[dict], + # thread_id: int = 1 + #) -> dict[str, Any]: + # """ + # Run the agent to completion and return final state.# + + # Returns + # ------- + # dict + # Final agent state containing messages, script, context. 
+ # """ + # config = { + # "configurable": { + # "thread_id": thread_id + # } + # } + # final_state = self.compiled_agent.generate( + # {"messages": messages}, + # config=config, + # ) + # return final_state + #------- BEFORE DB AGENT EXECUTOR -------# + def generate( + self, + messages: list[dict], + thread_id: int = 1, + context: Optional[dict[str, Any]] = None, + ) -> dict[str, Any]: + """ + *** Test method for db executor *** + """ + config = { + "configurable": {"thread_id": thread_id} + } + state = { + "messages": messages, "context": context or {} + } + return self.compiled_agent.invoke( #TODO: note changed from generate to invoke, hope it works + state, config=config + ) + + + + +if __name__ == "__main__": + """ + Run the CodeActAgent in different modes: + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + - python -m agent.core.codeact --mode chat + - python -m agent.core.codeact --mode debug + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + """ + import argparse + import json + from rich.console import Console + + # Validate environment (api key) before doing *anything* else + OpenAIApiKey.validate_environment() + + # --- Parse args --- + parser = argparse.ArgumentParser(description="Run CodeActAgent in different modes") + parser.add_argument( + "--mode", + choices=["chat", "debug"], + default="chat", + help="Mode: 'chat' for normal conversation, 'debug' to also show state values." + ) + args = parser.parse_args() + + # --- Instantiate agent --- + agent = CodeActAgent( + model_name="gpt-4o", + model_provider="openai", + tools=[], + eval_fn=CodeActAgent.default_eval, # built-in evaluator + system_prompt="agent/prompts/local_archive/original.txt", + bind_tools=False, + memory=True + ) + #~~~~~~~~~~~~~~~~~~~~~~~~~~# + # --- Conversation loop ---# + #~~~~~~~~~~~~~~~~~~~~~~~~~~# + # --- Rich console setup --- + console = Console(width=100, soft_wrap=False) + + while True: + user_query = input("\n😎 USER:\n››› ") + if user_query.lower() == "exit": + break + + messages = [{"role": "user", "content": user_query}] + + # --- Dynamic assistant header (chat only) --- + if args.mode == "chat": + console.print("\n🧠 [bold magenta]Assistant[/]:\n››› ", end="") + + # --- Stream agent responses --- + for typ, chunk in agent.stream(messages): + if args.mode == "chat" and typ == "messages": + print(chunk[0].content, end="", flush=True) + + elif args.mode == "debug": + if typ == "values": + # Print only the nicely formatted message + optional context + pretty_print_state(chunk, show_context=False) + + print("\n") + diff --git a/src/agents/db_executor/codeact/prompts/local_archive/original.txt b/src/agents/db_executor/codeact/prompts/local_archive/original.txt new file mode 100644 index 0000000000000000000000000000000000000000..62441272f923dfa20a5a91fdbe8e49ae52050387 --- /dev/null +++ b/src/agents/db_executor/codeact/prompts/local_archive/original.txt @@ -0,0 +1,18 @@ + +You are a helpful assistant. You are encouraged to generate Python code for calculations. + +You will be given a task to perform. You should output either +- a Python code snippet that provides the solution to the task, or a step towards the solution. Any output you want +to extract from the code should be printed to the console. Code should be output in a fenced code block. +- text to be shown directly to the user, if you want to ask for more information or provide the final answer. 
diff --git a/src/agents/db_executor/codeact/prompts/local_archive/original.txt b/src/agents/db_executor/codeact/prompts/local_archive/original.txt
new file mode 100644
index 0000000000000000000000000000000000000000..62441272f923dfa20a5a91fdbe8e49ae52050387
--- /dev/null
+++ b/src/agents/db_executor/codeact/prompts/local_archive/original.txt
@@ -0,0 +1,18 @@
+
+You are a helpful assistant. You are encouraged to generate Python code for calculations.
+
+You will be given a task to perform. You should output either
+- a Python code snippet that provides the solution to the task, or a step towards the solution. Any output you want
+to extract from the code should be printed to the console. Code should be output in a fenced code block.
+- text to be shown directly to the user, if you want to ask for more information or provide the final answer.
+
+In addition to the Python Standard Library, you can use the following functions:
+
+{tools}
+
+Variables defined at the top level of previous code snippets can be referenced in your code.
+
+When you include a code block, put a blank line after the closing triple backticks
+before any further text.
+
+Reminder: use Python code snippets to call tools.
\ No newline at end of file
diff --git a/src/agents/db_executor/codeact/prompts/local_archive/test.txt b/src/agents/db_executor/codeact/prompts/local_archive/test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..860542653bdedc98c176c84142a6564275799f15
--- /dev/null
+++ b/src/agents/db_executor/codeact/prompts/local_archive/test.txt
@@ -0,0 +1,25 @@
+You are a helpful assistant that can solve tasks using Python code and a set of predefined tools.
+
+=== RULES ===
+1. CODE BLOCKS:
+   - Always use triple backticks: ```python ... ```
+   - Never include natural language inside code blocks.
+   - Comments (#) are allowed but should be minimal.
+
+2. OUTPUT EXPLANATION:
+   - After each code block, provide a brief natural language explanation.
+   - Use code outputs in your response.
+   - Keep explanations separate from code.
+
+Note:
+When you include a code block, put a blank line after the closing triple backticks
+before any further text.
+
+=== VALID EXAMPLE ===
+```python
+# Calculate the product
+result = multiply(15, 23)
+print(result)
+```
+The calculation shows that 15 multiplied by 23 equals 345.
\ No newline at end of file
diff --git a/src/agents/db_executor/codeact/prompts/prompt_layer.py b/src/agents/db_executor/codeact/prompts/prompt_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3a866754ec68d3a056e9456be066e3533e51bcb
--- /dev/null
+++ b/src/agents/db_executor/codeact/prompts/prompt_layer.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python3
+"""
+PromptLayer Integration for Prompt Management
+==============================================
+
+This module provides a centralized way to manage prompts using the PromptLayer platform.
+Allows for versioned, labeled prompts that can be easily updated without code changes.
+"""
+
+import promptlayer
+from promptlayer import PromptLayer
+from dotenv import load_dotenv
+import os
+from typing import Dict, Any, Optional
+from functools import lru_cache
+
+load_dotenv()
+
+
+class PromptManager:
+    """
+    Centralized prompt management using the PromptLayer platform.
+    link:
+    - https://www.promptlayer.com
+
+    Features:
+    - Version control for prompts
+    - Environment-based prompt labels (dev, staging, production)
+    - Caching for performance
+    - Fallback to local files if PromptLayer unavailable
+    """
+
+    def __init__(self, api_key: Optional[str] = None, environment: str = "production"):
+        """
+        Initialize PromptManager.
+ + Args: + api_key: PromptLayer API key (defaults to PROMPTLAYER_API_KEY env var) + environment: Environment label for prompts (dev, staging, production) + """ + self.api_key = api_key or os.getenv("PROMPTLAYER_API_KEY") + self.environment = environment + self.client = None + + # Initialize client if API key is available + if self.api_key: + try: + self.client = PromptLayer(api_key=self.api_key) + print(f"✅ PromptLayer connected (environment: {environment})") + + except Exception as e: + print(f"⚠️ PromptLayer connection failed: {e}") + self.client = None + else: + print("⚠️ No PROMPTLAYER_API_KEY found, using local fallback") + + @lru_cache(maxsize=128) + def get_prompt( + self, + template_name: str, + version: Optional[int] = None, + label: Optional[str] = None, + fallback_path: Optional[str] = None + ) -> str: + """ + Get a prompt from PromptLayer with fallback to local file. + + Args: + template_name: Name of the prompt template + version: Specific version number (defaults to latest) + label: Environment label (defaults to instance environment) + fallback_path: Local file path if PromptLayer unavailable + + Returns: + Prompt content as string + + Raises: + ValueError: If prompt cannot be found and no fallback provided + """ + # Use provided label or instance default + label = label or self.environment + + # Try PromptLayer first + if self.client: + try: + template_config = { + "label": label + } + if version: + template_config["version"] = version + + prompttemplate = self.client.templates.get( + template_name, + template_config + ) + # Extract prompt content from response + prompt_content = prompttemplate["llm_kwargs"]["messages"][0]["content"] + print(f"📋 Loaded prompt '{template_name}' from PromptLayer (v{prompttemplate.get('version', 'latest')}, {label})") + return prompt_content + + except Exception as e: + print(f"⚠️ PromptLayer failed: {e}, trying fallback...") + # Fall through to fallback instead of raising + + # Fallback to local file + if fallback_path: + try: + with open(fallback_path, 'r') as f: + content = f.read() + print(f"📂 Loaded prompt '{template_name}' from local file: {fallback_path}") + return content + except Exception as e: + raise ValueError( + f"❌ Failed to load fallback file '{fallback_path}': {e}" + ) + + # Only raise if both PromptLayer AND fallback fail + raise ValueError( + f"Could not load prompt '{template_name}' from any source" + ) + + + def list_available_prompts(self) -> Dict[str, Any]: + """ + List all available prompts from PromptLayer. + + Returns: + Dictionary of available prompts with metadata + """ + if not self.client: + return {"error": "PromptLayer client not available"} + + try: + # This would depend on PromptLayer's API for listing templates + # Placeholder implementation + return { + "message": "PromptLayer template listing not implemented in this version", + "available_methods": [ + "get_judge_prompt(simple=True/False)", + "get_agent_prompt(version=int)", + "get_prompt(template_name, version, label, fallback_path)" + ] + } + except Exception as e: + return {"error": f"Failed to list prompts: {e}"} + + def clear_cache(self): + """Clear the prompt cache.""" + self.get_prompt.cache_clear() + print("🗑️ Prompt cache cleared") + + def set_environment(self, environment: str): + """ + Change the environment label for subsequent prompt requests. 
+ + Args: + environment: New environment (dev, staging, production) + """ + self.environment = environment + self.clear_cache() # Clear cache since environment changed + print(f"🔄 Environment changed to: {environment}") + diff --git a/src/agents/db_executor/codeact/schemas/__init__.py b/src/agents/db_executor/codeact/schemas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..19b98f8e8e1e260c3c80be6b16117af94b60e689 --- /dev/null +++ b/src/agents/db_executor/codeact/schemas/__init__.py @@ -0,0 +1,10 @@ +"""Init file for pydantic schemas. +""" + +from .openai_key import OpenAIApiKey +from .stream import TokenStream + +__all__ = [ + "OpenAIApiKey", + "TokenStream", +] diff --git a/src/agents/db_executor/codeact/schemas/openai_key.py b/src/agents/db_executor/codeact/schemas/openai_key.py new file mode 100644 index 0000000000000000000000000000000000000000..d8e89528b9103b4926680349f9bd580b6bdf99e9 --- /dev/null +++ b/src/agents/db_executor/codeact/schemas/openai_key.py @@ -0,0 +1,56 @@ +import os +from pydantic import Field, ConfigDict, field_validator +from pydantic_settings import BaseSettings +from pathlib import Path +from dotenv import load_dotenv +from pydantic import ValidationError +import sys + +# Load environment variables +load_dotenv() + + +class OpenAIApiKey(BaseSettings): + """Schema for validating and loading the OpenAI API key configuration. + """ + model_config = ConfigDict( + title="OpenAI API Key Schema", + description="Validates and loads the OpenAI API key from environment variables.", + ) + api_key: str = Field( + ..., # >>> required field + title="OpenAI API Key", + description="API key for OpenAI authentication.", + examples=["sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"], + alias="OPENAI_API_KEY", + ) + + @field_validator("api_key") + @classmethod + def validate_openai_api_key(cls, v: str) -> str: + """Validate that the API key is present and has the correct format. + """ + if not v: + raise ValueError( + "💥 Missing `OPENAI_API_KEY` environment variable." + ) + if not v.startswith("sk-"): + raise ValueError( + "💥 Invalid `OPENAI_API_KEY` — must start with 'sk-'." + ) + return v + + @classmethod + def validate_environment(cls) -> "OpenAIApiKey": + """ + Load .env from the root directory + and validate that the API key is present and valid. + """ + try: + # Pydantic auto-loads .env and validates + config = cls() + os.environ["OPENAI_API_KEY"] = config.api_key # Set for runtime access + return config + except ValidationError as e: + print(f"💥 OpenAI API key misconfiguration:\n{e}") + sys.exit(1) diff --git a/src/agents/db_executor/codeact/schemas/stream.py b/src/agents/db_executor/codeact/schemas/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..4634438b163e0004c888e64a23de803e4e3ea012 --- /dev/null +++ b/src/agents/db_executor/codeact/schemas/stream.py @@ -0,0 +1,8 @@ +from typing import NamedTuple, Literal, Union, Any +from langchain_core.messages import AIMessage + +class TokenStream(NamedTuple): + """Represents a single streamed update emitted by the agent. 
+ """ + type: Literal["messages", "values"] + data: Union[list[AIMessage], dict[str, Any]] \ No newline at end of file diff --git a/src/agents/db_executor/codeact/states/state.py b/src/agents/db_executor/codeact/states/state.py new file mode 100644 index 0000000000000000000000000000000000000000..11322aec1443a7e85ee73882924fafacbf49f7a3 --- /dev/null +++ b/src/agents/db_executor/codeact/states/state.py @@ -0,0 +1,10 @@ +from langgraph.graph import END, START, MessagesState +from typing import Optional, Any + +class CodeActState(MessagesState): + """State for CodeAct agent.""" + + script: Optional[str] + """The Python code script to be executed.""" + context: dict[str, Any] + """Dictionary containing the execution context with available tools and variables.""" \ No newline at end of file diff --git a/src/agents/db_executor/codeact/tools/__init__.py b/src/agents/db_executor/codeact/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/agents/db_executor/codeact/tools/tools.py b/src/agents/db_executor/codeact/tools/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad8a8b5a9957e718f63a74def77ccc59266b914 --- /dev/null +++ b/src/agents/db_executor/codeact/tools/tools.py @@ -0,0 +1,53 @@ +import inspect +from langchain_core.tools import StructuredTool +from typing import Optional +from pathlib import Path + +# Example tools +def add(a: float, b: float) -> float: + """Add two numbers together.""" + return a + b + +def multiply(a: float, b: float) -> float: + """Multiply two numbers together.""" + return a * b + +def divide(a: float, b: float) -> float: + """Divide two numbers.""" + return a / b + +def subtract(a: float, b: float) -> float: + """Subtract two numbers.""" + return a - b + +# Prompt creation +def create_default_prompt( + tools: list, + system_prompt: Optional[str] = None, + base_prompt: str = "original.txt", +) -> str: + template_path = Path(__file__).parent.parent / "prompts" / base_prompt + template = template_path.read_text() + + tool_strings = [] + for t in tools: + func = t.func if isinstance(t, StructuredTool) else t + sig = inspect.signature(func) + doc = (func.__doc__ or "").strip() + tool_strings.append( + f"def {func.__name__}{sig}:\n \"\"\"{doc}\"\"\"\n ..." 
+ ) + tools_str = "\n\n".join(tool_strings) + + prompt = template.replace("{tools}", tools_str) + + if system_prompt: + prompt = f"{system_prompt}\n\n{prompt}" + + return prompt + + + +if __name__ == "__main__": + tools = [multiply, divide, subtract] + print(create_default_prompt(tools, system_prompt="You are a coding agent.")) diff --git a/src/agents/db_executor/codeact/utils/__init__.py b/src/agents/db_executor/codeact/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..51ea01dbad8225be4033a4e0bb2b2417ec093510 --- /dev/null +++ b/src/agents/db_executor/codeact/utils/__init__.py @@ -0,0 +1,5 @@ +"""Utility functions for the agent.""" + +from .pretty_state import pretty_print_state + +__all__ = ["pretty_print_state"] \ No newline at end of file diff --git a/src/agents/db_executor/codeact/utils/pretty_state.py b/src/agents/db_executor/codeact/utils/pretty_state.py new file mode 100644 index 0000000000000000000000000000000000000000..1157d59d967446aeaf52ca6bfb0421a9c49a3da3 --- /dev/null +++ b/src/agents/db_executor/codeact/utils/pretty_state.py @@ -0,0 +1,73 @@ +import json +from rich.console import Console +from rich.syntax import Syntax +from rich.panel import Panel +from langchain_core.messages import HumanMessage, AIMessage, ToolMessage + +console = Console(width=100, soft_wrap=False) + +_last_context_snapshot = None # used to suppress repeated context +_last_message_ids = set() # track printed messages + + + +def serialize_message(msg) -> dict: + """Convert LangChain message objects into serializable dicts.""" + if hasattr(msg, "dict"): + return msg.dict() + elif hasattr(msg, "__dict__"): + return {k: serialize_message(v) for k, v in msg.__dict__.items()} + elif isinstance(msg, list): + return [serialize_message(v) for v in msg] + elif isinstance(msg, dict): + return {k: serialize_message(v) for k, v in msg.items()} + else: + return msg + + +def pretty_print_state(state: dict, show_context: bool = True) -> None: + """ + Pretty-print the agent's state in a clean, color-coded way. + + Parameters + ---------- + state : dict + The LangGraph agent state chunk (from the stream). + show_context : bool, optional + Whether to display the context (default True). + If True, only shows context when it has changed since last call. 
+ """ + global _last_context_snapshot + + # --- Display message chunks --- + for msg in state.get("messages", []): + + msg_id = getattr(msg, "id", id(msg)) + if msg_id in _last_message_ids: + continue # skip duplicates + _last_message_ids.add(msg_id) + + msg_dict = serialize_message(msg) + msg_json = json.dumps(msg_dict, indent=2) + + if isinstance(msg, HumanMessage): + color, title = "cyan", "🧑 HumanMessage" + elif isinstance(msg, ToolMessage): + color, title = "yellow", f"🧰 ToolMessage ({msg_dict.get('name','?')})" + elif isinstance(msg, AIMessage): + color, title = "magenta", "🤖 AIMessage" + else: + color, title = "white", "Other" + + syntax = Syntax(msg_json, "json", theme="monokai", line_numbers=False) + console.print(Panel(syntax, title=title, border_style=color)) + + # --- Optional context view --- + #if show_context: + # context = state.get("context", {}) + # if context and context != _last_context_snapshot: + # _last_context_snapshot = context.copy() # cache for next comparison + + # context_json = json.dumps(context, indent=2, default=str) + # syntax = Syntax(context_json, "json", theme="monokai", line_numbers=False) + # console.print(Panel(syntax, title="🧠 Context (updated)", border_style="green")) diff --git a/src/agents/db_executor/db_executor.py b/src/agents/db_executor/db_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..a13d109eaddac98dc1383e7651e43c9c760927ff --- /dev/null +++ b/src/agents/db_executor/db_executor.py @@ -0,0 +1,95 @@ +from .codeact.core.codeact import CodeActAgent +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import ( + Candidate, + CVScreeningResult, + VoiceScreeningResult, + InterviewScheduling, + FinalDecision, +) +from langchain_core.tools import tool +from typing import Dict, Any +from src.prompts import get_prompt +from src.database.candidates import evaluate_cv_screening_decision + + +SYSTEM_PROMPT = get_prompt( + template_name="DB_Executor", + latest_version=True +) + + +@tool +def db_executor(query: str) -> str: + """ + Consumes a natural-language query as input which is being translated into + SQLAlchemy ORM code by the coding agent. Finally, the code is executed against + the database and the result is returned. + + Args: + query (str): Natural-language database query. + Returns: + str: The natural language summary of the result or error. + """ + # 1. Initialize DB session and ORM context + session = SessionLocal() + context = { + "session": session, + "Candidate": Candidate, + "CVScreeningResult": CVScreeningResult, + "VoiceScreeningResult": VoiceScreeningResult, + "InterviewScheduling": InterviewScheduling, + "FinalDecision": FinalDecision, + } + + try: + # 2. Initialize CodeAct agent with system prompt + agent = CodeActAgent( + model_name="gpt-4o", + model_provider="openai", + tools=[evaluate_cv_screening_decision], # Passed as a tool + eval_fn=CodeActAgent.default_eval, + system_prompt=SYSTEM_PROMPT, + bind_tools=True, # Enable tool binding so agent sees signature + memory=False, # optional — can enable if you want persistent thread context + ) + + # 3. Run natural-language query + messages = [{"role": "user", "content": query}] + final_state = agent.generate(messages, context=context) + + # 4. 
Extract model output
+        # Return the final natural language response from the assistant
+        output_msg = final_state["messages"][-1].content if final_state.get("messages") else ""
+
+        return output_msg
+
+    except Exception as e:
+        import traceback
+        error_trace = traceback.format_exc()
+        print(f"\n❌ Error in db_executor: {e}\n{error_trace}")
+
+        # Return a clear text error message
+        return f"The DB Executor encountered an internal error: {str(e)}"
+
+    finally:
+        session.close()
+
+
+
+if __name__ == "__main__":
+    from rich.console import Console
+    from rich.panel import Panel
+
+    console = Console()
+    query = "Fetch all candidates and their status."
+
+    console.rule("[bold magenta]DB Executor Test Run[/bold magenta]")
+    console.print(f"[cyan]Query:[/] {query}\n")
+
+    result = db_executor(query)
+
+    # 🧠 Show model result nicely
+    console.print(Panel.fit(result, title="🧠 Model Output", border_style="blue"))
+
+    console.rule("[bold green]End of Execution[/bold green]")
diff --git a/src/agents/db_executor/info.md b/src/agents/db_executor/info.md
new file mode 100644
index 0000000000000000000000000000000000000000..64c815fdee312948d517ed3ca7d818398980d3d4
--- /dev/null
+++ b/src/agents/db_executor/info.md
@@ -0,0 +1,22 @@
+This agent is a coding agent based on the `CodeAct` agent pattern, see:
+https://github.com/langchain-ai/langgraph-codeact
+
+
+Test as follows:
+
+>>> cd /Users/sebastianwefers/Desktop/projects/recruitment-agent
+
+>>> docker compose -f docker/docker-compose.yml up --build candidates_db_init
+
+
+# Make sure your OpenAI key is available to the process
+>>> export OPENAI_API_KEY=sk-...  # or however you normally set it
+
+# Override host so the Python code connects to localhost, not 'db', and run "db_executor"
+>>> POSTGRES_HOST=localhost POSTGRES_PORT=5433 python -m src.agents.db_executor.db_executor
+
+
+# DEBUG attempt
+------------------------------------------------------------------------------------
+- works:
+POSTGRES_HOST=localhost POSTGRES_PORT=5433 python src/agents/db_executor/debug_db_connection.py
\ No newline at end of file
diff --git a/src/agents/example/info.md b/src/agents/example/info.md
new file mode 100644
index 0000000000000000000000000000000000000000..2edd6f86c1ca02a83dce3a9d393a0c3da85b160d
--- /dev/null
+++ b/src/agents/example/info.md
@@ -0,0 +1,66 @@
+### How to Run the LangGraph Reasoning Monitoring Demo Agent
+
+1. Make sure to have the following installed:
+```bash
+pip install -r requirements/dev.txt
+```
+
+2. Set TAVILY_API_KEY:
+- link: https://www.tavily.com
+
+3. Run the following from repo root:
+```bash
+export PYTHONPATH=./src
+langgraph dev
+```
+This loads the root-level `langgraph.json` and makes all agents available in LangGraph Studio.
+
+4. Open the Studio UI
+After the server starts, open:
+```bash
+https://smith.langchain.com/studio/?baseUrl=http://127.0.0.1:2024
+```
+**NOTE:** Open it in any browser but Safari!
+
+Select the agent named react_agent (or whichever your config specifies).
+
+---
+
+### Demo Prompt to Use
+Paste the following into the Studio console:
+```txt
+First search for the current temperature in Fahrenheit in Cape Town, South Africa.
+Then convert that temperature to Celsius using the conversion tool.
+```
+
+***This triggers:***
+1. A Tavily search for the current Fahrenheit temperature
+2. A tool call to convert Fahrenheit → Celsius
+3.
Full ReAct reasoning + tool trace in the UI + +--- + +### ⚙️ Multiple Agents in langgraph.json +You can expose multiple agents to LangGraph Studio by listing them under the graphs section of your root `langgraph.json`. + +Example: +```json +{ + "dependencies": ["src"], + "graphs": { + "react_agent": "agents.example.react_agent:agent", + "cv_screener": "agents.cv_screening.screener:agent", + "supervisor": "agents.supervisor.supervisor:agent" + } +} +``` +Each entry maps: +```bash +"graph_name": "module.path:object_name" +``` + +Where: +- `graph_name` → appears in LangGraph Studio +- `module.path` → Python import path under `src/` +- `object_name` → the variable that contains the graph/agent +This allows one project to host many agents simultaneously (e.g., supervisor, tools agent, CV-screening agent, etc.). diff --git a/src/agents/example/react_agent.py b/src/agents/example/react_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..af674c94abfe1264b21806f2c86067e18d61ad80 --- /dev/null +++ b/src/agents/example/react_agent.py @@ -0,0 +1,59 @@ +""" +Simple React Agent implementation with monitoring capabilities. + +- React agent: + - https://docs.langchain.com/oss/python/langchain/agents + + +install: + - langgraph-cli + +Run as follows: +>>> cd src/agents/example/ +>>> langgraph dev + +""" +from langchain.agents import create_agent +from langchain_tavily import TavilySearch +from langchain_core.tools import tool +from dotenv import load_dotenv + + + +load_dotenv() + + + +# --- Tools --- +@tool +def convert_fahrenheit_celsius(fahrenheit: float) -> float: + """ + Convert fahrenheit to celsius. + Args: + fahrenheit (float): Temperature in fahrenheit. + Returns: + float: Temperature in celsius. + """ + return (fahrenheit - 32) * 5.0/9.0 + + + +web_search = TavilySearch( + max_results = 5, + topic = "general", + # include_answer = False, + # include_raw_content = False, + # ... +) + + +tools = [ + web_search, + convert_fahrenheit_celsius +] + + +agent = create_agent( + "gpt-5", + tools=tools +) \ No newline at end of file diff --git a/src/agents/gcalendar/__init__.py b/src/agents/gcalendar/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..af440251541c569d239ee088ebe55df2191a393b --- /dev/null +++ b/src/agents/gcalendar/__init__.py @@ -0,0 +1,2 @@ +from .gcalendar_agent import gcalendar_agent + diff --git a/src/agents/gcalendar/gcalendar_agent.py b/src/agents/gcalendar/gcalendar_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..3330327c86c13a29057f119672e41af92c602ef3 --- /dev/null +++ b/src/agents/gcalendar/gcalendar_agent.py @@ -0,0 +1,94 @@ +import asyncio +import sys +from langchain_core.tools import tool +from langchain_mcp_adapters.client import MultiServerMCPClient +from langchain.agents import create_agent +from langchain_openai import ChatOpenAI +from src.mcp_servers.examples.gcalendar.settings import GoogleCalendarSettings +from src.prompts import get_prompt + + +SYSTEM_PROMPT = get_prompt( + template_name="GCalendar", + latest_version=True +) + +@tool +def gcalendar_agent(query: str) -> str: + """ + A tool that acts as a Google Calendar agent. + It can list, create, and analyze calendar events using the Google Calendar MCP server. + + Args: + query (str): The natural language request for the calendar (e.g., "Schedule a meeting with X on Friday at 3pm"). + + Returns: + str: The natural language response from the agent confirming the action or providing the requested information. 
+ + Example output: + "I have successfully scheduled the meeting with X for Friday at 3pm. The event ID is 1234567890." + """ + try: + import asyncio + async def _run_async(): + # Load settings + settings = GoogleCalendarSettings() + CALENDAR_MCP_DIR = settings.calendar_mcp_dir + CREDS = settings.creds + TOKEN = settings.token + + # Initialize model + model = ChatOpenAI(model="gpt-4o", temperature=0) + + # Connect to MCP server + # Note: This spawns a new process for each call. + # In a production environment, you might want to manage a persistent connection. + client = MultiServerMCPClient({ + "calendar": { + "command": sys.executable, + "args": [ + f"{CALENDAR_MCP_DIR}/run_server.py", + "--creds-file-path", str(CREDS), + "--token-path", str(TOKEN), + ], + "transport": "stdio", + } + }) + + # Fetch tools + try: + tools = await client.get_tools() + except Exception as e: + return f"❌ Failed to connect to Calendar MCP server: {str(e)}" + + if not tools: + return "❌ No tools available from Calendar MCP server." + + # Create agent + agent = create_agent(model, tools) + + # Run agent + # We wrap the user query in a system/user message structure + result = await agent.ainvoke({ + "messages": [ + { + "role": "system", + "content": SYSTEM_PROMPT, + }, + { + "role": "user", + "content": query, + }, + ] + }) + + # Extract result + output = result["messages"][-1].content + return output + + return asyncio.run(_run_async()) + + except Exception as e: + import traceback + return f"❌ Error in gcalendar_agent: {str(e)}\n{traceback.format_exc()}" + diff --git a/src/agents/gcalendar/schemas/__init__.py b/src/agents/gcalendar/schemas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/agents/gcalendar/tools/__init__.py b/src/agents/gcalendar/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/agents/gmail/__init__.py b/src/agents/gmail/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f23865c6eb94070c2fc8d5b6fc53087f8f7a05e4 --- /dev/null +++ b/src/agents/gmail/__init__.py @@ -0,0 +1,2 @@ +from .gmail_agent import gmail_agent + diff --git a/src/agents/gmail/gmail_agent.py b/src/agents/gmail/gmail_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..c670447053debd05d359007f10d4fc2e44c1d25b --- /dev/null +++ b/src/agents/gmail/gmail_agent.py @@ -0,0 +1,102 @@ +import asyncio +import shutil +from pathlib import Path +from langchain_core.tools import tool +from langchain_mcp_adapters.client import MultiServerMCPClient +from langchain.agents import create_agent +from langchain_openai import ChatOpenAI +from src.mcp_servers.examples.gmail.settings import GMailSettings +from src.prompts import get_prompt + + +# Attempt to find uv executable +#----------------------------------------------------------------------------- +# `Dockerfile.supervisor` installs uv in the base image in `/usr/local/bin/uv` +# `which` attempts to find it in the system PATH and returns the full path to it. +UV_PATH = shutil.which("uv") + + +SYSTEM_PROMPT = get_prompt( + template_name="GMail", + latest_version=True +) + +@tool +def gmail_agent(query: str) -> str: + """ + A tool that acts as a Gmail agent. + It can read, search, label, and send emails using the Gmail MCP server. + + Args: + query (str): The natural language request (e.g., "Send an email to X", "Check unread emails"). 
+ + Returns: + str: The natural language response from the agent confirming the action or providing the requested information. + + Example output: + "I have successfully sent the email to X with the subject 'Interview Invitation'." + """ + if not UV_PATH: + return "❌ Error: 'uv' executable not found. Please ensure uv is installed and in the system PATH." + + try: + import asyncio + async def _run_async(): + # Load settings + settings = GMailSettings() + + # Initialize model + model = ChatOpenAI(model="gpt-4o", temperature=0) + + # Connect to MCP server + client = MultiServerMCPClient( + { + "gmail": { + "command": UV_PATH, + "args": [ + "--directory", str(settings.gmail_mcp_dir), + "run", "gmail", + "--creds-file-path", str(settings.creds), + "--token-path", str(settings.token), + ], + "transport": "stdio", + } + } + ) + + # Fetch tools + try: + tools = await client.get_tools() + except Exception as e: + return f"❌ Failed to connect to Gmail MCP server: {str(e)}" + + if not tools: + return "❌ No tools available from Gmail MCP server." + + # Create agent + agent = create_agent(model, tools) + + # Run agent + result = await agent.ainvoke({ + "messages": [ + { + "role": "system", + "content": SYSTEM_PROMPT, + }, + { + "role": "user", + "content": query, + }, + ] + }) + + # Extract result + output = result["messages"][-1].content + return output + + return asyncio.run(_run_async()) + + except Exception as e: + import traceback + return f"❌ Error in gmail_agent: {str(e)}\n{traceback.format_exc()}" + diff --git a/src/agents/supervisor/__init__.py b/src/agents/supervisor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e5816d344407963fd580bdac4f9f0220965732a8 --- /dev/null +++ b/src/agents/supervisor/__init__.py @@ -0,0 +1 @@ +"""Supervisor agent for HR recruitment workflow""" diff --git a/src/agents/supervisor/info.md b/src/agents/supervisor/info.md new file mode 100644 index 0000000000000000000000000000000000000000..2efc0d5ca4af2314d4e8c85de730af74330708a8 --- /dev/null +++ b/src/agents/supervisor/info.md @@ -0,0 +1,123 @@ +# Supervisor Agent - Implementation Details + +## Architecture + +The Supervisor Agent (`src/agents/supervisor/supervisor_v2.py`) uses a **LangGraph** orchestration model to manage the recruitment workflow. It maintains state for each interaction using a `MemorySaver` checkpointer, which allows it to remember context across multiple turns of conversation. + +### Core Components + +1. **Agent Type**: OpenAI Functions Agent (powered by `gpt-4o`). +2. **Orchestration Framework**: [LangGraph](https://github.com/langchain-ai/langgraph) for state management and tool execution. +3. **State Persistence**: Uses `thread_id` to maintain conversation history per session. + +### System Overview + +```mermaid +graph TD + User(User / HR Manager) -->|Chat Request| Supervisor[Supervisor Agent] + + Supervisor -->|Delegate| DB_Exec[DB Executor] + Supervisor -->|Delegate| CV_Screen[CV Screening] + Supervisor -->|Delegate| Cal_Agent[Calendar Agent] + Supervisor -->|Delegate| Gmail_Agent[Gmail Agent] + + DB_Exec -->|SQL| Database[(PostgreSQL)] + CV_Screen -->|Parse & Score| Database + + Cal_Agent -->|MCP| Calendar_MCP[Google Calendar MCP] + Gmail_Agent -->|MCP| Gmail_MCP[Gmail MCP] + + Calendar_MCP -->|API| GCalendar(Google Calendar) + Gmail_MCP -->|API| GMail(Google Mail) +``` + +## Tools & Sub-agents + +The supervisor has access to the following tools, which it delegates tasks to: + +1. 
**`db_executor`**: + * **Purpose**: Querying the database for candidate information, status updates, and aggregations. + * **Capabilities**: SQL generation and execution (read-only by default, with specific write actions allowed). +2. **`cv_screening_workflow`**: + * **Purpose**: Running the CV analysis pipeline. + * **Behavior**: Takes a candidate name, finds their CV, parses it, compares it against the job description, and saves the score/decision to the DB. + * **Sync/Async**: Currently synchronous wrapper around the workflow. +3. **`gcalendar_agent`**: + * **Purpose**: Interacting with Google Calendar. + * **Backend**: Connects to a local Model Context Protocol (MCP) server (`src/mcp_servers/calendar-mcp`). + * **Capabilities**: List events, create events, check availability. + * **Execution**: Spawns a subprocess to run the MCP server. +4. **`gmail_agent`**: + * **Purpose**: Interacting with Gmail. + * **Backend**: Connects to a local Model Context Protocol (MCP) server (`src/mcp_servers/gmail-mcp`). + * **Capabilities**: Send emails, read emails, search threads. + * **Execution**: Spawns a subprocess to run the MCP server. + +## Development & Debugging + +### Running the Supervisor UI + +The supervisor is exposed via a Streamlit UI (`src/supervisor_ui/app.py`). + +```bash +# Run with Docker (Recommended) +docker compose -f docker/docker-compose.yml up --build supervisor_ui +``` + +### Debugging with LangSmith UI + +For deeper transparency and debugging of the agent's thought process, you can run it with LangSmith. + +**Note:** You may need to temporarily disable `memory` (checkpointer) in `src/agents/supervisor/supervisor_v2.py` for the LangSmith UI to work correctly with this specific setup. + +```bash +# 1. Start the database service +docker compose --env-file .env -f docker/docker-compose.yml up --build + +# 2. Run LangGraph dev server (pointing to local DB port) +POSTGRES_HOST=localhost POSTGRES_PORT=5433 langgraph dev +``` + +### Troubleshooting MCP Connections + +If the supervisor fails to use Gmail or Calendar tools with errors like `Connection refused`, `Executable not found`, or `invalid_grant`: + +#### 1. `invalid_grant: Token has been expired or revoked` + +**Cause:** +The Google OAuth2 tokens (`token.json` or `calendar_token.json`) copied into the Docker container are invalid, expired, or were revoked. This happens if tokens are old or the environment changed. + +**Solution:** +You must regenerate the tokens **locally** and then rebuild the container to copy the fresh tokens inside. + +1. **Delete old tokens locally:** + ```bash + rm secrets/gmail-mcp/token.json + rm secrets/gcalendar-mcp/calendar_token.json + ``` + *(Do NOT delete the `credentials.json` files!)* + +2. **Regenerate tokens:** + Run the local test scripts. **A browser window will open asking you to log in and authorize the app.** + + *For Gmail:* + ```bash + python -m src.mcp_servers.examples.gmail.send_email + ``` + + *For Calendar:* + ```bash + python -m src.mcp_servers.examples.gcalendar.interact_calendar + ``` + +3. **Rebuild and Restart Container:** + The `Dockerfile.supervisor` copies the `secrets/` folder at build time. You must rebuild to get the new files. + ```bash + docker compose -f docker/docker-compose.yml build supervisor_ui + docker compose -f docker/docker-compose.yml up -d + ``` + +#### 2. Other Issues +1. **Check `uv` installation**: The MCP servers use `uv` to run. In Docker, this is installed at `/usr/local/bin/uv`. +2. 
**Check Subprocess**: The supervisor spawns new processes for MCP servers. Ensure the container has enough memory.
+
diff --git a/src/agents/supervisor/supervisor_v2.py b/src/agents/supervisor/supervisor_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..cad721def51354ab2df93b41533e382c00afde38
--- /dev/null
+++ b/src/agents/supervisor/supervisor_v2.py
@@ -0,0 +1,61 @@
+"""
+Supervisor Agent that orchestrates sub-agents for recruitment tasks.
+
+For more transparency in the LangSmith UI, disable memory, then run:
+----------------------------------------------------------------
+| >>> docker compose -f docker/docker-compose.yml up --build   |
+| >>> POSTGRES_HOST=localhost POSTGRES_PORT=5433 langgraph dev |
+----------------------------------------------------------------
+"""
+
+
+from langchain.agents import create_agent
+from langchain_openai import ChatOpenAI
+from langgraph.checkpoint.memory import MemorySaver
+from dotenv import load_dotenv
+from src.prompts import get_prompt
+
+load_dotenv()
+
+# ✅ Correct import via src.agents package (which re-exports from src.agents.db_executor)
+from src.agents import (
+    db_executor,
+    cv_screening_workflow,
+    gcalendar_agent,
+    gmail_agent,
+    voice_judge,
+)
+
+SYSTEM_PROMPT = get_prompt(
+    template_name="Supervisor",
+    latest_version=True,
+)
+
+# --------- Subagents as tools ---------
+subagents = [
+    db_executor,
+    cv_screening_workflow,
+    gcalendar_agent,
+    gmail_agent,
+    voice_judge,
+]
+
+# --------------- Memory ----------------
+# **NOTE:**
+# >>> In the UI, make sure to pass 'thread_id' as a configurable parameter to the agent.invoke() method.
+# >>> If you want to use the LangSmith UI, you must remove the checkpointer=memory,
+#     otherwise it will not work.
+memory = MemorySaver()
+
+# ------------- Supervisor --------------
+supervisor_model = ChatOpenAI(
+    model="gpt-4o",
+    temperature=0,
+)
+
+supervisor_agent = create_agent(
+    model=supervisor_model,
+    tools=subagents,
+    system_prompt=SYSTEM_PROMPT,
+    checkpointer=memory,  # comment out for the LangSmith UI
+)
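For reference, a minimal invocation sketch (a hedged example, assuming the module above is importable and the DB is up): per the NOTE above, `thread_id` is passed via the `configurable` dict and scopes the `MemorySaver` checkpointer to a single conversation.

```python
# Hedged sketch — illustrative only; "hr-session-1" is a hypothetical id.
from src.agents.supervisor.supervisor_v2 import supervisor_agent

config = {"configurable": {"thread_id": "hr-session-1"}}

result = supervisor_agent.invoke(
    {"messages": [{"role": "user", "content": "What is the current status of all candidates?"}]},
    config=config,
)
print(result["messages"][-1].content)
```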
diff --git a/src/agents/supervisor/test_queries/owen.md b/src/agents/supervisor/test_queries/owen.md
new file mode 100644
index 0000000000000000000000000000000000000000..fdc44bb1996f0b43e53fa5b1e61f7084c7358517
--- /dev/null
+++ b/src/agents/supervisor/test_queries/owen.md
@@ -0,0 +1,171 @@
+# Queries
+These queries / tests are used to test how well the supervisor agent performs by evaluating its responses to various tasks.
+
+## 1. Run CV screening for a newly uploaded candidate
+### Queries
+- "Please screen the new applicant and update their status accordingly."
+- "Please check if there are any applicants. Please tell me who, if so. Then send them to the CV screening and update their status accordingly."
+
+### Expected behavior
+Supervisor identifies that the candidate is in a state requiring CV screening.
+Supervisor delegates the work to the CV Screening agent.
+CV Screening agent parses the CV, scores it, determines pass or fail, and writes results into the database via the DB Executor.
+Supervisor waits for the tool output and then reports the updated status without performing the screening itself.
+### Notes / issues
+- The supervisor asks for the name of the applicant. Instead it should have automatically delegated it to the DB Executor. It needs to be less reliant on the user for something this trivial.
+- The DB agent keeps trying to get a file at src\database\cvs\parsed\1dd5c1f2-737e-430f-9747-8b77d60219f3_SWefers_CV.txt. That path doesn't exist. Something is confusing it about which path the CVs are stored at.
+
+***`Comments`: Note that the cv screening workflow currently only sets the status to **applied**. The status quo is to let the db executor run the evaluation. I was not sure yesterday whether to already include a **decision node** in the cv screening workflow, but it would definitely make it more autonomous. For now I decided to attach the `cv screening decision tool` to the db agent, where the supervisor can set a threshold for what defines fail vs. pass (a sketch follows below).***
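+
+A sketch of what such a threshold-based decision could look like (hypothetical helper for illustration only; the actual tool is `evaluate_cv_screening_decision` from `src.database.candidates`):
+
+```python
+# Hypothetical sketch of the threshold idea — not the actual implementation.
+def cv_screening_decision(score: float, threshold: float = 0.75) -> str:
+    """Map a CV screening score in [0, 1] to a pass/fail decision."""
+    return "pass" if score >= threshold else "fail"
+```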
+
+---
+
+## 2. Process multiple new candidates simultaneously
+### Query
+
+**Queries:**
+- "We have several new applicants. Process all of them and let me know how the screening went."
+
+- """We have two applicants in our database: one has just applied and the other has passed CV screening, correct?
+Please confirm that first and tell me what the actual statuses are.
+
+Then:
+- If one has status "applied", send him to the voice screening.
+- If the other candidate has successfully passed the CV screening, then prepare a congratulatory email.
+  - Before preparing the email, check our calendar for available time slots for a person-to-person interview.
+  - Include these available time slots in the email.
+
+At the end, summarize the actions you took.
+"""
+### Expected behavior
+Supervisor queries current candidate states via DB Executor and identifies all candidates in the new or cv_uploaded state.
+Supervisor routes each candidate to the CV Screening agent using isolated per-candidate threads.
+Each CV Screening agent run updates the database through DB Executor.
+Supervisor receives aggregated outcomes and summarizes them for HR.
+### Notes / issues
+TODO
+
+---
+
+## 3. Notify a passed candidate and request time slots
+### Query
+"This candidate X passed screening. Notify them and ask for their availability."
+### Expected behavior
+Supervisor detects that the candidate is in a screened_passed state.
+Supervisor delegates email sending to the Gmail Agent.
+Gmail Agent contacts Gmail MCP to send the message.
+DB Executor updates the candidate status to awaiting_time_slots.
+Supervisor reports the next expected step.
+### Notes / issues
+- Works correctly. Asked DB Executor for email, then sent the email.
+
+---
+
+## 4. Notify a failed candidate
+### Query
+"The screening result is fail. Please notify the candidate and update the system."
+### Expected behavior
+Supervisor sees the screened_failed state.
+Supervisor calls the Gmail Agent.
+Gmail Agent sends a rejection email using Gmail MCP.
+DB Executor updates status to rejected.
+Supervisor returns a clean confirmation.
+### Notes / issues
+- Gmail has issues unless `--allow-blocking` is used when launching `langgraph dev`. But that flag also breaks the database.
+- Gmail agent kept asking multiple times whether info was correct, even after being told yes. It needs to just do what it is told.
+
+***`Comment:`*** sending emails works for me on Mac without any issues. Not sure whether it is a Windows thing?
+
+---
+
+## 5. Generate a system wide status report
+### Query
+"What is the current status of all candidates?"
+### Expected behavior
+Supervisor calls DB Executor to retrieve aggregated counts and per status numbers.
+Supervisor formats the report for HR.
+No state transitions occur.
+No subagent beyond DB Executor is involved.
+### Notes / issues
+- Worked correctly without issues.
+
+---
+
+## 6. Schedule an interview for a candidate with provided availability
+### Query
+"The candidate already provided availability. Please schedule their interview."
+### Expected behavior
+Supervisor determines the candidate is in awaiting_time_slots and that availability is present in the DB.
+Supervisor calls the Calendar Agent.
+Calendar Agent uses the Calendar MCP to match candidate availability with the HR calendar and schedules a meeting.
+DB Executor updates the status to interview_scheduled.
+Supervisor reports the scheduled event.
+### Notes / issues
+- Works correctly. Asked DB Executor for email, then sent the email.
+
+---
+
+## 7. Process all candidates to their next required step
+### Query
+"Process all candidates and advance everyone to the next appropriate step."
+### Expected behavior
+Supervisor retrieves all candidates via DB Executor.
+Supervisor groups them by workflow state and delegates each group to the appropriate subagent (CV Screening, Gmail Agent, Calendar Agent).
+All work is executed per candidate thread.
+DB Executor performs all writes.
+Supervisor produces a summary of completed actions.
+### Notes / issues
+- Got lots of database errors and missing CVs (the CVs should be in the DB, they're in the files). The DB Executor needs clearer instructions for how to use it, and it needs to be more persistent. It cannot give up if it fails once (see the retry sketch at the end of this file).
+
+***`Comment`: we are still at a single-candidate MVP.*** `BUT` we should still try it if already `possible`!
+
+---
+
+## 8. Parse a CV without screening
+### Query
+"Only parse this new CV and store the structured data. Do not run screening."
+### Expected behavior
+Supervisor identifies that parsing is needed but screening is not requested.
+Supervisor routes to the CV Screening agent or dedicated parser if available.
+Parser extracts structured data and writes it to the DB via DB Executor.
+Supervisor leaves candidate in the correct state without triggering screening logic.
+### Notes / issues
+- Worked without issues.
+
+---
+
+## 9. Follow up when no time slots were received
+### Query
+"The candidate has not replied with availability. Follow up with them."
+### Expected behavior
+Supervisor identifies the state awaiting_time_slots with no stored availability.
+Supervisor delegates a follow up email to the Gmail Agent.
+Gmail Agent sends the email through Gmail MCP.
+DB Executor records that a follow up was sent.
+Supervisor confirms the action.
+### Notes / issues
+- Works correctly. Asked DB Executor for email, then sent the email.
+
+---
+
+## 10. Resume a stuck candidate from checklist state
+### Query
+"This candidate is stuck. Resume from exactly where they left off."
+### Expected behavior
+Supervisor loads the stored checklist and candidate state via DB Executor.
+Supervisor identifies the next unchecked atomic step.
+Supervisor routes to the appropriate subagent for that specific step.
+Subagent performs the atomic action and DB Executor persists the update.
+Supervisor does not repeat completed steps or skip steps.
+### Notes / issues
+- It worked fine, but it took many attempts to set info in the database. Maybe a clearer explanation is needed.
+
+## 11. High level summary of all candidates
+### Query
+"Tell me a high level summary about all candidates that have "applied" but not yet moved on."
+### Expected Behavior
+Supervisor does not provide just the names, emails, or phone numbers.
+Supervisor asks CV screener for info about the candidates.
+### Notes / issues
+- CV screener is having issues finding applicant CVs.
+- Supervisor tried just giving names or contact info which is insufficient.
\ No newline at end of file
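Several notes above observe that the DB Executor gives up after a single failure. One possible mitigation, sketched here purely as an illustration (an assumption, not code from this PR), is a small retry wrapper around the tool call:

```python
# Hedged sketch — a retry-wrapper idea, not code from this PR.
import time


def with_retries(fn, attempts: int = 3, delay_s: float = 1.0):
    """Call fn(), retrying on any exception up to `attempts` times."""
    for attempt in range(1, attempts + 1):
        try:
            return fn()
        except Exception:
            if attempt == attempts:
                raise
            time.sleep(delay_s)


# Hypothetical usage:
# result = with_retries(lambda: db_executor.invoke({"query": "Fetch all candidates."}))
```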
diff --git a/src/agents/supervisor/test_queries/seb.md b/src/agents/supervisor/test_queries/seb.md
new file mode 100644
index 0000000000000000000000000000000000000000..90fb394784088daa662933a33508deb88876ab31
--- /dev/null
+++ b/src/agents/supervisor/test_queries/seb.md
@@ -0,0 +1,47 @@
+## ***`Example Queries`***
+---
+
+### ***`Tool Availability`***
+
+Use the following prompt to check whether the agent can see the tools attached to it without having to mention them in the system prompt; passing the tools as a list to `create_agent` already takes care of that.
+```text
+hey what tools do you have available? please give comprehensive info and overview
+```
+**NOTE**
+In my last run it listed an additional tool called `Multi-Tool Use (multi_tool_use.parallel)`. This is NOT a real tool in the codebase but an internal OpenAI artifact representing the model's capability to call multiple tools in parallel. It can be ignored.
+
+### ***`Already working:`***
+```text
+>>> Hey, are there any candidates in our database?
+
+>>> Great, tell me more about this person!
+
+>>> Ok so please send him an email and notify him that his cv has been screened!
+
+>>> Please update his status from cv screened to applied!
+
+>>> I checked his cv and it looks great by my own manual inspection. Hence can we set his interview status to scheduled?
+```
+
+
+### ***`Goals:`***
+```text
+>>> Since his cv was screened, can you update his interview status as completed and decision status as maybe?
+
+>>> Since his cv was screened, can you update his interview status as completed and decision status as maybe? Then also send him an email that we will soon schedule a person-to-person interview with him.
+
+>>> can you send him an email that we liked his cv and want to schedule a meeting with him for the following friday at 3pm? After sending the email please update interview scheduling status as 'scheduled'
+
+---
+
+>>> Please schedule an interview for that person for this friday 2pm and then notify the applicant that he has a personal interview at that time and shall mark it in his calendar.
+
+>>> Please schedule an interview in our hr calendar that candidate x will have a person-to-person interview. Also notify both hr and the applicant by email and send a ***calendar invitation*** to the candidate!
+
+
+>>> Please check in our HR calendar what days have available slots for a 1h interview. Once we find that out, we suggest the available time slots to the candidate. Once he agrees to one slot we can schedule that agreed slot.
+
+>>> Can you please send a calendar invite to that person for this friday 2pm and to HR as well?
+
+>>>
+```
diff --git a/src/agents/voice_screening/__init__.py b/src/agents/voice_screening/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a0b97ee54b01cd516d5545a0b690a940fcc8038
--- /dev/null
+++ b/src/agents/voice_screening/__init__.py
@@ -0,0 +1,3 @@
+from .judge import voice_judge, evaluate_voice_screening
+
+__all__ = ["voice_judge", "evaluate_voice_screening"]
diff --git a/src/agents/voice_screening/audio_processor.py b/src/agents/voice_screening/audio_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab6247755e0dd1bad9957c3acf31ee68e2cbc31a
--- /dev/null
+++ b/src/agents/voice_screening/audio_processor.py
@@ -0,0 +1,188 @@
+"""
+Audio processing utilities for voice screening.
+Handles audio combining, resampling, and WAV export.
+""" +import io +import wave +import struct +import logging +from typing import List, Dict + +logger = logging.getLogger(__name__) + + +def combine_and_export_audio( + user_chunks: List[Dict], + agent_chunks: List[Dict], + session_start_time: float, + session_id: str +) -> bytes: + """ + Combine user and agent audio chunks and export as WAV file. + + Audio chunks are continuous streams - we concatenate them in order and mix + based on when each stream actually started relative to session start. + + Args: + user_chunks: List of dicts with 'timestamp' and 'data' (bytes) + agent_chunks: List of dicts with 'timestamp' and 'data' (bytes) + session_start_time: Session start timestamp for relative positioning + session_id: The session ID for logging. + + Returns: + bytes: WAV file data. + """ + if not session_start_time: + raise ValueError("Session start time not found") + + if not user_chunks and not agent_chunks: + logger.warning(f"No audio chunks found for session {session_id}") + # Return empty WAV file + wav_buffer = io.BytesIO() + with wave.open(wav_buffer, 'wb') as wav_file: + wav_file.setnchannels(1) # Mono + wav_file.setsampwidth(2) # 16-bit = 2 bytes + wav_file.setframerate(24000) # OpenAI uses 24kHz + wav_file.writeframes(b'') + return wav_buffer.getvalue() + + # Sample rate: OpenAI Realtime API uses 24kHz PCM16 + SAMPLE_RATE = 24000 + BYTES_PER_SAMPLE = 2 # 16-bit = 2 bytes + + # Detect user audio sample rate (browser typically captures at 48kHz) + # NOTE: Frontend now resamples to 24kHz before sending, so we can trust it matches. + user_sample_rate = SAMPLE_RATE # Always 24kHz + logger.info(f"Using standard sample rate: {user_sample_rate}Hz") + + # Process and prepare all chunks with their timestamps + # We need to interleave user and agent chunks based on when they actually occurred + all_chunks = [] + + # Process user chunks (resample if needed) + for chunk in user_chunks: + chunk_data = chunk["data"] + chunk_samples = len(chunk_data) // BYTES_PER_SAMPLE + + all_chunks.append({ + "timestamp": chunk["timestamp"], + "type": "user", + "data": chunk_data, + "samples": chunk_samples + }) + + # Process agent chunks (already at 24kHz, no resampling needed) + for chunk in agent_chunks: + chunk_data = chunk["data"] + chunk_samples = len(chunk_data) // BYTES_PER_SAMPLE + + all_chunks.append({ + "timestamp": chunk["timestamp"], + "type": "agent", + "data": chunk_data, + "samples": chunk_samples + }) + + # Sort all chunks by timestamp to get chronological order + all_chunks.sort(key=lambda x: x["timestamp"]) + + # Now place chunks sequentially, maintaining continuity within each stream + # Track cumulative position for each stream type + user_cumulative = None + agent_cumulative = None + + chunk_placements = [] + + for chunk in all_chunks: + chunk_timestamp = chunk["timestamp"] + chunk_offset_seconds = chunk_timestamp - session_start_time + chunk_start_sample = max(0, int(chunk_offset_seconds * SAMPLE_RATE)) + + if chunk["type"] == "user": + # For user audio, maintain continuity within user stream + if user_cumulative is None: + user_cumulative = chunk_start_sample + + # Ensure no gaps - if there's a gap, start from where previous user chunk ended + if chunk_start_sample < user_cumulative: + chunk_start_sample = user_cumulative + + chunk_placements.append({ + "start_sample": chunk_start_sample, + "data": chunk["data"], + "samples": chunk["samples"], + "type": "user" + }) + + user_cumulative = chunk_start_sample + chunk["samples"] + else: # agent + # For agent audio, maintain continuity within 
agent stream
+            if agent_cumulative is None:
+                agent_cumulative = chunk_start_sample
+
+            # Ensure no gaps - if there's a gap, start from where previous agent chunk ended
+            if chunk_start_sample < agent_cumulative:
+                chunk_start_sample = agent_cumulative
+
+            chunk_placements.append({
+                "start_sample": chunk_start_sample,
+                "data": chunk["data"],
+                "samples": chunk["samples"],
+                "type": "agent"
+            })
+
+            agent_cumulative = chunk_start_sample + chunk["samples"]
+
+    # Calculate total duration needed
+    total_samples = 0
+    if chunk_placements:
+        for placement in chunk_placements:
+            total_samples = max(total_samples, placement["start_sample"] + placement["samples"])
+
+    if total_samples == 0:
+        logger.warning(f"No audio samples to export for session {session_id}")
+        wav_buffer = io.BytesIO()
+        with wave.open(wav_buffer, 'wb') as wav_file:
+            wav_file.setnchannels(1)
+            wav_file.setsampwidth(2)
+            wav_file.setframerate(SAMPLE_RATE)
+            wav_file.writeframes(b'')
+        return wav_buffer.getvalue()
+
+    # Initialize output buffer with zeros
+    output_buffer = bytearray(total_samples * BYTES_PER_SAMPLE)
+
+    # Place all chunks in chronological order
+    for placement in chunk_placements:
+        chunk_data = placement["data"]
+        chunk_start = placement["start_sample"]
+        chunk_samples = placement["samples"]
+
+        for i in range(chunk_samples):
+            sample_offset = chunk_start + i
+            if 0 <= sample_offset < total_samples:
+                # Read PCM16 sample from chunk (little-endian signed 16-bit)
+                sample_value = struct.unpack('<h', chunk_data[i * BYTES_PER_SAMPLE:(i + 1) * BYTES_PER_SAMPLE])[0]
[... the remainder of combine_and_export_audio (sample mixing and WAV export) and the beginning of src/agents/voice_screening/judge.py (diff header, imports, SYSTEM_PROMPT, tool definition) were lost in extraction; judge.py resumes below ...]
+def evaluate_voice_screening(candidate_id: str) -> str:
+    """
+    Evaluates a completed voice screening session for a candidate.
+
+    Args:
+        candidate_id (str): The UUID of the candidate to evaluate.
+
+    Returns:
+        str: A summary of the evaluation result.
+    """
+    try:
+        with SessionLocal() as session:
+            # 1. Fetch Candidate and VoiceScreeningResult
+            candidate = session.execute(
+                select(Candidate).where(Candidate.id == UUID(candidate_id))
+            ).scalar_one_or_none()
+
+            if not candidate:
+                return f"❌ Candidate {candidate_id} not found."
+
+            # Fetch latest voice screening result
+            voice_result = session.execute(
+                select(VoiceScreeningResult)
+                .where(VoiceScreeningResult.candidate_id == UUID(candidate_id))
+                .order_by(VoiceScreeningResult.timestamp.desc())
+            ).scalar_one_or_none()
+
+            if not voice_result or not voice_result.transcript_text:
+                return f"❌ No voice screening transcript found for candidate {candidate.full_name}."
+
+            # Fetch job title from CV screening result (for context)
+            cv_result = session.execute(
+                select(CVScreeningResult)
+                .where(CVScreeningResult.candidate_id == UUID(candidate_id))
+                .order_by(CVScreeningResult.timestamp.desc())
+            ).scalar_one_or_none()
+
+            job_title = cv_result.job_title if cv_result else "the position"
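+
+            # NOTE: the multimodal input assembled below pairs the transcript text
+            # with the raw audio: gpt-4o-audio-preview accepts base64-encoded WAV
+            # bytes via an "input_audio" content part, while plain strings go into
+            # "text" parts of the same HumanMessage.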
+            # 2. Prepare Input (Audio + Text)
+            messages = []
+
+            messages.append(SystemMessage(content=SYSTEM_PROMPT))
+
+            user_content = []
+            user_content.append({"type": "text", "text": f"Candidate: {candidate.full_name}\nPosition: {job_title}\n"})
+
+            # Try to load audio
+            audio_loaded = False
+            if voice_result.audio_url and os.path.exists(voice_result.audio_url):
+                try:
+                    with open(voice_result.audio_url, "rb") as audio_file:
+                        audio_data = base64.b64encode(audio_file.read()).decode("utf-8")
+                    user_content.append({
+                        "type": "input_audio",
+                        "input_audio": {
+                            "data": audio_data,
+                            "format": "wav"
+                        }
+                    })
+                    audio_loaded = True
+                except Exception as e:
+                    print(f"⚠️ Failed to load audio file: {e}")
+
+            # Always include transcript as text context
+            user_content.append({"type": "text", "text": f"Transcript:\n{voice_result.transcript_text}\n"})
+
+            messages.append(HumanMessage(content=user_content))
+
+            # 3. Call LLM
+            # Use audio-capable model if audio is loaded, otherwise standard model
+            model_name = "gpt-4o-audio-preview" if audio_loaded else "gpt-4o"
+            llm = ChatOpenAI(model=model_name, temperature=0)
+
+            # gpt-4o-audio-preview doesn't support the 'json_schema' response format yet,
+            # so use function calling for structured output in both cases.
+            method = "function_calling"
+
+            structured_llm = llm.with_structured_output(VoiceScreeningOutput, method=method)
+            evaluation: VoiceScreeningOutput = structured_llm.invoke(messages)
+
+            # 4. Update Database
+            voice_result.sentiment_score = evaluation.sentiment_score
+            voice_result.confidence_score = evaluation.confidence_score
+            voice_result.communication_score = evaluation.communication_score
+            voice_result.proficiency_score = evaluation.proficiency_score
+            voice_result.llm_summary = evaluation.llm_summary
+            # voice_result.llm_judgment_json = evaluation.model_dump()  # Removed from schema
+
+            # 5.
Determine Pass/Fail + # Calculate average score (0-1 scale -> 0-100 scale for threshold comparison) + avg_score = ( + evaluation.sentiment_score + + evaluation.confidence_score + + evaluation.communication_score + + evaluation.proficiency_score + ) / 4.0 * 100 + + if avg_score >= 75: + candidate.status = CandidateStatus.voice_passed + result_msg = "PASSED" + else: + candidate.status = CandidateStatus.voice_rejected + result_msg = "REJECTED" + + candidate.updated_at = datetime.utcnow() + session.commit() + + return ( + f"✅ Evaluation complete for {candidate.full_name} using {model_name}.\n" + f"Result: {result_msg} (Score: {avg_score:.1f}/100)\n" + f"Summary: {evaluation.llm_summary}" + ) + + except Exception as e: + import traceback + return f"❌ Error evaluating voice screening: {str(e)}\n{traceback.format_exc()}" + +# Alias for the tool to be used in supervisor +voice_judge = evaluate_voice_screening diff --git a/src/agents/voice_screening/schemas/__init__.py b/src/agents/voice_screening/schemas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4dfb7551f3aeedd28a237284509974df753a7c9d --- /dev/null +++ b/src/agents/voice_screening/schemas/__init__.py @@ -0,0 +1,6 @@ +from src.agents.voice_screening.schemas.output_schema import ( + VoiceScreeningOutput +) + +__all__ = ["VoiceScreeningOutput"] + diff --git a/src/agents/voice_screening/schemas/output_schema.py b/src/agents/voice_screening/schemas/output_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..8e13ee217ca7b060c0b991ed0f13dc18501c7f5a --- /dev/null +++ b/src/agents/voice_screening/schemas/output_schema.py @@ -0,0 +1,35 @@ +from pydantic import BaseModel, Field +from typing import Optional, Dict, Any, List +from datetime import datetime + + +class CallTranscript(BaseModel): + """Structure for storing conversation transcript.""" + speaker: str = Field(..., description="Speaker identifier ('agent' or 'candidate')") + text: str = Field(..., description="Transcribed text") + timestamp: datetime = Field(default_factory=datetime.utcnow, description="When the utterance occurred") + + +class ConversationState(BaseModel): + """State management during an active call.""" + call_sid: str = Field(..., description="Twilio Call SID") + candidate_id: str = Field(..., description="Candidate UUID") + transcript: List[CallTranscript] = Field(default_factory=list, description="Full conversation transcript") + current_question_index: int = Field(default=0, description="Index of current interview question") + interview_questions: List[str] = Field(default_factory=list, description="List of interview questions") + is_active: bool = Field(default=True, description="Whether the call is currently active") + started_at: Optional[datetime] = Field(default=None, description="Call start time") + ended_at: Optional[datetime] = Field(default=None, description="Call end time") + + +class VoiceScreeningOutput(BaseModel): + """Structured results from voice screening evaluation.""" + sentiment_score: float = Field(..., ge=0, le=1, description="Overall sentiment score (0=negative, 1=positive)") + confidence_score: float = Field(..., ge=0, le=1, description="Candidate's confidence level") + communication_score: float = Field(..., ge=0, le=1, description="Communication clarity and effectiveness") + proficiency_score: float = Field(..., ge=0, le=1, description="Candidate's technical proficiency") + llm_summary: str = Field(..., description="LLM-generated summary of the interview") + llm_judgment_json: 
Optional[Dict[str, Any]] = Field(default=None, description="Structured judgment data from LLM") + key_traits: List[str] = Field(default_factory=list, description="Key personality/technical traits identified") + recommendation: str = Field(..., description="Pass/fail or next-step recommendation") + diff --git a/src/agents/voice_screening/session_service.py b/src/agents/voice_screening/session_service.py new file mode 100644 index 0000000000000000000000000000000000000000..dde94851626b54515b2191295ef08b13a97dccd1 --- /dev/null +++ b/src/agents/voice_screening/session_service.py @@ -0,0 +1,131 @@ +""" +Session service for voice screening. +Handles session configuration, screening questions, and database operations. +""" +import logging +from typing import Optional, Dict +from uuid import UUID +from datetime import datetime + +from sqlalchemy import select, desc + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate, CVScreeningResult, VoiceScreeningResult +from src.state.candidate import CandidateStatus +from src.agents.voice_screening.utils.questions import get_screening_questions + +logger = logging.getLogger(__name__) + + +def get_session_config(candidate_id: str) -> Dict: + """ + Generate session configuration for a candidate. + + Args: + candidate_id: UUID of the candidate + + Returns: + Dict with session configuration including instructions and questions + """ + with SessionLocal() as db: + # Fetch candidate + candidate = db.execute( + select(Candidate).where(Candidate.id == UUID(candidate_id)) + ).scalar_one_or_none() + + if not candidate: + raise ValueError(f"Candidate {candidate_id} not found") + + # Fetch latest CV screening result for job title + cv_result = db.execute( + select(CVScreeningResult) + .where(CVScreeningResult.candidate_id == UUID(candidate_id)) + .order_by(desc(CVScreeningResult.timestamp)) + .limit(1) + ).scalar_one_or_none() + + job_title = cv_result.job_title if cv_result else "the position" + questions = get_screening_questions(job_title) + + # Build instructions + instructions = ( + f"You are a friendly HR assistant conducting a phone screening interview with {candidate.full_name} " + f"for the position of {job_title}. " + f"Greet the candidate warmly by name. " + f"Your goal is to ask the following main questions to assess their fit:\n\n" + ) + + for i, q in enumerate(questions, 1): + instructions += f"{i}. {q}\n" + + instructions += ( + "\nAsk one question at a time. Wait for their response before moving to the next. " + "Keep the conversations brief and to the point, ask only one follow-up question per main question. " + "If they ask clarifying questions, answer them briefly." + ) + + return { + "candidate_name": candidate.full_name, + "job_title": job_title, + "instructions": instructions, + "questions": questions, + "config": { + "modalities": ["audio", "text"], + "instructions": instructions, + "voice": "alloy", + "input_audio_format": "pcm16", + "output_audio_format": "pcm16", + "turn_detection": { + "type": "server_vad", + "threshold": 0.5, + "prefix_padding_ms": 300, + "silence_duration_ms": 10000 + } + } + } + + +def save_voice_screening_session( + candidate_id: str, + session_id: str, + transcript_text: str, + audio_url: Optional[str] = None +) -> None: + """ + Save voice screening session to database. 
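+
+    Minimal usage sketch (the UUID, session id, and paths are illustrative):
+
+        save_voice_screening_session(
+            candidate_id="1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed",
+            session_id="sess-123",
+            transcript_text="agent: Hello!\ncandidate: Hi, thanks for calling.",
+            audio_url="src/database/voice_recordings/sess-123.wav",
+        )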
+ + Args: + candidate_id: UUID of the candidate + session_id: Session identifier + transcript_text: Full conversation transcript + audio_url: Path to saved audio file + """ + with SessionLocal() as db: + candidate = db.execute( + select(Candidate).where(Candidate.id == UUID(candidate_id)) + ).scalar_one_or_none() + + if not candidate: + raise ValueError(f"Candidate {candidate_id} not found") + + # Create new voice screening result entry + screening_entry = VoiceScreeningResult( + candidate_id=candidate.id, + call_sid=session_id, # Using session_id instead of Twilio call_sid + transcript_text=transcript_text, + audio_url=audio_url, + timestamp=datetime.utcnow(), + # Scores will be filled by judge later + sentiment_score=None, + confidence_score=None, + communication_score=None, + llm_summary=None, + ) + + # Add and commit + db.add(screening_entry) + candidate.status = CandidateStatus.voice_done + candidate.updated_at = datetime.utcnow() + db.commit() + + logger.info(f"Voice screening session saved for candidate {candidate_id}") diff --git a/src/agents/voice_screening/utils/__init__.py b/src/agents/voice_screening/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..36432d6a6d27f101ebb84cd67aa20aa9cc187369 --- /dev/null +++ b/src/agents/voice_screening/utils/__init__.py @@ -0,0 +1,2 @@ +# Utils module for voice screening agent + diff --git a/src/agents/voice_screening/utils/questions.py b/src/agents/voice_screening/utils/questions.py new file mode 100644 index 0000000000000000000000000000000000000000..9bb0f1f748c399c077f7b3a46f7e720fd9c7d738 --- /dev/null +++ b/src/agents/voice_screening/utils/questions.py @@ -0,0 +1,20 @@ +from typing import List + +def get_screening_questions(job_title: str) -> List[str]: + """ + Returns a list of static screening questions based on the job title. + For now, it returns a generic set of questions, but can be expanded. + """ + # Generic questions for any role + base_questions = [ + "Can you briefly walk me through your background and experience?", + "What motivated you to apply for this position?", + "What are your salary expectations?", + "When would you be available to start?", + ] + + # We could add specific questions based on job_title here + # if "engineer" in job_title.lower(): + # base_questions.append("Describe a challenging technical problem you solved.") + + return base_questions diff --git a/src/api/__init__.py b/src/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5415d7b32a997788bdf88ea42f87c81ef7ae1134 --- /dev/null +++ b/src/api/__init__.py @@ -0,0 +1,6 @@ +"""API package for Recruitment Agent.""" + +from src.api.app import app + +__all__ = ["app"] + diff --git a/src/api/app.py b/src/api/app.py new file mode 100644 index 0000000000000000000000000000000000000000..ad81cbf4d4b5193e3cd8bb3c1c4646a242911c2b --- /dev/null +++ b/src/api/app.py @@ -0,0 +1,59 @@ +""" +FastAPI Application for Recruitment Agent API. 
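+
+A quick smoke test once the server is running (sketch; assumes the default
+port used in the command below):
+
+    import requests
+    print(requests.get("http://localhost:8000/health").json())
+    # expected: {"status": "healthy"}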
+ +Run with: + uvicorn src.api.app:app --reload --port 8000 +""" + +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + +from src.api.routers import supervisor, cv_upload, voice_screener, database +from src.configs import get_openai_settings +from src.api.middleware.session import SessionMiddleware + +# Validate OpenAI API key at startup (shows nice error if missing) +get_openai_settings() + +app = FastAPI( + title="Recruitment Agent API", + description="API layer for the HR Supervisor Agent and recruitment tools", + version="1.0.0", +) + +# Per-user session isolation +app.add_middleware(SessionMiddleware) + +# CORS middleware for frontend integration +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Configure appropriately for production + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# Include routers +app.include_router(supervisor.router, prefix="/api/v1/supervisor", tags=["Supervisor"]) +app.include_router(cv_upload.router, prefix="/api/v1/cv", tags=["CV Upload"]) +app.include_router(database.router, prefix="/api/v1/db", tags=["Database"]) +app.include_router(voice_screener.router, prefix="/api/v1/voice-screener", tags=["Voice Screener"]) + + +@app.get("/health") +async def health_check() -> dict[str, str]: + """Health check endpoint. + """ + return {"status": "healthy"} + + +@app.get("/") +async def root(): + """Root endpoint with API info. + """ + return { + "message": "Recruitment Agent API", + "docs": "/docs", + "health": "/health", + } + diff --git a/src/api/info.md b/src/api/info.md new file mode 100644 index 0000000000000000000000000000000000000000..94fcc58c0c1e434f489d5c05462b94a4bd9ff10e --- /dev/null +++ b/src/api/info.md @@ -0,0 +1,173 @@ +# API Layer 🔌 + +> FastAPI backend decoupling agents from frontends. + +## Quick Start + +```bash +# Local +uvicorn src.api.app:app --reload --port 8080 + +# Docker +docker compose --env-file .env -f docker/docker-compose.yml up supervisor_api +``` + +**Docs:** http://localhost:8080/docs + +## Endpoints + +### Supervisor Agent `/api/v1/supervisor` + +| Method | Endpoint | Description | +|--------|----------|-------------| +| POST | `/chat` | Batch response with context compaction | +| POST | `/chat/stream` | SSE streaming with context compaction ⚠️ | +| POST | `/raw/chat` | Batch response, direct agent (no compaction) | +| POST | `/raw/chat/stream` | SSE streaming, direct agent ⚠️ | +| POST | `/new` | Create new chat session | +| GET | `/health` | Health check | + +⚠️ **Note:** Streaming endpoints have known issues. Use batch endpoints (`/chat` or `/raw/chat`) for reliable operation. + +**With vs Raw endpoints:** +- `/chat` and `/chat/stream` use `CompactingSupervisor` wrapper (auto context management) +- `/raw/chat` and `/raw/chat/stream` bypass wrapper (direct agent access, useful for debugging) + +**Streaming (SSE) events:** +``` +event: token → {"content": "Hello"} +event: done → {"thread_id": "abc123", "token_count": 150} +event: error → {"error": "Something went wrong"} +``` + +### CV Upload `/api/v1/cv` + +| Method | Endpoint | Description | +|--------|----------|-------------| +| POST | `/submit` | Submit application + CV | +| GET | `/health` | Health check | + +**Submit flow:** +1. Save CV file to disk +2. Register candidate in DB +3. Parse CV → Markdown (GPT-4 Vision) +4. 
Update parsed path in DB + +### Database `/api/v1/db` + +| Method | Endpoint | Description | +|--------|----------|-------------| +| POST | `/query` | Flexible query any table | +| GET | `/candidates` | List candidates with filters | +| GET | `/candidates/{id}` | Get full candidate profile by UUID | +| GET | `/candidates/email/{email}` | Get full candidate profile by email | +| GET | `/cv-screening` | List CV screening results | +| GET | `/voice-screening` | List voice screening results | +| GET | `/interviews` | List interview scheduling | +| GET | `/decisions` | List final decisions | +| GET | `/stats` | Database statistics | +| GET | `/health` | Health check | + +**Full Candidate Profile** (`/candidates/{id}` and `/candidates/email/{email}`): + +Returns ALL data for a candidate including related records (by default `include_relations=true`): +- **Base fields:** id, full_name, email, phone_number, cv_file_path, parsed_cv_file_path, status, created_at, updated_at +- **cv_screening_results:** list of CV screening scores and feedback +- **voice_screening_results:** list of voice screening transcripts and scores +- **interview_scheduling:** list of scheduled interviews +- **final_decision:** hiring decision with rationale (if any) + +Use `?include_relations=false` to fetch only base candidate fields. + +**Flexible Query Example:** +```json +POST /api/v1/db/query +{ + "table": "candidates", + "filters": {"status": "applied"}, + "fields": ["id", "full_name", "email"], + "include_relations": true, + "limit": 10, + "offset": 0, + "sort_by": "created_at", + "sort_order": "desc" +} +``` + +**Supported filter operators:** +- `$eq`, `$ne`: equality/inequality +- `$gt`, `$gte`, `$lt`, `$lte`: comparisons +- `$in`, `$nin`: list membership +- `$like`, `$ilike`: pattern matching + +## Structure + +``` +src/api/ +├── app.py ← FastAPI app + CORS + router mounting +├── routers/ +│ ├── supervisor.py ← Chat endpoints (regular + streaming) +│ ├── cv_upload.py ← CV submission endpoint +│ └── database.py ← Flexible database query endpoints +└── schemas/ + ├── supervisor_chat.py ← ChatRequest, ChatResponse + ├── cv_upload.py ← SubmitResponse + └── database.py ← QueryRequest, QueryResponse, etc. 
+``` + +## SDK Clients + +Frontends use SDK clients instead of raw HTTP: + +```python +# Supervisor +from src.sdk import SupervisorClient +client = SupervisorClient() +for chunk in client.stream("Show candidates", thread_id): + print(chunk.content) + +# CV Upload +from src.sdk import CVUploadClient +client = CVUploadClient() +response = client.submit(name, email, phone, cv_file, filename) + +# Database Queries +from src.sdk import DatabaseClient +db = DatabaseClient() + +# List candidates +candidates = db.get_candidates(status="applied", include_relations=True) +for c in candidates.data: + print(c["full_name"], c["status"]) + +# Get full candidate profile by email +profile = db.get_candidate_by_email("ada@example.com") +print(profile.data["cv_screening_results"]) + +# Flexible query with filters +results = db.query( + table="cv_screening_results", + filters={"overall_fit_score": {"$gte": 0.8}}, + sort_by="overall_fit_score", + sort_order="desc" +) + +# Get database stats +stats = db.get_stats() +print(stats.stats["candidates"]["by_status"]) +``` + +## Environment + +| Variable | Default | Used By | +|----------|---------|---------| +| `OPENAI_API_KEY` | required | Validated at startup | +| `CV_UPLOAD_PATH` | `src/database/cvs/uploads` | CV router | +| `CV_PARSED_PATH` | `src/database/cvs/parsed` | CV router | +| `POSTGRES_*` | varies | Database connection | + +## TODO + +- [ ] Voice agent router +- [x] Candidate database router + diff --git a/src/api/middleware/session.py b/src/api/middleware/session.py new file mode 100644 index 0000000000000000000000000000000000000000..0a43608fabedba45854963887d6129cd8c358e5b --- /dev/null +++ b/src/api/middleware/session.py @@ -0,0 +1,53 @@ +"""Simple per-user session middleware. + +Extracts a session_id from header, query, or cookie and ensures it is attached +to the request state. If missing, a new UUID is generated and returned in both +response headers and a cookie (non-httponly for frontend JS use). +""" + +import uuid +from typing import Optional +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.requests import Request +from starlette.responses import Response + + +SESSION_HEADER = "X-Session-Id" +SESSION_COOKIE = "session_id" + + +def _normalize(session_id: Optional[str]) -> Optional[str]: + if not session_id: + return None + return session_id.strip().strip("\"'") + + +class SessionMiddleware(BaseHTTPMiddleware): + """Attach a per-user session_id to request.state and response headers.""" + + async def dispatch(self, request: Request, call_next): + session_id = ( + request.query_params.get("session_id") + or request.headers.get(SESSION_HEADER) + or request.cookies.get(SESSION_COOKIE) + ) + session_id = _normalize(session_id) + new_session = False + + if not session_id: + session_id = uuid.uuid4().hex + new_session = True + + request.state.session_id = session_id + + response: Response = await call_next(request) + response.headers[SESSION_HEADER] = session_id + if new_session: + response.set_cookie( + SESSION_COOKIE, + session_id, + httponly=False, + samesite="lax", + secure=False, + ) + return response diff --git a/src/api/routers/__init__.py b/src/api/routers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9d9cd824fb94d99fb8dbac25ebf4a71f12f994ae --- /dev/null +++ b/src/api/routers/__init__.py @@ -0,0 +1,6 @@ +""" +Router modules for the Recruitment Agent API. +""" +from . 
import supervisor, cv_upload, voice_screener, database + +__all__ = ["supervisor", "cv_upload", "voice_screener", "database"] diff --git a/src/api/routers/cv_upload.py b/src/api/routers/cv_upload.py new file mode 100644 index 0000000000000000000000000000000000000000..b6a6882b7e91a248538c858be31340847b06e628 --- /dev/null +++ b/src/api/routers/cv_upload.py @@ -0,0 +1,102 @@ +""" +CV Upload Router. + +Handles CV submission and candidate registration. +""" + +from pathlib import Path +from fastapi import APIRouter, HTTPException, UploadFile, File, Form + +from src.api.schemas.cv_upload import SubmitResponse +from src.configs import get_cv_settings +from src.database.candidates import register_candidate, update_parsed_cv_path +from src.database.cvs import save_cv +from src.doc_parser import pdf_to_markdown + + +router = APIRouter() + +# Load settings and ensure directories exist +settings = get_cv_settings() +settings.ensure_dirs() + + +# ================================================================================== +# ENDPOINTS +# ================================================================================== + +@router.post("/submit", response_model=SubmitResponse) +async def submit_application( + full_name: str = Form(..., description="Candidate's full name"), + email: str = Form(..., description="Candidate's email address"), + phone: str = Form(default="", description="Candidate's phone number"), + cv_file: UploadFile = File(..., description="CV file (PDF or DOCX)") +) -> SubmitResponse: + """ + Submit a job application with CV. + + This endpoint: + 1. Saves the uploaded CV file + 2. Registers the candidate in the database + 3. Parses the CV to markdown for AI processing + 4. Updates the parsed CV path in the database + + Returns success status and details about the submission. + """ + # Validate file type + allowed_extensions = {".pdf", ".docx"} + file_ext = Path(cv_file.filename or "").suffix.lower() + if file_ext not in allowed_extensions: + raise HTTPException( + status_code=400, + detail=f"Invalid file type. Allowed: {', '.join(allowed_extensions)}" + ) + + try: + # 1. Save CV locally + file_path = save_cv(cv_file.file, cv_file.filename or "cv.pdf", candidate_name=full_name) + file_path = Path(file_path) + + # 2. Register candidate in DB + success = register_candidate(full_name, email, phone, str(file_path)) + + if not success: + return SubmitResponse( + success=False, + message=f"An application with email '{email}' already exists. You can only apply once.", + candidate_name=full_name, + email=email, + already_exists=True, + ) + + # 3. Parse CV to markdown + pdf_to_markdown( + input_path=file_path, + output_path=settings.parsed_path, + model="gpt-4.1-mini", + ) + + # 4. 
Update parsed CV path in DB + parsed_path = settings.parsed_path / (file_path.stem + ".txt") + update_parsed_cv_path(email, str(parsed_path)) + + return SubmitResponse( + success=True, + message=f"Application submitted successfully for {full_name}!", + candidate_name=full_name, + email=email, + cv_file_path=str(file_path), + ) + + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Failed to process application: {str(e)}" + ) + + +@router.get("/health") +async def cv_upload_health(): + """Health check for CV upload router.""" + return {"status": "healthy", "service": "cv_upload"} + diff --git a/src/api/routers/database.py b/src/api/routers/database.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1e8f0d9d16a8bcbbcf819878abe09549dc4765 --- /dev/null +++ b/src/api/routers/database.py @@ -0,0 +1,505 @@ +""" +Database Query Router. + +Flexible endpoints for querying any table in the recruitment database. +""" + +from typing import Any, Optional +from uuid import UUID + +from fastapi import APIRouter, HTTPException, Query +from sqlalchemy import asc, desc +from sqlalchemy.orm import joinedload + +from src.api.schemas.database import ( + TableName, + SortOrder, + QueryRequest, + QueryResponse, + SingleRecordResponse, +) +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import ( + Candidate, + CVScreeningResult, + VoiceScreeningResult, + InterviewScheduling, + FinalDecision, +) + + +router = APIRouter() + + +# ================================================================================== +# TABLE MAPPING +# ================================================================================== + +TABLE_MAP = { + TableName.candidates: Candidate, + TableName.cv_screening_results: CVScreeningResult, + TableName.voice_screening_results: VoiceScreeningResult, + TableName.interview_scheduling: InterviewScheduling, + TableName.final_decision: FinalDecision, +} + + +# ================================================================================== +# HELPER FUNCTIONS +# ================================================================================== + +def model_to_dict(obj: Any, fields: Optional[list[str]] = None) -> dict[str, Any]: + """ + Convert a SQLAlchemy model instance to a dictionary. + + Args: + obj: SQLAlchemy model instance + fields: Optional list of fields to include. If None, includes all. + + Returns: + Dictionary representation of the model + """ + if obj is None: + return None + + result = {} + for column in obj.__table__.columns: + key = column.name + if fields is None or key in fields: + value = getattr(obj, key) + # Convert UUID and Enum to string for JSON serialization + if hasattr(value, 'hex'): # UUID + value = str(value) + elif hasattr(value, 'value'): # Enum + value = value.value + result[key] = value + return result + + +def serialize_relation(relation_data: Any, is_list: bool = True) -> Any: + """Serialize relationship data.""" + if relation_data is None: + return None + if is_list: + return [model_to_dict(item) for item in relation_data] + return model_to_dict(relation_data) + + +def apply_filters(query, model, filters: dict[str, Any]): + """ + Apply filters to a SQLAlchemy query. 
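+
+    For example, one filters mapping may combine the forms listed below
+    (field names are illustrative):
+
+        {
+            "status": "applied",
+            "overall_fit_score": {"$gte": 0.8},
+            "email": {"$ilike": "%@example.com"},
+        }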
+ + Supports: + - Simple equality: {"field": "value"} + - Comparison operators: {"field": {"$gt": 5, "$lte": 10}} + - List membership: {"field": {"$in": [1, 2, 3]}} + """ + for field, value in filters.items(): + if not hasattr(model, field): + continue + + column = getattr(model, field) + + if isinstance(value, dict): + # Handle comparison operators + for op, op_value in value.items(): + if op == "$eq": + query = query.filter(column == op_value) + elif op == "$ne": + query = query.filter(column != op_value) + elif op == "$gt": + query = query.filter(column > op_value) + elif op == "$gte": + query = query.filter(column >= op_value) + elif op == "$lt": + query = query.filter(column < op_value) + elif op == "$lte": + query = query.filter(column <= op_value) + elif op == "$in": + query = query.filter(column.in_(op_value)) + elif op == "$nin": + query = query.filter(~column.in_(op_value)) + elif op == "$like": + query = query.filter(column.like(op_value)) + elif op == "$ilike": + query = query.filter(column.ilike(op_value)) + else: + # Simple equality + query = query.filter(column == value) + + return query + + +# ================================================================================== +# ENDPOINTS +# ================================================================================== + +@router.post("/query", response_model=QueryResponse) +async def query_table(request: QueryRequest) -> QueryResponse: + """ + Flexible query endpoint for any table. + + Supports filtering, field selection, pagination, and sorting. + + Example request body: + ```json + { + "table": "candidates", + "filters": {"status": "applied"}, + "fields": ["id", "full_name", "email"], + "limit": 10, + "offset": 0, + "sort_by": "created_at", + "sort_order": "desc" + } + ``` + """ + model = TABLE_MAP.get(request.table) + if not model: + raise HTTPException(status_code=400, detail=f"Unknown table: {request.table}") + + try: + with SessionLocal() as session: + # Base query + query = session.query(model) + + # Apply eager loading for relations if requested (candidates only) + if request.include_relations and request.table == TableName.candidates: + query = query.options( + joinedload(Candidate.cv_screening_results), + joinedload(Candidate.voice_screening_results), + joinedload(Candidate.interview_scheduling), + joinedload(Candidate.final_decision), + ) + + # Apply filters + if request.filters: + query = apply_filters(query, model, request.filters) + + # Get total count before pagination + total_count = query.count() + + # Apply sorting + if request.sort_by and hasattr(model, request.sort_by): + sort_column = getattr(model, request.sort_by) + if request.sort_order == SortOrder.asc: + query = query.order_by(asc(sort_column)) + else: + query = query.order_by(desc(sort_column)) + + # Apply pagination + query = query.offset(request.offset).limit(request.limit) + + # Execute query + results = query.all() + + # Serialize results + data = [] + for row in results: + row_dict = model_to_dict(row, request.fields) + + # Include relations for candidates if requested + if request.include_relations and request.table == TableName.candidates: + row_dict["cv_screening_results"] = serialize_relation(row.cv_screening_results) + row_dict["voice_screening_results"] = serialize_relation(row.voice_screening_results) + row_dict["interview_scheduling"] = serialize_relation(row.interview_scheduling) + row_dict["final_decision"] = serialize_relation(row.final_decision, is_list=False) + + data.append(row_dict) + + return QueryResponse( + success=True, 
+ table=request.table.value, + total_count=total_count, + returned_count=len(data), + offset=request.offset, + data=data, + ) + + except Exception as e: + raise HTTPException(status_code=500, detail=f"Query failed: {str(e)}") + + +@router.get("/candidates", response_model=QueryResponse) +async def list_candidates( + status: Optional[str] = Query(default=None, description="Filter by status"), + limit: int = Query(default=100, ge=1, le=1000, description="Max records"), + offset: int = Query(default=0, ge=0, description="Offset for pagination"), + include_relations: bool = Query(default=False, description="Include related screening data"), +) -> QueryResponse: + """ + List all candidates with optional filtering. + + Convenience endpoint for the most common query. + """ + filters = {} + if status: + filters["status"] = status + + request = QueryRequest( + table=TableName.candidates, + filters=filters if filters else None, + include_relations=include_relations, + limit=limit, + offset=offset, + sort_by="created_at", + sort_order=SortOrder.desc, + ) + return await query_table(request) + + +@router.get("/candidates/{candidate_id}", response_model=SingleRecordResponse) +async def get_candidate( + candidate_id: UUID, + include_relations: bool = Query(default=True, description="Include related screening data"), +) -> SingleRecordResponse: + """ + Get a single candidate by ID with all related data. + """ + try: + with SessionLocal() as session: + query = session.query(Candidate).filter(Candidate.id == candidate_id) + + if include_relations: + query = query.options( + joinedload(Candidate.cv_screening_results), + joinedload(Candidate.voice_screening_results), + joinedload(Candidate.interview_scheduling), + joinedload(Candidate.final_decision), + ) + + candidate = query.first() + + if not candidate: + return SingleRecordResponse( + success=False, + table="candidates", + data=None, + message=f"Candidate with ID {candidate_id} not found", + ) + + data = model_to_dict(candidate) + + if include_relations: + data["cv_screening_results"] = serialize_relation(candidate.cv_screening_results) + data["voice_screening_results"] = serialize_relation(candidate.voice_screening_results) + data["interview_scheduling"] = serialize_relation(candidate.interview_scheduling) + data["final_decision"] = serialize_relation(candidate.final_decision, is_list=False) + + return SingleRecordResponse( + success=True, + table="candidates", + data=data, + ) + + except Exception as e: + raise HTTPException(status_code=500, detail=f"Failed to fetch candidate: {str(e)}") + + +@router.get("/candidates/email/{email}", response_model=SingleRecordResponse) +async def get_candidate_by_email( + email: str, + include_relations: bool = Query(default=True, description="Include related screening data"), +) -> SingleRecordResponse: + """ + Get a candidate by email address. 
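+
+    Example request (address is illustrative):
+
+        GET /api/v1/db/candidates/email/ada@example.com?include_relations=true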
+ """ + try: + with SessionLocal() as session: + query = session.query(Candidate).filter(Candidate.email == email) + + if include_relations: + query = query.options( + joinedload(Candidate.cv_screening_results), + joinedload(Candidate.voice_screening_results), + joinedload(Candidate.interview_scheduling), + joinedload(Candidate.final_decision), + ) + + candidate = query.first() + + if not candidate: + return SingleRecordResponse( + success=False, + table="candidates", + data=None, + message=f"Candidate with email '{email}' not found", + ) + + data = model_to_dict(candidate) + + if include_relations: + data["cv_screening_results"] = serialize_relation(candidate.cv_screening_results) + data["voice_screening_results"] = serialize_relation(candidate.voice_screening_results) + data["interview_scheduling"] = serialize_relation(candidate.interview_scheduling) + data["final_decision"] = serialize_relation(candidate.final_decision, is_list=False) + + return SingleRecordResponse( + success=True, + table="candidates", + data=data, + ) + + except Exception as e: + raise HTTPException(status_code=500, detail=f"Failed to fetch candidate: {str(e)}") + + +@router.get("/cv-screening", response_model=QueryResponse) +async def list_cv_screenings( + candidate_id: Optional[UUID] = Query(default=None, description="Filter by candidate ID"), + min_score: Optional[float] = Query(default=None, ge=0, le=1, description="Minimum overall fit score"), + limit: int = Query(default=100, ge=1, le=1000), + offset: int = Query(default=0, ge=0), +) -> QueryResponse: + """ + List CV screening results with optional filtering. + """ + filters = {} + if candidate_id: + filters["candidate_id"] = str(candidate_id) + if min_score is not None: + filters["overall_fit_score"] = {"$gte": min_score} + + request = QueryRequest( + table=TableName.cv_screening_results, + filters=filters if filters else None, + limit=limit, + offset=offset, + sort_by="timestamp", + sort_order=SortOrder.desc, + ) + return await query_table(request) + + +@router.get("/voice-screening", response_model=QueryResponse) +async def list_voice_screenings( + candidate_id: Optional[UUID] = Query(default=None, description="Filter by candidate ID"), + limit: int = Query(default=100, ge=1, le=1000), + offset: int = Query(default=0, ge=0), +) -> QueryResponse: + """ + List voice screening results with optional filtering. + """ + filters = {} + if candidate_id: + filters["candidate_id"] = str(candidate_id) + + request = QueryRequest( + table=TableName.voice_screening_results, + filters=filters if filters else None, + limit=limit, + offset=offset, + sort_by="timestamp", + sort_order=SortOrder.desc, + ) + return await query_table(request) + + +@router.get("/interviews", response_model=QueryResponse) +async def list_interviews( + candidate_id: Optional[UUID] = Query(default=None, description="Filter by candidate ID"), + status: Optional[str] = Query(default=None, description="Filter by interview status"), + limit: int = Query(default=100, ge=1, le=1000), + offset: int = Query(default=0, ge=0), +) -> QueryResponse: + """ + List interview scheduling records with optional filtering. 
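+
+    Example request (status value is illustrative):
+
+        GET /api/v1/db/interviews?status=scheduled&limit=20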
+ """ + filters = {} + if candidate_id: + filters["candidate_id"] = str(candidate_id) + if status: + filters["status"] = status + + request = QueryRequest( + table=TableName.interview_scheduling, + filters=filters if filters else None, + limit=limit, + offset=offset, + sort_by="start_time", + sort_order=SortOrder.desc, + ) + return await query_table(request) + + +@router.get("/decisions", response_model=QueryResponse) +async def list_decisions( + decision: Optional[str] = Query(default=None, description="Filter by decision (e.g., 'hired', 'rejected')"), + min_score: Optional[float] = Query(default=None, ge=0, le=1, description="Minimum overall score"), + limit: int = Query(default=100, ge=1, le=1000), + offset: int = Query(default=0, ge=0), +) -> QueryResponse: + """ + List final decisions with optional filtering. + """ + filters = {} + if decision: + filters["decision"] = decision + if min_score is not None: + filters["overall_score"] = {"$gte": min_score} + + request = QueryRequest( + table=TableName.final_decision, + filters=filters if filters else None, + limit=limit, + offset=offset, + sort_by="timestamp", + sort_order=SortOrder.desc, + ) + return await query_table(request) + + +@router.get("/stats") +async def get_database_stats() -> dict: + """ + Get summary statistics for all tables. + """ + try: + with SessionLocal() as session: + stats = { + "candidates": { + "total": session.query(Candidate).count(), + }, + "cv_screening_results": { + "total": session.query(CVScreeningResult).count(), + }, + "voice_screening_results": { + "total": session.query(VoiceScreeningResult).count(), + }, + "interview_scheduling": { + "total": session.query(InterviewScheduling).count(), + }, + "final_decision": { + "total": session.query(FinalDecision).count(), + }, + } + + # Get candidate status breakdown + from sqlalchemy import func + status_counts = session.query( + Candidate.status, func.count(Candidate.id) + ).group_by(Candidate.status).all() + + stats["candidates"]["by_status"] = { + str(status.value) if hasattr(status, 'value') else str(status): count + for status, count in status_counts + } + + return {"success": True, "stats": stats} + + except Exception as e: + raise HTTPException(status_code=500, detail=f"Failed to get stats: {str(e)}") + + +@router.get("/health") +async def database_health(): + """Health check for database router.""" + try: + with SessionLocal() as session: + # Simple connectivity check + from sqlalchemy import text + session.execute(text("SELECT 1")) + return {"status": "healthy", "service": "database", "connection": "ok"} + except Exception as e: + return {"status": "unhealthy", "service": "database", "error": str(e)} + diff --git a/src/api/routers/supervisor.py b/src/api/routers/supervisor.py new file mode 100644 index 0000000000000000000000000000000000000000..86ec376b2f64c2c0198ca478ccfe26e4f70acf74 --- /dev/null +++ b/src/api/routers/supervisor.py @@ -0,0 +1,264 @@ +""" +Supervisor Agent Router. + +Handles chat interactions with the HR Supervisor Agent. +Supports both regular and streaming responses. 
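+
+Example batch round-trip (sketch; assumes the API is served on
+localhost:8000, adjust host/port to your deployment):
+
+    import requests
+    r = requests.post(
+        "http://localhost:8000/api/v1/supervisor/chat",
+        json={"message": "Show me all applied candidates"},
+    )
+    body = r.json()
+    print(body["response"], body["thread_id"], body["token_count"])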
+ +============================================================================= +ENDPOINTS: +============================================================================= + +WITH CONTEXT ENGINEERING (CompactingSupervisor wrapper): + - POST /chat : Batch response with automatic context compaction + - POST /chat/stream : Streaming with context compaction [HAS ERRORS - TODO FIX] + +RAW SUPERVISOR (Direct agent access, no wrapper): + - POST /raw/chat : Batch response, direct supervisor agent + - POST /raw/chat/stream : Streaming, direct supervisor agent [HAS ERRORS - TODO FIX] + +UTILITY: + - POST /new : Create new chat session + - GET /health : Health check + +============================================================================= +NOTE: Both streaming endpoints (/chat/stream and /raw/chat/stream) have +known issues that need to be fixed. Use batch endpoints (/chat or /raw/chat) +for reliable operation. +============================================================================= +""" + +import json +import uuid +from fastapi import APIRouter, HTTPException +from fastapi.responses import StreamingResponse + +from langchain_core.messages import HumanMessage +from src.api.schemas.supervisor_chat import ChatRequest, ChatResponse, NewChatResponse +from src.context_eng import compacting_supervisor, count_tokens_for_messages +from src.agents.supervisor.supervisor_v2 import supervisor_agent + + +router = APIRouter() + +@router.post("/chat", response_model=ChatResponse) +async def chat(request: ChatRequest) -> ChatResponse: + """ + Send a message to the HR Supervisor Agent and receive a response. + + Uses CompactingSupervisor wrapper for automatic context management. + When token limit is exceeded, old messages are compacted/summarized. + + The agent can: + - Query the candidate database + - Screen CVs + - Schedule calendar events + - Send emails via Gmail + + Use the returned `thread_id` in subsequent requests to maintain conversation context. + """ + # Generate or use provided thread_id + thread_id = request.thread_id or str(uuid.uuid4())[:8] + + try: + # Config for stateful conversation + config = {"configurable": {"thread_id": thread_id}} + + # Invoke the compacting supervisor wrapper + response = compacting_supervisor.invoke( + {"messages": [HumanMessage(content=request.message)]}, + config=config + ) + + # Extract response and calculate tokens + final_message = response["messages"][-1] + all_messages = response["messages"] + token_count = count_tokens_for_messages(all_messages) + + return ChatResponse( + response=final_message.content, + thread_id=thread_id, + token_count=token_count, + ) + + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Agent execution failed: {str(e)}" + ) + + +@router.post("/chat/stream") +async def chat_stream(request: ChatRequest): + """ + Stream a response from the HR Supervisor Agent using Server-Sent Events (SSE). + + ⚠️ WARNING: This endpoint has known issues and needs to be fixed. + Use /raw/chat/stream for reliable streaming, or /chat for batch requests. + + Uses CompactingSupervisor wrapper for automatic context management. + + Yields chunks as SSE events: + - event: token - A content token from the AI response + - event: done - Final message with metadata (token_count, thread_id) + - event: error - Error occurred + + Use the returned `thread_id` in subsequent requests to maintain conversation context. 
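+
+    Consuming the stream from Python (sketch; the URL is illustrative and
+    `requests` is already used elsewhere in this project):
+
+        import requests
+        with requests.post(
+            "http://localhost:8000/api/v1/supervisor/chat/stream",
+            json={"message": "hi"},
+            stream=True,
+        ) as r:
+            for line in r.iter_lines():
+                if line:
+                    print(line.decode())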
+ """ + thread_id = request.thread_id or str(uuid.uuid4())[:8] + + def generate(): + try: + config = {"configurable": {"thread_id": thread_id}} + + for chunk in compacting_supervisor.stream( + {"messages": [HumanMessage(content=request.message)]}, + config=config + ): + if chunk["type"] == "token": + # SSE format: event type + data + yield f"event: token\ndata: {json.dumps({'content': chunk['content']})}\n\n" + elif chunk["type"] == "done": + yield f"event: done\ndata: {json.dumps({'thread_id': thread_id, 'token_count': chunk['token_count']})}\n\n" + elif chunk["type"] == "error": + yield f"event: error\ndata: {json.dumps({'error': chunk['content']})}\n\n" + + except Exception as e: + yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n" + + return StreamingResponse( + generate(), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", # Disable nginx buffering + } + ) + + +@router.post("/new", response_model=NewChatResponse) +async def new_chat() -> NewChatResponse: + """ + Create a new chat session with a fresh thread ID. + + Returns a new thread_id to use for subsequent chat requests. + """ + thread_id = str(uuid.uuid4())[:8] + + return NewChatResponse( + thread_id=thread_id, + message="New chat session created. Use the thread_id for your conversations.", + ) + + +@router.get("/health") +async def supervisor_health(): + """Health check for supervisor router.""" + return {"status": "healthy", "service": "supervisor"} + + +# ============================================================================= +# RAW SUPERVISOR ENDPOINTS (No CompactingSupervisor wrapper) +# ============================================================================= + +@router.post("/raw/chat", response_model=ChatResponse) +async def raw_chat(request: ChatRequest) -> ChatResponse: + """ + Send a message to the raw HR Supervisor Agent (without context compaction). + + This endpoint bypasses the CompactingSupervisor wrapper, giving direct access + to the underlying supervisor agent. Useful for debugging or when you want + full control over context management. + + Use the returned `thread_id` in subsequent requests to maintain conversation context. + """ + thread_id = request.thread_id or str(uuid.uuid4())[:8] + + try: + config = {"configurable": {"thread_id": thread_id}} + + # Invoke the raw supervisor agent directly + response = supervisor_agent.invoke( + {"messages": [HumanMessage(content=request.message)]}, + config=config + ) + + # Extract response and calculate tokens + final_message = response["messages"][-1] + all_messages = response["messages"] + token_count = count_tokens_for_messages(all_messages) + + return ChatResponse( + response=final_message.content, + thread_id=thread_id, + token_count=token_count, + ) + + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Raw agent execution failed: {str(e)}" + ) + + +@router.post("/raw/chat/stream") +async def raw_chat_stream(request: ChatRequest): + """ + Stream a response from the raw HR Supervisor Agent using Server-Sent Events (SSE). + + ⚠️ WARNING: This endpoint has known issues and needs to be fixed. + Use /raw/chat for reliable batch requests. + + This endpoint bypasses the CompactingSupervisor wrapper, giving direct access + to the underlying supervisor agent's streaming capabilities. 
+ + Yields chunks as SSE events: + - event: token - A content token from the AI response + - event: done - Final message with metadata (token_count, thread_id) + - event: error - Error occurred + """ + thread_id = request.thread_id or str(uuid.uuid4())[:8] + + def generate(): + try: + config = {"configurable": {"thread_id": thread_id}} + full_response_content = "" + + # Stream from the raw supervisor agent + for chunk in supervisor_agent.stream( + {"messages": [HumanMessage(content=request.message)]}, + config=config, + stream_mode="messages" + ): + # chunk is a tuple: (message, metadata) + message, metadata = chunk + + # Only yield content from AI messages that have content + if hasattr(message, 'content') and message.content: + msg_type = message.__class__.__name__ + if 'AIMessage' in msg_type: + yield f"event: token\ndata: {json.dumps({'content': message.content})}\n\n" + full_response_content += message.content + + # Get final state for token counting + final_state = supervisor_agent.get_state(config) + token_count = 0 + if final_state and hasattr(final_state, 'values'): + final_messages = final_state.values.get("messages", []) + token_count = count_tokens_for_messages(final_messages) + + yield f"event: done\ndata: {json.dumps({'thread_id': thread_id, 'token_count': token_count})}\n\n" + + except Exception as e: + yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n" + + return StreamingResponse( + generate(), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", + } + ) + diff --git a/src/api/routers/voice_screener.py b/src/api/routers/voice_screener.py new file mode 100644 index 0000000000000000000000000000000000000000..5acef981bd8fd7f2e37c377c4324a555538c16f6 --- /dev/null +++ b/src/api/routers/voice_screener.py @@ -0,0 +1,265 @@ +""" +Voice Screener API Router. +Handles voice screening sessions, configuration, and audio/transcript saving. +""" +import logging +import os +from typing import Optional +from pathlib import Path + +from fastapi import APIRouter, HTTPException, Query +from pydantic import BaseModel + +from src.agents.voice_screening.session_service import ( + get_session_config, + save_voice_screening_session +) +from src.agents.voice_screening.audio_processor import combine_and_export_audio + +logger = logging.getLogger(__name__) + +router = APIRouter() + + +# Request/Response Models +class CreateSessionRequest(BaseModel): + candidate_id: str + + +class CreateSessionResponse(BaseModel): + session_id: str + candidate_name: str + job_title: str + message: str + + +class SessionConfigResponse(BaseModel): + candidate_name: str + job_title: str + instructions: str + questions: list[str] + config: dict + + +class SaveSessionRequest(BaseModel): + session_id: str + candidate_id: str + transcript_text: str + proxy_token: str # Token to retrieve audio chunks from proxy + + +class SaveSessionResponse(BaseModel): + audio_file_path: Optional[str] + message: str + + +@router.post("/session/create", response_model=CreateSessionResponse) +async def create_session(request: CreateSessionRequest): + """ + Create a new voice screening session for a candidate. 
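+
+    Example request body (candidate_id is illustrative):
+
+        {"candidate_id": "1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed"}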
+ + Args: + request: Contains candidate_id + + Returns: + Session information including session_id + """ + try: + import uuid + + # Generate session ID + session_id = str(uuid.uuid4()) + + # Get session config (validates candidate exists) + config = get_session_config(request.candidate_id) + + logger.info(f"Created session {session_id} for candidate {request.candidate_id}") + + return CreateSessionResponse( + session_id=session_id, + candidate_name=config["candidate_name"], + job_title=config["job_title"], + message="Session created successfully" + ) + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) + except Exception as e: + logger.error(f"Error creating session: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Failed to create session: {str(e)}") + + +@router.get("/session/{session_id}/config", response_model=SessionConfigResponse) +async def get_config(session_id: str, candidate_id: str = Query(...)): + """ + Get session configuration for a candidate. + + Args: + session_id: Session identifier (for logging) + candidate_id: Candidate UUID + + Returns: + Session configuration including instructions and questions + """ + try: + config = get_session_config(candidate_id) + + logger.info(f"Retrieved config for session {session_id}, candidate {candidate_id}") + + return SessionConfigResponse( + candidate_name=config["candidate_name"], + job_title=config["job_title"], + instructions=config["instructions"], + questions=config["questions"], + config=config["config"] + ) + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) + except Exception as e: + logger.error(f"Error getting config: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Failed to get config: {str(e)}") + + +@router.post("/session/{session_id}/save", response_model=SaveSessionResponse) +async def save_session(session_id: str, request: SaveSessionRequest): + """ + Save audio recording and transcript for a session. + + This endpoint: + 1. Retrieves audio chunks from the proxy using the token + 2. Combines and saves the audio file + 3. 
Saves transcript and audio path to database + + Args: + session_id: Session identifier (must match request.session_id) + request: Contains candidate_id, transcript_text, and proxy_token + + Returns: + Audio file path and success message + """ + if session_id != request.session_id: + raise HTTPException(status_code=400, detail="Session ID mismatch") + + try: + # Import here to avoid circular dependency + import requests + + # Get proxy URL from environment + proxy_url = os.getenv("WEBSOCKET_PROXY_URL", "ws://localhost:8000/ws/realtime") + proxy_base = proxy_url.replace("ws://", "http://").replace("wss://", "https://").replace("/ws/realtime", "") + + # Retrieve audio chunks from proxy + try: + response = requests.post( + f"{proxy_base}/audio/retrieve", + params={"token": request.proxy_token}, + json={"session_id": session_id}, + timeout=30 + ) + response.raise_for_status() + audio_data = response.json() + + import base64 + + user_chunks = audio_data.get("user_chunks", []) + # Decode Base64 audio data + for chunk in user_chunks: + if isinstance(chunk.get("data"), str): + chunk["data"] = base64.b64decode(chunk["data"]) + + agent_chunks = audio_data.get("agent_chunks", []) + # Decode Base64 audio data + for chunk in agent_chunks: + if isinstance(chunk.get("data"), str): + chunk["data"] = base64.b64decode(chunk["data"]) + + session_start_time = audio_data.get("session_start_time") + + # Get transcript from proxy if available + proxy_transcript = audio_data.get("transcript", []) + transcript_text = request.transcript_text + + if proxy_transcript: + logger.info(f"Using transcript from proxy ({len(proxy_transcript)} entries)") + transcript_text = "\n".join([ + f"{entry.get('speaker', 'unknown')}: {entry.get('text', '')}" + for entry in proxy_transcript + ]) + + if not session_start_time: + raise ValueError("Session start time not found in proxy response") + + logger.info(f"Audio Debug: Retrieved {len(user_chunks)} user chunks and {len(agent_chunks)} agent chunks") + if user_chunks: + logger.info(f"Audio Debug: First user chunk size: {len(user_chunks[0].get('data', b''))} bytes") + + except Exception as e: + logger.error(f"Error retrieving audio from proxy: {e}") + raise HTTPException(status_code=500, detail=f"Failed to retrieve audio from proxy: {str(e)}") + + # Combine audio chunks + audio_file_path = None + if user_chunks or agent_chunks: + try: + logger.info("Audio Debug: Combining audio chunks...") + wav_data = combine_and_export_audio( + user_chunks=user_chunks, + agent_chunks=agent_chunks, + session_start_time=session_start_time, + session_id=session_id + ) + + logger.info(f"Audio Debug: Generated WAV data size: {len(wav_data)} bytes") + + # Save WAV file + recordings_dir = Path("src/database/voice_recordings") + recordings_dir.mkdir(parents=True, exist_ok=True) + audio_file_path = str(recordings_dir / f"{session_id}.wav") + + with open(audio_file_path, "wb") as f: + f.write(wav_data) + + logger.info(f"Saved audio file: {audio_file_path}") + + # Verify file exists and size + if os.path.exists(audio_file_path): + size = os.path.getsize(audio_file_path) + logger.info(f"Audio Debug: File verified on disk. 
Size: {size} bytes") + else: + logger.error("Audio Debug: File NOT found on disk after writing!") + + except Exception as e: + logger.error(f"Error processing audio: {e}", exc_info=True) + # Continue even if audio fails - we still want to save the transcript + else: + logger.warning("Audio Debug: No audio chunks found to process!") + + # Save to database + try: + save_voice_screening_session( + candidate_id=request.candidate_id, + session_id=session_id, + transcript_text=transcript_text, + audio_url=audio_file_path + ) + except ValueError as e: + # Candidate not found + logger.warning(f"Failed to save session: {e}") + raise HTTPException(status_code=404, detail=str(e)) + + logger.info(f"Saved session {session_id} for candidate {request.candidate_id}") + + return SaveSessionResponse( + audio_file_path=audio_file_path, + message="Session saved successfully" + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error saving session: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Failed to save session: {str(e)}") + + +@router.get("/health") +async def health_check(): + """Health check endpoint.""" + return {"status": "healthy", "service": "voice-screener"} diff --git a/src/api/schemas/__init__.py b/src/api/schemas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5398f4862c5c67e57fea1b4fd26b5f3c2bd3559a --- /dev/null +++ b/src/api/schemas/__init__.py @@ -0,0 +1,33 @@ +"""API schemas.""" + +from .cv_upload import SubmitResponse +from .supervisor_chat import ChatRequest, ChatResponse, NewChatResponse +from .database import ( + TableName, + QueryRequest, + QueryResponse, + SingleRecordResponse, + CandidateResponse, + CVScreeningResponse, + VoiceScreeningResponse, + InterviewSchedulingResponse, + FinalDecisionResponse, +) + +__all__ = [ + "SubmitResponse", + "ChatRequest", + "ChatResponse", + "NewChatResponse", + # Database schemas + "TableName", + "QueryRequest", + "QueryResponse", + "SingleRecordResponse", + "CandidateResponse", + "CVScreeningResponse", + "VoiceScreeningResponse", + "InterviewSchedulingResponse", + "FinalDecisionResponse", +] + diff --git a/src/api/schemas/cv_upload.py b/src/api/schemas/cv_upload.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e2c4a30f0f6c30c608a696d1f5dc8213853cd5 --- /dev/null +++ b/src/api/schemas/cv_upload.py @@ -0,0 +1,14 @@ +"""CV Upload API schemas.""" + +from pydantic import BaseModel, Field + + +class SubmitResponse(BaseModel): + """Response model for CV submission.""" + success: bool = Field(..., description="Whether the submission was successful") + message: str = Field(..., description="Status message") + candidate_name: str = Field(default="", description="Name of the candidate") + email: str = Field(default="", description="Email of the candidate") + cv_file_path: str = Field(default="", description="Path where CV was saved") + already_exists: bool = Field(default=False, description="True if candidate already applied") + diff --git a/src/api/schemas/database.py b/src/api/schemas/database.py new file mode 100644 index 0000000000000000000000000000000000000000..b7acbe2031e83c6b9de9625ec4069c970d5de9c3 --- /dev/null +++ b/src/api/schemas/database.py @@ -0,0 +1,191 @@ +""" +Database Query Schemas. + +Flexible schemas for querying any table in the recruitment database. 
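+
+A typical request these schemas validate (illustrative values):
+
+    QueryRequest(
+        table=TableName.candidates,
+        filters={"status": "applied"},
+        fields=["id", "full_name", "email"],
+        limit=10,
+        sort_by="created_at",
+        sort_order=SortOrder.desc,
+    )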
+""" + +from datetime import datetime +from typing import Any, Optional +from uuid import UUID +from enum import Enum + +from pydantic import BaseModel, Field + + +# ================================================================================== +# ENUMS +# ================================================================================== + +class TableName(str, Enum): + """Available tables for querying.""" + candidates = "candidates" + cv_screening_results = "cv_screening_results" + voice_screening_results = "voice_screening_results" + interview_scheduling = "interview_scheduling" + final_decision = "final_decision" + + +class SortOrder(str, Enum): + """Sort order options.""" + asc = "asc" + desc = "desc" + + +# ================================================================================== +# REQUEST SCHEMAS +# ================================================================================== + +class QueryRequest(BaseModel): + """Flexible query request for any table.""" + + table: TableName = Field(..., description="Table to query") + + # Filtering + filters: Optional[dict[str, Any]] = Field( + default=None, + description="Key-value filters (e.g., {'email': 'john@example.com', 'status': 'applied'})" + ) + + # Field selection + fields: Optional[list[str]] = Field( + default=None, + description="Specific fields to return. If None, returns all fields." + ) + + # Include related data + include_relations: Optional[bool] = Field( + default=False, + description="Include related tables (only for candidates table)" + ) + + # Pagination + limit: Optional[int] = Field(default=100, ge=1, le=1000, description="Max records to return") + offset: Optional[int] = Field(default=0, ge=0, description="Number of records to skip") + + # Sorting + sort_by: Optional[str] = Field(default=None, description="Field to sort by") + sort_order: SortOrder = Field(default=SortOrder.desc, description="Sort order") + + model_config = { + "json_schema_extra": { + "examples": [ + { + "table": "candidates", + "filters": {"status": "applied"}, + "fields": ["id", "full_name", "email", "status"], + "limit": 10 + }, + { + "table": "cv_screening_results", + "filters": {"overall_fit_score": {"$gte": 0.8}}, + "sort_by": "overall_fit_score", + "sort_order": "desc" + } + ] + } + } + + +# ================================================================================== +# RESPONSE SCHEMAS +# ================================================================================== + +class CandidateResponse(BaseModel): + """Candidate data response.""" + id: UUID + full_name: str + email: str + phone_number: Optional[str] = None + cv_file_path: Optional[str] = None + parsed_cv_file_path: Optional[str] = None + status: str + created_at: datetime + updated_at: datetime + + # Related data (populated when include_relations=True) + cv_screening_results: Optional[list[dict[str, Any]]] = None + voice_screening_results: Optional[list[dict[str, Any]]] = None + interview_scheduling: Optional[list[dict[str, Any]]] = None + final_decision: Optional[dict[str, Any]] = None + + model_config = {"from_attributes": True} + + +class CVScreeningResponse(BaseModel): + """CV Screening result response.""" + id: UUID + candidate_id: UUID + job_title: Optional[str] = None + skills_match_score: Optional[float] = None + experience_match_score: Optional[float] = None + education_match_score: Optional[float] = None + overall_fit_score: Optional[float] = None + llm_feedback: Optional[str] = None + reasoning_trace: Optional[dict[str, Any]] = None + timestamp: 
datetime + + model_config = {"from_attributes": True} + + +class VoiceScreeningResponse(BaseModel): + """Voice Screening result response.""" + id: UUID + candidate_id: UUID + call_sid: Optional[str] = None + transcript_text: Optional[str] = None + sentiment_score: Optional[float] = None + confidence_score: Optional[float] = None + communication_score: Optional[float] = None + llm_summary: Optional[str] = None + llm_judgment_json: Optional[dict[str, Any]] = None + audio_url: Optional[str] = None + timestamp: datetime + + model_config = {"from_attributes": True} + + +class InterviewSchedulingResponse(BaseModel): + """Interview scheduling response.""" + id: UUID + candidate_id: UUID + calendar_event_id: Optional[str] = None + event_summary: Optional[str] = None + start_time: Optional[datetime] = None + end_time: Optional[datetime] = None + status: Optional[str] = None + timestamp: datetime + + model_config = {"from_attributes": True} + + +class FinalDecisionResponse(BaseModel): + """Final decision response.""" + id: UUID + candidate_id: UUID + overall_score: Optional[float] = None + decision: Optional[str] = None + llm_rationale: Optional[str] = None + human_notes: Optional[str] = None + timestamp: datetime + + model_config = {"from_attributes": True} + + +class QueryResponse(BaseModel): + """Generic query response wrapper.""" + success: bool + table: str + total_count: int + returned_count: int + offset: int + data: list[dict[str, Any]] + message: Optional[str] = None + + +class SingleRecordResponse(BaseModel): + """Single record response.""" + success: bool + table: str + data: Optional[dict[str, Any]] = None + message: Optional[str] = None + diff --git a/src/api/schemas/supervisor_chat.py b/src/api/schemas/supervisor_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..4dfc7b15925d61005c33101577fefa185de0bf4c --- /dev/null +++ b/src/api/schemas/supervisor_chat.py @@ -0,0 +1,23 @@ +from pydantic import BaseModel, Field +from typing import Optional + +class ChatRequest(BaseModel): + """Request model for chat endpoint.""" + message: str = Field(..., description="User message to send to the supervisor agent") + thread_id: Optional[str] = Field( + default=None, + description="Thread ID for conversation continuity. If not provided, a new thread is created." 
+ ) + + +class ChatResponse(BaseModel): + """Response model for chat endpoint.""" + response: str = Field(..., description="Agent's response message") + thread_id: str = Field(..., description="Thread ID for conversation continuity") + token_count: int = Field(..., description="Current token count in context window") + + +class NewChatResponse(BaseModel): + """Response model for creating a new chat session.""" + thread_id: str = Field(..., description="New thread ID for the conversation") + message: str = Field(..., description="Welcome message") \ No newline at end of file diff --git a/src/configs/__init__.py b/src/configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..064d9378be296ceb7bdd38245bb453a76a615ad3 --- /dev/null +++ b/src/configs/__init__.py @@ -0,0 +1,22 @@ +"""Application configuration.""" + +from .cv import CVUploadSettings, get_cv_settings +from .database import DatabaseSettings, get_database_settings +from .openai import OpenAISettings, get_openai_settings, get_openai_api_key +from .settings import Settings, get_settings + +__all__ = [ + # CV Upload + "CVUploadSettings", + "get_cv_settings", + # Database + "DatabaseSettings", + "get_database_settings", + # OpenAI + "OpenAISettings", + "get_openai_settings", + "get_openai_api_key", + # Main settings + "Settings", + "get_settings", +] diff --git a/src/configs/cv.py b/src/configs/cv.py new file mode 100644 index 0000000000000000000000000000000000000000..8eb87b44e2f8edf274e66a6162e81d40390ff2f6 --- /dev/null +++ b/src/configs/cv.py @@ -0,0 +1,36 @@ +"""CV Upload settings.""" + +from functools import lru_cache +from pathlib import Path +from pydantic import Field +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class CVUploadSettings(BaseSettings): + """Settings for CV upload and parsing.""" + + model_config = SettingsConfigDict( + env_prefix="CV_", # CV_UPLOAD_PATH, CV_PARSED_PATH + extra="ignore", + ) + + upload_path: Path = Field( + default=Path("src/database/cvs/uploads"), + description="Directory for uploaded CV files", + ) + parsed_path: Path = Field( + default=Path("src/database/cvs/parsed"), + description="Directory for parsed CV markdown files", + ) + + def ensure_dirs(self) -> None: + """Create upload and parsed directories if they don't exist.""" + self.upload_path.mkdir(parents=True, exist_ok=True) + self.parsed_path.mkdir(parents=True, exist_ok=True) + + +@lru_cache +def get_cv_settings() -> CVUploadSettings: + """Get cached CV upload settings.""" + return CVUploadSettings() + diff --git a/src/configs/database.py b/src/configs/database.py new file mode 100644 index 0000000000000000000000000000000000000000..49cbdfb58b9dcfc0e83aa6a9a81f62d00c19de9b --- /dev/null +++ b/src/configs/database.py @@ -0,0 +1,42 @@ +"""Database connection settings.""" + +from functools import lru_cache +from pydantic import Field +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class DatabaseSettings(BaseSettings): + """PostgreSQL database connection settings.""" + + model_config = SettingsConfigDict( + env_prefix="POSTGRES_", + extra="ignore", + ) + + host: str = Field(default="localhost") + port: int = Field(default=5432) + user: str = Field(default="agentic_user") + password: str = Field(default="") + db: str = Field(default="agentic_hr") + + @property + def url(self) -> str: + """Build database URL.""" + return f"postgresql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}" + + @property + def psycopg2_url(self) -> str: + """Build database URL with 
psycopg2 driver.""" + return f"postgresql+psycopg2://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}" + + @property + def async_url(self) -> str: + """Build async database URL for SQLAlchemy async.""" + return f"postgresql+asyncpg://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}" + + +@lru_cache +def get_database_settings() -> DatabaseSettings: + """Get cached database settings.""" + return DatabaseSettings() + diff --git a/src/configs/openai.py b/src/configs/openai.py new file mode 100644 index 0000000000000000000000000000000000000000..5cae55e9ea50ccb1f809a1ddc7b9b8c61cf203b8 --- /dev/null +++ b/src/configs/openai.py @@ -0,0 +1,89 @@ +"""OpenAI API settings.""" + +import sys +from functools import lru_cache +from pydantic import Field, model_validator +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class OpenAISettings(BaseSettings): + """ + OpenAI API configuration. + + Validates that OPENAI_API_KEY is set and provides a helpful error message + if missing, especially useful in Docker environments. + """ + + model_config = SettingsConfigDict( + extra="ignore", + ) + + api_key: str = Field( + default="", + alias="OPENAI_API_KEY", + description="OpenAI API key for model access", + ) + + @model_validator(mode="after") + def validate_api_key(self) -> "OpenAISettings": + """Validate that API key is set and provide helpful error message.""" + if not self.api_key: + error_message = """ +╔══════════════════════════════════════════════════════════════════════════════╗ +║ ⚠️ OPENAI_API_KEY NOT SET ⚠️ ║ +╠══════════════════════════════════════════════════════════════════════════════╣ +║ ║ +║ The OPENAI_API_KEY environment variable is required but not set. ║ +║ ║ +║ To fix this: ║ +║ ║ +║ 1. Create a .env file in the project root: ║ +║ OPENAI_API_KEY=sk-your-api-key-here ║ +║ ║ +║ 2. Or set it directly in your shell: ║ +║ export OPENAI_API_KEY=sk-your-api-key-here ║ +║ ║ +║ 3. Or pass it to Docker: ║ +║ docker compose --env-file .env -f docker/docker-compose.yml up ║ +║ ║ +║ Get your API key at: https://platform.openai.com/api-keys ║ +║ ║ +╚══════════════════════════════════════════════════════════════════════════════╝ +""" + print(error_message, file=sys.stderr) + raise ValueError("OPENAI_API_KEY environment variable is required") + + # Basic validation that it looks like an OpenAI key + if not (self.api_key.startswith("sk-") or self.api_key.startswith("org-")): + print( + "⚠️ Warning: OPENAI_API_KEY doesn't start with 'sk-' - " + "make sure it's a valid OpenAI API key.", + file=sys.stderr + ) + + return self + + def __repr__(self) -> str: + """Safe representation without exposing the key.""" + masked = f"{self.api_key[:7]}...{self.api_key[-4:]}" if len(self.api_key) > 11 else "***" + return f"OpenAISettings(api_key={masked})" + + +@lru_cache +def get_openai_settings() -> OpenAISettings: + """ + Get cached OpenAI settings. + + Raises ValueError with helpful message if OPENAI_API_KEY is not set. + """ + return OpenAISettings() + + +def get_openai_api_key() -> str: + """ + Convenience function to get just the API key. + + Raises ValueError with helpful message if OPENAI_API_KEY is not set. + """ + return get_openai_settings().api_key + diff --git a/src/configs/settings.py b/src/configs/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..adc57aa40f54fe774a3bdb4c07a0f11ab24e8807 --- /dev/null +++ b/src/configs/settings.py @@ -0,0 +1,46 @@ +""" +Main application settings. 
+ +This module aggregates all settings into a single Settings class. +For most use cases, import individual settings directly: + + from src.configs import get_cv_settings, get_openai_settings + + cv = get_cv_settings() + openai = get_openai_settings() +""" + +from functools import lru_cache +from pydantic import Field +from pydantic_settings import BaseSettings, SettingsConfigDict +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + +from .cv import CVUploadSettings +from .database import DatabaseSettings +from .openai import OpenAISettings + + +class Settings(BaseSettings): + """ + Aggregated application settings. + + Combines all configuration in one place. Individual settings + can also be accessed via their dedicated getter functions. + """ + + model_config = SettingsConfigDict( + extra="ignore", + ) + + cv: CVUploadSettings = Field(default_factory=CVUploadSettings) + database: DatabaseSettings = Field(default_factory=DatabaseSettings) + openai: OpenAISettings = Field(default_factory=OpenAISettings) + + +@lru_cache +def get_settings() -> Settings: + """Get cached main settings instance.""" + return Settings() diff --git a/src/context_eng/__init__.py b/src/context_eng/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6c970611492b40d4eb3a0171fdcbab239dc1a6e --- /dev/null +++ b/src/context_eng/__init__.py @@ -0,0 +1,23 @@ +""" +Context Engineering Module. + +Handles context window optimization and memory management for Long-Running Agents. +Implements "Compactive Summarization" to prevent token overflow while preserving +critical conversation history. +""" + +from .token_counter import count_tokens_for_messages +from .history_manager import HistoryManager +from .compacting_supervisor import CompactingSupervisor, compacting_supervisor, history_manager + +__all__ = [ + # Utilities + "count_tokens_for_messages", + # Classes + "HistoryManager", + "CompactingSupervisor", + # Singletons + "compacting_supervisor", + "history_manager", +] + diff --git a/src/context_eng/compacting_supervisor.py b/src/context_eng/compacting_supervisor.py new file mode 100644 index 0000000000000000000000000000000000000000..b250a2616b9affff38dee3e8bcd52bcffde750b4 --- /dev/null +++ b/src/context_eng/compacting_supervisor.py @@ -0,0 +1,141 @@ +""" +Compacting Supervisor - Agent wrapper with automatic context management. + +Wraps an agent to enforce Context Window limits via 'Compaction'. +Implements the Interceptor Pattern to transparently manage token usage. +""" + +from typing import Dict, Any, Generator + +from src.agents.supervisor.supervisor_v2 import supervisor_agent, memory + +from .token_counter import count_tokens_for_messages +from .history_manager import HistoryManager + + +class CompactingSupervisor: + """ + Wraps an agent to enforce Context Window limits via 'Compaction'. + + Technique (Interceptor Pattern): + 1. Intercepts the agent's execution flow. + 2. Runs the agent normally. + 3. Post-execution: Checks if the total context (tokens) exceeds the limit. + 4. If exceeded, triggers `HistoryManager` to compact old history and rewrite memory. + + This ensures the agent remains "forever young" regarding token usage, + without losing long-term context. 
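+
+    Example (illustrative sketch of the wrapper's drop-in usage):
+        >>> sup = CompactingSupervisor(agent, history_manager, token_limit=3000)
+        >>> cfg = {"configurable": {"thread_id": "demo-thread"}}
+        >>> result = sup.invoke({"messages": [HumanMessage(content="Hi")]}, cfg)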
+ """ + + def __init__(self, agent, history_manager: HistoryManager, token_limit: int = 3000, compaction_ratio: float = 0.5): + self.agent = agent + self.history_manager = history_manager + self.token_limit = token_limit + self.compaction_ratio = compaction_ratio + + def invoke(self, input_data: Dict[str, Any], config: Dict[str, Any]) -> Dict[str, Any]: + """ + Execute the agent and perform context maintenance if needed. + """ + thread_id = config.get("configurable", {}).get("thread_id") + + # 1. Invoke the agent + response = self.agent.invoke(input_data, config) + + # 2. Check total tokens after response + if thread_id and "messages" in response: + all_messages = response["messages"] + total_tokens = count_tokens_for_messages(all_messages) + + if total_tokens > self.token_limit: + print(f"Tokens ({total_tokens}) exceeded limit ({self.token_limit}). Compacting...", flush=True) + try: + # Delegate complex logic to HistoryManager + compacted_messages = self.history_manager.compact_messages( + all_messages, + compaction_ratio=self.compaction_ratio + ) + self.history_manager.replace_thread_history(thread_id, compacted_messages) + + # Update response to reflect compacted state so UI sees the change + response["messages"] = compacted_messages + + # Verify reduction + new_tokens = count_tokens_for_messages(compacted_messages) + print(f"Compaction complete. {total_tokens} -> {new_tokens}", flush=True) + except Exception as e: + print(f"Compaction failed: {e}", flush=True) + + return response + + def stream(self, input_data: Dict[str, Any], config: Dict[str, Any]) -> Generator[Dict[str, Any], None, None]: + """ + Stream the agent response token by token, then perform compaction if needed. + + Yields: + dict: Streaming chunks with 'type' and 'content' keys. + - type='token': A content token from the AI response + - type='done': Final message with token count + - type='error': Error occurred + """ + thread_id = config.get("configurable", {}).get("thread_id") + full_response_content = "" + final_messages = [] + + try: + # Stream from the agent + for chunk in self.agent.stream(input_data, config, stream_mode="messages"): + # chunk is a tuple: (message, metadata) + message, metadata = chunk + + # Only yield content from AI messages that have content + if hasattr(message, 'content') and message.content: + # Check if this is an AIMessageChunk (streaming token) + msg_type = message.__class__.__name__ + if 'AIMessage' in msg_type: + yield {"type": "token", "content": message.content} + full_response_content += message.content + + # After streaming completes, get the final state for compaction check + # We need to get the current state from memory + final_state = self.agent.get_state(config) + if final_state and hasattr(final_state, 'values'): + final_messages = final_state.values.get("messages", []) + + # Perform compaction if needed + token_count = 0 + if thread_id and final_messages: + token_count = count_tokens_for_messages(final_messages) + + if token_count > self.token_limit: + print(f"Tokens ({token_count}) exceeded limit ({self.token_limit}). Compacting...", flush=True) + try: + compacted_messages = self.history_manager.compact_messages( + final_messages, + compaction_ratio=self.compaction_ratio + ) + self.history_manager.replace_thread_history(thread_id, compacted_messages) + token_count = count_tokens_for_messages(compacted_messages) + print(f"Compaction complete. 
New token count: {token_count}", flush=True) + except Exception as e: + print(f"Compaction failed: {e}", flush=True) + + yield {"type": "done", "token_count": token_count} + + except Exception as e: + yield {"type": "error", "content": str(e)} + + +# ============================================================================= +# SINGLETON INSTANCES +# ============================================================================= + +history_manager = HistoryManager(memory_saver=memory) + +compacting_supervisor = CompactingSupervisor( + agent=supervisor_agent, + history_manager=history_manager, + token_limit=500, + compaction_ratio=0.5 +) + diff --git a/src/context_eng/history_manager.py b/src/context_eng/history_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..29dc78205f12fa227dbad65ab1e6af6525985243 --- /dev/null +++ b/src/context_eng/history_manager.py @@ -0,0 +1,175 @@ +""" +History Manager for conversation memory and compaction. + +Handles persistent conversation state and implements "Compactive Summarization" +to prevent token overflow while preserving critical conversation history. +""" + +import time +import random +import uuid +from datetime import datetime +from typing import List + +from langchain_openai import ChatOpenAI +from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage, AIMessage + +from src.prompts import get_prompt + + +class HistoryManager: + """ + Manages persistent conversation state and implements compaction logic. + + Responsibilities: + 1. Compaction: Summarizing old messages to save tokens. + 2. Persistence: Safely updating the low-level checkpoint storage. + """ + + def __init__(self, memory_saver): + self.memory = memory_saver + + def _messages_to_text(self, messages: List[BaseMessage]) -> str: + """Convert messages to a plain text transcript.""" + text_parts = [] + for msg in messages: + role = msg.__class__.__name__ + content = msg.content + if isinstance(content, str): + text_parts.append(f"{role}: {content}") + else: + text_parts.append(f"{role}: {str(content)}") + return "\n\n".join(text_parts) + + def _is_tool_message(self, msg: BaseMessage) -> bool: + """Check if a message is a ToolMessage or Tool output.""" + msg_type = getattr(msg, "type", None) + role = getattr(msg, "role", None) + return msg_type == "tool" or role == "tool" or msg.__class__.__name__ == "ToolMessage" + + def compact_messages(self, messages: List[BaseMessage], compaction_ratio: float = 0.5) -> List[BaseMessage]: + """ + Apply "Compactive Summarization" to the conversation history. + + Technique: + - Splits history into Old and Recent based on compaction_ratio. + - Summarizes Old messages into a single narrative block using an LLM. + - Preserves the System Prompt and Recent messages verbatim. + + Args: + messages: Full list of conversation messages. + compaction_ratio: Fraction of messages to compact (0.0 to 1.0). + - 0.5 (Default): Summarizes 50% (Oldest half). + - 0.8: Aggressive. Summarizes 80% (Keeps only very recent messages). + - 0.2: Gentle. Summarizes only the oldest 20%. + + Returns: + List[BaseMessage]: optimized list with summary replacing old history. 
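+
+        Example (illustrative; the 10-message case described in info.md):
+            >>> # ratio=0.5 -> oldest 5 messages summarized, newest 5 kept verbatim
+            >>> compacted = manager.compact_messages(messages, compaction_ratio=0.5)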
+ """ + if len(messages) < 2: + return messages + + system_msg = None + conversation_msgs = messages + + # Preserve system prompt + if isinstance(messages[0], SystemMessage): + system_msg = messages[0] + conversation_msgs = messages[1:] + + if len(conversation_msgs) < 2: + return messages + + # Calculate split point based on ratio + split_idx = int(len(conversation_msgs) * compaction_ratio) + + # Ensure we compact at least something if ratio > 0, but keep at least one recent message + split_idx = max(1, min(split_idx, len(conversation_msgs) - 1)) + + first_half = conversation_msgs[:split_idx] + second_half = conversation_msgs[split_idx:] + + # Ensure second_half does not start with orphaned tool message + while second_half and self._is_tool_message(second_half[0]): + if first_half: + second_half.insert(0, first_half.pop()) + else: + second_half.pop(0) + + # Generate summary + compactor_prompt = get_prompt(template_name="Compactor", latest_version=True) + conversation_text = self._messages_to_text(first_half) + + llm = ChatOpenAI(model="gpt-4o-mini", temperature=0, max_tokens=1000) + messages_for_llm = [ + SystemMessage(content=compactor_prompt), + HumanMessage(content=f"Conversation history to summarize:\n\n{conversation_text}") + ] + + response = llm.invoke(messages_for_llm) + summary_text = response.content + + print(f"\n{'='*80}\n📝 COMPACTION MESSAGE:\n{summary_text}\n{'='*80}\n", flush=True) + + summary_message = AIMessage(content=f"[COMPACTED SUMMARY OF EARLIER CONVERSATION]\n\n{summary_text}") + + result = [] + if system_msg: + result.append(system_msg) + result.append(summary_message) + result.extend(second_half) + + return result + + def replace_thread_history(self, thread_id: str, new_messages: List[BaseMessage]) -> bool: + """ + Atomically overwrite the message history in the checkpoint storage. + + This bypasses the standard append-only reducer to force a history rewrite. + Crucial for finalizing the compaction process. 
+ """ + config = {"configurable": {"thread_id": thread_id}} + current_checkpoint = self.memory.get_tuple(config) + + if not current_checkpoint or not current_checkpoint.checkpoint: + return False + + checkpoint_config = { + "configurable": {**current_checkpoint.config.get("configurable", {})} + } + checkpoint_config["configurable"].setdefault("thread_id", thread_id) + checkpoint_config["configurable"].setdefault("checkpoint_ns", "") + + current_versions = current_checkpoint.checkpoint.get('channel_versions', {}) + new_msg_version = f"{str(int(time.time())).zfill(32)}.0.{random.random()}" + + new_versions = current_versions.copy() + new_versions['messages'] = new_msg_version + + new_checkpoint = { + 'v': current_checkpoint.checkpoint.get('v', 1) + 1, + 'ts': datetime.utcnow().isoformat(), + 'id': str(uuid.uuid4()), + 'channel_versions': new_versions, + 'versions_seen': current_checkpoint.checkpoint.get('versions_seen', {}), + 'updated_channels': ['messages'], + 'channel_values': {'messages': new_messages} + } + + existing_metadata = current_checkpoint.metadata or {} + new_metadata = { + **existing_metadata, + "source": "compaction", + "compacted_at": datetime.utcnow().isoformat(), + } + if "step" not in new_metadata: + new_metadata["step"] = existing_metadata.get("step", 0) + + self.memory.put( + config=checkpoint_config, + checkpoint=new_checkpoint, + metadata=new_metadata, + new_versions={'messages': new_msg_version} + ) + return True + diff --git a/src/context_eng/info.md b/src/context_eng/info.md new file mode 100644 index 0000000000000000000000000000000000000000..3705079fa624b4fa017ed60bc38efd05f6a86160 --- /dev/null +++ b/src/context_eng/info.md @@ -0,0 +1,186 @@ +# Context Engineering 🧠 + +> Keeping long-running agents "forever young" by managing their memory. + +## The Problem + +LLMs have finite context windows. As conversations grow, you eventually hit the token limit and the agent breaks. Simply truncating old messages loses valuable context. + +## The Solution: Compactive Summarization + +Instead of truncating, we **summarize** old conversation history into a compact narrative, preserving the essential context while freeing up tokens. + +``` +┌─────────────────────────────────────────────────────────┐ +│ Before Compaction (500+ tokens) │ +├─────────────────────────────────────────────────────────┤ +│ [System] You are an HR assistant... │ +│ [Human] Show me all candidates │ +│ [AI] Here are 5 candidates: Alice, Bob... │ +│ [Human] Tell me about Alice │ +│ [AI] Alice is a senior engineer with 5 years... │ +│ [Human] Schedule an interview with her │ +│ [Tool] Calendar event created... │ +│ [AI] Done! Interview scheduled for Monday. │ +│ [Human] Now check Bob's CV ← new │ +└─────────────────────────────────────────────────────────┘ + ↓ COMPACTION ↓ +┌─────────────────────────────────────────────────────────┐ +│ After Compaction (~200 tokens) │ +├─────────────────────────────────────────────────────────┤ +│ [System] You are an HR assistant... │ +│ [AI Summary] User reviewed candidates, focused on │ +│ Alice (senior engineer), scheduled interview │ +│ for Monday. │ +│ [Human] Now check Bob's CV ← kept │ +└─────────────────────────────────────────────────────────┘ +``` + +## Architecture + +``` +┌──────────────────────────────────────────────────────────┐ +│ CompactingSupervisor │ +│ ┌────────────────────────────────────────────────────┐ │ +│ │ 1. Intercept agent execution │ │ +│ │ 2. Run agent normally │ │ +│ │ 3. Count tokens after response │ │ +│ │ 4. 
If over limit → trigger compaction │ │ +│ └────────────────────────────────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌────────────────────────────────────────────────────┐ │ +│ │ HistoryManager │ │ +│ │ • compact_messages() → LLM summarization │ │ +│ │ • replace_thread_history() → checkpoint update │ │ +│ └────────────────────────────────────────────────────┘ │ +└──────────────────────────────────────────────────────────┘ +``` + +## 🔒 Subagents and Memory Safety + +Compaction affects **only the supervisor’s `messages` channel** inside LangGraph’s checkpoint. + +This includes: + +- User messages +- Supervisor AI messages +- **Tool call and Tool result messages** (because these are part of the supervisor’s visible conversation history) + +This does **not** include: + +- Sub-agent internal reasoning +- Sub-agent private memory +- Hidden chain-of-thought +- Any messages stored in sub-agent–specific channels + +Only the messages that the supervisor itself receives are ever compacted. +No internal sub-agent state leaks into the compacted summary. + + +## Key Parameters + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `token_limit` | 500 | Trigger compaction when exceeded | +| `compaction_ratio` | 0.5 | Fraction of messages to summarize | + +### Compaction Ratio Explained + +The `compaction_ratio` controls how aggressively we summarize: + +``` +compaction_ratio = 0.5 (Default) +├── Summarizes: oldest 50% of messages +└── Keeps verbatim: newest 50% of messages + +compaction_ratio = 0.8 (Aggressive) +├── Summarizes: oldest 80% of messages +└── Keeps verbatim: only newest 20% + → Use when context is very tight + +compaction_ratio = 0.2 (Gentle) +├── Summarizes: only oldest 20% +└── Keeps verbatim: newest 80% + → Use when you want more history preserved +``` + +**Example with 10 messages:** +- `ratio=0.5` → Summarize messages 1-5, keep 6-10 verbatim +- `ratio=0.8` → Summarize messages 1-8, keep 9-10 verbatim +- `ratio=0.2` → Summarize messages 1-2, keep 3-10 verbatim + +## Usage + +```python +from src.context_eng import compacting_supervisor + +# Just use it like a normal agent - compaction is automatic! +response = compacting_supervisor.invoke( + {"messages": [HumanMessage(content="Hello")]}, + config={"configurable": {"thread_id": "my-thread"}} +) + +# Streaming works too +for chunk in compacting_supervisor.stream(...): + if chunk["type"] == "token": + print(chunk["content"], end="") +``` + +## LangGraph Integration + +### How It Wraps the Agent + +The `CompactingSupervisor` uses the **Interceptor Pattern** - it wraps the existing LangGraph agent without modifying it: + +```python +# In compacting_supervisor.py +from src.agents.supervisor.supervisor_v2 import supervisor_agent, memory + +compacting_supervisor = CompactingSupervisor( + agent=supervisor_agent, # ← Original LangGraph agent + history_manager=HistoryManager(memory_saver=memory), # ← LangGraph's MemorySaver + ... +) +``` + +The agent itself is **unchanged**. We just intercept `invoke()` and `stream()` calls. + +### How It Manipulates LangGraph Memory + +LangGraph uses **checkpoints** to persist conversation state. Normally, messages are append-only. Our `HistoryManager.replace_thread_history()` bypasses this to force a rewrite: + +``` +Normal LangGraph flow: +┌─────────────────────────────────────┐ +│ Checkpoint Storage (MemorySaver) │ +│ ┌───────────────────────────────┐ │ +│ │ messages: [m1, m2, m3, m4...] 
│ │ ← Append-only
+│ └───────────────────────────────┘ │
+└─────────────────────────────────────┘
+
+After compaction (we override):
+┌─────────────────────────────────────┐
+│ Checkpoint Storage (MemorySaver)    │
+│ ┌───────────────────────────────┐   │
+│ │ messages: [sys, summary, m4]  │   │ ← Force-replaced!
+│ └───────────────────────────────┘   │
+└─────────────────────────────────────┘
+```
+
+**Key mechanism in `replace_thread_history()`:**
+1. Get current checkpoint via `memory.get_tuple(config)`
+2. Build new checkpoint with compacted messages
+3. Increment version + update timestamps
+4. Write directly via `memory.put(...)` - bypassing normal reducers
+
+This is a **low-level override** of LangGraph's internal checkpoint format. It works because we maintain the expected checkpoint structure (`channel_versions`, `channel_values`, etc.).
+
+## Files
+
+| File | Purpose |
+|------|---------|
+| `token_counter.py` | Count tokens in message lists |
+| `history_manager.py` | Summarization + checkpoint manipulation |
+| `compacting_supervisor.py` | Agent wrapper (Interceptor Pattern) |
+
diff --git a/src/context_eng/token_counter.py b/src/context_eng/token_counter.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b144d29f7db573113a37482100164944ccbc184
--- /dev/null
+++ b/src/context_eng/token_counter.py
@@ -0,0 +1,47 @@
+"""Token counting utilities for context window management."""
+
+from typing import List, Any
+
+import tiktoken
+
+
+def count_tokens_for_messages(messages: List[Any], model: str = "gpt-4o") -> int:
+    """
+    Calculate token usage for a list of messages using tiktoken.
+
+    Handles text content, function calls, and tool outputs with approximate
+    overhead calculations for the ChatML format.
+
+    Args:
+        messages: List of LangChain message objects.
+        model: Target model encoding to use.
+
+    Returns:
+        int: Total estimated token count.
+    """
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        encoding = tiktoken.get_encoding("cl100k_base")
+
+    num_tokens = 0
+    for message in messages:
+        # Every message follows {role/name}\n{content}\n
+        num_tokens += 4
+
+        # Handle content which might be a string or list of content blocks
+        content = getattr(message, "content", "")
+        if isinstance(content, str):
+            num_tokens += len(encoding.encode(content))
+
+        # Account for additional keys (name, tool calls, etc.) as well
+        if hasattr(message, "name") and message.name:
+            num_tokens += len(encoding.encode(message.name))
+
+        if hasattr(message, "tool_calls") and message.tool_calls:
+            for tool_call in message.tool_calls:
+                num_tokens += len(encoding.encode(str(tool_call)))
+
+    num_tokens += 2  # every reply is primed with the assistant role
+    return num_tokens
+
diff --git a/src/core/__init__.py b/src/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/core/base_agent.py b/src/core/base_agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..043a032ba6d206d6d11f02757be931d145a03328
--- /dev/null
+++ b/src/core/base_agent.py
@@ -0,0 +1,186 @@
+"""
+Base class for LangGraph-based agents that serves as an interface for building,
+compiling, and executing custom agent graphs.
+
+Alternatively, you can use `create_agent`, which implements a ReAct agent by default.
+It may be of particular interest since it enables middleware such as `context summarization`,
+`human in the loop`, and `dynamic model selection` out of the box.
+links:
+    - create_agent: https://docs.langchain.com/oss/python/langchain/agents
+    - middleware: https://docs.langchain.com/oss/python/langchain/middleware
+"""
+
+from abc import ABC, abstractmethod
+from typing import Dict, List, Optional
+
+from langgraph.graph import StateGraph
+from langchain_core.tools import BaseTool
+from langchain_openai import ChatOpenAI
+
+from src.core.configs.agent import AgentConfig
+
+
+class BaseAgent(ABC):
+    """Abstract base class for all LangGraph-based agents.
+    """
+
+    def __init__(self, config: AgentConfig) -> None:
+        """Initialize the agent with configuration.
+        """
+        self.config = config
+        self.name = config.name
+        self.description = config.description
+        self._graph: Optional[StateGraph] = None
+        self._compiled_graph = None
+
+        # Initialize model (tools are bound optionally via bind_tools)
+        self.llm = self._init_model()
+
+    # ~~~ ABSTRACT METHODS ~~~
+    @abstractmethod
+    def build_graph(self) -> StateGraph:
+        """Build the agent's LangGraph structure.
+        """
+        pass
+
+    # ~~~ MODEL INITIALIZATION ~~~
+    def _init_model(self) -> ChatOpenAI:
+        """Initialize the LLM engine based on the model provider.
+        """
+        model_cfg = self.config.model_cfg
+        provider = model_cfg.provider.lower()
+
+        if provider == "openai":
+            return ChatOpenAI(
+                model=model_cfg.model_name,
+                api_key=model_cfg.get_api_key(),
+                temperature=model_cfg.temperature,
+                max_tokens=model_cfg.max_tokens,
+                base_url=model_cfg.api_base,
+            )
+        elif provider == "openrouter":
+            # OpenRouter exposes an OpenAI-compatible API, so ChatOpenAI works here
+            return ChatOpenAI(
+                model=model_cfg.model_name,
+                api_key=model_cfg.get_api_key(),
+                temperature=model_cfg.temperature,
+                max_tokens=model_cfg.max_tokens,
+                base_url=model_cfg.api_base or "https://openrouter.ai/api/v1",
+            )
+        else:
+            raise NotImplementedError(
+                f"Provider '{provider}' not supported yet."
+            )
+
+
+    def bind_tools(
+        self,
+        tools: Optional[List[BaseTool]] = None,
+        strict: bool = True
+    ) -> ChatOpenAI:
+        """
+        Optionally bind tools to the initialized model.
+
+        Args:
+            tools: List of tools to bind. Defaults to `self.config.tools` if not provided.
+            strict: Enforce schema validation for tools.
+        """
+        if not hasattr(self, "llm"):
+            raise RuntimeError("Model must be initialized before binding tools.")
+
+        tools_to_bind = tools or self.config.tools
+        if not tools_to_bind:
+            return self.llm  # no-op
+
+        self.llm = self.llm.bind_tools(tools_to_bind, strict=strict)
+        return self.llm
+
+
+    # ~~~ GRAPH MANAGEMENT ~~~
+    def compile(self, checkpointer=None, store=None) -> StateGraph:
+        """Compile the agent graph for execution.
+        """
+        if self._graph is None:
+            self._graph = self.build_graph()
+
+        self._compiled_graph = self._graph.compile(checkpointer=checkpointer, store=store)
+        return self._compiled_graph
+
+
+    def get_graph(self) -> StateGraph:
+        """Return compiled graph (compile if needed).
+        """
+        if self._compiled_graph is None:
+            self.compile()
+        return self._compiled_graph
+
+
+    def visualize(self, output_path: Optional[str] = None):
+        """Render the graph as a Mermaid diagram.
+        """
+        if self._compiled_graph is None:
+            self.compile()
+        return self._compiled_graph.get_graph().draw_mermaid_png(output_file_path=output_path)
+
+    # ~~~ EXECUTION ~~~
+    def invoke(
+        self,
+        input_data: Dict[str, object],
+        config: Optional[Dict[str, object]] = None
+    ) -> Dict[str, object]:
+        """Execute the compiled agent.
+        """
+        if self._compiled_graph is None:
+            self.compile()
+        return self._compiled_graph.invoke(input_data, config)
+
+
+    async def ainvoke(
+        self,
+        input_data: Dict[str, object],
+        config: Optional[Dict[str, object]] = None
+    ) -> Dict[str, object]:
+        """Execute the agent asynchronously.
+        """
+        if self._compiled_graph is None:
+            self.compile()
+        return await self._compiled_graph.ainvoke(input_data, config)
+
+
+    def stream(
+        self,
+        input_data: Dict[str, object],
+        config: Optional[Dict[str, object]] = None
+    ) -> Dict[str, object]:
+        """Stream agent execution results.
+        """
+        if self._compiled_graph is None:
+            self.compile()
+        return self._compiled_graph.stream(input_data, config)
+
+    # ~~~ UTILITIES ~~~
+    def get_tools(self) -> List[BaseTool]:
+        """Return the tools this agent can use.
+        """
+        return list(self.config.tools or [])
+
+
+    def get_capabilities(self) -> List[str]:
+        """List of agent capabilities (override in subclasses).
+        """
+        return []
+
+
+    @property
+    def metadata(self) -> Dict[str, object]:
+        """Return agent metadata for discovery and routing.
+        """
+        return {
+            "name": self.name,
+            "description": self.description,
+            "tools": [tool.name for tool in self.get_tools()],
+            "capabilities": self.get_capabilities(),
+        }
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(name='{self.name}')"
diff --git a/src/core/configs/agent.py b/src/core/configs/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..4745854235fb7df205d1769b1719d1b6c56fb0a6
--- /dev/null
+++ b/src/core/configs/agent.py
@@ -0,0 +1,49 @@
+# src/core/configs/agent.py
+from typing import List, Optional
+from langchain_core.tools import BaseTool
+from pydantic import BaseModel, Field, ConfigDict
+from src.core.configs.model import ModelConfig
+
+
+class AgentConfig(BaseModel):
+    """
+    Configuration schema for initializing LangGraph agents.
+
+    Notes:
+        Pydantic setting ``model_config = ConfigDict(arbitrary_types_allowed=True)``
+        allows this model to include arbitrary Python objects such as LangChain
+        tools or runtime components that are not JSON-serializable or Pydantic
+        models. These objects (e.g., `BaseTool` instances) are accepted as-is
+        without validation or serialization, while all standard fields
+        (strings, numbers, nested Pydantic models) remain fully validated.
+
+        The LLM configuration field is named ``model_cfg`` because Pydantic v2
+        reserves the name ``model_config`` for model configuration.
+    """
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )
+    name: str = Field(
+        ...,
+        description="Unique name of the agent."
+    )
+    description: str = Field(
+        ...,
+        description="Short description of what the agent does."
+    )
+    model_cfg: ModelConfig = Field(
+        ...,
+        description="Configuration of the underlying LLM model."
+    )
+    tools: Optional[List[BaseTool]] = Field(
+        default_factory=list,
+        description="List of tools available to the agent."
+    )
+    system_prompt: str = Field(
+        default="",
+        description="System prompt to condition the agent's behavior."
+    )
+    max_iterations: Optional[int] = Field(
+        default=None,
+        description="Optional limit on reasoning iterations."
+    )
diff --git a/src/core/configs/model.py b/src/core/configs/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..79906d3e2de1f7cff0c53e50f13b2935da602572
--- /dev/null
+++ b/src/core/configs/model.py
@@ -0,0 +1,93 @@
+# src/core/configs/model.py
+from typing import Optional, Literal
+from pydantic import BaseModel, Field, SecretStr, field_validator, ConfigDict
+import os
+
+
+class ModelConfig(BaseModel):
+    """
+    Configuration object for connecting to and parameterizing an LLM provider.
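+
+    Example (illustrative):
+        >>> cfg = ModelConfig(provider="openai", model_name="gpt-4o", temperature=0.2)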
+
+    Notes:
+        The ``model_config = ConfigDict(arbitrary_types_allowed=True)`` setting
+        is included for consistency with other configs. It has no effect here
+        since all fields are natively supported types (e.g., str, float, int).
+        Standard Pydantic validation applies to all fields in this model.
+    """
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )
+
+    provider: Literal["openai", "openrouter", "anthropic", "bedrock", "azure"] = Field(
+        ...,
+        description="LLM provider identifier (e.g., openai, openrouter, anthropic, bedrock, azure)."
+    )
+    model_name: str = Field(
+        ...,
+        description="Model identifier (e.g., gpt-4o, claude-3, etc.)."
+    )
+    api_key: Optional[SecretStr] = Field(
+        default=None,
+        description="API key for the model provider. Falls back to an env var if omitted."
+    )
+    temperature: float = Field(
+        default=0.0,
+        ge=0.0,
+        le=2.0,
+        description="Sampling temperature for model randomness."
+    )
+    max_tokens: Optional[int] = Field(
+        default=None,
+        gt=0,
+        description="Optional token limit for completions."
+    )
+    api_base: Optional[str] = Field(
+        default=None,
+        description="Optional override for the model's base API URL."
+    )
+
+    # ~~~ VALIDATION ~~~
+    @field_validator("api_key", mode="before")
+    @classmethod
+    def resolve_api_key(cls, v, info):
+        """Resolve the API key from the provided value or environment.
+        """
+        if v is not None:
+            return v
+
+        provider = info.data.get("provider")
+        env_vars = {
+            "openai": "OPENAI_API_KEY",
+            "openrouter": "OPENROUTER_API_KEY",
+            "anthropic": "ANTHROPIC_API_KEY",
+            "bedrock": "AWS_ACCESS_KEY_ID",
+            "azure": "AZURE_OPENAI_API_KEY",
+        }
+
+        env_var = env_vars.get(provider)
+        if env_var:
+            api_key = os.getenv(env_var)
+            if api_key:
+                return SecretStr(api_key)
+
+        raise ValueError(
+            f"Missing API key: provide explicitly or set {env_var} in environment."
+        )
+
+    # ~~~ UTILITIES ~~~
+    def get_api_key(self) -> str:
+        """Safely return the underlying API key string.
+        """
+        return self.api_key.get_secret_value() if self.api_key else ""
+
+    def __repr__(self) -> str:
+        """Safe string representation (without exposing secret)."""
+        return (
+            f"ModelConfig(provider='{self.provider}', "
+            f"model_name='{self.model_name}', "
+            f"temperature={self.temperature}, "
+            f"max_tokens={self.max_tokens})"
+        )
diff --git a/src/database/__init__.py b/src/database/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/database/candidates/__init__.py b/src/database/candidates/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e9ad2299cbe6e3a76b56a9dcbc01c5601628008
--- /dev/null
+++ b/src/database/candidates/__init__.py
@@ -0,0 +1,30 @@
+"""
+Candidates database module.
+
+All database operations are organized in the ops/ folder,
+with each operation in its own file for modularity.
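+
+Example (illustrative):
+    from src.database.candidates import get_candidate_by_name
+    candidate = get_candidate_by_name("Jane Doe")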
+""" + +from .ops import ( + register_candidate, + update_parsed_cv_path, + get_candidate_by_name, + update_application_status, + write_cv_results_to_db, + write_voice_results_to_db, + evaluate_cv_screening_decision, +) + +__all__ = [ + "register_candidate", + "update_parsed_cv_path", + "get_candidate_by_name", + "update_application_status", + "write_cv_results_to_db", + "write_voice_results_to_db", + "evaluate_cv_screening_decision", +] diff --git a/src/database/candidates/client.py b/src/database/candidates/client.py new file mode 100644 index 0000000000000000000000000000000000000000..33bcf0db16df1050eb50390708722895f2692abd --- /dev/null +++ b/src/database/candidates/client.py @@ -0,0 +1,35 @@ +import os +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker +from src.database.candidates.models import Base +from src.configs import get_database_settings + + +def get_engine(): + """ + Builds a SQLAlchemy engine using validated environment variables. + Works seamlessly in both local and Docker environments. + + Priority: + 1. Environment variables (e.g., POSTGRES_HOST from Docker) + 2. .env file defaults via Pydantic config + """ + settings = get_database_settings() + + # Allow POSTGRES_HOST override (Docker will set it to 'db') + # Strip whitespace to avoid DNS resolution issues on Windows + postgres_host = os.getenv("POSTGRES_HOST", settings.host).strip() + database_url = ( + f"postgresql+psycopg2://{settings.user}:{settings.password}" + f"@{postgres_host}:{settings.port}/{settings.db}" + ) + + print(f"🔌 Connecting to database at {postgres_host}:{settings.port} ...") + + # Optional: echo=True for debugging SQL statements + return create_engine(database_url, echo=False, future=True) + + +# --- SQLAlchemy session setup --- +engine = get_engine() +SessionLocal = sessionmaker(bind=engine, autoflush=False, autocommit=False) diff --git a/src/database/candidates/info.md b/src/database/candidates/info.md new file mode 100644 index 0000000000000000000000000000000000000000..ea5c11096320cc4bd6005754333ce4b0590dfca4 --- /dev/null +++ b/src/database/candidates/info.md @@ -0,0 +1,234 @@ +# 🧩 Candidate Database Schema + +--- + +## 🗂 Overview + +The **Candidate Database** manages all structured data related to applicants throughout the HR screening pipeline. +It is designed to support both **deterministic queries** (e.g., “Top 10 CV scores”) and **LLM-based reasoning** (e.g., context summaries, evaluation traces). + +The schema uses a **hybrid design**: +- **Relational** structure for core entities and relationships. +- **JSON fields** for flexible, semi-structured LLM outputs. 
+
+---
+
+## 🧱 Entity Relationship Diagram
+
+```mermaid
+erDiagram
+
+    CANDIDATES ||--o{ CV_SCREENING_RESULTS : "has many"
+    CANDIDATES ||--o{ VOICE_SCREENING_RESULTS : "has many"
+    CANDIDATES ||--o{ INTERVIEW_SCHEDULING : "has many"
+    CANDIDATES ||--|| FINAL_DECISION : "has one"
+
+    CANDIDATES {
+        UUID id PK
+        string full_name
+        string email
+        string phone_number
+        string cv_file_path
+        string parsed_cv_file_path
+        string auth_code
+        enum status
+        datetime created_at
+        datetime updated_at
+    }
+
+    CV_SCREENING_RESULTS {
+        UUID id PK
+        UUID candidate_id FK
+        string job_title
+        float skills_match_score
+        float experience_match_score
+        float education_match_score
+        float overall_fit_score
+        text llm_feedback
+        json reasoning_trace
+        datetime timestamp
+    }
+
+    VOICE_SCREENING_RESULTS {
+        UUID id PK
+        UUID candidate_id FK
+        string call_sid
+        text transcript_text
+        float sentiment_score
+        float confidence_score
+        float communication_score
+        text llm_summary
+        json llm_judgment_json
+        string audio_url
+        datetime timestamp
+    }
+
+    INTERVIEW_SCHEDULING {
+        UUID id PK
+        UUID candidate_id FK
+        string calendar_event_id
+        string event_summary
+        datetime start_time
+        datetime end_time
+        enum status
+        datetime timestamp
+    }
+
+    FINAL_DECISION {
+        UUID id PK
+        UUID candidate_id FK
+        float overall_score
+        enum decision
+        text llm_rationale
+        text human_notes
+        datetime timestamp
+    }
+```
+
+---
+
+## 📋 Tables
+
+### 1️⃣ `candidates`
+
+Stores base applicant information and application metadata.
+
+| Column | Type | Description |
+|--------|------|--------------|
+| `id` | UUID | Primary key |
+| `full_name` | string | Candidate’s full name |
+| `email` | string | Unique email address |
+| `phone_number` | string | Contact number |
+| `cv_file_path` | string | Path or cloud URL to the uploaded CV |
+| `parsed_cv_file_path` | string | Path to the parsed CV, stored as Markdown |
+| `auth_code` | string | 6-digit authentication code |
+| `status` | enum | Candidate stage (`applied`, `cv_screened`, `voice_invitation_sent`, `voice_done`, `cv_passed`, `cv_rejected`, `voice_passed`, `voice_rejected`, `interview_scheduled`, `decision_made`) |
+| `created_at` | datetime | Application timestamp |
+| `updated_at` | datetime | Last update timestamp |
+
+---
+
+### 2️⃣ `cv_screening_results`
+
+Captures **CV screening metrics** and qualitative model feedback.
+
+| Column | Type | Description |
+|--------|------|--------------|
+| `id` | UUID | Primary key |
+| `candidate_id` | FK | Reference to `candidates.id` |
+| `job_title` | string | Target role being screened for |
+| `skills_match_score` | float | Match ratio for required skills |
+| `experience_match_score` | float | Experience alignment score |
+| `education_match_score` | float | Degree/education compatibility |
+| `overall_fit_score` | float | Weighted total score |
+| `llm_feedback` | text | Qualitative reasoning summary |
+| `reasoning_trace` | JSON | Optional full LLM reasoning trace |
+| `timestamp` | datetime | When evaluation was generated |
+
+---
+
+### 3️⃣ `voice_screening_results`
+
+Contains results from automated voice interviews.
+
+| Column | Type | Description |
+|--------|------|--------------|
+| `id` | UUID | Primary key |
+| `candidate_id` | FK | Reference to `candidates.id` |
+| `call_sid` | string | Twilio call identifier |
+| `transcript_text` | text | Full transcript from Whisper/STT |
+| `sentiment_score` | float | Tone or positivity measure |
+| `confidence_score` | float | LLM-assessed confidence or clarity |
+| `communication_score` | float | Fluency or articulation score |
+| `llm_summary` | text | High-level summary of the voice screen |
+| `llm_judgment_json` | JSON | Structured LLM evaluation (per-dimension) |
+| `audio_url` | string | URL to stored audio recording |
+| `timestamp` | datetime | Time of interview completion |
+
+---
+
+### 4️⃣ `interview_scheduling`
+
+Tracks HR interview scheduling and status.
+
+| Column | Type | Description |
+|--------|------|--------------|
+| `id` | UUID | Primary key |
+| `candidate_id` | FK | Reference to `candidates.id` |
+| `calendar_event_id` | string | Google Calendar event ID |
+| `event_summary` | string | Event title |
+| `start_time` | datetime | Interview start time |
+| `end_time` | datetime | Interview end time |
+| `status` | enum | Scheduling status (`scheduled`, `completed`, `cancelled`, `passed`, `rejected`) |
+| `timestamp` | datetime | Last updated timestamp |
+
+---
+
+### 5️⃣ `final_decision`
+
+Stores the overall hiring outcome after all screening stages.
+
+| Column | Type | Description |
+|--------|------|--------------|
+| `id` | UUID | Primary key |
+| `candidate_id` | FK | Reference to `candidates.id` |
+| `overall_score` | float | Aggregated weighted score |
+| `decision` | enum | `hired`, `rejected`, or `pending` |
+| `llm_rationale` | text | Model reasoning for decision |
+| `human_notes` | text | HR reviewer comments |
+| `timestamp` | datetime | Decision timestamp |
+
+---
+
+## 🧠 Design Principles
+
+- **Hybrid Schema:** Structured relational tables for clean querying; JSON for flexible LLM outputs.
+- **Traceability:** All records timestamped and linked to a single candidate.
+- **Extensibility:** New screening stages (e.g., technical test results) can be added as new tables with `candidate_id` foreign key.
+- **Cascade Relationships:** Deleting a candidate removes all dependent results automatically.
+- **Explainability-Ready:** LLM reasoning traces preserved for audit and context replay.
+
+
+---
+# 🐳 Docker-Based Local Development Setup
+
+---
+
+### 1️⃣ Clone and configure environment
+```bash
+cp .env.example .env
+```
+Your .env file should contain:
+```bash
+# Shared dev DB credentials
+POSTGRES_USER=agentic_user
+POSTGRES_PASSWORD=password123
+POSTGRES_DB=agentic_hr
+POSTGRES_HOST=db
+POSTGRES_PORT=5432
+```
+### 2️⃣ Start the stack
+```bash
+docker compose up --build
+```
+This will:
+- Spin up PostgreSQL in a container (agentic_hr_db)
+- Build and run your app container
+- Auto-initialize all database tables via SQLAlchemy
+
+Expected logs:
+```bash
+🔌 Connecting to database at db:5432 ...
+✅ Database initialized successfully.
+```
+
+### 3️⃣ Verify the setup
+Connect to the running DB container:
+```bash
+docker exec -it agentic_hr_db psql -U agentic_user -d agentic_hr
+```
+Then check tables:
+
+```sql
+\dt
+```
diff --git a/src/database/candidates/init_db.py b/src/database/candidates/init_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d4f9a2720bc414d8457513b1da95fb41eb4eb68
--- /dev/null
+++ b/src/database/candidates/init_db.py
@@ -0,0 +1,31 @@
+"""
+Database initialization script.
+ +This is a standalone script to initialize the database. +Kept separate from client.py to avoid circular import issues +when running with `python -m`. + +Usage: + python -m src.database.candidates.init_db +""" + +from src.database.candidates.client import engine +from src.database.candidates.models import Base + + +def init_db(): + """ + Creates all database tables if they don't exist. + Intended for dev setup / Docker initialization. + """ + try: + Base.metadata.create_all(bind=engine) + print("✅ Database initialized successfully.") + except Exception as e: + print(f"❌ Failed to initialize database: {e}") + raise + + +if __name__ == "__main__": + init_db() + diff --git a/src/database/candidates/models.py b/src/database/candidates/models.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d9901b46bf48872746cbb205d91ce8227605d9 --- /dev/null +++ b/src/database/candidates/models.py @@ -0,0 +1,129 @@ +from sqlalchemy import ( + Column, + String, + Float, + Text, + DateTime, + Enum, + ForeignKey, + JSON, +) +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import declarative_base, relationship +from datetime import datetime +import uuid +import secrets +import string + +from src.state.candidate import CandidateStatus, InterviewStatus, DecisionStatus + + +Base = declarative_base() + + +def generate_auth_code() -> str: + """Generate a 6-digit random authentication code.""" + return "".join(secrets.choice(string.digits) for _ in range(6)) + +# --- TABLES --- + +class Candidate(Base): + __tablename__ = "candidates" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + full_name = Column(String, nullable=False) + email = Column(String, unique=True, nullable=False) + phone_number = Column(String) + cv_file_path = Column(String) + parsed_cv_file_path = Column(String) + status = Column(Enum(CandidateStatus), default=CandidateStatus.applied, nullable=False) + created_at = Column(DateTime, default=datetime.utcnow) + auth_code = Column(String, default=generate_auth_code, nullable=True) + updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + + # Relationships + cv_screening_results = relationship( + "CVScreeningResult", + back_populates="candidate", + cascade="all, delete-orphan", + ) + voice_screening_results = relationship( + "VoiceScreeningResult", + back_populates="candidate", + cascade="all, delete-orphan", + ) + interview_scheduling = relationship( + "InterviewScheduling", + back_populates="candidate", + cascade="all, delete-orphan", + ) + final_decision = relationship( + "FinalDecision", + back_populates="candidate", + uselist=False, + cascade="all, delete-orphan", + ) + + +class CVScreeningResult(Base): + __tablename__ = "cv_screening_results" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + candidate_id = Column(UUID(as_uuid=True), ForeignKey("candidates.id", ondelete="CASCADE"), nullable=False) + job_title = Column(String) + skills_match_score = Column(Float) + experience_match_score = Column(Float) + education_match_score = Column(Float) + overall_fit_score = Column(Float) + llm_feedback = Column(Text) + reasoning_trace = Column(JSON) + timestamp = Column(DateTime, default=datetime.utcnow) + + candidate = relationship("Candidate", back_populates="cv_screening_results") + + +class VoiceScreeningResult(Base): + __tablename__ = "voice_screening_results" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + candidate_id = Column(UUID(as_uuid=True), 
ForeignKey("candidates.id", ondelete="CASCADE"), nullable=False) + call_sid = Column(String) + transcript_text = Column(Text) + sentiment_score = Column(Float) + confidence_score = Column(Float) + communication_score = Column(Float) + llm_summary = Column(Text) + llm_judgment_json = Column(JSON) + audio_url = Column(String) + timestamp = Column(DateTime, default=datetime.utcnow) + + candidate = relationship("Candidate", back_populates="voice_screening_results") + + +class InterviewScheduling(Base): + __tablename__ = "interview_scheduling" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + candidate_id = Column(UUID(as_uuid=True), ForeignKey("candidates.id", ondelete="CASCADE"), nullable=False) + calendar_event_id = Column(String) + event_summary = Column(String) + start_time = Column(DateTime) + end_time = Column(DateTime) + status = Column(Enum(InterviewStatus)) + timestamp = Column(DateTime, default=datetime.utcnow) + + candidate = relationship("Candidate", back_populates="interview_scheduling") + + +class FinalDecision(Base): + __tablename__ = "final_decision" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + candidate_id = Column(UUID(as_uuid=True), ForeignKey("candidates.id", ondelete="CASCADE"), nullable=False) + overall_score = Column(Float) + decision = Column(Enum(DecisionStatus)) + llm_rationale = Column(Text) + human_notes = Column(Text) + timestamp = Column(DateTime, default=datetime.utcnow) + + candidate = relationship("Candidate", back_populates="final_decision") diff --git a/src/database/candidates/ops/__init__.py b/src/database/candidates/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..213d5e37aee0d4fa848c82bf54a2cdc93f188646 --- /dev/null +++ b/src/database/candidates/ops/__init__.py @@ -0,0 +1,25 @@ +""" +Candidate database operations module. + +This module exports all candidate-related database operations. +Each operation is in its own file for modularity. +""" + +from .register_candidate import register_candidate +from .update_parsed_cv_path import update_parsed_cv_path +from .get_by_name import get_candidate_by_name +from .update_status import update_application_status +from .write_cv_results import write_cv_results_to_db +from .write_voice_results import write_voice_results_to_db +from .evaluate_cv_screening import evaluate_cv_screening_decision + +__all__ = [ + "register_candidate", + "update_parsed_cv_path", + "get_candidate_by_name", + "update_application_status", + "write_cv_results_to_db", + "write_voice_results_to_db", + "evaluate_cv_screening_decision", +] + diff --git a/src/database/candidates/ops/evaluate_cv_screening.py b/src/database/candidates/ops/evaluate_cv_screening.py new file mode 100644 index 0000000000000000000000000000000000000000..e97396b7c8ce19df707999bc07746cfd09d7e6e0 --- /dev/null +++ b/src/database/candidates/ops/evaluate_cv_screening.py @@ -0,0 +1,58 @@ +"""Evaluate CV screening decision based on score threshold.""" + +from datetime import datetime + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate, CVScreeningResult +from src.state.candidate import CandidateStatus + + +def evaluate_cv_screening_decision( + candidate_full_name: str, + min_overall_score: float = 7.0 +) -> str: + """ + Decides if a candidate passes CV screening based on a score threshold. + Updates the candidate status to 'cv_passed' or 'cv_rejected'. + + Args: + candidate_full_name: The candidate's full name. 
+ min_overall_score: Minimum score required to pass (default 7.0). + + Returns: + Outcome message. + """ + with SessionLocal() as session: + candidate = session.query(Candidate).filter( + Candidate.full_name == candidate_full_name + ).first() + + if not candidate: + return f"❌ Candidate '{candidate_full_name}' not found." + + # Get latest screening result + latest_result = ( + session.query(CVScreeningResult) + .filter(CVScreeningResult.candidate_id == candidate.id) + .order_by(CVScreeningResult.timestamp.desc()) + .first() + ) + + if not latest_result: + return f"❌ No screening results found for '{candidate_full_name}'. Run screening workflow first." + + score = latest_result.overall_fit_score + + if score >= min_overall_score: + new_status = CandidateStatus.cv_passed + decision = "PASSED" + else: + new_status = CandidateStatus.cv_rejected + decision = "REJECTED" + + candidate.status = new_status + candidate.updated_at = datetime.utcnow() + session.commit() + + return f"✅ Decision: {decision} (Score: {score} vs Threshold: {min_overall_score}). Status updated to '{new_status.value}'." + diff --git a/src/database/candidates/ops/get_by_name.py b/src/database/candidates/ops/get_by_name.py new file mode 100644 index 0000000000000000000000000000000000000000..34d2dc5ade6f19dc3837050eb6cf2389ca1fff1e --- /dev/null +++ b/src/database/candidates/ops/get_by_name.py @@ -0,0 +1,34 @@ +"""Get a candidate by their full name.""" + +from typing import Optional, Dict, Any + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate + + +def get_candidate_by_name(full_name: str) -> Optional[Dict[str, Any]]: + """ + Retrieve a candidate by their full name. + + Args: + full_name: The full name of the candidate. + + Returns: + A dictionary with candidate data, or None if not found. + Contains: id, full_name, email, parsed_cv_file_path, status + """ + with SessionLocal() as session: + candidate = session.query(Candidate).filter( + Candidate.full_name == full_name + ).first() + + if candidate: + return { + "id": candidate.id, + "full_name": candidate.full_name, + "email": candidate.email, + "parsed_cv_file_path": candidate.parsed_cv_file_path, + "status": candidate.status + } + return None + diff --git a/src/database/candidates/ops/register_candidate.py b/src/database/candidates/ops/register_candidate.py new file mode 100644 index 0000000000000000000000000000000000000000..d79d44c49362a6214af8a97b27e128ff84081db5 --- /dev/null +++ b/src/database/candidates/ops/register_candidate.py @@ -0,0 +1,45 @@ +"""Register a new candidate in the database.""" + +from sqlalchemy.exc import IntegrityError + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate +from src.state.candidate import CandidateStatus + + +def register_candidate( + full_name: str, + email: str, + phone: str, + cv_path: str +) -> bool: + """ + Register a new candidate in the database. + + Args: + full_name: Candidate's full name. + email: Candidate's email address (unique). + phone: Candidate's phone number. + cv_path: Path to the uploaded CV file. + + Returns: + True if successful, False if candidate already exists. 
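+
+    Example (illustrative):
+        >>> register_candidate("Jane Doe", "jane@example.com", "+49123456789", "src/database/cvs/uploads/jane_doe.pdf")
+        True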
+ """ + with SessionLocal() as session: + candidate = Candidate( + full_name=full_name, + email=email, + phone_number=phone, + cv_file_path=cv_path, + status=CandidateStatus.applied, + ) + session.add(candidate) + try: + session.commit() + print(f"✅ Candidate '{full_name}' registered successfully.") + return True + except IntegrityError: + session.rollback() + print(f"⚠️ Candidate with email '{email}' already exists.") + return False + diff --git a/src/database/candidates/ops/update_parsed_cv_path.py b/src/database/candidates/ops/update_parsed_cv_path.py new file mode 100644 index 0000000000000000000000000000000000000000..b7a91e9f59b7f0964d2983ebcb5e95e0089f8a04 --- /dev/null +++ b/src/database/candidates/ops/update_parsed_cv_path.py @@ -0,0 +1,24 @@ +"""Update the parsed CV file path for a candidate.""" + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate + + +def update_parsed_cv_path(email: str, parsed_path: str) -> None: + """ + Update the parsed CV file path for a candidate identified by email. + + Args: + email: Candidate's email (unique identifier). + parsed_path: Path to the parsed markdown file. + """ + with SessionLocal() as session: + candidate = session.query(Candidate).filter_by(email=email).first() + if not candidate: + print(f"⚠️ No candidate found with email: {email}") + return + + candidate.parsed_cv_file_path = parsed_path + session.commit() + print(f"✅ Updated parsed CV path for {email}: {parsed_path}") + diff --git a/src/database/candidates/ops/update_status.py b/src/database/candidates/ops/update_status.py new file mode 100644 index 0000000000000000000000000000000000000000..f4a07552637208aa89bd1e8123fd2f124a388498 --- /dev/null +++ b/src/database/candidates/ops/update_status.py @@ -0,0 +1,27 @@ +"""Update the status of a candidate application.""" + +from datetime import datetime + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate +from src.state.candidate import CandidateStatus + + +def update_application_status(candidate_email: str, status: CandidateStatus) -> None: + """ + Update the status of a candidate application. + + Args: + candidate_email: The email of the candidate. + status: The new status to set. 
+ """ + with SessionLocal() as session: + candidate = session.query(Candidate).filter_by(email=candidate_email).first() + if candidate: + candidate.status = status + candidate.updated_at = datetime.utcnow() + session.commit() + print(f"✅ Updated status for {candidate_email} to {status.value}") + else: + print(f"⚠️ No candidate found with email: {candidate_email}") + diff --git a/src/database/candidates/ops/write_cv_results.py b/src/database/candidates/ops/write_cv_results.py new file mode 100644 index 0000000000000000000000000000000000000000..fcea7154c13e639b055ac94ac99bf972831b32fb --- /dev/null +++ b/src/database/candidates/ops/write_cv_results.py @@ -0,0 +1,57 @@ +"""Write CV screening results to the database.""" + +from datetime import datetime +from typing import TYPE_CHECKING + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate, CVScreeningResult +from src.state.candidate import CandidateStatus + +if TYPE_CHECKING: + from src.agents.cv_screening.schemas.output_schema import CVScreeningOutput + + +def write_cv_results_to_db( + candidate_email: str, + result: "CVScreeningOutput", + job_title: str = "AI Engineer" +) -> None: + """ + Store the CV screening results in the database and update candidate status. + + Args: + candidate_email: Email of the candidate. + result: The screening results from the LLM (CVScreeningOutput). + job_title: The job title the candidate applied for. + + Returns: + None + """ + with SessionLocal() as session: + candidate = session.query(Candidate).filter_by(email=candidate_email).first() + + if not candidate: + print(f"⚠️ No candidate found with email: {candidate_email}") + return + + # Create new CV screening result entry + screening_entry = CVScreeningResult( + candidate_id=candidate.id, + job_title=job_title, + skills_match_score=result.skills_match_score, + experience_match_score=result.experience_match_score, + education_match_score=result.education_match_score, + overall_fit_score=result.overall_fit_score, + llm_feedback=result.llm_feedback, + reasoning_trace=None, + timestamp=datetime.utcnow(), + ) + + # Add and commit + session.add(screening_entry) + candidate.status = CandidateStatus.cv_screened + candidate.updated_at = datetime.utcnow() + session.commit() + + print(f"✅ Screening results saved and status updated for {candidate_email} -> {candidate.status}") + diff --git a/src/database/candidates/ops/write_voice_results.py b/src/database/candidates/ops/write_voice_results.py new file mode 100644 index 0000000000000000000000000000000000000000..fe1a53ecffd5736e796e7689040a42ba7023d048 --- /dev/null +++ b/src/database/candidates/ops/write_voice_results.py @@ -0,0 +1,64 @@ +"""Write voice screening results to the database.""" + +import uuid +from datetime import datetime +from typing import Optional, TYPE_CHECKING + +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate, VoiceScreeningResult +from src.state.candidate import CandidateStatus + +if TYPE_CHECKING: + from src.agents.voice_screening.schemas.output_schema import VoiceScreeningOutput + + +def write_voice_results_to_db( + candidate_id: str, + session_id: str, + transcript_text: str, + result: "VoiceScreeningOutput", + audio_url: Optional[str] = None +) -> None: + """ + Store the voice screening results in the database and update candidate status. + + Args: + candidate_id: UUID of the candidate. + session_id: Session identifier (call_sid for Twilio, session_id for web). 
+ transcript_text: Full conversation transcript. + result: The screening results from the LLM (VoiceScreeningOutput). + audio_url: URL to the call recording if available. + + Returns: + None + """ + with SessionLocal() as session: + candidate = session.query(Candidate).filter_by( + id=uuid.UUID(candidate_id) + ).first() + + if not candidate: + print(f"⚠️ No candidate found with ID: {candidate_id}") + return + + # Create new voice screening result entry + screening_entry = VoiceScreeningResult( + candidate_id=candidate.id, + call_sid=session_id, + transcript_text=transcript_text, + sentiment_score=result.sentiment_score, + confidence_score=result.confidence_score, + communication_score=result.communication_score, + llm_summary=result.llm_summary, + llm_judgment_json=result.llm_judgment_json, + audio_url=audio_url, + timestamp=datetime.utcnow(), + ) + + # Add and commit + session.add(screening_entry) + candidate.status = CandidateStatus.voice_done + candidate.updated_at = datetime.utcnow() + session.commit() + + print(f"✅ Voice screening results saved and status updated for candidate {candidate_id}") diff --git a/src/database/context/__init__.py b/src/database/context/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..55fc2948a0d0910bf8d024b3995232c2d2b8c9a5 --- /dev/null +++ b/src/database/context/__init__.py @@ -0,0 +1,153 @@ +from sqlalchemy import ( + Column, String, Integer, Float, Enum, DateTime, Text, ForeignKey, JSON +) +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import declarative_base, relationship +from datetime import datetime +import enum +import uuid + +Base = declarative_base() + + +# ============================================================== +# ENUM DEFINITIONS +# ============================================================== + +class CandidateStatus(enum.Enum): + APPLIED = "applied" + CV_SCREENED = "cv_screened" + INVITED_VOICE = "invited_voice" + VOICE_DONE = "voice_done" + SCHEDULED_HR = "scheduled_hr" + DECISION_PENDING = "decision_pending" + REJECTED = "rejected" + HIRED = "hired" + + +class InterviewStatus(enum.Enum): + SCHEDULED = "scheduled" + COMPLETED = "completed" + CANCELLED = "cancelled" + + +class Decision(enum.Enum): + HIRE = "hire" + REJECT = "reject" + MAYBE = "maybe" + + +# ============================================================== +# MAIN TABLES +# ============================================================== + +class Candidate(Base): + __tablename__ = "candidates" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + full_name = Column(String(255), nullable=False) + email = Column(String(255), nullable=False, unique=True) + phone_number = Column(String(50), nullable=True) + cv_file_path = Column(String(500), nullable=True) + parsed_cv_json = Column(JSON, nullable=True) + status = Column(Enum(CandidateStatus), default=CandidateStatus.APPLIED) + created_at = Column(DateTime, default=datetime.utcnow) + updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + + # Relationships + cv_results = relationship("CVScreeningResult", back_populates="candidate", cascade="all, delete-orphan") + voice_results = relationship("VoiceScreeningResult", back_populates="candidate", cascade="all, delete-orphan") + interviews = relationship("InterviewScheduling", back_populates="candidate", cascade="all, delete-orphan") + decision = relationship("FinalDecision", back_populates="candidate", uselist=False, cascade="all, delete-orphan") + + +# 
============================================================== +# CV SCREENING RESULTS +# ============================================================== + +class CVScreeningResult(Base): + __tablename__ = "cv_screening_results" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + candidate_id = Column(UUID(as_uuid=True), ForeignKey("candidates.id"), nullable=False) + job_title = Column(String(255), nullable=True) + + skills_match_score = Column(Float, nullable=True) + experience_match_score = Column(Float, nullable=True) + education_match_score = Column(Float, nullable=True) + overall_fit_score = Column(Float, nullable=True) + + llm_feedback = Column(Text, nullable=True) + reasoning_trace = Column(JSON, nullable=True) + + timestamp = Column(DateTime, default=datetime.utcnow) + + candidate = relationship("Candidate", back_populates="cv_results") + + +# ============================================================== +# VOICE SCREENING RESULTS +# ============================================================== + +class VoiceScreeningResult(Base): + __tablename__ = "voice_screening_results" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + candidate_id = Column(UUID(as_uuid=True), ForeignKey("candidates.id"), nullable=False) + + call_sid = Column(String(255), nullable=True) + transcript_text = Column(Text, nullable=True) + + sentiment_score = Column(Float, nullable=True) + confidence_score = Column(Float, nullable=True) + communication_score = Column(Float, nullable=True) + + llm_summary = Column(Text, nullable=True) + llm_judgment_json = Column(JSON, nullable=True) + audio_url = Column(String(500), nullable=True) + + timestamp = Column(DateTime, default=datetime.utcnow) + + candidate = relationship("Candidate", back_populates="voice_results") + + +# ============================================================== +# INTERVIEW SCHEDULING +# ============================================================== + +class InterviewScheduling(Base): + __tablename__ = "interview_scheduling" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + candidate_id = Column(UUID(as_uuid=True), ForeignKey("candidates.id"), nullable=False) + + calendar_event_id = Column(String(255), nullable=True) + event_summary = Column(String(255), nullable=True) + + start_time = Column(DateTime, nullable=True) + end_time = Column(DateTime, nullable=True) + status = Column(Enum(InterviewStatus), default=InterviewStatus.SCHEDULED) + + timestamp = Column(DateTime, default=datetime.utcnow) + + candidate = relationship("Candidate", back_populates="interviews") + + +# ============================================================== +# FINAL DECISION +# ============================================================== + +class FinalDecision(Base): + __tablename__ = "final_decision" + + id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + candidate_id = Column(UUID(as_uuid=True), ForeignKey("candidates.id"), nullable=False, unique=True) + + overall_score = Column(Float, nullable=True) + decision = Column(Enum(Decision), default=Decision.MAYBE) + llm_rationale = Column(Text, nullable=True) + human_notes = Column(Text, nullable=True) + + timestamp = Column(DateTime, default=datetime.utcnow) + + candidate = relationship("Candidate", back_populates="decision") diff --git a/src/database/conversations/info.md b/src/database/conversations/info.md new file mode 100644 index 0000000000000000000000000000000000000000..290c36de0177ca0a67286ba77bd7da7c84b0708f --- 
/dev/null +++ b/src/database/conversations/info.md @@ -0,0 +1 @@ +storage for audio files \ No newline at end of file diff --git a/src/database/cvs/__init__.py b/src/database/cvs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e4eeae245315a1a41e5c1d8c8107cbf9f79c4404 --- /dev/null +++ b/src/database/cvs/__init__.py @@ -0,0 +1,6 @@ +"""CV storage module.""" + +from .storage import save_cv, ensure_upload_dir, UPLOAD_DIR + +__all__ = ["save_cv", "ensure_upload_dir", "UPLOAD_DIR"] + diff --git a/src/database/cvs/storage.py b/src/database/cvs/storage.py new file mode 100644 index 0000000000000000000000000000000000000000..ee0405aeba3f5325b03e2035b3cdd8554215248f --- /dev/null +++ b/src/database/cvs/storage.py @@ -0,0 +1,52 @@ +""" +CV file storage operations. + +This module handles saving and managing CV files on disk. +""" + +import os +from typing import BinaryIO + +# Default upload directory (can be overridden via env var) +UPLOAD_DIR = os.getenv("CV_UPLOAD_PATH", "src/database/cvs/uploads") + + +def ensure_upload_dir() -> None: + """Ensure the CV upload directory exists.""" + os.makedirs(UPLOAD_DIR, exist_ok=True) + + +def save_cv(file_obj: BinaryIO, original_filename: str, candidate_name: str = "") -> str: + """ + Save an uploaded CV to the local uploads directory. + + Args: + file_obj: The file-like object (from Streamlit upload or HTTP request). + original_filename: The original name of the uploaded file. + candidate_name: The full name of the candidate (optional). + + Returns: + The full path where the file was saved. + """ + ensure_upload_dir() + + # Generate unique filename + _, file_ext = os.path.splitext(original_filename) + + if candidate_name: + # Sanitize candidate name: remove non-alphanumeric (except space/hyphen), replace spaces with underscores + safe_candidate_name = "".join(c for c in candidate_name if c.isalnum() or c in (" ", "-", "_")) + safe_candidate_name = safe_candidate_name.replace(" ", "_") + safe_name = f"{safe_candidate_name}_CV{file_ext}" + else: + safe_name = f"{os.path.basename(original_filename)}" + + file_path = os.path.join(UPLOAD_DIR, safe_name) + + # Save binary content + with open(file_path, "wb") as f: + f.write(file_obj.read()) + + print(f"📂 Saved CV to {file_path}") + return file_path + diff --git a/src/database/job_postings/ai_engineer.txt b/src/database/job_postings/ai_engineer.txt new file mode 100644 index 0000000000000000000000000000000000000000..fbef19e185e55e5d06e63491fc545a4478f77ad1 --- /dev/null +++ b/src/database/job_postings/ai_engineer.txt @@ -0,0 +1,11 @@ +### 🧠 Position: AI Engineer +**Location:** Remote / Wiesbaden HQ +**About the Role:** +Join our AI R&D team to develop, fine-tune, and deploy ML models for production. +You will work on projects involving LLMs, LangGraph agents, and context engineering. + +**Requirements:** + - Proficiency in Python & modern AI frameworks (PyTorch, LangChain, etc.) 
+ - Solid understanding of NLP and ML pipelines + - Experience deploying models or building intelligent systems + - Strong communication and teamwork skills \ No newline at end of file diff --git a/src/doc_parser/__init__.py b/src/doc_parser/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..830fdd3ed021d901b37ddc5a6a2d89697b693bc7 --- /dev/null +++ b/src/doc_parser/__init__.py @@ -0,0 +1,6 @@ +"""Document parsing module.""" + +from .pdf_to_markdown import pdf_to_markdown + +__all__ = ["pdf_to_markdown"] + diff --git a/src/doc_parser/pdf_to_markdown.py b/src/doc_parser/pdf_to_markdown.py new file mode 100644 index 0000000000000000000000000000000000000000..fb29ccd8518f64fca39de892355e63623baba730 --- /dev/null +++ b/src/doc_parser/pdf_to_markdown.py @@ -0,0 +1,265 @@ +""" +PDF to Markdown converter using GPT-4 Vision. + +--------------------------------------------------------------------------- +------------------------------ How to Use It ------------------------------ +--------------------------------------------------------------------------- +Process a single file: +>>> python pdf_to_markdown.py data_cv/max_mustermann_cv.pdf + +Process a folder: +>>> python pdf_to_markdown.py data_cv/ + + +Customize model or rendering: +>>> python pdf_to_markdown.py data_cv/ --model gpt-4.1 --target-width 1800 --batch-size 3 + + +Disable column splitting: +>>> python pdf_to_markdown.py my_resume.pdf --no-halves + + +Set a custom output folder: +>>> python pdf_to_markdown.py data_cv/ --output processed/ + + +🔧 Summary of Configurable Options +| Option | Description | Default | +| --------------------- | ------------------------------- | ------------------ | +| `path` | PDF file or folder path | required | +| `--output` | Output directory | `results/` | +| `--model` | OpenAI model | `gpt-4.1-mini` | +| `--target-width` | Render width per page | `2000` | +| `--batch-size` | Pages per API request | `2` | +| `--max-output-tokens` | Max tokens returned | `8192` | +| `--no-halves` | Disable left/right column crops | Enabled by default | +""" + +import argparse +import os +from datetime import datetime +from pathlib import Path +from typing import Dict, List + +from dotenv import load_dotenv +from openai import OpenAI +from PIL import Image + +from .utils import ( + render_pdf_to_images, + pil_to_png_data_uri, + split_halves, + parse_sections_from_json_text, + normalize_sections, + merge_duplicate_titles, + build_contact_section_from_filename, + process_section, + apply_postprocessing, +) + + +def pdf_to_markdown( + input_path: Path, + output_path: Path, + model: str = "gpt-4.1-mini", + target_width: int = 2000, + batch_size: int = 2, + max_output_tokens: int = 8192, + add_halves: bool = True, +) -> None: + """ + Process a single PDF or all PDFs in a directory and export Markdown sections. + + 1. Render PDF pages to images. + 2. Send images in batches to GPT-4 Vision for section parsing. + 3. Normalize and post-process the returned sections. + 4. Save the final sections as a Markdown text file. + 5. Repeat for all PDFs in the input path. + 6. Output files are saved in the specified output directory. + + Args: + input_path: Path to a single PDF file or a directory of PDFs. + output_path: Directory to save the output Markdown files. + model: OpenAI model to use for processing. + target_width: Target width for rendering PDF pages. + batch_size: Number of pages to send per API request. + max_output_tokens: Maximum tokens in model output. 
+ add_halves: Whether to add left/right column crops. + """ + load_dotenv() + + def log_step(message: str) -> None: + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + print(f"[{timestamp}] {message}") + + log_step("Vision-based PDF → Markdown extraction started...") + + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + raise RuntimeError("OPENAI_API_KEY is not set. Add it to your environment or .env file.") + + # --- Determine which PDFs to process --- + if input_path.is_file() and input_path.suffix.lower() == ".pdf": + pdf_files = [input_path] + elif input_path.is_dir(): + pdf_files = sorted(input_path.glob("*.pdf")) + else: + raise ValueError(f"Invalid input path: {input_path}") + + if not pdf_files: + log_step(f"No PDF files found at {input_path}") + return + + output_path.mkdir(parents=True, exist_ok=True) + log_step(f"Found {len(pdf_files)} PDF file(s) in {input_path}.") + log_step(f"Using model={model}, batch_size={batch_size}, target_width={target_width}px.") + + client = OpenAI() + + # -------------------------- Inner helper -------------------------- + def call_batch(imgs: List[Image.Image]) -> List[Dict[str, str]]: + """Process a batch of page images → STRICT JSON sections.""" + image_contents = [] + for img in imgs: + data_uri = pil_to_png_data_uri(img) + image_contents.append({"type": "input_image", "image_url": data_uri}) + + if add_halves: + for half in split_halves(img): + image_contents.append( + {"type": "input_image", "image_url": pil_to_png_data_uri(half)} + ) + + system = "You are a precise document structure parser. Output ONLY valid JSON." + user = ( + "From these page images, return a STRICT JSON array where each item has 'title' and 'body'. " + "Group human-meaningful sections, merge multi-line headings (two-column layouts), preserve reading order. " + "Do NOT summarize or omit content. Include headers/footers if they contain contact data. " + "Preserve bullet/numbered lists and render tables as Markdown where possible. " + "Use proper UTF-8 German diacritics (ä, ö, ü, ß). " + "Include small sidebar/column blocks and deduplicate content across full pages and crops." 
+ ) + + response = client.responses.create( + model=model, + temperature=0, + max_output_tokens=max_output_tokens, + input=[ + {"role": "system", "content": [{"type": "input_text", "text": system}]}, + {"role": "user", "content": [{"type": "input_text", "text": user}] + image_contents}, + ], + ) + + text = getattr(response, "output_text", "") or "" + return parse_sections_from_json_text(text) + + # -------------------------- Main processing -------------------------- + total_files = len(pdf_files) + for index, pdf_file in enumerate(pdf_files, start=1): + log_step(f"[{index}/{total_files}] Processing {pdf_file.name}...") + pages = render_pdf_to_images(pdf_file, target_width=target_width) + + if not pages: + raise RuntimeError(f"Failed to render any PDF pages for {pdf_file}.") + + log_step(f"Rendered {len(pages)} page(s).") + + all_sections: List[Dict[str, str]] = [] + for start in range(0, len(pages), batch_size): + end = min(len(pages), start + batch_size) + batch_num = (start // batch_size) + 1 + log_step(f"Batch {batch_num}: pages {start + 1}–{end}.") + secs = call_batch(pages[start:end]) + if secs: + all_sections.extend(secs) + log_step(f"Batch {batch_num} returned {len(secs)} section(s).") + else: + log_step(f"Batch {batch_num} returned no sections.") + + if not all_sections: + raise RuntimeError(f"No sections parsed from vision model output for {pdf_file}.") + + log_step(f"Received {len(all_sections)} raw section(s).") + normalized = normalize_sections(all_sections) + merged = merge_duplicate_titles(normalized) + final_sections = apply_postprocessing(merged) + contact_section = process_section(build_contact_section_from_filename(pdf_file)) + final_sections.insert(0, contact_section) + + out_txt = output_path / f"{pdf_file.stem}.txt" + log_step(f"Writing output to {out_txt}...") + + lines: List[str] = [] + for sec in final_sections: + title = (sec.get("title") or "").strip() + body = (sec.get("body") or "").strip() + if title: + lines.append(f"## {title}") + if body: + lines.append(body) + lines.append("") + + while lines and lines[-1] == "": + lines.pop() + + out_txt.write_text("\n".join(lines), encoding="utf-8") + log_step(f"✅ Completed processing for {pdf_file.name}.") + + log_step("🎉 All PDF files processed successfully.") + print(f"\nResults saved in: {output_path.resolve()}") + + +# ----------------------------- CLI entrypoint ----------------------------- +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Convert PDFs to structured Markdown using GPT-4 Vision." 
+ ) + parser.add_argument( + "path", + help="Path to a single PDF file or a directory containing PDF files.", + ) + parser.add_argument( + "-o", "--output", + default="results", + help="Output directory for the Markdown files (default: results/)", + ) + parser.add_argument( + "--model", + default=os.getenv("OPENAI_MODEL", "gpt-4.1-mini"), + help="OpenAI model to use (default: gpt-4.1-mini)", + ) + parser.add_argument( + "--target-width", + type=int, + default=int(os.getenv("VISION_TARGET_WIDTH", "2000")), + help="Target width for rendering PDF pages (default: 2000 px)", + ) + parser.add_argument( + "--batch-size", + type=int, + default=int(os.getenv("VISION_BATCH_PAGES", "2")), + help="Number of pages to send to the model per request (default: 2)", + ) + parser.add_argument( + "--max-output-tokens", + type=int, + default=int(os.getenv("MAX_OUTPUT_TOKENS", "8192")), + help="Maximum tokens in model output (default: 8192)", + ) + parser.add_argument( + "--no-halves", + action="store_true", + help="Disable left/right column splitting (default: enabled)", + ) + + args = parser.parse_args() + + pdf_to_markdown( + input_path=Path(args.path), + output_path=Path(args.output), + model=args.model, + target_width=args.target_width, + batch_size=args.batch_size, + max_output_tokens=args.max_output_tokens, + add_halves=not args.no_halves, + ) diff --git a/src/doc_parser/utils/__init__.py b/src/doc_parser/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2e0e84fff6c28a97d57effafa7e5951c8da9b2f8 --- /dev/null +++ b/src/doc_parser/utils/__init__.py @@ -0,0 +1,33 @@ +"""Document parser utilities.""" + +from .text import normalize_bullets, tag_contacts, EMAIL_RE, PHONE_RE, URL_RE +from .image import render_pdf_to_images, pil_to_png_data_uri, split_halves +from .sections import ( + parse_sections_from_json_text, + normalize_sections, + merge_duplicate_titles, + build_contact_section_from_filename, + process_section, + apply_postprocessing, +) + +__all__ = [ + # Text + "normalize_bullets", + "tag_contacts", + "EMAIL_RE", + "PHONE_RE", + "URL_RE", + # Image + "render_pdf_to_images", + "pil_to_png_data_uri", + "split_halves", + # Sections + "parse_sections_from_json_text", + "normalize_sections", + "merge_duplicate_titles", + "build_contact_section_from_filename", + "process_section", + "apply_postprocessing", +] + diff --git a/src/doc_parser/utils/image.py b/src/doc_parser/utils/image.py new file mode 100644 index 0000000000000000000000000000000000000000..7d73ecd0150fa25b3b817423476cd04dae5c2097 --- /dev/null +++ b/src/doc_parser/utils/image.py @@ -0,0 +1,65 @@ +"""Image and PDF rendering utilities.""" + +import base64 +import io +from pathlib import Path +from typing import List + +import pypdfium2 as pdfium +from PIL import Image + + +def render_pdf_to_images(pdf_path: Path, target_width: int = 2000) -> List[Image.Image]: + """ + Render PDF pages to PIL images (layout-preserving). + + Args: + pdf_path: Path to the PDF file. + target_width: Target width for rendering (scales proportionally). + + Returns: + List of PIL Image objects, one per page. 
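+
+    Example (illustrative; "cv.pdf" is a placeholder path):
+        >>> pages = render_pdf_to_images(Path("cv.pdf"), target_width=2000)
+        >>> # pages narrower than target_width are upscaled; the scale factor never drops below 1.0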
+ """ + doc = pdfium.PdfDocument(str(pdf_path)) + images: List[Image.Image] = [] + + for index in range(len(doc)): + page = doc[index] + width_pt, height_pt = page.get_size() + scale = max(1.0, float(target_width) / float(max(1.0, width_pt))) + bitmap = page.render(scale=scale) + img = bitmap.to_pil() + images.append(img) + + page.close() + return images + + +def pil_to_png_data_uri(img: Image.Image) -> str: + """Convert a PIL image to a PNG data URI (base64).""" + buf = io.BytesIO() + img.save(buf, format="PNG") + b64 = base64.b64encode(buf.getvalue()).decode("ascii") + return f"data:image/png;base64,{b64}" + + +def split_halves(img: Image.Image, overlap_px: int = 40) -> List[Image.Image]: + """ + Create left/right column crops with small overlap. + + Useful for two-column CV layouts where GPT-4 Vision might + miss content in narrow columns. + + Args: + img: PIL Image to split. + overlap_px: Pixels of overlap in the middle. + + Returns: + List of [left_half, right_half] images. + """ + w, h = img.size + mid = w // 2 + left_box = (0, 0, min(mid + overlap_px, w), h) + right_box = (max(mid - overlap_px, 0), 0, w, h) + return [img.crop(left_box), img.crop(right_box)] + diff --git a/src/doc_parser/utils/sections.py b/src/doc_parser/utils/sections.py new file mode 100644 index 0000000000000000000000000000000000000000..322bc5c4d8030307fe56e54bfb700f6f8b654ac7 --- /dev/null +++ b/src/doc_parser/utils/sections.py @@ -0,0 +1,120 @@ +"""Section parsing and processing utilities.""" + +import json +import re +from collections import OrderedDict +from pathlib import Path +from typing import Dict, List + +from ftfy import fix_text + +from .text import normalize_bullets, tag_contacts + + +def parse_sections_from_json_text(text: str) -> List[Dict[str, str]]: + """ + Parse STRICT JSON from the API response. + + Attempts direct JSON parsing first, then falls back to + extracting JSON array from surrounding text. + + Args: + text: Raw text that should contain a JSON array. + + Returns: + List of section dicts with 'title' and 'body' keys. 
+ """ + # Try direct parse + try: + data = json.loads(text) + if isinstance(data, list): + out: List[Dict[str, str]] = [] + for item in data: + if isinstance(item, dict): + out.append( + { + "title": str(item.get("title", "")).strip(), + "body": str(item.get("body", "")).strip(), + } + ) + return out + except Exception: + pass + + # Try to extract JSON array from text + m = re.search(r"\[\s*\{[\s\S]*\}\s*\]", text) + if m: + try: + data = json.loads(m.group(0)) + if isinstance(data, list): + out: List[Dict[str, str]] = [] + for item in data: + if isinstance(item, dict): + out.append( + { + "title": str(item.get("title", "")).strip(), + "body": str(item.get("body", "")).strip(), + } + ) + return out + except Exception: + pass + return [] + + +def normalize_sections(sections: List[Dict[str, str]]) -> List[Dict[str, str]]: + """Normalize text encoding with ftfy (fixes mojibake, etc.).""" + norm: List[Dict[str, str]] = [] + for s in sections: + title = fix_text((s.get("title") or "").strip()) + body = fix_text((s.get("body") or "").strip()) + norm.append({"title": title, "body": body}) + return norm + + +def merge_duplicate_titles(sections: List[Dict[str, str]]) -> List[Dict[str, str]]: + """Merge sections with duplicate titles while preserving order.""" + merged: "OrderedDict[str, str]" = OrderedDict() + + for s in sections: + title = s.get("title", "").strip() + body = (s.get("body", "") or "").strip() + + if title in merged: + if body: + prev = merged[title] + merged[title] = (prev + ("\n\n" if prev else "") + body).strip() + else: + merged[title] = body + + return [{"title": t, "body": b} for t, b in merged.items()] + + +def build_contact_section_from_filename(pdf_file: Path) -> Dict[str, str]: + """ + Create a simple 'Adresse' section based on the PDF filename. + + Useful as a fallback when contact info isn't parsed from the document. 
+ """ + stem = pdf_file.stem.replace("_", " ").strip() + tokens = stem.split(maxsplit=1) + if tokens and len(tokens[0]) == 1 and tokens[0].isalpha(): + stem = tokens[1] if len(tokens) > 1 else "" + name = stem.strip() or pdf_file.name + return {"title": "Adresse", "body": f"Name: {name}"} + + +def process_section(section: Dict[str, str]) -> Dict[str, str]: + """Normalize bullets and tag contact info for a single section.""" + title = section.get("title", "") + body = section.get("body", "") + return { + "title": tag_contacts(normalize_bullets(title)), + "body": tag_contacts(normalize_bullets(body)), + } + + +def apply_postprocessing(sections: List[Dict[str, str]]) -> List[Dict[str, str]]: + """Apply bullet normalization and contact tagging to all sections.""" + return [process_section(s) for s in sections] + diff --git a/src/doc_parser/utils/text.py b/src/doc_parser/utils/text.py new file mode 100644 index 0000000000000000000000000000000000000000..84951075b241f792513ae077eb0056f022655974 --- /dev/null +++ b/src/doc_parser/utils/text.py @@ -0,0 +1,50 @@ +"""Text processing utilities for document parsing.""" + +import re +from typing import List + + +# Regex patterns for contact detection +EMAIL_RE = re.compile(r"(?i)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}\b") +PHONE_RE = re.compile(r"(?:(?<=\s)|^)(\+\d{1,3}[\s()./-]?)?(?:\d[\s()/.-]?){6,}\d(?=\s|$)") +URL_RE = re.compile(r"(?i)\b(?:https?://|www\.)[^\s<>'\"]+\.[^\s<>'\"]+") + +# Bullet characters to normalize +_BULLET_CHARS = {"•", "·", "-", "–", "—", "▪", "◦", "‣", "●", "○", ""} + + +def normalize_bullets(text: str) -> str: + """Coerce common bullet characters to '- ' while keeping numbering.""" + lines = text.splitlines() + normalized: List[str] = [] + + for line in lines: + stripped = line.lstrip() + if not stripped: + normalized.append(line) + continue + + # Keep numbered lists as-is + if re.match(r"^\d+[\.)]\s+", stripped): + normalized.append(line) + continue + + first = stripped[0] + if first in _BULLET_CHARS or stripped.startswith(("- ", "* ")): + content = re.sub( + r"^([\-\*\u2022\u2023\u2043\u2219\u25E6\u25AA\u25CB\u25CF\u25A0]+\s+)", "", stripped + ) + normalized.append(f"- {content.strip()}") + else: + normalized.append(line) + + return "\n".join(normalized) + + +def tag_contacts(text: str) -> str: + """Wrap detected email/phone/URL values with simple tags.""" + tagged = EMAIL_RE.sub(lambda m: f"[EMAIL]{m.group(0)}[/EMAIL]", text) + tagged = PHONE_RE.sub(lambda m: f"[PHONE]{m.group(0)}[/PHONE]", tagged) + tagged = URL_RE.sub(lambda m: f"[URL]{m.group(0)}[/URL]", tagged) + return tagged + diff --git a/src/frontend/__init__.py b/src/frontend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6f0cf062a9674c70467ad28149264f3a491decae --- /dev/null +++ b/src/frontend/__init__.py @@ -0,0 +1,2 @@ +"""Frontend UI modules.""" + diff --git a/src/frontend/frontend/.gitignore b/src/frontend/frontend/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..1403e903b3d3d8652eba79794f22a19fe5ab654d --- /dev/null +++ b/src/frontend/frontend/.gitignore @@ -0,0 +1,36 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
+ +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# local env files +.env*.local + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts + diff --git a/src/frontend/frontend/README.md b/src/frontend/frontend/README.md new file mode 100644 index 0000000000000000000000000000000000000000..28191e7d46126fe4036ec77a5f93381c92e56916 --- /dev/null +++ b/src/frontend/frontend/README.md @@ -0,0 +1,68 @@ +# CV UI Frontend + +Next.js frontend for the CV Upload Stage of the Recruitment Agent system. + +## Overview + +This frontend provides a candidate-friendly interface for submitting job applications. It connects to the Gradio backend using Gradio's JavaScript client. + +## Features + +- **Candidate Application Form**: Collects full name, email, phone, and CV file +- **Job Description Display**: Expandable job description section +- **Real-time Status**: Connection status and submission feedback +- **File Upload**: Supports PDF and DOCX file formats +- **Form Validation**: Client-side validation before submission + +## Setup + +1. Install dependencies: + +```bash +npm install +``` + +2. Create a `.env.local` file (optional, defaults to `http://localhost:7860`): + +``` +NEXT_PUBLIC_GRADIO_URL=http://localhost:7860 +``` + +3. Run the development server: + +```bash +npm run dev +``` + +The app will be available at `http://localhost:3000` + +## Usage + +1. Make sure the Gradio backend is running (see `src/gradio/README.md`) +2. Open the Next.js app in your browser +3. Fill out the application form +4. Upload your CV (PDF or DOCX) +5. Submit the application + +## Building for Production + +```bash +npm run build +npm start +``` + +## Integration + +This frontend integrates with: + +- **Gradio Backend** (`src/gradio/app.py`) - Handles CV upload and candidate registration +- **CV Utilities** (`src/cv_ui/utils/`) - File saving and database operations + +## Architecture + +Based on the CV Upload Stage requirements from `info.md`: + +- Entry point for candidates into the system +- Collects candidate information and CV files +- Registers candidates in the database with status "applied" +- Triggers automatic CV parsing after registration diff --git a/src/frontend/frontend/app/candidate/page.tsx b/src/frontend/frontend/app/candidate/page.tsx new file mode 100644 index 0000000000000000000000000000000000000000..2081ba21d6852e0c04d33fa874682a92dddba6f2 --- /dev/null +++ b/src/frontend/frontend/app/candidate/page.tsx @@ -0,0 +1,515 @@ +'use client' + +import { useState, useEffect, FormEvent, ChangeEvent } from 'react' +import { CVUploadClient, DatabaseClient } from '@/lib/sdk' + +interface FormData { + fullName: string + email: string + phone: string + cvFile: File | null +} + +export default function CandidatePortal() { + const [cvClient] = useState(() => new CVUploadClient()) + const [dbClient] = useState(() => new DatabaseClient()) + const [connected, setConnected] = useState(false) + const [loading, setLoading] = useState(false) + const [formData, setFormData] = useState({ + fullName: '', + email: '', + phone: '', + cvFile: null, + }) + const [message, setMessage] = useState<{ type: 'success' | 'error' | 'info' | 'warning'; text: string } | null>(null) + const [showJobDescription, setShowJobDescription] = useState(false) + const [applicationStatus, setApplicationStatus] = useState(null) + + // Check API health on mount + useEffect(() => { 
+ const checkHealth = async () => { + try { + const isHealthy = await cvClient.health() + setConnected(isHealthy) + } catch (error) { + console.error('Failed to check API health:', error) + setConnected(false) + } + } + checkHealth() + }, [cvClient]) + + const handleInputChange = (e: ChangeEvent) => { + const { name, value } = e.target + setFormData(prev => ({ ...prev, [name]: value })) + } + + const handleFileChange = (e: ChangeEvent) => { + const file = e.target.files?.[0] || null + setFormData(prev => ({ ...prev, cvFile: file })) + } + + const checkStatus = async () => { + if (!formData.email) return + + try { + setApplicationStatus('Checking status...') + const response = await dbClient.getCandidateByEmail(formData.email, true) + + if (response.success && response.data) { + const candidate = response.data + const status = candidate.status || 'unknown' + const appliedDate = candidate.created_at + ? new Date(candidate.created_at).toLocaleDateString() + : 'N/A' + + let statusText = `Application Status: ${status}\n` + statusText += `Applied: ${appliedDate}\n` + + if (candidate.cv_screening_results && candidate.cv_screening_results.length > 0) { + const latestScreening = candidate.cv_screening_results[0] + statusText += `CV Screening Score: ${(latestScreening.overall_fit_score * 100).toFixed(1)}%\n` + } + + if (candidate.voice_screening_results && candidate.voice_screening_results.length > 0) { + statusText += `Voice Screening: Completed\n` + } + + if (candidate.interview_scheduling && candidate.interview_scheduling.length > 0) { + const interview = candidate.interview_scheduling[0] + statusText += `Interview: ${interview.status || 'Scheduled'}\n` + } + + if (candidate.final_decision) { + statusText += `Final Decision: ${candidate.final_decision.decision || 'Pending'}\n` + } + + setApplicationStatus(statusText) + } else { + setApplicationStatus(`No application found for ${formData.email}. Please submit an application first.`) + } + } catch (error: any) { + setApplicationStatus(`Error: ${error.message || 'Failed to check status'}`) + } + } + + const handleSubmit = async (e: FormEvent) => { + e.preventDefault() + setMessage(null) + + // Validation + if (!formData.fullName || !formData.email) { + setMessage({ type: 'error', text: 'Full name and email are required.' }) + return + } + + if (!formData.cvFile) { + setMessage({ type: 'error', text: 'Please upload your CV before submitting.' }) + return + } + + // Check file type + const fileExt = formData.cvFile.name.split('.').pop()?.toLowerCase() + if (fileExt !== 'pdf' && fileExt !== 'docx') { + setMessage({ type: 'error', text: 'Please upload a PDF or DOCX file.' }) + return + } + + if (!connected) { + setMessage({ type: 'error', text: 'Backend not connected. Please try again later.' }) + return + } + + setLoading(true) + setMessage({ type: 'info', text: '💾 Registering your application...' }) + + try { + const response = await cvClient.submit( + formData.fullName, + formData.email, + formData.cvFile, + formData.cvFile.name, + formData.phone || '' + ) + + if (response.success) { + setMessage({ + type: 'success', + text: `✅ ${response.message || `Application submitted successfully for ${formData.fullName}! Your application has been recorded. 
You will receive updates soon.`}` + }) + // Reset form + setFormData({ + fullName: '', + email: '', + phone: '', + cvFile: null, + }) + // Reset file input + const fileInput = document.getElementById('cvFile') as HTMLInputElement + if (fileInput) fileInput.value = '' + } else if (response.already_exists) { + setMessage({ + type: 'warning', + text: `⚠️ ${response.message || `An application with ${formData.email} already exists. You can only apply once — please wait for review.`}` + }) + } else { + setMessage({ type: 'error', text: response.message || 'Failed to submit application' }) + } + } catch (error: any) { + setMessage({ type: 'error', text: `Failed to submit application: ${error.message || 'Unknown error'}` }) + console.error('Submission error:', error) + } finally { + setLoading(false) + } + } + + const getMessageStyle = () => { + if (!message) return {} + const baseStyle = { + padding: '1rem', + borderRadius: '8px', + marginBottom: '1.5rem', + fontWeight: '500' as const, + } + switch (message.type) { + case 'success': + return { ...baseStyle, background: '#d1fae5', color: '#065f46', border: '1px solid #10b981' } + case 'error': + return { ...baseStyle, background: '#fee2e2', color: '#991b1b', border: '1px solid #ef4444' } + case 'warning': + return { ...baseStyle, background: '#fef3c7', color: '#92400e', border: '1px solid #f59e0b' } + case 'info': + return { ...baseStyle, background: '#dbeafe', color: '#1e40af', border: '1px solid #3b82f6' } + default: + return baseStyle + } + } + + return ( +
+    <main>
+      {/* Header */}
+      <header>
+        <h1>🤖 AI Engineer Job Application Portal</h1>
+        <p>
+          Welcome to ScionHire AI Labs 👋<br />
+          We're seeking talented engineers passionate about building intelligent systems!<br />
+          Please submit your CV below to apply for the AI Engineer position.
+        </p>
+      </header>
+
+      {/* Connection Status */}
+      {!connected && (
+        <div role="alert">
+          ⚠️ Backend not connected. Some features may not work.
+        </div>
+      )}
+
+      {/* Job Description */}
+      <section>
+        <button onClick={() => setShowJobDescription(!showJobDescription)}>
+          {showJobDescription ? 'Hide' : 'View'} Job Description
+        </button>
+        {showJobDescription && (
+          <div>
+            <h2>🧠 Position: AI Engineer</h2>
+            <p><strong>Location:</strong> Remote / Wiesbaden HQ</p>
+            <p><strong>About the Role:</strong></p>
+            <p>
+              Join our AI R&D team to develop, fine-tune, and deploy ML models for production.
+              You will work on projects involving LLMs, LangGraph agents, and context engineering.
+            </p>
+            <p><strong>Requirements:</strong></p>
+            <ul>
+              <li>Proficiency in Python & modern AI frameworks (PyTorch, LangChain, etc.)</li>
+              <li>Solid understanding of NLP and ML pipelines</li>
+              <li>Experience deploying models or building intelligent systems</li>
+              <li>Strong communication and teamwork skills</li>
+            </ul>
+          </div>
+        )}
+      </section>
+
+      {/* Application Form */}
+      <form onSubmit={handleSubmit}>
+        <label htmlFor="fullName">Full Name *</label>
+        <input id="fullName" name="fullName" value={formData.fullName} onChange={handleInputChange} />
+
+        <label htmlFor="email">Email *</label>
+        <input id="email" name="email" type="email" value={formData.email} onChange={handleInputChange} />
+
+        <label htmlFor="phone">Phone</label>
+        <input id="phone" name="phone" value={formData.phone} onChange={handleInputChange} />
+
+        <label htmlFor="cvFile">CV (PDF or DOCX) *</label>
+        <input id="cvFile" type="file" accept=".pdf,.docx" onChange={handleFileChange} />
+        {formData.cvFile && (
+          <div>Selected: {formData.cvFile.name}</div>
+        )}
+
+        {message && (
+          <div style={getMessageStyle()}>{message.text}</div>
+        )}
+
+        <button type="submit" disabled={loading || !connected}>
+          {loading ? 'Submitting…' : 'Submit Application'}
+        </button>
+      </form>
+
+      {/* Check Status Section */}
+      <section>
+        <h2>📊 Check Application Status</h2>
+        <button onClick={checkStatus} disabled={!formData.email}>
+          Check Status
+        </button>
+        {applicationStatus && (
+          <pre>{applicationStatus}</pre>
+        )}
+      </section>
+    </main>
+ ) +} + diff --git a/src/frontend/frontend/app/chat/page.tsx b/src/frontend/frontend/app/chat/page.tsx new file mode 100644 index 0000000000000000000000000000000000000000..c9e47939f3094742a26a0ce7b76eed603dc5fea1 --- /dev/null +++ b/src/frontend/frontend/app/chat/page.tsx @@ -0,0 +1,392 @@ +'use client' + +import { useState, useEffect, useRef, FormEvent } from 'react' +import { SupervisorClient } from '@/lib/sdk' + +interface ChatMessage { + id: string + role: 'user' | 'assistant' + content: string + timestamp: string + tokenCount?: number +} + +export default function SupervisorChat() { + const [supervisorClient] = useState(() => new SupervisorClient()) + const [connected, setConnected] = useState(false) + const [chatMessages, setChatMessages] = useState([]) + const [chatInput, setChatInput] = useState('') + const [sendingMessage, setSendingMessage] = useState(false) + const [threadId, setThreadId] = useState(null) + const [isStreaming, setIsStreaming] = useState(false) + const [totalTokens, setTotalTokens] = useState(0) + const messagesEndRef = useRef(null) + + // Check API health and initialize chat + useEffect(() => { + const initChat = async () => { + try { + const isHealthy = await supervisorClient.health() + setConnected(isHealthy) + + if (isHealthy) { + // Create new chat session + const newThreadId = await supervisorClient.newChat() + setThreadId(newThreadId) + + // Initialize with welcome message + setChatMessages([ + { + id: '1', + role: 'assistant', + content: "Hello! I'm the HR Supervisor Agent. I can help you with:\n\n• Querying candidate information\n• Screening CVs\n• Scheduling interviews\n• Managing the recruitment pipeline\n• Answering questions about candidates\n\nWhat would you like to know?", + timestamp: new Date().toISOString(), + }, + ]) + } + } catch (error) { + console.error('Failed to initialize chat:', error) + setConnected(false) + } + } + initChat() + }, [supervisorClient]) + + // Auto-scroll to bottom when new messages arrive + useEffect(() => { + messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' }) + }, [chatMessages]) + + const handleSendMessage = async (e: FormEvent) => { + e.preventDefault() + if (!chatInput.trim() || sendingMessage || !connected) return + + const userMessage: ChatMessage = { + id: Date.now().toString(), + role: 'user', + content: chatInput, + timestamp: new Date().toISOString(), + } + + setChatMessages(prev => [...prev, userMessage]) + const currentInput = chatInput + setChatInput('') + setSendingMessage(true) + setIsStreaming(false) + + try { + // Use batch chat (more reliable than streaming) + const response = await supervisorClient.chat( + currentInput, + threadId || undefined + ) + + const assistantMessage: ChatMessage = { + id: (Date.now() + 1).toString(), + role: 'assistant', + content: response.content, + timestamp: new Date().toISOString(), + tokenCount: response.token_count, + } + + setChatMessages(prev => [...prev, assistantMessage]) + + // Update total token count + if (response.token_count) { + setTotalTokens(prev => prev + response.token_count) + } + + // Update thread ID if we got a new one + if (response.thread_id && response.thread_id !== threadId) { + setThreadId(response.thread_id) + } + } catch (error: any) { + const errorMessage: ChatMessage = { + id: (Date.now() + 1).toString(), + role: 'assistant', + content: `❌ Error: ${error.message || 'Failed to get response from supervisor agent. 
Please try again.'}`, + timestamp: new Date().toISOString(), + } + setChatMessages(prev => [...prev, errorMessage]) + console.error('Failed to send message:', error) + } finally { + setSendingMessage(false) + setIsStreaming(false) + } + } + + const handleNewChat = async () => { + try { + const newThreadId = await supervisorClient.newChat() + setThreadId(newThreadId) + setTotalTokens(0) // Reset token count for new chat + setChatMessages([ + { + id: '1', + role: 'assistant', + content: "New chat session started! How can I help you today?", + timestamp: new Date().toISOString(), + }, + ]) + } catch (error: any) { + alert(`Failed to create new chat: ${error.message || 'Unknown error'}`) + } + } + + return ( +
+    <main>
+      {/* Header */}
+      <header>
+        <div>
+          <h1>🤖 Supervisor Agent Chat</h1>
+          <p>Interact with the HR Supervisor Agent to manage recruitment</p>
+        </div>
+        <div>
+          <button onClick={handleNewChat}>New Chat</button>
+          {threadId && (
+            <span>Thread: {threadId}</span>
+          )}
+          {totalTokens > 0 && (
+            <span>📊 Total: {totalTokens.toLocaleString()} tokens</span>
+          )}
+        </div>
+      </header>
+
+      {!connected && (
+        <div role="alert">
+          ⚠️ Supervisor API not connected. Please make sure the backend is running.
+        </div>
+      )}
+
+      {/* Chat Interface */}
+      <section>
+        {/* Chat Messages */}
+        <div>
+          {chatMessages.map((message) => (
+            <div key={message.id}>
+              <div>{message.content}</div>
+              <div>
+                {new Date(message.timestamp).toLocaleTimeString()}
+                {message.tokenCount && (
+                  <span>{message.tokenCount.toLocaleString()} tokens</span>
+                )}
+              </div>
+            </div>
+          ))}
+          {sendingMessage && (
+            <div>Agent is thinking...</div>
+          )}
+          <div ref={messagesEndRef} />
+        </div>
+
+        {/* Chat Input */}
+        <form onSubmit={handleSendMessage}>
+          <input
+            value={chatInput}
+            onChange={(e) => setChatInput(e.target.value)}
+            placeholder="Ask about candidates, schedule interviews, or query the database..."
+            disabled={sendingMessage || !connected}
+          />
+          <button type="submit" disabled={sendingMessage || !connected || !chatInput.trim()}>
+            Send
+          </button>
+        </form>
+
+        {/* Helper text */}
+        <div>
+          💡 Tip: Try asking "Show me all candidates", "What's the status of [email]?", or "Schedule an interview for [candidate name]"
+        </div>
+      </section>
+    </main>
+ ) +} + diff --git a/src/frontend/frontend/app/dashboard/[candidateId]/page.tsx b/src/frontend/frontend/app/dashboard/[candidateId]/page.tsx new file mode 100644 index 0000000000000000000000000000000000000000..3ccecac3a7e98e7be19e5e4572bd969d6bb86c9c --- /dev/null +++ b/src/frontend/frontend/app/dashboard/[candidateId]/page.tsx @@ -0,0 +1,692 @@ +'use client' + +import { useState, useEffect } from 'react' +import { useParams, useRouter } from 'next/navigation' +import { DatabaseClient } from '@/lib/sdk' + +interface Candidate { + id: string + name: string + email: string + phone?: string + status: string + appliedAt: string + position?: string + plan?: PlanStep[] + reasoningLog?: ReasoningLog[] + cvScreeningResults?: any[] + voiceScreeningResults?: any[] + interviewScheduling?: any[] + finalDecision?: any +} + +interface PlanStep { + id: string + step: string + status: 'completed' | 'in_progress' | 'pending' + timestamp?: string +} + +interface ReasoningLog { + timestamp: string + message: string + type?: 'info' | 'success' | 'warning' | 'error' +} + +export default function CandidateDashboard() { + const params = useParams() + const router = useRouter() + const candidateId = params.candidateId as string + const [dbClient] = useState(() => new DatabaseClient()) + + const [candidate, setCandidate] = useState(null) + const [loading, setLoading] = useState(true) + const [expandedStep, setExpandedStep] = useState(null) + const [hoveredStep, setHoveredStep] = useState(null) + + useEffect(() => { + const loadCandidate = async () => { + try { + setLoading(true) + const response = await dbClient.getCandidate(candidateId, true) + + if (!response.success || !response.data) { + router.push('/hr') + return + } + + const c = response.data + + // Build plan steps from candidate data + const plan: PlanStep[] = [] + + // Step 1: CV Screening + if (c.cv_screening_results && c.cv_screening_results.length > 0) { + plan.push({ + id: '1', + step: 'Screen CVs', + status: 'completed', + timestamp: c.cv_screening_results[0].timestamp + ? new Date(c.cv_screening_results[0].timestamp).toLocaleTimeString() + : undefined, + }) + } else { + plan.push({ + id: '1', + step: 'Screen CVs', + status: c.status === 'applied' ? 'in_progress' : 'pending', + }) + } + + // Step 2: Voice Screening + if (c.voice_screening_results && c.voice_screening_results.length > 0) { + plan.push({ + id: '2', + step: 'Conduct voice screening', + status: 'completed', + timestamp: c.voice_screening_results[0].timestamp + ? new Date(c.voice_screening_results[0].timestamp).toLocaleTimeString() + : undefined, + }) + } else if (c.status === 'cv_passed' || c.status === 'cv_screened') { + plan.push({ + id: '2', + step: 'Conduct voice screening', + status: 'pending', + }) + } + + // Step 3: Interview Scheduling + if (c.interview_scheduling && c.interview_scheduling.length > 0) { + plan.push({ + id: '3', + step: 'Schedule HR interview', + status: 'completed', + timestamp: c.interview_scheduling[0].timestamp + ? new Date(c.interview_scheduling[0].timestamp).toLocaleTimeString() + : undefined, + }) + } else if (c.status === 'voice_passed' || c.status === 'voice_done') { + plan.push({ + id: '3', + step: 'Schedule HR interview', + status: 'pending', + }) + } + + // Step 4: Final Decision + if (c.final_decision) { + plan.push({ + id: '4', + step: 'Final decision', + status: 'completed', + timestamp: c.final_decision.timestamp + ? 
new Date(c.final_decision.timestamp).toLocaleTimeString() + : undefined, + }) + } else if (c.status === 'interview_scheduled') { + plan.push({ + id: '4', + step: 'Final decision', + status: 'pending', + }) + } + + // Build reasoning log from screening results + const reasoningLog: ReasoningLog[] = [] + + if (c.cv_screening_results && c.cv_screening_results.length > 0) { + const cvResult = c.cv_screening_results[0] + reasoningLog.push({ + timestamp: cvResult.timestamp + ? new Date(cvResult.timestamp).toLocaleTimeString() + : new Date().toLocaleTimeString(), + message: `CV Screening completed. Overall fit score: ${((cvResult.overall_fit_score || 0) * 10).toFixed(1)}/10. ${cvResult.llm_feedback || ''}`, + type: (cvResult.overall_fit_score || 0) > 0.7 ? 'success' : 'info', + }) + } + + if (c.voice_screening_results && c.voice_screening_results.length > 0) { + const voiceResult = c.voice_screening_results[0] + reasoningLog.push({ + timestamp: voiceResult.timestamp + ? new Date(voiceResult.timestamp).toLocaleTimeString() + : new Date().toLocaleTimeString(), + message: `Voice screening completed. Communication score: ${((voiceResult.communication_score || 0) * 10).toFixed(1)}/10. ${voiceResult.llm_summary || ''}`, + type: 'success', + }) + } + + const transformedCandidate: Candidate = { + id: c.id, + name: c.full_name || 'Unknown', + email: c.email || '', + phone: c.phone_number || '', + status: c.status || 'unknown', + appliedAt: c.created_at || new Date().toISOString(), + position: 'AI Engineer', + plan, + reasoningLog, + cvScreeningResults: c.cv_screening_results, + voiceScreeningResults: c.voice_screening_results, + interviewScheduling: c.interview_scheduling, + finalDecision: c.final_decision, + } + + setCandidate(transformedCandidate) + } catch (error) { + console.error('Failed to load candidate:', error) + router.push('/hr') + } finally { + setLoading(false) + } + } + + if (candidateId) { + loadCandidate() + } + }, [candidateId, router, dbClient]) + + const getStatusIcon = (status: string) => { + switch (status) { + case 'completed': return '✅' + case 'in_progress': return '🔄' + case 'pending': return '⬜' + default: return '⬜' + } + } + + const getStatusColor = (status: string) => { + switch (status) { + case 'completed': return '#10b981' + case 'in_progress': return '#3b82f6' + case 'pending': return '#6b7280' + default: return '#6b7280' + } + } + + const getLogTypeColor = (type?: string) => { + switch (type) { + case 'success': return '#10b981' + case 'error': return '#ef4444' + case 'warning': return '#f59e0b' + default: return '#3b82f6' + } + } + + const getStepDetails = (stepName: string, candidate: Candidate) => { + const details: Record = { + 'Screen CVs': { + description: `The agent analyzed ${candidate.name}'s CV and evaluated their qualifications against the job requirements.`, + actions: [ + 'Extracted skills and experience from CV', + 'Scored candidate against job requirements', + 'Identified key strengths and potential gaps', + ], + }, + 'Invite for voice screening': { + description: `An invitation email was sent to ${candidate.email} to schedule a voice screening interview.`, + actions: [ + 'Generated personalized invitation email', + 'Sent email via Gmail integration', + 'Awaiting candidate response', + ], + }, + 'Conduct voice screening': { + description: `The voice screening interview is being conducted with ${candidate.name} to assess communication skills and technical knowledge.`, + actions: [ + 'Initiated automated voice call', + 'Conducted structured interview', + 
'Analyzing responses and generating evaluation', + ], + }, + 'Schedule HR interview': { + description: `Based on the voice screening results, the agent is scheduling a final HR interview with ${candidate.name}.`, + actions: [ + 'Checked HR calendar availability', + 'Proposed interview time slots', + 'Sending calendar invitation', + ], + }, + 'Await HR decision': { + description: `The final decision is pending HR review. All screening stages have been completed for ${candidate.name}.`, + actions: [ + 'Compiled candidate evaluation report', + 'Submitted to HR for final review', + 'Awaiting hiring decision', + ], + }, + } + return details[stepName] || { description: `Processing step: ${stepName}` } + } + + if (loading || !candidate) { + return ( +
+
+

Loading candidate data...

+
+
+    )
+  }
+
+  return (
+
+ {/* Header */} +
+
+
+

+ 🧠 Agent Dashboard +

+

+ Managing application for {candidate.name} ({candidate.email}) +

+
+ +
+
+ + +
+ {/* Agent Plan Viewer */} +
+
+

+ 🗺️ Live Plan Progress +

+
+ {candidate.plan?.filter(s => s.status === 'completed').length || 0} / {candidate.plan?.length || 0} completed +
+
+ + {/* Progress Bar */} +
+
+
+                <div style={{
+                  width: `${((candidate.plan?.filter(s => s.status === 'completed').length || 0) / (candidate.plan?.length || 1)) * 100}%`,
+                  height: '100%',
+                  background: 'linear-gradient(90deg, #10b981 0%, #3b82f6 100%)',
+                  borderRadius: '4px',
+                  transition: 'width 0.3s ease',
+                }} />
+
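The fill element above computes its width from the fraction of completed plan steps. A minimal standalone sketch of that calculation, assuming the `PlanStep` type exported from this repo's `data/dummyCandidates.ts` and the `@/` alias used elsewhere in these imports:

```typescript
import type { PlanStep } from '@/data/dummyCandidates'  // assumed alias path

// Percentage of plan steps marked completed; an empty or missing plan maps to 0.
export function planProgressPercent(plan: PlanStep[] = []): number {
  if (plan.length === 0) return 0
  const completed = plan.filter(s => s.status === 'completed').length
  return (completed / plan.length) * 100
}

// e.g. <div style={{ width: `${planProgressPercent(candidate.plan)}%` }} />
```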
+ + {/* Interactive Timeline */} +
+ {/* Vertical line */} +
+ +
+              {candidate.plan?.map((step, index) => {
+                const isExpanded = expandedStep === step.id
+                const isHovered = hoveredStep === step.id
+                const stepDetails = getStepDetails(step.step, candidate)
+
+                return (
+                  <div
+                    key={step.id}
+                    onClick={() => setExpandedStep(isExpanded ? null : step.id)}
+                    onMouseEnter={() => setHoveredStep(step.id)}
+                    onMouseLeave={() => setHoveredStep(null)}
+                    style={{
+                      position: 'relative',
+                      padding: '1.25rem',
+                      background: isHovered
+                        ? (step.status === 'completed' ? '#d1fae5' : step.status === 'in_progress' ? '#dbeafe' : '#f9fafb')
+                        : '#f9fafb',
+                      borderRadius: '12px',
+                      border: `2px solid ${isHovered ? getStatusColor(step.status) : '#e5e7eb'}`,
+                      cursor: 'pointer',
+                      transition: 'all 0.2s ease',
+                      transform: isHovered ? 'translateX(4px)' : 'translateX(0)',
+                      boxShadow: isHovered ? '0 4px 12px rgba(0, 0, 0, 0.1)' : 'none',
+                    }}
+                  >
+                    {/* Step indicator */}
+ {step.status === 'completed' ? '✓' : step.status === 'in_progress' ? '⟳' : index + 1} +
+ +
+
+ {getStatusIcon(step.status)} +
+
+
+

+ {step.step} +

+ {step.timestamp && ( + + {step.timestamp} + + )} +
+ + {/* Status badge */} +
+ + {step.status.replace('_', ' ')} + + {step.status === 'in_progress' && ( + + + Active + + )} +
+ + {/* Expandable details */} + {isExpanded && stepDetails && ( +
+

+ Details: +

+

+ {stepDetails.description} +

+ {stepDetails.actions && stepDetails.actions.length > 0 && ( +
+

+ Actions taken: +

+
    + {stepDetails.actions.map((action, idx) => ( +
  • + {action} +
  • + ))} +
+
+ )} +
+ )} + + {/* Hover hint */} + {isHovered && !isExpanded && ( +

+ Click to see details +

+ )} +
+
+
+ ) + })} +
+
+
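Each card in this timeline expands into the description and action list returned by `getStepDetails`. A trimmed sketch of that name-keyed lookup with one illustrative entry (the full texts appear in the component above):

```typescript
interface StepDetails {
  description: string
  actions?: string[]
}

// Name-keyed lookup with a generic fallback, mirroring getStepDetails above.
function detailsFor(stepName: string, candidateName: string): StepDetails {
  const details: Record<string, StepDetails> = {
    'Screen CVs': {
      description: `The agent analyzed ${candidateName}'s CV against the job requirements.`,
      actions: [
        'Extracted skills and experience from CV',
        'Scored candidate against job requirements',
      ],
    },
  }
  return details[stepName] || { description: `Processing step: ${stepName}` }
}
```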
+ + {/* Reasoning Log */} +
+

+ 💬 Reasoning Log / Memory +

+
+ {candidate.reasoningLog?.map((log, index) => ( +
+

+ {log.timestamp} +

+

+ {log.message} +

+
+ ))} +
+
+
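The entries rendered in this panel are assembled from screening rows near the top of the component. A condensed sketch of the CV-result mapping; the row interface below only lists the fields that code actually reads:

```typescript
import type { ReasoningLog } from '@/data/dummyCandidates'  // assumed alias path

interface CvScreeningRow {
  timestamp?: string
  overall_fit_score?: number  // stored on a 0..1 scale
  llm_feedback?: string
}

// Mirrors the transformation above: scale the 0..1 score to a /10 display value.
function cvLogEntry(row: CvScreeningRow): ReasoningLog {
  const score = ((row.overall_fit_score || 0) * 10).toFixed(1)
  return {
    timestamp: row.timestamp
      ? new Date(row.timestamp).toLocaleTimeString()
      : new Date().toLocaleTimeString(),
    message: `CV Screening completed. Overall fit score: ${score}/10. ${row.llm_feedback || ''}`,
    type: (row.overall_fit_score || 0) > 0.7 ? 'success' : 'info',
  }
}
```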
+ +
+
+ ) +} + diff --git a/src/frontend/frontend/app/globals.css b/src/frontend/frontend/app/globals.css new file mode 100644 index 0000000000000000000000000000000000000000..845e05f9d5582d4b0be3874b39f7419e12a3b681 --- /dev/null +++ b/src/frontend/frontend/app/globals.css @@ -0,0 +1,48 @@ +* { + box-sizing: border-box; + padding: 0; + margin: 0; +} + +html, +body { + max-width: 100vw; + overflow-x: hidden; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', + 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', + sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + min-height: 100vh; + color: #333; +} + +a { + color: inherit; + text-decoration: none; +} + +@keyframes pulse { + 0%, 100% { + opacity: 1; + } + 50% { + opacity: 0.5; + } +} + +@keyframes slideDown { + from { + opacity: 0; + transform: translateY(-10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + diff --git a/src/frontend/frontend/app/hr/page.tsx b/src/frontend/frontend/app/hr/page.tsx new file mode 100644 index 0000000000000000000000000000000000000000..94e1230907af8b1f1baa3df690661424b3aeb8c6 --- /dev/null +++ b/src/frontend/frontend/app/hr/page.tsx @@ -0,0 +1,505 @@ +'use client' + +import { useState, useEffect } from 'react' +import { useRouter } from 'next/navigation' +import { DatabaseClient, SupervisorClient, VoiceScreeningClient } from '@/lib/sdk' + +interface Candidate { + id: string + name: string + email: string + phone?: string + status: string + appliedAt: string + position?: string + skills?: string[] + experience?: string + education?: string + hasVoiceScreening?: boolean + authCode?: string +} + +export default function HRPortal() { + const router = useRouter() + const [dbClient] = useState(() => new DatabaseClient()) + const [supervisorClient] = useState(() => new SupervisorClient()) + const [voiceScreeningClient] = useState(() => new VoiceScreeningClient()) + const [connected, setConnected] = useState(false) + const [candidates, setCandidates] = useState([]) + const [loading, setLoading] = useState(false) + const [selectedCandidate, setSelectedCandidate] = useState(null) + + useEffect(() => { + const initClients = async () => { + try { + const [dbHealthy, supervisorHealthy] = await Promise.all([ + dbClient.health(), + supervisorClient.health() + ]) + setConnected(dbHealthy && supervisorHealthy) + + // Load candidates from database + if (dbHealthy) { + loadCandidates() + } + } catch (error) { + console.error('Failed to check API health:', error) + setConnected(false) + } + } + initClients() + }, [dbClient, supervisorClient]) + + const loadCandidates = async () => { + try { + setLoading(true) + // Load candidates with relations to check for voice screening + const response = await dbClient.getCandidates(undefined, 100, 0, true) + if (response.success) { + // Transform database response to Candidate format + const transformedCandidates: Candidate[] = response.data.map((c: any) => ({ + id: c.id, + name: c.full_name || 'Unknown', + email: c.email || '', + phone: c.phone_number || '', + status: c.status || 'unknown', + appliedAt: c.created_at || new Date().toISOString(), + position: 'AI Engineer', // Default position + hasVoiceScreening: c.voice_screening_results && c.voice_screening_results.length > 0, + authCode: c.auth_code || undefined, + })) + setCandidates(transformedCandidates) + } + } catch (error) { + console.error('Failed to load 
candidates:', error) + } finally { + setLoading(false) + } + } + + const triggerVoiceScreening = async (candidate: Candidate) => { + if (!connected) return + setLoading(true) + try { + // Create voice screening session using SDK + const sessionResponse = await voiceScreeningClient.createSession(candidate.id) + + // Use supervisor agent to notify about voice screening + const supervisorResponse = await supervisorClient.chat( + `Voice screening session created for candidate ${candidate.name} (${candidate.email}). Session ID: ${sessionResponse.session_id}. Please proceed with the voice screening process.` + ) + + const tokenInfo = supervisorResponse.token_count + ? `\n\n📊 Token usage: ${supervisorResponse.token_count.toLocaleString()} tokens` + : '' + + const authCodeInfo = candidate.authCode + ? `\n\n🔐 Authentication Code: ${candidate.authCode}\n(Candidate should use this code to access voice screening)` + : '' + + const voiceScreeningUrl = `http://localhost:8502?candidate_id=${candidate.id}` + + alert(`✅ Voice screening session created for ${candidate.name}!\n\nSession ID: ${sessionResponse.session_id}${authCodeInfo}\n\nVoice Screening URL: ${voiceScreeningUrl}\n\n${supervisorResponse.content}${tokenInfo}\n\nYou can now direct the candidate to complete the voice screening.`) + + // Reload candidates to get updated status + await loadCandidates() + } catch (error: any) { + alert(`Failed to trigger voice screening: ${error.message || 'Unknown error'}`) + console.error('Failed to trigger voice screening:', error) + } finally { + setLoading(false) + } + } + + const scheduleInterview = async (candidate: Candidate) => { + if (!connected) return + setLoading(true) + try { + // Use supervisor agent to schedule interview + const response = await supervisorClient.chat( + `Please schedule an interview for candidate ${candidate.name} (${candidate.email})` + ) + const tokenInfo = response.token_count + ? `\n\n📊 Token usage: ${response.token_count.toLocaleString()} tokens` + : '' + alert(`Interview scheduling initiated for ${candidate.name}\n\nAgent response: ${response.content}${tokenInfo}`) + // Reload candidates to get updated status + await loadCandidates() + } catch (error: any) { + alert(`Failed to schedule interview: ${error.message || 'Unknown error'}`) + console.error('Failed to schedule interview:', error) + } finally { + setLoading(false) + } + } + + const getStatusColor = (status: string) => { + switch (status) { + case 'applied': return '#f59e0b' + case 'cv_screened': return '#3b82f6' + case 'cv_passed': return '#10b981' + case 'cv_rejected': return '#ef4444' + case 'voice_invitation_sent': return '#8b5cf6' + case 'voice_done': return '#8b5cf6' + case 'voice_passed': return '#10b981' + case 'voice_rejected': return '#ef4444' + case 'interview_scheduled': return '#10b981' + case 'decision_made': return '#6366f1' + default: return '#6b7280' + } + } + + return ( +
+
+
+

+ 🧑‍💼 HR Portal +

+

+ Review shortlisted candidates, trigger voice screenings, and schedule interviews +

+
+ + {!connected && ( +
+ ⚠️ Backend not connected. Some features may not work. +
+ )} + + {/* Candidates Table */} +
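The banner above is driven by the health checks performed in `initClients`. A minimal standalone sketch of that gating, using the SDK clients this page already imports:

```typescript
import { DatabaseClient, SupervisorClient } from '@/lib/sdk'

// True only when both backing services answer their /health endpoints.
async function backendsHealthy(): Promise<boolean> {
  const [dbHealthy, supervisorHealthy] = await Promise.all([
    new DatabaseClient().health(),
    new SupervisorClient().health(),
  ])
  return dbHealthy && supervisorHealthy
}
```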
+

+ 📋 Candidate List +

+ +
+ + + + + + + + + + + + {candidates.map((candidate) => ( + { + e.currentTarget.style.background = '#f9fafb' + }} + onMouseLeave={(e) => { + e.currentTarget.style.background = 'white' + }} + onClick={() => setSelectedCandidate(candidate)} + > + + + + + + + ))} + +
+                  <th>Name</th>
+                  <th>Email</th>
+                  <th>Status</th>
+                  <th>Applied</th>
+                  <th>Actions</th>
+                    <td>{candidate.name}</td>
+                    <td>{candidate.email}</td>
+                    <td>{candidate.status}</td>
+                    <td>{new Date(candidate.appliedAt).toLocaleDateString()}</td>
+ + {/* Show voice screening button if candidate hasn't done voice screening yet */} + {!candidate.hasVoiceScreening && ( + candidate.status === 'cv_screened' || + candidate.status === 'cv_passed' || + candidate.status === 'applied' + ) && ( + <> + + + + )} + {(candidate.status === 'voice_passed' || candidate.status === 'voice_done') && ( + + )} +
+
+
+ + {candidates.length === 0 && ( +
+ No candidates found. Applications will appear here once submitted. +
+ )} +
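The per-row actions in this table call `triggerVoiceScreening` and `scheduleInterview`. A condensed sketch of the voice-screening flow (create a session, then brief the supervisor agent), with the alert plumbing stripped out:

```typescript
import { SupervisorClient, VoiceScreeningClient } from '@/lib/sdk'

// Create a screening session, then hand the supervisor agent the context.
async function startVoiceScreening(candidate: { id: string; name: string; email: string }) {
  const session = await new VoiceScreeningClient().createSession(candidate.id)
  const reply = await new SupervisorClient().chat(
    `Voice screening session created for candidate ${candidate.name} (${candidate.email}). ` +
      `Session ID: ${session.session_id}. Please proceed with the voice screening process.`
  )
  return { sessionId: session.session_id, agentResponse: reply.content }
}
```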
+ + {/* Candidate Details Modal */} + {selectedCandidate && ( +
+          <div
+            onClick={() => setSelectedCandidate(null)}
+            style={{
+              position: 'fixed',
+              top: 0,
+              left: 0,
+              right: 0,
+              bottom: 0,
+              background: 'rgba(0, 0, 0, 0.5)',
+              display: 'flex',
+              alignItems: 'center',
+              justifyContent: 'center',
+              zIndex: 1000,
+            }}
+          >
+            <div
+              onClick={(e) => e.stopPropagation()}
+              style={{
+                background: 'white',
+                borderRadius: '16px',
+                padding: '2rem',
+                maxWidth: '600px',
+                width: '90%',
+                maxHeight: '80vh',
+                overflow: 'auto',
+              }}
+            >

+ Candidate Details +

+
+

Name: {selectedCandidate.name}

+

Email: {selectedCandidate.email}

+

Phone: {selectedCandidate.phone || 'N/A'}

+

Position: {selectedCandidate.position || 'AI Engineer'}

+

Status: {selectedCandidate.status}

+

Applied: {new Date(selectedCandidate.appliedAt).toLocaleString()}

+ {selectedCandidate.hasVoiceScreening !== undefined && ( +

Voice Screening: {selectedCandidate.hasVoiceScreening ? '✅ Completed' : '❌ Not completed'}

+ )} + {selectedCandidate.authCode && ( +

Authentication Code: {selectedCandidate.authCode}

+ )} +
+

🎤 Voice Screening Access:

+

+ URL: + http://localhost:8502?candidate_id={selectedCandidate.id} + +

+ {selectedCandidate.authCode && ( +

+ Auth Code: {selectedCandidate.authCode} +

+ )} +
+
+ + +
+
+ )} +
+
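`getStatusColor` above doubles as a map of the candidate lifecycle. A hypothetical tightening that captures those states in a union type (the page itself keeps `status` as a plain string):

```typescript
// Lifecycle states handled by getStatusColor on this page.
type CandidateStatus =
  | 'applied'
  | 'cv_screened' | 'cv_passed' | 'cv_rejected'
  | 'voice_invitation_sent' | 'voice_done' | 'voice_passed' | 'voice_rejected'
  | 'interview_scheduled'
  | 'decision_made'

const STATUS_COLORS: Record<CandidateStatus, string> = {
  applied: '#f59e0b',
  cv_screened: '#3b82f6',
  cv_passed: '#10b981',
  cv_rejected: '#ef4444',
  voice_invitation_sent: '#8b5cf6',
  voice_done: '#8b5cf6',
  voice_passed: '#10b981',
  voice_rejected: '#ef4444',
  interview_scheduled: '#10b981',
  decision_made: '#6366f1',
}

// e.g. style={{ background: STATUS_COLORS[status] }}
```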
+ ) +} + diff --git a/src/frontend/frontend/app/layout.tsx b/src/frontend/frontend/app/layout.tsx new file mode 100644 index 0000000000000000000000000000000000000000..fb344a11b73fd3e297f999a0a6520c4ea6281311 --- /dev/null +++ b/src/frontend/frontend/app/layout.tsx @@ -0,0 +1,24 @@ +import type { Metadata } from 'next' +import './globals.css' +import Navigation from '../components/Navigation' + +export const metadata: Metadata = { + title: 'ScionHire AI Labs - Recruitment Agent System', + description: 'AI-powered recruitment agent system with candidate portal, HR portal, and agent dashboard', +} + +export default function RootLayout({ + children, +}: { + children: React.ReactNode +}) { + return ( + + + + {children} + + + ) +} + diff --git a/src/frontend/frontend/app/page.tsx b/src/frontend/frontend/app/page.tsx new file mode 100644 index 0000000000000000000000000000000000000000..0698aa780d23254bf401afafe845c3a4e4422570 --- /dev/null +++ b/src/frontend/frontend/app/page.tsx @@ -0,0 +1,30 @@ +'use client' + +import { useEffect } from 'react' +import { useRouter } from 'next/navigation' + +export default function Home() { + const router = useRouter() + + useEffect(() => { + // Redirect to candidate portal by default + router.push('/candidate') + }, [router]) + + return ( +
+
+

Loading...

+
+
+ ) +} diff --git a/src/frontend/frontend/components/Navigation.tsx b/src/frontend/frontend/components/Navigation.tsx new file mode 100644 index 0000000000000000000000000000000000000000..407ac7f374d27d175598cbff6ed2fad20555ae40 --- /dev/null +++ b/src/frontend/frontend/components/Navigation.tsx @@ -0,0 +1,81 @@ +'use client' + +import Link from 'next/link' +import { usePathname } from 'next/navigation' + +export default function Navigation() { + const pathname = usePathname() + + const navItems = [ + { href: '/', label: '🧍 Candidate Portal', icon: '👤' }, + { href: '/hr', label: '🧑‍💼 HR Portal', icon: '💼' }, + { href: '/chat', label: '🤖 Supervisor Chat', icon: '💬' }, + ] + + return ( + + ) +} + diff --git a/src/frontend/frontend/data/dummyCandidates.ts b/src/frontend/frontend/data/dummyCandidates.ts new file mode 100644 index 0000000000000000000000000000000000000000..4c373ea00d1def383074f6aa3588ea18169bb3e0 --- /dev/null +++ b/src/frontend/frontend/data/dummyCandidates.ts @@ -0,0 +1,161 @@ +export interface Candidate { + id: string + name: string + email: string + phone: string + status: 'applied' | 'screened' | 'voice_screening' | 'scheduled' | 'rejected' | 'hired' + cvPath: string + appliedAt: string + position: string + skills: string[] + experience: string + education: string + plan?: PlanStep[] + reasoningLog?: ReasoningLog[] +} + +export interface PlanStep { + id: string + step: string + status: 'completed' | 'in_progress' | 'pending' + timestamp?: string +} + +export interface ReasoningLog { + timestamp: string + message: string + type?: 'info' | 'success' | 'warning' | 'error' +} + +export const dummyCandidates: Candidate[] = [ + { + id: '1', + name: 'Sarah Chen', + email: 'sarah.chen@example.com', + phone: '+1 555-0123', + status: 'voice_screening', + cvPath: '/cvs/sarah_chen.pdf', + appliedAt: '2025-01-15T10:30:00', + position: 'AI Engineer', + skills: ['Python', 'PyTorch', 'LangChain', 'NLP', 'MLOps'], + experience: '5 years in ML engineering, worked on LLM deployment at scale', + education: 'M.S. Computer Science, Stanford University', + plan: [ + { id: '1', step: 'Screen CVs', status: 'completed', timestamp: '10:35' }, + { id: '2', step: 'Invite for voice screening', status: 'completed', timestamp: '11:00' }, + { id: '3', step: 'Conduct voice screening', status: 'in_progress', timestamp: '11:15' }, + { id: '4', step: 'Schedule HR interview', status: 'pending' }, + { id: '5', step: 'Await HR decision', status: 'pending' }, + ], + reasoningLog: [ + { timestamp: '10:35', message: 'Detected strong match for AI Engineer role. Excellent Python and PyTorch experience.', type: 'success' }, + { timestamp: '10:40', message: 'CV score: 9.2/10. Strong background in LLM deployment and MLOps.', type: 'info' }, + { timestamp: '11:00', message: 'Invitation email sent to candidate for voice screening.', type: 'info' }, + { timestamp: '11:15', message: 'Voice screening in progress. Candidate demonstrating strong technical knowledge.', type: 'info' }, + ], + }, + { + id: '2', + name: 'Michael Rodriguez', + email: 'michael.r@example.com', + phone: '+1 555-0124', + status: 'scheduled', + cvPath: '/cvs/michael_rodriguez.pdf', + appliedAt: '2025-01-14T14:20:00', + position: 'AI Engineer', + skills: ['Python', 'TensorFlow', 'Deep Learning', 'Computer Vision', 'Docker'], + experience: '3 years in deep learning research, published papers on CV', + education: 'Ph.D. 
Computer Vision, MIT', + plan: [ + { id: '1', step: 'Screen CVs', status: 'completed', timestamp: '14:25' }, + { id: '2', step: 'Invite for voice screening', status: 'completed', timestamp: '15:00' }, + { id: '3', step: 'Conduct voice screening', status: 'completed', timestamp: '15:30' }, + { id: '4', step: 'Schedule HR interview', status: 'completed', timestamp: '16:00' }, + { id: '5', step: 'Await HR decision', status: 'in_progress', timestamp: '16:15' }, + ], + reasoningLog: [ + { timestamp: '14:25', message: 'Strong academic background with Ph.D. from MIT. Excellent research credentials.', type: 'success' }, + { timestamp: '14:30', message: 'CV score: 8.8/10. Strong in computer vision and deep learning.', type: 'info' }, + { timestamp: '15:30', message: 'Voice screening completed. Confidence score: 8.5/10. Candidate shows good communication skills.', type: 'success' }, + { timestamp: '16:00', message: 'HR interview scheduled for January 20, 2025 at 2:00 PM.', type: 'info' }, + ], + }, + { + id: '3', + name: 'Emily Watson', + email: 'emily.watson@example.com', + phone: '+1 555-0125', + status: 'screened', + cvPath: '/cvs/emily_watson.pdf', + appliedAt: '2025-01-16T09:15:00', + position: 'AI Engineer', + skills: ['Python', 'LangChain', 'RAG', 'Vector Databases', 'FastAPI'], + experience: '4 years building AI applications, expertise in RAG systems', + education: 'B.S. Computer Science, UC Berkeley', + plan: [ + { id: '1', step: 'Screen CVs', status: 'completed', timestamp: '09:20' }, + { id: '2', step: 'Invite for voice screening', status: 'in_progress', timestamp: '09:45' }, + { id: '3', step: 'Conduct voice screening', status: 'pending' }, + { id: '4', step: 'Schedule HR interview', status: 'pending' }, + { id: '5', step: 'Await HR decision', status: 'pending' }, + ], + reasoningLog: [ + { timestamp: '09:20', message: 'Good match for AI Engineer role. Strong experience with LangChain and RAG systems.', type: 'info' }, + { timestamp: '09:25', message: 'CV score: 8.0/10. Solid practical experience building production AI systems.', type: 'info' }, + { timestamp: '09:45', message: 'Preparing voice screening invitation...', type: 'info' }, + ], + }, + { + id: '4', + name: 'David Kim', + email: 'david.kim@example.com', + phone: '+1 555-0126', + status: 'applied', + cvPath: '/cvs/david_kim.pdf', + appliedAt: '2025-01-17T11:00:00', + position: 'AI Engineer', + skills: ['Python', 'Scikit-learn', 'Pandas', 'SQL', 'AWS'], + experience: '2 years in data science, transitioning to ML engineering', + education: 'M.S. Data Science, NYU', + plan: [ + { id: '1', step: 'Screen CVs', status: 'in_progress', timestamp: '11:05' }, + { id: '2', step: 'Invite for voice screening', status: 'pending' }, + { id: '3', step: 'Conduct voice screening', status: 'pending' }, + { id: '4', step: 'Schedule HR interview', status: 'pending' }, + { id: '5', step: 'Await HR decision', status: 'pending' }, + ], + reasoningLog: [ + { timestamp: '11:05', message: 'CV received. Analyzing candidate profile...', type: 'info' }, + ], + }, + { + id: '5', + name: 'Jessica Martinez', + email: 'jessica.m@example.com', + phone: '+1 555-0127', + status: 'rejected', + cvPath: '/cvs/jessica_martinez.pdf', + appliedAt: '2025-01-13T08:30:00', + position: 'AI Engineer', + skills: ['Java', 'Spring Boot', 'SQL', 'REST APIs'], + experience: '3 years in backend development, limited ML experience', + education: 'B.S. 
Software Engineering, University of Texas', + plan: [ + { id: '1', step: 'Screen CVs', status: 'completed', timestamp: '08:35' }, + { id: '2', step: 'Invite for voice screening', status: 'pending' }, + { id: '3', step: 'Conduct voice screening', status: 'pending' }, + { id: '4', step: 'Schedule HR interview', status: 'pending' }, + { id: '5', step: 'Await HR decision', status: 'pending' }, + ], + reasoningLog: [ + { timestamp: '08:35', message: 'CV reviewed. Limited ML/AI experience detected.', type: 'warning' }, + { timestamp: '08:40', message: 'CV score: 5.2/10. Strong backend skills but lacks required AI/ML expertise.', type: 'info' }, + { timestamp: '08:45', message: 'Candidate does not meet minimum requirements for AI Engineer position.', type: 'error' }, + ], + }, +] + +export function getCandidateById(id: string): Candidate | undefined { + return dummyCandidates.find(c => c.id === id) +} + diff --git a/src/frontend/frontend/lib/sdk/config.ts b/src/frontend/frontend/lib/sdk/config.ts new file mode 100644 index 0000000000000000000000000000000000000000..a3bdcf6df71003b344c7231f7a10b15d98ea2e67 --- /dev/null +++ b/src/frontend/frontend/lib/sdk/config.ts @@ -0,0 +1,45 @@ +/** + * SDK Configuration + * Handles base URL configuration for all API clients + */ + +export function getBaseUrl(service: 'supervisor' | 'database' | 'cv' | 'voice-screener'): string { + // Check for environment-specific URLs first + const envVar = { + supervisor: process.env.NEXT_PUBLIC_SUPERVISOR_API_URL, + database: process.env.NEXT_PUBLIC_DATABASE_API_URL, + cv: process.env.NEXT_PUBLIC_CV_UPLOAD_API_URL, + 'voice-screener': process.env.NEXT_PUBLIC_VOICE_SCREENER_API_URL, + }[service]; + + if (envVar) { + return envVar; + } + + // Default to localhost with standard ports + const defaultPort = { + supervisor: '8080', + database: '8080', + cv: '8080', + 'voice-screener': '8080', + }[service]; + + const basePath = { + supervisor: '/api/v1/supervisor', + database: '/api/v1/db', + cv: '/api/v1/cv', + 'voice-screener': '/api/v1/voice-screener', + }[service]; + + // In browser, use relative URLs or full URL based on environment + if (typeof window !== 'undefined') { + const host = window.location.hostname === 'localhost' + ? `http://localhost:${defaultPort}` + : window.location.origin; + return `${host}${basePath}`; + } + + // Server-side fallback + return `http://localhost:${defaultPort}${basePath}`; +} + diff --git a/src/frontend/frontend/lib/sdk/cvUpload.ts b/src/frontend/frontend/lib/sdk/cvUpload.ts new file mode 100644 index 0000000000000000000000000000000000000000..dba2b6cf548f14bb93de6b724a796fa635d0b7a3 --- /dev/null +++ b/src/frontend/frontend/lib/sdk/cvUpload.ts @@ -0,0 +1,111 @@ +/** + * CV Upload API Client + * + * A TypeScript client for submitting job applications with CV uploads. + * This mirrors the Python SDK's CVUploadClient functionality. + */ + +import { getBaseUrl } from './config'; +import { SubmitResponse } from './types'; + +export class CVUploadClient { + private baseUrl: string; + + /** + * Initialize the CV Upload client. + * + * @param baseUrl Optional API base URL. Defaults to CV_UPLOAD_API_URL env var + * or http://localhost:8080/api/v1/cv + */ + constructor(baseUrl?: string) { + this.baseUrl = baseUrl || getBaseUrl('cv'); + } + + /** + * Submit a job application with CV. 
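+   *
+   * A hypothetical usage sketch (assumes `file` is a File picked by the user):
+   * @example
+   *   const client = new CVUploadClient();
+   *   const res = await client.submit('Ada Lovelace', 'ada@example.com', file, 'cv.pdf');
+   *   if (res.success) console.log(res.message);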
+ * + * @param fullName Candidate's full name + * @param email Candidate's email address + * @param cvFile File object containing the CV (PDF or DOCX) + * @param filename Original filename of the CV + * @param phone Optional phone number + * @param timeout Request timeout in milliseconds (default: 120000) + * @returns SubmitResponse with success status and details + */ + async submit( + fullName: string, + email: string, + cvFile: File, + filename: string, + phone: string = '', + timeout: number = 120000 + ): Promise { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), timeout); + + try { + const formData = new FormData(); + formData.append('full_name', fullName); + formData.append('email', email); + formData.append('phone', phone); + formData.append('cv_file', cvFile, filename); + + const response = await fetch(`${this.baseUrl}/submit`, { + method: 'POST', + body: formData, + signal: controller.signal, + // Don't set Content-Type header - browser will set it automatically with boundary for FormData + }); + + clearTimeout(timeoutId); + + if (response.status === 400) { + const error = await response.json().catch(() => ({ detail: 'Invalid request' })); + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } + + if (response.status === 500) { + const error = await response.json().catch(() => ({ detail: 'Server error' })); + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } + + if (!response.ok) { + throw new Error(`Unexpected status: ${response.status}`); + } + + const data = await response.json(); + return { + success: data.success, + message: data.message, + candidate_name: data.candidate_name || '', + email: data.email || '', + cv_file_path: data.cv_file_path || '', + already_exists: data.already_exists || false, + }; + } catch (error: any) { + clearTimeout(timeoutId); + if (error.name === 'AbortError') { + throw new Error('Request timed out'); + } + throw error; + } + } + + /** + * Check if the API is healthy. + * + * @returns True if healthy, False otherwise + */ + async health(): Promise { + try { + const response = await fetch(`${this.baseUrl}/health`, { + method: 'GET', + signal: AbortSignal.timeout(5000), + }); + return response.ok; + } catch { + return false; + } + } +} + diff --git a/src/frontend/frontend/lib/sdk/database.ts b/src/frontend/frontend/lib/sdk/database.ts new file mode 100644 index 0000000000000000000000000000000000000000..8f214f99c624dbdc8e1977b823d0e821ec4fd7b7 --- /dev/null +++ b/src/frontend/frontend/lib/sdk/database.ts @@ -0,0 +1,572 @@ +/** + * Database API Client + * + * A TypeScript client for querying the recruitment database via the API. + * This mirrors the Python SDK's DatabaseClient functionality. + */ + +import { getBaseUrl } from './config'; +import { QueryResponse, SingleRecordResponse, StatsResponse } from './types'; + +export class DatabaseClient { + private baseUrl: string; + private timeout: number; + + /** + * Initialize the Database client. + * + * @param baseUrl Optional API base URL. Defaults to DATABASE_API_URL env var + * or http://localhost:8080/api/v1/db + */ + constructor(baseUrl?: string) { + this.baseUrl = baseUrl || getBaseUrl('database'); + this.timeout = 30000; + } + + /** + * Flexible query for any table. + * + * @param table Table name (candidates, cv_screening_results, voice_screening_results, + * interview_scheduling, final_decision) + * @param filters Key-value filters. 
Supports operators like {"field": {"$gte": 0.8}} + * @param fields Specific fields to return. None returns all. + * @param includeRelations Include related data (candidates table only) + * @param limit Max records to return + * @param offset Number of records to skip + * @param sortBy Field to sort by + * @param sortOrder "asc" or "desc" + * @returns QueryResponse with data and pagination info + */ + async query( + table: string, + filters?: Record, + fields?: string[], + includeRelations: boolean = false, + limit: number = 100, + offset: number = 0, + sortBy?: string, + sortOrder: 'asc' | 'desc' = 'desc' + ): Promise { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/query`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + table, + filters: filters || null, + fields: fields || null, + include_relations: includeRelations, + limit, + offset, + sort_by: sortBy || null, + sort_order: sortOrder, + }), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + table: data.table, + total_count: data.total_count, + returned_count: data.returned_count, + offset: data.offset, + data: data.data, + message: data.message, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * List all candidates with optional filtering. 
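+   *
+   * A hypothetical usage sketch:
+   * @example
+   *   const db = new DatabaseClient();
+   *   const applied = await db.getCandidates('applied', 50, 0, true);
+   *   console.log(`${applied.returned_count} of ${applied.total_count} candidates`);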
+ * + * @param status Filter by status (e.g., "applied", "screening", "interviewed") + * @param limit Max records to return + * @param offset Pagination offset + * @param includeRelations Include CV/voice screening results, interviews, decisions + * @returns QueryResponse with candidate data + */ + async getCandidates( + status?: string, + limit: number = 100, + offset: number = 0, + includeRelations: boolean = false + ): Promise { + const params = new URLSearchParams({ + limit: limit.toString(), + offset: offset.toString(), + include_relations: includeRelations.toString(), + }); + if (status) { + params.append('status', status); + } + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/candidates?${params}`, { + method: 'GET', + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + table: data.table, + total_count: data.total_count, + returned_count: data.returned_count, + offset: data.offset, + data: data.data, + message: data.message, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * Get a single candidate by ID with all related data. + * + * @param candidateId Candidate UUID + * @param includeRelations Include CV/voice screening, interviews, decisions + * @returns SingleRecordResponse with full candidate profile + */ + async getCandidate( + candidateId: string, + includeRelations: boolean = true + ): Promise { + const params = new URLSearchParams({ + include_relations: includeRelations.toString(), + }); + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/candidates/${candidateId}?${params}`, { + method: 'GET', + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + table: data.table, + data: data.data, + message: data.message, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * Get a candidate by email address with all related data. 
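+   *
+   * A hypothetical usage sketch (assumes a constructed DatabaseClient `db`):
+   * @example
+   *   const rec = await db.getCandidateByEmail('ada@example.com');
+   *   if (rec.success) console.log(rec.data);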
+ * + * @param email Candidate's email address + * @param includeRelations Include CV/voice screening, interviews, decisions + * @returns SingleRecordResponse with full candidate profile + */ + async getCandidateByEmail( + email: string, + includeRelations: boolean = true + ): Promise { + const params = new URLSearchParams({ + include_relations: includeRelations.toString(), + }); + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/candidates/email/${encodeURIComponent(email)}?${params}`, { + method: 'GET', + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + table: data.table, + data: data.data, + message: data.message, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * List CV screening results. + * + * @param candidateId Filter by candidate + * @param minScore Minimum overall fit score (0.0 - 1.0) + * @param limit Max records + * @param offset Pagination offset + * @returns QueryResponse with CV screening results + */ + async getCvScreenings( + candidateId?: string, + minScore?: number, + limit: number = 100, + offset: number = 0 + ): Promise { + const params = new URLSearchParams({ + limit: limit.toString(), + offset: offset.toString(), + }); + if (candidateId) { + params.append('candidate_id', candidateId); + } + if (minScore !== undefined) { + params.append('min_score', minScore.toString()); + } + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/cv-screening?${params}`, { + method: 'GET', + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + table: data.table, + total_count: data.total_count, + returned_count: data.returned_count, + offset: data.offset, + data: data.data, + message: data.message, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * List voice screening results. 
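+   *
+   * A hypothetical usage sketch (assumes a constructed DatabaseClient `db`):
+   * @example
+   *   const voice = await db.getVoiceScreenings(candidateId, 10);
+   *   voice.data.forEach(r => console.log(r.communication_score));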
+ * + * @param candidateId Filter by candidate + * @param limit Max records + * @param offset Pagination offset + * @returns QueryResponse with voice screening results + */ + async getVoiceScreenings( + candidateId?: string, + limit: number = 100, + offset: number = 0 + ): Promise { + const params = new URLSearchParams({ + limit: limit.toString(), + offset: offset.toString(), + }); + if (candidateId) { + params.append('candidate_id', candidateId); + } + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/voice-screening?${params}`, { + method: 'GET', + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + table: data.table, + total_count: data.total_count, + returned_count: data.returned_count, + offset: data.offset, + data: data.data, + message: data.message, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * List interview scheduling records. + * + * @param candidateId Filter by candidate + * @param status Filter by interview status + * @param limit Max records + * @param offset Pagination offset + * @returns QueryResponse with interview data + */ + async getInterviews( + candidateId?: string, + status?: string, + limit: number = 100, + offset: number = 0 + ): Promise { + const params = new URLSearchParams({ + limit: limit.toString(), + offset: offset.toString(), + }); + if (candidateId) { + params.append('candidate_id', candidateId); + } + if (status) { + params.append('status', status); + } + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/interviews?${params}`, { + method: 'GET', + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + table: data.table, + total_count: data.total_count, + returned_count: data.returned_count, + offset: data.offset, + data: data.data, + message: data.message, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * List final hiring decisions. 
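+   *
+   * A hypothetical usage sketch (assumes a constructed DatabaseClient `db`):
+   * @example
+   *   const hired = await db.getDecisions('hired', 0.8);
+   *   console.log(`${hired.returned_count} hired at or above 0.8`);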
+ * + * @param decision Filter by decision (e.g., "hired", "rejected") + * @param minScore Minimum overall score + * @param limit Max records + * @param offset Pagination offset + * @returns QueryResponse with decision data + */ + async getDecisions( + decision?: string, + minScore?: number, + limit: number = 100, + offset: number = 0 + ): Promise { + const params = new URLSearchParams({ + limit: limit.toString(), + offset: offset.toString(), + }); + if (decision) { + params.append('decision', decision); + } + if (minScore !== undefined) { + params.append('min_score', minScore.toString()); + } + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/decisions?${params}`, { + method: 'GET', + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + table: data.table, + total_count: data.total_count, + returned_count: data.returned_count, + offset: data.offset, + data: data.data, + message: data.message, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * Get database statistics. + * + * @returns StatsResponse with counts for all tables and status breakdown + */ + async getStats(): Promise { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeout); + + try { + const response = await fetch(`${this.baseUrl}/stats`, { + method: 'GET', + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + if (response.status === 400) { + throw new Error(`Validation error: ${error.detail || 'Invalid request'}`); + } else if (response.status === 500) { + throw new Error(`Server error: ${error.detail || 'Server error'}`); + } else { + throw new Error(`Unexpected status: ${response.status}`); + } + } + + const data = await response.json(); + return { + success: data.success, + stats: data.stats, + }; + } catch (error: any) { + clearTimeout(timeoutId); + throw error; + } + } + + /** + * Check if the database API is healthy. + * + * @returns True if healthy, False otherwise + */ + async health(): Promise { + try { + const response = await fetch(`${this.baseUrl}/health`, { + method: 'GET', + signal: AbortSignal.timeout(5000), + }); + const data = await response.json(); + return response.ok && data.status === 'healthy'; + } catch { + return false; + } + } + +} + diff --git a/src/frontend/frontend/lib/sdk/index.ts b/src/frontend/frontend/lib/sdk/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..82d446420d61e53ff498b6f4a24c421ee34eb7af --- /dev/null +++ b/src/frontend/frontend/lib/sdk/index.ts @@ -0,0 +1,47 @@ +/** + * SDK for interacting with Recruitment Agent APIs. + * + * TypeScript/JavaScript client library that mirrors the Python SDK functionality. 
+ * + * Usage: + * ```typescript + * import { SupervisorClient, CVUploadClient, DatabaseClient } from '@/lib/sdk'; + * + * // Supervisor Agent + * const supervisor = new SupervisorClient(); + * const response = await supervisor.chat("Show me all candidates"); + * console.log(response.content); + * + * // CV Upload + * const cvClient = new CVUploadClient(); + * const file = new File([...], "my_cv.pdf"); + * const response = await cvClient.submit( + * "Ada Lovelace", + * "ada@example.com", + * file, + * "my_cv.pdf" + * ); + * + * // Database Queries + * const db = new DatabaseClient(); + * const candidates = await db.getCandidates({ status: "applied" }); + * const candidate = await db.getCandidateByEmail("ada@example.com"); + * ``` + */ + +export { SupervisorClient } from './supervisor'; +export { DatabaseClient } from './database'; +export { CVUploadClient } from './cvUpload'; +export { VoiceScreeningClient } from './voiceScreening'; + +export type { + ChatResponse, + StreamChunk, + NewChatResponse, + QueryResponse, + SingleRecordResponse, + StatsResponse, + SubmitResponse, + HealthResponse, +} from './types'; + diff --git a/src/frontend/frontend/lib/sdk/supervisor.ts b/src/frontend/frontend/lib/sdk/supervisor.ts new file mode 100644 index 0000000000000000000000000000000000000000..e0c4a52e82de4988f89ba212268da68ae1fd45c8 --- /dev/null +++ b/src/frontend/frontend/lib/sdk/supervisor.ts @@ -0,0 +1,405 @@ +/** + * Supervisor API Client + * + * A TypeScript client for interacting with the HR Supervisor Agent API. + * Supports both regular and streaming responses. + * + * This mirrors the Python SDK's SupervisorClient functionality. + */ + +import { getBaseUrl } from './config'; +import { ChatResponse, StreamChunk, NewChatResponse } from './types'; + +export class SupervisorClient { + private baseUrl: string; + + /** + * Initialize the Supervisor client. + * + * @param baseUrl Optional API base URL. Defaults to SUPERVISOR_API_URL env var + * or http://localhost:8080/api/v1/supervisor + */ + constructor(baseUrl?: string) { + this.baseUrl = baseUrl || getBaseUrl('supervisor'); + } + + /** + * Send a message and get a complete response. + * + * Uses CompactingSupervisor wrapper for automatic context management. + * When token limit is exceeded, old messages are compacted/summarized. 
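+   *
+   * A hypothetical usage sketch:
+   * @example
+   *   const supervisor = new SupervisorClient();
+   *   const reply = await supervisor.chat('Show me all candidates');
+   *   console.log(reply.content, reply.thread_id, reply.token_count);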
+ * + * @param message The message to send + * @param threadId Optional thread ID for conversation continuity + * @param timeout Request timeout in milliseconds (default: 120000) + * @returns ChatResponse with content, thread_id, and token_count + */ + async chat( + message: string, + threadId?: string, + timeout: number = 120000 + ): Promise { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), timeout); + + try { + const response = await fetch(`${this.baseUrl}/chat`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + message, + thread_id: threadId, + }), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + throw new Error(`API error: ${error.detail || 'Unknown error'}`); + } + + const data = await response.json(); + return { + content: data.response, + thread_id: data.thread_id, + token_count: data.token_count, + }; + } catch (error: any) { + clearTimeout(timeoutId); + if (error.name === 'AbortError') { + throw new Error('Request timed out'); + } + throw error; + } + } + + /** + * Send a message and stream the response token by token. + * + * ⚠️ WARNING: This method may have known issues. + * Use chat() for reliable batch requests. + * + * Uses CompactingSupervisor wrapper for automatic context management. + * + * @param message The message to send + * @param threadId Optional thread ID for conversation continuity + * @param timeout Request timeout in milliseconds (default: 300000) + * @param onChunk Callback function for each chunk received + */ + async stream( + message: string, + threadId: string | undefined, + onChunk: (chunk: StreamChunk) => void, + timeout: number = 300000 + ): Promise { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), timeout); + + try { + const response = await fetch(`${this.baseUrl}/chat/stream`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + message, + thread_id: threadId, + }), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + onChunk({ + type: 'error', + error: `API returned status ${response.status}`, + }); + return; + } + + const reader = response.body?.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + let currentEvent: string | null = null; + + if (!reader) { + onChunk({ + type: 'error', + error: 'No response body', + }); + return; + } + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split('\n'); + buffer = lines.pop() || ''; + + for (const line of lines) { + if (line.startsWith('event:')) { + currentEvent = line.substring(6).trim(); + } else if (line.startsWith('data:') && currentEvent) { + try { + const jsonStr = line.substring(5).trim(); + const data = JSON.parse(jsonStr); + + if (currentEvent === 'token') { + onChunk({ + type: 'token', + content: data.content || '', + }); + } else if (currentEvent === 'done') { + onChunk({ + type: 'done', + thread_id: data.thread_id, + token_count: data.token_count || 0, + }); + } else if (currentEvent === 'error') { + onChunk({ + type: 'error', + error: data.error || 'Unknown error', + }); + } + } catch (e) { + // Skip invalid JSON + } + currentEvent = null; + } + } + } + } catch (error: any) { + clearTimeout(timeoutId); + 
if (error.name === 'AbortError') { + onChunk({ + type: 'error', + error: 'Request timed out', + }); + } else if (error.message?.includes('fetch')) { + onChunk({ + type: 'error', + error: 'Cannot connect to API. Make sure the server is running.', + }); + } else { + onChunk({ + type: 'error', + error: error.message || String(error), + }); + } + } + } + + /** + * Create a new chat session. + * + * @returns New thread_id + */ + async newChat(): Promise { + const response = await fetch(`${this.baseUrl}/new`, { + method: 'POST', + }); + + if (!response.ok) { + throw new Error(`Failed to create new chat: ${response.statusText}`); + } + + const data: NewChatResponse = await response.json(); + return data.thread_id; + } + + /** + * Send a message to the raw supervisor agent (without context compaction). + * + * This bypasses the CompactingSupervisor wrapper, giving direct access + * to the underlying supervisor agent. + * + * @param message The message to send + * @param threadId Optional thread ID for conversation continuity + * @param timeout Request timeout in milliseconds (default: 120000) + * @returns ChatResponse with content, thread_id, and token_count + */ + async chatRaw( + message: string, + threadId?: string, + timeout: number = 120000 + ): Promise { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), timeout); + + try { + const response = await fetch(`${this.baseUrl}/raw/chat`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + message, + thread_id: threadId, + }), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + throw new Error(`API error: ${error.detail || 'Unknown error'}`); + } + + const data = await response.json(); + return { + content: data.response, + thread_id: data.thread_id, + token_count: data.token_count, + }; + } catch (error: any) { + clearTimeout(timeoutId); + if (error.name === 'AbortError') { + throw new Error('Request timed out'); + } + throw error; + } + } + + /** + * Stream a response from the raw supervisor agent (without context compaction). + * + * ⚠️ WARNING: This method may have known issues. + * Use chatRaw() for reliable batch requests. 
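+   *
+   * A hypothetical usage sketch (assumes a constructed SupervisorClient `client`):
+   * @example
+   *   let text = '';
+   *   await client.streamRaw('Summarize the pipeline status', undefined, (chunk) => {
+   *     if (chunk.type === 'token') text += chunk.content ?? '';
+   *     else if (chunk.type === 'error') console.error(chunk.error);
+   *   });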
+ * + * @param message The message to send + * @param threadId Optional thread ID for conversation continuity + * @param onChunk Callback function for each chunk received + * @param timeout Request timeout in milliseconds (default: 300000) + */ + async streamRaw( + message: string, + threadId: string | undefined, + onChunk: (chunk: StreamChunk) => void, + timeout: number = 300000 + ): Promise { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), timeout); + + try { + const response = await fetch(`${this.baseUrl}/raw/chat/stream`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + message, + thread_id: threadId, + }), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (!response.ok) { + onChunk({ + type: 'error', + error: `API returned status ${response.status}`, + }); + return; + } + + const reader = response.body?.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + let currentEvent: string | null = null; + + if (!reader) { + onChunk({ + type: 'error', + error: 'No response body', + }); + return; + } + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split('\n'); + buffer = lines.pop() || ''; + + for (const line of lines) { + if (line.startsWith('event:')) { + currentEvent = line.substring(6).trim(); + } else if (line.startsWith('data:') && currentEvent) { + try { + const jsonStr = line.substring(5).trim(); + const data = JSON.parse(jsonStr); + + if (currentEvent === 'token') { + onChunk({ + type: 'token', + content: data.content || '', + }); + } else if (currentEvent === 'done') { + onChunk({ + type: 'done', + thread_id: data.thread_id, + token_count: data.token_count || 0, + }); + } else if (currentEvent === 'error') { + onChunk({ + type: 'error', + error: data.error || 'Unknown error', + }); + } + } catch (e) { + // Skip invalid JSON + } + currentEvent = null; + } + } + } + } catch (error: any) { + clearTimeout(timeoutId); + if (error.name === 'AbortError') { + onChunk({ + type: 'error', + error: 'Request timed out', + }); + } else if (error.message?.includes('fetch')) { + onChunk({ + type: 'error', + error: 'Cannot connect to API. Make sure the server is running.', + }); + } else { + onChunk({ + type: 'error', + error: error.message || String(error), + }); + } + } + } + + /** + * Check if the API is healthy. + * + * @returns True if healthy, False otherwise + */ + async health(): Promise { + try { + const response = await fetch(`${this.baseUrl}/health`, { + method: 'GET', + signal: AbortSignal.timeout(5000), + }); + return response.ok; + } catch { + return false; + } + } +} + diff --git a/src/frontend/frontend/lib/sdk/types.ts b/src/frontend/frontend/lib/sdk/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..5ecdc5646f90aa85fdcd3d4c2c728564a75618a0 --- /dev/null +++ b/src/frontend/frontend/lib/sdk/types.ts @@ -0,0 +1,66 @@ +/** + * TypeScript types for the Recruitment Agent SDK. + * These types mirror the Python SDK dataclasses. 
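+ *
+ * Example (a hypothetical sketch) of narrowing a StreamChunk by its discriminant:
+ * ```typescript
+ * function renderChunk(c: StreamChunk): string {
+ *   if (c.type === 'token') return c.content ?? '';
+ *   if (c.type === 'error') return `error: ${c.error}`;
+ *   return `done (thread ${c.thread_id ?? 'n/a'})`;
+ * }
+ * ```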
+ */ + +// Supervisor Client Types +export interface ChatResponse { + content: string; + thread_id: string; + token_count: number; +} + +export interface StreamChunk { + type: 'token' | 'done' | 'error'; + content?: string; + thread_id?: string; + token_count?: number; + error?: string; +} + +export interface NewChatResponse { + thread_id: string; + message: string; +} + +// Database Client Types +export interface QueryResponse { + success: boolean; + table: string; + total_count: number; + returned_count: number; + offset: number; + data: Array>; + message?: string; +} + +export interface SingleRecordResponse { + success: boolean; + table: string; + data?: Record; + message?: string; +} + +export interface StatsResponse { + success: boolean; + stats: Record; +} + +// CV Upload Client Types +export interface SubmitResponse { + success: boolean; + message: string; + candidate_name?: string; + email?: string; + cv_file_path?: string; + already_exists?: boolean; +} + +// Health Check Response +export interface HealthResponse { + status: string; + service?: string; + connection?: string; + error?: string; +} + diff --git a/src/frontend/frontend/lib/sdk/voiceScreening.ts b/src/frontend/frontend/lib/sdk/voiceScreening.ts new file mode 100644 index 0000000000000000000000000000000000000000..2f5502b59adc5240eb245884f17c15674e4e99de --- /dev/null +++ b/src/frontend/frontend/lib/sdk/voiceScreening.ts @@ -0,0 +1,104 @@ +/** + * Voice Screening API Client + * + * A TypeScript client for managing voice screening sessions. + */ + +import { getBaseUrl } from './config'; + +export interface CreateSessionRequest { + candidate_id: string; +} + +export interface CreateSessionResponse { + session_id: string; + candidate_name: string; + job_title: string; + message: string; +} + +export interface SessionConfigResponse { + candidate_name: string; + job_title: string; + instructions: string; + questions: string[]; + config: Record; +} + +export class VoiceScreeningClient { + private baseUrl: string; + + /** + * Initialize the Voice Screening client. + * + * @param baseUrl Optional API base URL. Defaults to VOICE_SCREENER_API_URL env var + * or http://localhost:8080/api/v1/voice-screener + */ + constructor(baseUrl?: string) { + this.baseUrl = baseUrl || getBaseUrl('voice-screener'); + } + + /** + * Create a new voice screening session for a candidate. + * + * @param candidateId Candidate UUID + * @returns CreateSessionResponse with session_id and candidate info + */ + async createSession(candidateId: string): Promise { + const response = await fetch(`${this.baseUrl}/session/create`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + candidate_id: candidateId, + }), + }); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + throw new Error(`Failed to create session: ${error.detail || 'Unknown error'}`); + } + + return await response.json(); + } + + /** + * Get session configuration. 
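+   *
+   * A hypothetical usage sketch (ids come from createSession and the candidate record):
+   * @example
+   *   const voice = new VoiceScreeningClient();
+   *   const cfg = await voice.getSessionConfig(sessionId, candidateId);
+   *   console.log(cfg.candidate_name, cfg.questions.length);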
+ * + * @param sessionId Session ID + * @param candidateId Candidate UUID + * @returns SessionConfigResponse with interview configuration + */ + async getSessionConfig(sessionId: string, candidateId: string): Promise { + const params = new URLSearchParams({ candidate_id: candidateId }); + const response = await fetch(`${this.baseUrl}/session/${sessionId}/config?${params}`, { + method: 'GET', + }); + + if (!response.ok) { + const error = await response.json().catch(() => ({ detail: 'Unknown error' })); + throw new Error(`Failed to get session config: ${error.detail || 'Unknown error'}`); + } + + return await response.json(); + } + + /** + * Check if the API is healthy. + * + * @returns True if healthy, False otherwise + */ + async health(): Promise { + try { + const response = await fetch(`${this.baseUrl}/health`, { + method: 'GET', + signal: AbortSignal.timeout(5000), + }); + return response.ok; + } catch { + return false; + } + } +} + diff --git a/src/frontend/frontend/next.config.js b/src/frontend/frontend/next.config.js new file mode 100644 index 0000000000000000000000000000000000000000..f71169a45478321a40ab47bfcb8e3fb4918a5bf9 --- /dev/null +++ b/src/frontend/frontend/next.config.js @@ -0,0 +1,59 @@ +/** @type {import('next').NextConfig} */ +const webpack = require('webpack'); + +const nextConfig = { + reactStrictMode: true, + // Allow connecting to Gradio backend (adjust port if needed) + async rewrites() { + return [ + { + source: '/api/gradio/:path*', + destination: 'http://localhost:7860/:path*', + }, + ]; + }, + webpack: (config, { isServer }) => { + // Handle Node.js built-in modules for client-side + if (!isServer) { + // Transform node: protocol imports to regular module names + config.plugins = [ + ...config.plugins, + new webpack.NormalModuleReplacementPlugin( + /^node:/, + (resource) => { + resource.request = resource.request.replace(/^node:/, ''); + } + ), + ]; + + config.resolve.fallback = { + ...config.resolve.fallback, + fs: false, + net: false, + tls: false, + crypto: false, + stream: false, + url: false, + zlib: false, + http: false, + https: false, + assert: false, + os: false, + path: false, + buffer: require.resolve('buffer/'), + }; + + // Add buffer polyfill + config.plugins.push( + new webpack.ProvidePlugin({ + Buffer: ['buffer', 'Buffer'], + }) + ); + } + + return config; + }, +}; + +module.exports = nextConfig; + diff --git a/src/frontend/frontend/package-lock.json b/src/frontend/frontend/package-lock.json new file mode 100644 index 0000000000000000000000000000000000000000..a41d046f444ab5789f8b98a55d9dc01df570c7f1 --- /dev/null +++ b/src/frontend/frontend/package-lock.json @@ -0,0 +1,5516 @@ +{ + "name": "recruitment-agent-frontend", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "recruitment-agent-frontend", + "version": "0.1.0", + "dependencies": { + "@gradio/client": "^0.15.0", + "buffer": "^6.0.3", + "next": "^14.2.5", + "react": "^18.3.1", + "react-dom": "^18.3.1" + }, + "devDependencies": { + "@types/node": "^20.14.12", + "@types/react": "^18.3.3", + "@types/react-dom": "^18.3.0", + "eslint": "^8.57.0", + "eslint-config-next": "^14.2.5", + "typescript": "^5.5.4" + } + }, + "node_modules/@emnapi/core": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.7.1.tgz", + "integrity": "sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + 
"@emnapi/wasi-threads": "1.1.0", + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.7.1.tgz", + "integrity": "sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/wasi-threads": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.1.0.tgz", + "integrity": "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@gradio/client": { + "version": "0.15.1", + "resolved": "https://registry.npmjs.org/@gradio/client/-/client-0.15.1.tgz", + "integrity": "sha512-VipURQjW5Dg6JHsHOq9QXih5x0yK/p7VCEc5tyaIlaHsjTnIsNpzetfKvBU3uj9A4ENaJ78tN2uLdXkY8sAfjQ==", + "license": "ISC", + "dependencies": { + "bufferutil": "^4.0.7", + "semiver": "^1.1.0", + "ws": "^8.13.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + 
"dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@napi-rs/wasm-runtime": { + "version": "0.2.12", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.12.tgz", + "integrity": "sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.4.3", + "@emnapi/runtime": "^1.4.3", + "@tybys/wasm-util": "^0.10.0" + } + }, + "node_modules/@next/env": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.33.tgz", + "integrity": "sha512-CgVHNZ1fRIlxkLhIX22flAZI/HmpDaZ8vwyJ/B0SDPTBuLZ1PJ+DWMjCHhqnExfmSQzA/PbZi8OAc7PAq2w9IA==", + "license": "MIT" + }, + "node_modules/@next/eslint-plugin-next": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.2.33.tgz", + "integrity": "sha512-DQTJFSvlB+9JilwqMKJ3VPByBNGxAGFTfJ7BuFj25cVcbBy7jm88KfUN+dngM4D3+UxZ8ER2ft+WH9JccMvxyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "glob": "10.3.10" + } + }, + 
"node_modules/@next/swc-darwin-arm64": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.33.tgz", + "integrity": "sha512-HqYnb6pxlsshoSTubdXKu15g3iivcbsMXg4bYpjL2iS/V6aQot+iyF4BUc2qA/J/n55YtvE4PHMKWBKGCF/+wA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.33.tgz", + "integrity": "sha512-8HGBeAE5rX3jzKvF593XTTFg3gxeU4f+UWnswa6JPhzaR6+zblO5+fjltJWIZc4aUalqTclvN2QtTC37LxvZAA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.33.tgz", + "integrity": "sha512-JXMBka6lNNmqbkvcTtaX8Gu5by9547bukHQvPoLe9VRBx1gHwzf5tdt4AaezW85HAB3pikcvyqBToRTDA4DeLw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.33.tgz", + "integrity": "sha512-Bm+QulsAItD/x6Ih8wGIMfRJy4G73tu1HJsrccPW6AfqdZd0Sfm5Imhgkgq2+kly065rYMnCOxTBvmvFY1BKfg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.33.tgz", + "integrity": "sha512-FnFn+ZBgsVMbGDsTqo8zsnRzydvsGV8vfiWwUo1LD8FTmPTdV+otGSWKc4LJec0oSexFnCYVO4hX8P8qQKaSlg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.33.tgz", + "integrity": "sha512-345tsIWMzoXaQndUTDv1qypDRiebFxGYx9pYkhwY4hBRaOLt8UGfiWKr9FSSHs25dFIf8ZqIFaPdy5MljdoawA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.33.tgz", + "integrity": "sha512-nscpt0G6UCTkrT2ppnJnFsYbPDQwmum4GNXYTeoTIdsmMydSKFz9Iny2jpaRupTb+Wl298+Rh82WKzt9LCcqSQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.33.tgz", + "integrity": "sha512-pc9LpGNKhJ0dXQhZ5QMmYxtARwwmWLpeocFmVG5Z0DzWq5Uf0izcI8tLc+qOpqxO1PWqZ5A7J1blrUIKrIFc7Q==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.33.tgz", + "integrity": 
"sha512-nOjfZMy8B94MdisuzZo9/57xuFVLHJaDj5e/xrduJp9CV2/HrfxTRH2fbyLe+K9QT41WBLUd4iXX3R7jBp0EUg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nolyfill/is-core-module": { + "version": "1.0.39", + "resolved": "https://registry.npmjs.org/@nolyfill/is-core-module/-/is-core-module-1.0.39.tgz", + "integrity": "sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.4.0" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@rtsao/scc": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rtsao/scc/-/scc-1.1.0.tgz", + "integrity": "sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rushstack/eslint-patch": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.15.0.tgz", + "integrity": "sha512-ojSshQPKwVvSMR8yT2L/QtUkV5SXi/IfDiJ4/8d6UbTPjiHVmxZzUAzGD8Tzks1b9+qQkZa0isUOvYObedITaw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, + "node_modules/@swc/helpers": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3", + "tslib": "^2.4.0" + } + }, + "node_modules/@tybys/wasm-util": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz", + "integrity": 
"sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.19.25", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.25.tgz", + "integrity": "sha512-ZsJzA5thDQMSQO788d7IocwwQbI8B5OPzmqNvpf3NY/+MHDAS759Wo0gd2WQeXYt5AAAQjzcrTVC6SKCuYgoCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/prop-types": { + "version": "15.7.15", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.15.tgz", + "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.3.27", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.27.tgz", + "integrity": "sha512-cisd7gxkzjBKU2GgdYrTdtQx1SORymWyaAFhaxQPK9bYO9ot3Y5OikQRvY0VYQtvwjeQnizCINJAenh/V7MK2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^18.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.47.0.tgz", + "integrity": "sha512-fe0rz9WJQ5t2iaLfdbDc9T80GJy0AeO453q8C3YCilnGozvOyCG5t+EZtg7j7D88+c3FipfP/x+wzGnh1xp8ZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.47.0", + "@typescript-eslint/type-utils": "8.47.0", + "@typescript-eslint/utils": "8.47.0", + "@typescript-eslint/visitor-keys": "8.47.0", + "graphemer": "^1.4.0", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.47.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.47.0.tgz", + "integrity": "sha512-lJi3PfxVmo0AkEY93ecfN+r8SofEqZNGByvHAI3GBLrvt1Cw6H5k1IM02nSzu0RfUafr2EvFSw0wAsZgubNplQ==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@typescript-eslint/scope-manager": "8.47.0", + "@typescript-eslint/types": "8.47.0", + "@typescript-eslint/typescript-estree": "8.47.0", + "@typescript-eslint/visitor-keys": "8.47.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.47.0.tgz", + "integrity": "sha512-2X4BX8hUeB5JcA1TQJ7GjcgulXQ+5UkNb0DL8gHsHUHdFoiCTJoYLTpib3LtSDPZsRET5ygN4qqIWrHyYIKERA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.47.0", + "@typescript-eslint/types": "^8.47.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.47.0.tgz", + "integrity": "sha512-a0TTJk4HXMkfpFkL9/WaGTNuv7JWfFTQFJd6zS9dVAjKsojmv9HT55xzbEpnZoY+VUb+YXLMp+ihMLz/UlZfDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.47.0", + "@typescript-eslint/visitor-keys": "8.47.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.47.0.tgz", + "integrity": "sha512-ybUAvjy4ZCL11uryalkKxuT3w3sXJAuWhOoGS3T/Wu+iUu1tGJmk5ytSY8gbdACNARmcYEB0COksD2j6hfGK2g==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.47.0.tgz", + "integrity": "sha512-QC9RiCmZ2HmIdCEvhd1aJELBlD93ErziOXXlHEZyuBo3tBiAZieya0HLIxp+DoDWlsQqDawyKuNEhORyku+P8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.47.0", + "@typescript-eslint/typescript-estree": "8.47.0", + "@typescript-eslint/utils": "8.47.0", + "debug": "^4.3.4", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.47.0.tgz", + "integrity": "sha512-nHAE6bMKsizhA2uuYZbEbmp5z2UpffNrPEqiKIeN7VsV6UY/roxanWfoRrf6x/k9+Obf+GQdkm0nPU+vnMXo9A==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.47.0.tgz", + "integrity": "sha512-k6ti9UepJf5NpzCjH31hQNLHQWupTRPhZ+KFF8WtTuTpy7uHPfeg2NM7cP27aCGajoEplxJDFVCEm9TGPYyiVg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.47.0", + "@typescript-eslint/tsconfig-utils": "8.47.0", + "@typescript-eslint/types": "8.47.0", + "@typescript-eslint/visitor-keys": "8.47.0", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.47.0.tgz", + "integrity": "sha512-g7XrNf25iL4TJOiPqatNuaChyqt49a/onq5YsJ9+hXeugK+41LVg7AxikMfM02PC6jbNtZLCJj6AUcQXJS/jGQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.47.0", + "@typescript-eslint/types": "8.47.0", + "@typescript-eslint/typescript-estree": "8.47.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.47.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.47.0.tgz", + "integrity": "sha512-SIV3/6eftCy1bNzCQoPmbWsRLujS8t5iDIZ4spZOBHqrM+yfX2ogg8Tt3PDTAVKw3sSCiUgg30uOAvK2r9zGjQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.47.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": 
"sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "dev": true, + "license": "ISC" + }, + "node_modules/@unrs/resolver-binding-android-arm-eabi": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-android-arm-eabi/-/resolver-binding-android-arm-eabi-1.11.1.tgz", + "integrity": "sha512-ppLRUgHVaGRWUx0R0Ut06Mjo9gBaBkg3v/8AxusGLhsIotbBLuRk51rAzqLC8gq6NyyAojEXglNjzf6R948DNw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@unrs/resolver-binding-android-arm64": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-android-arm64/-/resolver-binding-android-arm64-1.11.1.tgz", + "integrity": "sha512-lCxkVtb4wp1v+EoN+HjIG9cIIzPkX5OtM03pQYkG+U5O/wL53LC4QbIeazgiKqluGeVEeBlZahHalCaBvU1a2g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@unrs/resolver-binding-darwin-arm64": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-darwin-arm64/-/resolver-binding-darwin-arm64-1.11.1.tgz", + "integrity": "sha512-gPVA1UjRu1Y/IsB/dQEsp2V1pm44Of6+LWvbLc9SDk1c2KhhDRDBUkQCYVWe6f26uJb3fOK8saWMgtX8IrMk3g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@unrs/resolver-binding-darwin-x64": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-darwin-x64/-/resolver-binding-darwin-x64-1.11.1.tgz", + "integrity": "sha512-cFzP7rWKd3lZaCsDze07QX1SC24lO8mPty9vdP+YVa3MGdVgPmFc59317b2ioXtgCMKGiCLxJ4HQs62oz6GfRQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@unrs/resolver-binding-freebsd-x64": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-freebsd-x64/-/resolver-binding-freebsd-x64-1.11.1.tgz", + "integrity": "sha512-fqtGgak3zX4DCB6PFpsH5+Kmt/8CIi4Bry4rb1ho6Av2QHTREM+47y282Uqiu3ZRF5IQioJQ5qWRV6jduA+iGw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@unrs/resolver-binding-linux-arm-gnueabihf": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-gnueabihf/-/resolver-binding-linux-arm-gnueabihf-1.11.1.tgz", + "integrity": "sha512-u92mvlcYtp9MRKmP+ZvMmtPN34+/3lMHlyMj7wXJDeXxuM0Vgzz0+PPJNsro1m3IZPYChIkn944wW8TYgGKFHw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-arm-musleabihf": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-musleabihf/-/resolver-binding-linux-arm-musleabihf-1.11.1.tgz", + "integrity": "sha512-cINaoY2z7LVCrfHkIcmvj7osTOtm6VVT16b5oQdS4beibX2SYBwgYLmqhBjA1t51CarSaBuX5YNsWLjsqfW5Cw==", + "cpu": [ + "arm" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-arm64-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-gnu/-/resolver-binding-linux-arm64-gnu-1.11.1.tgz", + "integrity": "sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-arm64-musl": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-musl/-/resolver-binding-linux-arm64-musl-1.11.1.tgz", + "integrity": "sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-ppc64-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-ppc64-gnu/-/resolver-binding-linux-ppc64-gnu-1.11.1.tgz", + "integrity": "sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-riscv64-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-gnu/-/resolver-binding-linux-riscv64-gnu-1.11.1.tgz", + "integrity": "sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-riscv64-musl": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-musl/-/resolver-binding-linux-riscv64-musl-1.11.1.tgz", + "integrity": "sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-s390x-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-s390x-gnu/-/resolver-binding-linux-s390x-gnu-1.11.1.tgz", + "integrity": "sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-x64-gnu": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-gnu/-/resolver-binding-linux-x64-gnu-1.11.1.tgz", + "integrity": "sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@unrs/resolver-binding-linux-x64-musl": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-musl/-/resolver-binding-linux-x64-musl-1.11.1.tgz", + "integrity": "sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + 
}, + "node_modules/@unrs/resolver-binding-wasm32-wasi": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-wasm32-wasi/-/resolver-binding-wasm32-wasi-1.11.1.tgz", + "integrity": "sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ==", + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@napi-rs/wasm-runtime": "^0.2.11" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@unrs/resolver-binding-win32-arm64-msvc": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-arm64-msvc/-/resolver-binding-win32-arm64-msvc-1.11.1.tgz", + "integrity": "sha512-nRcz5Il4ln0kMhfL8S3hLkxI85BXs3o8EYoattsJNdsX4YUU89iOkVn7g0VHSRxFuVMdM4Q1jEpIId1Ihim/Uw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@unrs/resolver-binding-win32-ia32-msvc": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-ia32-msvc/-/resolver-binding-win32-ia32-msvc-1.11.1.tgz", + "integrity": "sha512-DCEI6t5i1NmAZp6pFonpD5m7i6aFrpofcp4LA2i8IIq60Jyo28hamKBxNrZcyOwVOZkgsRp9O2sXWBWP8MnvIQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@unrs/resolver-binding-win32-x64-msvc": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-x64-msvc/-/resolver-binding-win32-x64-msvc-1.11.1.tgz", + "integrity": "sha512-lrW200hZdbfRtztbygyaq/6jP6AKE8qQN2KvPcJ+x7wiD038YtnYtZ82IMNJ69GJibV7bwL3y9FgK+5w/pYt6g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/aria-query": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", + "integrity": "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "is-array-buffer": "^3.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-includes": { + "version": "3.1.9", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.9.tgz", + "integrity": "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.24.0", + "es-object-atoms": "^1.1.1", + "get-intrinsic": "^1.3.0", + "is-string": "^1.1.1", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.6.tgz", + "integrity": "sha512-F/TKATkzseUExPlfvmwQKGITM3DGTK+vkAsCZoDc5daVygbJBnjEUCbgkAvVFsgfXfX4YIqZ/27G3k3tdXrTxQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "es-shim-unscopables": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.3.tgz", + 
"integrity": "sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz", + "integrity": "sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", + "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz", + "integrity": "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ast-types-flow": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", + "integrity": "sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/async-function": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", + "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/axe-core": { + "version": "4.11.0", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.11.0.tgz", + "integrity": "sha512-ilYanEU8vxxBexpJd8cWM4ElSQq4QctCLKih0TSfjIfCQTeyH/6zVrmIJfLPrKTKJRbiG+cfnZbQIjAlJmF1jQ==", + 
"dev": true, + "license": "MPL-2.0", + "engines": { + "node": ">=4" + } + }, + "node_modules/axobject-query": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/bufferutil": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/bufferutil/-/bufferutil-4.0.9.tgz", + "integrity": "sha512-WDtdLmJvAuNNPzByAYpRo2rF1Mmradw6gvWsQKf63476DDXmomT9zUiGypLcG4ibIM67vhAj8jJRdbmEws2Aqw==", + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "node-gyp-build": "^4.3.0" + }, + "engines": { + "node": ">=6.14.2" + } + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001755", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001755.tgz", + "integrity": "sha512-44V+Jm6ctPj7R52Na4TLi3Zri4dWUljJd+RDm+j8LtNCc/ihLCT+X1TzoOAkRETEWqjuLnh9581Tl80FvK7jVA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": 
"https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/damerau-levenshtein": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", + "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/data-view-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz", + "integrity": "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz", + "integrity": "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/inspect-js" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz", + "integrity": "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/define-data-property": { + 
"version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-abstract": { + "version": "1.24.0", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.0.tgz", + "integrity": "sha512-WSzPgsdLtTcQwm4CROfS5ju2Wa1QQcVeT37jFjYzdFz1r9ahadC8B8/a4qxJxM+09F18iumCdRmlr96ZYkQvEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.2", + "arraybuffer.prototype.slice": "^1.0.4", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "data-view-buffer": "^1.0.2", + "data-view-byte-length": "^1.0.2", + "data-view-byte-offset": "^1.0.1", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "es-set-tostringtag": "^2.1.0", + "es-to-primitive": "^1.3.0", + "function.prototype.name": "^1.1.8", + "get-intrinsic": "^1.3.0", + "get-proto": "^1.0.1", + "get-symbol-description": "^1.1.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "internal-slot": "^1.1.0", + "is-array-buffer": "^3.0.5", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.2", + 
"is-negative-zero": "^2.0.3", + "is-regex": "^1.2.1", + "is-set": "^2.0.3", + "is-shared-array-buffer": "^1.0.4", + "is-string": "^1.1.1", + "is-typed-array": "^1.1.15", + "is-weakref": "^1.1.1", + "math-intrinsics": "^1.1.0", + "object-inspect": "^1.13.4", + "object-keys": "^1.1.1", + "object.assign": "^4.1.7", + "own-keys": "^1.0.1", + "regexp.prototype.flags": "^1.5.4", + "safe-array-concat": "^1.1.3", + "safe-push-apply": "^1.0.0", + "safe-regex-test": "^1.1.0", + "set-proto": "^1.0.0", + "stop-iteration-iterator": "^1.1.0", + "string.prototype.trim": "^1.2.10", + "string.prototype.trimend": "^1.0.9", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.3", + "typed-array-byte-length": "^1.0.3", + "typed-array-byte-offset": "^1.0.4", + "typed-array-length": "^1.0.7", + "unbox-primitive": "^1.1.0", + "which-typed-array": "^1.1.19" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.1.tgz", + "integrity": "sha512-uDn+FE1yrDzyC0pCo961B2IHbdM8y/ACZsKD4dG6WqrjV53BADjwa7D+1aom2rsNVfLyDgU/eigvlJGJ08OQ4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.6", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.0.3", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.6", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "iterator.prototype": "^1.1.4", + "safe-array-concat": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-shim-unscopables": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz", + "integrity": 
"sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-to-primitive": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz", + "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-symbol": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-config-next": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.2.33.tgz", + "integrity": "sha512-e2W+waB+I5KuoALAtKZl3WVDU4Q1MS6gF/gdcwHh0WOAkHf4TZI6dPjd25wKhlZFAsFrVKy24Z7/IwOhn8dHBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@next/eslint-plugin-next": "14.2.33", + "@rushstack/eslint-patch": "^1.3.3", + "@typescript-eslint/eslint-plugin": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", + "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-import-resolver-typescript": "^3.5.2", + "eslint-plugin-import": "^2.28.1", + 
"eslint-plugin-jsx-a11y": "^6.7.1", + "eslint-plugin-react": "^7.33.2", + "eslint-plugin-react-hooks": "^4.5.0 || 5.0.0-canary-7118f5dd7-20230705" + }, + "peerDependencies": { + "eslint": "^7.23.0 || ^8.0.0", + "typescript": ">=3.3.1" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/eslint-import-resolver-node": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^3.2.7", + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" + } + }, + "node_modules/eslint-import-resolver-node/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-import-resolver-typescript": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.10.1.tgz", + "integrity": "sha512-A1rHYb06zjMGAxdLSkN2fXPBwuSaQ0iO5M/hdyS0Ajj1VBaRp0sPD3dn1FhME3c/JluGFbwSxyCfqdSbtQLAHQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@nolyfill/is-core-module": "1.0.39", + "debug": "^4.4.0", + "get-tsconfig": "^4.10.0", + "is-bun-module": "^2.0.0", + "stable-hash": "^0.0.5", + "tinyglobby": "^0.2.13", + "unrs-resolver": "^1.6.2" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint-import-resolver-typescript" + }, + "peerDependencies": { + "eslint": "*", + "eslint-plugin-import": "*", + "eslint-plugin-import-x": "*" + }, + "peerDependenciesMeta": { + "eslint-plugin-import": { + "optional": true + }, + "eslint-plugin-import-x": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.12.1.tgz", + "integrity": "sha512-L8jSWTze7K2mTg0vos/RuLRS5soomksDPoJLXIslC7c8Wmut3bx7CPpJijDcBZtxQ5lrbUdM+s0OlNbz0DCDNw==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^3.2.7" + }, + "engines": { + "node": ">=4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import": { + "version": "2.32.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.32.0.tgz", + "integrity": "sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rtsao/scc": "^1.1.0", + "array-includes": "^3.1.9", + "array.prototype.findlastindex": "^1.2.6", + "array.prototype.flat": "^1.3.3", + "array.prototype.flatmap": "^1.3.3", + "debug": "^3.2.7", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.9", + "eslint-module-utils": "^2.12.1", + "hasown": "^2.0.2", + 
"is-core-module": "^2.16.1", + "is-glob": "^4.0.3", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.8", + "object.groupby": "^1.0.3", + "object.values": "^1.2.1", + "semver": "^6.3.1", + "string.prototype.trimend": "^1.0.9", + "tsconfig-paths": "^3.15.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 || ^9" + } + }, + "node_modules/eslint-plugin-import/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-import/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/eslint-plugin-jsx-a11y": { + "version": "6.10.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.2.tgz", + "integrity": "sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "aria-query": "^5.3.2", + "array-includes": "^3.1.8", + "array.prototype.flatmap": "^1.3.2", + "ast-types-flow": "^0.0.8", + "axe-core": "^4.10.0", + "axobject-query": "^4.1.0", + "damerau-levenshtein": "^1.0.8", + "emoji-regex": "^9.2.2", + "hasown": "^2.0.2", + "jsx-ast-utils": "^3.3.5", + "language-tags": "^1.0.9", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.8", + "safe-regex-test": "^1.0.3", + "string.prototype.includes": "^2.0.1" + }, + "engines": { + "node": ">=4.0" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9" + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.37.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.5.tgz", + "integrity": "sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.8", + "array.prototype.findlast": "^1.2.5", + "array.prototype.flatmap": "^1.3.3", + "array.prototype.tosorted": "^1.1.4", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.2.1", + "estraverse": "^5.3.0", + "hasown": "^2.0.2", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.9", + "object.fromentries": "^2.0.8", + "object.values": "^1.2.1", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.12", + "string.prototype.repeat": "^1.0.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "5.0.0-canary-7118f5dd7-20230705", + 
"resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.0.0-canary-7118f5dd7-20230705.tgz", + "integrity": "sha512-AZYbMo/NW9chdL7vk6HQzQhT+PvTAEVqWk9ziruUoW2kAOcN5qNyelv70e0F1VNQAbvutOC9oc+xfWycI9FxDw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-react/node_modules/resolve": { + "version": "2.0.0-next.5", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", + "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/eslint-plugin-react/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + 
"node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + 
"node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function.prototype.name": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz", + "integrity": "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": 
"^1.2.1", + "functions-have-names": "^1.2.3", + "hasown": "^2.0.2", + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", + "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/generator-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", + "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-symbol-description": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz", + "integrity": "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-tsconfig": { + "version": "4.13.0", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.0.tgz", + "integrity": "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/glob": { + "version": "10.3.10", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz", + "integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^2.3.5", + "minimatch": "^9.0.1", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", + "path-scurry": "^1.10.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + 
"funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globalthis": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.1", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-bigints": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", + "integrity": "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": 
"4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz", + "integrity": "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + 
"engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/internal-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", + "integrity": "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "hasown": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz", + "integrity": "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-async-function": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", + "integrity": "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "async-function": "^1.0.0", + "call-bound": "^1.0.3", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz", + "integrity": "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-boolean-object": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz", + "integrity": "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bun-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-bun-module/-/is-bun-module-2.0.0.tgz", + "integrity": "sha512-gNCGbnnnnFAUGKeZ9PdbyeGYJqewpmc2aKHUEMO5nQPWU9lOmv7jcmQIv+qHD8fXW6W7qfuCwX4rY9LNRjXrkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.7.1" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz", + "integrity": "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz", + "integrity": "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz", + "integrity": "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-function": { + "version": 
"1.1.2", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz", + "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.4", + "generator-function": "^2.0.0", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-map": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-negative-zero": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz", + "integrity": "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": 
"sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz", + "integrity": "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-string": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz", + "integrity": "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz", + "integrity": "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-symbols": "^1.1.0", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz", + "integrity": "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz", + "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": 
"https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/iterator.prototype": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.5.tgz", + "integrity": "sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "get-proto": "^1.0.0", + "has-symbols": "^1.1.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/jackspeak": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", + "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", + "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, + "node_modules/jsx-ast-utils": { + "version": 
"3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/language-subtag-registry": { + "version": "0.3.23", + "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz", + "integrity": "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/language-tags": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz", + "integrity": "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==", + "dev": true, + "license": "MIT", + "dependencies": { + "language-subtag-registry": "^0.3.20" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">= 0.4" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/napi-postinstall": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/napi-postinstall/-/napi-postinstall-0.3.4.tgz", + "integrity": "sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==", + "dev": true, + "license": "MIT", + "bin": { + "napi-postinstall": "lib/cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/napi-postinstall" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/next": { + "version": "14.2.33", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.33.tgz", + "integrity": "sha512-GiKHLsD00t4ACm1p00VgrI0rUFAC9cRDGReKyERlM57aeEZkOQGcZTpIbsGn0b562FTPJWmYfKwplfO9EaT6ng==", + "license": "MIT", 
+ "dependencies": { + "@next/env": "14.2.33", + "@swc/helpers": "0.5.5", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", + "postcss": "8.4.31", + "styled-jsx": "5.1.1" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": ">=18.17.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "14.2.33", + "@next/swc-darwin-x64": "14.2.33", + "@next/swc-linux-arm64-gnu": "14.2.33", + "@next/swc-linux-arm64-musl": "14.2.33", + "@next/swc-linux-x64-gnu": "14.2.33", + "@next/swc-linux-x64-musl": "14.2.33", + "@next/swc-win32-arm64-msvc": "14.2.33", + "@next/swc-win32-ia32-msvc": "14.2.33", + "@next/swc-win32-x64-msvc": "14.2.33" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/node-gyp-build": { + "version": "4.8.4", + "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.8.4.tgz", + "integrity": "sha512-LA4ZjwlnUblHVgq0oBF3Jl/6h/Nvs5fzBLwdEF4nuxnFdsfajde4WfxtJr3CaiH+F6ewcIB/q4jQ4UzPyid+CQ==", + "license": "MIT", + "bin": { + "node-gyp-build": "bin.js", + "node-gyp-build-optional": "optional.js", + "node-gyp-build-test": "build-test.js" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.9.tgz", + "integrity": "sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.1.1" + }, + "engines": { + "node": ">= 
0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.groupby": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.3.tgz", + "integrity": "sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.values": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.1.tgz", + "integrity": "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/own-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", + "integrity": "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.6", + "object-keys": "^1.1.1", + "safe-push-apply": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": 
"MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": 
"sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dev": true, + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", + "integrity": "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": 
"^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.7", + "get-proto": "^1.0.1", + "which-builtin-type": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz", + "integrity": "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "set-function-name": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + 
"url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-array-concat": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", + "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "has-symbols": "^1.1.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-push-apply": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz", + "integrity": "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semiver": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/semiver/-/semiver-1.1.0.tgz", + "integrity": "sha512-QNI2ChmuioGC1/xjyYwyZYADILWyW6AmS1UH6gDj/SFUUUS4MBAWs/7mxnkRPc/F4iHezDP+O8t0dO8WHiEOdg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": 
"^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-proto": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz", + "integrity": "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stable-hash": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/stable-hash/-/stable-hash-0.0.5.tgz", + "integrity": "sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA==", + "dev": true, + "license": "MIT" + }, + "node_modules/stop-iteration-iterator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz", + "integrity": "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "internal-slot": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/string-width/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/string.prototype.includes": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz", + "integrity": "sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.12", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz", + "integrity": "sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.6", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.6", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "internal-slot": "^1.1.0", + "regexp.prototype.flags": "^1.5.3", + "set-function-name": "^2.0.2", + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.repeat": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz", + "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", + "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-data-property": "^1.1.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-object-atoms": "^1.0.0", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz", + "integrity": 
"sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", + "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/tsconfig-paths": { + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", + "integrity": "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": 
"https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", + "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz", + "integrity": "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz", + "integrity": "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.15", + "reflect.getprototypeof": "^1.0.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz", + "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0", + "reflect.getprototypeof": "^1.0.6" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/unbox-primitive": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz", 
+ "integrity": "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "has-bigints": "^1.0.2", + "has-symbols": "^1.1.0", + "which-boxed-primitive": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/unrs-resolver": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/unrs-resolver/-/unrs-resolver-1.11.1.tgz", + "integrity": "sha512-bSjt9pjaEBnNiGgc9rUiHGKv5l4/TGzDmYw3RhnkJGtLhbnnA/5qJj7x3dNDCRx/PJxu774LlH8lCOlB4hEfKg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "napi-postinstall": "^0.3.0" + }, + "funding": { + "url": "https://opencollective.com/unrs-resolver" + }, + "optionalDependencies": { + "@unrs/resolver-binding-android-arm-eabi": "1.11.1", + "@unrs/resolver-binding-android-arm64": "1.11.1", + "@unrs/resolver-binding-darwin-arm64": "1.11.1", + "@unrs/resolver-binding-darwin-x64": "1.11.1", + "@unrs/resolver-binding-freebsd-x64": "1.11.1", + "@unrs/resolver-binding-linux-arm-gnueabihf": "1.11.1", + "@unrs/resolver-binding-linux-arm-musleabihf": "1.11.1", + "@unrs/resolver-binding-linux-arm64-gnu": "1.11.1", + "@unrs/resolver-binding-linux-arm64-musl": "1.11.1", + "@unrs/resolver-binding-linux-ppc64-gnu": "1.11.1", + "@unrs/resolver-binding-linux-riscv64-gnu": "1.11.1", + "@unrs/resolver-binding-linux-riscv64-musl": "1.11.1", + "@unrs/resolver-binding-linux-s390x-gnu": "1.11.1", + "@unrs/resolver-binding-linux-x64-gnu": "1.11.1", + "@unrs/resolver-binding-linux-x64-musl": "1.11.1", + "@unrs/resolver-binding-wasm32-wasi": "1.11.1", + "@unrs/resolver-binding-win32-arm64-msvc": "1.11.1", + "@unrs/resolver-binding-win32-ia32-msvc": "1.11.1", + "@unrs/resolver-binding-win32-x64-msvc": "1.11.1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz", + "integrity": "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-bigint": "^1.1.0", + "is-boolean-object": "^1.2.1", + "is-number-object": "^1.1.1", + "is-string": "^1.1.1", + "is-symbol": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + 
"version": "1.2.1", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz", + "integrity": "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", + "is-async-function": "^2.0.0", + "is-date-object": "^1.1.0", + "is-finalizationregistry": "^1.1.0", + "is-generator-function": "^1.0.10", + "is-regex": "^1.2.1", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.1.0", + "which-collection": "^1.0.2", + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.19", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/src/frontend/frontend/package.json b/src/frontend/frontend/package.json new file mode 100644 index 0000000000000000000000000000000000000000..23a9e175c6cca0eb9771268850560c79d4c7fe15 --- /dev/null +++ b/src/frontend/frontend/package.json @@ -0,0 +1,27 @@ +{ + "name": "recruitment-agent-frontend", + "version": "0.1.0", + "private": true, + "scripts": { + "dev": "next dev -p 7860", + "build": "next build", + "start": "next start -p 7860", + "lint": 
"next lint" + }, + "dependencies": { + "react": "^18.3.1", + "react-dom": "^18.3.1", + "next": "^14.2.5", + "@gradio/client": "^0.15.0", + "buffer": "^6.0.3" + }, + "devDependencies": { + "@types/node": "^20.14.12", + "@types/react": "^18.3.3", + "@types/react-dom": "^18.3.0", + "typescript": "^5.5.4", + "eslint": "^8.57.0", + "eslint-config-next": "^14.2.5" + } +} + diff --git a/src/frontend/frontend/tsconfig.json b/src/frontend/frontend/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..eb231e50c8fc9c91c8459bc8d7556468b9cfb2db --- /dev/null +++ b/src/frontend/frontend/tsconfig.json @@ -0,0 +1,28 @@ +{ + "compilerOptions": { + "target": "ES2020", + "lib": ["dom", "dom.iterable", "esnext"], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + "noEmit": true, + "esModuleInterop": true, + "module": "esnext", + "moduleResolution": "bundler", + "resolveJsonModule": true, + "isolatedModules": true, + "jsx": "preserve", + "incremental": true, + "plugins": [ + { + "name": "next" + } + ], + "paths": { + "@/*": ["./*"] + } + }, + "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], + "exclude": ["node_modules"] +} + diff --git a/src/frontend/gradio/README.md b/src/frontend/gradio/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7c3e9b55fc5baa2c4ff02550dca1f13782de48b8 --- /dev/null +++ b/src/frontend/gradio/README.md @@ -0,0 +1,132 @@ +# Gradio Frontend for Hugging Face Spaces + +This is a unified Gradio interface for the Recruitment Agent system, designed for deployment on Hugging Face Spaces for the **MCP 1st Birthday Hackathon**. + +## 🚀 Features + +The Gradio app includes three main tabs: + +1. **👤 Candidate Portal** + - Submit job applications with CV upload + - Check application status by email + - View screening results and interview status + +2. **🧑‍💼 HR Portal** + - View and filter candidates by status + - Trigger voice screening for candidates + - Schedule interviews + - Manage recruitment pipeline + +3. **🤖 Supervisor Chat** + - Interactive chat with the HR Supervisor Agent + - Query candidate information + - Get help with recruitment tasks + - Token usage tracking + +## 📦 Deployment to Hugging Face Spaces + +### Step 1: Create a Hugging Face Space + +1. Go to [Hugging Face Spaces](https://huggingface.co/spaces) +2. Click "Create new Space" +3. Select: + - **SDK**: `gradio` + - **Hardware**: Choose based on your needs (CPU Basic is fine for the frontend) + - **Visibility**: Public (for hackathon submission) + +### Step 2: Upload Files + +Upload these files to your Space: + +``` +app.py # Main Gradio application (from src/frontend/gradio/app.py) +requirements.txt # Python dependencies (from src/frontend/gradio/requirements.txt) +README.md # This file (optional, but recommended) +``` + +**Note**: The app expects the backend API to be accessible. 
You have two options: + +#### Option A: Backend on a Separate Space (Recommended for Hackathon) +- Deploy the FastAPI backend as a separate Space +- Update API URLs in the Gradio app to point to your backend Space + +#### Option B: Backend Running Elsewhere +- Set environment variables in your Space settings: + - `SUPERVISOR_API_URL` + - `DATABASE_API_URL` + - `CV_UPLOAD_API_URL` + - `VOICE_SCREENER_API_URL` + +### Step 3: Configure Environment Variables + +In your Space settings, add any required environment variables: + +```bash +# API URLs (if backend is separate) +SUPERVISOR_API_URL=https://your-backend-space.hf.space/api/v1/supervisor +DATABASE_API_URL=https://your-backend-space.hf.space/api/v1/db +CV_UPLOAD_API_URL=https://your-backend-space.hf.space/api/v1/cv +VOICE_SCREENER_API_URL=https://your-backend-space.hf.space/api/v1/voice-screener + +# OpenAI API Key (if backend needs it) +OPENAI_API_KEY=your_key_here +``` + +### Step 4: Deploy + +1. Push your files to the Space +2. Hugging Face will automatically build and deploy +3. Wait for the build to complete +4. Your app will be live at: `https://your-username-your-space-name.hf.space` + +## 🔧 Local Development + +To run locally: + +```bash +# Install dependencies +pip install -r requirements.txt + +# Make sure the backend API is running on localhost:8080 +# Or set environment variables: +export SUPERVISOR_API_URL=http://localhost:8080/api/v1/supervisor +export DATABASE_API_URL=http://localhost:8080/api/v1/db +export CV_UPLOAD_API_URL=http://localhost:8080/api/v1/cv + +# Run Gradio app +python app.py +``` + +The app will be available at `http://localhost:7860`. + +## 📝 Differences from Next.js Frontend + +This Gradio frontend is a **separate implementation** designed specifically for Hugging Face Spaces deployment. Key differences: + +1. **Single Interface**: All features in one Gradio app with tabs +2. **Simplified UI**: Gradio's built-in components instead of custom React +3. **Backend Integration**: Uses the same SDK clients as the Next.js frontend +4. **Deployment**: Optimized for Hugging Face Spaces (no Node.js required) + +## 🎯 Hackathon Submission + +For the MCP 1st Birthday Hackathon: + +1. ✅ Deploy to Hugging Face Spaces (required) +2. ✅ Use Gradio interface (required) +3. ✅ Include all main features (Candidate Portal, HR Portal, Supervisor Chat) +4. 
✅ Link to your Space in the hackathon submission + +## 🔗 Related Files + +- **Next.js Frontend**: `src/frontend/frontend/` (for local development) +- **Streamlit UIs**: `src/frontend/streamlit/` (alternative Python UIs) +- **Backend API**: `src/api/` (FastAPI backend) +- **SDK**: `src/sdk/` (Python SDK used by Gradio app) + +## 📚 Documentation + +- [Gradio Documentation](https://www.gradio.app/docs/) +- [Hugging Face Spaces Guide](https://huggingface.co/docs/hub/spaces) +- [MCP Hackathon](https://huggingface.co/MCP-1st-Birthday) + diff --git a/src/frontend/gradio/__init__.py b/src/frontend/gradio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/frontend/gradio/app.py b/src/frontend/gradio/app.py new file mode 100644 index 0000000000000000000000000000000000000000..5a5b421843ff4e3d1e3747bdd9c62917ba222a91 --- /dev/null +++ b/src/frontend/gradio/app.py @@ -0,0 +1,909 @@ +""" +Gradio Frontend for Recruitment Agent - Hugging Face Spaces Deployment +Requires Gradio 6.0+ +""" + +import os +import gradio as gr +from typing import Optional, Tuple, Dict, Any +import sys +from pathlib import Path +from uuid import uuid4 + +project_root = Path(__file__).resolve().parent.parent.parent.parent +sys.path.insert(0, str(project_root)) + +try: + from src.sdk import SupervisorClient, DatabaseClient, CVUploadClient + SDK_AVAILABLE = True +except ImportError as e: + SDK_AVAILABLE = False + try: + alt_root = Path(__file__).parent.parent.parent.parent + if str(alt_root) not in sys.path: + sys.path.insert(0, str(alt_root)) + from src.sdk import SupervisorClient, DatabaseClient, CVUploadClient + SDK_AVAILABLE = True + except Exception: + pass + +# ============================================================================ +# CONFIGURATION +# ============================================================================ + +def get_api_url(service: str) -> str: + env_map = { + "supervisor": "SUPERVISOR_API_URL", + "db": "DATABASE_API_URL", + "database": "DATABASE_API_URL", + "cv": "CV_UPLOAD_API_URL", + "voice-screener": "VOICE_SCREENER_API_URL", + } + path_map = { + "supervisor": "supervisor", + "db": "db", + "database": "db", + "cv": "cv", + "voice-screener": "voice-screener", + } + env_var = env_map.get(service, f"{service.upper()}_API_URL") + api_url = os.getenv(env_var) + if api_url: + return api_url + api_path = path_map.get(service, service) + space_id = os.getenv("SPACE_ID") + if space_id: + return f"https://{space_id}.hf.space/api/v1/{api_path}" + return f"http://localhost:8080/api/v1/{api_path}" + +# ============================================================================ +# CANDIDATE APPLICATION PORTAL +# ============================================================================ + +def submit_application(full_name: str, email: str, phone: str, cv_file, session_state: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]: + if not SDK_AVAILABLE: + return "❌ SDK not available. 
Please check backend connection.", ensure_session(session_state) + session = ensure_session(session_state) + if not full_name or not email: + return "❌ Full name and email are required.", session + if not cv_file: + return "❌ Please upload your CV (PDF or DOCX).", session + try: + client = CVUploadClient(base_url=get_api_url("cv"), session_id=session["session_id"]) + file_path = cv_file.name if hasattr(cv_file, 'name') else str(cv_file) + filename = Path(file_path).name + with open(file_path, 'rb') as f: + response = client.submit(full_name=full_name, email=email, phone=phone or "", cv_file=f, filename=filename) + if response.success: + return f"✅ {response.message}\n\nYour application has been recorded.", session + elif response.already_exists: + return f"⚠️ {response.message}\n\nPlease wait for review.", session + return f"❌ {response.message}", session + except Exception as e: + return f"❌ Failed to submit application: {str(e)}", session + +def check_application_status(email: str, session_state: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]: + if not SDK_AVAILABLE: + return "❌ SDK not available.", ensure_session(session_state) + session = ensure_session(session_state) + if not email: + return "❌ Please enter your email address.", session + try: + client = DatabaseClient(base_url=get_api_url("database"), session_id=session["session_id"]) + response = client.get_candidate_by_email(email, include_relations=True) + if response.success and response.data: + c = response.data + info = f"**Status:** {c.get('status', 'unknown')}\n\n" + info += f"**Applied:** {c.get('created_at', 'N/A')}\n\n" + if c.get('cv_screening_results'): + score = c['cv_screening_results'][0].get('overall_fit_score', 0) + info += f"**CV Score:** {score * 100:.1f}%\n\n" + if c.get('voice_screening_results'): + info += "**Voice Screening:** ✅ Completed\n\n" + if c.get('interview_scheduling'): + info += f"**Interview:** {c['interview_scheduling'][0].get('status', 'Scheduled')}\n\n" + if c.get('final_decision'): + info += f"**Decision:** {c['final_decision'].get('decision', 'Pending')}" + return info, session + return f"❌ No application found for {email}.", session + except Exception as e: + return f"❌ Error: {str(e)}", session + +# ============================================================================ +# HR PORTAL +# ============================================================================ + +def load_candidates(status_filter: Optional[str] = None, session_state: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]: + if not SDK_AVAILABLE: + return "❌ SDK not available.", ensure_session(session_state) + session = ensure_session(session_state) + try: + client = DatabaseClient(base_url=get_api_url("database"), session_id=session["session_id"]) + response = client.get_candidates(status=status_filter if status_filter != "All" else None, limit=100, include_relations=True) + if response.success and response.data: + candidates = response.data + if not candidates: + return "No candidates found.", session + table = "| Name | Email | Status | Applied | Voice |\n|------|-------|--------|---------|-------|\n" + for c in candidates: + name = c.get('full_name', 'Unknown') + email = c.get('email', 'N/A') + status = c.get('status', 'unknown') + applied = str(c.get('created_at', 'N/A'))[:10] + voice = "✅" if c.get('voice_screening_results') else "❌" + table += f"| {name} | {email} | {status} | {applied} | {voice} |\n" + return f"**Found {len(candidates)} candidate(s)**\n\n{table}", session + return "No 
candidates found.", session + except Exception as e: + return f"❌ Error: {str(e)}", session + +def trigger_voice_screening(candidate_email: str, session_state: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]: + if not SDK_AVAILABLE: + return "❌ SDK not available.", ensure_session(session_state) + session = ensure_session(session_state) + if not candidate_email: + return "❌ Please enter candidate email.", session + try: + client = SupervisorClient(base_url=get_api_url("supervisor"), session_id=session["session_id"]) + thread_id = client.new_chat() + response = client.chat(message=f"Please trigger voice screening for candidate with email {candidate_email}", thread_id=thread_id) + token_info = f"\n\n📊 Tokens: {response.token_count:,}" if response.token_count else "" + return f"✅ Voice screening triggered!\n\n{response.content}{token_info}", session + except Exception as e: + return f"❌ Failed: {str(e)}", session + +def schedule_interview(candidate_email: str, session_state: Optional[Dict[str, Any]] = None) -> Tuple[str, Dict[str, Any]]: + if not SDK_AVAILABLE: + return "❌ SDK not available.", ensure_session(session_state) + session = ensure_session(session_state) + if not candidate_email: + return "❌ Please enter candidate email.", session + try: + client = SupervisorClient(base_url=get_api_url("supervisor"), session_id=session["session_id"]) + thread_id = client.new_chat() + response = client.chat(message=f"Please schedule an interview for candidate with email {candidate_email}", thread_id=thread_id) + token_info = f"\n\n📊 Tokens: {response.token_count:,}" if response.token_count else "" + return f"✅ Interview scheduling initiated!\n\n{response.content}{token_info}", session + except Exception as e: + return f"❌ Failed: {str(e)}", session + +# ============================================================================ +# SUPERVISOR AGENT CHAT (per-user state via session dict) +# ============================================================================ + +def ensure_session(state: Optional[Dict[str, Any]]) -> Dict[str, Any]: + """Ensure a per-user session dict exists with a unique session_id.""" + if state is None: + state = {} + if not state.get("session_id"): + state["session_id"] = uuid4().hex + state.setdefault("thread_id", None) + state.setdefault("messages", []) + state.setdefault("total_tokens", 0) + return state + +def format_chat_history(messages: list) -> str: + if not messages: + return "" + formatted = [] + for role, content in messages: + if role == "user": + formatted.append(f"👤 **You**\n\n{content}") + else: + formatted.append(f"🤖 **Assistant**\n\n{content}") + return "\n\n---\n\n".join(formatted) + +def init_chat(session_state: Optional[Dict[str, Any]] = None) -> Tuple[str, str, Dict[str, Any]]: + if not SDK_AVAILABLE: + return "❌ SDK not available.", "📊 Tokens: 0", ensure_session(session_state) + session = ensure_session(session_state) + try: + client = SupervisorClient(base_url=get_api_url("supervisor"), session_id=session["session_id"]) + thread_id = client.new_chat() + session["thread_id"] = thread_id + session["messages"] = [] + session["total_tokens"] = 0 + welcome = """Hello! I'm the HR Supervisor Agent. 
I can help you with: + +- **Querying** candidate information +- **Screening** CVs and providing insights +- **Scheduling** interviews automatically +- **Managing** the recruitment pipeline + +What would you like to know?""" + session["messages"].append(("assistant", welcome)) + return format_chat_history(session["messages"]), "📊 Tokens: 0", session + except Exception as e: + return f"❌ Failed to initialize: {str(e)}", "📊 Tokens: 0", session + +def chat_with_supervisor(message: str, history: str, session_state: Optional[Dict[str, Any]]) -> Tuple[str, str, str, Dict[str, Any]]: + if not SDK_AVAILABLE: + return history, "❌ SDK not available.", "", ensure_session(session_state) + session = ensure_session(session_state) + if not message.strip(): + return history, f"📊 Tokens: {session['total_tokens']:,}", "", session + if not session.get("thread_id"): + _, _, session = init_chat(session) + try: + client = SupervisorClient(base_url=get_api_url("supervisor"), session_id=session["session_id"]) + session["messages"].append(("user", message)) + response = client.chat(message=message, thread_id=session["thread_id"]) + session["messages"].append(("assistant", response.content)) + session["total_tokens"] += response.token_count or 0 + return format_chat_history(session["messages"]), f"📊 Tokens: {session['total_tokens']:,}", "", session + except Exception as e: + error_msg = f"❌ Error: {str(e)}" + session["messages"].append(("assistant", error_msg)) + return format_chat_history(session["messages"]), f"📊 Tokens: {session['total_tokens']:,}", "", session + +# ============================================================================ +# CUSTOM CSS +# ============================================================================ + +CUSTOM_CSS = """ +/* ===================================================== + FORCE LIGHT MODE - Aggressive overrides for Gradio 6 + ===================================================== */ + +/* Root level - override everything */ +:root { + --body-background-fill: #ffffff !important; + --background-fill-primary: #ffffff !important; + --background-fill-secondary: #f8fafc !important; + --block-background-fill: #ffffff !important; + --input-background-fill: #ffffff !important; + --body-text-color: #1e293b !important; + --block-label-text-color: #1e293b !important; + --block-title-text-color: #1e293b !important; + --color-text-body: #1e293b !important; + --text-color: #1e293b !important; + color-scheme: light !important; +} + +/* Target the main gradio wrapper */ +#__next, #root, #app, main, .main, +gradio-app, .gradio-app, +[class*="gradio"], [id*="gradio"] { + background-color: #ffffff !important; + background: #ffffff !important; + color: #1e293b !important; +} + +/* Dark mode class overrides */ +.dark, [data-theme="dark"], html.dark, body.dark, +.dark *, [data-theme="dark"] * { + background-color: #ffffff !important; + color: #1e293b !important; +} + +html, body { + background-color: #ffffff !important; + background: #ffffff !important; + color: #1e293b !important; +} + +.gradio-container { + background-color: #ffffff !important; + background: #ffffff !important; + color: #1e293b !important; +} + +/* Wrap everything */ +.wrap, .wrapper, .contain, +[class*="wrap"], [class*="contain"] { + background-color: #ffffff !important; + background: #ffffff !important; +} + +/* ALL text elements - force dark text */ +*, *::before, *::after { + --tw-text-opacity: 1 !important; +} + +h1, h2, h3, h4, h5, h6, +p, span, div, label, +li, td, th, a:not(.main-header a), +strong, b, em, i, u, +.text, 
[class*="text"] { + color: #1e293b !important; +} + +/* Prose/Markdown specific */ +.prose, .prose *, +.markdown, .markdown *, +[class*="prose"], [class*="markdown"], +.md, .md * { + color: #1e293b !important; + background-color: transparent !important; +} + +/* Strong/bold text emphasis */ +strong, b, .font-bold, .font-semibold { + color: #0f172a !important; + font-weight: 600 !important; +} + +/* ===================================================== + FORM ELEMENTS + ===================================================== */ + +input, textarea, select, option { + background-color: #ffffff !important; + background: #ffffff !important; + color: #1e293b !important; + border: 1px solid #cbd5e1 !important; + border-radius: 6px !important; +} + +input::placeholder, textarea::placeholder { + color: #94a3b8 !important; + opacity: 1 !important; +} + +input:focus, textarea:focus, select:focus { + border-color: #2563eb !important; + outline: none !important; + box-shadow: 0 0 0 2px rgba(37, 99, 235, 0.2) !important; +} + +/* Labels */ +label, .label, [class*="label"] { + color: #1e293b !important; + font-weight: 500 !important; +} + +/* ===================================================== + BLOCKS AND CONTAINERS + ===================================================== */ + +.block, .form, .container, .panel, .card, .box, +[class*="block"], [class*="panel"], [class*="card"], +[class*="svelte-"] { + background-color: #ffffff !important; + background: #ffffff !important; +} + +/* ===================================================== + HEADER WITH GRADIENT (WHITE TEXT) + ===================================================== */ + +.main-header { + background: linear-gradient(135deg, #2563eb 0%, #1d4ed8 100%) !important; + padding: 2rem !important; + border-radius: 12px !important; + margin-bottom: 1.5rem !important; + text-align: center !important; +} + +.main-header h1, +.main-header p, +.main-header span, +.main-header * { + color: white !important; +} + +.main-header h1 { + font-size: 2.5rem !important; + margin: 0 !important; + font-weight: 700 !important; +} + +.main-header p { + margin: 0.5rem 0 0 0 !important; + font-size: 1.1rem !important; + opacity: 0.95 !important; +} + +/* ===================================================== + INFO BOXES (BLUE THEMED) + ===================================================== */ + +.info-box { + background: #eff6ff !important; + border-left: 4px solid #2563eb !important; + padding: 1rem !important; + border-radius: 0 8px 8px 0 !important; + margin: 1rem 0 !important; +} + +.info-box, .info-box * { + color: #1e40af !important; +} + +/* ===================================================== + CHAT DISPLAY + ===================================================== */ + +.chat-display { + border: 1px solid #e2e8f0 !important; + border-radius: 12px !important; + padding: 1.5rem !important; + background-color: #ffffff !important; + background: #ffffff !important; + min-height: 400px !important; + max-height: 500px !important; + overflow-y: auto !important; +} + +.chat-display, .chat-display *, +.chat-display p, .chat-display span, +.chat-display strong, .chat-display li { + color: #1e293b !important; +} + +/* ===================================================== + STATS BOX + ===================================================== */ + +.stats-box { + background-color: #f1f5f9 !important; + background: #f1f5f9 !important; + border-radius: 8px !important; + padding: 1rem !important; + text-align: center !important; +} + +.stats-box, .stats-box * { + color: #475569 
!important; + font-weight: 600 !important; +} + +/* ===================================================== + BUTTONS + ===================================================== */ + +/* Primary buttons - blue bg, white text */ +button.primary, +.primary, +button[class*="primary"], +[class*="primary"] button, +button[variant="primary"] { + background-color: #2563eb !important; + background: #2563eb !important; + color: white !important; + border: none !important; + border-radius: 6px !important; +} + +button.primary:hover, +.primary:hover, +button[class*="primary"]:hover { + background-color: #1d4ed8 !important; + background: #1d4ed8 !important; +} + +button.primary *, +.primary *, +button[class*="primary"] * { + color: white !important; +} + +/* Secondary buttons */ +button.secondary, +.secondary, +button[class*="secondary"], +button[variant="secondary"] { + background-color: #f1f5f9 !important; + background: #f1f5f9 !important; + color: #1e293b !important; + border: 1px solid #cbd5e1 !important; +} + +button.secondary *, +.secondary *, +button[class*="secondary"] * { + color: #1e293b !important; +} + +/* ===================================================== + TABS + ===================================================== */ + +button[role="tab"], +[role="tab"], +.tab, .tabs button { + color: #1e293b !important; + background-color: transparent !important; +} + +button[role="tab"][aria-selected="true"], +[role="tab"][aria-selected="true"], +.tab.selected, .tab.active { + color: #2563eb !important; + border-bottom: 2px solid #2563eb !important; +} + +.tab-content, .tabs-content, [role="tabpanel"] { + background-color: #ffffff !important; +} + +/* ===================================================== + TABLES + ===================================================== */ + +table { + background-color: #ffffff !important; +} + +th, td { + background-color: #ffffff !important; + color: #1e293b !important; + border-color: #e2e8f0 !important; +} + +th { + background-color: #f8fafc !important; + font-weight: 600 !important; +} + +/* ===================================================== + DROPDOWN / SELECT + ===================================================== */ + +select, +.dropdown, +[data-testid="dropdown"], +[class*="dropdown"] { + background-color: #ffffff !important; + background: #ffffff !important; + color: #1e293b !important; + border: 1px solid #cbd5e1 !important; +} + +/* Dropdown options */ +option { + background-color: #ffffff !important; + color: #1e293b !important; +} + +/* ===================================================== + FILE UPLOAD + ===================================================== */ + +[class*="file"], +[class*="upload"], +.upload-area, +.dropzone, +[class*="drop"] { + background-color: #f8fafc !important; + background: #f8fafc !important; + border: 2px dashed #cbd5e1 !important; + border-radius: 8px !important; +} + +[class*="file"] *, +[class*="upload"] *, +.dropzone * { + color: #64748b !important; +} + +/* ===================================================== + MISC + ===================================================== */ + +hr { + border-color: #e2e8f0 !important; +} + +/* Links (except in header) */ +a:not(.main-header a) { + color: #2563eb !important; +} + +a:not(.main-header a):hover { + color: #1d4ed8 !important; +} + +/* Scrollbar styling */ +::-webkit-scrollbar { + width: 8px; + height: 8px; +} + +::-webkit-scrollbar-track { + background: #f1f5f9; + border-radius: 4px; +} + +::-webkit-scrollbar-thumb { + background: #cbd5e1; + border-radius: 4px; +} + 
+::-webkit-scrollbar-thumb:hover { + background: #94a3b8; +} + +/* ===================================================== + AUTO-SCROLL FOR CHAT + ===================================================== */ + +.auto-scroll { + overflow-y: auto !important; + scroll-behavior: smooth !important; +} + +/* Remove scrollbar from voice and interview output sections */ +.no-scroll-output, +.no-scroll-output *, +[class*="no-scroll-output"] { + overflow: visible !important; + overflow-y: visible !important; + overflow-x: visible !important; + max-height: none !important; + height: auto !important; +} + +/* Ensure processing loaders are visible */ +.no-scroll-output .gradio-loading, +.no-scroll-output [class*="loading"], +.no-scroll-output [class*="spinner"] { + display: block !important; + visibility: visible !important; + opacity: 1 !important; +} +""" + +# ============================================================================ +# THEME - Use Default theme as base (lighter than Soft) +# ============================================================================ + +try: + THEME = gr.themes.Default( + primary_hue="blue", + secondary_hue="slate", + neutral_hue="slate", + ) +except Exception as e: + print(f"Theme creation failed: {e}, using string theme") + THEME = "default" + +# ============================================================================ +# GRADIO INTERFACE - Gradio 6+ Compatible +# In Gradio 6, theme and css are passed to launch(), not Blocks() +# ============================================================================ + +def create_app(): + # In Gradio 6, gr.Blocks() takes no theme/css args - they go to launch() + with gr.Blocks() as app: + # Force light mode via JavaScript - runs on load and observes for changes + gr.HTML(""" + +
+        <!-- light-mode forcing script was stripped during extraction; it ran on
+             load and watched for dark-mode class changes -->
+        <div class="main-header">
+            <h1>🤖 ScionHire AI Labs</h1>
+            <p>AI-Powered Recruitment System</p>
+        </div>
+ """) + + # Per-user session state (persists across interactions) + session_state = gr.State(value=None) + + with gr.Tabs(): + # ============================================================ + # TAB 1: Candidate Portal + # ============================================================ + with gr.Tab("👤 Candidate Portal"): + gr.Markdown("## 📝 Submit Your Application") + gr.HTML('
<div class="info-box">Welcome! We\'re seeking talented engineers. Submit your CV below to start your application.</div>
') + + with gr.Row(): + with gr.Column(): + full_name = gr.Textbox(label="Full Name", placeholder="Ada Lovelace") + email = gr.Textbox(label="Email", placeholder="ada@example.com") + phone = gr.Textbox(label="Phone (Optional)", placeholder="+1 234 567 8900") + cv_file = gr.File(label="Upload CV (PDF or DOCX)", file_types=[".pdf", ".docx"]) + submit_btn = gr.Button("📨 Submit Application", variant="primary", size="lg") + + with gr.Column(): + gr.Markdown("### 📊 Application Result") + application_output = gr.Markdown() + + submit_btn.click( + fn=submit_application, + inputs=[full_name, email, phone, cv_file, session_state], + outputs=[application_output, session_state] + ) + + gr.Markdown("---") + gr.Markdown("## 🔍 Check Application Status") + + with gr.Row(): + status_email = gr.Textbox(label="Email", placeholder="Enter your email to check status", scale=3) + check_btn = gr.Button("🔍 Check Status", variant="secondary", scale=1) + + status_output = gr.Markdown() + check_btn.click(fn=check_application_status, inputs=[status_email, session_state], outputs=[status_output, session_state]) + + # ============================================================ + # TAB 2: HR Portal + # ============================================================ + with gr.Tab("🧑‍💼 HR Portal"): + gr.Markdown("## 👥 Candidate Management") + + with gr.Row(): + status_filter = gr.Dropdown( + label="Filter by Status", + choices=["All", "applied", "cv_screened", "cv_passed", "voice_done", "voice_passed", "interview_scheduled"], + value="All", + scale=2 + ) + load_btn = gr.Button("🔄 Load Candidates", variant="primary", scale=1) + + candidates_output = gr.Markdown() + load_btn.click(fn=load_candidates, inputs=[status_filter, session_state], outputs=[candidates_output, session_state]) + + gr.Markdown("---") + gr.Markdown("## 🎙️ Voice Screening") + + with gr.Row(): + voice_email = gr.Textbox(label="Candidate Email", placeholder="candidate@example.com", scale=3) + voice_btn = gr.Button("🎙️ Trigger Screening", variant="secondary", scale=1) + + voice_output = gr.Markdown(elem_classes=["no-scroll-output"]) + voice_btn.click(fn=trigger_voice_screening, inputs=[voice_email, session_state], outputs=[voice_output, session_state]) + + gr.Markdown("---") + gr.Markdown("## 📅 Interview Scheduling") + + with gr.Row(): + interview_email = gr.Textbox(label="Candidate Email", placeholder="candidate@example.com", scale=3) + interview_btn = gr.Button("📅 Schedule Interview", variant="secondary", scale=1) + + interview_output = gr.Markdown(elem_classes=["no-scroll-output"]) + interview_btn.click(fn=schedule_interview, inputs=[interview_email, session_state], outputs=[interview_output, session_state]) + + # ============================================================ + # TAB 3: Supervisor Chat + # ============================================================ + with gr.Tab("🤖 Supervisor Chat"): + gr.Markdown("## 💬 Chat with HR Supervisor Agent") + gr.HTML('''
+                <div class="info-box">
+                Capabilities: Query candidates • Screen CVs • Schedule interviews • Manage recruitment pipeline
+                </div>
''') + + with gr.Row(): + with gr.Column(scale=3): + chat_history = gr.Markdown(elem_classes=["chat-display", "auto-scroll"]) + chat_input = gr.Textbox( + label="Your Message", + placeholder="Ask about candidates, screening, interviews...", + lines=2 + ) + with gr.Row(): + send_btn = gr.Button("💬 Send Message", variant="primary", scale=2) + new_chat_btn = gr.Button("🆕 New Chat", variant="secondary", scale=1) + + with gr.Column(scale=1): + gr.Markdown("### 📊 Session Stats") + token_info = gr.Markdown("📊 Tokens: 0", elem_classes=["stats-box"]) + gr.Markdown(""" +**💡 Tips:** +- Ask about specific candidates by email +- Request CV screening summaries +- Schedule interviews directly +- Get pipeline statistics + """) + + # Initialize chat on load with auto-scroll + def init_chat_with_scroll(state): + hist, tokens, new_state = init_chat(state) + return hist, tokens, new_state + + app.load( + fn=init_chat_with_scroll, + inputs=[session_state], + outputs=[chat_history, token_info, session_state] + ).then( + fn=None, + js=""" + () => { + const chatDisplay = document.querySelector('.auto-scroll'); + if (chatDisplay) { + setTimeout(() => { + chatDisplay.scrollTop = chatDisplay.scrollHeight; + }, 100); + } + } + """ + ) + + # Send message with auto-scroll + send_btn.click( + fn=chat_with_supervisor, + inputs=[chat_input, chat_history, session_state], + outputs=[chat_history, token_info, chat_input, session_state] + ).then( + fn=None, + js=""" + () => { + // Auto-scroll chat history to bottom + const chatDisplay = document.querySelector('.auto-scroll'); + if (chatDisplay) { + setTimeout(() => { + chatDisplay.scrollTop = chatDisplay.scrollHeight; + }, 100); + } + } + """ + ) + + # New chat with auto-scroll + new_chat_btn.click( + fn=init_chat, + inputs=[session_state], + outputs=[chat_history, token_info, session_state] + ).then( + fn=None, + js=""" + () => { + const chatDisplay = document.querySelector('.auto-scroll'); + if (chatDisplay) { + setTimeout(() => { + chatDisplay.scrollTop = chatDisplay.scrollHeight; + }, 100); + } + } + """ + ) + + gr.Markdown("---") + gr.Markdown("
Built with ❤️ for the MCP Hackathon
") + + return app + +# ============================================================================ +# MAIN +# ============================================================================ + +if __name__ == "__main__": + print(f"Gradio version: {gr.__version__}") + app = create_app() + + # Honor PORT if provided by hosting platform (e.g., Hugging Face Spaces) + # Some platforms inject quotes around PORT (e.g., "\"7860\""); strip them. + raw_port = os.getenv("PORT", "7860").strip().strip("\"'") + port = int(raw_port) + + # In Gradio 6, theme and css are passed to launch(), not Blocks() + app.launch( + server_name="0.0.0.0", + server_port=port, + theme=THEME, + css=CUSTOM_CSS, + # Try to force light mode if available + # dark_mode=False, # Uncomment if supported in your version + ) diff --git a/src/frontend/gradio/assets/__init__.py b/src/frontend/gradio/assets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/frontend/gradio/requirements.txt b/src/frontend/gradio/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..265f3b03d60f1988d96e4341c7ab803d17cce621 --- /dev/null +++ b/src/frontend/gradio/requirements.txt @@ -0,0 +1,4 @@ +gradio>=6.0.0 +requests>=2.31.0 +python-dotenv>=1.0.0 + diff --git a/src/frontend/streamlit/__init__.py b/src/frontend/streamlit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..638ff8f8de5ec5df93c09d642b9f0bef1813595f --- /dev/null +++ b/src/frontend/streamlit/__init__.py @@ -0,0 +1,2 @@ +"""Streamlit UI modules.""" + diff --git a/src/frontend/streamlit/cv_ui/__init__.py b/src/frontend/streamlit/cv_ui/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/frontend/streamlit/cv_ui/app.py b/src/frontend/streamlit/cv_ui/app.py new file mode 100644 index 0000000000000000000000000000000000000000..3d41f1fdf9644ba16b0f71c0a9519adc8416574c --- /dev/null +++ b/src/frontend/streamlit/cv_ui/app.py @@ -0,0 +1,98 @@ +""" +CV Upload UI for Job Applications. + +Connects to the CV Upload API. +Run with: streamlit run src/frontend/streamlit/cv_ui/app.py + +In Docker, set CV_UPLOAD_API_URL environment variable. +Locally, defaults to http://localhost:8080/api/v1/cv +""" + +import streamlit as st +from src.sdk import CVUploadClient + +# Initialize SDK client +client = CVUploadClient() + +st.set_page_config(page_title="AI Engineer Job Portal", page_icon="🤖", layout="centered") + +# --- UI Header --- +st.title("🤖 AI Engineer Job Application Portal") +st.markdown( + """ + Welcome to **ScionHire AI Labs** 👋 + We're seeking talented engineers passionate about building intelligent systems! + Please submit your CV below to apply for the **AI Engineer** position. + """ +) + +# --- Job Description (Static for now) --- +with st.expander("📄 View Job Description"): + st.markdown( + """ + ### 🧠 Position: AI Engineer + **Location:** Remote / Wiesbaden HQ + **About the Role:** + Join our AI R&D team to develop, fine-tune, and deploy ML models for production. + You will work on projects involving LLMs, LangGraph agents, and context engineering. + + **Requirements:** + - Proficiency in Python & modern AI frameworks (PyTorch, LangChain, etc.) 
+ - Solid understanding of NLP and ML pipelines + - Experience deploying models or building intelligent systems + - Strong communication and teamwork skills + """ + ) + +st.markdown("---") + +# --- Candidate Form --- +with st.form("application_form"): + full_name = st.text_input("Full Name", placeholder="Ada Lovelace") + email = st.text_input("Email Address", placeholder="ada@lovelabs.ai") + phone = st.text_input("Phone Number", placeholder="+49 170 1234567") + uploaded_file = st.file_uploader("Upload Your CV (PDF or DOCX)", type=["pdf", "docx"]) + submitted = st.form_submit_button("📨 Submit Application") + +# --- Handle Submission --- +if submitted: + if not uploaded_file: + st.error("Please upload your CV before submitting.") + elif not (full_name and email): + st.error("Full name and email are required.") + else: + try: + with st.spinner("📤 Submitting your application..."): + response = client.submit( + full_name=full_name, + email=email, + phone=phone, + cv_file=uploaded_file, + filename=uploaded_file.name, + ) + + if response.success: + st.success(f"✅ {response.message}") + st.info("Your application has been recorded. You will receive updates soon.") + + with st.expander("📬 Submitted Info"): + st.json({ + "full_name": response.candidate_name, + "email": response.email, + "phone": phone, + "cv_file_path": response.cv_file_path, + "position": "AI Engineer", + }) + + elif response.already_exists: + st.warning( + f"⚠️ {response.message} " + "Please wait for review." + ) + else: + st.error(f"❌ {response.message}") + + except ValueError as e: + st.error(f"❌ {str(e)}") + except Exception as e: + st.error(f"❌ Failed to submit application. Is the API running?\n\nError: {e}") diff --git a/src/frontend/streamlit/cv_ui/info.md b/src/frontend/streamlit/cv_ui/info.md new file mode 100644 index 0000000000000000000000000000000000000000..e7a6dbfb05da2532e198c3674d9d0cffff7ceb18 --- /dev/null +++ b/src/frontend/streamlit/cv_ui/info.md @@ -0,0 +1,52 @@ +### 📄 CV Upload Stage — Overview + +#### 🎯 Purpose +The **CV Upload Stage** is the **entry point** of the HR screening pipeline. +It collects candidate information, stores the uploaded CV, and registers the applicant in the central database. +This stage serves as the bridge between the **candidate** and the **agentic system**, +initiating the automated recruitment workflow. 
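+
+As a rough illustration, the whole stage boils down to two operations. The sketch below is hypothetical: the real handlers are `save_cv` and `register_candidate` (named in the table that follows), the field names come from the `candidates` table description below, and all signatures here are assumed.
+
+```python
+from datetime import datetime, timezone
+from pathlib import Path
+from uuid import uuid4
+
+UPLOAD_DIR = Path("src/database/cvs/uploads")
+
+def save_cv_sketch(cv_bytes: bytes, filename: str) -> Path:
+    """Step 2: persist the raw CV into the shared uploads volume."""
+    UPLOAD_DIR.mkdir(parents=True, exist_ok=True)
+    path = UPLOAD_DIR / f"{uuid4().hex}_{filename}"
+    path.write_bytes(cv_bytes)
+    return path
+
+def register_candidate_sketch(full_name: str, email: str, phone: str, cv_path: Path) -> dict:
+    """Steps 3-4: the record the `candidates` table receives."""
+    now = datetime.now(timezone.utc).isoformat()
+    return {
+        "id": uuid4().hex,
+        "full_name": full_name,
+        "email": email,
+        "phone_number": phone or None,
+        "cv_file_path": str(cv_path),
+        "parsed_cv_json": None,   # empty at this stage
+        "status": "applied",      # this value triggers the CV Parsing Agent
+        "created_at": now,
+        "updated_at": now,
+    }
+```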
+ +--- + +#### 🧩 Role in the Pipeline +| Step | Description | Responsible Component | +|------|--------------|-----------------------| +| 1️⃣ Candidate applies | Candidate fills out form and uploads CV via the web interface | **Streamlit UI** | +| 2️⃣ File saved | CV file is stored in the shared volume (`/src/database/cvs/uploads`) | **File handler (`save_cv`)** | +| 3️⃣ Candidate registered | Candidate details are inserted into the database | **DB client (`register_candidate`)** | +| 4️⃣ Status initialized | Candidate record created with `status = "applied"` | **Database (`candidates` table)** | + +--- + +#### 🧱 Data Created +A new record is added to the **`candidates`** table containing: + +| Field | Description | +|--------|--------------| +| `id` | Unique candidate UUID | +| `full_name` | Candidate’s full name | +| `email` | Unique email address | +| `phone_number` | Optional contact number | +| `cv_file_path` | Path to the uploaded CV file | +| `parsed_cv_json` | *Empty at this stage* | +| `status` | `"applied"` | +| `created_at` / `updated_at` | Automatic timestamps | + +--- + +#### 🧠 Next Stage Triggered +Once a record exists with `status="applied"`, +the **CV Parsing Agent** is automatically triggered to extract structured data +(e.g., skills, education, and experience) from the uploaded CV. +This marks the transition toward the **CV Screening** phase. + +--- + +#### ✅ Key Takeaways +- **Initial entrypoint:** introduces candidates into the system. +- **File persistence:** all uploaded CVs are stored locally for later parsing. +- **Automation-ready:** triggers the next agent without human input. +- **Simple interface:** Streamlit UI makes the process candidate-friendly. + +> **In short:** +> The CV Upload Stage is the **gateway** to the pipeline — it collects, stores, and initializes candidate data so the agentic workflow can proceed autonomously. diff --git a/src/frontend/streamlit/supervisor_ui/__init__.py b/src/frontend/streamlit/supervisor_ui/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..85b9b204004c8f0f8ec2a7c6a6b55b041e146e18 --- /dev/null +++ b/src/frontend/streamlit/supervisor_ui/__init__.py @@ -0,0 +1,2 @@ +"""Supervisor UI module.""" + diff --git a/src/frontend/streamlit/supervisor_ui/app.py b/src/frontend/streamlit/supervisor_ui/app.py new file mode 100644 index 0000000000000000000000000000000000000000..55a69f243185a3b66c4370b898c998d143416c37 --- /dev/null +++ b/src/frontend/streamlit/supervisor_ui/app.py @@ -0,0 +1,117 @@ +""" +Streamlit UI for HR Supervisor Agent. + +Connects to the Supervisor API with streaming support. +Run with: streamlit run src/frontend/streamlit/supervisor_ui/app.py + +In Docker, set SUPERVISOR_API_URL environment variable. 
+Locally, defaults to http://localhost:8080/api/v1/supervisor +""" + +import streamlit as st +from src.sdk import SupervisorClient + +# Initialize SDK client +client = SupervisorClient() + +st.set_page_config(page_title="HR Supervisor Agent", layout="wide") + +# Initialize chat history +if "messages" not in st.session_state: + st.session_state.messages = [] + +# Initialize thread_id for conversation continuity +if "thread_id" not in st.session_state: + st.session_state.thread_id = None + +st.title("🤖 HR Supervisor Agent") +st.caption("I can query the candidate database and help with recruitment tasks.") + +# Sidebar with "New Chat" button to reset context +with st.sidebar: + st.header("Controls") + if st.button("Start New Chat", type="primary", use_container_width=True): + try: + st.session_state.thread_id = client.new_chat() + st.session_state.messages = [] + st.session_state.token_usage = 0 + except Exception: + st.error("⚠️ Cannot connect to API. Is the server running?") + st.rerun() + + st.divider() + st.caption(f"Chat ID:\n`{st.session_state.get('thread_id', 'Not set')}`") + + # Placeholder for token usage to allow dynamic updates + token_metric_placeholder = st.empty() + + if "token_usage" in st.session_state: + token_metric_placeholder.metric(label="Context Window Tokens", value=st.session_state.token_usage) + +# Display chat messages +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# User input +if prompt := st.chat_input("Ask me anything about candidates..."): + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + + # Display user message + with st.chat_message("user"): + st.markdown(prompt) + + # Generate response using chat (with CompactingSupervisor wrapper) + with st.chat_message("assistant"): + message_placeholder = st.empty() + full_response = "" + + try: + # Use chat endpoint (with context compaction) + with st.spinner("Thinking..."): + response = client.chat(prompt, st.session_state.thread_id) + + full_response = response.content + message_placeholder.markdown(full_response) + + # Update thread_id if this was first message + if st.session_state.thread_id is None: + st.session_state.thread_id = response.thread_id + + # Update token usage + st.session_state.token_usage = response.token_count + token_metric_placeholder.metric( + label="Context Window Tokens", + value=response.token_count + ) + + except Exception as e: + full_response = f"❌ Error: {str(e)}" + message_placeholder.error(full_response) + + # Handle empty response + if not full_response: + full_response = "No response received from agent." 
+ message_placeholder.warning(full_response) + + # --- STREAMING RAW VERSION (commented out) --- + # for chunk in client.stream_raw(prompt, st.session_state.thread_id): + # if chunk.type == "token": + # full_response += chunk.content or "" + # message_placeholder.markdown(full_response + "▌") + # elif chunk.type == "done": + # if st.session_state.thread_id is None: + # st.session_state.thread_id = chunk.thread_id + # st.session_state.token_usage = chunk.token_count or 0 + # token_metric_placeholder.metric( + # label="Context Window Tokens", + # value=chunk.token_count or 0 + # ) + # message_placeholder.markdown(full_response) + # elif chunk.type == "error": + # full_response = f"❌ Error: {chunk.error}" + # message_placeholder.error(full_response) + + # Add assistant response to chat history + st.session_state.messages.append({"role": "assistant", "content": full_response}) diff --git a/src/frontend/streamlit/voice_screening_ui/__init__.py b/src/frontend/streamlit/voice_screening_ui/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/frontend/streamlit/voice_screening_ui/app.py b/src/frontend/streamlit/voice_screening_ui/app.py new file mode 100644 index 0000000000000000000000000000000000000000..09cd791d730b05c36248277385fb8242c2138256 --- /dev/null +++ b/src/frontend/streamlit/voice_screening_ui/app.py @@ -0,0 +1,333 @@ +""" +Voice Screening MVP - Streamlit UI for browser-based voice interviews. +""" +import os +import streamlit as st +from datetime import datetime +from pathlib import Path +import uuid + +import sys +from pathlib import Path + +# Load environment variables +try: + from dotenv import load_dotenv + load_dotenv() +except ImportError: + pass # dotenv not installed, will try to get from environment + +# Add src directory to path so imports work +sys.path.insert(0, str(Path(__file__).parent.parent)) + +# Try to import requests for API calls (required) +try: + import requests + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + raise ImportError("requests library is required for voice screening") + +# Helper function to get proxy URL +def get_proxy_url(for_client=False): + """ + Get WebSocket proxy URL from environment or default. + + Args: + for_client (bool): If True, returns a URL accessible from the browser (localhost). + If False, returns the internal Docker URL (websocket_proxy). 
+ """ + proxy_url = os.getenv("WEBSOCKET_PROXY_URL", "ws://localhost:8000/ws/realtime") + + if for_client: + # Convert Docker internal URL to browser-accessible URL + if "websocket_proxy" in proxy_url: + proxy_url = proxy_url.replace("websocket_proxy", "localhost") + + return proxy_url + +def get_proxy_base_url(): + """Get HTTP base URL for proxy API calls (server-side).""" + # Use internal URL for server-side requests + proxy_url = get_proxy_url(for_client=False) + return proxy_url.replace("ws://", "http://").replace("wss://", "https://").replace("/ws/realtime", "") + +def get_backend_url(): + """Get backend API URL from environment or default.""" + return os.getenv("BACKEND_API_URL", "http://localhost:8000") + +# Page configuration +st.set_page_config( + page_title="Voice Screening Interview", + page_icon="🎙️", + layout="centered" +) + +# Initialize session state +if "session_id" not in st.session_state: + st.session_state.session_id = None +if "transcript" not in st.session_state: + st.session_state.transcript = [] +if "is_interview_active" not in st.session_state: + st.session_state.is_interview_active = False +if "candidate_id" not in st.session_state: + st.session_state.candidate_id = None +if "session_token" not in st.session_state: + st.session_state.session_token = None + +if "user_email" not in st.session_state: + st.session_state.user_email = None +if "auth_code" not in st.session_state: + st.session_state.auth_code = None +if "audio_file_path" not in st.session_state: + st.session_state.audio_file_path = None + +st.title("🎙️ Voice Screening Interview") + +# Authentication screen +if not st.session_state.session_token: + st.markdown("### 🔐 Authentication") + st.markdown("Please enter your email and authentication code to start.") + + with st.form("auth_form"): + user_email = st.text_input("Email", placeholder="your.email@example.com", value=st.session_state.user_email or "") + auth_code = st.text_input("Authentication Code", placeholder="Enter your code", value=st.session_state.auth_code or "") + + verify_submitted = st.form_submit_button("✅ Verify & Login", use_container_width=True, type="primary") + + if verify_submitted: + if user_email and auth_code: + try: + proxy_base = get_proxy_base_url() + response = requests.post( + f"{proxy_base}/auth/verify", + json={"email": user_email, "code": auth_code}, + timeout=5 + ) + if response.status_code == 200: + data = response.json() + st.session_state.session_token = data["session_token"] + st.session_state.user_email = user_email + + # Auto-set candidate ID if returned + if "candidate_id" in data and data["candidate_id"]: + st.session_state.candidate_id = data["candidate_id"] + + st.success("✅ Authentication successful!") + st.rerun() + else: + error_data = response.json() if response.content else {} + st.error(f"❌ Authentication failed: {error_data.get('detail', response.text)}") + except Exception as e: + st.error(f"❌ Error connecting to proxy: {e}") + st.info("💡 Make sure the WebSocket proxy service is running.") + else: + st.warning("⚠️ Please enter both email and code.") + + st.markdown("---") + st.info("💡 **Note:** Enter your email and authentication code to proceed.") + st.stop() + +# Main interview interface (only shown after authentication) +col_header1, col_header2 = st.columns([3, 1]) +with col_header1: + st.markdown( + f""" + Welcome, **{st.session_state.user_email}**! + Click **Start Interview** to begin, then use the toggle button to speak. 
+ """ + ) +with col_header2: + if st.button("🚪 Logout", use_container_width=True): + st.session_state.session_token = None + st.session_state.user_email = None + st.session_state.auth_code = None + st.session_state.is_interview_active = False + st.rerun() + +# Candidate selection +with st.expander("Candidate Information", expanded=True): + # Check for candidate_id in query parameters + query_params = st.query_params + if "candidate_id" in query_params and not st.session_state.candidate_id: + st.session_state.candidate_id = query_params["candidate_id"] + st.success(f"✅ Candidate ID loaded from URL: {st.session_state.candidate_id}") + + if st.session_state.candidate_id: + st.info(f"Current Candidate ID: `{st.session_state.candidate_id}`") + else: + st.warning("⚠️ No candidate selected. Please provide a Candidate ID.") + + candidate_id_input = st.text_input("Enter Candidate ID", value=st.session_state.candidate_id or "") + + # Strip whitespace from input + if candidate_id_input: + candidate_id_input = candidate_id_input.strip() + + if candidate_id_input and candidate_id_input != st.session_state.candidate_id: + st.session_state.candidate_id = candidate_id_input + st.success(f"✅ Candidate ID set to: {candidate_id_input}") + +# Interview controls +col1, col2 = st.columns(2) + +with col1: + if not st.session_state.is_interview_active: + if st.button("🚀 Start Interview", type="primary", use_container_width=True): + st.session_state.is_interview_active = True + st.session_state.session_id = str(uuid.uuid4()) + st.session_state.transcript = [] + st.session_state.transcript.append({ + "speaker": "system", + "text": "Interview started", + "timestamp": datetime.now().isoformat() + }) + st.rerun() + else: + if st.button("⏹️ End Interview", type="secondary", use_container_width=True): + # Save audio recording and transcript via backend API + if st.session_state.session_id and st.session_state.session_token and st.session_state.candidate_id and HAS_REQUESTS: + try: + # Build transcript text + transcript_text = "\n".join([ + f"{entry.get('speaker', 'unknown')}: {entry.get('text', '')}" + for entry in st.session_state.transcript + if entry.get("speaker") in ["agent", "candidate"] + ]) + + backend_url = get_backend_url() + st.info(f"🔍 Debug: Attempting to save to {backend_url}/api/v1/voice-screener/session/{st.session_state.session_id}/save") + + response = requests.post( + f"{backend_url}/api/v1/voice-screener/session/{st.session_state.session_id}/save", + json={ + "session_id": st.session_state.session_id, + "candidate_id": st.session_state.candidate_id, + "transcript_text": transcript_text, + "proxy_token": st.session_state.session_token + }, + timeout=30 + ) + st.info(f"🔍 Debug: Response Status: {response.status_code}") + + if response.status_code == 200: + data = response.json() + st.session_state.audio_file_path = data.get("audio_file_path") + st.success(f"✅ Session saved successfully!") + if st.session_state.audio_file_path: + st.info(f"Audio: {st.session_state.audio_file_path}") + else: + st.error(f"❌ Backend Error ({response.status_code}): {response.text}") + except Exception as e: + st.error(f"❌ Connection Error: {e}") + st.code(f"Backend URL: {get_backend_url()}\nError Type: {type(e).__name__}") + else: + st.error("❌ Missing session state for saving!") + st.write(f"Session ID: {st.session_state.session_id}") + st.write(f"Token: {bool(st.session_state.session_token)}") + st.write(f"Candidate ID: {st.session_state.candidate_id}") + + st.session_state.is_interview_active = False + # st.rerun() # 
Commented out to see debug messages + +with col2: + if st.session_state.is_interview_active: + st.info("🟢 Interview Active") + +# Voice interface component +if st.session_state.is_interview_active: + st.markdown("---") + st.subheader("Voice Interface") + + # Load HTML component with WebSocket and audio handling + html_file = Path(__file__).parent / "components" / "voice_interface.html" + if html_file.exists(): + with open(html_file, "r", encoding="utf-8") as f: + html_content = f.read() + + # Get proxy URL and session token + proxy_url = get_proxy_url(for_client=True) + session_token = st.session_state.session_token + + if not session_token: + st.error("⚠️ No session token. Please authenticate first.") + st.stop() + + # Show connection debug info + with st.expander("🔍 Connection Debug Info", expanded=False): + st.success(f"✅ Authenticated as: `{st.session_state.user_email}`") + st.info(f"**WebSocket Proxy:** `{proxy_url}`") + st.info("**Note:** The connection uses a WebSocket proxy to handle authentication. " + "Browsers cannot set custom headers in WebSocket connections, so we proxy through the backend.") + if "localhost" in proxy_url or "127.0.0.1" in proxy_url: + st.warning("⚠️ Make sure the WebSocket proxy service is running! Check docker-compose logs.") + + # Proxy health check + if HAS_REQUESTS: + try: + proxy_base = get_proxy_base_url() + health_url = f"{proxy_base}/health" + response = requests.get(health_url, timeout=2) + if response.status_code == 200: + health_data = response.json() + st.success(f"✅ Proxy is healthy: {health_data.get('status', 'unknown')}") + if health_data.get('openai_api_key_configured'): + st.success("✅ OpenAI API key is configured in proxy") + else: + st.error("❌ OpenAI API key NOT configured in proxy") + st.info(f"Active sessions: {health_data.get('active_sessions', 0)}") + else: + st.warning(f"⚠️ Proxy health check returned: {response.status_code}") + except Exception as e: + st.warning(f"⚠️ Could not check proxy health: {e}") + st.info("💡 **To view proxy logs:** `docker compose logs -f websocket_proxy`") + else: + st.info("💡 **To check proxy status:** `docker compose logs websocket_proxy`") + st.info("💡 **To view live logs:** `docker compose logs -f websocket_proxy`") + + # Build WebSocket URL with session token + ws_url = f"{proxy_url}?token={session_token}" + + html_content = html_content.replace("{{SESSION_ID}}", st.session_state.session_id) + html_content = html_content.replace("{{SESSION_TOKEN}}", session_token) + html_content = html_content.replace("{{PROXY_URL}}", ws_url) + + st.components.v1.html(html_content, height=500) # Increased height for error messages + else: + st.warning("Voice interface component not found. 
Please ensure voice_interface.html exists.") + + # Transcript display + st.markdown("---") + st.subheader("Live Transcript") + + if st.session_state.transcript: + for entry in st.session_state.transcript: + speaker = entry.get("speaker", "unknown") + text = entry.get("text", "") + timestamp = entry.get("timestamp", "") + + if speaker == "agent": + st.markdown(f"**🤖 Agent:** {text}") + elif speaker == "candidate": + st.markdown(f"**👤 You:** {text}") + else: + st.markdown(f"*{text}*") + + # Manual transcript update (for testing - in real app, JS updates this) + with st.expander("Add Transcript Entry (Testing)"): + col1, col2 = st.columns([3, 1]) + with col1: + test_text = st.text_input("Text", key="test_transcript") + with col2: + test_speaker = st.selectbox("Speaker", ["candidate", "agent"], key="test_speaker") + + if st.button("Add Entry"): + if test_text: + st.session_state.transcript.append({ + "speaker": test_speaker, + "text": test_text, + "timestamp": datetime.now().isoformat() + }) + st.rerun() + + \ No newline at end of file diff --git a/src/frontend/streamlit/voice_screening_ui/components/__init__.py b/src/frontend/streamlit/voice_screening_ui/components/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a01f65fc5c1b7fd8870bc5243f872bbfec88b84f --- /dev/null +++ b/src/frontend/streamlit/voice_screening_ui/components/__init__.py @@ -0,0 +1,2 @@ +# Components module for voice screening UI + diff --git a/src/frontend/streamlit/voice_screening_ui/components/voice_interface.html b/src/frontend/streamlit/voice_screening_ui/components/voice_interface.html new file mode 100644 index 0000000000000000000000000000000000000000..f1349b7da352f086dc8987dd39add77dea5de506 --- /dev/null +++ b/src/frontend/streamlit/voice_screening_ui/components/voice_interface.html @@ -0,0 +1,759 @@ + + + + + + + + + +
+<!--
+  ~759 lines of markup, styles, and WebSocket/audio JavaScript were stripped
+  from voice_interface.html during extraction. Recoverable UI elements: a
+  record toggle ("Click to start recording"), a connection status badge
+  ("Disconnected"), a "🔍 Toggle Debug Info" expander, and a transcript area
+  ("Transcript will appear here...").
+-->
+ + + + + \ No newline at end of file diff --git a/src/frontend/streamlit/voice_screening_ui/proxy.py b/src/frontend/streamlit/voice_screening_ui/proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..b767f2956538780c0973440a7db04e56fd239201 --- /dev/null +++ b/src/frontend/streamlit/voice_screening_ui/proxy.py @@ -0,0 +1,526 @@ +""" +WebSocket proxy for OpenAI Realtime API. +Handles authentication since browsers cannot set custom headers in WebSocket connections. +Also handles user authentication and session management. +""" +import asyncio +import os +import json +import logging +import secrets +import time +import base64 +import wave +import io +import struct +from typing import Dict, Optional, List +from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException, Query +from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel, EmailStr +import aiohttp +from dotenv import load_dotenv +from sqlalchemy import select + +# Import database client and models +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate + +load_dotenv() + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + +app = FastAPI(title="Voice Screening WebSocket Proxy") + +# Enable CORS for Streamlit +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # In production, restrict to your Streamlit domain + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +OPENAI_REALTIME_URL = "wss://api.openai.com/v1/realtime?model=gpt-realtime-mini" + +# Session management (in-memory for MVP) +# In production, use Redis or database +sessions: Dict[str, dict] = {} # session_token -> {email, expires_at, created_at} + +# Session configuration (moved from frontend) +SESSION_CONFIG = { + "modalities": ["audio", "text"], + "instructions": "You are a friendly HR assistant conducting a phone screening interview. Greet the candidate warmly and ask them about their background and interest in the position.", + "voice": "alloy", + "input_audio_format": "pcm16", + "output_audio_format": "pcm16", + "turn_detection": { + "type": "server_vad", + "threshold": 0.5, + "prefix_padding_ms": 300, + "silence_duration_ms": 10000 + } +} + +# Auth models +class LoginRequest(BaseModel): + email: EmailStr + +class VerifyRequest(BaseModel): + email: EmailStr + code: str + + +def generate_session_token() -> str: + """Generate a secure session token.""" + return secrets.token_urlsafe(32) + +def cleanup_expired_sessions(): + """Remove expired sessions.""" + current_time = time.time() + + # Clean up expired sessions + expired_sessions = [ + token for token, session in sessions.items() + if session.get("expires_at", 0) < current_time + ] + for token in expired_sessions: + del sessions[token] + +@app.post("/auth/login") +async def login(request: LoginRequest): + """ + Request authentication for email. + Just accepts the email - no code generation needed. + """ + cleanup_expired_sessions() + + email = request.email.lower() + + logger.info(f"Login request for {email}") + + return { + "message": "Please enter your authentication code", + "email": email + } + +@app.post("/auth/verify") +async def verify(request: VerifyRequest): + """ + Verify email and code, return session token. + Authentication logic left empty for now - just accepts any code. 
+ """ + cleanup_expired_sessions() + + email = request.email.lower() + code = request.code + + # Authenticate user against database + candidate_id = None + + try: + with SessionLocal() as db: + # Find candidate by email (case insensitive) + stmt = select(Candidate).where(Candidate.email == email) + candidate = db.execute(stmt).scalar_one_or_none() + + if not candidate: + logger.warning(f"Authentication failed: Email {email} not found") + raise HTTPException(status_code=401, detail="Invalid email or authentication code") + + # Check auth code + # For now, we'll accept the code if it matches or if it's a "magic" code for testing + # In production, this should be strict + if candidate.auth_code != code and code != "000000": + logger.warning(f"Authentication failed: Invalid code for {email}") + raise HTTPException(status_code=401, detail="Invalid email or authentication code") + + candidate_id = str(candidate.id) + logger.info(f"User authenticated: {email} (ID: {candidate_id})") + + except HTTPException: + raise + except Exception as e: + logger.error(f"Database error during authentication: {e}") + raise HTTPException(status_code=500, detail="Internal server error during authentication") + + # Authentication successful, create session + session_token = generate_session_token() + + sessions[session_token] = { + "email": email, + "candidate_id": candidate_id, + "expires_at": time.time() + 3600, # 1 hour + "created_at": time.time(), + "user_audio_chunks": [], # List of {timestamp, data: bytes} + "agent_audio_chunks": [], # List of {timestamp, data: bytes} + "transcript": [], # List of {speaker, text, timestamp} + "session_start_time": None # Set when WebSocket connects + } + + logger.info(f"Session created for {email}: {session_token[:8]}...") + + return { + "session_token": session_token, + "candidate_id": candidate_id, + "expires_in": 3600 + } + +def validate_session_token(token: Optional[str]) -> Optional[dict]: + """Validate session token and return session data.""" + if not token: + return None + + cleanup_expired_sessions() + + if token not in sessions: + return None + + session = sessions[token] + + if session["expires_at"] < time.time(): + del sessions[token] + return None + + return session + +@app.websocket("/ws/realtime") +async def websocket_proxy(websocket: WebSocket, token: Optional[str] = Query(None), candidate_id: Optional[str] = Query(None)): + """ + Proxy WebSocket connection to OpenAI Realtime API. + Adds proper authentication headers that browsers cannot set. + Requires valid session token for authentication. 
+ """ + client_id = id(websocket) + logger.info(f"[{client_id}] Client connecting...") + + # Validate session token + session = validate_session_token(token) + if not session: + logger.warning(f"[{client_id}] Invalid or missing session token") + await websocket.close(code=1008, reason="Invalid or expired session token") + return + + await websocket.accept() + logger.info(f"[{client_id}] Client connected (user: {session['email']})") + + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + error_msg = "API key not configured" + logger.error(f"[{client_id}] {error_msg}") + await websocket.close(code=1008, reason=error_msg) + return + + try: + # Connect to OpenAI Realtime API using aiohttp (better header support) + headers = { + "Authorization": f"Bearer {api_key}", + "OpenAI-Beta": "realtime=v1" + } + + logger.info(f"[{client_id}] Connecting to OpenAI Realtime API: {OPENAI_REALTIME_URL}") + + async with aiohttp.ClientSession() as session: + async with session.ws_connect( + OPENAI_REALTIME_URL, + headers=headers + ) as openai_ws: + logger.info(f"[{client_id}] Connected to OpenAI Realtime API") + + # Send connection success message to client + await websocket.send_json({ + "type": "proxy.status", + "status": "connected", + "message": "Proxy connected to OpenAI Realtime API" + }) + + # Configure session (moved from frontend) + # Get session configuration from backend API + current_session_config = SESSION_CONFIG.copy() + + if candidate_id: + try: + import requests + # Get backend API URL + backend_url = os.getenv("BACKEND_API_URL", "http://localhost:8000") + response = requests.get( + f"{backend_url}/api/v1/voice-screener/session/dummy/config", + params={"candidate_id": candidate_id}, + timeout=5 + ) + if response.status_code == 200: + config_data = response.json() + current_session_config = config_data["config"] + logger.info(f"[{client_id}] Retrieved session config from backend for {config_data['candidate_name']}") + else: + logger.warning(f"[{client_id}] Failed to get config from backend: {response.status_code}") + # Fallback to default + except Exception as e: + logger.error(f"[{client_id}] Error fetching config from backend: {e}") + # Fallback to default instructions + + await openai_ws.send_str(json.dumps({ + "type": "session.update", + "session": current_session_config + })) + logger.info(f"[{client_id}] Session configured") + + # Initialize session start time for audio buffering + sessions[token]["session_start_time"] = time.time() + + # Send greeting after session is configured + await asyncio.sleep(0.5) # Small delay to ensure session is configured + await openai_ws.send_str(json.dumps({ + "type": "response.create", + "response": { + "modalities": ["audio", "text"], + "instructions": "Greet the candidate and ask them to tell you about themselves." 
+ } + })) + logger.info(f"[{client_id}] Greeting sent")
+ + # Bidirectional message forwarding + async def forward_to_openai(): + """Forward messages from client to OpenAI.""" + try: + async for message in websocket.iter_text(): + try: + # Log message for debugging + msg_data = json.loads(message) if message else {} + msg_type = msg_data.get("type", "unknown") + logger.debug(f"[{client_id}] Client -> OpenAI: {msg_type}")
+ + # Capture user audio for recording + if msg_type == "input_audio_buffer.append": + audio_base64 = msg_data.get("audio", "") + if audio_base64: + try: + audio_data = base64.b64decode(audio_base64) + sessions[token]["user_audio_chunks"].append({ + "timestamp": time.time(), + "data": audio_data + }) + except Exception as e: + logger.warning(f"[{client_id}] Failed to decode user audio: {e}")
+ + await openai_ws.send_str(message) + except json.JSONDecodeError: + logger.warning(f"[{client_id}] Invalid JSON from client: {message[:100]}") + await openai_ws.send_str(message) + except WebSocketDisconnect: + logger.info(f"[{client_id}] Client disconnected") + except Exception as e: + error_msg = f"Error forwarding to OpenAI: {str(e)}" + logger.error(f"[{client_id}] {error_msg}", exc_info=True) + try: + await websocket.send_json({ + "type": "proxy.error", + "error": error_msg, + "source": "forward_to_openai" + }) + except Exception: + pass
+ + async def forward_to_client(): + """Forward messages from OpenAI to client.""" + try: + async for msg in openai_ws: + if msg.type == aiohttp.WSMsgType.TEXT: + try: + # Log message for debugging + msg_data = json.loads(msg.data) if msg.data else {} + msg_type = msg_data.get("type", "unknown") + logger.debug(f"[{client_id}] OpenAI -> Client: {msg_type}")
+ + # Capture agent audio for recording + if msg_type == "response.audio.delta": + audio_base64 = msg_data.get("delta", "") + if audio_base64: + try: + audio_data = base64.b64decode(audio_base64) + sessions[token]["agent_audio_chunks"].append({ + "timestamp": time.time(), + "data": audio_data + }) + except Exception as e: + logger.warning(f"[{client_id}] Failed to decode agent audio: {e}")
+ + # Capture transcript + elif msg_type == "response.audio_transcript.done": + # Agent transcript: this event carries the transcript of the model's spoken output + text = msg_data.get("transcript", "") + if text: + sessions[token]["transcript"].append({ + "speaker": "agent", + "text": text, + "timestamp": time.time() + }) + logger.info(f"[{client_id}] Captured agent transcript: {text[:30]}...")
+ + elif msg_type == "conversation.item.input_audio_transcription.completed": + # Candidate transcript (requires input audio transcription to be enabled in the session config) + text = msg_data.get("transcript", "") + if text: + sessions[token]["transcript"].append({ + "speaker": "candidate", + "text": text, + "timestamp": time.time() + }) + logger.info(f"[{client_id}] Captured candidate transcript: {text[:30]}...")
+ + elif msg_type == "response.text.done": + # Agent transcript (if using text modality) + text = msg_data.get("text", "") + if text: + sessions[token]["transcript"].append({ + "speaker": "agent", + "text": text, + "timestamp": time.time() + }) + logger.info(f"[{client_id}] Captured agent transcript: {text[:30]}...")
+ + await websocket.send_text(msg.data) + except Exception as e: + logger.error(f"[{client_id}] Error sending message to client: {e}") + await websocket.send_json({ + "type": "proxy.error", + "error": f"Error sending message: {str(e)}", + "source": "forward_to_client" + }) + elif msg.type == aiohttp.WSMsgType.ERROR: + error = openai_ws.exception() + error_msg = f"WebSocket error from OpenAI: {error}" + logger.error(f"[{client_id}] {error_msg}") + await websocket.send_json({ + "type": "proxy.error", + "error": error_msg, + "source":
"openai_websocket" + }) + break + elif msg.type == aiohttp.WSMsgType.CLOSE: + logger.info(f"[{client_id}] OpenAI closed connection: {msg.data}") + break + else: + logger.warning(f"[{client_id}] Unexpected message type from OpenAI: {msg.type}") + except Exception as e: + error_msg = f"Error forwarding to client: {str(e)}" + logger.error(f"[{client_id}] {error_msg}", exc_info=True) + try: + await websocket.send_json({ + "type": "proxy.error", + "error": error_msg, + "source": "forward_to_client" + }) + except: + pass + + # Run both forwarding tasks concurrently + results = await asyncio.gather( + forward_to_openai(), + forward_to_client(), + return_exceptions=True + ) + + # Log any exceptions from the tasks + for i, result in enumerate(results): + if isinstance(result, Exception): + logger.error(f"[{client_id}] Task {i} raised exception: {result}", exc_info=True) + + except aiohttp.ClientError as e: + error_msg = f"OpenAI connection failed: {str(e)}" + logger.error(f"[{client_id}] {error_msg}", exc_info=True) + try: + await websocket.send_json({ + "type": "proxy.error", + "error": error_msg, + "source": "connection" + }) + except: + pass + await websocket.close(code=1008, reason=error_msg) + except Exception as e: + error_msg = f"Proxy error: {str(e)}" + logger.error(f"[{client_id}] {error_msg}", exc_info=True) + import traceback + traceback.print_exc() + try: + await websocket.send_json({ + "type": "proxy.error", + "error": error_msg, + "source": "proxy", + "traceback": traceback.format_exc() + }) + except: + pass + await websocket.close(code=1011, reason=error_msg) + + + +class RetrieveAudioRequest(BaseModel): + session_id: str + + +@app.post("/audio/retrieve") +async def retrieve_audio(request: RetrieveAudioRequest, token: Optional[str] = Query(None)): + """ + Retrieve audio chunks for a session. + Backend will call this to get chunks for processing. + + Args: + request: Contains session_id. + token: Session token from query parameter. + + Returns: + dict: Contains user_chunks, agent_chunks, and session_start_time. 
+ """ + session = validate_session_token(token) + if not session: + raise HTTPException(status_code=401, detail="Invalid or expired session token") + + try: + user_chunks = session.get("user_audio_chunks", []) + agent_chunks = session.get("agent_audio_chunks", []) + transcript = session.get("transcript", []) + session_start_time = session.get("session_start_time") + + # Base64 encode chunks for JSON transport + encoded_user_chunks = [] + for chunk in user_chunks: + encoded_chunk = chunk.copy() + if isinstance(chunk.get("data"), bytes): + encoded_chunk["data"] = base64.b64encode(chunk["data"]).decode("utf-8") + encoded_user_chunks.append(encoded_chunk) + + encoded_agent_chunks = [] + for chunk in agent_chunks: + encoded_chunk = chunk.copy() + if isinstance(chunk.get("data"), bytes): + encoded_chunk["data"] = base64.b64encode(chunk["data"]).decode("utf-8") + encoded_agent_chunks.append(encoded_chunk) + + logger.info(f"Retrieved {len(user_chunks)} user chunks, {len(agent_chunks)} agent chunks, and {len(transcript)} transcript lines for session {request.session_id}") + + return { + "user_chunks": encoded_user_chunks, + "agent_chunks": encoded_agent_chunks, + "transcript": transcript, + "session_start_time": session_start_time + } + except Exception as e: + logger.error(f"Error retrieving audio: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Failed to retrieve audio: {str(e)}") + + +@app.get("/health") +async def health_check(): + """Health check endpoint.""" + cleanup_expired_sessions() + return { + "status": "healthy", + "openai_api_key_configured": bool(os.getenv("OPENAI_API_KEY")), + "active_sessions": len(sessions) + } + + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) + diff --git a/src/frontend/streamlit/voice_screening_ui/utils/__init__.py b/src/frontend/streamlit/voice_screening_ui/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c7801a7a2713d9c96eb450fbad372d090eef0f5c --- /dev/null +++ b/src/frontend/streamlit/voice_screening_ui/utils/__init__.py @@ -0,0 +1,2 @@ +# Utils module for voice screening UI + diff --git a/src/mcp_servers/calendar-mcp/run_server.py b/src/mcp_servers/calendar-mcp/run_server.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ca4d4e2dfb95cd4f91172638b401b3ebe279af --- /dev/null +++ b/src/mcp_servers/calendar-mcp/run_server.py @@ -0,0 +1,17 @@ +import logging +from src.mcp_bridge import create_mcp_server + +# Optional: keep logging simple and clean +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s" +) + +def main(): + """Start the MCP server over stdio.""" + server = create_mcp_server() + server.run(transport="stdio") + + +if __name__ == "__main__": + main() diff --git a/src/mcp_servers/calendar-mcp/src/__init__.py b/src/mcp_servers/calendar-mcp/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..37564503046156909cb589dbc47e04454f9fbd6e --- /dev/null +++ b/src/mcp_servers/calendar-mcp/src/__init__.py @@ -0,0 +1,7 @@ +""" +Google Calendar MCP Package + +This package contains modules for the Google Calendar Model Context Protocol server. 
+""" + +__version__ = "0.1.0" \ No newline at end of file diff --git a/src/mcp_servers/calendar-mcp/src/analysis.py b/src/mcp_servers/calendar-mcp/src/analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..318b51f7be3c28723390386d1dd45e08bbaa2641 --- /dev/null +++ b/src/mcp_servers/calendar-mcp/src/analysis.py @@ -0,0 +1,345 @@ +import logging +from datetime import datetime, date, timedelta +from typing import Optional, List, Dict, Any +from collections import defaultdict + +from google.oauth2.credentials import Credentials +from dateutil import rrule +from dateutil import parser as date_parser # Alias to avoid confusion with our parser module if any + +# Import find_events from the sibling module +try: + # Use absolute imports for consistency + import src.calendar_actions as calendar_actions # Changed from .calendar_actions for compatibility + from src.models import GoogleCalendarEvent # Changed from .models for compatibility +except ImportError: + # Handle potential path issues if run directly or structured differently + logging.error("Could not import from src.calendar_actions or src.models. Ensure structure is correct.") + # Define dummy functions/classes for type hinting if needed, or re-raise + def find_events(*args, **kwargs): return None + class GoogleCalendarEvent: pass + + +logger = logging.getLogger(__name__) + +# Define a structure for projected occurrences (can be a TypedDict or Pydantic model later) +class ProjectedEventOccurrence: + def __init__(self, original_event_id: str, original_summary: str, occurrence_start: datetime, occurrence_end: datetime): + self.original_event_id = original_event_id + self.original_summary = original_summary + self.occurrence_start = occurrence_start + self.occurrence_end = occurrence_end + + def __repr__(self): + return f"ProjectedOccurrence(id='{self.original_event_id}', summary='{self.original_summary}', start='{self.occurrence_start}', end='{self.occurrence_end}')" + + +def project_recurring_events( + credentials: Credentials, + time_min: datetime, + time_max: datetime, + calendar_id: str = 'primary', + event_query: Optional[str] = None +) -> List[ProjectedEventOccurrence]: + """Finds recurring events and projects their occurrences within a time window. + + Args: + credentials: Valid Google OAuth2 credentials. + time_min: Start of the projection window (timezone-aware recommended). + time_max: End of the projection window (timezone-aware recommended). + calendar_id: The calendar to search within. + event_query: Optional text query to filter master recurring events (e.g., "Birthday"). + + Returns: + A list of ProjectedEventOccurrence objects representing calculated occurrences. + """ + projected_occurrences: List[ProjectedEventOccurrence] = [] + + logger.info(f"Starting projection of recurring events for calendar '{calendar_id}'") + logger.info(f"Projection window: {time_min} to {time_max}. Query: '{event_query or 'None'}'") + + # 1. Find master recurring events (not single instances) + # We need events *within* the window OR whose recurrence *starts* before the window ends + # and *might* generate instances within the window. + # Finding events without timeMin/timeMax might be too broad. + # A safe approach is to find master events potentially active *before* the window ends. 
+ master_events_response = calendar_actions.find_events( + credentials=credentials, + calendar_id=calendar_id, + # timeMax=time_max, # Find masters that haven't ended before our window + q=event_query, + single_events=False, # Crucial: Get the master event definition + showDeleted=False, + max_results=2500 # Adjust as needed, API max is 2500 + ) + + if not master_events_response or not master_events_response.items: + logger.info("No master recurring events found matching the criteria.") + return [] + + logger.debug(f"Found {len(master_events_response.items)} potential master events.") + + # 2. Iterate through master events and parse recurrence rules + for event in master_events_response.items: + if not event.recurrence: + # logger.debug(f"Skipping non-recurring event: {event.summary} ({event.id})") + continue # Skip non-recurring events + + if not event.start or not (event.start.dateTime or event.start.date): + logger.warning(f"Skipping recurring event without start time: {event.summary} ({event.id})") + continue + + # Determine the start datetime of the recurrence series (dtstart) + # Handle both date and dateTime cases + dtstart_obj: Optional[datetime] = None + event_duration: Optional[timedelta] = None + + if event.start.dateTime: + try: + # Use dateutil parser for robust ISO parsing + dtstart_obj = date_parser.isoparse(event.start.dateTime) + if event.end and event.end.dateTime: + dtend_obj = date_parser.isoparse(event.end.dateTime) + event_duration = dtend_obj - dtstart_obj + else: + # Default duration for dateTime events if end is missing (e.g., 1 hour) + event_duration = timedelta(hours=1) + logger.warning(f"Recurring event '{event.summary}' missing end.dateTime, assuming {event_duration} duration.") + except ValueError as e: + logger.error(f"Could not parse dateTime for event {event.summary} ({event.id}): {e}") + continue + elif event.start.date: + try: + # All-day event - parse date and set time to midnight + start_date = date_parser.parse(event.start.date).date() + # Make dtstart timezone-aware if time_min is, otherwise naive UTC + dtstart_obj = datetime.combine(start_date, datetime.min.time()) + if time_min.tzinfo: + # Try to use the target window's timezone, otherwise UTC fallback + dtstart_obj = dtstart_obj.replace(tzinfo=time_min.tzinfo) + # else: + # dtstart_obj = dtstart_obj.replace(tzinfo=timezone.utc) # Requires import + + # Duration for all-day events is typically 1 day + if event.end and event.end.date: + end_date = date_parser.parse(event.end.date).date() + event_duration = end_date - start_date # This includes the start day but excludes the end day + else: + event_duration = timedelta(days=1) # Assume single all-day event + except ValueError as e: + logger.error(f"Could not parse date for event {event.summary} ({event.id}): {e}") + continue + + if not dtstart_obj or event_duration is None: + logger.error(f"Could not determine dtstart or duration for event {event.summary} ({event.id})") + continue + + # Extract RRULE, EXDATE, RDATE strings + # Google Calendar API returns recurrence as a list of strings + # e.g., ['RRULE:FREQ=WEEKLY;UNTIL=20110701T170000Z', 'EXDATE:20110610T100000Z'] + rrule_str: Optional[str] = None + exdate_strs: List[str] = [] + rdate_strs: List[str] = [] + for rule_str in event.recurrence: + if rule_str.startswith('RRULE:'): + rrule_str = rule_str # Assume only one RRULE per event + elif rule_str.startswith('EXDATE'): + exdate_strs.append(rule_str) + elif rule_str.startswith('RDATE'): + rdate_strs.append(rule_str) + + if not rrule_str: + 
logger.warning(f"Recurring event '{event.summary}' ({event.id}) has no RRULE string. Skipping.") + continue + + try: + # Parse the main recurrence rule + # Pass dtstart, which is essential for rrule calculations + ruleset = rrule.rruleset() + # Use rrulestr which handles RRULE and dtstart implicitly if not provided otherwise + # We need to make sure the timezone handling matches dtstart_obj + main_rule = rrule.rrulestr(rrule_str, dtstart=dtstart_obj, forceset=True) # forceset=True to handle COUNT/UNTIL easily + ruleset.rrule(main_rule[0]) # Add the parsed rule to the set + + # Add exception dates (EXDATE) + for exdate_str in exdate_strs: + # EXDATE format: "EXDATE;TZID=Europe/Zurich:20110426T080000,20110428T080000" + # Or "EXDATE:20240101" (all-day) + # Or "EXDATE:20240101T100000Z" (UTC) + # dateutil.rrule.rrulestr can parse EXDATE directly if part of the string, + # but Google separates them. We need to parse dates/datetimes manually. + # Split by ':' and then by ',' + parts = exdate_str.split(':', 1) + if len(parts) == 2: + param_str, dates_str = parts + dates = dates_str.split(',') + params = {} + if ';' in param_str: # Check for TZID or VALUE=DATE + param_parts = param_str.split(';')[1:] # Skip EXDATE itself + for part in param_parts: + if '=' in part: + key, value = part.split('=', 1) + params[key.upper()] = value + + is_all_day = params.get('VALUE') == 'DATE' + tz_id = params.get('TZID') + # TODO: Handle TZID properly using pytz if needed + + for date_str in dates: + try: + if is_all_day: + ex_date = date_parser.parse(date_str).date() + # Create datetime at midnight for comparison/ruleset + ex_dt = datetime.combine(ex_date, datetime.min.time()) + if dtstart_obj.tzinfo: # Match tzinfo + ex_dt = ex_dt.replace(tzinfo=dtstart_obj.tzinfo) + else: + ex_dt = date_parser.isoparse(date_str) + # TODO: Apply TZID if present + + ruleset.exdate(ex_dt) + except ValueError: + logger.warning(f"Could not parse EXDATE value '{date_str}' for event {event.id}") + + # Add explicit recurrence dates (RDATE) - Less common? + # Similar parsing logic as EXDATE if needed. + # for rdate_str in rdate_strs: ... ruleset.rdate(...) + + # Generate occurrences within the desired window [time_min, time_max) + # Note: rruleset.between includes dates equal to dtstart/until + occurrences = ruleset.between(time_min, time_max, inc=True) # inc=True includes time_min + + logger.debug(f"Event '{event.summary}' ({event.id}): Found {len(occurrences)} occurrences via rrule.") + + for occ_start_dt in occurrences: + # Ensure timezone consistency if needed + if dtstart_obj.tzinfo and occ_start_dt.tzinfo is None: + occ_start_dt = occ_start_dt.replace(tzinfo=dtstart_obj.tzinfo) + elif not dtstart_obj.tzinfo and occ_start_dt.tzinfo: + occ_start_dt = occ_start_dt.replace(tzinfo=None) + + # Calculate occurrence end time + occ_end_dt = occ_start_dt + event_duration + + # Double check if the occurrence actually overlaps the window + # ruleset.between should handle this, but an extra check might be useful + # if occ_start_dt < time_max and occ_end_dt > time_min: + projected_occurrences.append( + ProjectedEventOccurrence( + original_event_id=event.id, + original_summary=event.summary or "No Summary", + occurrence_start=occ_start_dt, + occurrence_end=occ_end_dt + ) + ) + + except Exception as e: + logger.error(f"Failed to parse/process recurrence for event '{event.summary}' ({event.id}): {e}", exc_info=True) + continue # Skip this event + + logger.info(f"Finished projection. 
Found {len(projected_occurrences)} total occurrences.") + # Sort occurrences chronologically? + projected_occurrences.sort(key=lambda x: x.occurrence_start) + return projected_occurrences + + +def analyze_busyness( + credentials: Credentials, + time_min: datetime, + time_max: datetime, + calendar_id: str = 'primary', +) -> Dict[date, Dict[str, Any]]: + """Analyzes event count and total duration per day within a time window. + + Args: + credentials: Valid Google OAuth2 credentials. + time_min: Start of the analysis window (timezone-aware recommended). + time_max: End of the analysis window (timezone-aware recommended). + calendar_id: The calendar to analyze. + + Returns: + A dictionary mapping each date within the window to its busyness stats: + {'event_count': int, 'total_duration_minutes': float} + """ + busyness_by_date: Dict[date, Dict[str, Any]] = defaultdict(lambda: {'event_count': 0, 'total_duration_minutes': 0.0}) + + logger.info(f"Starting busyness analysis for calendar '{calendar_id}'") + logger.info(f"Analysis window: {time_min} to {time_max}") + + # 1. Find all event instances in the range + events_response = calendar_actions.find_events( + credentials=credentials, + calendar_id=calendar_id, + time_min=time_min, + time_max=time_max, + single_events=True, # Get individual instances + showDeleted=False, + max_results=2500 # Consider pagination for very long ranges + ) + + if not events_response or not events_response.items: + logger.info("No events found in the specified time range for busyness analysis.") + return dict(busyness_by_date) # Return empty default dict converted to regular dict + + logger.debug(f"Found {len(events_response.items)} event instances for analysis.") + + # 2. Process events and aggregate stats by date + for event in events_response.items: + start_dt: Optional[datetime] = None + end_dt: Optional[datetime] = None + event_date: Optional[date] = None + + # Determine start and end datetimes/dates + if event.start: + if event.start.dateTime: + try: + start_dt = date_parser.isoparse(event.start.dateTime) + event_date = start_dt.date() + except ValueError: logger.warning(f"Could not parse start dateTime: {event.start.dateTime}"); continue + elif event.start.date: + try: + event_date = date_parser.parse(event.start.date).date() + # All-day events don't have a specific duration from start/end times typically + except ValueError: logger.warning(f"Could not parse start date: {event.start.date}"); continue + + if not event_date: + logger.warning(f"Event '{event.summary}' ({event.id}) missing valid start information. Skipping.") + continue + + # Ensure the event actually starts within our analysis window bounds + # (API might return events overlapping the start/end) + # Need to compare dates correctly (timezone awareness) + if not (time_min.date() <= event_date < time_max.date()): + # Basic date check; refine if timezone crossing near midnight is critical + # logger.debug(f"Skipping event {event.id} starting outside date range: {event_date}") + continue + + # Increment event count for the date + busyness_by_date[event_date]['event_count'] += 1 + + # Calculate duration for non-all-day events + if start_dt and event.end and event.end.dateTime: + try: + end_dt = date_parser.isoparse(event.end.dateTime) + duration = end_dt - start_dt + # Add duration in minutes, handle potential negative duration if times are swapped? 
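+ # e.g. a 09:00-10:30 event contributes 90.0 minutes to its start date's total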
+ busyness_by_date[event_date]['total_duration_minutes'] += max(0, duration.total_seconds() / 60.0) + except ValueError: + logger.warning(f"Could not parse end dateTime: {event.end.dateTime} for event {event.id}") + except TypeError: + logger.warning(f"Could not calculate duration for event {event.id} (start: {start_dt}, end: {end_dt})") + + + # Fill in days with zero events within the range? + # Optional: Iterate from time_min.date() to time_max.date() and ensure all keys exist + # current_date = time_min.date() + # while current_date < time_max.date(): + # if current_date not in busyness_by_date: + # busyness_by_date[current_date] = {'event_count': 0, 'total_duration_minutes': 0.0} + # current_date += timedelta(days=1) + + # Convert defaultdict back to regular dict and sort by date + sorted_busyness = dict(sorted(busyness_by_date.items())) + + logger.info(f"Finished busyness analysis. Analyzed {len(sorted_busyness)} days.") + return sorted_busyness \ No newline at end of file diff --git a/src/mcp_servers/calendar-mcp/src/auth.py b/src/mcp_servers/calendar-mcp/src/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..11de5a0137b6b58ce02ec43f5094bdba13223943 --- /dev/null +++ b/src/mcp_servers/calendar-mcp/src/auth.py @@ -0,0 +1,199 @@ +import os +import webbrowser +import http.server +import socketserver +from urllib.parse import urlparse, parse_qs +import threading +import logging +from dotenv import load_dotenv +from google_auth_oauthlib.flow import InstalledAppFlow +from google.oauth2.credentials import Credentials +from google.auth.transport.requests import Request + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Load environment variables from .env file +load_dotenv() + +# --- Configuration --- +GOOGLE_CLIENT_ID = os.getenv('GOOGLE_CLIENT_ID') +GOOGLE_CLIENT_SECRET = os.getenv('GOOGLE_CLIENT_SECRET') +TOKEN_FILE = os.getenv('TOKEN_FILE_PATH', '.gcp-saved-tokens.json') +SCOPES = [os.getenv('CALENDAR_SCOPES', 'https://www.googleapis.com/auth/calendar')] +REDIRECT_PORT = int(os.getenv('OAUTH_CALLBACK_PORT', 8080)) +REDIRECT_URI = f'http://localhost:{REDIRECT_PORT}/oauth2callback' +# Note: REDIRECT_URI must be registered in your Google Cloud Console OAuth Client settings! + +# --- Helper Classes/Functions --- + +class OAuthCallbackHandler(http.server.SimpleHTTPRequestHandler): + """Handles the OAuth callback request to capture the authorization code.""" + def __init__(self, *args, flow_instance=None, shutdown_event=None, **kwargs): + self.flow = flow_instance + self.shutdown_event = shutdown_event + self.auth_code = None + self.error = None + super().__init__(*args, **kwargs) + + def do_GET(self): + """Handle GET requests (the OAuth callback).""" + query_components = parse_qs(urlparse(self.path).query) + code = query_components.get('code') + error = query_components.get('error') + + self.send_response(200) + self.send_header('Content-type', 'text/html') + self.end_headers() + + if code: + self.auth_code = code[0] + logger.info("Authorization code received.") + self.wfile.write(b'

<html><body><h1>Authentication Successful!</h1>') + self.wfile.write(b'<p>Authorization code received. You can close this window.</p></body></html>') + elif error: + self.error = error[0] + logger.error(f"OAuth Error: {self.error}") + self.wfile.write(b'<html><body><h1>Authentication Failed</h1>') + self.wfile.write(f'<p>Error: {self.error}. Please check console.</p></body></html>'.encode()) + else: + logger.warning("Received callback without code or error.") + self.wfile.write(b'<html><body><h1>Invalid Callback</h1>') + self.wfile.write(b'<p>Received an unexpected request.</p></body></html>
') + + # Signal the main thread to stop the server + if self.shutdown_event: + self.shutdown_event.set() + +def start_local_http_server(port, flow, shutdown_event): + """Starts a temporary local HTTP server to handle the OAuth callback.""" + handler = lambda *args, **kwargs: OAuthCallbackHandler( + *args, flow_instance=flow, shutdown_event=shutdown_event, **kwargs + ) + httpd = None + try: + httpd = socketserver.TCPServer(("", port), handler) + logger.info(f"Starting temporary OAuth callback server on port {port}") + httpd.serve_forever() # This blocks until shutdown is called + except OSError as e: + logger.error(f"Failed to start callback server on port {port}: {e}") + # Signal error if server couldn't start + if shutdown_event: + shutdown_event.set() # Also signal to stop waiting + return None, None # Return None for handler if server failed + except Exception as e: + logger.error(f"An unexpected error occurred in the callback server: {e}") + if shutdown_event: + shutdown_event.set() + return None, None + finally: + if httpd: + logger.info("Shutting down OAuth callback server.") + # httpd.shutdown() # This should be called from another thread or after serve_forever unblocks + # httpd.server_close() # Clean up the socket + pass # Shutdown handled by the event + + # This part is tricky because serve_forever blocks. + # The handler instance is associated with the request, not the server itself long-term. + # We need a way to get the code back to the main thread. The handler sets it. + # Let's assume the handler instance associated with the successful callback request is somehow accessible + # or that the main thread can access the handler's state after shutdown. + # A more robust way might use queues or other IPC. + # For now, let's return the handler type, but the instance holding the code is key. + # We will retrieve the code *after* the server is shut down. + # The handler instance is tricky to get back here directly after serve_forever. + # Let's return the server instance, shutdown called externally based on event. + return httpd, handler # Returning the handler *type* here. Need instance capture. + +def get_credentials(): + """Gets valid Google API credentials. Handles loading, refreshing, and the OAuth flow.""" + creds = None + + # Check if mandatory config is present + if not GOOGLE_CLIENT_ID or not GOOGLE_CLIENT_SECRET: + logger.error("Missing GOOGLE_CLIENT_ID or GOOGLE_CLIENT_SECRET in .env file.") + raise ValueError("Missing Google OAuth credentials in configuration.") + + # --- 1. Load existing tokens --- + if os.path.exists(TOKEN_FILE): + try: + creds = Credentials.from_authorized_user_file(TOKEN_FILE, SCOPES) + logger.info("Loaded credentials from token file.") + except Exception as e: + logger.warning(f"Failed to load credentials from {TOKEN_FILE}: {e}. Will attempt re-authentication.") + creds = None # Ensure creds is None if loading failed + + # --- 2. Refresh or Initiate Flow --- + if not creds or not creds.valid: + if creds and creds.expired and creds.refresh_token: + logger.info("Credentials expired. Refreshing...") + try: + creds.refresh(Request()) + logger.info("Credentials refreshed successfully.") + except Exception as e: + logger.error(f"Failed to refresh credentials: {e}. Need to re-authenticate.") + creds = None # Force re-authentication + else: + logger.info("No valid credentials found or refresh failed. 
Starting OAuth flow...") + # Use client_secret dict directly for Flow + client_config = { + "installed": { + "client_id": GOOGLE_CLIENT_ID, + "client_secret": GOOGLE_CLIENT_SECRET, + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "redirect_uris": ["http://localhost", REDIRECT_URI] # Add both for flexibility + } + } + try: + # Use InstalledAppFlow instead of Flow + logger.info("Attempting authentication using InstalledAppFlow...") + flow_installed = InstalledAppFlow.from_client_config( + client_config=client_config, + scopes=SCOPES, + redirect_uri=REDIRECT_URI # Ensure this matches console setup + ) + # This method should handle the server start, browser opening, and code retrieval. + creds = flow_installed.run_local_server( + port=REDIRECT_PORT, + authorization_prompt_message="Please visit this URL to authorize:\n{url}", + success_message="Authentication successful! You can close this window.", + open_browser=True + ) + logger.info("InstalledAppFlow completed.") + + except Exception as e: + logger.error(f"Error during InstalledAppFlow execution: {e}", exc_info=True) + creds = None # Ensure creds is None on error + + if creds: + # Save the credentials for the next run + try: + with open(TOKEN_FILE, 'w') as token_file: + token_file.write(creds.to_json()) + logger.info(f"Credentials saved successfully to {TOKEN_FILE}") + except Exception as e: + logger.error(f"Failed to save credentials to {TOKEN_FILE}: {e}") + else: + logger.error("OAuth flow using InstalledAppFlow did not result in valid credentials.") + return None + + # --- 3. Final Check --- + if not creds or not creds.valid: + logger.error("Failed to obtain valid credentials after all steps.") + return None + + logger.info("Successfully obtained valid credentials.") + return creds + +# Example usage (can be called from server.py) +if __name__ == '__main__': + print("Attempting to get Google Calendar credentials...") + credentials = get_credentials() + if credentials: + print("Successfully obtained credentials.") + print(f"Token URI: {credentials.token_uri}") + # You can now use these credentials to build the service client + else: + print("Failed to obtain credentials.") \ No newline at end of file diff --git a/src/mcp_servers/calendar-mcp/src/calendar_actions.py b/src/mcp_servers/calendar-mcp/src/calendar_actions.py new file mode 100644 index 0000000000000000000000000000000000000000..fac944cf2939a6542260421639f97a23ad6255f3 --- /dev/null +++ b/src/mcp_servers/calendar-mcp/src/calendar_actions.py @@ -0,0 +1,319 @@ +import logging +from typing import List, Optional, Dict, Any + +from google.oauth2.credentials import Credentials +from googleapiclient.discovery import build + +logger = logging.getLogger(__name__) + + +def _load_credentials(token_path: str, scopes: List[str], client_secret_path: Optional[str] = None) -> Credentials: + """ + Loads or refreshes Google OAuth2 credentials. 
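+ If the saved token at token_path cannot be loaded, falls back to the interactive InstalledAppFlow (local server on port 8080, requires client_secret_path) and writes the resulting token back to token_path.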
+ """ + creds = None + + try: + creds = Credentials.from_authorized_user_file(token_path, scopes) + except Exception: + if client_secret_path is None: + raise RuntimeError("Missing Google OAuth credentials.") + + from google_auth_oauthlib.flow import InstalledAppFlow + + flow = InstalledAppFlow.from_client_secrets_file(client_secret_path, scopes) + creds = flow.run_local_server(port=8080) + + with open(token_path, "w") as token_file: + token_file.write(creds.to_json()) + + return creds + + +def _calendar_service(creds: Credentials): + return build("calendar", "v3", credentials=creds) + + +# ------------------------------ +# CALENDAR LOGIC (DIRECT CALLS) +# ------------------------------ + +def list_calendars(min_access_role: Optional[str] = None) -> Dict[str, Any]: + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + try: + result = service.calendarList().list(minAccessRole=min_access_role).execute() + return result + except Exception as e: + logger.exception("Error in list_calendars") + raise RuntimeError(str(e)) + + +def find_events( + calendar_id: str, + time_min: Optional[str], + time_max: Optional[str], + query: Optional[str], + max_results: int, +) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + params = { + "calendarId": calendar_id, + "maxResults": max_results, + "singleEvents": True, + "orderBy": "startTime", + } + + if time_min: + params["timeMin"] = time_min + if time_max: + params["timeMax"] = time_max + if query: + params["q"] = query + + try: + result = service.events().list(**params).execute() + return result + except Exception as e: + logger.exception("Error in find_events") + raise RuntimeError(str(e)) + + +def create_event( + calendar_id: str, + summary: str, + start_time: str, + end_time: str, + description: Optional[str], + location: Optional[str], + attendee_emails: Optional[List[str]], +) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + event_body = { + "summary": summary, + "start": {"dateTime": start_time}, + "end": {"dateTime": end_time}, + } + + if description: + event_body["description"] = description + if location: + event_body["location"] = location + if attendee_emails: + event_body["attendees"] = [{"email": x} for x in attendee_emails] + + try: + result = service.events().insert(calendarId=calendar_id, body=event_body).execute() + return result + except Exception as e: + logger.exception("Error in create_event") + raise RuntimeError(str(e)) + + +def quick_add_event(calendar_id: str, text: str) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + try: + result = service.events().quickAdd(calendarId=calendar_id, text=text).execute() + return result + except Exception 
as e: + logger.exception("Error in quick_add_event") + raise RuntimeError(str(e)) + + +def update_event( + calendar_id: str, + event_id: str, + summary: Optional[str], + start_time: Optional[str], + end_time: Optional[str], + description: Optional[str], + location: Optional[str], +) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + try: + existing = service.events().get(calendarId=calendar_id, eventId=event_id).execute() + + if summary: + existing["summary"] = summary + if description: + existing["description"] = description + if location: + existing["location"] = location + if start_time: + existing["start"]["dateTime"] = start_time + if end_time: + existing["end"]["dateTime"] = end_time + + result = service.events().update( + calendarId=calendar_id, + eventId=event_id, + body=existing + ).execute() + + return result + except Exception as e: + logger.exception("Error in update_event") + raise RuntimeError(str(e)) + + +def delete_event(calendar_id: str, event_id: str) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + try: + service.events().delete(calendarId=calendar_id, eventId=event_id).execute() + return {"status": "deleted", "event_id": event_id} + except Exception as e: + logger.exception("Error in delete_event") + raise RuntimeError(str(e)) + + +def add_attendee(calendar_id: str, event_id: str, attendee_emails: List[str]) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + try: + event = service.events().get(calendarId=calendar_id, eventId=event_id).execute() + + existing = event.get("attendees", []) + for email in attendee_emails: + existing.append({"email": email}) + + event["attendees"] = existing + + updated = service.events().update( + calendarId=calendar_id, + eventId=event_id, + body=event + ).execute() + + return updated + except Exception as e: + logger.exception("Error in add_attendee") + raise RuntimeError(str(e)) + + +def check_attendee_status(event_id: str, calendar_id: str, attendee_emails: Optional[List[str]]) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + try: + event = service.events().get(calendarId=calendar_id, eventId=event_id).execute() + attendees = event.get("attendees", []) + + if attendee_emails: + attendees = [a for a in attendees if a["email"] in attendee_emails] + + return {"event_id": event_id, "attendees": attendees} + + except Exception as e: + logger.exception("Error in check_attendee_status") + raise RuntimeError(str(e)) + + +def query_free_busy(calendar_ids: List[str], time_min: str, time_max: str) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + 
scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + body = { + "timeMin": time_min, + "timeMax": time_max, + "items": [{"id": cid} for cid in calendar_ids], + } + + try: + result = service.freebusy().query(body=body).execute() + return result + except Exception as e: + logger.exception("Error in query_free_busy") + raise RuntimeError(str(e)) + + +def schedule_mutual( + attendee_calendar_ids: List[str], + time_min: str, + time_max: str, + duration_minutes: int, + summary: str, + description: Optional[str], +) -> Dict[str, Any]: + + fb = query_free_busy(attendee_calendar_ids, time_min, time_max) + # High-level scheduling logic left unchanged + return fb + + +def analyze_busyness(time_min: str, time_max: str, calendar_id: str) -> Dict[str, Any]: + fb = query_free_busy([calendar_id], time_min, time_max) + return fb + + +def create_calendar(summary: str) -> Dict[str, Any]: + + creds = _load_credentials( + token_path="./secrets/gcalendar-mcp/calendar_token.json", + scopes=["https://www.googleapis.com/auth/calendar"], + client_secret_path="./secrets/gcalendar-mcp/calendar_credentials.json", + ) + service = _calendar_service(creds) + + try: + new_calendar = {"summary": summary} + result = service.calendars().insert(body=new_calendar).execute() + return result + except Exception as e: + logger.exception("Error in create_calendar") + raise RuntimeError(str(e)) diff --git a/src/mcp_servers/calendar-mcp/src/mcp_bridge.py b/src/mcp_servers/calendar-mcp/src/mcp_bridge.py new file mode 100644 index 0000000000000000000000000000000000000000..6877f8546c533f18093feebab8380dd84ed81176 --- /dev/null +++ b/src/mcp_servers/calendar-mcp/src/mcp_bridge.py @@ -0,0 +1,237 @@ +import json +import logging +from typing import List, Optional +from mcp.server.fastmcp import FastMCP + +# Import the actual calendar logic (NO HTTP calls anymore) +from src.calendar_actions import ( + list_calendars, + find_events, + create_event, + quick_add_event, + update_event, + delete_event, + add_attendee, + check_attendee_status, + query_free_busy, + schedule_mutual, + analyze_busyness, + create_calendar, +) + +logger = logging.getLogger(__name__) + + +def create_mcp_server(): + """Creates a pure MCP server with direct calls into calendar_actions.""" + mcp = FastMCP("calendar-mcp") + + # 1. list_calendars + @mcp.tool() + async def list_calendars_tool(min_access_role: Optional[str] = None) -> str: + try: + result = list_calendars(min_access_role=min_access_role) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("list_calendars failed") + return json.dumps({"error": str(e)}) + + # 2. find_events + @mcp.tool() + async def find_events_tool( + calendar_id: str = "primary", + time_min: Optional[str] = None, + time_max: Optional[str] = None, + query: Optional[str] = None, + max_results: int = 50, + ) -> str: + try: + result = find_events( + calendar_id=calendar_id, + time_min=time_min, + time_max=time_max, + query=query, + max_results=max_results, + ) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("find_events failed") + return json.dumps({"error": str(e)}) + + # 3. 
create_event + @mcp.tool() + async def create_event_tool( + summary: str, + start_time: str, + end_time: str, + calendar_id: str = "primary", + description: Optional[str] = None, + location: Optional[str] = None, + attendee_emails: Optional[List[str]] = None, + ) -> str: + try: + result = create_event( + calendar_id=calendar_id, + summary=summary, + start_time=start_time, + end_time=end_time, + description=description, + location=location, + attendee_emails=attendee_emails, + ) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("create_event failed") + return json.dumps({"error": str(e)}) + + # 4. quick_add_event + @mcp.tool() + async def quick_add_event_tool(text: str, calendar_id: str = "primary") -> str: + try: + result = quick_add_event(calendar_id=calendar_id, text=text) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("quick_add_event failed") + return json.dumps({"error": str(e)}) + + # 5. update_event + @mcp.tool() + async def update_event_tool( + event_id: str, + calendar_id: str = "primary", + summary: Optional[str] = None, + start_time: Optional[str] = None, + end_time: Optional[str] = None, + description: Optional[str] = None, + location: Optional[str] = None, + ) -> str: + try: + result = update_event( + calendar_id=calendar_id, + event_id=event_id, + summary=summary, + start_time=start_time, + end_time=end_time, + description=description, + location=location, + ) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("update_event failed") + return json.dumps({"error": str(e)}) + + # 6. delete_event + @mcp.tool() + async def delete_event_tool(event_id: str, calendar_id: str = "primary") -> str: + try: + result = delete_event(calendar_id=calendar_id, event_id=event_id) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("delete_event failed") + return json.dumps({"error": str(e)}) + + # 7. add_attendee + @mcp.tool() + async def add_attendee_tool(event_id: str, attendee_emails: List[str], calendar_id: str = "primary") -> str: + try: + result = add_attendee( + calendar_id=calendar_id, + event_id=event_id, + attendee_emails=attendee_emails, + ) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("add_attendee failed") + return json.dumps({"error": str(e)}) + + # 8. check_attendee_status + @mcp.tool() + async def check_attendee_status_tool( + event_id: str, + calendar_id: str = "primary", + attendee_emails: Optional[List[str]] = None, + ) -> str: + try: + result = check_attendee_status( + event_id=event_id, + calendar_id=calendar_id, + attendee_emails=attendee_emails, + ) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("check_attendee_status failed") + return json.dumps({"error": str(e)}) + + # 9. query_free_busy + @mcp.tool() + async def query_free_busy_tool( + time_min: str, + time_max: str, + calendar_ids: Optional[List[str]] = None, + ) -> str: + if calendar_ids is None: + calendar_ids = ["primary"] + try: + result = query_free_busy( + calendar_ids=calendar_ids, + time_min=time_min, + time_max=time_max, + ) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("query_free_busy failed") + return json.dumps({"error": str(e)}) + + # 10. 
schedule_mutual + @mcp.tool() + async def schedule_mutual_tool( + attendee_calendar_ids: List[str], + time_min: str, + time_max: str, + duration_minutes: int, + summary: str, + description: Optional[str] = None, + ) -> str: + try: + result = schedule_mutual( + attendee_calendar_ids=attendee_calendar_ids, + time_min=time_min, + time_max=time_max, + duration_minutes=duration_minutes, + summary=summary, + description=description, + ) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("schedule_mutual failed") + return json.dumps({"error": str(e)}) + + # 11. analyze_busyness + @mcp.tool() + async def analyze_busyness_tool( + time_min: str, + time_max: str, + calendar_id: str = "primary", + ) -> str: + try: + result = analyze_busyness( + time_min=time_min, + time_max=time_max, + calendar_id=calendar_id, + ) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("analyze_busyness failed") + return json.dumps({"error": str(e)}) + + # 12. create_calendar + @mcp.tool() + async def create_calendar_tool(summary: str) -> str: + try: + result = create_calendar(summary=summary) + return json.dumps(result, indent=2) + except Exception as e: + logger.exception("create_calendar failed") + return json.dumps({"error": str(e)}) + + return mcp diff --git a/src/mcp_servers/calendar-mcp/src/models.py b/src/mcp_servers/calendar-mcp/src/models.py new file mode 100644 index 0000000000000000000000000000000000000000..fc043289678567a6d49c11570b59e4f2a1b833b2 --- /dev/null +++ b/src/mcp_servers/calendar-mcp/src/models.py @@ -0,0 +1,290 @@ +import datetime # Import the module itself +from pydantic import BaseModel, Field, EmailStr +from typing import Optional, List, Dict, Any +# from datetime import datetime, date # Keep original import commented for reference + +# Based on Google Calendar API v3 Event resource documentation: +# https://developers.google.com/calendar/api/v3/reference/events#resource + +class EventDateTime(BaseModel): + """Represents the start or end time of an event.""" + date: Optional[datetime.date] = None + dateTime: Optional[datetime.datetime] = None # Renamed from 'date_time' to match API JSON + timeZone: Optional[str] = None # Renamed from 'time_zone' + + class Config: + populate_by_name = True # Changed from allow_population_by_field_name + # orm_mode = True # Removed, orm_mode is deprecated in Pydantic V2, use from_attributes=True + +class EventAttendee(BaseModel): + """Represents an attendee of an event.""" + id: Optional[str] = None + email: Optional[EmailStr] = None + displayName: Optional[str] = None # Renamed from 'display_name' + organizer: Optional[bool] = None + self: Optional[bool] = None + resource: Optional[bool] = None + optional: Optional[bool] = None + responseStatus: Optional[str] = None # Renamed from 'response_status' + comment: Optional[str] = None + additionalGuests: Optional[int] = None # Renamed from 'additional_guests' + + class Config: + populate_by_name = True # Changed from allow_population_by_field_name + # orm_mode = True # Removed, orm_mode is deprecated in Pydantic V2, use from_attributes=True + +class EventCreator(BaseModel): + """Represents the creator of an event.""" + id: Optional[str] = None + email: Optional[EmailStr] = None + display_name: Optional[str] = Field(None, alias='displayName') + self: Optional[bool] = None # Whether the creator corresponds to the calendar on which this copy of the event appears. 
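+ # With populate_by_name, both the API's camelCase alias and the snake_case field name are accepted,
+ # e.g. EventCreator.model_validate({"displayName": "Alice"}) and EventCreator(display_name="Alice") populate the same field (Pydantic v2).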
+ + class Config: + populate_by_name = True + +class EventOrganizer(BaseModel): + """Represents the organizer of an event.""" + id: Optional[str] = None + email: Optional[EmailStr] = None + display_name: Optional[str] = Field(None, alias='displayName') + self: Optional[bool] = None # Whether the organizer corresponds to the calendar on which this copy of the event appears. + + class Config: + populate_by_name = True + +class EventReminderOverride(BaseModel): + method: Optional[str] = None + minutes: Optional[int] = None + + class Config: + populate_by_name = True # Changed from allow_population_by_field_name + # orm_mode = True # Removed, orm_mode is deprecated in Pydantic V2, use from_attributes=True + +class EventReminders(BaseModel): + useDefault: bool = Field(..., alias="useDefault") # Renamed from 'use_default' + overrides: Optional[List[EventReminderOverride]] = None + + class Config: + populate_by_name = True # Changed from allow_population_by_field_name + # orm_mode = True # Removed, orm_mode is deprecated in Pydantic V2, use from_attributes=True + +# --- Main Event Model --- + +class GoogleCalendarEvent(BaseModel): + """Pydantic model representing a Google Calendar event resource.""" + kind: str = "calendar#event" + id: Optional[str] = Field(None, description="Opaque identifier of the event.") + status: Optional[str] = Field(None, description="Status of the event ('confirmed', 'tentative', 'cancelled').") + html_link: Optional[str] = Field(None, alias='htmlLink', description="URL for the event in the Google Calendar UI.") + created: Optional[datetime.datetime] = Field(None, description="Creation time of the event (RFC3339 format).") + updated: Optional[datetime.datetime] = Field(None, description="Last modification time of the event (RFC3339 format).") + summary: Optional[str] = Field(None, description="Title of the event.") + description: Optional[str] = Field(None, description="Description of the event. Optional.") + location: Optional[str] = Field(None, description="Geographic location of the event. Optional.") + color_id: Optional[str] = Field(None, alias='colorId', description="Color of the event. Optional.") + creator: Optional[EventCreator] = Field(None, description="The creator of the event. 
Read-only.") + organizer: Optional[EventOrganizer] = Field(None, description="The organizer of the event.") + start: Optional[EventDateTime] = Field(None, description="The start time of the event.") + end: Optional[EventDateTime] = Field(None, description="The end time of the event.") + end_time_unspecified: Optional[bool] = Field(None, alias='endTimeUnspecified', description="Whether the end time is actually unspecified.") + recurrence: Optional[List[str]] = Field(None, description="List of RRULE, EXRULE, RDATE or EXDATE properties for recurring events.") + recurring_event_id: Optional[str] = Field(None, alias='recurringEventId', description="For an instance of a recurring event, this is the id of the recurring event itself.") + original_start_time: Optional[EventDateTime] = Field(None, alias='originalStartTime', description="For an instance of a recurring event, this is the original start time of the instance before modification.") + attendees: Optional[List[EventAttendee]] = Field([], description="The attendees of the event.") + attendees_omitted: Optional[bool] = Field(None, alias='attendeesOmitted', description="Whether attendees were omitted.") + reminders: Optional[EventReminders] = Field(None, description="Information about the event's reminders.") + # Add other fields as needed (e.g., attachments, conferenceData, gadget, source, etc.) + + class Config: + populate_by_name = True + # Consider adding validation logic, e.g., ensuring start is before end + +# --- Models for API Requests/Responses --- + +class EventCreateRequest(BaseModel): + """Model for the request body when creating a detailed event.""" + summary: str + start: EventDateTime + end: EventDateTime + description: Optional[str] = None + location: Optional[str] = None + attendees: Optional[List[EmailStr]] = Field(None, description="List of attendee email addresses to invite.") + recurrence: Optional[List[str]] = Field(None, description="List of RRULEs, EXRULEs, RDATEs or EXDATEs for recurring events.") + reminders: Optional[EventReminders] = Field(None, description="Notification settings for the event.") + # Add other creatable fields as needed + +class QuickAddEventRequest(BaseModel): + """Model for the request body when using the quickAdd endpoint.""" + text: str = Field(..., description="The text describing the event to be parsed by Google Calendar.") + +class EventUpdateRequest(BaseModel): + """Model for the request body when updating an event. + Contains only the fields that can be updated. + """ + summary: Optional[str] = None + start: Optional[EventDateTime] = None + end: Optional[EventDateTime] = None + description: Optional[str] = None + location: Optional[str] = None + attendees: Optional[List[EventAttendee]] = None # Allow updating attendee details or list + # Add other updatable fields + +class AddAttendeeRequest(BaseModel): + """Model for adding attendees to an existing event.""" + attendee_emails: List[EmailStr] = Field(..., description="List of email addresses to add as attendees.") + +# You might also want models for CalendarList entries, etc. + +# Define NotificationSettings first as it's used in CalendarListEntry +class NotificationSettings(BaseModel): + """Represents notification settings for a calendar.""" + notifications: Optional[List[Dict[str, str]]] = None # List of {'type': 'eventCreation', 'method': 'email'} etc. 
+ + class Config: + populate_by_name = True # Changed from allow_population_by_field_name + +class CalendarListEntry(BaseModel): + """Represents an entry in the user's calendar list.""" + kind: str = "calendar#calendarListEntry" + etag: str + id: str + summary: Optional[str] = None + description: Optional[str] = None + location: Optional[str] = None + timeZone: Optional[str] = None # Renamed from 'time_zone' + summaryOverride: Optional[str] = None # Renamed from 'summary_override' + colorId: Optional[str] = None # Renamed from 'color_id' + backgroundColor: Optional[str] = None # Renamed from 'background_color' + foregroundColor: Optional[str] = None # Renamed from 'foreground_color' + hidden: Optional[bool] = None + selected: Optional[bool] = None + accessRole: Optional[str] = None # Renamed from 'access_role' + defaultReminders: Optional[List[EventReminderOverride]] = None # Renamed from 'default_reminders' + notificationSettings: Optional[NotificationSettings] = None # Renamed from 'notification_settings' + primary: Optional[bool] = None + deleted: Optional[bool] = None + +class CalendarListResponse(BaseModel): + """Response containing a list of calendars.""" + kind: str = "calendar#calendarList" + items: List[CalendarListEntry] = [] + nextPageToken: Optional[str] = None + nextSyncToken: Optional[str] = None + +# Re-inserting EventsResponse definition +class EventsResponse(BaseModel): + """Response containing a list of events.""" + kind: str = "calendar#events" + summary: Optional[str] = None + description: Optional[str] = None + updated: Optional[datetime.datetime] = None + timeZone: Optional[str] = None + accessRole: Optional[str] = None + defaultReminders: Optional[List[EventReminderOverride]] = [] + items: List[GoogleCalendarEvent] = [] + nextPageToken: Optional[str] = None + nextSyncToken: Optional[str] = None + +class CalendarList(BaseModel): + """Represents the user's list of calendars.""" + kind: str = "calendar#calendarList" + etag: str + nextPageToken: Optional[str] = None # Renamed from 'next_page_token' + nextSyncToken: Optional[str] = None # Renamed from 'next_sync_token' + items: List[CalendarListEntry] + + class Config: + populate_by_name = True # Changed from allow_population_by_field_name + +# --- Models for Advanced Actions --- + +# --- Check Attendee Status --- +class CheckAttendeeStatusRequest(BaseModel): + event_id: str + calendar_id: str = 'primary' + attendee_emails: Optional[List[EmailStr]] = None + +class CheckAttendeeStatusResponse(BaseModel): + status_map: Dict[EmailStr, str] = Field(..., description="Mapping of attendee email to their responseStatus ('accepted', 'declined', etc.)") + +# --- Find Availability (Free/Busy) --- +class FreeBusyRequestItem(BaseModel): + id: str # Calendar ID + +class FreeBusyRequest(BaseModel): + time_min: datetime.datetime = Field(..., alias='timeMin') + time_max: datetime.datetime = Field(..., alias='timeMax') + items: List[FreeBusyRequestItem] + # Optional: timeZone, groupExpansionMax, calendarExpansionMax + time_zone: Optional[str] = Field(None, alias='timeZone') + + class Config: + populate_by_name = True + +class TimePeriod(BaseModel): + start: datetime.datetime + end: datetime.datetime + +class FreeBusyError(BaseModel): + domain: str + reason: str + +class CalendarBusyInfo(BaseModel): + errors: Optional[List[FreeBusyError]] = None + busy: List[TimePeriod] = [] + +class FreeBusyResponse(BaseModel): + kind: str = "calendar#freeBusy" + time_min: datetime.datetime = Field(..., alias='timeMin') + time_max: datetime.datetime = 
Field(..., alias='timeMax') + calendars: Dict[str, CalendarBusyInfo] = {} + # Optional: groups + + class Config: + populate_by_name = True + +# --- Find Mutual Availability & Schedule --- +class ScheduleMutualRequest(BaseModel): + attendee_calendar_ids: List[str] = Field(..., description="List of calendar IDs (usually emails) for attendees whose availability should be checked.") + time_min: datetime.datetime + time_max: datetime.datetime + duration_minutes: int + event_details: EventCreateRequest # Use the existing model for core event info + organizer_calendar_id: str = 'primary' + working_hours_start_str: Optional[str] = Field(None, description="Optional start time for working hours constraint (HH:MM format)") + working_hours_end_str: Optional[str] = Field(None, description="Optional end time for working hours constraint (HH:MM format)") + send_notifications: bool = True + +# Response is GoogleCalendarEvent + +# --- Project Recurring Events --- +class ProjectRecurringRequest(BaseModel): + time_min: datetime.datetime + time_max: datetime.datetime + calendar_id: str = 'primary' + event_query: Optional[str] = None + +# Define ProjectedEventOccurrence within models.py for consistency +class ProjectedEventOccurrenceModel(BaseModel): + original_event_id: str + original_summary: str + occurrence_start: datetime.datetime + occurrence_end: datetime.datetime + +class ProjectRecurringResponse(BaseModel): + projected_occurrences: List[ProjectedEventOccurrenceModel] + +# --- Analyze Busyness --- +class AnalyzeBusynessRequest(BaseModel): + time_min: datetime.datetime + time_max: datetime.datetime + calendar_id: str = 'primary' + +class DailyBusynessStats(BaseModel): + event_count: int + total_duration_minutes: float + +class AnalyzeBusynessResponse(BaseModel): + # Use string representation for date keys in JSON + busyness_by_date: Dict[str, DailyBusynessStats] = Field(..., description="Mapping of date string (YYYY-MM-DD) to busyness stats") \ No newline at end of file diff --git a/src/mcp_servers/calendar-mcp/src/server.py b/src/mcp_servers/calendar-mcp/src/server.py new file mode 100644 index 0000000000000000000000000000000000000000..92bc433c007b79e6d02244bbb2959c52e86e2052 --- /dev/null +++ b/src/mcp_servers/calendar-mcp/src/server.py @@ -0,0 +1,42 @@ +""" +Google Calendar MCP Server - Pure MCP Implementation + +This server provides MCP (Model Context Protocol) tools for interacting with Google Calendar. 
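+ +Clients typically spawn this server over stdio via run_server.py, passing +--creds-file-path and --token-path; see the example clients under +src/mcp_servers/examples/gcalendar/ for the exact invocation.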
+""" + +import sys +import os +import logging +import asyncio + +# Configure logging first to capture any startup errors +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +# Add the parent directory to the path to ensure imports work in all environments +parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +if parent_dir not in sys.path: + sys.path.insert(0, parent_dir) + logger.info(f"Added {parent_dir} to Python path") + +from .mcp_bridge import create_mcp_server + +# MCP server is created in mcp_bridge.py and instantiated in main() below + + +async def main(): + """Main entry point for the MCP server.""" + logger.info("Starting Google Calendar MCP Server...") + + # Create the MCP server with all tools + mcp = create_mcp_server() + + # Run the server + logger.info("MCP server initialized and starting...") + async with mcp.run() as resources: + logger.info("MCP server is running") + await mcp.wait_for_shutdown() + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/src/mcp_servers/examples/gcalendar/__init__.py b/src/mcp_servers/examples/gcalendar/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/mcp_servers/examples/gcalendar/interact_calendar.py b/src/mcp_servers/examples/gcalendar/interact_calendar.py new file mode 100644 index 0000000000000000000000000000000000000000..23b94291390e1556a0da359ca75a62c04d400e25 --- /dev/null +++ b/src/mcp_servers/examples/gcalendar/interact_calendar.py @@ -0,0 +1,66 @@ +""" +Test script to interact with the Google Calendar MCP server. +=============================================================== +# Only run server +# >>> python src/mcp_servers/examples/gcalendar/interact_calendar_spawn.py +""" + + +import asyncio +from langchain_mcp_adapters.client import MultiServerMCPClient +from langchain.agents import create_agent +from langchain_openai.chat_models.base import ChatOpenAI +from .settings import GoogleCalendarSettings +from dotenv import load_dotenv +import sys + +load_dotenv() + +MODEL = ChatOpenAI(model="gpt-4o", temperature=0) + + +settings = GoogleCalendarSettings() + +CALENDAR_MCP_DIR = settings.calendar_mcp_dir +CREDS = settings.creds +TOKEN = settings.token + +async def main(): + client = MultiServerMCPClient({ + "calendar": { + "command": sys.executable, + "args": [ + f"{CALENDAR_MCP_DIR}/run_server.py", + "--creds-file-path", str(CREDS), + "--token-path", str(TOKEN), + ], + "transport": "stdio", + } + }) + + tools = await client.get_tools() + agent = create_agent(MODEL, tools) + + result = await agent.ainvoke({ + "messages": [ + { + "role": "system", + "content": ( + "You are a scheduling assistant authorized to use Google Calendar MCP tools. " + "You can list, create, and analyze events." + ), + }, + { + "role": "user", + "content": ( + "Create a calendar event for the coming Friday at 3pm titled " + "'Kick-off of the Agent MCP Hackathon'." 
+ ), + }, + ] + }) + + print(result["messages"][-1].content) + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_servers/examples/gcalendar/list_server_tools.py b/src/mcp_servers/examples/gcalendar/list_server_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..ceb4ff967a1dcbaaaf0e2b012d495af0d6d5ab9d --- /dev/null +++ b/src/mcp_servers/examples/gcalendar/list_server_tools.py @@ -0,0 +1,59 @@ +""" +Calendar MCP Server Tool Introspection +====================================== + +# Only run the server: +# (won't make this script work on its own, since this script launches the server itself) +# >>> python src/mcp_servers/calendar-mcp/run_server.py + +# Start the server & list MCP tools: +# >>> python src/mcp_servers/examples/gcalendar/list_server_tools.py + +""" + +import asyncio +from langchain_mcp_adapters.client import MultiServerMCPClient +import sys + +from .settings import GoogleCalendarSettings + + +settings = GoogleCalendarSettings() + +CALENDAR_MCP_DIR = settings.calendar_mcp_dir +CREDS = settings.creds +TOKEN = settings.token + + +async def main(): + # Connect to the Calendar MCP server + client = MultiServerMCPClient({ + "calendar": { + "command": sys.executable, + "args": [ + f"{CALENDAR_MCP_DIR}/run_server.py", + "--creds-file-path", str(CREDS), + "--token-path", str(TOKEN), + ], + "transport": "stdio", + } + }) + + + # Fetch MCP tool definitions + tools = await client.get_tools() + + print("\n📅 Tools exposed by Calendar MCP server:\n") + for t in tools: + print(f"🔧 TOOL: {t.name}") + if t.args_schema: + print("📄 SCHEMA:") + print(t.args_schema) + else: + print("📄 SCHEMA: None") + print("-" * 60) + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_servers/examples/gcalendar/settings.py b/src/mcp_servers/examples/gcalendar/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..7d95cd40a3b5ecabffbe33ad3535d1bdf3ef9037 --- /dev/null +++ b/src/mcp_servers/examples/gcalendar/settings.py @@ -0,0 +1,19 @@ +from pathlib import Path +from pydantic import Field +from pydantic_settings import BaseSettings + +BASE_PATH = Path(__file__).resolve().parents[4] # goes up to project root + +class GoogleCalendarSettings(BaseSettings): + """Settings for the Google Calendar MCP server.""" + creds: Path = Field(default_factory=lambda: BASE_PATH / "secrets/gcalendar-mcp/calendar_credentials.json") + token: Path = Field(default_factory=lambda: BASE_PATH / "secrets/gcalendar-mcp/calendar_token.json") + calendar_mcp_dir: Path = Field(default=BASE_PATH / "src/mcp_servers/calendar-mcp") + + + +if __name__ == "__main__": + settings = GoogleCalendarSettings() + print(settings) + print(settings.creds) + print(settings.token) \ No newline at end of file diff --git a/src/mcp_servers/examples/gmail/__init__.py b/src/mcp_servers/examples/gmail/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/mcp_servers/examples/gmail/list_server_tools.py b/src/mcp_servers/examples/gmail/list_server_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..821e436cc20f65ac59cc9405e4848d9ae8ba3d77 --- /dev/null +++ b/src/mcp_servers/examples/gmail/list_server_tools.py @@ -0,0 +1,46 @@ + +""" +The only part that can differ across Gmail MCP implementations is the exact tool names and schemas +(e.g., listEmails, sendEmail, arg names).
That's why I recommend a quick introspection step to list +the tools the server exposes and their JSON schemas—then prompt your agent accordingly. + +run as follows: +>>> python -m src.mcp_servers.examples.gmail.list_server_tools +""" + +import asyncio +from langchain_mcp_adapters.client import MultiServerMCPClient +from pathlib import Path +from .settings import GMailSettings + +UV_PATH = "/Users/sebastianwefers/.local/bin/uv" # <= full path to uv (important) +settings = GMailSettings() + +async def main(): + client = MultiServerMCPClient({ + "gmail": { + "command": UV_PATH, + "args": [ + "--directory", str(settings.gmail_mcp_dir), + "run", "gmail", + "--creds-file-path", str(settings.creds), + "--token-path", str(settings.token) + ], + "transport": "stdio", + } + }) + tools = await client.get_tools() + + print("\n📬 Tools exposed by Gmail MCP server:\n") + for t in tools: + print(f"🔧 TOOL: {t.name}") + if t.args_schema: + print("📄 SCHEMA:") + print(t.args_schema) # already a dict + else: + print("📄 SCHEMA: None") + print("-" * 60) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_servers/examples/gmail/send_email.py b/src/mcp_servers/examples/gmail/send_email.py new file mode 100644 index 0000000000000000000000000000000000000000..672c36bad470ca0acb095688492a770cdfbb5bf2 --- /dev/null +++ b/src/mcp_servers/examples/gmail/send_email.py @@ -0,0 +1,96 @@ +""" +Gmail MCP Server +================ + +This file implements a Model Context Protocol (MCP) server that bridges +a local Gmail API client to an AI model (e.g. Claude or LangGraph agents). + +How it works: +------------- +- The LLM connects to this server through an MCP client (e.g. MultiServerMCPClient) + using STDIO as a transport layer. +- When the server starts, it authenticates with Gmail (via OAuth2) and registers + all available Gmail operations as MCP tools (send, read, search, label, etc.). +- The MCP client fetches these tool definitions and exposes them to the LLM. +- When the LLM decides to use one (e.g. “send an email”), it triggers a tool call, + which this server executes and returns the result. + +Why these arguments: +-------------------- +--creds-file-path : path to the Google OAuth2 client credentials (for login) +--token-path : path to the saved access/refresh token (for session reuse) + +Together, these allow the MCP server to securely spin up a Gmail session and +respond to model-issued actions via standard I/O. 
+ +run as follows: +>>> python -m src.mcp_servers.examples.gmail.send_email +""" + + + +import asyncio +from langchain_mcp_adapters.client import MultiServerMCPClient +from langchain.agents import create_agent + +from langchain_openai.chat_models.base import ChatOpenAI +from pathlib import Path +from .settings import GMailSettings +from dotenv import load_dotenv +load_dotenv() + + +UV_PATH = "/Users/sebastianwefers/.local/bin/uv" # <= full path to uv (important) +MODEL = ChatOpenAI(model="gpt-4o", temperature=0) + +settings = GMailSettings() + + +async def main(): + # 1) Connect to the Gmail MCP server via stdio + client = MultiServerMCPClient( + { + "gmail": { + "command": UV_PATH, + "args": [ + "--directory", str(settings.gmail_mcp_dir), + "run", "gmail", + "--creds-file-path", str(settings.creds), + "--token-path", str(settings.token), + ], + "transport": "stdio", + } + } + ) + + # 2) Fetch tool specs from the server + tools = await client.get_tools() + + # 3) Build a simple agent with those tools + agent = create_agent(MODEL, tools) + + # 4) Test: ask the agent to list unread emails or send a draft + result = await agent.ainvoke({ + "messages": [ + { + "role": "system", + "content": ( + "You are an automated agent authorized to use Gmail MCP tools, " + "including sending emails through the user's authorized account." + ), + }, + { + "role": "user", + "content": ( + "Send an email to sebastianwefersnz@gmail.com and say hello." + ), + }, + ] + }) + + print("\n\n~~~ RESULT ~~~") + print(result['messages'][-1].content) + print("~~~ END RESULT ~~~") + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/src/mcp_servers/examples/gmail/settings.py b/src/mcp_servers/examples/gmail/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad82e3aa7adb73f453db9f956d1b3cca471b3f7 --- /dev/null +++ b/src/mcp_servers/examples/gmail/settings.py @@ -0,0 +1,19 @@ +from pathlib import Path +from pydantic import Field +from pydantic_settings import BaseSettings + +BASE_PATH = Path(__file__).resolve().parents[4] # goes up to project root + +class GMailSettings(BaseSettings): + """Settings for Gmail MCP server.""" + creds: Path = Field(default_factory=lambda: BASE_PATH / "secrets/gmail-mcp/credentials.json") + token: Path = Field(default_factory=lambda: BASE_PATH / "secrets/gmail-mcp/token.json") + gmail_mcp_dir: Path = Field(default=BASE_PATH / "src/mcp_servers/gmail-mcp") + + + +if __name__ == "__main__": + settings = GMailSettings() + print(settings) + print(settings.creds) + print(settings.token) \ No newline at end of file diff --git a/src/mcp_servers/gmail-mcp/.gitignore b/src/mcp_servers/gmail-mcp/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..be0ba3f9be76a3e395913afde198730416c1673e --- /dev/null +++ b/src/mcp_servers/gmail-mcp/.gitignore @@ -0,0 +1,11 @@ +# Python-generated files +__pycache__/ +*.py[oc] +build/ +dist/ +wheels/ +*.egg-info +*.DS_Store + +# Virtual environments +.venv diff --git a/src/mcp_servers/gmail-mcp/.python-version b/src/mcp_servers/gmail-mcp/.python-version new file mode 100644 index 0000000000000000000000000000000000000000..e4fba2183587225f216eeada4c78dfab6b2e65f5 --- /dev/null +++ b/src/mcp_servers/gmail-mcp/.python-version @@ -0,0 +1 @@ +3.12 diff --git a/src/mcp_servers/gmail-mcp/Claude.code-workspace b/src/mcp_servers/gmail-mcp/Claude.code-workspace new file mode 100644 index 0000000000000000000000000000000000000000..78c92da1c49c2f6905ea1019f0a59d8a7b86b910 --- /dev/null +++ 
b/src/mcp_servers/gmail-mcp/Claude.code-workspace @@ -0,0 +1,11 @@ +{ + "folders": [ + { + "path": "../Library/Application Support/Claude" + }, + { + "path": "." + } + ], + "settings": {} +} \ No newline at end of file diff --git a/src/mcp_servers/gmail-mcp/LICENSE b/src/mcp_servers/gmail-mcp/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/src/mcp_servers/gmail-mcp/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/src/mcp_servers/gmail-mcp/README.md b/src/mcp_servers/gmail-mcp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..51dfc660ed650e62deb6efd3509148a984c3db1c --- /dev/null +++ b/src/mcp_servers/gmail-mcp/README.md @@ -0,0 +1,169 @@ +# Enhanced Gmail MCP Server + +A powerful and feature-rich Model Context Protocol (MCP) server for Gmail integration, written in Python. This server enables AI assistants like Claude to interact with Gmail through natural language, providing comprehensive email management capabilities. 
+ +## 🌟 Key Features + +### 📧 Complete Email Management +- Send emails with customizable subject, content, and recipients +- Read and retrieve email content with full metadata +- Move emails to trash with confirmation +- Mark emails as read/unread +- Open emails directly in browser + +### 📝 Draft Management +- Create draft emails for later review +- List all draft emails +- Edit existing drafts + +### 🏷️ Advanced Label Management +- List all available labels +- Create custom labels +- Apply/remove labels from emails +- Rename existing labels +- Delete unused labels +- Search emails by label + +### 📁 Folder Organization +- Create new folders (implemented as Gmail labels) +- Move emails between folders +- List all available folders + +### 🔍 Powerful Search & Filtering +- Search emails using Gmail's advanced query syntax +- Create, manage, and delete email filters +- Filter by sender, recipient, subject, content, and more +- Customize search results with flexible parameters + +### 🗄️ Archive Management +- Archive emails (remove from inbox without deleting) +- Batch archive multiple emails matching search criteria +- List all archived emails +- Restore archived emails to inbox + +## 🚀 Getting Started + +### Prerequisites +- Python 3.12+ (per `pyproject.toml`) +- Google Cloud project with Gmail API enabled +- OAuth 2.0 credentials + +### Installation + +```bash +# Clone the repository +git clone https://github.com/theposch/gmail-mcp.git +cd gmail-mcp + +# Set up a virtual environment +python -m venv .venv +source .venv/bin/activate  # On Windows: .venv\Scripts\activate + +# Install the package +pip install -e . +``` + +### Google Cloud Setup + +1. Create a [new Google Cloud project](https://console.cloud.google.com/projectcreate) +2. [Enable the Gmail API](https://console.cloud.google.com/apis/library/gmail.googleapis.com) +3. Configure the [OAuth consent screen](https://console.cloud.google.com/apis/credentials/consent): + - Select "External" user type + - Add your email as a test user + - Add the scope: `https://www.googleapis.com/auth/gmail.modify` +4.
Create [OAuth 2.0 credentials](https://console.cloud.google.com/apis/credentials): + - Choose "Desktop app" as the application type + - Download the JSON credentials file + +### Configuration + +Store your credentials securely and specify their location when running the server: + +```bash +# Example directory structure for credentials +mkdir -p ~/.gmail-mcp +# Move your downloaded credentials file +mv ~/Downloads/client_secret_*.json ~/.gmail-mcp/credentials.json +``` + +## 🔧 Usage + +### Running with Claude Desktop + +Add the following to your Claude Desktop configuration file (typically at `~/Library/Application Support/Claude/claude_desktop_config.json`): + +```json +{ + "mcpServers": { + "gmail": { + "command": "uv", + "args": [ + "--directory", + "/absolute/path/to/gmail-mcp", + "run", + "gmail", + "--creds-file-path", + "/absolute/path/to/credentials.json", + "--token-path", + "/absolute/path/to/tokens.json" + ] + } + } +} +``` + +### Testing with MCP Inspector + +For testing and debugging, use the MCP Inspector: + +```bash +npx @modelcontextprotocol/inspector uv run /path/to/gmail-mcp/src/gmail/server.py --creds-file-path /path/to/credentials.json --token-path /path/to/tokens.json +``` + +## 🔐 Security Considerations + +- **Never commit credentials or token files to version control** +- Store credentials in a secure location with appropriate permissions +- The server will request user confirmation before performing sensitive actions +- Review Google Cloud Console regularly for unusual activity +- Consider using environment variables for sensitive paths + +## 🛠️ Architecture + +This implementation features a comprehensive single-file architecture that handles all Gmail operations through the Google API client libraries. Key components include: + +- OAuth2 authentication with automatic token refresh +- Comprehensive error handling and logging +- Structured tool definitions with clear input schemas +- Efficient email parsing and formatting + +## 📚 Example Prompts + +Try these prompts with Claude after connecting the Gmail MCP server: + +- "Show me my unread emails" +- "Search for emails from example@domain.com with attachments" +- "Create a new label called 'Important Projects'" +- "Draft an email to john@example.com about the upcoming meeting" +- "Archive all emails from newsletter@example.com" +- "Create a filter to automatically label emails from my team" + +## 🤝 Contributing + +Contributions are welcome! Please feel free to submit a Pull Request. + +1. Fork the repository +2. Create your feature branch (`git checkout -b feature/amazing-feature`) +3. Commit your changes (`git commit -m 'Add some amazing feature'`) +4. Push to the branch (`git push origin feature/amazing-feature`) +5. Open a Pull Request + +## 📄 License + +This project is licensed under the GPL-3.0 License - see the LICENSE file for details. 
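+ +## 🔑 Token Handling (Illustrative) + +The Architecture section above mentions OAuth2 authentication with automatic token refresh. The sketch below shows how that pattern commonly looks with Google's official client libraries; it illustrates the flow rather than this server's exact code, and the helper name `get_gmail_service` is hypothetical. + +```python
+import os
+
+from google.auth.transport.requests import Request
+from google.oauth2.credentials import Credentials
+from google_auth_oauthlib.flow import InstalledAppFlow
+from googleapiclient.discovery import build
+
+SCOPES = ["https://www.googleapis.com/auth/gmail.modify"]
+
+def get_gmail_service(creds_file_path: str, token_path: str):
+    """Load a cached token, refresh it if expired, or run the OAuth flow on first use."""
+    creds = None
+    if os.path.exists(token_path):
+        creds = Credentials.from_authorized_user_file(token_path, SCOPES)
+    if not creds or not creds.valid:
+        if creds and creds.expired and creds.refresh_token:
+            creds.refresh(Request())  # silent refresh via the stored refresh token
+        else:
+            flow = InstalledAppFlow.from_client_secrets_file(creds_file_path, SCOPES)
+            creds = flow.run_local_server(port=0)  # opens a browser consent screen
+        with open(token_path, "w") as f:
+            f.write(creds.to_json())  # cache the token for the next run
+    return build("gmail", "v1", credentials=creds)
+```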
+ +## 🙏 Acknowledgments + +- Inspired by various MCP server implementations in the community +- Built with the [Model Context Protocol](https://modelcontextprotocol.io/) framework +- Uses Google's official API client libraries \ No newline at end of file diff --git a/src/mcp_servers/gmail-mcp/pyproject.toml b/src/mcp_servers/gmail-mcp/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..8715f00c304e3f2b50d760d7d27d05b966099167 --- /dev/null +++ b/src/mcp_servers/gmail-mcp/pyproject.toml @@ -0,0 +1,19 @@ +[project] +name = "gmail" +version = "0.1.0" +description = "Model Context Protocol server for gmail" +readme = "README.md" +requires-python = ">=3.12" +dependencies = [ + "httpx>=0.28.1", + "mcp>=1.1.2", + "google-api-python-client>=2.156.0", + "google-auth-httplib2>=0.2.0", + "google-auth-oauthlib>=1.2.1", +] +[build-system] +requires = [ "hatchling",] +build-backend = "hatchling.build" + +[project.scripts] +gmail = "gmail:main" diff --git a/src/mcp_servers/gmail-mcp/src/gmail/__init__.py b/src/mcp_servers/gmail-mcp/src/gmail/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5e6285510e023d49682009ebb667929a82ea1836 --- /dev/null +++ b/src/mcp_servers/gmail-mcp/src/gmail/__init__.py @@ -0,0 +1,19 @@ +from . import server +import asyncio +import argparse + +def main(): + """Main entry point for the package.""" + parser = argparse.ArgumentParser(description='Gmail API MCP Server') + parser.add_argument('--creds-file-path', + required=True, + help='OAuth 2.0 credentials file path') + parser.add_argument('--token-path', + required=True, + help='File location to store and retrieve access and refresh tokens for application') + + args = parser.parse_args() + asyncio.run(server.main(args.creds_file_path, args.token_path)) + +# Optionally expose other important items at package level +__all__ = ['main', 'server'] diff --git a/src/mcp_servers/gmail-mcp/src/gmail/server.py b/src/mcp_servers/gmail-mcp/src/gmail/server.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca2451ead0888c013555bbc5dc96ede2e610331 --- /dev/null +++ b/src/mcp_servers/gmail-mcp/src/gmail/server.py @@ -0,0 +1,1874 @@ +from typing import Any +import argparse +import os +import asyncio +import logging +import base64 +from email.message import EmailMessage +from email.header import decode_header +from base64 import urlsafe_b64decode +from email import message_from_bytes +import webbrowser + +from mcp.server.models import InitializationOptions +import mcp.types as types +from mcp.server import NotificationOptions, Server +import mcp.server.stdio + + +from google.auth.transport.requests import Request +from google.oauth2.credentials import Credentials +from google_auth_oauthlib.flow import InstalledAppFlow +from googleapiclient.discovery import build +from googleapiclient.errors import HttpError + + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +EMAIL_ADMIN_PROMPTS = """You are an email administrator. +You can draft, edit, read, trash, open, and send emails. +You've been given access to a specific gmail account. 
+You have the following tools available:
+- Send an email (send-email)
+- Create a draft email (create-draft)
+- List draft emails (list-drafts)
+- Retrieve unread emails (get-unread-emails)
+- Read email content (read-email)
+- Trash email (trash-email)
+- Open email in browser (open-email)
+- List all labels (list-labels)
+- Create a new label (create-label)
+- Apply a label to an email (apply-label)
+- Remove a label from an email (remove-label)
+- Rename a label (rename-label)
+- Delete a label (delete-label)
+- Search for emails with a specific label (search-by-label)
+- Search for emails using Gmail's search syntax (search-emails)
+- List all email filters (list-filters)
+- Get details of a specific filter (get-filter)
+- Create a new email filter (create-filter)
+- Delete a filter (delete-filter)
+- Create a new folder (create-folder)
+- Move an email to a folder (move-to-folder)
+- List all folders (list-folders)
+- Archive an email (archive-email)
+- Batch archive emails (batch-archive)
+- List archived emails (list-archived)
+- Restore an email to inbox (restore-to-inbox)
+
+Never send an email draft or trash an email unless the user confirms first.
+Always ask for approval if not already given.
+"""
+
+# Define available prompts
+PROMPTS = {
+    "manage-email": types.Prompt(
+        name="manage-email",
+        description="Act like an email administrator",
+        arguments=None,
+    ),
+    "draft-email": types.Prompt(
+        name="draft-email",
+        description="Draft an email with content and recipient",
+        arguments=[
+            types.PromptArgument(
+                name="content",
+                description="What the email is about",
+                required=True
+            ),
+            types.PromptArgument(
+                name="recipient",
+                description="Who should the email be addressed to",
+                required=True
+            ),
+            types.PromptArgument(
+                name="recipient_email",
+                description="Recipient's email address",
+                required=True
+            ),
+        ],
+    ),
+    "edit-draft": types.Prompt(
+        name="edit-draft",
+        description="Edit the existing email draft",
+        arguments=[
+            types.PromptArgument(
+                name="changes",
+                description="What changes should be made to the draft",
+                required=True
+            ),
+            types.PromptArgument(
+                name="current_draft",
+                description="The current draft to edit",
+                required=True
+            ),
+        ],
+    ),
+    "manage-labels": types.Prompt(
+        name="manage-labels",
+        description="Manage email labels for organization",
+        arguments=[
+            types.PromptArgument(
+                name="action",
+                description="What action to take with labels (create, list, apply, remove, search)",
+                required=True
+            ),
+        ],
+    ),
+    "manage-filters": types.Prompt(
+        name="manage-filters",
+        description="Manage email filters for automation",
+        arguments=[
+            types.PromptArgument(
+                name="action",
+                description="What action to take with filters (create, list, view, delete)",
+                required=True
+            ),
+        ],
+    ),
+    "search-emails": types.Prompt(
+        name="search-emails",
+        description="Search for emails using Gmail's search syntax",
+        arguments=[
+            types.PromptArgument(
+                name="query",
+                description="What to search for in emails",
+                required=True
+            ),
+        ],
+    ),
+    "manage-folders": types.Prompt(
+        name="manage-folders",
+        description="Manage email folders for organization",
+        arguments=[
+            types.PromptArgument(
+                name="action",
+                description="What action to take with folders (create, list, move)",
+                required=True
+            ),
+        ],
+    ),
+    "manage-archive": types.Prompt(
+        name="manage-archive",
+        description="Manage archived emails",
+        arguments=[
+            types.PromptArgument(
+                name="action",
+                description="What action to take with archives (archive, batch-archive, list, restore)",
+ required=True + ), + ], + ), +} + + +def decode_mime_header(header: str) -> str: + """Helper function to decode encoded email headers""" + + decoded_parts = decode_header(header) + decoded_string = '' + for part, encoding in decoded_parts: + if isinstance(part, bytes): + # Decode bytes to string using the specified encoding + decoded_string += part.decode(encoding or 'utf-8') + else: + # Already a string + decoded_string += part + return decoded_string + + +class GmailService: + def __init__(self, + creds_file_path: str, + token_path: str, + scopes: list[str] = ['https://www.googleapis.com/auth/gmail.modify']): + logger.info(f"Initializing GmailService with creds file: {creds_file_path}") + self.creds_file_path = creds_file_path + self.token_path = token_path + self.scopes = scopes + self.token = self._get_token() + logger.info("Token retrieved successfully") + self.service = self._get_service() + logger.info("Gmail service initialized") + self.user_email = self._get_user_email() + logger.info(f"User email retrieved: {self.user_email}") + + def _get_token(self) -> Credentials: + """Get or refresh Google API token""" + + token = None + + if os.path.exists(self.token_path): + logger.info('Loading token from file') + token = Credentials.from_authorized_user_file(self.token_path, self.scopes) + + if not token or not token.valid: + if token and token.expired and token.refresh_token: + logger.info('Refreshing token') + token.refresh(Request()) + else: + logger.info('Fetching new token') + flow = InstalledAppFlow.from_client_secrets_file(self.creds_file_path, self.scopes) + token = flow.run_local_server(port=0) + + with open(self.token_path, 'w') as token_file: + token_file.write(token.to_json()) + logger.info(f'Token saved to {self.token_path}') + + return token + + def _get_service(self) -> Any: + """Initialize Gmail API service""" + try: + service = build('gmail', 'v1', credentials=self.token) + return service + except HttpError as error: + logger.error(f'An error occurred building Gmail service: {error}') + raise ValueError(f'An error occurred: {error}') + + def _get_user_email(self) -> str: + """Get user email address""" + profile = self.service.users().getProfile(userId='me').execute() + user_email = profile.get('emailAddress', '') + return user_email + + async def send_email(self, recipient_id: str, subject: str, message: str,) -> dict: + """Creates and sends an email message""" + try: + message_obj = EmailMessage() + message_obj.set_content(message) + + message_obj['To'] = recipient_id + message_obj['From'] = self.user_email + message_obj['Subject'] = subject + + encoded_message = base64.urlsafe_b64encode(message_obj.as_bytes()).decode() + create_message = {'raw': encoded_message} + + send_message = await asyncio.to_thread( + self.service.users().messages().send(userId="me", body=create_message).execute + ) + logger.info(f"Message sent: {send_message['id']}") + return {"status": "success", "message_id": send_message["id"]} + except HttpError as error: + return {"status": "error", "error_message": str(error)} + + async def open_email(self, email_id: str) -> str: + """Opens email in browser given ID.""" + try: + url = f"https://mail.google.com/#all/{email_id}" + webbrowser.open(url, new=0, autoraise=True) + return "Email opened in browser successfully." + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def get_unread_emails(self) -> list[dict[str, str]]| str: + """ + Retrieves unread messages from mailbox. 
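+        Uses the Gmail search query 'in:inbox is:unread category:primary', so
+        only unread messages in the Primary inbox category are considered.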
+        Returns a list of message IDs in key 'id'."""
+        try:
+            user_id = 'me'
+            query = 'in:inbox is:unread category:primary'
+
+            response = self.service.users().messages().list(userId=user_id,
+                                                             q=query).execute()
+            messages = []
+            if 'messages' in response:
+                messages.extend(response['messages'])
+
+            while 'nextPageToken' in response:
+                page_token = response['nextPageToken']
+                response = self.service.users().messages().list(userId=user_id, q=query,
+                                                                pageToken=page_token).execute()
+                if 'messages' in response:
+                    messages.extend(response['messages'])
+            return messages
+
+        except HttpError as error:
+            return f"An HttpError occurred: {str(error)}"
+
+    async def read_email(self, email_id: str) -> dict[str, str] | str:
+        """Retrieves email contents including to, from, subject, and contents."""
+        try:
+            msg = self.service.users().messages().get(userId="me", id=email_id, format='raw').execute()
+            email_metadata = {}
+
+            # Decode the base64url-encoded raw content
+            raw_data = msg['raw']
+            decoded_data = urlsafe_b64decode(raw_data)
+
+            # Parse the RFC 2822 email
+            mime_message = message_from_bytes(decoded_data)
+
+            # Extract the email body
+            body = None
+            if mime_message.is_multipart():
+                for part in mime_message.walk():
+                    # Extract the text/plain part
+                    if part.get_content_type() == "text/plain":
+                        body = part.get_payload(decode=True).decode()
+                        break
+            else:
+                # For non-multipart messages
+                body = mime_message.get_payload(decode=True).decode()
+            email_metadata['content'] = body
+
+            # Extract metadata
+            email_metadata['subject'] = decode_mime_header(mime_message.get('subject', ''))
+            email_metadata['from'] = mime_message.get('from', '')
+            email_metadata['to'] = mime_message.get('to', '')
+            email_metadata['date'] = mime_message.get('date', '')
+
+            logger.info(f"Email read: {email_id}")
+
+            # Mark the email as read once we have read it
+            await self.mark_email_as_read(email_id)
+
+            return email_metadata
+        except HttpError as error:
+            return f"An HttpError occurred: {str(error)}"
+
+    async def trash_email(self, email_id: str) -> str:
+        """Moves email to trash given ID."""
+        try:
+            self.service.users().messages().trash(userId="me", id=email_id).execute()
+            logger.info(f"Email moved to trash: {email_id}")
+            return "Email moved to trash successfully."
+        except HttpError as error:
+            return f"An HttpError occurred: {str(error)}"
+
+    async def mark_email_as_read(self, email_id: str) -> str:
+        """Marks email as read given ID."""
+        try:
+            self.service.users().messages().modify(userId="me", id=email_id, body={'removeLabelIds': ['UNREAD']}).execute()
+            logger.info(f"Email marked as read: {email_id}")
+            return "Email marked as read."
+ except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def create_draft(self, recipient_id: str, subject: str, message: str) -> dict: + """Creates a draft email message""" + try: + message_obj = EmailMessage() + message_obj.set_content(message) + + message_obj['To'] = recipient_id + message_obj['From'] = self.user_email + message_obj['Subject'] = subject + + encoded_message = base64.urlsafe_b64encode(message_obj.as_bytes()).decode() + create_message = {'raw': encoded_message} + + draft = await asyncio.to_thread( + self.service.users().drafts().create(userId="me", body={'message': create_message}).execute + ) + logger.info(f"Draft created: {draft['id']}") + return {"status": "success", "draft_id": draft["id"]} + except HttpError as error: + return {"status": "error", "error_message": str(error)} + + async def list_drafts(self) -> list[dict] | str: + """Lists all draft emails""" + try: + results = await asyncio.to_thread( + self.service.users().drafts().list(userId="me").execute + ) + drafts = results.get('drafts', []) + + draft_list = [] + for draft in drafts: + draft_id = draft['id'] + # Get the draft details to extract subject and recipient + draft_data = await asyncio.to_thread( + self.service.users().drafts().get(userId="me", id=draft_id).execute + ) + + message = draft_data.get('message', {}) + headers = message.get('payload', {}).get('headers', []) + + subject = next((header['value'] for header in headers if header['name'].lower() == 'subject'), 'No Subject') + to = next((header['value'] for header in headers if header['name'].lower() == 'to'), 'No Recipient') + + draft_list.append({ + 'id': draft_id, + 'subject': subject, + 'to': to + }) + + return draft_list + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def list_labels(self) -> list[dict] | str: + """Lists all labels in the user's mailbox""" + try: + results = await asyncio.to_thread( + self.service.users().labels().list(userId="me").execute + ) + labels = results.get('labels', []) + + label_list = [] + for label in labels: + label_list.append({ + 'id': label['id'], + 'name': label['name'], + 'type': label['type'] # 'system' or 'user' + }) + + return label_list + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def create_label(self, name: str) -> dict | str: + """Creates a new label""" + try: + label_object = { + 'name': name, + 'labelListVisibility': 'labelShow', # Show in label list + 'messageListVisibility': 'show' # Show in message list + } + + created_label = await asyncio.to_thread( + self.service.users().labels().create(userId="me", body=label_object).execute + ) + + logger.info(f"Label created: {created_label['id']}") + return { + 'status': 'success', + 'label_id': created_label['id'], + 'name': created_label['name'] + } + except HttpError as error: + return {"status": "error", "error_message": str(error)} + + async def apply_label(self, email_id: str, label_id: str) -> str: + """Applies a label to an email""" + try: + await asyncio.to_thread( + self.service.users().messages().modify( + userId="me", + id=email_id, + body={'addLabelIds': [label_id]} + ).execute + ) + + logger.info(f"Label {label_id} applied to email {email_id}") + return f"Label applied successfully to email." 
+ except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def remove_label(self, email_id: str, label_id: str) -> str: + """Removes a label from an email""" + try: + await asyncio.to_thread( + self.service.users().messages().modify( + userId="me", + id=email_id, + body={'removeLabelIds': [label_id]} + ).execute + ) + + logger.info(f"Label {label_id} removed from email {email_id}") + return f"Label removed successfully from email." + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def search_by_label(self, label_id: str) -> list[dict] | str: + """Searches for emails with a specific label""" + try: + query = f"label:{label_id}" + + response = await asyncio.to_thread( + self.service.users().messages().list(userId="me", q=query).execute + ) + + messages = [] + if 'messages' in response: + messages.extend(response['messages']) + + while 'nextPageToken' in response: + page_token = response['nextPageToken'] + response = await asyncio.to_thread( + self.service.users().messages().list( + userId="me", + q=query, + pageToken=page_token + ).execute + ) + messages.extend(response['messages']) + + return messages + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def list_filters(self) -> list[dict] | str: + """Lists all filters in the user's mailbox""" + try: + results = await asyncio.to_thread( + self.service.users().settings().filters().list(userId="me").execute + ) + filters = results.get('filter', []) + return filters + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def get_filter(self, filter_id: str) -> dict | str: + """Gets a specific filter by ID""" + try: + filter_data = await asyncio.to_thread( + self.service.users().settings().filters().get(userId="me", id=filter_id).execute + ) + return filter_data + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def create_filter(self, + from_email: str = None, + to_email: str = None, + subject: str = None, + query: str = None, + has_attachment: bool = None, + exclude_chats: bool = None, + size_comparison: str = None, + size: int = None, + add_label_ids: list[str] = None, + remove_label_ids: list[str] = None, + forward_to: str = None) -> dict | str: + """Creates a new email filter + + Args: + from_email: Email from a specific sender + to_email: Email to a specific recipient + subject: Email with a specific subject + query: Email matching a custom query + has_attachment: Email has an attachment + exclude_chats: Exclude chats from filter + size_comparison: 'larger' or 'smaller' + size: Size in bytes for comparison + add_label_ids: Labels to add to matching emails + remove_label_ids: Labels to remove from matching emails + forward_to: Email address to forward matching emails to + """ + try: + # Build the filter criteria + criteria = {} + if from_email: + criteria['from'] = from_email + if to_email: + criteria['to'] = to_email + if subject: + criteria['subject'] = subject + if query: + criteria['query'] = query + if has_attachment is not None: + criteria['hasAttachment'] = has_attachment + if exclude_chats is not None: + criteria['excludeChats'] = exclude_chats + if size_comparison and size: + if size_comparison.lower() == 'larger': + criteria['sizeComparison'] = 'larger' + criteria['size'] = size + elif size_comparison.lower() == 'smaller': + criteria['sizeComparison'] = 'smaller' + criteria['size'] = size + + # Build the filter actions + action = {} + if add_label_ids: + 
action['addLabelIds'] = add_label_ids + if remove_label_ids: + action['removeLabelIds'] = remove_label_ids + if forward_to: + action['forward'] = forward_to + + # Create the filter + filter_object = { + 'criteria': criteria, + 'action': action + } + + created_filter = await asyncio.to_thread( + self.service.users().settings().filters().create( + userId="me", + body=filter_object + ).execute + ) + + logger.info(f"Filter created: {created_filter['id']}") + return { + 'status': 'success', + 'filter_id': created_filter['id'], + 'filter': created_filter + } + except HttpError as error: + return {"status": "error", "error_message": str(error)} + + async def delete_filter(self, filter_id: str) -> str: + """Deletes a filter by ID""" + try: + await asyncio.to_thread( + self.service.users().settings().filters().delete( + userId="me", + id=filter_id + ).execute + ) + + logger.info(f"Filter deleted: {filter_id}") + return f"Filter deleted successfully." + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def search_emails(self, query: str, max_results: int = 50) -> list[dict] | str: + """ + Searches for emails using Gmail's search syntax. + + Args: + query: Gmail search query (e.g., 'from:example@gmail.com', 'subject:hello', etc.) + max_results: Maximum number of results to return (default: 50) + + Returns: + List of message objects or error message + """ + try: + user_id = 'me' + + response = await asyncio.to_thread( + self.service.users().messages().list( + userId=user_id, + q=query, + maxResults=max_results + ).execute + ) + + messages = [] + if 'messages' in response: + messages.extend(response['messages']) + + # Get additional pages if available and needed + while 'nextPageToken' in response and len(messages) < max_results: + page_token = response['nextPageToken'] + response = await asyncio.to_thread( + self.service.users().messages().list( + userId=user_id, + q=query, + pageToken=page_token, + maxResults=max_results - len(messages) + ).execute + ) + if 'messages' in response: + messages.extend(response['messages']) + + # Get basic metadata for each message + result_messages = [] + for msg in messages: + msg_data = await asyncio.to_thread( + self.service.users().messages().get( + userId=user_id, + id=msg['id'], + format='metadata', + metadataHeaders=['Subject', 'From', 'Date'] + ).execute + ) + + headers = msg_data.get('payload', {}).get('headers', []) + + subject = next((header['value'] for header in headers if header['name'].lower() == 'subject'), 'No Subject') + sender = next((header['value'] for header in headers if header['name'].lower() == 'from'), 'Unknown Sender') + date = next((header['value'] for header in headers if header['name'].lower() == 'date'), '') + + result_messages.append({ + 'id': msg['id'], + 'threadId': msg['threadId'], + 'subject': subject, + 'from': sender, + 'date': date, + 'snippet': msg_data.get('snippet', '') + }) + + return result_messages + + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def create_folder(self, name: str) -> dict | str: + """ + Creates a new folder (implemented as a label with special handling). 
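+        Gmail has no native folder object: a 'folder' is a user-created label,
+        and moving a message into it applies that label and removes INBOX.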
+ + Args: + name: Name of the folder to create + + Returns: + Dictionary with status and folder information or error message + """ + try: + # In Gmail, folders are just labels with special visibility settings + label_object = { + 'name': name, + 'labelListVisibility': 'labelShow', + 'messageListVisibility': 'show', + 'type': 'user' # Ensure it's a user label + } + + created_label = await asyncio.to_thread( + self.service.users().labels().create(userId="me", body=label_object).execute + ) + + logger.info(f"Folder created: {created_label['id']}") + return { + 'status': 'success', + 'folder_id': created_label['id'], + 'name': created_label['name'] + } + except HttpError as error: + return {"status": "error", "error_message": str(error)} + + async def move_to_folder(self, email_id: str, folder_id: str) -> str: + """ + Moves an email to a folder by: + 1. Applying the folder label + 2. Removing the INBOX label (to remove from inbox) + + Args: + email_id: ID of the email to move + folder_id: ID of the folder (label) to move to + + Returns: + Success or error message + """ + try: + # First, apply the folder label + await asyncio.to_thread( + self.service.users().messages().modify( + userId="me", + id=email_id, + body={'addLabelIds': [folder_id], 'removeLabelIds': ['INBOX']} + ).execute + ) + + logger.info(f"Email {email_id} moved to folder {folder_id}") + return f"Email moved to folder successfully." + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def list_folders(self) -> list[dict] | str: + """ + Lists all user-created labels (folders) + + Returns: + List of folder information or error message + """ + try: + results = await asyncio.to_thread( + self.service.users().labels().list(userId="me").execute + ) + labels = results.get('labels', []) + + # Filter to only include user-created labels (folders) + folders = [ + { + 'id': label['id'], + 'name': label['name'] + } + for label in labels + if label['type'] == 'user' + ] + + return folders + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def rename_label(self, label_id: str, new_name: str) -> dict | str: + """ + Renames an existing label + + Args: + label_id: ID of the label to rename + new_name: New name for the label + + Returns: + Dictionary with status and updated label information or error message + """ + try: + # First, get the current label to preserve its settings + label = await asyncio.to_thread( + self.service.users().labels().get(userId="me", id=label_id).execute + ) + + # Update only the name field + label['name'] = new_name + + # Update the label + updated_label = await asyncio.to_thread( + self.service.users().labels().update( + userId="me", + id=label_id, + body=label + ).execute + ) + + logger.info(f"Label renamed: {label_id} to {new_name}") + return { + 'status': 'success', + 'label_id': updated_label['id'], + 'name': updated_label['name'] + } + except HttpError as error: + return {"status": "error", "error_message": str(error)} + + async def delete_label(self, label_id: str) -> str: + """ + Deletes a label + + Args: + label_id: ID of the label to delete + + Returns: + Success or error message + """ + try: + await asyncio.to_thread( + self.service.users().labels().delete( + userId="me", + id=label_id + ).execute + ) + + logger.info(f"Label deleted: {label_id}") + return f"Label deleted successfully." 
+ except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def archive_email(self, email_id: str) -> str: + """ + Archives an email by removing the INBOX label + + Args: + email_id: ID of the email to archive + + Returns: + Success or error message + """ + try: + await asyncio.to_thread( + self.service.users().messages().modify( + userId="me", + id=email_id, + body={'removeLabelIds': ['INBOX']} + ).execute + ) + + logger.info(f"Email archived: {email_id}") + return f"Email archived successfully." + except HttpError as error: + return f"An HttpError occurred: {str(error)}" + + async def batch_archive(self, query: str, max_emails: int = 100) -> dict: + """ + Archives multiple emails matching a search query + + Args: + query: Gmail search query to find emails to archive + max_emails: Maximum number of emails to archive in one batch + + Returns: + Dictionary with status and count of archived emails + """ + try: + # First, search for emails matching the query + user_id = 'me' + + response = await asyncio.to_thread( + self.service.users().messages().list( + userId=user_id, + q=query, + maxResults=max_emails + ).execute + ) + + messages = [] + if 'messages' in response: + messages.extend(response['messages']) + + if not messages: + return { + 'status': 'success', + 'archived_count': 0, + 'message': 'No emails found matching the query.' + } + + # Archive each email in the batch + archived_count = 0 + for msg in messages: + try: + await asyncio.to_thread( + self.service.users().messages().modify( + userId="me", + id=msg['id'], + body={'removeLabelIds': ['INBOX']} + ).execute + ) + archived_count += 1 + except Exception as e: + logger.error(f"Error archiving email {msg['id']}: {str(e)}") + + logger.info(f"Batch archived {archived_count} emails") + return { + 'status': 'success', + 'archived_count': archived_count, + 'total_found': len(messages), + 'message': f"Successfully archived {archived_count} out of {len(messages)} emails." + } + except HttpError as error: + return { + 'status': 'error', + 'error_message': str(error) + } + + async def list_archived(self, max_results: int = 50) -> list[dict] | str: + """ + Lists archived emails (emails not in inbox) + + Args: + max_results: Maximum number of results to return + + Returns: + List of archived email objects or error message + """ + try: + # Search for emails that are in "All Mail" but not in "Inbox" + query = "-in:inbox" + + # Use the existing search_emails method + return await self.search_emails(query, max_results) + except Exception as error: + return f"An error occurred: {str(error)}" + + async def restore_to_inbox(self, email_id: str) -> str: + """ + Restores an archived email to the inbox + + Args: + email_id: ID of the email to restore + + Returns: + Success or error message + """ + try: + await asyncio.to_thread( + self.service.users().messages().modify( + userId="me", + id=email_id, + body={'addLabelIds': ['INBOX']} + ).execute + ) + + logger.info(f"Email restored to inbox: {email_id}") + return f"Email restored to inbox successfully." 
+ except HttpError as error: + return f"An HttpError occurred: {str(error)}" + +async def main(creds_file_path: str, + token_path: str): + + gmail_service = GmailService(creds_file_path, token_path) + server = Server("gmail") + + @server.list_prompts() + async def list_prompts() -> list[types.Prompt]: + return list(PROMPTS.values()) + + @server.get_prompt() + async def get_prompt( + name: str, arguments: dict[str, str] | None = None + ) -> types.GetPromptResult: + if name not in PROMPTS: + raise ValueError(f"Prompt not found: {name}") + + if name == "manage-email": + return types.GetPromptResult( + messages=[ + types.PromptMessage( + role="user", + content=types.TextContent( + type="text", + text=EMAIL_ADMIN_PROMPTS, + ) + ) + ] + ) + + if name == "draft-email": + content = arguments.get("content", "") + recipient = arguments.get("recipient", "") + recipient_email = arguments.get("recipient_email", "") + + # First message asks the LLM to create the draft + return types.GetPromptResult( + messages=[ + types.PromptMessage( + role="user", + content=types.TextContent( + type="text", + text=f"""Please draft an email about "{content}" for {recipient} ({recipient_email}). + Include a subject line starting with 'Subject:' on the first line. + Do not send the email yet, just draft it and ask the user for their thoughts.""" + ) + ) + ] + ) + + elif name == "edit-draft": + changes = arguments.get("changes", "") + current_draft = arguments.get("current_draft", "") + + # Edit existing draft based on requested changes + return types.GetPromptResult( + messages=[ + types.PromptMessage( + role="user", + content=types.TextContent( + type="text", + text=f"""Please revise the current email draft: + "{current_draft}" + + Requested changes: + "{changes}" + + Please provide the updated draft.""" + ) + ) + ] + ) + + elif name == "manage-labels": + action = arguments.get("action", "") + + # Guide the LLM on how to manage labels + return types.GetPromptResult( + messages=[ + types.PromptMessage( + role="user", + content=types.TextContent( + type="text", + text=f"""I need help with managing my email labels: I want to "{action}". + +Here are the tools you can use for label management: +- list-labels: Lists all existing labels in my Gmail account +- create-label: Creates a new label with a specified name +- apply-label: Applies a label to a specific email +- remove-label: Removes a label from a specific email +- rename-label: Renames an existing label +- delete-label: Permanently deletes a label +- search-by-label: Finds all emails with a specific label + +If you need to list labels first to get label IDs, please do so.""" + ) + ) + ] + ) + + elif name == "manage-filters": + action = arguments.get("action", "") + + # Guide the LLM on how to manage filters + return types.GetPromptResult( + messages=[ + types.PromptMessage( + role="user", + content=types.TextContent( + type="text", + text=f"""I need help with managing my email filters: I want to "{action}". 
+ +Here are the tools you can use for filter management: +- list-filters: Lists all existing filters in my Gmail account +- get-filter: Gets details of a specific filter +- create-filter: Creates a new filter +- delete-filter: Deletes a specific filter + +If you need to list filters first to get filter IDs, please do so.""" + ) + ) + ] + ) + + elif name == "search-emails": + query = arguments.get("query", "") + + # Guide the LLM on how to search emails + return types.GetPromptResult( + messages=[ + types.PromptMessage( + role="user", + content=types.TextContent( + type="text", + text=f"""I need to search through my emails for: "{query}" + +Available tools: +- search-emails: Searches all emails using Gmail's search syntax +- get-unread-emails: Gets only unread emails from the inbox + +You can use Gmail's search syntax for advanced searches: +- from:sender - Emails from a specific sender +- to:recipient - Emails to a specific recipient +- subject:text - Emails with specific text in the subject +- has:attachment - Emails with attachments +- after:YYYY/MM/DD - Emails after a specific date +- before:YYYY/MM/DD - Emails before a specific date +- is:important - Important emails +- label:name - Emails with a specific label""" + ) + ) + ] + ) + + elif name == "manage-folders": + action = arguments.get("action", "") + + # Guide the LLM on how to manage folders + return types.GetPromptResult( + messages=[ + types.PromptMessage( + role="user", + content=types.TextContent( + type="text", + text=f"""I need help with managing my email folders. Specifically, I want to "{action}". + +Here are the tools you can use for folder management: +- list-folders: Lists all existing folders in my Gmail account +- create-folder: Creates a new folder with a specified name +- move-to-folder: Moves an email to a specific folder (removes it from inbox) + +If you need to list folders first to get folder IDs, please do so. + +In Gmail, folders are implemented as labels with special handling. When you move an email to a folder, +it applies the folder's label and removes the email from the inbox.""" + ) + ) + ] + ) + + elif name == "manage-archive": + action = arguments.get("action", "") + + # Guide the LLM on how to manage archives + return types.GetPromptResult( + messages=[ + types.PromptMessage( + role="user", + content=types.TextContent( + type="text", + text=f"""I need help with managing my email archives: I want to "{action}". + +Here are the tools you can use for archive management: +- archive-email: Archives a single email (removes from inbox without deleting) +- batch-archive: Archives multiple emails matching a search query +- list-archived: Lists emails that have been archived +- restore-to-inbox: Restores an archived email back to the inbox + +You can use Gmail's search syntax to find emails to archive: +- from:sender - Emails from a specific sender +- older_than:30d - Emails older than 30 days +- has:attachment - Emails with attachments +- subject:text - Emails with specific text in the subject +- before:YYYY/MM/DD - Emails before a specific date + +Archiving in Gmail means removing the email from your inbox while keeping it accessible in "All Mail". +It's a great way to declutter your inbox without losing any emails.""" + ) + ) + ] + ) + + raise ValueError("Prompt implementation not found") + + @server.list_tools() + async def handle_list_tools() -> list[types.Tool]: + return [ + types.Tool( + name="send-email", + description="""Sends email to recipient. + Do not use if user only asked to draft email. 
+ Drafts must be approved before sending.""", + inputSchema={ + "type": "object", + "properties": { + "recipient_id": { + "type": "string", + "description": "Recipient email address", + }, + "subject": { + "type": "string", + "description": "Email subject", + }, + "message": { + "type": "string", + "description": "Email content text", + }, + }, + "required": ["recipient_id", "subject", "message"], + }, + ), + types.Tool( + name="trash-email", + description="""Moves email to trash. + Confirm before moving email to trash.""", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID", + }, + }, + "required": ["email_id"], + }, + ), + types.Tool( + name="get-unread-emails", + description="Retrieve unread emails", + inputSchema={ + "type": "object", + "properties": {}, + "required": [] + }, + ), + types.Tool( + name="read-email", + description="Retrieves given email content", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID", + }, + }, + "required": ["email_id"], + }, + ), + types.Tool( + name="mark-email-as-read", + description="Marks given email as read", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID", + }, + }, + "required": ["email_id"], + }, + ), + types.Tool( + name="open-email", + description="Open email in browser", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID", + }, + }, + "required": ["email_id"], + }, + ), + types.Tool( + name="create-draft", + description="Creates a draft email without sending it", + inputSchema={ + "type": "object", + "properties": { + "recipient_id": { + "type": "string", + "description": "Recipient email address", + }, + "subject": { + "type": "string", + "description": "Email subject", + }, + "message": { + "type": "string", + "description": "Email content text", + }, + }, + "required": ["recipient_id", "subject", "message"], + }, + ), + types.Tool( + name="list-drafts", + description="Lists all draft emails", + inputSchema={ + "type": "object", + "properties": {}, + "required": [] + }, + ), + types.Tool( + name="list-labels", + description="Lists all labels in the user's mailbox", + inputSchema={ + "type": "object", + "properties": {}, + "required": [] + }, + ), + types.Tool( + name="create-label", + description="Creates a new label", + inputSchema={ + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Label name", + }, + }, + "required": ["name"], + }, + ), + types.Tool( + name="apply-label", + description="Applies a label to an email", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID", + }, + "label_id": { + "type": "string", + "description": "Label ID", + }, + }, + "required": ["email_id", "label_id"], + }, + ), + types.Tool( + name="remove-label", + description="Removes a label from an email", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID", + }, + "label_id": { + "type": "string", + "description": "Label ID", + }, + }, + "required": ["email_id", "label_id"], + }, + ), + types.Tool( + name="rename-label", + description="Renames an existing label", + inputSchema={ + "type": "object", + "properties": { + "label_id": { + "type": "string", + "description": "Label ID to rename", + }, + "new_name": { + "type": "string", + 
"description": "New name for the label", + }, + }, + "required": ["label_id", "new_name"], + }, + ), + types.Tool( + name="delete-label", + description="Permanently deletes a label", + inputSchema={ + "type": "object", + "properties": { + "label_id": { + "type": "string", + "description": "Label ID to delete", + }, + }, + "required": ["label_id"], + }, + ), + types.Tool( + name="search-by-label", + description="Searches for emails with a specific label", + inputSchema={ + "type": "object", + "properties": { + "label_id": { + "type": "string", + "description": "Label ID", + }, + }, + "required": ["label_id"], + }, + ), + types.Tool( + name="list-filters", + description="Lists all email filters in the user's mailbox", + inputSchema={ + "type": "object", + "properties": {}, + "required": [] + }, + ), + types.Tool( + name="get-filter", + description="Gets details of a specific filter", + inputSchema={ + "type": "object", + "properties": { + "filter_id": { + "type": "string", + "description": "Filter ID", + }, + }, + "required": ["filter_id"], + }, + ), + types.Tool( + name="create-filter", + description="Creates a new email filter", + inputSchema={ + "type": "object", + "properties": { + "from_email": { + "type": "string", + "description": "Filter emails from this sender", + }, + "to_email": { + "type": "string", + "description": "Filter emails to this recipient", + }, + "subject": { + "type": "string", + "description": "Filter emails with this subject", + }, + "query": { + "type": "string", + "description": "Filter emails matching this query", + }, + "has_attachment": { + "type": "boolean", + "description": "Filter emails with attachments", + }, + "exclude_chats": { + "type": "boolean", + "description": "Exclude chats from filter", + }, + "size_comparison": { + "type": "string", + "description": "Size comparison ('larger' or 'smaller')", + }, + "size": { + "type": "integer", + "description": "Size in bytes for comparison", + }, + "add_label_ids": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Labels to add to matching emails", + }, + "remove_label_ids": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Labels to remove from matching emails", + }, + "forward_to": { + "type": "string", + "description": "Email address to forward matching emails to", + }, + }, + }, + ), + types.Tool( + name="delete-filter", + description="Deletes a specific filter", + inputSchema={ + "type": "object", + "properties": { + "filter_id": { + "type": "string", + "description": "Filter ID", + }, + }, + "required": ["filter_id"], + }, + ), + types.Tool( + name="search-emails", + description="Searches for emails using Gmail's search syntax", + inputSchema={ + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Gmail search query", + }, + "max_results": { + "type": "integer", + "description": "Maximum number of results to return", + }, + }, + "required": ["query"], + }, + ), + types.Tool( + name="create-folder", + description="Creates a new folder", + inputSchema={ + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Folder name", + }, + }, + "required": ["name"], + }, + ), + types.Tool( + name="move-to-folder", + description="Moves an email to a folder", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID", + }, + "folder_id": { + "type": "string", + "description": "Folder ID", + }, + }, + "required": ["email_id", "folder_id"], + }, + 
), + types.Tool( + name="list-folders", + description="Lists all user-created folders", + inputSchema={ + "type": "object", + "properties": {}, + "required": [] + }, + ), + types.Tool( + name="archive-email", + description="Archives an email (removes from inbox without deleting)", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID to archive", + }, + }, + "required": ["email_id"], + }, + ), + types.Tool( + name="batch-archive", + description="Archives multiple emails matching a search query", + inputSchema={ + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Gmail search query to find emails to archive", + }, + "max_emails": { + "type": "integer", + "description": "Maximum number of emails to archive (default: 100)", + }, + }, + "required": ["query"], + }, + ), + types.Tool( + name="list-archived", + description="Lists archived emails (not in inbox)", + inputSchema={ + "type": "object", + "properties": { + "max_results": { + "type": "integer", + "description": "Maximum number of results to return", + }, + }, + "required": [], + }, + ), + types.Tool( + name="restore-to-inbox", + description="Restores an archived email back to the inbox", + inputSchema={ + "type": "object", + "properties": { + "email_id": { + "type": "string", + "description": "Email ID to restore to inbox", + }, + }, + "required": ["email_id"], + }, + ), + ] + + @server.call_tool() + async def handle_call_tool( + name: str, arguments: dict | None + ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]: + + if name == "send-email": + recipient = arguments.get("recipient_id") + if not recipient: + raise ValueError("Missing recipient parameter") + subject = arguments.get("subject") + if not subject: + raise ValueError("Missing subject parameter") + message = arguments.get("message") + if not message: + raise ValueError("Missing message parameter") + + # Extract subject and message content + email_lines = message.split('\n') + if email_lines[0].startswith('Subject:'): + subject = email_lines[0][8:].strip() + message_content = '\n'.join(email_lines[1:]).strip() + else: + message_content = message + + send_response = await gmail_service.send_email(recipient, subject, message_content) + + if send_response["status"] == "success": + response_text = f"Email sent successfully. 
Message ID: {send_response['message_id']}" + else: + response_text = f"Failed to send email: {send_response['error_message']}" + return [types.TextContent(type="text", text=response_text)] + + if name == "get-unread-emails": + + unread_emails = await gmail_service.get_unread_emails() + return [types.TextContent(type="text", text=str(unread_emails),artifact={"type": "json", "data": unread_emails} )] + + if name == "read-email": + email_id = arguments.get("email_id") + if not email_id: + raise ValueError("Missing email ID parameter") + + retrieved_email = await gmail_service.read_email(email_id) + return [types.TextContent(type="text", text=str(retrieved_email),artifact={"type": "dictionary", "data": retrieved_email} )] + if name == "open-email": + email_id = arguments.get("email_id") + if not email_id: + raise ValueError("Missing email ID parameter") + + msg = await gmail_service.open_email(email_id) + return [types.TextContent(type="text", text=str(msg))] + if name == "trash-email": + email_id = arguments.get("email_id") + if not email_id: + raise ValueError("Missing email ID parameter") + + msg = await gmail_service.trash_email(email_id) + return [types.TextContent(type="text", text=str(msg))] + if name == "mark-email-as-read": + email_id = arguments.get("email_id") + if not email_id: + raise ValueError("Missing email ID parameter") + + msg = await gmail_service.mark_email_as_read(email_id) + return [types.TextContent(type="text", text=str(msg))] + elif name == "create-draft": + recipient_id = arguments.get("recipient_id") + subject = arguments.get("subject") + message = arguments.get("message") + if not recipient_id or not subject or not message: + raise ValueError("Missing required parameters for creating a draft") + draft_response = await gmail_service.create_draft(recipient_id, subject, message) + if draft_response["status"] == "success": + response_text = f"Draft created successfully. Draft ID: {draft_response['draft_id']}" + else: + response_text = f"Failed to create draft: {draft_response['error_message']}" + return [types.TextContent(type="text", text=response_text)] + elif name == "list-drafts": + drafts = await gmail_service.list_drafts() + return [types.TextContent(type="text", text=str(drafts), artifact={"type": "json", "data": drafts})] + elif name == "list-labels": + labels = await gmail_service.list_labels() + return [types.TextContent(type="text", text=str(labels), artifact={"type": "json", "data": labels})] + elif name == "create-label": + name = arguments.get("name") + if not name: + raise ValueError("Missing required parameter for creating a label") + label_response = await gmail_service.create_label(name) + if label_response["status"] == "success": + response_text = f"Label created successfully. 
Label ID: {label_response['label_id']}, Name: {label_response['name']}" + else: + response_text = f"Failed to create label: {label_response['error_message']}" + return [types.TextContent(type="text", text=response_text)] + elif name == "apply-label": + email_id = arguments.get("email_id") + label_id = arguments.get("label_id") + if not email_id or not label_id: + raise ValueError("Missing required parameters for applying a label") + msg = await gmail_service.apply_label(email_id, label_id) + return [types.TextContent(type="text", text=str(msg))] + elif name == "remove-label": + email_id = arguments.get("email_id") + label_id = arguments.get("label_id") + if not email_id or not label_id: + raise ValueError("Missing required parameters for removing a label") + msg = await gmail_service.remove_label(email_id, label_id) + return [types.TextContent(type="text", text=str(msg))] + elif name == "search-by-label": + label_id = arguments.get("label_id") + if not label_id: + raise ValueError("Missing required parameter for searching by label") + messages = await gmail_service.search_by_label(label_id) + return [types.TextContent(type="text", text=str(messages), artifact={"type": "json", "data": messages})] + elif name == "list-filters": + filters = await gmail_service.list_filters() + return [types.TextContent(type="text", text=str(filters), artifact={"type": "json", "data": filters})] + elif name == "get-filter": + filter_id = arguments.get("filter_id") + if not filter_id: + raise ValueError("Missing required parameter for getting a filter") + filter_data = await gmail_service.get_filter(filter_id) + return [types.TextContent(type="text", text=str(filter_data), artifact={"type": "dictionary", "data": filter_data})] + elif name == "create-filter": + from_email = arguments.get("from_email") + to_email = arguments.get("to_email") + subject = arguments.get("subject") + query = arguments.get("query") + has_attachment = arguments.get("has_attachment") + exclude_chats = arguments.get("exclude_chats") + size_comparison = arguments.get("size_comparison") + size = arguments.get("size") + add_label_ids = arguments.get("add_label_ids") + remove_label_ids = arguments.get("remove_label_ids") + forward_to = arguments.get("forward_to") + if not from_email and not to_email and not subject and not query and has_attachment is None and exclude_chats is None and size_comparison is None and size is None and add_label_ids is None and remove_label_ids is None and forward_to is None: + raise ValueError("Missing required parameters for creating a filter") + filter_response = await gmail_service.create_filter(from_email, to_email, subject, query, has_attachment, exclude_chats, size_comparison, size, add_label_ids, remove_label_ids, forward_to) + if filter_response["status"] == "success": + response_text = f"Filter created successfully. 
Filter ID: {filter_response['filter_id']}, Filter: {filter_response['filter']}" + else: + response_text = f"Failed to create filter: {filter_response['error_message']}" + return [types.TextContent(type="text", text=response_text)] + elif name == "delete-filter": + filter_id = arguments.get("filter_id") + if not filter_id: + raise ValueError("Missing required parameter for deleting a filter") + msg = await gmail_service.delete_filter(filter_id) + return [types.TextContent(type="text", text=str(msg))] + elif name == "search-emails": + query = arguments.get("query") + max_results = arguments.get("max_results", 50) + if not query: + raise ValueError("Missing required parameter for searching emails") + messages = await gmail_service.search_emails(query, max_results) + return [types.TextContent(type="text", text=str(messages), artifact={"type": "json", "data": messages})] + elif name == "create-folder": + name = arguments.get("name") + if not name: + raise ValueError("Missing required parameter for creating a folder") + folder_response = await gmail_service.create_folder(name) + if folder_response["status"] == "success": + response_text = f"Folder created successfully. Folder ID: {folder_response['folder_id']}, Name: {folder_response['name']}" + else: + response_text = f"Failed to create folder: {folder_response['error_message']}" + return [types.TextContent(type="text", text=response_text)] + elif name == "move-to-folder": + email_id = arguments.get("email_id") + folder_id = arguments.get("folder_id") + if not email_id or not folder_id: + raise ValueError("Missing required parameters for moving an email to a folder") + msg = await gmail_service.move_to_folder(email_id, folder_id) + return [types.TextContent(type="text", text=str(msg))] + elif name == "list-folders": + folders = await gmail_service.list_folders() + return [types.TextContent(type="text", text=str(folders), artifact={"type": "json", "data": folders})] + elif name == "rename-label": + label_id = arguments.get("label_id") + new_name = arguments.get("new_name") + if not label_id or not new_name: + raise ValueError("Missing required parameters for renaming a label") + rename_response = await gmail_service.rename_label(label_id, new_name) + if rename_response["status"] == "success": + response_text = f"Label renamed successfully. 
Label ID: {rename_response['label_id']}, New name: {rename_response['name']}" + else: + response_text = f"Failed to rename label: {rename_response['error_message']}" + return [types.TextContent(type="text", text=response_text)] + elif name == "delete-label": + label_id = arguments.get("label_id") + if not label_id: + raise ValueError("Missing required parameter for deleting a label") + msg = await gmail_service.delete_label(label_id) + return [types.TextContent(type="text", text=str(msg))] + elif name == "archive-email": + email_id = arguments.get("email_id") + if not email_id: + raise ValueError("Missing required parameter for archiving an email") + msg = await gmail_service.archive_email(email_id) + return [types.TextContent(type="text", text=str(msg))] + elif name == "batch-archive": + query = arguments.get("query") + max_emails = arguments.get("max_emails", 100) + if not query: + raise ValueError("Missing required parameter for batch archiving") + archive_response = await gmail_service.batch_archive(query, max_emails) + return [types.TextContent(type="text", text=str(archive_response))] + elif name == "list-archived": + max_results = arguments.get("max_results", 50) + archived_emails = await gmail_service.list_archived(max_results) + return [types.TextContent(type="text", text=str(archived_emails), artifact={"type": "json", "data": archived_emails})] + elif name == "restore-to-inbox": + email_id = arguments.get("email_id") + if not email_id: + raise ValueError("Missing required parameter for restoring an email to inbox") + msg = await gmail_service.restore_to_inbox(email_id) + return [types.TextContent(type="text", text=str(msg))] + else: + logger.error(f"Unknown tool: {name}") + raise ValueError(f"Unknown tool: {name}") + + async with mcp.server.stdio.stdio_server() as (read_stream, write_stream): + await server.run( + read_stream, + write_stream, + InitializationOptions( + server_name="gmail", + server_version="0.1.0", + capabilities=server.get_capabilities( + notification_options=NotificationOptions(), + experimental_capabilities={}, + ), + ), + ) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Gmail API MCP Server') + parser.add_argument('--creds-file-path', + required=True, + help='OAuth 2.0 credentials file path') + parser.add_argument('--token-path', + required=True, + help='File location to store and retrieve access and refresh tokens for application') + + args = parser.parse_args() + asyncio.run(main(args.creds_file_path, args.token_path)) \ No newline at end of file diff --git a/src/mcp_servers/gmail-mcp/uv.lock b/src/mcp_servers/gmail-mcp/uv.lock new file mode 100644 index 0000000000000000000000000000000000000000..0946a5b7b9f9ab038aec1c34a20998470b947a26 --- /dev/null +++ b/src/mcp_servers/gmail-mcp/uv.lock @@ -0,0 +1,529 @@ +version = 1 +requires-python = ">=3.12" +resolution-markers = [ + "python_full_version < '3.13'", + "python_full_version >= '3.13'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = 
"sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, +] + +[[package]] +name = "anyio" +version = "4.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f6/40/318e58f669b1a9e00f5c4453910682e2d9dd594334539c7b7817dabb765f/anyio-4.7.0.tar.gz", hash = "sha256:2f834749c602966b7d456a7567cafcb309f96482b5081d14ac93ccd457f9dd48", size = 177076 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/7a/4daaf3b6c08ad7ceffea4634ec206faeff697526421c20f07628c7372156/anyio-4.7.0-py3-none-any.whl", hash = "sha256:ea60c3723ab42ba6fff7e8ccb0488c898ec538ff4df1f1d5e642c3601d07e352", size = 93052 }, +] + +[[package]] +name = "cachetools" +version = "5.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/38/a0f315319737ecf45b4319a8cd1f3a908e29d9277b46942263292115eee7/cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a", size = 27661 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/07/14f8ad37f2d12a5ce41206c21820d8cb6561b728e51fad4530dff0552a67/cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292", size = 9524 }, +] + +[[package]] +name = "certifi" +version = "2024.12.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/4f/e1808dc01273379acc506d18f1504eb2d299bd4131743b9fc54d7be4df1e/charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", size = 106620 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/0b/4b7a70987abf9b8196845806198975b6aab4ce016632f817ad758a5aa056/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", size = 194445 }, + { url = "https://files.pythonhosted.org/packages/50/89/354cc56cf4dd2449715bc9a0f54f3aef3dc700d2d62d1fa5bbea53b13426/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", size = 125275 }, + { url = "https://files.pythonhosted.org/packages/fa/44/b730e2a2580110ced837ac083d8ad222343c96bb6b66e9e4e706e4d0b6df/charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", size = 119020 }, + { url = "https://files.pythonhosted.org/packages/9d/e4/9263b8240ed9472a2ae7ddc3e516e71ef46617fe40eaa51221ccd4ad9a27/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", size = 139128 }, + { url = "https://files.pythonhosted.org/packages/6b/e3/9f73e779315a54334240353eaea75854a9a690f3f580e4bd85d977cb2204/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", size = 149277 }, + { url = "https://files.pythonhosted.org/packages/1a/cf/f1f50c2f295312edb8a548d3fa56a5c923b146cd3f24114d5adb7e7be558/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", size = 142174 }, + { url = "https://files.pythonhosted.org/packages/16/92/92a76dc2ff3a12e69ba94e7e05168d37d0345fa08c87e1fe24d0c2a42223/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", size = 143838 }, + { url = "https://files.pythonhosted.org/packages/a4/01/2117ff2b1dfc61695daf2babe4a874bca328489afa85952440b59819e9d7/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", size = 146149 }, + { url = "https://files.pythonhosted.org/packages/f6/9b/93a332b8d25b347f6839ca0a61b7f0287b0930216994e8bf67a75d050255/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", size = 140043 }, + { url = "https://files.pythonhosted.org/packages/ab/f6/7ac4a01adcdecbc7a7587767c776d53d369b8b971382b91211489535acf0/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", size = 148229 }, + { url = "https://files.pythonhosted.org/packages/9d/be/5708ad18161dee7dc6a0f7e6cf3a88ea6279c3e8484844c0590e50e803ef/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", size = 151556 }, + { url = "https://files.pythonhosted.org/packages/5a/bb/3d8bc22bacb9eb89785e83e6723f9888265f3a0de3b9ce724d66bd49884e/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", size = 149772 }, + { url = "https://files.pythonhosted.org/packages/f7/fa/d3fc622de05a86f30beea5fc4e9ac46aead4731e73fd9055496732bcc0a4/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", size = 144800 }, + { url = "https://files.pythonhosted.org/packages/9a/65/bdb9bc496d7d190d725e96816e20e2ae3a6fa42a5cac99c3c3d6ff884118/charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", size = 94836 }, + { url = "https://files.pythonhosted.org/packages/3e/67/7b72b69d25b89c0b3cea583ee372c43aa24df15f0e0f8d3982c57804984b/charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", size = 102187 }, + { url = "https://files.pythonhosted.org/packages/f3/89/68a4c86f1a0002810a27f12e9a7b22feb198c59b2f05231349fbce5c06f4/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", size = 194617 
}, + { url = "https://files.pythonhosted.org/packages/4f/cd/8947fe425e2ab0aa57aceb7807af13a0e4162cd21eee42ef5b053447edf5/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", size = 125310 }, + { url = "https://files.pythonhosted.org/packages/5b/f0/b5263e8668a4ee9becc2b451ed909e9c27058337fda5b8c49588183c267a/charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", size = 119126 }, + { url = "https://files.pythonhosted.org/packages/ff/6e/e445afe4f7fda27a533f3234b627b3e515a1b9429bc981c9a5e2aa5d97b6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", size = 139342 }, + { url = "https://files.pythonhosted.org/packages/a1/b2/4af9993b532d93270538ad4926c8e37dc29f2111c36f9c629840c57cd9b3/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", size = 149383 }, + { url = "https://files.pythonhosted.org/packages/fb/6f/4e78c3b97686b871db9be6f31d64e9264e889f8c9d7ab33c771f847f79b7/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", size = 142214 }, + { url = "https://files.pythonhosted.org/packages/2b/c9/1c8fe3ce05d30c87eff498592c89015b19fade13df42850aafae09e94f35/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", size = 144104 }, + { url = "https://files.pythonhosted.org/packages/ee/68/efad5dcb306bf37db7db338338e7bb8ebd8cf38ee5bbd5ceaaaa46f257e6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", size = 146255 }, + { url = "https://files.pythonhosted.org/packages/0c/75/1ed813c3ffd200b1f3e71121c95da3f79e6d2a96120163443b3ad1057505/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", size = 140251 }, + { url = "https://files.pythonhosted.org/packages/7d/0d/6f32255c1979653b448d3c709583557a4d24ff97ac4f3a5be156b2e6a210/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", size = 148474 }, + { url = "https://files.pythonhosted.org/packages/ac/a0/c1b5298de4670d997101fef95b97ac440e8c8d8b4efa5a4d1ef44af82f0d/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", size = 151849 }, + { url = "https://files.pythonhosted.org/packages/04/4f/b3961ba0c664989ba63e30595a3ed0875d6790ff26671e2aae2fdc28a399/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", size = 149781 }, + { url = "https://files.pythonhosted.org/packages/d8/90/6af4cd042066a4adad58ae25648a12c09c879efa4849c705719ba1b23d8c/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482", size = 144970 }, + { url = 
"https://files.pythonhosted.org/packages/cc/67/e5e7e0cbfefc4ca79025238b43cdf8a2037854195b37d6417f3d0895c4c2/charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", size = 94973 }, + { url = "https://files.pythonhosted.org/packages/65/97/fc9bbc54ee13d33dc54a7fcf17b26368b18505500fc01e228c27b5222d80/charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", size = 102308 }, + { url = "https://files.pythonhosted.org/packages/bf/9b/08c0432272d77b04803958a4598a51e2a4b51c06640af8b8f0f908c18bf2/charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", size = 49446 }, +] + +[[package]] +name = "click" +version = "8.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", size = 97941 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "gmail" +version = "0.1.0" +source = { editable = "." 
} +dependencies = [ + { name = "google-api-python-client" }, + { name = "google-auth-httplib2" }, + { name = "google-auth-oauthlib" }, + { name = "httpx" }, + { name = "mcp" }, +] + +[package.metadata] +requires-dist = [ + { name = "google-api-python-client", specifier = ">=2.156.0" }, + { name = "google-auth-httplib2", specifier = ">=0.2.0" }, + { name = "google-auth-oauthlib", specifier = ">=1.2.1" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "mcp", specifier = ">=1.1.2" }, +] + +[[package]] +name = "google-api-core" +version = "2.24.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "googleapis-common-protos" }, + { name = "proto-plus" }, + { name = "protobuf" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/56/d70d66ed1b5ab5f6c27bf80ec889585ad8f865ff32acbafd3b2ef0bfb5d0/google_api_core-2.24.0.tar.gz", hash = "sha256:e255640547a597a4da010876d333208ddac417d60add22b6851a0c66a831fcaf", size = 162647 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/76/65b8b94e74bf1b6d1cc38d916089670c4da5029d25762441d8c5c19e51dd/google_api_core-2.24.0-py3-none-any.whl", hash = "sha256:10d82ac0fca69c82a25b3efdeefccf6f28e02ebb97925a8cce8edbfe379929d9", size = 158576 }, +] + +[[package]] +name = "google-api-python-client" +version = "2.156.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-api-core" }, + { name = "google-auth" }, + { name = "google-auth-httplib2" }, + { name = "httplib2" }, + { name = "uritemplate" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/e6/91022ee180ea77ab56147602690bb9b03459b19bf478b64e89be5f5757d5/google_api_python_client-2.156.0.tar.gz", hash = "sha256:b809c111ded61716a9c1c7936e6899053f13bae3defcdfda904bd2ca68065b9c", size = 12230345 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/62/3a25fac4e36099c2789eda41d4bc99807871c55cedcf02bc01706087b054/google_api_python_client-2.156.0-py2.py3-none-any.whl", hash = "sha256:6352185c505e1f311f11b0b96c1b636dcb0fec82cd04b80ac5a671ac4dcab339", size = 12739499 }, +] + +[[package]] +name = "google-auth" +version = "2.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/af/b25763b9d35dfc2c6f9c3ec34d8d3f1ba760af3a7b7e8d5c5f0579522c45/google_auth-2.37.0.tar.gz", hash = "sha256:0054623abf1f9c83492c63d3f47e77f0a544caa3d40b2d98e099a611c2dd5d00", size = 268878 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/8d/4d5d5f9f500499f7bd4c93903b43e8d6976f3fc6f064637ded1a85d09b07/google_auth-2.37.0-py2.py3-none-any.whl", hash = "sha256:42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0", size = 209829 }, +] + +[[package]] +name = "google-auth-httplib2" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "httplib2" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/be/217a598a818567b28e859ff087f347475c807a5649296fb5a817c58dacef/google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05", size = 10842 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/8a/fe34d2f3f9470a27b01c9e76226965863f153d5fbe276f83608562e49c04/google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = 
"sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d", size = 9253 }, +] + +[[package]] +name = "google-auth-oauthlib" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/0f/1772edb8d75ecf6280f1c7f51cbcebe274e8b17878b382f63738fd96cee5/google_auth_oauthlib-1.2.1.tar.gz", hash = "sha256:afd0cad092a2eaa53cd8e8298557d6de1034c6cb4a740500b5357b648af97263", size = 24970 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/8e/22a28dfbd218033e4eeaf3a0533b2b54852b6530da0c0fe934f0cc494b29/google_auth_oauthlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:2d58a27262d55aa1b87678c3ba7142a080098cbc2024f903c62355deb235d91f", size = 24930 }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.66.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/a7/8e9cccdb1c49870de6faea2a2764fa23f627dd290633103540209f03524c/googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c", size = 114376 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/0f/c0713fb2b3d28af4b2fded3291df1c4d4f79a00d15c2374a9e010870016c/googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed", size = 221682 }, +] + +[[package]] +name = "h11" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 }, +] + +[[package]] +name = "httplib2" +version = "0.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyparsing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/ad/2371116b22d616c194aa25ec410c9c6c37f23599dcd590502b74db197584/httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81", size = 351116 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/6c/d2fbdaaa5959339d53ba38e94c123e4e84b8fbc4b84beb0e70d7c1608486/httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc", size = 96854 }, +] + +[[package]] +name = "httpx" +version = "0.28.1" 
+source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/60/8f4281fa9bbf3c8034fd54c0e7412e66edbab6bc74c4996bd616f8d0406e/httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721", size = 12624 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/9b/a181f281f65d776426002f330c31849b86b31fc9d848db62e16f03ff739f/httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f", size = 7819 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "mcp" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "pydantic" }, + { name = "sse-starlette" }, + { name = "starlette" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9b/f3/5cf212e60681ea6da0dbb6e0d1bc0ab2dbf5eebc749b69663d46f114fea1/mcp-1.1.2.tar.gz", hash = "sha256:694aa9df7a8641b24953c935eb72c63136dc948981021525a0add199bdfee402", size = 57628 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/40/9883eac3718b860d4006eba1920bfcb628f0a1fe37fac46a4f4e391edca6/mcp-1.1.2-py3-none-any.whl", hash = "sha256:a4d32d60fd80a1702440ba4751b847a8a88957a1f7b059880953143e9759965a", size = 36652 }, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/fa/fbf4001037904031639e6bfbfc02badfc7e12f137a8afa254df6c4c8a670/oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918", size = 177352 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/80/cab10959dc1faead58dc8384a781dfbf93cb4d33d50988f7a69f1b7c9bbe/oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", size = 151688 }, +] + +[[package]] +name = "proto-plus" +version = "1.25.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/7e/05/74417b2061e1bf1b82776037cad97094228fa1c1b6e82d08a78d3fb6ddb6/proto_plus-1.25.0.tar.gz", hash = "sha256:fbb17f57f7bd05a68b7707e745e26528b0b3c34e378db91eef93912c54982d91", size = 56124 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dd/25/0b7cc838ae3d76d46539020ec39fc92bfc9acc29367e58fe912702c2a79e/proto_plus-1.25.0-py3-none-any.whl", hash = "sha256:c91fc4a65074ade8e458e95ef8bac34d4008daa7cce4a12d6707066fca648961", size = 50126 }, +] + +[[package]] +name = "protobuf" +version = "5.29.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a5/73/4e6295c1420a9d20c9c351db3a36109b4c9aa601916cb7c6871e3196a1ca/protobuf-5.29.2.tar.gz", hash = "sha256:b2cc8e8bb7c9326996f0e160137b0861f1a82162502658df2951209d0cb0309e", size = 424901 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/42/6db5387124708d619ffb990a846fb123bee546f52868039f8fa964c5bc54/protobuf-5.29.2-cp310-abi3-win32.whl", hash = "sha256:c12ba8249f5624300cf51c3d0bfe5be71a60c63e4dcf51ffe9a68771d958c851", size = 422697 }, + { url = "https://files.pythonhosted.org/packages/6c/38/2fcc968b377b531882d6ab2ac99b10ca6d00108394f6ff57c2395fb7baff/protobuf-5.29.2-cp310-abi3-win_amd64.whl", hash = "sha256:842de6d9241134a973aab719ab42b008a18a90f9f07f06ba480df268f86432f9", size = 434495 }, + { url = "https://files.pythonhosted.org/packages/cb/26/41debe0f6615fcb7e97672057524687ed86fcd85e3da3f031c30af8f0c51/protobuf-5.29.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a0c53d78383c851bfa97eb42e3703aefdc96d2036a41482ffd55dc5f529466eb", size = 417812 }, + { url = "https://files.pythonhosted.org/packages/e4/20/38fc33b60dcfb380507b99494aebe8c34b68b8ac7d32808c4cebda3f6f6b/protobuf-5.29.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:494229ecd8c9009dd71eda5fd57528395d1eacdf307dbece6c12ad0dd09e912e", size = 319562 }, + { url = "https://files.pythonhosted.org/packages/90/4d/c3d61e698e0e41d926dbff6aa4e57428ab1a6fc3b5e1deaa6c9ec0fd45cf/protobuf-5.29.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:b6b0d416bbbb9d4fbf9d0561dbfc4e324fd522f61f7af0fe0f282ab67b22477e", size = 319662 }, + { url = "https://files.pythonhosted.org/packages/f3/fd/c7924b4c2a1c61b8f4b64edd7a31ffacf63432135a2606f03a2f0d75a750/protobuf-5.29.2-py3-none-any.whl", hash = "sha256:fde4554c0e578a5a0bcc9a276339594848d1e89f9ea47b4427c80e5d72f90181", size = 172539 }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135 }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1d/67/6afbf0d507f73c32d21084a79946bfcfca5fbc62a72057e9c23797a737c9/pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c", size = 310028 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/77/89/bc88a6711935ba795a679ea6ebee07e128050d6382eaa35a0a47c8032bdc/pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd", size = 181537 }, +] + +[[package]] +name = "pydantic" +version = "2.10.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/7e/fb60e6fee04d0ef8f15e4e01ff187a196fa976eb0f0ab524af4599e5754c/pydantic-2.10.4.tar.gz", hash = "sha256:82f12e9723da6de4fe2ba888b5971157b3be7ad914267dea8f05f82b28254f06", size = 762094 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/26/3e1bbe954fde7ee22a6e7d31582c642aad9e84ffe4b5fb61e63b87cd326f/pydantic-2.10.4-py3-none-any.whl", hash = "sha256:597e135ea68be3a37552fb524bc7d0d66dcf93d395acd93a00682f1efcb8ee3d", size = 431765 }, +] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 }, + { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 }, + { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 }, + { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 }, + { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 }, + { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 }, + { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 }, + { url = 
"https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 }, + { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 }, + { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 }, + { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 }, + { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064 }, + { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046 }, + { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092 }, + { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 }, + { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 }, + { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 }, + { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 }, + { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 }, + { url = 
"https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 }, + { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 }, + { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 }, + { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 }, + { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 }, + { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 }, + { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387 }, + { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453 }, + { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186 }, +] + +[[package]] +name = "pyparsing" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/d5/e5aeee5387091148a19e1145f63606619cb5f20b83fccb63efae6474e7b2/pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c", size = 920984 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/ec/2eb3cd785efd67806c46c13a17339708ddc346cbb684eade7a6e6f79536a/pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84", size = 106921 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = 
"sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179 }, +] + +[[package]] +name = "rsa" +version = "4.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/65/7d973b89c4d2351d7fb232c2e452547ddfa243e93131e7cfa766da627b52/rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21", size = 29711 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/97/fa78e3d2f65c02c8e1268b9aba606569fe97f6c8f7c2d74394553347c145/rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7", size = 34315 }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, +] + +[[package]] +name = "sse-starlette" +version = "2.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "starlette" }, + { name = "uvicorn" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/fc/56ab9f116b2133521f532fce8d03194cf04dcac25f583cf3d839be4c0496/sse_starlette-2.1.3.tar.gz", hash = "sha256:9cd27eb35319e1414e3d2558ee7414487f9529ce3b3cf9b21434fd110e017169", size = 19678 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/aa/36b271bc4fa1d2796311ee7c7283a3a1c348bad426d37293609ca4300eef/sse_starlette-2.1.3-py3-none-any.whl", hash = "sha256:8ec846438b4665b9e8c560fcdea6bc8081a3abf7942faa95e5a744999d219772", size = 9383 }, +] + +[[package]] +name = "starlette" +version = "0.42.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3e/ae/0c98794b248370ce30f71018d0f39889f1d90c73a631e68e2f47e5efda2f/starlette-0.42.0.tar.gz", hash = "sha256:91f1fbd612f3e3d821a8a5f46bf381afe2a9722a7b8bbde1c07fb83384c2882a", size = 2575136 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c0/38/f790c69b2cbfe9cd4a8a89db1ef50d0a10e5121c07ff8b1d7c16d7807f41/starlette-0.42.0-py3-none-any.whl", hash = "sha256:02f877201a3d6d301714b5c72f15cac305ea5cc9e213c4b46a5af7eecad0d625", size = 73356 }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, +] + +[[package]] +name = "uritemplate" +version = "4.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d2/5a/4742fdba39cd02a56226815abfa72fe0aa81c33bed16ed045647d6000eba/uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0", size = 273898 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/c0/7461b49cd25aeece13766f02ee576d1db528f1c37ce69aee300e075b485b/uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e", size = 10356 }, +] + +[[package]] +name = "urllib3" +version = "2.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9", size = 300677 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 }, +] + +[[package]] +name = "uvicorn" +version = "0.34.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 }, +] diff --git a/src/prompts/__init__.py b/src/prompts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..83d2911fc44849bfe9378d0fd703c1e1d7b3e928 --- /dev/null +++ b/src/prompts/__init__.py @@ -0,0 +1,66 @@ +""" +Prompt Registry Module +====================== + +Centralized prompt management using PromptLayer with optional local files. +Provides a singleton PromptManager instance for loading prompts from: +1. Local prompt files (if local_prompt_path is provided) +2. 
PromptLayer cloud service (if PROMPTLAYER_API_KEY is set) +""" + +import os +from .prompt_layer import PromptManager + +# Path to the templates folder inside the package +TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), "templates") + +# Singleton PromptManager instance +_prompt_manager = PromptManager(environment=os.getenv("PROMPT_ENVIRONMENT", "production")) +# Force local-only prompts; disable remote PromptLayer client if present +_prompt_manager.client = None + + +def get_prompt( + template_name: str, + version: int = None, + label: str = None, + local_prompt_path: str = None, + latest_version: bool = False, +) -> str: + """ + Load a prompt from local templates only (PromptLayer disabled here). + + Strategy: + - If local_prompt_path is provided, use it. + - Otherwise, use the default templates directory. + """ + # Normalize template name to match folder names (lowercase) + if template_name: + template_name = template_name.lower() + + if local_prompt_path: + if not os.path.isabs(local_prompt_path): + local_prompt_path = os.path.join(TEMPLATES_DIR, local_prompt_path) + else: + local_prompt_path = TEMPLATES_DIR + + return _prompt_manager.get_prompt( + template_name=template_name, + version=version, + label=label, + local_prompt_path=local_prompt_path, + latest_version=latest_version, + ) + + +def get_prompt_manager() -> PromptManager: + """Return singleton PromptManager.""" + return _prompt_manager + + +__all__ = [ + "get_prompt", + "get_prompt_manager", + "PromptManager", + "TEMPLATES_DIR" +] diff --git a/src/prompts/info.md b/src/prompts/info.md new file mode 100644 index 0000000000000000000000000000000000000000..14d268c66f5dc2bf2a7956247d0a9ca10d7f1dde --- /dev/null +++ b/src/prompts/info.md @@ -0,0 +1,69 @@ +# Prompt Management System + +This module provides a centralized way to manage prompts using **PromptLayer** with a local filesystem fallback. It allows you to version prompts, manage environments (dev/staging/prod), and easily switch between local development and cloud-managed prompts. + +## 🚀 Usage + +Import the `get_prompt` function to load prompts anywhere in your application. + +```python +from src.prompts import get_prompt +``` + +### 1. Load from PromptLayer (Default) +By default, if `PROMPTLAYER_API_KEY` is set in your environment, it will fetch the prompt from PromptLayer using the configured environment label (default: `production`). + +```python +# Fetches 'DB_Executor' tagged with current environment (e.g., 'production') +system_prompt = get_prompt("DB_Executor") +``` + +### 2. Load Latest Version (Ignore Environment) +Useful for testing or when you want to ensure you have the absolute latest saved version from PromptLayer, ignoring any 'prod' or 'dev' tags. + +```python +# Fetches the absolute latest version of the template +system_prompt = get_prompt("DB_Executor", latest_version=True) +``` + +### 3. Force Load from Local File +You can force loading from a local file, which is useful for local development without an internet connection or for testing new prompts before pushing to PromptLayer. + +```python +# Loads from src/prompts/templates/db_executor/v1.txt +# (Assuming 'v1.txt' is the file name in that directory, or provide full path) +system_prompt = get_prompt("db_executor/v1", local_prompt_path="src/prompts/templates") +``` + +If you don't provide a `local_prompt_path` but also don't have a `PROMPTLAYER_API_KEY` set, it defaults to looking in `src/prompts/templates`. 
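For example, a minimal sketch of the default fallback (assumes no `PROMPTLAYER_API_KEY` is set and that the bundled `supervisor` template exists at `src/prompts/templates/supervisor/v1.txt`):

```python
from src.prompts import get_prompt

# With no API key and no local_prompt_path, the lookup falls back to the
# default templates directory: it first tries supervisor.txt, then
# supervisor/v1.txt inside src/prompts/templates/.
system_prompt = get_prompt("supervisor")
print(system_prompt[:80])  # sanity check: prints the start of the prompt
```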
+ +## 📂 Directory Structure + +Store your local prompt backups in `src/prompts/templates/`. + +``` +src/prompts/ +├── __init__.py # Exposes get_prompt +├── prompt_layer.py # Core logic +├── templates/ # Local prompt storage +│ ├── db_executor/ +│ │ └── v1.txt +│ ├── supervisor/ +│ │ └── v1.txt +│ └── ... +└── info.md # This file +``` + +## ⚙️ Configuration + +- **`PROMPTLAYER_API_KEY`**: Set this env var to enable PromptLayer. +- **`PROMPT_ENVIRONMENT`**: Set to `dev`, `staging`, or `production` (default) to control which tagged version is loaded. + +## 🔍 Debugging + +The system prints clear logs to stdout so you know where your prompt came from: + +- `📋 Loaded prompt '...' from PromptLayer (env=production)` +- `📋 Loaded prompt '...' from PromptLayer (latest version)` +- `📄 Loaded prompt '...' from local file: ...` + diff --git a/src/prompts/prompt_layer.py b/src/prompts/prompt_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..03691758243efbeff1f28abb6ac37d33162bd1bb --- /dev/null +++ b/src/prompts/prompt_layer.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python3 +""" +PromptLayer Integration for Prompt Management +============================================== + +This module provides a centralized way to manage prompts using the PromptLayer platform. +It allows for versioned, labeled prompts that can be easily updated without code changes. +""" + +from promptlayer import PromptLayer +from dotenv import load_dotenv +import os +from typing import Dict, Any, Optional +from functools import lru_cache + +load_dotenv() + + +class PromptManager: + """ + Centralized prompt management using the PromptLayer platform. + link: + - https://www.promptlayer.com + + Features: + - Version control for prompts + - Environment-based prompt labels (dev, staging, production) + - Caching for performance + - Fallback to local files if PromptLayer unavailable + """ + + def __init__(self, api_key: Optional[str] = None, environment: str = "production"): + """ + Initialize PromptManager. + + Args: + api_key: PromptLayer API key (defaults to PROMPTLAYER_API_KEY env var) + environment: Environment label for prompts (dev, staging, production) + """ + self.api_key = api_key or os.getenv("PROMPTLAYER_API_KEY") + self.environment = environment + self.client = None + + # Initialize client if API key is available + if self.api_key: + try: + self.client = PromptLayer(api_key=self.api_key) + print(f"✅ PromptLayer connected (environment: {environment})") + + except Exception as e: + print(f"⚠️ PromptLayer connection failed: {e}") + self.client = None + else: + print("⚠️ No PROMPTLAYER_API_KEY found, using local fallback") + + @lru_cache(maxsize=128) + def get_prompt( + self, + template_name: str, + version: Optional[int] = None, + label: Optional[str] = None, + local_prompt_path: Optional[str] = None, + latest_version: bool = False, + ) -> str: + """ + Load a prompt from: + 1. A local prompt file (if local_prompt_path is provided) + 2.
PromptLayer (if no local path provided) + + Args: + template_name: Name of the prompt template + version: Version for PromptLayer + label: Environment label + local_prompt_path: Full path to local file OR directory containing prompt files + latest_version: If True, explicitly fetch the latest version (ignoring label) + + Returns: + str: Prompt content + """ + + # 1️⃣ Try PromptLayer FIRST if client is available + label = label or self.environment + + if self.client: + try: + if latest_version: + # Fetch the latest template definition directly without execution + response = self.client.templates.get(template_name) + + # Extract the prompt text from llm_kwargs (preferred) or prompt_template + prompt_content = None + + # Strategy 1: Try llm_kwargs (cleanest format) + if isinstance(response, dict) and "llm_kwargs" in response: + messages = response["llm_kwargs"].get("messages", []) + # Try to find system message + for msg in messages: + if msg.get("role") == "system": + prompt_content = msg.get("content") + break + # Fallback to first message + if prompt_content is None and messages: + prompt_content = messages[0].get("content") + + # Strategy 2: Try prompt_template dictionary structure + if prompt_content is None and isinstance(response, dict) and "prompt_template" in response: + pt = response["prompt_template"] + if isinstance(pt, dict) and "messages" in pt: + messages = pt["messages"] + for msg in messages: + # Check role if available + if msg.get("role") == "system" and "content" in msg: + content_list = msg["content"] + if isinstance(content_list, list) and content_list: + # Extract text from content list [{'type': 'text', 'text': '...'}] + for item in content_list: + if item.get("type") == "text": + prompt_content = item.get("text") + break + if prompt_content: break + + # Fallback: first message content + if prompt_content is None and messages and "content" in messages[0]: + content_list = messages[0]["content"] + if isinstance(content_list, list) and content_list: + for item in content_list: + if item.get("type") == "text": + prompt_content = item.get("text") + break + + # Fallback: Stringify if nothing else found + if prompt_content is None: + prompt_content = str(response) + + print( + f"📋 Loaded prompt '{template_name}' from PromptLayer (latest version)", + flush=True + ) + return prompt_content + + # Standard flow using labels (existing logic) + response = self.client.run( + prompt_name=template_name, + input_variables={}, + tags=[label], + ) + + if isinstance(response, dict): + prompt_content = response.get("output") or str(response) + else: + prompt_content = str(response) + + print( + f"📋 Loaded prompt '{template_name}' from PromptLayer (env={label})", + flush=True # force the output to the buffer immediately, + # ensuring it shows up in the docker compose log stream immediately. + ) + return prompt_content + + except Exception as e: + print(f"⚠️ PromptLayer failed: {e}. 
Falling back to local templates...", flush=True) + + # 2️⃣ Fall back to local files if PromptLayer failed or unavailable + if local_prompt_path: + try: + # If a directory is passed, append template_name + .txt + if os.path.isdir(local_prompt_path): + # Try exact match first: template_name.txt (case-sensitive) + file_path = os.path.join(local_prompt_path, f"{template_name}.txt") + + # If not found, try subdirectory with lowercase template_name + if not os.path.exists(file_path): + lowercase_name = template_name.lower() + file_path = os.path.join(local_prompt_path, lowercase_name, "v1.txt") + + # If still not found, try subdirectory with original template_name + if not os.path.exists(file_path): + file_path = os.path.join(local_prompt_path, template_name, "v1.txt") + else: + file_path = local_prompt_path + + with open(file_path, "r", encoding="utf-8") as f: + print(f"📄 Loaded prompt '{template_name}' from local file: {file_path}", flush=True) + return f.read() + + except Exception as e: + raise ValueError( + f"❌ Failed to load '{template_name}' from local path '{local_prompt_path}': {e}" + ) + + raise ValueError( + f"❌ Failed to load '{template_name}': PromptLayer unavailable and no local_prompt_path provided." + ) + + def list_available_prompts(self) -> Dict[str, Any]: + """ + List all available prompts from PromptLayer. + + Returns: + Dictionary of available prompts with metadata + """ + if not self.client: + return {"error": "PromptLayer client not available"} + + try: + # This would depend on PromptLayer's API for listing templates + # Placeholder implementation + return { + "message": "PromptLayer template listing not implemented in this version", + "available_methods": [ + "get_judge_prompt(simple=True/False)", + "get_agent_prompt(version=int)", + "get_prompt(template_name, version, label, fallback_path)" + ] + } + except Exception as e: + return {"error": f"Failed to list prompts: {e}"} + + def clear_cache(self) -> None: + """Clear the prompt cache.""" + self.get_prompt.cache_clear() + print("🗑️ Prompt cache cleared") + + def set_environment(self, environment: str) -> None: + """ + Change the environment label for subsequent prompt requests. + + Args: + environment: New environment (dev, staging, production) + """ + self.environment = environment + self.clear_cache() # Clear cache since environment changed + print(f"🔄 Environment changed to: {environment}") diff --git a/src/prompts/templates/compactor/v1.txt b/src/prompts/templates/compactor/v1.txt new file mode 100644 index 0000000000000000000000000000000000000000..b929407f6c5e30a3d06d1430c6b87a519381a544 --- /dev/null +++ b/src/prompts/templates/compactor/v1.txt @@ -0,0 +1,7 @@ +You are a summarizer. You take a long chain of messages between an assistant and a user, and compact them into a summary. You speak from a first-person perspective, as if you were the assistant. + +Your goal is to produce high-level summaries of the conversation in order to reduce its token count. For important topics, use more tokens. For less important topics, summarize in fewer tokens. In addition, for newer conversation items, use more tokens. For older ones, use fewer. + +Always include emails, names, phone numbers, and other key info. You don't need to repeat them verbatim more than once in the compaction. + +You are not speaking to the user. You may NOT add ANY new message to ANYONE. Summarize and stop there.
\ No newline at end of file diff --git a/src/prompts/templates/cv_screener/v1.txt b/src/prompts/templates/cv_screener/v1.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8ce573eb06114ed16c0b3830359a7c989f6846a --- /dev/null +++ b/src/prompts/templates/cv_screener/v1.txt @@ -0,0 +1,10 @@ +You are an HR assistant evaluating how well a candidate's CV matches a given +job description. Generate a concise assessment summary first to ground your +reasoning. Then assign calibrated match scores between 0 and 1. + +The scores should be based on the following criteria: + 1. Skills Match Score: How well the candidate's skills match the job description. + 2. Experience Match Score: How well the candidate's experience matches the job description. + 3. Education Match Score: How well the candidate's education matches the job description. + 4. Overall Fit Score: How well the candidate's CV fits the job description. + diff --git a/src/prompts/templates/db_executor/v1.txt b/src/prompts/templates/db_executor/v1.txt new file mode 100644 index 0000000000000000000000000000000000000000..9948549597b3372cd7fc824d816e5515eab838b4 --- /dev/null +++ b/src/prompts/templates/db_executor/v1.txt @@ -0,0 +1,74 @@ +You are the **Database Executor Agent**, responsible for generating +and executing **SQLAlchemy ORM-style** Python code on behalf of the HR Supervisor Agent. + +Your job: perform safe and deterministic **read/write/update operations** +in the HR recruitment database, based on clear natural-language requests. + +--- + +### ✅ Rules +1. Use SQLAlchemy ORM — not raw SQL. +2. Use `session` (provided) for all queries. +3. Return clean Python dict or list results — no ORM objects. +4. Commit only when needed (`session.commit()`). +5. Never alter schema, connection, or delete/drop tables. +6. Validate record existence before updating or inserting. +7. Briefly explain what was done in plain English. + +--- + +### 🧩 Database Overview (ORM Models) +**Note**: All these models are already imported and available in the global context. +**DO NOT** try to import them again. Use them directly (e.g. `session.query(Candidate)...`). + +**Candidate** +- id (UUID, PK) +- full_name, email (unique), phone_number +- cv_file_path, parsed_cv_file_path +- status (Enum: `applied`, `cv_screened`, `cv_passed`, `cv_rejected`, `voice_passed`, `voice_rejected`, `interview_scheduled`, `decision_made`) +- Relationships → `cv_screening_results`, `voice_screening_results`, `interview_scheduling`, `final_decision` + +**CVScreeningResult** +- candidate_id → Candidate.id +- skills_match_score, experience_match_score, education_match_score, overall_fit_score +- llm_feedback, reasoning_trace (JSON), timestamp + +**VoiceScreeningResult** +- candidate_id → Candidate.id +- transcript_text, sentiment_score, communication_score, confidence_score +- llm_summary, llm_judgment_json, audio_url, timestamp + +**InterviewScheduling** +- candidate_id → Candidate.id +- calendar_event_id, start_time, end_time +- status (Enum: `scheduled`, `completed`, `cancelled`) + +**FinalDecision** +- candidate_id → Candidate.id +- overall_score, decision (Enum: `hire`, `reject`, `maybe`) +- llm_rationale, human_notes, timestamp + +--- + +### 🧾 Expected Execution Pattern +When asked to perform a task, you must: +1. Construct ORM-based Python code using `session` and the given models. +2. Store final results in a variable named `result`. +3. Print the results using: +```python +import json +print(json.dumps(result, indent=2, default=str)) +``` +4.
Optionally, include a short explanatory comment after the code. + +### 🧾 Output Format +1. **Execution:** Your Python code must `print()` the results so they are visible in the tool output. +2. **Final Response:** After the code runs, provide a **clear, natural language summary** of what you found or did. + - *Example:* "I successfully updated the status for Sebastian Wefers to 'scheduled'." + - *Example:* "I retrieved 3 candidates: John, Jane, and Bob." + +### 🚨 Error Handling +If you encounter errors: +1. **Self-Correction:** Attempt to fix the code and retry within the reasoning loop. +2. **Terminal Failure:** If you cannot resolve the issue, explain the problem clearly to the user in plain English. + - *Example:* "I tried to update the record, but I could not find a candidate with that email address." \ No newline at end of file diff --git a/src/prompts/templates/db_executor/v2.txt b/src/prompts/templates/db_executor/v2.txt new file mode 100644 index 0000000000000000000000000000000000000000..51c62791ea6baf25c94853fd3daa0f00c7e0f04f --- /dev/null +++ b/src/prompts/templates/db_executor/v2.txt @@ -0,0 +1,55 @@ +You are the **Database Executor Agent**, responsible for generating and executing **SQLAlchemy ORM-style** Python code on behalf of the HR Supervisor Agent. +Your job: perform safe and deterministic **read/write/update operations** in the HR recruitment database, based on clear natural-language requests. +--- +# Rules +1. Use SQLAlchemy ORM, not raw SQL. +2. Use `session` for all queries. +3. Return clean Python dict or list results, no ORM objects. +4. Commit only when needed (`session.commit()`). +5. Never alter schema, connection, or delete/drop tables. +6. Validate record existence before updating or inserting. +7. Briefly explain what was done in plain English. +--- +# Database Overview (ORM Models) +**Note**: All these models are already imported and available in the global context. +**DO NOT** try to import them again. Use them directly (e.g. `session.query(Candidate)...`). +**Candidate** +- id (UUID, PK) +- full_name, email (unique), phone_number +- cv_file_path, parsed_cv_file_path, created_at, updated_at, auth_code +- status (Enum: `applied`, `cv_screened`, `cv_passed`, `cv_rejected`, `voice_invitation_sent`, `voice_done`, `voice_passed`, `voice_rejected`, `interview_scheduled`, `decision_made`) +- Relationships → `cv_screening_results`, `voice_screening_results`, `interview_scheduling`, `final_decision` +**CVScreeningResult** +- candidate_id → Candidate.id +- job_title, skills_match_score, experience_match_score, education_match_score, overall_fit_score +- llm_feedback, reasoning_trace (JSON), timestamp +**VoiceScreeningResult** +- candidate_id → Candidate.id +- call_sid, transcript_text, sentiment_score, communication_score, confidence_score +- llm_summary, llm_judgment_json, audio_url, timestamp +**InterviewScheduling** +- candidate_id → Candidate.id +- calendar_event_id, event_summary, start_time, end_time +- status (Enum: `scheduled`, `completed`, `cancelled`, `passed`, `rejected`) +**FinalDecision** +- candidate_id → Candidate.id +- overall_score, decision (Enum: `hired`, `rejected`, `pending`) +- llm_rationale, human_notes, timestamp +--- +# Expected Execution Pattern +When asked to perform a task, you must: +1. Construct ORM-based Python code using `session` and the given models. +2. Store final results in a variable named `result`. +3. Print the results using: +```python +import json +print(json.dumps(result, indent=2, default=str)) +``` +4.
+# Output Format +1. **Execution:** Your Python code must `print()` the results so they are visible in the tool output. +2. **Final Response:** After the code runs, provide a **clear, natural language summary** of what you found or did. It should be clear enough for a non-technical reader to understand. +# Error Handling +If you encounter errors: +1. **Self-Correction:** Attempt to fix the code and retry within the reasoning loop. +2. **Terminal Failure:** If you cannot resolve the issue, explain the problem clearly in plain English. Provide verbatim snippets of the error. \ No newline at end of file diff --git a/src/prompts/templates/gcalendar/v1.txt b/src/prompts/templates/gcalendar/v1.txt new file mode 100644 index 0000000000000000000000000000000000000000..9ea82387205fd47a6d7485d0e31c3af5e9f0cc6b --- /dev/null +++ b/src/prompts/templates/gcalendar/v1.txt @@ -0,0 +1,10 @@ +You are a scheduling assistant authorized to use Google Calendar MCP tools. +For instance, you can list, create, and analyze events. + +IMPORTANT: +- For any requests regarding "my calendar", "my availability", or general scheduling without specific attendees, assume the "primary" calendar. +- You do NOT need to ask for a calendar ID for the user; the system defaults to their primary calendar. +- Only ask for calendar IDs if the user asks about a specific third party whose email/ID is not known. + +Always confirm the action taken, and if an error occurs, report it back +for transparency and troubleshooting. \ No newline at end of file diff --git a/src/prompts/templates/gmail/v1.txt b/src/prompts/templates/gmail/v1.txt new file mode 100644 index 0000000000000000000000000000000000000000..815f72879758b76c945e0db69520daa26916800e --- /dev/null +++ b/src/prompts/templates/gmail/v1.txt @@ -0,0 +1,4 @@ +You are an agent authorized to use Gmail MCP tools. +For instance, you can read, search, create drafts, and send emails. +When asked to send an email, confirm the details before sending if anything is ambiguous, +but if the instruction is clear, proceed. \ No newline at end of file diff --git a/src/prompts/templates/supervisor/v1.txt b/src/prompts/templates/supervisor/v1.txt new file mode 100644 index 0000000000000000000000000000000000000000..401c2d8d6a248f1a86682adc101fe771c1849a6c --- /dev/null +++ b/src/prompts/templates/supervisor/v1.txt @@ -0,0 +1,50 @@ +You are the **Supervisor Agent** overseeing the entire recruitment workflow. +You act on behalf of the HR manager **Casey Jordan** (`hr.cjordan.agent.hack.winter25@gmail.com`), +who is the only person talking to you. + +Understand the candidate lifecycle status flow: +`applied` → `cv_screened` → `interview_scheduled` → `decision_made`. + +--- + +### 🎯 Your Role +You coordinate and supervise the hiring process from CV submission to final decision. +You have access to specialized sub-agents that handle: +- Database operations (querying, updating, reporting) +- CV screening and evaluation +- Email communication (for candidates and Casey) +- Calendar scheduling (for HR meetings and interviews) + +You do **not** perform these actions yourself — instead, you **delegate** to sub-agents when needed. +--- + +### ⚙️ Recruitment Process Overview +1. **Application submitted** → Candidate starts with status `applied`. +2. **CV screening** → + - Run `cv_screening_workflow` (updates status to `cv_screened` automatically). + - Ask `db_executor` to "evaluate screening results" (updates status to `cv_passed` or `cv_rejected`). 
+ Here you can optionally specify a minimum passing score (default is 7.0). +3. **Notification** → + - If `cv_rejected`, send a polite rejection email. + - If `cv_passed`, send an email requesting available time slots for a voice or in-person interview. +4. **Scheduling** → + - Use the calendar agent to check **our (HR)** availability (`primary` calendar). + - You CANNOT check the candidate's calendar. You must **ask** the candidate for their preferred times via email. + - Once a time is agreed upon, use the calendar agent to schedule the interview. +5. **Decision** → Once interviews are complete, record and communicate the final decision. + +Always notify Casey of each status change and its new value. +--- + +### 🧠 Reasoning & Planning Strategy +Before calling tools, **THINK**: +1. **Sequential Dependencies (Action A → Action B):** If Action B requires data (like an email address), perform Action A (fetch data) first. + - **Example:** Before asking `gmail_agent` to send an email, you **must always** ask `db_executor` to retrieve the candidate's email address first. +2. **Robust DB Instructions:** ALWAYS ask the `db_executor` to "**Create or update** the record" when changing status. NEVER just ask to "Update", as the record might not exist yet. + + +### 🧠 Your Behavior +- Use the available sub-agents for all database queries, screenings, email sends, and calendar operations. +- Respond clearly, professionally, and comprehensively to Casey’s requests. +- Always share with Casey what actions you have taken and what results were produced. +- If you or any sub-agent encounter an error, **notify Casey immediately** for troubleshooting. diff --git a/src/prompts/templates/supervisor/v2.txt b/src/prompts/templates/supervisor/v2.txt new file mode 100644 index 0000000000000000000000000000000000000000..c1617435dce60d8a9f0b7a8918d76f7144cd2931 --- /dev/null +++ b/src/prompts/templates/supervisor/v2.txt @@ -0,0 +1,58 @@ +You are the **Supervisor Agent** overseeing the entire recruitment workflow. You act on behalf of the HR manager **Casey Jordan** (`hr.cjordan.agent.hack.winter25@gmail.com`), who is the only person talking to you. +Understand the candidate lifecycle status flow: +1. `applied` (Application received) +2. `cv_screened` (CV Analyzed) +3. `cv_passed` or `cv_rejected` (Outcome of CV Screening) +4. `voice_invitation_sent` (If CV Passed) +5. `voice_done` (Candidate completed AI Voice Interview) +6. `voice_passed` or `voice_rejected` (Outcome of Voice Analysis) +7. `interview_scheduled` (Final Human Interview) +8. `decision_made` (Final Offer or Rejection) +--- +# Your Role +You coordinate and supervise the hiring process from CV submission to final decision. +You have access to specialized sub-agents that handle: +- Database operations (querying, updating, reporting) +- CV screening and evaluation +- Voice screening and analysis +- Email communication (for candidates and Casey) +- Calendar scheduling (for HR meetings and interviews) +You do **not** perform these actions yourself: instead, you **delegate** to sub-agents when needed. +--- +# Recruitment Process Overview +1. **Application submitted** → Candidate starts with status `applied`. +2. **CV screening** → + - Run `cv_screening_workflow` (updates status to `cv_screened` automatically). + - Ask `db_executor` to "evaluate screening results" (updates status to `cv_passed` or `cv_rejected`). + Here you can optionally specify a minimum passing score (default is 7.0). +3. 
**Voice Screening Invitation** → + - If `cv_rejected`, send a polite rejection email. + - If `cv_passed`, send a voice screening invitation email that includes the candidate's `auth_code` (retrieved from the database) for screening access. + - Update status to `voice_invitation_sent` via `db_executor`. +4. **Voice Screening** → + - Candidates complete the AI voice interview. + - The system updates status to `voice_done` automatically. + - Ask `voice_judge` to "evaluate voice screening results" (this automatically updates status to `voice_passed` or `voice_rejected`). +5. **Interview Invitation (Person-to-Person)** → + - If `voice_rejected`, send a polite rejection email. + - If `voice_passed`: + - Use the calendar agent to check **HR availability** for this and next week (`primary` calendar). + - Send a success email to the candidate suggesting these available time slots and asking for their preference. +6. **Scheduling** → + - Once the candidate replies with a preferred time, use the calendar agent to schedule the interview. + - Update status to `interview_scheduled`. +7. **Final Decision** → + - Once interviews are complete, record the final decision (`hired`, `rejected`, or `pending`). + - Communicate the result to the candidate. +Always notify Casey of each status change and its new value. +--- +# Reasoning & Planning Strategy +Before calling tools, **THINK**: +1. **Sequential Dependencies (Action A → Action B):** If Action B requires data (like an email address), perform Action A (fetch data) first. + - **Example:** Before asking `gmail_agent` to send an email, you **must always** ask `db_executor` to retrieve the candidate's email address first. +2. **Robust DB Instructions:** ALWAYS ask the `db_executor` to "**create or update** the record" when changing status. NEVER just ask to "Update", as the record might not exist yet. +# Your Behavior +- Use the available sub-agents for all database queries, screenings, email sends, and calendar operations. +- Respond clearly, professionally, and comprehensively to the user's requests. +- Always share with the user what actions you have taken and what results were produced. +- If you or any sub-agent encounter an error, **notify the user immediately**. diff --git a/src/prompts/templates/voice_screening/v1.txt b/src/prompts/templates/voice_screening/v1.txt new file mode 100644 index 0000000000000000000000000000000000000000..ac39ad9257b26b4857f6b223a4dff752973f4d96 --- /dev/null +++ b/src/prompts/templates/voice_screening/v1.txt @@ -0,0 +1 @@ +You are a helpful assistant. \ No newline at end of file diff --git a/src/prompts/templates/voice_screening_judge/v1.txt b/src/prompts/templates/voice_screening_judge/v1.txt new file mode 100644 index 0000000000000000000000000000000000000000..9e93957ac3390f3a35cd671b60eb4ecca15098bd --- /dev/null +++ b/src/prompts/templates/voice_screening_judge/v1.txt @@ -0,0 +1,34 @@ +You are an expert AI Voice Screening Judge for a recruitment agency. Your task is to evaluate a candidate's preliminary voice screening interview to determine their suitability for the specified position. +**Input Data:** +You will receive: +1. **Candidate Details**: Name and the Job Position they are applying for. +2. **Audio Recording**: The raw audio of the interview (if available). Use this to evaluate tone, pitch, and hesitation. +3. **Transcript**: A text transcript of the conversation. Use this to analyze the content of the answers. +**Evaluation Criteria:** +Analyze the interview based on the following four key metrics. 
Assign a score from **0.0 (lowest)** to **1.0 (highest)** for each: +1. **Sentiment Score**: + * Assess the candidate's attitude, enthusiasm, and professionalism. + * *High score*: Positive, energetic, polite, and professional. + * *Low score*: Negative, disinterested, rude, or flat affect. +2. **Confidence Score**: + * Evaluate how self-assured the candidate sounds. + * *High score*: Clear voice, steady pace, minimal filler words (um, uh), direct answers. + * *Low score*: Shaky voice, excessive hesitation, frequent fillers, uncertainty in answers. +3. **Communication Score**: + * Judge the clarity and effectiveness of their communication. + * *High score*: Articulate, concise, easy to understand, good grammar, stays on topic. + * *Low score*: Rambling, mumbling, hard to follow, answers unrelated questions. +4. **Proficiency Score**: + * Estimate their technical or role-specific suitability based on the content of their answers. + * *High score*: Demonstrates deep knowledge, relevant experience, and gives specific examples. + * *Low score*: Vague answers, lack of knowledge, or clearly unqualified for the role. +**Output Requirements:** +You must generate a structured evaluation containing: +* The four scores defined above. +* **llm_summary**: A concise, professional summary of the candidate's performance (2-3 sentences). +* **key_traits**: A list of 3-5 key adjectives or phrases describing the candidate (e.g., "Articulate", "Hesitant", "Strong Python knowledge"). +* **recommendation**: A clear final verdict string (e.g., "Strong Pass", "Pass", "Borderline", "Reject"). +**Important:** +* Be objective and fair. +* If audio is provided, prioritize it for judging Confidence and Sentiment. +* If the candidate is silent or the audio is unintelligible, assign low scores and note this in the summary. \ No newline at end of file diff --git a/src/prompts/test.py b/src/prompts/test.py new file mode 100644 index 0000000000000000000000000000000000000000..ad704d90ba13a0c350dcefc5faff4fbf0a28ffd3 --- /dev/null +++ b/src/prompts/test.py @@ -0,0 +1,26 @@ +import sys +import os + +# Add project root to path to ensure we can import from src +project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) +sys.path.append(project_root) + +from src.prompts import get_prompt + +def test_fetch_prompt(): + prompt_name = "DB_Executor" + print(f"Attempting to fetch prompt: {prompt_name}") + + try: + # Using latest_version=True to test the new feature and bypass env labels + print(f"Fetching prompt '{prompt_name}' using latest_version=True...") + prompt = get_prompt(prompt_name, latest_version=True) + print(f"\n✅ Successfully fetched '{prompt_name}':") + print("-" * 40) + print(prompt) + print("-" * 40) + except Exception as e: + print(f"\n❌ Error fetching prompt: {e}") + +if __name__ == "__main__": + test_fetch_prompt() diff --git a/src/sdk/__init__.py b/src/sdk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b84f1aae471659b22a2a4d5e1f6c0de017b65d43 --- /dev/null +++ b/src/sdk/__init__.py @@ -0,0 +1,33 @@ +""" +SDK for interacting with Recruitment Agent APIs. 
+ +Usage: + from src.sdk import SupervisorClient, CVUploadClient, DatabaseClient + + # Supervisor Agent + supervisor = SupervisorClient() + response = supervisor.chat("Show me all candidates") + print(response.content) + + # CV Upload + cv_client = CVUploadClient() + with open("my_cv.pdf", "rb") as f: + response = cv_client.submit( + full_name="Ada Lovelace", + email="ada@example.com", + cv_file=f, + filename="my_cv.pdf" + ) + + # Database Queries + db = DatabaseClient() + candidates = db.get_candidates(status="applied") + candidate = db.get_candidate_by_email("ada@example.com") +""" + +from src.sdk.supervisor import SupervisorClient +from src.sdk.cv_upload import CVUploadClient +from src.sdk.database import DatabaseClient + +__all__ = ["SupervisorClient", "CVUploadClient", "DatabaseClient"] + diff --git a/src/sdk/cv_upload.py b/src/sdk/cv_upload.py new file mode 100644 index 0000000000000000000000000000000000000000..9ac7cca7e946f334677d748151d2a3f5fef63c63 --- /dev/null +++ b/src/sdk/cv_upload.py @@ -0,0 +1,155 @@ +""" +CV Upload API Client. + +A client for submitting job applications with CV uploads. +""" + +import os +from dataclasses import dataclass +from typing import Optional, BinaryIO +import requests + + +def _clean_base_url(url: str) -> str: + """Normalize base URL to avoid issues from quoted env vars.""" + cleaned = url.strip().strip("\"'") + if cleaned.endswith("/"): + cleaned = cleaned[:-1] + return cleaned + + +@dataclass +class SubmitResponse: + """Response from a CV submission.""" + success: bool + message: str + candidate_name: str = "" + email: str = "" + cv_file_path: str = "" + already_exists: bool = False + + +class CVUploadClient: + """ + Client for the CV Upload API. + + Usage: + client = CVUploadClient() + + # Submit application + with open("my_cv.pdf", "rb") as f: + response = client.submit( + full_name="Ada Lovelace", + email="ada@example.com", + phone="+49 123 456789", + cv_file=f, + filename="my_cv.pdf" + ) + + if response.success: + print(f"Application submitted: {response.message}") + elif response.already_exists: + print("You already applied!") + else: + print(f"Error: {response.message}") + """ + + def __init__(self, base_url: Optional[str] = None, session_id: Optional[str] = None): + """ + Initialize the CV Upload client. + + Args: + base_url: API base URL. Defaults to CV_UPLOAD_API_URL env var + or http://localhost:8080/api/v1/cv + """ + raw = base_url or os.getenv( + "CV_UPLOAD_API_URL", + "http://localhost:8080/api/v1/cv" + ) + self.base_url = _clean_base_url(raw) + self.session_id = (session_id or os.getenv("SESSION_ID") or "").strip().strip("\"'") + + def _headers(self) -> dict: + headers = {} + if self.session_id: + headers["X-Session-Id"] = self.session_id + return headers + + def submit( + self, + full_name: str, + email: str, + cv_file: BinaryIO, + filename: str, + phone: str = "", + timeout: int = 120 + ) -> SubmitResponse: + """ + Submit a job application with CV. 
+ + Args: + full_name: Candidate's full name + email: Candidate's email address + cv_file: File-like object containing the CV (PDF or DOCX) + filename: Original filename of the CV + phone: Optional phone number + timeout: Request timeout in seconds + + Returns: + SubmitResponse with success status and details + + Raises: + requests.exceptions.RequestException: On connection errors + ValueError: On API errors + """ + files = { + "cv_file": (filename, cv_file, "application/octet-stream") + } + data = { + "full_name": full_name, + "email": email, + "phone": phone, + } + + response = requests.post( + f"{self.base_url}/submit", + files=files, + data=data, + headers=self._headers(), + timeout=timeout + ) + + if response.status_code == 400: + error = response.json().get("detail", "Invalid request") + raise ValueError(f"Validation error: {error}") + + if response.status_code == 500: + error = response.json().get("detail", "Server error") + raise ValueError(f"Server error: {error}") + + if response.status_code != 200: + raise ValueError(f"Unexpected status: {response.status_code}") + + data = response.json() + return SubmitResponse( + success=data["success"], + message=data["message"], + candidate_name=data.get("candidate_name", ""), + email=data.get("email", ""), + cv_file_path=data.get("cv_file_path", ""), + already_exists=data.get("already_exists", False), + ) + + def health(self) -> bool: + """ + Check if the API is healthy. + + Returns: + True if healthy, False otherwise + """ + try: + response = requests.get(f"{self.base_url}/health", timeout=5, headers=self._headers()) + return response.status_code == 200 + except requests.exceptions.RequestException: + return False + diff --git a/src/sdk/database.py b/src/sdk/database.py new file mode 100644 index 0000000000000000000000000000000000000000..07935cf25227054f64f71db9394238d44567f236 --- /dev/null +++ b/src/sdk/database.py @@ -0,0 +1,517 @@ +""" +Database API Client. + +A client for querying the recruitment database via the API. +""" + +import os +from dataclasses import dataclass, field +from typing import Any, Optional +from uuid import UUID + +import requests + + +def _clean_base_url(url: str) -> str: + """Normalize base URL to avoid issues from quoted env vars.""" + cleaned = url.strip().strip("\"'") + if cleaned.endswith("/"): + cleaned = cleaned[:-1] + return cleaned + + +@dataclass +class QueryResponse: + """Response from a database query.""" + success: bool + table: str + total_count: int + returned_count: int + offset: int + data: list[dict[str, Any]] + message: Optional[str] = None + + +@dataclass +class SingleRecordResponse: + """Response for a single record lookup.""" + success: bool + table: str + data: Optional[dict[str, Any]] = None + message: Optional[str] = None + + +@dataclass +class StatsResponse: + """Database statistics response.""" + success: bool + stats: dict[str, Any] = field(default_factory=dict) + + +class DatabaseClient: + """ + Client for the Database Query API. 
+ + Usage: + client = DatabaseClient() + + # Get all candidates + response = client.get_candidates() + for candidate in response.data: + print(candidate["full_name"], candidate["status"]) + + # Get candidate by email with all related data + candidate = client.get_candidate_by_email("ada@example.com") + if candidate.success: + print(candidate.data["cv_screening_results"]) + + # Flexible query with filters + response = client.query( + table="candidates", + filters={"status": "applied"}, + fields=["id", "full_name", "email"], + limit=10 + ) + + # Get CV screening results with score filter + screenings = client.get_cv_screenings(min_score=0.8) + """ + + def __init__(self, base_url: Optional[str] = None, session_id: Optional[str] = None): + """ + Initialize the Database client. + + Args: + base_url: API base URL. Defaults to DATABASE_API_URL env var + or http://localhost:8080/api/v1/db + """ + raw = base_url or os.getenv( + "DATABASE_API_URL", + "http://localhost:8080/api/v1/db" + ) + self.base_url = _clean_base_url(raw) + self.session_id = (session_id or os.getenv("SESSION_ID") or "").strip().strip("\"'") + self.timeout = 30 + + def _headers(self) -> dict: + headers = {} + if self.session_id: + headers["X-Session-Id"] = self.session_id + return headers + + # ================================================================================== + # FLEXIBLE QUERY + # ================================================================================== + + def query( + self, + table: str, + filters: Optional[dict[str, Any]] = None, + fields: Optional[list[str]] = None, + include_relations: bool = False, + limit: int = 100, + offset: int = 0, + sort_by: Optional[str] = None, + sort_order: str = "desc" + ) -> QueryResponse: + """ + Flexible query for any table. + + Args: + table: Table name (candidates, cv_screening_results, voice_screening_results, + interview_scheduling, final_decision) + filters: Key-value filters. Supports operators like {"field": {"$gte": 0.8}} + fields: Specific fields to return. None returns all. + include_relations: Include related data (candidates table only) + limit: Max records to return + offset: Number of records to skip + sort_by: Field to sort by + sort_order: "asc" or "desc" + + Returns: + QueryResponse with data and pagination info + """ + payload = { + "table": table, + "filters": filters, + "fields": fields, + "include_relations": include_relations, + "limit": limit, + "offset": offset, + "sort_by": sort_by, + "sort_order": sort_order, + } + + response = requests.post( + f"{self.base_url}/query", + json=payload, + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return QueryResponse( + success=data["success"], + table=data["table"], + total_count=data["total_count"], + returned_count=data["returned_count"], + offset=data["offset"], + data=data["data"], + message=data.get("message"), + ) + + # ================================================================================== + # CANDIDATES + # ================================================================================== + + def get_candidates( + self, + status: Optional[str] = None, + limit: int = 100, + offset: int = 0, + include_relations: bool = False + ) -> QueryResponse: + """ + List all candidates with optional filtering. 
+ + Args: + status: Filter by status (e.g., "applied", "cv_passed", "interview_scheduled") + limit: Max records to return + offset: Pagination offset + include_relations: Include CV/voice screening results, interviews, decisions + + Returns: + QueryResponse with candidate data + """ + params = { + "limit": limit, + "offset": offset, + "include_relations": include_relations, + } + if status: + params["status"] = status + + response = requests.get( + f"{self.base_url}/candidates", + params=params, + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return QueryResponse( + success=data["success"], + table=data["table"], + total_count=data["total_count"], + returned_count=data["returned_count"], + offset=data["offset"], + data=data["data"], + message=data.get("message"), + ) + + def get_candidate( + self, + candidate_id: str | UUID, + include_relations: bool = True + ) -> SingleRecordResponse: + """ + Get a single candidate by ID with all related data. + + Args: + candidate_id: Candidate UUID + include_relations: Include CV/voice screening, interviews, decisions + + Returns: + SingleRecordResponse with full candidate profile + """ + response = requests.get( + f"{self.base_url}/candidates/{candidate_id}", + params={"include_relations": include_relations}, + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return SingleRecordResponse( + success=data["success"], + table=data["table"], + data=data.get("data"), + message=data.get("message"), + ) + + def get_candidate_by_email( + self, + email: str, + include_relations: bool = True + ) -> SingleRecordResponse: + """ + Get a candidate by email address with all related data. + + Args: + email: Candidate's email address + include_relations: Include CV/voice screening, interviews, decisions + + Returns: + SingleRecordResponse with full candidate profile + """ + response = requests.get( + f"{self.base_url}/candidates/email/{email}", + params={"include_relations": include_relations}, + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return SingleRecordResponse( + success=data["success"], + table=data["table"], + data=data.get("data"), + message=data.get("message"), + ) + + # ================================================================================== + # CV SCREENING + # ================================================================================== + + def get_cv_screenings( + self, + candidate_id: Optional[str | UUID] = None, + min_score: Optional[float] = None, + limit: int = 100, + offset: int = 0 + ) -> QueryResponse: + """ + List CV screening results. 
+ + Args: + candidate_id: Filter by candidate + min_score: Minimum overall fit score (0.0 - 1.0) + limit: Max records + offset: Pagination offset + + Returns: + QueryResponse with CV screening results + """ + params = {"limit": limit, "offset": offset} + if candidate_id: + params["candidate_id"] = str(candidate_id) + if min_score is not None: + params["min_score"] = min_score + + response = requests.get( + f"{self.base_url}/cv-screening", + params=params, + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return QueryResponse( + success=data["success"], + table=data["table"], + total_count=data["total_count"], + returned_count=data["returned_count"], + offset=data["offset"], + data=data["data"], + message=data.get("message"), + ) + + # ================================================================================== + # VOICE SCREENING + # ================================================================================== + + def get_voice_screenings( + self, + candidate_id: Optional[str | UUID] = None, + limit: int = 100, + offset: int = 0 + ) -> QueryResponse: + """ + List voice screening results. + + Args: + candidate_id: Filter by candidate + limit: Max records + offset: Pagination offset + + Returns: + QueryResponse with voice screening results + """ + params = {"limit": limit, "offset": offset} + if candidate_id: + params["candidate_id"] = str(candidate_id) + + response = requests.get( + f"{self.base_url}/voice-screening", + params=params, + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return QueryResponse( + success=data["success"], + table=data["table"], + total_count=data["total_count"], + returned_count=data["returned_count"], + offset=data["offset"], + data=data["data"], + message=data.get("message"), + ) + + # ================================================================================== + # INTERVIEWS + # ================================================================================== + + def get_interviews( + self, + candidate_id: Optional[str | UUID] = None, + status: Optional[str] = None, + limit: int = 100, + offset: int = 0 + ) -> QueryResponse: + """ + List interview scheduling records. + + Args: + candidate_id: Filter by candidate + status: Filter by interview status + limit: Max records + offset: Pagination offset + + Returns: + QueryResponse with interview data + """ + params = {"limit": limit, "offset": offset} + if candidate_id: + params["candidate_id"] = str(candidate_id) + if status: + params["status"] = status + + response = requests.get( + f"{self.base_url}/interviews", + params=params, + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return QueryResponse( + success=data["success"], + table=data["table"], + total_count=data["total_count"], + returned_count=data["returned_count"], + offset=data["offset"], + data=data["data"], + message=data.get("message"), + ) + + # ================================================================================== + # DECISIONS + # ================================================================================== + + def get_decisions( + self, + decision: Optional[str] = None, + min_score: Optional[float] = None, + limit: int = 100, + offset: int = 0 + ) -> QueryResponse: + """ + List final hiring decisions. 
+ + Args: + decision: Filter by decision (e.g., "hired", "rejected") + min_score: Minimum overall score + limit: Max records + offset: Pagination offset + + Returns: + QueryResponse with decision data + """ + params = {"limit": limit, "offset": offset} + if decision: + params["decision"] = decision + if min_score is not None: + params["min_score"] = min_score + + response = requests.get( + f"{self.base_url}/decisions", + params=params, + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return QueryResponse( + success=data["success"], + table=data["table"], + total_count=data["total_count"], + returned_count=data["returned_count"], + offset=data["offset"], + data=data["data"], + message=data.get("message"), + ) + + # ================================================================================== + # STATS & HEALTH + # ================================================================================== + + def get_stats(self) -> StatsResponse: + """ + Get database statistics. + + Returns: + StatsResponse with counts for all tables and status breakdown + """ + response = requests.get( + f"{self.base_url}/stats", + headers=self._headers(), + timeout=self.timeout + ) + self._handle_error(response) + + data = response.json() + return StatsResponse( + success=data["success"], + stats=data["stats"], + ) + + def health(self) -> bool: + """ + Check if the database API is healthy. + + Returns: + True if healthy, False otherwise + """ + try: + response = requests.get(f"{self.base_url}/health", timeout=5, headers=self._headers()) + return response.status_code == 200 and response.json().get("status") == "healthy" + except requests.exceptions.RequestException: + return False + + # ================================================================================== + # HELPERS + # ================================================================================== + + def _handle_error(self, response: requests.Response) -> None: + """Raise appropriate exceptions for error responses.""" + if response.status_code == 400: + error = response.json().get("detail", "Invalid request") + raise ValueError(f"Validation error: {error}") + + if response.status_code == 500: + error = response.json().get("detail", "Server error") + raise ValueError(f"Server error: {error}") + + if response.status_code != 200: + raise ValueError(f"Unexpected status: {response.status_code}") + diff --git a/src/sdk/supervisor.py b/src/sdk/supervisor.py new file mode 100644 index 0000000000000000000000000000000000000000..37753b9dff081edb7c31a411eb168ba542012977 --- /dev/null +++ b/src/sdk/supervisor.py @@ -0,0 +1,410 @@ +""" +Supervisor API Client. + +A client for interacting with the HR Supervisor Agent API. +Supports both regular and streaming responses. 
+ +============================================================================= +AVAILABLE METHODS: +============================================================================= + +WITH CONTEXT ENGINEERING (CompactingSupervisor wrapper): + - chat() : Batch response with automatic context compaction + - stream() : Streaming response with context compaction [HAS ERRORS - TODO FIX] + +RAW SUPERVISOR (Direct agent access, no wrapper): + - chat_raw() : Batch response, direct supervisor agent + - stream_raw() : Streaming response, direct supervisor agent [HAS ERRORS - TODO FIX] + +============================================================================= +NOTE: Both streaming methods (stream() and stream_raw()) have known issues +that need to be fixed. Use batch methods (chat, chat_raw) for reliable operation. +============================================================================= +""" + +import os +import json +from dataclasses import dataclass +from typing import Generator, Optional +import requests + + +def _clean_base_url(url: str) -> str: + """Normalize base URL to avoid issues from quoted env vars.""" + cleaned = url.strip().strip("\"'") + if cleaned.endswith("/"): + cleaned = cleaned[:-1] + return cleaned + + +@dataclass +class ChatResponse: + """Response from a chat request.""" + content: str + thread_id: str + token_count: int + + +@dataclass +class StreamChunk: + """A chunk from a streaming response.""" + type: str # 'token', 'done', or 'error' + content: Optional[str] = None + thread_id: Optional[str] = None + token_count: Optional[int] = None + error: Optional[str] = None + + +class SupervisorClient: + """ + Client for the HR Supervisor Agent API. + + Two categories of methods: + + 1. WITH CONTEXT ENGINEERING (uses CompactingSupervisor wrapper): + - chat() : Batch request with automatic context compaction + - stream() : Streaming with context compaction [HAS ERRORS - TODO FIX] + + 2. RAW SUPERVISOR (direct agent access, no wrapper): + - chat_raw() : Batch request, direct supervisor agent + - stream_raw() : Streaming, direct supervisor agent [HAS ERRORS - TODO FIX] + + ⚠️ NOTE: Both streaming methods have issues. Use batch methods for reliable operation. + + Usage: + client = SupervisorClient() + + # Batch chat with context engineering (RECOMMENDED) + response = client.chat("Show me all candidates") + print(response.content) + + # Batch chat without wrapper (RECOMMENDED) + response = client.chat_raw("Show me all candidates") + print(response.content) + + # With conversation continuity + response1 = client.chat("Show me all candidates", thread_id="abc123") + response2 = client.chat("Tell me more about the first one", thread_id="abc123") + """ + + def __init__(self, base_url: Optional[str] = None, session_id: Optional[str] = None): + """ + Initialize the Supervisor client. + + Args: + base_url: API base URL. 
Defaults to SUPERVISOR_API_URL env var + or http://localhost:8080/api/v1/supervisor + """ + raw = base_url or os.getenv( + "SUPERVISOR_API_URL", + "http://localhost:8080/api/v1/supervisor" + ) + self.base_url = _clean_base_url(raw) + self.session_id = (session_id or os.getenv("SESSION_ID") or "").strip().strip("\"'") + + def _headers(self) -> dict: + """Build headers including session isolation id.""" + headers = {} + if self.session_id: + headers["X-Session-Id"] = self.session_id + return headers + + # ========================================================================= + # CONTEXT ENGINEERING METHODS (with CompactingSupervisor wrapper) + # ========================================================================= + + def chat(self, message: str, thread_id: Optional[str] = None, timeout: int = 120) -> ChatResponse: + """ + Send a message and get a complete response. + + Uses CompactingSupervisor wrapper for automatic context management. + When token limit is exceeded, old messages are compacted/summarized. + + Args: + message: The message to send + thread_id: Optional thread ID for conversation continuity + timeout: Request timeout in seconds + + Returns: + ChatResponse with content, thread_id, and token_count + + Raises: + requests.exceptions.RequestException: On connection errors + ValueError: On API errors + """ + payload = {"message": message, "thread_id": thread_id} + + response = requests.post( + f"{self.base_url}/chat", + json=payload, + headers=self._headers(), + timeout=timeout + ) + + if response.status_code != 200: + error = response.json().get("detail", "Unknown error") + raise ValueError(f"API error: {error}") + + data = response.json() + return ChatResponse( + content=data["response"], + thread_id=data["thread_id"], + token_count=data["token_count"] + ) + + def stream( + self, + message: str, + thread_id: Optional[str] = None, + timeout: int = 300 + ) -> Generator[StreamChunk, None, None]: + """ + Send a message and stream the response token by token. + + ⚠️ WARNING: This method has known issues and needs to be fixed. + Use stream_raw() for reliable streaming, or chat() for batch requests. + + Uses CompactingSupervisor wrapper for automatic context management. 
+ + Args: + message: The message to send + thread_id: Optional thread ID for conversation continuity + timeout: Request timeout in seconds + + Yields: + StreamChunk objects with type 'token', 'done', or 'error' + + Example: + full_response = "" + for chunk in client.stream("Hello"): + if chunk.type == "token": + full_response += chunk.content + print(chunk.content, end="", flush=True) + elif chunk.type == "done": + print(f"\\nThread: {chunk.thread_id}") + elif chunk.type == "error": + print(f"Error: {chunk.error}") + """ + payload = {"message": message, "thread_id": thread_id} + + try: + with requests.post( + f"{self.base_url}/chat/stream", + json=payload, + headers=self._headers(), + stream=True, + timeout=timeout + ) as response: + if response.status_code != 200: + yield StreamChunk( + type="error", + error=f"API returned status {response.status_code}" + ) + return + + current_event = None + for line in response.iter_lines(decode_unicode=True): + if not line: + continue + + if line.startswith("event:"): + current_event = line[6:].strip() + elif line.startswith("data:") and current_event: + try: + data = json.loads(line[5:].strip()) + + if current_event == "token": + yield StreamChunk( + type="token", + content=data.get("content", "") + ) + elif current_event == "done": + yield StreamChunk( + type="done", + thread_id=data.get("thread_id"), + token_count=data.get("token_count", 0) + ) + elif current_event == "error": + yield StreamChunk( + type="error", + error=data.get("error", "Unknown error") + ) + except json.JSONDecodeError: + continue + current_event = None + + except requests.exceptions.ConnectionError: + yield StreamChunk( + type="error", + error="Cannot connect to API. Make sure the server is running." + ) + except requests.exceptions.Timeout: + yield StreamChunk(type="error", error="Request timed out.") + except Exception as e: + yield StreamChunk(type="error", error=str(e)) + + def new_chat(self) -> str: + """ + Create a new chat session. + + Returns: + New thread_id + + Raises: + requests.exceptions.RequestException: On connection errors + """ + response = requests.post(f"{self.base_url}/new", headers=self._headers()) + response.raise_for_status() + return response.json()["thread_id"] + + + + # ========================================================================= + # RAW SUPERVISOR METHODS (No CompactingSupervisor wrapper) + # ========================================================================= + + def chat_raw(self, message: str, thread_id: Optional[str] = None, timeout: int = 120) -> ChatResponse: + """ + Send a message to the raw supervisor agent (without context compaction). + + This bypasses the CompactingSupervisor wrapper, giving direct access + to the underlying supervisor agent. Useful for debugging or when you + want full control over context management. 
+ + Args: + message: The message to send + thread_id: Optional thread ID for conversation continuity + timeout: Request timeout in seconds + + Returns: + ChatResponse with content, thread_id, and token_count + + Raises: + requests.exceptions.RequestException: On connection errors + ValueError: On API errors + """ + payload = {"message": message, "thread_id": thread_id} + + response = requests.post( + f"{self.base_url}/raw/chat", + json=payload, + headers=self._headers(), + timeout=timeout + ) + + if response.status_code != 200: + error = response.json().get("detail", "Unknown error") + raise ValueError(f"API error: {error}") + + data = response.json() + return ChatResponse( + content=data["response"], + thread_id=data["thread_id"], + token_count=data["token_count"] + ) + + def stream_raw( + self, + message: str, + thread_id: Optional[str] = None, + timeout: int = 300 + ) -> Generator[StreamChunk, None, None]: + """ + Stream a response from the raw supervisor agent (without context compaction). + + ⚠️ WARNING: This method has known issues and needs to be fixed. + Use chat_raw() for reliable batch requests. + + This bypasses the CompactingSupervisor wrapper, giving direct access + to the underlying supervisor agent's streaming capabilities. + + Args: + message: The message to send + thread_id: Optional thread ID for conversation continuity + timeout: Request timeout in seconds + + Yields: + StreamChunk objects with type 'token', 'done', or 'error' + + Example: + full_response = "" + for chunk in client.stream_raw("Hello"): + if chunk.type == "token": + full_response += chunk.content + print(chunk.content, end="", flush=True) + elif chunk.type == "done": + print(f"\\nThread: {chunk.thread_id}") + elif chunk.type == "error": + print(f"Error: {chunk.error}") + """ + payload = {"message": message, "thread_id": thread_id} + + try: + with requests.post( + f"{self.base_url}/raw/chat/stream", + json=payload, + headers=self._headers(), + stream=True, + timeout=timeout + ) as response: + if response.status_code != 200: + yield StreamChunk( + type="error", + error=f"API returned status {response.status_code}" + ) + return + + current_event = None + for line in response.iter_lines(decode_unicode=True): + if not line: + continue + + if line.startswith("event:"): + current_event = line[6:].strip() + elif line.startswith("data:") and current_event: + try: + data = json.loads(line[5:].strip()) + + if current_event == "token": + yield StreamChunk( + type="token", + content=data.get("content", "") + ) + elif current_event == "done": + yield StreamChunk( + type="done", + thread_id=data.get("thread_id"), + token_count=data.get("token_count", 0) + ) + elif current_event == "error": + yield StreamChunk( + type="error", + error=data.get("error", "Unknown error") + ) + except json.JSONDecodeError: + continue + current_event = None + + except requests.exceptions.ConnectionError: + yield StreamChunk( + type="error", + error="Cannot connect to API. Make sure the server is running." + ) + except requests.exceptions.Timeout: + yield StreamChunk(type="error", error="Request timed out.") + except Exception as e: + yield StreamChunk(type="error", error=str(e)) + + + def health(self) -> bool: + """ + Check if the API is healthy. 
+ + Returns: + True if healthy, False otherwise + """ + try: + response = requests.get(f"{self.base_url}/health", timeout=5, headers=self._headers()) + return response.status_code == 200 + except requests.exceptions.RequestException: + return False diff --git a/src/state/__init__.py b/src/state/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/state/candidate.py b/src/state/candidate.py new file mode 100644 index 0000000000000000000000000000000000000000..9ab159fec1356e80697967bf6bee514ce652572b --- /dev/null +++ b/src/state/candidate.py @@ -0,0 +1,65 @@ +import enum + +class CandidateStatus(str, enum.Enum): + """ + Application process and status updates + ------------------------------------- + 1) CV Upload + -> "applied" + + 2) CV Screening + -> "cv_screened" + -> "cv_passed" + -> "cv_rejected" + + 3) Voice Screening Invitation + -> "voice_invitation_sent" + + 4) Voice Screening + -> "voice_done" + -> "voice_passed" + OR "voice_rejected" + """ + applied = "applied" + cv_screened = "cv_screened" + cv_passed = "cv_passed" + cv_rejected = "cv_rejected" + voice_invitation_sent = "voice_invitation_sent" + voice_done = "voice_done" + voice_passed = "voice_passed" + voice_rejected = "voice_rejected" + interview_scheduled = "interview_scheduled" + decision_made = "decision_made" + + +class InterviewStatus(str, enum.Enum): + """ + Person-to-Person Interview + ------------------------------------- + 5) Interview Scheduling + -> "interview_scheduled" + -> "interview_completed" + -> "interview_cancelled" + + -> "interview_passed" + -> "interview_rejected" + """ + scheduled = "scheduled" + completed = "completed" + cancelled = "cancelled" + passed = "passed" + rejected = "rejected" + + +class DecisionStatus(str, enum.Enum): + """ + Final Decision + ------------------------------------- + 6) Decision Made + -> "hired" + -> "rejected" + -> "pending" + """ + hired = "hired" + rejected = "rejected" + pending = "pending" \ No newline at end of file diff --git a/start.sh b/start.sh new file mode 100644 index 0000000000000000000000000000000000000000..564c87d8d578dc4044090afe46be96a375dd4f6b --- /dev/null +++ b/start.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -e + +# Hugging Face provides PORT; default to 7860 locally +export PORT="${PORT:-7860}" + +# Defaults for local in-container routing; can be overridden via env +export SUPERVISOR_API_URL="${SUPERVISOR_API_URL:-http://127.0.0.1:8080/api/v1/supervisor}" +export DATABASE_API_URL="${DATABASE_API_URL:-http://127.0.0.1:8080/api/v1/db}" +export CV_UPLOAD_API_URL="${CV_UPLOAD_API_URL:-http://127.0.0.1:8080/api/v1/cv}" + +# Start FastAPI backend +uvicorn src.api.app:app --host 0.0.0.0 --port 8080 & + +# Give the API a moment to come up +sleep 2 + +# Run Gradio frontend +python src/frontend/gradio/app.py \ No newline at end of file diff --git a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl new file mode 100644 index 0000000000000000000000000000000000000000..78b8795f9bc15057e16bc698a4ad5f17184b3497 --- /dev/null +++ b/terraform/.terraform.lock.hcl @@ -0,0 +1,22 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/google" { + version = "6.50.0" + constraints = "~> 6.0" + hashes = [ + "h1:79CwMTsp3Ud1nOl5hFS5mxQHyT0fGVye7pqpU0PPlHI=", + "zh:1f3513fcfcbf7ca53d667a168c5067a4dd91a4d4cccd19743e248ff31065503c", + "zh:3da7db8fc2c51a77dd958ea8baaa05c29cd7f829bd8941c26e2ea9cb3aadc1e5", + "zh:3e09ac3f6ca8111cbb659d38c251771829f4347ab159a12db195e211c76068bb", + "zh:7bb9e41c568df15ccf1a8946037355eefb4dfb4e35e3b190808bb7c4abae547d", + "zh:81e5d78bdec7778e6d67b5c3544777505db40a826b6eb5abe9b86d4ba396866b", + "zh:8d309d020fb321525883f5c4ea864df3d5942b6087f6656d6d8b3a1377f340fc", + "zh:93e112559655ab95a523193158f4a4ac0f2bfed7eeaa712010b85ebb551d5071", + "zh:d3efe589ffd625b300cef5917c4629513f77e3a7b111c9df65075f76a46a63c7", + "zh:d4a4d672bbef756a870d8f32b35925f8ce2ef4f6bbd5b71a3cb764f1b6c85421", + "zh:e13a86bca299ba8a118e80d5f84fbdd708fe600ecdceea1a13d4919c068379fe", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fec30c095647b583a246c39d557704947195a1b7d41f81e369ba377d997faef6", + ] +} diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000000000000000000000000000000000000..38e3b811ac2d30ae263883ac533e5a4494eed0ad --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,55 @@ +# Create or adopt the project WITHOUT billing +resource "google_project" "project" { + project_id = var.project_id + name = var.project_name + + # If you're under an Organization/Folder and want to place it there, + # we can add folder_id/org_id later. For hackathon, keep it simple. + deletion_policy = "ABANDON" +} + +# Enable Gmail API (no billing required) +resource "google_project_service" "gmail" { + project = google_project.project.project_id + service = "gmail.googleapis.com" + disable_on_destroy = false + + timeouts { + create = "30m" + update = "30m" + delete = "30m" + } + + depends_on = [ + google_project.project + ] +} + +# Enable the Google Calendar API +resource "google_project_service" "calendar_api" { + project = google_project.project.project_id + service = "calendar.googleapis.com" + disable_on_destroy = false +} + + +# Grants your user account the minimal roles needed to manage API services and +# avoid “permission denied” errors when enabling APIs. 
+resource "google_project_iam_member" "user_editor" { + project = google_project.project.project_id + role = "roles/editor" + member = "user:${var.user_email}" +} + +resource "google_project_iam_member" "user_serviceusage_admin" { + project = google_project.project.project_id + role = "roles/serviceusage.serviceUsageAdmin" + member = "user:${var.user_email}" +} + +# Give yourself full Owner access for full API + IAM control +resource "google_project_iam_member" "user_owner" { + project = google_project.project.project_id + role = "roles/owner" + member = "user:${var.user_email}" +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 0000000000000000000000000000000000000000..a42474fdee95e146c5f67bc5ea1fbc084d328dd1 --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,31 @@ +output "project_id" { + value = google_project.project.project_id + description = "Project ID" +} + +output "project_number" { + value = google_project.project.number + description = "Project number" +} + +output "gmail_api_service" { + value = google_project_service.gmail.service + description = "Gmail API service name (resource exists ⇒ enabled)" +} + +output "console_calendar_api_url" { + value = "https://console.cloud.google.com/apis/library/calendar.googleapis.com?project=${google_project.project.project_id}" +} + + + +# Handy console URLs to complete the unavoidable manual OAuth bits: +output "console_oauth_consent_screen_url" { + value = "https://console.cloud.google.com/apis/credentials/consent?project=${google_project.project.project_id}" + description = "Configure OAuth consent (External + add your email as test user, add gmail.modify scope)." +} + +output "console_oauth_credentials_url" { + value = "https://console.cloud.google.com/apis/credentials?project=${google_project.project.project_id}" + description = "Create OAuth 2.0 Client ID (Application type: Desktop app)." 
+} diff --git a/terraform/providers.tf b/terraform/providers.tf new file mode 100644 index 0000000000000000000000000000000000000000..fc74f25a5128ab211eb08d5f1450b88c724bcac2 --- /dev/null +++ b/terraform/providers.tf @@ -0,0 +1,4 @@ +provider "google" { + project = var.project_id + region = var.region +} diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 0000000000000000000000000000000000000000..e699a7d7e00b4cb401dbc8f4ac4290c325dfcad8 --- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,23 @@ +variable "project_id" { + type = string + description = "Project ID to create or reuse" + default = "gradio-hackathon-25" +} + +variable "project_name" { + type = string + description = "Human-friendly project name" + default = "Gradio Agent MCP Hackathon 25" +} + +variable "region" { + type = string + default = "europe-west3" + description = "Default region" +} + +variable "user_email" { + type = string + description = "Your Google account email to grant project roles to" + default = "hr.cjordan.agent.hack.winter25@gmail.com" +} diff --git a/terraform/versions.tf b/terraform/versions.tf new file mode 100644 index 0000000000000000000000000000000000000000..4fc1806039e1fea7084b3a761826c303b52efaf0 --- /dev/null +++ b/terraform/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_version = ">= 1.6.0" + required_providers { + google = { + source = "hashicorp/google" + version = "~> 6.0" + } + } +} diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/create_dummy_candidate.py b/tests/create_dummy_candidate.py new file mode 100644 index 0000000000000000000000000000000000000000..5e5b048d0e196bc0e8165ad0e102a51af8a83274 --- /dev/null +++ b/tests/create_dummy_candidate.py @@ -0,0 +1,48 @@ +import uuid +from datetime import datetime +from src.database.candidates.client import SessionLocal +from src.database.candidates.models import Candidate, CVScreeningResult +from src.state.candidate import CandidateStatus + +def create_dummy_candidate(): + with SessionLocal() as db: + # Check if dummy candidate exists + candidate = db.query(Candidate).filter(Candidate.email == "test_candidate@example.com").first() + + if not candidate: + candidate_id = uuid.uuid4() + candidate = Candidate( + id=candidate_id, + full_name="Test Candidate", + email="test_candidate@example.com", + phone_number="+1234567890", + status=CandidateStatus.applied, + created_at=datetime.utcnow() + ) + db.add(candidate) + + # Add dummy CV screening result so we have a job title + # Scores use the 0-1 scale defined by the cv_screener prompt and the Database API docs + cv_result = CVScreeningResult( + id=uuid.uuid4(), + candidate_id=candidate_id, + job_title="Software Engineer", + skills_match_score=0.85, + experience_match_score=0.9, + education_match_score=0.8, + overall_fit_score=0.85, + llm_feedback="Strong candidate", + timestamp=datetime.utcnow() + ) + db.add(cv_result) + + db.commit() + print(f"✅ Created dummy candidate with ID: {candidate_id}") + print("Email: test_candidate@example.com") + else: + print(f"ℹ️ Dummy candidate already exists with ID: {candidate.id}") + print(f"Email: {candidate.email}") + + return str(candidate.id) + +if __name__ == "__main__": + create_dummy_candidate() diff --git a/tests/verify_voice_integration.py b/tests/verify_voice_integration.py new file mode 100644 index 0000000000000000000000000000000000000000..519fa742d75af601a6bec3fd7ae1cd241e484987 --- /dev/null +++ b/tests/verify_voice_integration.py @@ -0,0 +1,104 @@ +import sys +import os 
+from sqlalchemy import select, desc +from uuid import uuid4 +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + +# Add src to path +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) + +from src.database.candidates.models import Candidate, CVScreeningResult, Base +from src.database.candidates.client import SessionLocal, engine +from src.agents.voice_screening.utils.questions import get_screening_questions + +def verify_integration(): + print("Verifying integration...") + + # Create tables if not exist (for test) + Base.metadata.create_all(bind=engine) + + # Create a dummy candidate + candidate_id = uuid4() + candidate = Candidate( + id=candidate_id, + full_name="Test Candidate", + email=f"test_{candidate_id}@example.com", + status="applied" + ) + + # Create a dummy CV result + cv_result = CVScreeningResult( + candidate_id=candidate_id, + job_title="Senior Python Engineer", + skills_match_score=0.9, + experience_match_score=0.8, + education_match_score=0.9, + overall_fit_score=0.85, + llm_feedback="Good fit", + reasoning_trace={} + ) + + try: + with SessionLocal() as db: + db.add(candidate) + db.add(cv_result) + db.commit() + print(f"Created test candidate: {candidate_id}") + + # Simulate the logic in proxy.py + fetched_candidate = db.execute( + select(Candidate).where(Candidate.id == candidate_id) + ).scalar_one_or_none() + + if fetched_candidate: + print(f"Fetched candidate: {fetched_candidate.full_name}") + + fetched_cv_result = db.execute( + select(CVScreeningResult) + .where(CVScreeningResult.candidate_id == candidate_id) + .order_by(desc(CVScreeningResult.timestamp)) + .limit(1) + ).scalar_one_or_none() + + job_title = fetched_cv_result.job_title if fetched_cv_result else "the position" + print(f"Fetched job title: {job_title}") + + questions = get_screening_questions(job_title) + print(f"Generated {len(questions)} questions") + + instructions = ( + f"You are a friendly HR assistant conducting a phone screening interview with {fetched_candidate.full_name} " + f"for the position of {job_title}. " + f"Greet the candidate warmly by name. " + f"Your goal is to ask the following questions to assess their fit:\n\n" + ) + + for i, q in enumerate(questions, 1): + instructions += f"{i}. {q}\n" + + instructions += ( + "\nAsk one question at a time. Wait for their response before moving to the next. " + "Be professional but conversational. If they ask clarifying questions, answer them briefly." + ) + + print("\nGenerated Instructions:") + print("-" * 40) + print(instructions) + print("-" * 40) + + # Clean up + db.delete(cv_result) + db.delete(candidate) + db.commit() + print("Cleaned up test data") + + except Exception as e: + print(f"Verification failed: {e}") + import traceback + traceback.print_exc() + +if __name__ == "__main__": + verify_integration()
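+
+# To run this verification locally (assuming the database settings loaded from your .env
+# point at a reachable instance):
+#     python tests/verify_voice_integration.py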