Commit
·
9c400b9
0
Parent(s):
first commit
Browse files- .chainlit/config.toml +121 -0
- .chainlit/translations/en-US.json +231 -0
- .gitignore +112 -0
- TODO.md +78 -0
- ai_functions.py +60 -0
- chat-app.py +55 -0
- lite-llm-config.yaml +15 -0
- reference.md +23 -0
- requirements.bk.txt +104 -0
- requirements.txt +148 -0
- run.py +42 -0
- src/data_sources/coin_gecko.py +71 -0
- src/data_sources/cryptocompare.py +63 -0
- src/data_sources/dexscreener.py +107 -0
- src/databases/redis.py +129 -0
- src/libs/constants.py +61 -0
- src/libs/helper_functions.py +132 -0
- src/libs/logger.py +17 -0
- src/libs/redis.py +14 -0
- src/llms/sourcegraph.py +126 -0
- src/requirements.txt +8 -0
- src/search_services/exa.py +0 -0
- src/search_services/jina_ai.py +100 -0
- src/tools/crypto_coin_price_tool.py +60 -0
- src/xbt-core.py +43 -0
.chainlit/config.toml
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
# Whether to enable telemetry (default: true). No personal data is collected.
|
| 3 |
+
enable_telemetry = true
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# List of environment variables to be provided by each user to use the app.
|
| 7 |
+
user_env = []
|
| 8 |
+
|
| 9 |
+
# Duration (in seconds) during which the session is saved when the connection is lost
|
| 10 |
+
session_timeout = 3600
|
| 11 |
+
|
| 12 |
+
# Enable third parties caching (e.g LangChain cache)
|
| 13 |
+
cache = false
|
| 14 |
+
|
| 15 |
+
# Authorized origins
|
| 16 |
+
allow_origins = ["*"]
|
| 17 |
+
|
| 18 |
+
# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
|
| 19 |
+
# follow_symlink = false
|
| 20 |
+
|
| 21 |
+
[features]
|
| 22 |
+
# Show the prompt playground
|
| 23 |
+
prompt_playground = true
|
| 24 |
+
|
| 25 |
+
# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
|
| 26 |
+
unsafe_allow_html = false
|
| 27 |
+
|
| 28 |
+
# Process and display mathematical expressions. This can clash with "$" characters in messages.
|
| 29 |
+
latex = false
|
| 30 |
+
|
| 31 |
+
# Automatically tag threads with the current chat profile (if a chat profile is used)
|
| 32 |
+
auto_tag_thread = true
|
| 33 |
+
|
| 34 |
+
# Authorize users to spontaneously upload files with messages
|
| 35 |
+
[features.spontaneous_file_upload]
|
| 36 |
+
enabled = true
|
| 37 |
+
accept = ["*/*"]
|
| 38 |
+
max_files = 20
|
| 39 |
+
max_size_mb = 500
|
| 40 |
+
|
| 41 |
+
[features.audio]
|
| 42 |
+
# Threshold for audio recording
|
| 43 |
+
min_decibels = -45
|
| 44 |
+
# Delay for the user to start speaking in MS
|
| 45 |
+
initial_silence_timeout = 3000
|
| 46 |
+
# Delay for the user to continue speaking in MS. If the user stops speaking for this duration, the recording will stop.
|
| 47 |
+
silence_timeout = 1500
|
| 48 |
+
# Above this duration (MS), the recording will forcefully stop.
|
| 49 |
+
max_duration = 15000
|
| 50 |
+
# Duration of the audio chunks in MS
|
| 51 |
+
chunk_duration = 1000
|
| 52 |
+
# Sample rate of the audio
|
| 53 |
+
sample_rate = 44100
|
| 54 |
+
|
| 55 |
+
[UI]
|
| 56 |
+
# Name of the app and chatbot.
|
| 57 |
+
name = "Chatbot"
|
| 58 |
+
|
| 59 |
+
# Show the readme while the thread is empty.
|
| 60 |
+
show_readme_as_default = true
|
| 61 |
+
|
| 62 |
+
# Description of the app and chatbot. This is used for HTML tags.
|
| 63 |
+
# description = ""
|
| 64 |
+
|
| 65 |
+
# Large size content are by default collapsed for a cleaner ui
|
| 66 |
+
default_collapse_content = true
|
| 67 |
+
|
| 68 |
+
# The default value for the expand messages settings.
|
| 69 |
+
default_expand_messages = false
|
| 70 |
+
|
| 71 |
+
# Hide the chain of thought details from the user in the UI.
|
| 72 |
+
hide_cot = false
|
| 73 |
+
|
| 74 |
+
# Link to your github repo. This will add a github button in the UI's header.
|
| 75 |
+
# github = ""
|
| 76 |
+
|
| 77 |
+
# Specify a CSS file that can be used to customize the user interface.
|
| 78 |
+
# The CSS file can be served from the public directory or via an external link.
|
| 79 |
+
# custom_css = "/public/test.css"
|
| 80 |
+
|
| 81 |
+
# Specify a Javascript file that can be used to customize the user interface.
|
| 82 |
+
# The Javascript file can be served from the public directory.
|
| 83 |
+
# custom_js = "/public/test.js"
|
| 84 |
+
|
| 85 |
+
# Specify a custom font url.
|
| 86 |
+
# custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
|
| 87 |
+
|
| 88 |
+
# Specify a custom meta image url.
|
| 89 |
+
# custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png"
|
| 90 |
+
|
| 91 |
+
# Specify a custom build directory for the frontend.
|
| 92 |
+
# This can be used to customize the frontend code.
|
| 93 |
+
# Be careful: If this is a relative path, it should not start with a slash.
|
| 94 |
+
# custom_build = "./public/build"
|
| 95 |
+
|
| 96 |
+
[UI.theme]
|
| 97 |
+
#layout = "wide"
|
| 98 |
+
#font_family = "Inter, sans-serif"
|
| 99 |
+
# Override default MUI light theme. (Check theme.ts)
|
| 100 |
+
[UI.theme.light]
|
| 101 |
+
#background = "#FAFAFA"
|
| 102 |
+
#paper = "#FFFFFF"
|
| 103 |
+
|
| 104 |
+
[UI.theme.light.primary]
|
| 105 |
+
#main = "#F80061"
|
| 106 |
+
#dark = "#980039"
|
| 107 |
+
#light = "#FFE7EB"
|
| 108 |
+
|
| 109 |
+
# Override default MUI dark theme. (Check theme.ts)
|
| 110 |
+
[UI.theme.dark]
|
| 111 |
+
#background = "#FAFAFA"
|
| 112 |
+
#paper = "#FFFFFF"
|
| 113 |
+
|
| 114 |
+
[UI.theme.dark.primary]
|
| 115 |
+
#main = "#F80061"
|
| 116 |
+
#dark = "#980039"
|
| 117 |
+
#light = "#FFE7EB"
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
[meta]
|
| 121 |
+
generated_by = "1.1.202"
|
.chainlit/translations/en-US.json
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"components": {
|
| 3 |
+
"atoms": {
|
| 4 |
+
"buttons": {
|
| 5 |
+
"userButton": {
|
| 6 |
+
"menu": {
|
| 7 |
+
"settings": "Settings",
|
| 8 |
+
"settingsKey": "S",
|
| 9 |
+
"APIKeys": "API Keys",
|
| 10 |
+
"logout": "Logout"
|
| 11 |
+
}
|
| 12 |
+
}
|
| 13 |
+
}
|
| 14 |
+
},
|
| 15 |
+
"molecules": {
|
| 16 |
+
"newChatButton": {
|
| 17 |
+
"newChat": "New Chat"
|
| 18 |
+
},
|
| 19 |
+
"tasklist": {
|
| 20 |
+
"TaskList": {
|
| 21 |
+
"title": "\ud83d\uddd2\ufe0f Task List",
|
| 22 |
+
"loading": "Loading...",
|
| 23 |
+
"error": "An error occured"
|
| 24 |
+
}
|
| 25 |
+
},
|
| 26 |
+
"attachments": {
|
| 27 |
+
"cancelUpload": "Cancel upload",
|
| 28 |
+
"removeAttachment": "Remove attachment"
|
| 29 |
+
},
|
| 30 |
+
"newChatDialog": {
|
| 31 |
+
"createNewChat": "Create new chat?",
|
| 32 |
+
"clearChat": "This will clear the current messages and start a new chat.",
|
| 33 |
+
"cancel": "Cancel",
|
| 34 |
+
"confirm": "Confirm"
|
| 35 |
+
},
|
| 36 |
+
"settingsModal": {
|
| 37 |
+
"settings": "Settings",
|
| 38 |
+
"expandMessages": "Expand Messages",
|
| 39 |
+
"hideChainOfThought": "Hide Chain of Thought",
|
| 40 |
+
"darkMode": "Dark Mode"
|
| 41 |
+
},
|
| 42 |
+
"detailsButton": {
|
| 43 |
+
"using": "Using",
|
| 44 |
+
"running": "Running",
|
| 45 |
+
"took_one": "Took {{count}} step",
|
| 46 |
+
"took_other": "Took {{count}} steps"
|
| 47 |
+
},
|
| 48 |
+
"auth": {
|
| 49 |
+
"authLogin": {
|
| 50 |
+
"title": "Login to access the app.",
|
| 51 |
+
"form": {
|
| 52 |
+
"email": "Email address",
|
| 53 |
+
"password": "Password",
|
| 54 |
+
"noAccount": "Don't have an account?",
|
| 55 |
+
"alreadyHaveAccount": "Already have an account?",
|
| 56 |
+
"signup": "Sign Up",
|
| 57 |
+
"signin": "Sign In",
|
| 58 |
+
"or": "OR",
|
| 59 |
+
"continue": "Continue",
|
| 60 |
+
"forgotPassword": "Forgot password?",
|
| 61 |
+
"passwordMustContain": "Your password must contain:",
|
| 62 |
+
"emailRequired": "email is a required field",
|
| 63 |
+
"passwordRequired": "password is a required field"
|
| 64 |
+
},
|
| 65 |
+
"error": {
|
| 66 |
+
"default": "Unable to sign in.",
|
| 67 |
+
"signin": "Try signing in with a different account.",
|
| 68 |
+
"oauthsignin": "Try signing in with a different account.",
|
| 69 |
+
"redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.",
|
| 70 |
+
"oauthcallbackerror": "Try signing in with a different account.",
|
| 71 |
+
"oauthcreateaccount": "Try signing in with a different account.",
|
| 72 |
+
"emailcreateaccount": "Try signing in with a different account.",
|
| 73 |
+
"callback": "Try signing in with a different account.",
|
| 74 |
+
"oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.",
|
| 75 |
+
"emailsignin": "The e-mail could not be sent.",
|
| 76 |
+
"emailverify": "Please verify your email, a new email has been sent.",
|
| 77 |
+
"credentialssignin": "Sign in failed. Check the details you provided are correct.",
|
| 78 |
+
"sessionrequired": "Please sign in to access this page."
|
| 79 |
+
}
|
| 80 |
+
},
|
| 81 |
+
"authVerifyEmail": {
|
| 82 |
+
"almostThere": "You're almost there! We've sent an email to ",
|
| 83 |
+
"verifyEmailLink": "Please click on the link in that email to complete your signup.",
|
| 84 |
+
"didNotReceive": "Can't find the email?",
|
| 85 |
+
"resendEmail": "Resend email",
|
| 86 |
+
"goBack": "Go Back",
|
| 87 |
+
"emailSent": "Email sent successfully.",
|
| 88 |
+
"verifyEmail": "Verify your email address"
|
| 89 |
+
},
|
| 90 |
+
"providerButton": {
|
| 91 |
+
"continue": "Continue with {{provider}}",
|
| 92 |
+
"signup": "Sign up with {{provider}}"
|
| 93 |
+
},
|
| 94 |
+
"authResetPassword": {
|
| 95 |
+
"newPasswordRequired": "New password is a required field",
|
| 96 |
+
"passwordsMustMatch": "Passwords must match",
|
| 97 |
+
"confirmPasswordRequired": "Confirm password is a required field",
|
| 98 |
+
"newPassword": "New password",
|
| 99 |
+
"confirmPassword": "Confirm password",
|
| 100 |
+
"resetPassword": "Reset Password"
|
| 101 |
+
},
|
| 102 |
+
"authForgotPassword": {
|
| 103 |
+
"email": "Email address",
|
| 104 |
+
"emailRequired": "email is a required field",
|
| 105 |
+
"emailSent": "Please check the email address {{email}} for instructions to reset your password.",
|
| 106 |
+
"enterEmail": "Enter your email address and we will send you instructions to reset your password.",
|
| 107 |
+
"resendEmail": "Resend email",
|
| 108 |
+
"continue": "Continue",
|
| 109 |
+
"goBack": "Go Back"
|
| 110 |
+
}
|
| 111 |
+
}
|
| 112 |
+
},
|
| 113 |
+
"organisms": {
|
| 114 |
+
"chat": {
|
| 115 |
+
"history": {
|
| 116 |
+
"index": {
|
| 117 |
+
"showHistory": "Show history",
|
| 118 |
+
"lastInputs": "Last Inputs",
|
| 119 |
+
"noInputs": "Such empty...",
|
| 120 |
+
"loading": "Loading..."
|
| 121 |
+
}
|
| 122 |
+
},
|
| 123 |
+
"inputBox": {
|
| 124 |
+
"input": {
|
| 125 |
+
"placeholder": "Type your message here..."
|
| 126 |
+
},
|
| 127 |
+
"speechButton": {
|
| 128 |
+
"start": "Start recording",
|
| 129 |
+
"stop": "Stop recording"
|
| 130 |
+
},
|
| 131 |
+
"SubmitButton": {
|
| 132 |
+
"sendMessage": "Send message",
|
| 133 |
+
"stopTask": "Stop Task"
|
| 134 |
+
},
|
| 135 |
+
"UploadButton": {
|
| 136 |
+
"attachFiles": "Attach files"
|
| 137 |
+
},
|
| 138 |
+
"waterMark": {
|
| 139 |
+
"text": "Built with"
|
| 140 |
+
}
|
| 141 |
+
},
|
| 142 |
+
"Messages": {
|
| 143 |
+
"index": {
|
| 144 |
+
"running": "Running",
|
| 145 |
+
"executedSuccessfully": "executed successfully",
|
| 146 |
+
"failed": "failed",
|
| 147 |
+
"feedbackUpdated": "Feedback updated",
|
| 148 |
+
"updating": "Updating"
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
"dropScreen": {
|
| 152 |
+
"dropYourFilesHere": "Drop your files here"
|
| 153 |
+
},
|
| 154 |
+
"index": {
|
| 155 |
+
"failedToUpload": "Failed to upload",
|
| 156 |
+
"cancelledUploadOf": "Cancelled upload of",
|
| 157 |
+
"couldNotReachServer": "Could not reach the server",
|
| 158 |
+
"continuingChat": "Continuing previous chat"
|
| 159 |
+
},
|
| 160 |
+
"settings": {
|
| 161 |
+
"settingsPanel": "Settings panel",
|
| 162 |
+
"reset": "Reset",
|
| 163 |
+
"cancel": "Cancel",
|
| 164 |
+
"confirm": "Confirm"
|
| 165 |
+
}
|
| 166 |
+
},
|
| 167 |
+
"threadHistory": {
|
| 168 |
+
"sidebar": {
|
| 169 |
+
"filters": {
|
| 170 |
+
"FeedbackSelect": {
|
| 171 |
+
"feedbackAll": "Feedback: All",
|
| 172 |
+
"feedbackPositive": "Feedback: Positive",
|
| 173 |
+
"feedbackNegative": "Feedback: Negative"
|
| 174 |
+
},
|
| 175 |
+
"SearchBar": {
|
| 176 |
+
"search": "Search"
|
| 177 |
+
}
|
| 178 |
+
},
|
| 179 |
+
"DeleteThreadButton": {
|
| 180 |
+
"confirmMessage": "This will delete the thread as well as it's messages and elements.",
|
| 181 |
+
"cancel": "Cancel",
|
| 182 |
+
"confirm": "Confirm",
|
| 183 |
+
"deletingChat": "Deleting chat",
|
| 184 |
+
"chatDeleted": "Chat deleted"
|
| 185 |
+
},
|
| 186 |
+
"index": {
|
| 187 |
+
"pastChats": "Past Chats"
|
| 188 |
+
},
|
| 189 |
+
"ThreadList": {
|
| 190 |
+
"empty": "Empty...",
|
| 191 |
+
"today": "Today",
|
| 192 |
+
"yesterday": "Yesterday",
|
| 193 |
+
"previous7days": "Previous 7 days",
|
| 194 |
+
"previous30days": "Previous 30 days"
|
| 195 |
+
},
|
| 196 |
+
"TriggerButton": {
|
| 197 |
+
"closeSidebar": "Close sidebar",
|
| 198 |
+
"openSidebar": "Open sidebar"
|
| 199 |
+
}
|
| 200 |
+
},
|
| 201 |
+
"Thread": {
|
| 202 |
+
"backToChat": "Go back to chat",
|
| 203 |
+
"chatCreatedOn": "This chat was created on"
|
| 204 |
+
}
|
| 205 |
+
},
|
| 206 |
+
"header": {
|
| 207 |
+
"chat": "Chat",
|
| 208 |
+
"readme": "Readme"
|
| 209 |
+
}
|
| 210 |
+
}
|
| 211 |
+
},
|
| 212 |
+
"hooks": {
|
| 213 |
+
"useLLMProviders": {
|
| 214 |
+
"failedToFetchProviders": "Failed to fetch providers:"
|
| 215 |
+
}
|
| 216 |
+
},
|
| 217 |
+
"pages": {
|
| 218 |
+
"Design": {},
|
| 219 |
+
"Env": {
|
| 220 |
+
"savedSuccessfully": "Saved successfully",
|
| 221 |
+
"requiredApiKeys": "Required API Keys",
|
| 222 |
+
"requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
|
| 223 |
+
},
|
| 224 |
+
"Page": {
|
| 225 |
+
"notPartOfProject": "You are not part of this project."
|
| 226 |
+
},
|
| 227 |
+
"ResumeButton": {
|
| 228 |
+
"resumeChat": "Resume Chat"
|
| 229 |
+
}
|
| 230 |
+
}
|
| 231 |
+
}
|
.gitignore
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
.installed.cfg
|
| 25 |
+
*.egg
|
| 26 |
+
MANIFEST
|
| 27 |
+
|
| 28 |
+
# Virtual environment
|
| 29 |
+
env/
|
| 30 |
+
venv/
|
| 31 |
+
ENV/
|
| 32 |
+
env.bak/
|
| 33 |
+
venv.bak/
|
| 34 |
+
.venv/
|
| 35 |
+
|
| 36 |
+
# PyInstaller
|
| 37 |
+
# Usually these files are written by a python script from a template
|
| 38 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 39 |
+
*.manifest
|
| 40 |
+
*.spec
|
| 41 |
+
|
| 42 |
+
# Installer logs
|
| 43 |
+
pip-log.txt
|
| 44 |
+
pip-delete-this-directory.txt
|
| 45 |
+
|
| 46 |
+
# Unit test / coverage reports
|
| 47 |
+
htmlcov/
|
| 48 |
+
.tox/
|
| 49 |
+
.nox/
|
| 50 |
+
.coverage
|
| 51 |
+
.coverage.*
|
| 52 |
+
.cache
|
| 53 |
+
nosetests.xml
|
| 54 |
+
coverage.xml
|
| 55 |
+
*.cover
|
| 56 |
+
.hypothesis/
|
| 57 |
+
.pytest_cache/
|
| 58 |
+
pytest-debug.log
|
| 59 |
+
|
| 60 |
+
# Translations
|
| 61 |
+
*.mo
|
| 62 |
+
*.pot
|
| 63 |
+
|
| 64 |
+
# Django stuff:
|
| 65 |
+
*.log
|
| 66 |
+
local_settings.py
|
| 67 |
+
db.sqlite3
|
| 68 |
+
db.sqlite3-journal
|
| 69 |
+
|
| 70 |
+
# Flask stuff:
|
| 71 |
+
instance/
|
| 72 |
+
.webassets-cache
|
| 73 |
+
|
| 74 |
+
# Scrapy stuff:
|
| 75 |
+
.scrapy
|
| 76 |
+
|
| 77 |
+
# Sphinx documentation
|
| 78 |
+
docs/_build/
|
| 79 |
+
docs/_site/
|
| 80 |
+
|
| 81 |
+
# Jupyter Notebook
|
| 82 |
+
.ipynb_checkpoints
|
| 83 |
+
|
| 84 |
+
# pyenv
|
| 85 |
+
.python-version
|
| 86 |
+
|
| 87 |
+
# celery beat schedule file
|
| 88 |
+
celerybeat-schedule
|
| 89 |
+
|
| 90 |
+
# SageMath parsed files
|
| 91 |
+
*.sage.py
|
| 92 |
+
|
| 93 |
+
# dotenv
|
| 94 |
+
.env
|
| 95 |
+
.env.*
|
| 96 |
+
|
| 97 |
+
# mypy
|
| 98 |
+
.mypy_cache/
|
| 99 |
+
.dmypy.json
|
| 100 |
+
dmypy.json
|
| 101 |
+
|
| 102 |
+
# Pyre type checker
|
| 103 |
+
.pyre/
|
| 104 |
+
|
| 105 |
+
# VS Code
|
| 106 |
+
.vscode/
|
| 107 |
+
|
| 108 |
+
# IntelliJ
|
| 109 |
+
.idea/
|
| 110 |
+
|
| 111 |
+
# Mac OS
|
| 112 |
+
.DS_Store
|
TODO.md
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Initial System Efficiency Strategy
|
| 2 |
+
1. Query generator system
|
| 3 |
+
a. Discovery
|
| 4 |
+
b. Tokens
|
| 5 |
+
2. Prompt enhancer system
|
| 6 |
+
3. Typesense instance for (short cached)
|
| 7 |
+
a. user actions similarity and recommendation/suggestion
|
| 8 |
+
b. function path and execution graph search
|
| 9 |
+
c. external data fetches and proactive data fetch
|
| 10 |
+
4. Function path and execution graph generator class
|
| 11 |
+
5. Timescale instance for metric measurements and ranking
|
| 12 |
+
a. Function execution time
|
| 13 |
+
b. Gas spend
|
| 14 |
+
c. Integration latency
|
| 15 |
+
d. Prompt -> answer latency
|
| 16 |
+
6. Proactive cache enrichment based on user activity
|
| 17 |
+
7. Proactive knowledge graph update based on market activity and user interests
|
| 18 |
+
a. KG pipeline flow
|
| 19 |
+
b. Progressive cache and hot context for llm as flow progresses
|
| 20 |
+
8. Purchase suggestion engines for users
|
| 21 |
+
9. Subtle user education to increase chances of purchases
|
| 22 |
+
10. Interest provoking alerts
|
| 23 |
+
a. Toasts
|
| 24 |
+
b. Email
|
| 25 |
+
c. Highlights
|
| 26 |
+
11. Short form content generation engine
|
| 27 |
+
a. Giphs
|
| 28 |
+
b. Memes
|
| 29 |
+
c. Short video highlights
|
| 30 |
+
d. Short voice highlights in background as user interacts with system
|
| 31 |
+
e. Short top gainer and loser animations
|
| 32 |
+
12. User PnL stats animations layerd on system prediction performance
|
| 33 |
+
13. User pseudo trading system gamified
|
| 34 |
+
14. Fast and easy token purchase for boosted assets (LP partners)
|
| 35 |
+
15. Index system for users (create, manage and adjust) for short term and long term
|
| 36 |
+
16. User budgeting and financial planning system with scores
|
| 37 |
+
17. Users' global engagement score and leaderboard system with point awards
|
| 38 |
+
18. Fundamental analysis feature
|
| 39 |
+
19. Signal and listing alerts
|
| 40 |
+
20. Copy trading service
|
| 41 |
+
21. Bot trading service
|
| 42 |
+
22. Passive-Aggressive investor portfolio execution and management service
|
| 43 |
+
23. Stock trading service
|
| 44 |
+
24. Utility payments
|
| 45 |
+
25. Travel service
|
| 46 |
+
a. Cheap flight paths
|
| 47 |
+
b. Esim
|
| 48 |
+
c. Travel expense on crypto
|
| 49 |
+
d. Airtime and data
|
| 50 |
+
e. Vouchers and points
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
## Infra
|
| 54 |
+
1. Async everything
|
| 55 |
+
2. CI/CD pipeline
|
| 56 |
+
3. Strict typed input and output validations
|
| 57 |
+
4. Debugging logs
|
| 58 |
+
5. Proper general logging and export to log service
|
| 59 |
+
6. LLM load balancing and redundancy
|
| 60 |
+
7. LLM caching
|
| 61 |
+
8. Local and secure micro LLMs servers (Fly or RunPod)
|
| 62 |
+
9. Proper backups of all infra resources
|
| 63 |
+
10. Cloudflare captcha and WAF
|
| 64 |
+
11. User traffic monitoring and active banning system
|
| 65 |
+
12. Ofac and sanctions checks
|
| 66 |
+
13. KYC flows
|
| 67 |
+
14. KYC check service offering to 3rd party
|
| 68 |
+
15. V1 SDK release
|
| 69 |
+
16. V1 developer contribution campaigns
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
## Priority
|
| 73 |
+
1. Host TypeSence or Upstash Redis, Timescale DB, LiteLLM, Ollama, FalkorDB,
|
| 74 |
+
a. compare Fly and Digital Ocean
|
| 75 |
+
2. Create secret store and integrate
|
| 76 |
+
3. Create ChatXBT scalable hosting (Fly, Digital Ocean, or RunPod)
|
| 77 |
+
4. Set alerts for billing and uptime
|
| 78 |
+
5. Set error alerts and llm investigation
|
ai_functions.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import Dict, Optional, Type
|
| 3 |
+
|
| 4 |
+
from data_sources.coin_gecko import CoinGecko
|
| 5 |
+
from src.data_sources.cryptocompare import CryptoCompare
|
| 6 |
+
|
| 7 |
+
from langchain.tools.base import BaseTool
|
| 8 |
+
|
| 9 |
+
class CryptoCoinPrice(BaseModel):
    """Represents the prices of a coin in various currencies."""
    # Maps a currency code (e.g. "usd") to the coin's price in that currency.
    prices: Dict[str, float] = Field(..., description="Prices in various currencies")
|
| 12 |
+
|
| 13 |
+
class CryptoCoinPriceData(BaseModel):
    """Encapsulates both CoinGecko and CryptoCompare price data."""
    # Keyed by coin id (as passed to the data source); a key may be absent
    # when the corresponding provider returned no data for that coin.
    coingecko_price: Dict[str, CryptoCoinPrice] = Field(..., description="CoinGecko prices for various coins")
    crypto_compare_price: Dict[str, CryptoCoinPrice] = Field(..., description="CryptoCompare prices for various coins")
|
| 17 |
+
|
| 18 |
+
class PriceInput(BaseModel):
    """Arguments accepted by CryptoCoinPriceTool (see args_schema)."""
    coin_id: str = Field(..., description="The ID of the cryptocurrency coin to retrieve prices for")
    # Defaults to "usd" when the caller does not specify a quote currency.
    vs_currency: str = Field("usd", description="The currency to compare against")
|
| 21 |
+
|
| 22 |
+
class CryptoCoinPriceOutput(BaseModel):
    """Wrapper returned by CryptoCoinPriceTool.__call__."""
    # Combined CoinGecko + CryptoCompare result produced by _run().
    price_data: CryptoCoinPriceData
|
| 24 |
+
|
| 25 |
+
class CryptoCoinPriceTool(BaseTool):
    """LangChain tool that fetches a coin's price from CoinGecko and CryptoCompare.

    Queries both providers for a single coin/quote-currency pair and returns
    whatever each provider had; a provider with no data for the coin simply
    contributes an empty mapping (a warning is printed).
    """

    name = "CryptoCoinPriceTool"
    description = "Fetches price data for a given cryptocurrency coin from CoinGecko and CryptoCompare"
    args_schema: Type[BaseModel] = PriceInput
    return_direct: bool = True

    # BUG FIX: BaseTool is a pydantic model, so instance attributes must be
    # declared as fields — the original __init__ assigned undeclared
    # attributes without calling super().__init__(), which fails at
    # construction time on a pydantic model.
    # NOTE(review): assumes BaseTool's pydantic Config allows arbitrary
    # types for the client fields — confirm against the installed
    # langchain version.
    id: Optional[str] = None
    coingecko: Optional[CoinGecko] = None
    crypto_compare: Optional[CryptoCompare] = None

    def __init__(self, id: Optional[str] = None, **kwargs):
        """Create the tool and its data-source clients.

        Args:
            id: Optional identifier for this tool instance.
            **kwargs: Forwarded to BaseTool (e.g. callbacks, verbose).
        """
        # Initialize the pydantic/BaseTool machinery before touching fields.
        super().__init__(**kwargs)
        self.id = id
        self.coingecko = CoinGecko()
        self.crypto_compare = CryptoCompare()

    def _run(self, coin_id: str, vs_currency: str = "usd") -> CryptoCoinPriceData:
        """Fetch the coin's price from both providers.

        Args:
            coin_id: Provider coin identifier (e.g. "bitcoin").
            vs_currency: Quote currency, defaults to "usd".

        Returns:
            CryptoCoinPriceData with one entry per provider that had data.
        """
        coingecko_price_data = self.coingecko.get_coin_price(ids=[coin_id], vs_currencies=[vs_currency])
        crypto_compare_price_data = self.crypto_compare.get_coin_price(ids=[coin_id], vs_currencies=[vs_currency])

        coingecko_price = {}
        crypto_compare_price = {}

        if coin_id in coingecko_price_data:
            coingecko_price[coin_id] = CryptoCoinPrice(prices=coingecko_price_data[coin_id])
        else:
            print(f"Warning: CoinGecko data for {coin_id} not found.")

        # CryptoCompare keys its response by upper-cased symbol, hence the
        # .upper() lookup while the result stays keyed by the original id.
        if coin_id.upper() in crypto_compare_price_data:
            crypto_compare_price[coin_id] = CryptoCoinPrice(prices=crypto_compare_price_data[coin_id.upper()])
        else:
            print(f"Warning: CryptoCompare data for {coin_id} not found.")

        return CryptoCoinPriceData(
            coingecko_price=coingecko_price,
            crypto_compare_price=crypto_compare_price
        )

    def __call__(self, inputs: PriceInput) -> CryptoCoinPriceOutput:
        """Validate-and-run entry point taking a PriceInput model."""
        price_data = self._run(inputs.coin_id, inputs.vs_currency)
        return CryptoCoinPriceOutput(price_data=price_data)
|
chat-app.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dotenv import load_dotenv
|
| 2 |
+
|
| 3 |
+
from langchain.chains import LLMMathChain
|
| 4 |
+
from langchain.llms.openai import OpenAI
|
| 5 |
+
from langchain.chat_models import ChatOpenAI
|
| 6 |
+
from langchain.utilities.serpapi import SerpAPIWrapper
|
| 7 |
+
from langchain.agents import initialize_agent, Tool, AgentExecutor
|
| 8 |
+
import chainlit as cl
|
| 9 |
+
|
| 10 |
+
from src.tools.crypto_coin_price_tool import CryptoCoinPriceTool
|
| 11 |
+
|
| 12 |
+
load_dotenv()
|
| 13 |
+
|
| 14 |
+
@cl.on_chat_start
def start():
    """Build the LangChain agent for this chat session and store it in the
    Chainlit user session under the "agent" key."""
    chat_llm = ChatOpenAI(temperature=0, streaming=True)
    completion_llm = OpenAI(temperature=0, streaming=True)
    serp = SerpAPIWrapper()
    price_tool = CryptoCoinPriceTool()
    math_chain = LLMMathChain.from_llm(llm=chat_llm, verbose=True)

    # Each capability is wrapped as a LangChain Tool so the agent can pick it.
    search_tool = Tool(
        name="Search",
        func=serp.run,
        description="useful for when you need to answer questions about current events. You should ask targeted questions",
        handle_tool_error=True,
    )
    calculator_tool = Tool(
        name="Calculator",
        func=math_chain.run,
        description="useful for when you need to answer questions about math",
        handle_tool_error=True,
    )
    crypto_tool = Tool(
        name=price_tool.name,
        func=price_tool.run,
        description=price_tool.description,
        handle_tool_error=True,
    )

    # The agent itself runs on the completion LLM; the chat LLM only backs
    # the math chain above (as in the original wiring).
    agent = initialize_agent(
        [search_tool, calculator_tool, crypto_tool],
        completion_llm,
        agent="chat-zero-shot-react-description",
        verbose=True,
        handle_parsing_errors=True,
    )
    cl.user_session.set("agent", agent)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@cl.on_message
async def main(message: cl.Message):
    """Forward each incoming chat message to the session's agent, streaming
    the final answer back through the Langchain callback handler."""
    agent: AgentExecutor = cl.user_session.get("agent")
    handler = cl.LangchainCallbackHandler(stream_final_answer=True)
    # agent.run is synchronous; make_async runs it without blocking the loop.
    run_agent = cl.make_async(agent.run)
    await run_agent(message.content, callbacks=[handler])
|
| 54 |
+
|
| 55 |
+
|
lite-llm-config.yaml
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_list:
|
| 2 |
+
- model_name: gpt-3.5-turbo # user-facing model alias
|
| 3 |
+
litellm_params: # all params accepted by litellm.completion() - https://docs.litellm.ai/docs/completion/input
|
| 4 |
+
model: azure/<your-deployment-name>
|
| 5 |
+
api_base: <your-azure-api-endpoint>
|
| 6 |
+
api_key: <your-azure-api-key>
|
| 7 |
+
- model_name: gpt-3.5-turbo
|
| 8 |
+
litellm_params:
|
| 9 |
+
model: azure/gpt-turbo-small-ca
|
| 10 |
+
api_base: https://my-endpoint-canada-berri992.openai.azure.com/
|
| 11 |
+
api_key: <your-azure-api-key>
|
| 12 |
+
- model_name: vllm-model
|
| 13 |
+
litellm_params:
|
| 14 |
+
model: openai/<your-model-name>
|
| 15 |
+
api_base: <your-api-base> # e.g. http://0.0.0.0:3000
|
reference.md
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## py-jsonq
|
| 2 |
+
https://github.com/s1s1ty/py-jsonq
|
| 3 |
+
py-jsonq is a simple, elegant Python package to Query over any type of JSON Data. It'll make your life easier by giving the flavour of an ORM-like query on your JSON.
|
| 4 |
+
|
| 5 |
+
## JMESPath Python: JSON Query Language
|
| 6 |
+
https://python.land/data-processing/working-with-json/jmespath
|
| 7 |
+
|
| 8 |
+
## fastapi-cloudevents
|
| 9 |
+
Allows to easily consume and produce CloudEvents over REST API.
|
| 10 |
+
https://pypi.org/project/fastapi-cloudevents/
|
| 11 |
+
|
| 12 |
+
## Pydantic
|
| 13 |
+
1. Logfire
|
| 14 |
+
https://pydantic.dev/logfire
|
| 15 |
+
https://pydantic.dev/opensource
|
| 16 |
+
https://docs.pydantic.dev/logfire
|
| 17 |
+
2. Settings & Config - https://docs.pydantic.dev/latest/concepts/pydantic_settings/
|
| 18 |
+
|
| 19 |
+
## Dev Tools
|
| 20 |
+
3. https://docs.astral.sh/ruff/
|
| 21 |
+
4. https://tox.wiki/en/4.15.1/index.html
|
| 22 |
+
5. https://black.readthedocs.io/en/stable/
|
| 23 |
+
6. https://pycqa.github.io/isort/
|
requirements.bk.txt
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
aiohttp==3.9.5
|
| 2 |
+
aiosignal==1.3.1
|
| 3 |
+
annotated-types==0.7.0
|
| 4 |
+
anyio==4.4.0
|
| 5 |
+
appnope==0.1.4
|
| 6 |
+
asgiref==3.8.1
|
| 7 |
+
asttokens==2.4.1
|
| 8 |
+
async-timeout==4.0.3
|
| 9 |
+
attrs==23.2.0
|
| 10 |
+
backcall==0.2.0
|
| 11 |
+
beautifulsoup4==4.12.3
|
| 12 |
+
bleach==6.1.0
|
| 13 |
+
certifi==2024.6.2
|
| 14 |
+
charset-normalizer==3.3.2
|
| 15 |
+
coingecko==0.13
|
| 16 |
+
decorator==5.1.1
|
| 17 |
+
defusedxml==0.7.1
|
| 18 |
+
Deprecated==1.2.14
|
| 19 |
+
docopt==0.6.2
|
| 20 |
+
exceptiongroup==1.2.1
|
| 21 |
+
executing==2.0.1
|
| 22 |
+
fastjsonschema==2.19.1
|
| 23 |
+
frozenlist==1.4.1
|
| 24 |
+
googleapis-common-protos==1.63.1
|
| 25 |
+
h11==0.14.0
|
| 26 |
+
httpcore==1.0.5
|
| 27 |
+
httpx==0.27.0
|
| 28 |
+
idna==3.7
|
| 29 |
+
importlib_metadata==7.1.0
|
| 30 |
+
ipython==8.12.3
|
| 31 |
+
jedi==0.19.1
|
| 32 |
+
Jinja2==3.1.4
|
| 33 |
+
jsonschema==4.22.0
|
| 34 |
+
jsonschema-specifications==2023.12.1
|
| 35 |
+
jupyter_client==8.6.2
|
| 36 |
+
jupyter_core==5.7.2
|
| 37 |
+
jupyterlab_pygments==0.3.0
|
| 38 |
+
logfire==0.42.0
|
| 39 |
+
markdown-it-py==3.0.0
|
| 40 |
+
MarkupSafe==2.1.5
|
| 41 |
+
matplotlib-inline==0.1.7
|
| 42 |
+
mdurl==0.1.2
|
| 43 |
+
mistune==3.0.2
|
| 44 |
+
multidict==6.0.5
|
| 45 |
+
nbclient==0.10.0
|
| 46 |
+
nbconvert==7.16.4
|
| 47 |
+
nbformat==5.10.4
|
| 48 |
+
opentelemetry-api==1.25.0
|
| 49 |
+
opentelemetry-exporter-otlp-proto-common==1.25.0
|
| 50 |
+
opentelemetry-exporter-otlp-proto-http==1.25.0
|
| 51 |
+
opentelemetry-instrumentation==0.46b0
|
| 52 |
+
opentelemetry-instrumentation-asgi==0.46b0
|
| 53 |
+
opentelemetry-instrumentation-fastapi==0.46b0
|
| 54 |
+
opentelemetry-instrumentation-httpx==0.46b0
|
| 55 |
+
opentelemetry-instrumentation-redis==0.46b0
|
| 56 |
+
opentelemetry-instrumentation-requests==0.46b0
|
| 57 |
+
opentelemetry-instrumentation-system-metrics==0.46b0
|
| 58 |
+
opentelemetry-proto==1.25.0
|
| 59 |
+
opentelemetry-sdk==1.25.0
|
| 60 |
+
opentelemetry-semantic-conventions==0.46b0
|
| 61 |
+
opentelemetry-util-http==0.46b0
|
| 62 |
+
packaging==24.1
|
| 63 |
+
pandocfilters==1.5.1
|
| 64 |
+
parso==0.8.4
|
| 65 |
+
pexpect==4.9.0
|
| 66 |
+
pickleshare==0.7.5
|
| 67 |
+
pipdeptree==2.22.0
|
| 68 |
+
pipreqs==0.5.0
|
| 69 |
+
platformdirs==4.2.2
|
| 70 |
+
prompt_toolkit==3.0.47
|
| 71 |
+
protobuf==4.25.3
|
| 72 |
+
psutil==5.9.8
|
| 73 |
+
ptyprocess==0.7.0
|
| 74 |
+
pure-eval==0.2.2
|
| 75 |
+
pydantic==2.7.4
|
| 76 |
+
pydantic_core==2.18.4
|
| 77 |
+
Pygments==2.18.0
|
| 78 |
+
python-dateutil==2.9.0.post0
|
| 79 |
+
python-dotenv==1.0.1
|
| 80 |
+
python-ulid==2.6.0
|
| 81 |
+
pyzmq==26.0.3
|
| 82 |
+
redis==5.0.6
|
| 83 |
+
referencing==0.35.1
|
| 84 |
+
requests==2.32.3
|
| 85 |
+
rich==13.7.1
|
| 86 |
+
rpds-py==0.18.1
|
| 87 |
+
six==1.16.0
|
| 88 |
+
sniffio==1.3.1
|
| 89 |
+
soupsieve==2.5
|
| 90 |
+
stack-data==0.6.3
|
| 91 |
+
tinycss2==1.3.0
|
| 92 |
+
tomli==2.0.1
|
| 93 |
+
tornado==6.4.1
|
| 94 |
+
traitlets==5.14.3
|
| 95 |
+
typing_extensions==4.12.2
|
| 96 |
+
ulid==1.1
|
| 97 |
+
upstash-redis==1.1.0
|
| 98 |
+
urllib3==2.2.1
|
| 99 |
+
wcwidth==0.2.13
|
| 100 |
+
webencodings==0.5.1
|
| 101 |
+
wrapt==1.16.0
|
| 102 |
+
yarg==0.1.9
|
| 103 |
+
yarl==1.9.4
|
| 104 |
+
zipp==3.19.2
|
requirements.txt
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
aiofiles==23.2.1
|
| 2 |
+
aiohttp==3.9.5
|
| 3 |
+
aiosignal==1.3.1
|
| 4 |
+
annotated-types==0.7.0
|
| 5 |
+
anyio==3.7.1
|
| 6 |
+
appnope==0.1.4
|
| 7 |
+
asgiref==3.8.1
|
| 8 |
+
asttokens==2.4.1
|
| 9 |
+
async-timeout==4.0.3
|
| 10 |
+
asyncer==0.0.2
|
| 11 |
+
attrs==23.2.0
|
| 12 |
+
backcall==0.2.0
|
| 13 |
+
beautifulsoup4==4.12.3
|
| 14 |
+
bidict==0.23.1
|
| 15 |
+
bleach==6.1.0
|
| 16 |
+
certifi==2024.6.2
|
| 17 |
+
chainlit==1.1.300
|
| 18 |
+
charset-normalizer==3.3.2
|
| 19 |
+
chevron==0.14.0
|
| 20 |
+
click==8.1.7
|
| 21 |
+
coingecko==0.13
|
| 22 |
+
dataclasses-json==0.5.14
|
| 23 |
+
decorator==5.1.1
|
| 24 |
+
defusedxml==0.7.1
|
| 25 |
+
Deprecated==1.2.14
|
| 26 |
+
distro==1.9.0
|
| 27 |
+
docopt==0.6.2
|
| 28 |
+
exceptiongroup==1.2.1
|
| 29 |
+
executing==2.0.1
|
| 30 |
+
fastapi==0.110.3
|
| 31 |
+
fastjsonschema==2.19.1
|
| 32 |
+
filetype==1.2.0
|
| 33 |
+
frozenlist==1.4.1
|
| 34 |
+
googleapis-common-protos==1.63.1
|
| 35 |
+
grpcio==1.64.1
|
| 36 |
+
h11==0.14.0
|
| 37 |
+
httpcore==1.0.5
|
| 38 |
+
httpx==0.27.0
|
| 39 |
+
idna==3.7
|
| 40 |
+
importlib_metadata==7.1.0
|
| 41 |
+
ipython==8.12.3
|
| 42 |
+
jedi==0.19.1
|
| 43 |
+
Jinja2==3.1.4
|
| 44 |
+
jsonpatch==1.33
|
| 45 |
+
jsonpointer==3.0.0
|
| 46 |
+
jsonschema==4.22.0
|
| 47 |
+
jsonschema-specifications==2023.12.1
|
| 48 |
+
jupyter_client==8.6.2
|
| 49 |
+
jupyter_core==5.7.2
|
| 50 |
+
jupyterlab_pygments==0.3.0
|
| 51 |
+
langchain==0.2.4
|
| 52 |
+
langchain-community==0.2.4
|
| 53 |
+
langchain-core==0.2.6
|
| 54 |
+
langchain-text-splitters==0.2.1
|
| 55 |
+
langsmith==0.1.77
|
| 56 |
+
Lazify==0.4.0
|
| 57 |
+
literalai==0.0.604
|
| 58 |
+
logfire==0.42.0
|
| 59 |
+
markdown-it-py==3.0.0
|
| 60 |
+
MarkupSafe==2.1.5
|
| 61 |
+
marshmallow==3.21.3
|
| 62 |
+
matplotlib-inline==0.1.7
|
| 63 |
+
mdurl==0.1.2
|
| 64 |
+
mistune==3.0.2
|
| 65 |
+
multidict==6.0.5
|
| 66 |
+
mypy-extensions==1.0.0
|
| 67 |
+
nbclient==0.10.0
|
| 68 |
+
nbconvert==7.16.4
|
| 69 |
+
nbformat==5.10.4
|
| 70 |
+
nest-asyncio==1.6.0
|
| 71 |
+
numpy==1.26.4
|
| 72 |
+
openai==1.34.0
|
| 73 |
+
opentelemetry-api==1.25.0
|
| 74 |
+
opentelemetry-exporter-otlp==1.25.0
|
| 75 |
+
opentelemetry-exporter-otlp-proto-common==1.25.0
|
| 76 |
+
opentelemetry-exporter-otlp-proto-grpc==1.25.0
|
| 77 |
+
opentelemetry-exporter-otlp-proto-http==1.25.0
|
| 78 |
+
opentelemetry-instrumentation==0.46b0
|
| 79 |
+
opentelemetry-instrumentation-asgi==0.46b0
|
| 80 |
+
opentelemetry-instrumentation-fastapi==0.46b0
|
| 81 |
+
opentelemetry-instrumentation-httpx==0.46b0
|
| 82 |
+
opentelemetry-instrumentation-redis==0.46b0
|
| 83 |
+
opentelemetry-instrumentation-requests==0.46b0
|
| 84 |
+
opentelemetry-instrumentation-system-metrics==0.46b0
|
| 85 |
+
opentelemetry-proto==1.25.0
|
| 86 |
+
opentelemetry-sdk==1.25.0
|
| 87 |
+
opentelemetry-semantic-conventions==0.46b0
|
| 88 |
+
opentelemetry-util-http==0.46b0
|
| 89 |
+
orjson==3.10.5
|
| 90 |
+
packaging==23.2
|
| 91 |
+
pandocfilters==1.5.1
|
| 92 |
+
parso==0.8.4
|
| 93 |
+
pexpect==4.9.0
|
| 94 |
+
pickleshare==0.7.5
|
| 95 |
+
pipdeptree==2.22.0
|
| 96 |
+
pipreqs==0.5.0
|
| 97 |
+
platformdirs==4.2.2
|
| 98 |
+
prompt_toolkit==3.0.47
|
| 99 |
+
protobuf==4.25.3
|
| 100 |
+
psutil==5.9.8
|
| 101 |
+
ptyprocess==0.7.0
|
| 102 |
+
pure-eval==0.2.2
|
| 103 |
+
pydantic==2.7.4
|
| 104 |
+
pydantic_core==2.18.4
|
| 105 |
+
Pygments==2.18.0
|
| 106 |
+
PyJWT==2.8.0
|
| 107 |
+
python-dateutil==2.9.0.post0
|
| 108 |
+
python-dotenv==1.0.1
|
| 109 |
+
python-engineio==4.9.1
|
| 110 |
+
python-multipart==0.0.9
|
| 111 |
+
python-socketio==5.11.2
|
| 112 |
+
python-ulid==2.6.0
|
| 113 |
+
PyYAML==6.0.1
|
| 114 |
+
pyzmq==26.0.3
|
| 115 |
+
redis==5.0.6
|
| 116 |
+
referencing==0.35.1
|
| 117 |
+
requests==2.32.3
|
| 118 |
+
rich==13.7.1
|
| 119 |
+
rpds-py==0.18.1
|
| 120 |
+
simple-websocket==1.0.0
|
| 121 |
+
six==1.16.0
|
| 122 |
+
sniffio==1.3.1
|
| 123 |
+
soupsieve==2.5
|
| 124 |
+
SQLAlchemy==2.0.30
|
| 125 |
+
stack-data==0.6.3
|
| 126 |
+
starlette==0.37.2
|
| 127 |
+
syncer==2.0.3
|
| 128 |
+
tenacity==8.3.0
|
| 129 |
+
tinycss2==1.3.0
|
| 130 |
+
tomli==2.0.1
|
| 131 |
+
tornado==6.4.1
|
| 132 |
+
tqdm==4.66.4
|
| 133 |
+
traitlets==5.14.3
|
| 134 |
+
typing-inspect==0.9.0
|
| 135 |
+
typing_extensions==4.12.2
|
| 136 |
+
ulid==1.1
|
| 137 |
+
upstash-redis==1.1.0
|
| 138 |
+
uptrace==1.24.0
|
| 139 |
+
urllib3==2.2.1
|
| 140 |
+
uvicorn==0.25.0
|
| 141 |
+
watchfiles==0.20.0
|
| 142 |
+
wcwidth==0.2.13
|
| 143 |
+
webencodings==0.5.1
|
| 144 |
+
wrapt==1.16.0
|
| 145 |
+
wsproto==1.2.0
|
| 146 |
+
yarg==0.1.9
|
| 147 |
+
yarl==1.9.4
|
| 148 |
+
zipp==3.19.2
|
run.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Ad-hoc driver script for manually exercising the project's helpers.

Run directly (``python run.py``). The commented-out sections are kept as
scratch examples from earlier experiments with the CoinGecko and
CryptoCompare data-source clients.
"""
import json
import pprint
from src.libs.logger import logger

# from src.data_sources.coingecko import CoinGecko
# from src.data_sources.cryptocompare import CryptoCompare


# cgc = CoinGecko()
# ccp = CryptoCompare()

# result = cgc.get_coin_price(ids=["bitcoin", "ethereum"], vs_currencies=["usd"])
# logger.info(f"Got token price: {result}")

# result = cgc.get_coin_data(id="tron")
# logger.info(f"Got token data: {result}")

# pprint.pprint(result)

# coingecko_instance = get_coingecko_instance()
# print(coingecko_instance)

# result = coingecko_instance.exchanges.get_list()
# logger.info(f"Got exchange data: {result}")

# pprint.pprint(result)


# result = cgc.get_coin_price(ids=["pepe"], vs_currencies=["usd"])
# # logger.info(f"Got token data: {result}")
# pprint.pprint(result)

# result1 = ccp.get_coin_price(ids=["portal"], vs_currencies=["usd"])
# # logger.info(f"Got token data: {result1}")
# pprint.pprint(result1)

from ai_functions import AIFunctions

aif = AIFunctions()

# Fetch the BTC/USD spot price through the AI-function wrapper and print it.
result2 = aif.get_asset_price(asset_id="btc", vs_currency="usd")
pprint.pprint(result2)
|
src/data_sources/coin_gecko.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
from src.databases.redis import REDIS_CACHED
|
| 4 |
+
from src.libs.constants import ONE_HOUR_IN_SECONDS, ONE_MONTH_IN_SECONDS
|
| 5 |
+
from src.libs.logger import logger
|
| 6 |
+
from coingecko import CoinGeckoProClient, CoinGeckoDemoClient
|
| 7 |
+
|
| 8 |
+
load_dotenv()
|
| 9 |
+
|
| 10 |
+
redis_cache = REDIS_CACHED
|
| 11 |
+
|
| 12 |
+
class CoinGecko:
    """Wrapper around the ``coingecko`` client with Redis-backed caching.

    Selects the Pro or Demo CoinGecko client based on ``pro_api`` and reads
    the matching API key from the environment. Public methods are cached via
    the module-level ``redis_cache`` decorator; each accepts an optional
    ``cache_ttl`` keyword that the decorator (not the method body) consumes
    as a per-call TTL override.
    """

    def __init__(self, pro_api: bool = False) -> None:
        # Pro and Demo tiers use different client classes and env-var keys.
        if pro_api:
            self.cgc = CoinGeckoProClient(api_key=os.getenv('COINGECKO_PRO_API_KEY'))
        else:
            self.cgc = CoinGeckoDemoClient(api_key=os.getenv('COINGECKO_DEMO_API_KEY'))

    @redis_cache(ttl=ONE_HOUR_IN_SECONDS)
    @logger.instrument()
    def get_coin_price(self, ids: list, vs_currencies: list, cache_ttl: int = None) -> dict:
        """Return spot prices for *ids* quoted in each of *vs_currencies*.

        Args:
            ids: CoinGecko coin ids, e.g. ``["bitcoin", "ethereum"]``.
            vs_currencies: Quote currencies, e.g. ``["usd"]``.
            cache_ttl: Optional cache TTL override (decorator-consumed).
        """
        # logger.debug(f"ids: {ids}")
        # logger.debug(f"vs_currencies: {vs_currencies}")

        result = self.cgc.simple.get_price(ids=ids, vs_currencies=vs_currencies)
        # logger.debug(f"Result: {result}")

        return result

    @redis_cache(ttl=ONE_HOUR_IN_SECONDS)
    @logger.instrument()
    def get_coin_data(self, id: str, cache_ttl: int = None) -> dict:
        """Return coin metadata for *id*, with localization, market data,
        tickers, and sparkline disabled.

        NOTE: ``id`` shadows the builtin, but renaming it would break callers
        that pass it as a keyword argument.
        """

        result = self.cgc.coins.get_id(id=id, localization=False, market_data=False, tickers=False, sparkline=False)
        logger.debug(f"Result: {result}")

        return result

    @redis_cache(ttl=ONE_MONTH_IN_SECONDS)
    @logger.instrument()
    def get_coin_category_data(self, params: dict = None, cache_ttl: int = None) -> dict:
        """Return coin categories, defaulting to market-cap-descending order."""

        # Resolve the default here rather than using a mutable default argument.
        params = params or {
            "order": "market_cap_desc",
        }

        result = self.cgc.categories.get_data(endpoint="coins/categories", params=params)
        logger.debug(f"Result: {result}")

        return { "categories": result}

    @redis_cache(ttl=ONE_MONTH_IN_SECONDS)
    @logger.instrument()
    def get_exchanges_list(self, cache_ttl: int = None) -> dict:
        """Return the full exchange id/name list wrapped in a dict."""

        # result = self.cgc.exchanges.get(per_page=10)
        # result = self.cgc.exchanges.get_id(id=id)
        result = self.cgc.exchanges.get_list()
        logger.debug(f"Result: {result}")

        return { 'exchanges': result }

    @redis_cache(ttl=ONE_HOUR_IN_SECONDS)
    @logger.instrument()
    def get_exchange_data(self, id: str = None, cache_ttl: int = None) -> dict:
        """Return detailed data for a single exchange *id*."""

        result = self.cgc.exchanges.get_id(id=id)
        logger.debug(f"Result: {result}")

        return result
|
| 71 |
+
|
src/data_sources/cryptocompare.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
|
| 4 |
+
import httpx
|
| 5 |
+
from src.libs.helper_functions import convert_to_snakecase
|
| 6 |
+
from src.databases.redis import REDIS_CACHED
|
| 7 |
+
from src.libs.constants import ONE_MINUTE_IN_SECONDS
|
| 8 |
+
from src.libs.constants import CRYPTO_COMPARE_BASE_URL
|
| 9 |
+
from src.libs.logger import logger
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
load_dotenv()
|
| 13 |
+
|
| 14 |
+
redis_cache = REDIS_CACHED
|
| 15 |
+
|
| 16 |
+
class CryptoCompare:
    """Thin client for the CryptoCompare ``min-api`` REST endpoints.

    Responses are cached in Redis via the module-level ``redis_cache``
    decorator; ``CRYPTOCOMPARE_API_KEY`` (env var) supplies authentication.
    Each method accepts an optional ``cache_ttl`` keyword that the decorator
    (not the method body) consumes as a per-call TTL override.
    """

    def __init__(self, base_url: str = None) -> None:
        # Allow overriding the API host (e.g. in tests); fall back to the
        # project-wide constant.
        self.CRYPTO_COMPARE_BASE_URL = base_url or CRYPTO_COMPARE_BASE_URL

    @redis_cache(ttl=ONE_MINUTE_IN_SECONDS)
    @logger.instrument()
    def get_all_coins(self, cache_ttl: int = None) -> dict:
        """Return the full CryptoCompare coin list, keys snake_cased.

        Returns:
            dict: the ``data/all/coinlist`` response, or None on HTTP error.
        """
        url = f"{self.CRYPTO_COMPARE_BASE_URL}data/all/coinlist"
        logger.debug(url)

        # SECURITY: the params dict contains the API key, so it is
        # deliberately never logged.
        params = {"api_key": os.getenv("CRYPTOCOMPARE_API_KEY")}
        headers = {"Content-type": "application/json; charset=UTF-8"}

        try:
            with httpx.Client(timeout=30.0) as client:
                response = client.get(url, params=params, headers=headers)
                response.raise_for_status()  # Raise if the request failed
                return convert_to_snakecase(response.json())
        except httpx.HTTPError as e:
            # Consistent with get_coin_price: log (not print) and return None
            # so callers can treat a failed fetch as "no data".
            logger.debug(f"An error occurred while making the request: {e}")
            return None

    @redis_cache(ttl=ONE_MINUTE_IN_SECONDS)
    @logger.instrument()
    def get_coin_price(self, ids: list, vs_currencies: list, cache_ttl: int = None) -> dict:
        """Return multi-symbol prices via ``data/pricemulti``.

        Args:
            ids: Coin symbols, e.g. ``["BTC", "ETH"]``.
            vs_currencies: Quote currencies, e.g. ``["USD"]``.
            cache_ttl: Optional cache TTL override (decorator-consumed).

        Returns:
            dict: the price map, or None on HTTP error.
        """
        url = f"{self.CRYPTO_COMPARE_BASE_URL}data/pricemulti"

        params = {
            "api_key": os.getenv("CRYPTOCOMPARE_API_KEY"),
            "fsyms": ",".join(ids),
            "tsyms": ",".join(vs_currencies)
        }

        headers = {"Content-type": "application/json; charset=UTF-8"}

        try:
            with httpx.Client(timeout=30.0) as client:
                response = client.get(url, params=params, headers=headers)
                response.raise_for_status()  # Raise if the request failed
                return response.json()
        except httpx.HTTPError as e:
            logger.debug(f"An error occurred while making the request: {e}")
            return None
|
src/data_sources/dexscreener.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dotenv import load_dotenv
|
| 2 |
+
|
| 3 |
+
import httpx
|
| 4 |
+
from typing import List, Union, Self
|
| 5 |
+
from src.libs.helper_functions import convert_to_snakecase
|
| 6 |
+
from src.databases.redis import REDIS_CACHED
|
| 7 |
+
from src.libs.constants import ONE_MINUTE_IN_SECONDS
|
| 8 |
+
from src.libs.constants import DEX_SCREENER_BASE_URL
|
| 9 |
+
|
| 10 |
+
load_dotenv()
|
| 11 |
+
|
| 12 |
+
redis_cache = REDIS_CACHED
|
| 13 |
+
|
| 14 |
+
class DexScreener:
    """
    A client for the Dex Screener API.

    All endpoints share one request path (`_fetch`): GET the URL, raise on
    HTTP errors, and return the JSON body with keys converted to snake_case.
    Results are cached in Redis for one minute via ``redis_cache``.

    Attributes:
        DEX_SCREENER_BASE_URL (str): The base URL for the Dex Screener API.
    """

    def __init__(self, base_url: str = None) -> None:
        # Allow a per-instance override (e.g. for tests); default to the
        # project-wide constant.
        self.DEX_SCREENER_BASE_URL = base_url or DEX_SCREENER_BASE_URL

    def _fetch(self, url: str) -> dict:
        """
        GET *url* and return its snake_cased JSON body.

        Returns:
            dict: The parsed response, or None if an HTTP error occurs.
        """
        try:
            # Explicit 30s timeout, consistent with the CryptoCompare client;
            # httpx's default (5s) is too aggressive for this API.
            response = httpx.get(url, timeout=30.0)
            response.raise_for_status()  # Raise if the request failed
            return convert_to_snakecase(response.json())
        except httpx.HTTPError as e:
            print(f"An error occurred while making the request: {e}")
            return None

    @redis_cache(ttl=ONE_MINUTE_IN_SECONDS)
    def get_pairs(self, chain_id: str, pair_addresses: str) -> dict:
        """
        Fetch pair data for the given chain and pair address(es).

        Parameters:
            chain_id (str): The ID of the blockchain network.
            pair_addresses (str): The address(es) of the pair(s).

        Returns:
            dict: The JSON response, or None if an error occurs.
        """
        return self._fetch(f"{self.DEX_SCREENER_BASE_URL}pairs/{chain_id}/{pair_addresses}")

    @redis_cache(ttl=ONE_MINUTE_IN_SECONDS)
    def get_tokens(self, token_addresses: Union[str, List[str]]) -> dict:
        """
        Fetch token data for the given token address(es).

        Parameters:
            token_addresses (Union[str, List[str]]): A single address or a
                list of addresses; lists are joined with commas as the API
                expects.

        Returns:
            dict: The JSON response, or None if an error occurs.
        """
        if isinstance(token_addresses, list):
            token_addresses = ','.join(token_addresses)

        return self._fetch(f"{self.DEX_SCREENER_BASE_URL}tokens/{token_addresses}")

    @redis_cache(ttl=ONE_MINUTE_IN_SECONDS)
    def search_pairs(self, query: str) -> dict:
        """
        Search for pairs matching *query* (pair address, token address,
        token name, or token symbol).

        Parameters:
            query (str): The search query.

        Returns:
            dict: The JSON response, or None if an error occurs.
        """
        return self._fetch(f"{self.DEX_SCREENER_BASE_URL}search/?q={query}")
|
src/databases/redis.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
from upstash_redis import Redis
|
| 5 |
+
from src.libs.logger import logger
|
| 6 |
+
from src.libs.helper_functions import chunk_data, create_uuid_from_string
|
| 7 |
+
load_dotenv()
|
| 8 |
+
|
| 9 |
+
url = os.getenv("UPSTASH_REDIS_REST_URL")
|
| 10 |
+
token = os.getenv("UPSTASH_REDIS_REST_TOKEN")
|
| 11 |
+
|
| 12 |
+
REDIS = Redis(url=url, token=token)
|
| 13 |
+
REDIS_PIPELINE = REDIS.pipeline()
|
| 14 |
+
|
| 15 |
+
@logger.instrument()
def args_to_key(*args, **kwargs):
    """
    Build a flat cache-key string from the provided arguments.

    Callable positional arguments contribute their ``__name__`` (and always
    come first in the key); non-callable positional arguments contribute the
    first whitespace-separated token of their ``str()`` form; keyword
    arguments contribute ``str(value)`` only — keyword NAMES are not part of
    the key, so two calls that differ only in keyword names can collide.

    Args:
        *args: A variable number of positional arguments.
        **kwargs: A variable number of keyword arguments.

    Returns:
        str: The underscore-joined key string.

    Example:
        >>> args_to_key("hello", "world", a=1, b=2)
        'hello_world_1_2'
    """
    params = []

    # TODO: turn these into debugging logs
    logger.debug(f"args: {args}")
    logger.debug(f"kwargs: {kwargs}")

    # Append the names of callable arguments to the params list
    for arg in args:
        if callable(arg):
            params.append(arg.__name__)

    # Append the string representations of non-callable arguments and keyword arguments to the params list
    for arg in args:
        if not callable(arg):
            params.append(cast_args_to_string_and_return_first_index(arg))

    for kwarg in kwargs.values():
        params.append(str(kwarg))

    # Join the elements in the params list using the '_' character as a separator
    return "_".join(params)
|
| 52 |
+
|
| 53 |
+
@logger.instrument()
def REDIS_CACHED(ttl: int = 3600, chunk: bool = False):
    """
    Decorator factory that caches a function's JSON-serialisable result in
    Redis.

    Args:
        ttl (int): Default time-to-live in seconds for the cached value.
            Can be overridden per call by passing ``cache_ttl`` (or ``ttl``)
            as a keyword argument to the wrapped function. Defaults to 3600.
        chunk (bool): When True, the result's ``.data`` attribute is split
            into 100-item chunks and stored as a Redis list instead of a
            single string value. Defaults to False.

    Returns:
        A decorator that wraps the target function with read-through caching.

    Example:
        >>> @REDIS_CACHED(ttl=60)
        ... def example_function(arg1, arg2):
        ...     return arg1 + arg2
    """
    def decorator(func):
        from functools import wraps

        @wraps(func)  # preserve the wrapped function's name and docstring
        def wrapper(*args, **kwargs):
            r = REDIS

            # Derive a deterministic key from the function and its arguments,
            # then normalise it to a UUID string.
            cache_key = args_to_key(func, *args, **kwargs)
            logger.debug(f"Cache key: {cache_key}")
            cache_key = str(create_uuid_from_string(cache_key))
            logger.debug(f"Cache key: {cache_key}")

            # Cache hit: return the deserialised value.
            # NOTE(review): chunked results are stored as a Redis LIST, which
            # GET cannot read back — chunked caching is effectively
            # write-only here; confirm the intended retrieval path.
            cached = r.get(cache_key)
            if cached:
                return json.loads(cached)

            # Cache miss: call through to the real function.
            result = func(*args, **kwargs)

            # Per-call TTL override, falling back to the decorator default.
            cache_ttl = kwargs.get('cache_ttl')
            ttl_seconds = cache_ttl or kwargs.get('ttl', ttl)

            if chunk:
                chunked_result = chunk_data(result.data, 100)
                for piece in chunked_result:
                    r.rpush(cache_key, json.dumps(piece))
                # BUG FIX: PEXPIRE expects milliseconds; passing seconds to it
                # expired the key 1000x too early. EXPIRE takes seconds.
                r.expire(cache_key, ttl_seconds)
            else:
                # BUG FIX: serialise to JSON so the json.loads() on the read
                # path round-trips. Storing the raw Python object wrote its
                # repr(), which json.loads() cannot parse.
                r.setex(cache_key, ttl_seconds, json.dumps(result))

            # Return the freshly computed result transparently
            return result
        return wrapper
    return decorator
|
| 111 |
+
|
| 112 |
+
@logger.instrument()
def cast_args_to_string_and_return_first_index(args):
    """
    Coerce *args* to a string, trim any leading/trailing '<' and '>'
    characters, and return the first space-separated token.

    Args:
        args (any): The value to process.

    Returns:
        str or None: The first token after processing, or None when nothing
        remains.

    Example:
        >>> cast_args_to_string_and_return_first_index("hello <world>")
        'hello'
    """
    text = str(args)
    trimmed = text.strip('<>')
    tokens = trimmed.split(' ')
    # str.split(' ') always yields at least one element, but keep the
    # defensive None branch for parity with earlier behaviour.
    if not tokens:
        return None
    return tokens[0]
|
| 129 |
+
|
src/libs/constants.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Constants representing the nuumber of seconds in a week.
|
| 3 |
+
"""
|
| 4 |
+
ONE_MONTH_IN_SECONDS : int = 86400 * 30
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
Constants representing the nuumber of seconds in a week.
|
| 8 |
+
"""
|
| 9 |
+
ONE_WEEK_IN_SECONDS : int = 604800
|
| 10 |
+
|
| 11 |
+
"""
|
| 12 |
+
Constants representing the nuumber of seconds in a day.
|
| 13 |
+
"""
|
| 14 |
+
ONE_DAY_IN_SECONDS : int = 86400
|
| 15 |
+
|
| 16 |
+
"""
|
| 17 |
+
Constant representing the number of seconds in one hour.
|
| 18 |
+
"""
|
| 19 |
+
ONE_HOUR_IN_SECONDS : int = 3600
|
| 20 |
+
|
| 21 |
+
"""
|
| 22 |
+
Constant representing the number of seconds in one minute.
|
| 23 |
+
"""
|
| 24 |
+
ONE_MINUTE_IN_SECONDS : int = 60
|
| 25 |
+
|
| 26 |
+
"""
|
| 27 |
+
DEX_SCREENER_BASE_URL : str = "https://api.dexscreener.com/latest/dex/"
|
| 28 |
+
|
| 29 |
+
This constant is used to represent the base URL for DexScreener API.
|
| 30 |
+
It is defined as "https://api.dexscreener.com/latest/dex/".
|
| 31 |
+
"""
|
| 32 |
+
DEX_SCREENER_BASE_URL : str = "https://api.dexscreener.com/latest/dex/"
|
| 33 |
+
|
| 34 |
+
"""
|
| 35 |
+
JINA_SEARCH_BASE_ENDPOINT : str = "https://s.jina.ai/"
|
| 36 |
+
|
| 37 |
+
This constant is used to represent the base endpoint for Jina Search API.
|
| 38 |
+
It is defined as "https://s.jina.ai/".
|
| 39 |
+
"""
|
| 40 |
+
JINA_SEARCH_BASE_ENDPOINT : str = "https://s.jina.ai/"
|
| 41 |
+
|
| 42 |
+
"""
|
| 43 |
+
JINA_READER_BASE_ENDPOINT : str = "https://r.jina.ai/"
|
| 44 |
+
|
| 45 |
+
This constant is used to represent the base endpoint for Jina Reader API.
|
| 46 |
+
It is defined as "https://r.jina.ai/".
|
| 47 |
+
"""
|
| 48 |
+
JINA_READER_BASE_ENDPOINT : str = "https://r.jina.ai/"
|
| 49 |
+
|
| 50 |
+
"""
|
| 51 |
+
CRYPTO_COMPARE_BASE_URL: str = "https://min-api.cryptocompare.com/"
|
| 52 |
+
|
| 53 |
+
This constant is used to represent the base URL for CryptoCompare API.
|
| 54 |
+
It is defined as "https://min-api.cryptocompare.com/".
|
| 55 |
+
"""
|
| 56 |
+
CRYPTO_COMPARE_BASE_URL: str = "https://min-api.cryptocompare.com/"
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# TTLs
|
| 60 |
+
|
| 61 |
+
SEARCH_DATA_TTL : int = ONE_WEEK_IN_SECONDS
|
src/libs/helper_functions.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import uuid
|
| 3 |
+
import hashlib
|
| 4 |
+
from ulid import ULID
|
| 5 |
+
|
| 6 |
+
def chunk_data(data : list | dict, chunk_size: int):
|
| 7 |
+
"""
|
| 8 |
+
This function takes an array and a chunk size as input, and returns a new array
|
| 9 |
+
where the original array is divided into smaller chunks of the specified size.
|
| 10 |
+
|
| 11 |
+
Parameters:
|
| 12 |
+
data (list): The original data to be chunked.
|
| 13 |
+
chunk_size (int): The size of each chunk.
|
| 14 |
+
|
| 15 |
+
Returns:
|
| 16 |
+
list: A new array containing the chunks of the original array.
|
| 17 |
+
|
| 18 |
+
Example:
|
| 19 |
+
>>> chunk_array([1, 2, 3, 4, 5, 6], 2)
|
| 20 |
+
[[1, 2], [3, 4], [5, 6]]
|
| 21 |
+
"""
|
| 22 |
+
is_object = isinstance(data, dict)
|
| 23 |
+
is_array = isinstance(data, list)
|
| 24 |
+
|
| 25 |
+
if not is_object and not is_array:
|
| 26 |
+
raise TypeError("Data must be a list or a dictionary.")
|
| 27 |
+
elif is_array:
|
| 28 |
+
return [data[i:i+chunk_size] for i in range(0, len(data), chunk_size)]
|
| 29 |
+
elif is_object:
|
| 30 |
+
items = list(data.items())
|
| 31 |
+
for i in range(0, len(items), chunk_size):
|
| 32 |
+
yield dict(items[i:i + chunk_size])
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def generate_ulid(seed: any = None) -> str:
    """
    Generate a ULID (Universally Unique Lexicographically Sortable Identifier).

    Parameters:
        seed (any, optional): Passed straight through to the ``ULID``
            constructor when supplied. Defaults to None (random ULID).
            NOTE(review): whether ``ULID`` accepts a constructor argument —
            and whether instances expose a ``generate()`` method — depends on
            which ``ulid`` distribution is installed; confirm against the
            pinned ``ulid==1.1`` dependency before relying on the seeded path.

    Returns:
        str: The generated identifier.
    """
    # No seed: build a fresh random ULID; otherwise seed the constructor.
    if seed is None:
        ulid = ULID()
    else:
        ulid = ULID(seed)

    return ulid.generate()
|
| 58 |
+
|
| 59 |
+
def create_uuid_from_string(val: str) -> str:
    """
    Derive a deterministic UUID from an arbitrary string.

    The input is hashed with MD5 and the resulting 128-bit digest is
    interpreted as a UUID, so equal inputs always yield the same identifier.

    Parameters:
        val (str): The input string.

    Returns:
        str: The canonical string form of the derived UUID.
    """
    digest_hex = hashlib.md5(val.encode("UTF-8")).hexdigest()
    derived = uuid.UUID(hex=digest_hex)
    return str(derived)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def to_snake_case(s):
    """
    Convert a string to snake_case.

    Parameters:
        s (str): The input string (e.g. CamelCase or camelCase).

    Returns:
        str: The snake_case form, e.g. ``to_snake_case('FirstName')`` ->
        ``'first_name'``. Empty input yields ``''``; fully upper-case input
        (acronyms/constants) is returned unchanged.
    """
    if not s:
        return ''

    # All-caps strings pass through untouched.
    if s.isupper():
        return s

    word_pattern = r'[A-Z]{2,}(?=[A-Z][a-z]+[0-9]*|\b)|[A-Z]?[a-z]+[0-9]*|[A-Z]|[0-9]+'
    words = re.findall(word_pattern, s)
    return '_'.join(word.lower() for word in words)
|
| 102 |
+
|
| 103 |
+
def convert_to_snakecase(original_data):
    """
    Recursively convert the keys of a dict (or list of dicts) to snake_case.

    Parameters:
        original_data (dict | list): The input dictionary or list of
            dictionaries; nested dicts/lists are converted recursively.

    Returns:
        dict | list: A new structure with all keys in snake_case format.

    Raises:
        TypeError: If the input is neither a dict nor a list.

    Example:
        >>> convert_to_snakecase({'FirstName': 'John', 'LastName': 'Doe'})
        {'first_name': 'John', 'last_name': 'Doe'}
    """
    if isinstance(original_data, list):
        # Convert every element; elements are expected to be dicts/lists.
        return [convert_to_snakecase(element) for element in original_data]
    if isinstance(original_data, dict):
        converted = {}
        for key, value in original_data.items():
            snake_key = to_snake_case(key)
            if isinstance(value, (dict, list)):
                converted[snake_key] = convert_to_snakecase(value)
            else:
                converted[snake_key] = value
        return converted
    raise TypeError("Input must be a dictionary or a list of dictionaries.")
|
src/libs/logger.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
from dotenv import load_dotenv

import logfire

load_dotenv()

# Configure logfire once at import time; the rest of the codebase imports
# `logger` from this module.
logfire.configure(
    token=os.getenv('LOGFIRE_TOKEN'),
    pydantic_plugin=logfire.PydanticPlugin(record='all'),
    # Fall back to "info" so a missing LOG_LEVEL env var no longer passes
    # None to ConsoleOptions (the original only worked when the var was set).
    console=logfire.ConsoleOptions(min_log_level=os.getenv('LOG_LEVEL', 'info'))
)
# Auto-instrument the client libraries this project uses.
logfire.instrument_redis()
logfire.instrument_httpx()
logfire.instrument_requests()

# Re-exported handle used throughout the project.
logger = logfire
|
src/libs/redis.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Redis key names, namespaced per upstream market-data source. Both sources
# currently share the same four key names, so the mapping is generated once
# per source (each source gets its own independent dict).
keys = {
    source: {
        "top_exchanges": "top_exchanges",
        "all_exchanges": "all_exchanges",
        "top_assets": "top_assets",
        "all_assets": "all_assets",
    }
    for source in ("cryptocompare", "coingecko")
}
|
src/llms/sourcegraph.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
|
| 4 |
+
from scrapegraphai.graphs import SearchGraph
|
| 5 |
+
from scrapegraphai.graphs import SmartScraperMultiGraph
|
| 6 |
+
from scrapegraphai.graphs import ScriptCreatorGraph
|
| 7 |
+
|
| 8 |
+
from src.databases.redis import REDIS_CACHED
|
| 9 |
+
from src.libs.constants import ONE_HOUR_IN_SECONDS
|
| 10 |
+
from src.libs.logger import logger
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
load_dotenv()
|
| 14 |
+
|
| 15 |
+
redis_cache = REDIS_CACHED
|
| 16 |
+
|
| 17 |
+
# Preset scrapegraphai graph configurations. The functions below accept either
# a full config dict or one of these preset names ("openai" / "groq") as a
# string; "openai" is the fallback when nothing is supplied.
default_graph_config: dict = {
    "openai": {
        "llm": {
            "api_key": os.getenv("OPENAI_API_KEY"),
            "model": "gpt-3.5-turbo",
            # "model": "gpt-4o",?
            "temperature": 0,
        }
    },
    "groq": {
        "llm": {
            "model": "groq/llama3-70b-8192",
            "api_key": os.getenv("GROQ_API_KEY"),
            "temperature": 0
        },
        # NOTE(review): embeddings point at a local Ollama server — confirm
        # one is available wherever the "groq" preset is used.
        "embeddings": {
            "model": "ollama/nomic-embed-text",
            "base_url": "http://localhost:11434",
        }
    }
}
|
| 38 |
+
|
| 39 |
+
@redis_cache(ttl=ONE_HOUR_IN_SECONDS)
@logger.instrument()
def scrape_graph(prompt: str, sources: str | list[str], config: dict | str = None, cache_ttl: int = None) -> str:
    """
    Scrape one or more web sources with a SmartScraperMultiGraph.

    Args:
        prompt (str): The prompt or query to be used for scraping.
        sources (str | list[str]): The source(s) from which to scrape data.
        config (dict | str, optional): A full graph config dict, or the name
            of a preset in ``default_graph_config``. Defaults to the "openai"
            preset.
        cache_ttl (int, optional): Consumed by the ``redis_cache`` decorator;
            not used in the function body.

    Returns:
        str: The scraped data.
    """
    logger.info({'prompt': prompt, 'sources': sources, 'config': config, 'cache_ttl': cache_ttl})

    # isinstance() replaces the original `type(x) == str and ... or ...`
    # chain, whose and/or idiom silently falls through when the looked-up
    # value is falsy.
    if isinstance(config, str):
        config = default_graph_config[config]
    else:
        config = config or default_graph_config["openai"]
    logger.debug(f"Config: {config}")

    smart_scraper_graph = SmartScraperMultiGraph(
        prompt=prompt,
        source=sources,
        config=config
    )

    result = smart_scraper_graph.run()
    logger.debug(f"Result: {result}")

    return result
|
| 69 |
+
|
| 70 |
+
@redis_cache(ttl=ONE_HOUR_IN_SECONDS)
@logger.instrument()
def search_graph(prompt: str, config: dict | str = None, cache_ttl: int = None) -> str:
    """
    Search the web for information relevant to *prompt* using a SearchGraph.

    Args:
        prompt (str): The prompt or query to be used for searching.
        config (dict | str, optional): A full graph config dict, or the name
            of a preset in ``default_graph_config``. Defaults to the "openai"
            preset.
        cache_ttl (int, optional): Consumed by the ``redis_cache`` decorator;
            not used in the function body.

    Returns:
        str: The search results.
    """
    logger.info({'prompt': prompt, 'config': config, 'cache_ttl': cache_ttl})

    # isinstance() replaces the fragile `type(x) == str and ... or ...` chain.
    if isinstance(config, str):
        config = default_graph_config[config]
    else:
        config = config or default_graph_config["openai"]
    logger.debug(f"Config: {config}")

    search_graph = SearchGraph(
        prompt=prompt,
        config=config,
    )

    result = search_graph.run()
    logger.debug(f"Result: {result}")

    return result
|
| 98 |
+
|
| 99 |
+
@redis_cache(ttl=ONE_HOUR_IN_SECONDS)
@logger.instrument()
def _create_script_graph(prompt: str, source: str, library: str, config: dict | str = None, cache_ttl: int = None) -> str:
    """
    Generate a scraping script for *source* using a ScriptCreatorGraph.

    Args:
        prompt (str): The prompt or query describing the script to generate.
        source (str): The source URL/content the script should target.
        library (str): The scraping library the generated script should use.
        config (dict | str, optional): A full graph config dict, or the name
            of a preset in ``default_graph_config``. Defaults to the "openai"
            preset.
        cache_ttl (int, optional): Consumed by the ``redis_cache`` decorator;
            not used in the function body.

    Returns:
        str: The generated script.
    """
    # isinstance() replaces the fragile `type(x) == str and ... or ...` chain.
    if isinstance(config, str):
        config = default_graph_config[config]
    else:
        config = config or default_graph_config["openai"]

    script_creator_graph = ScriptCreatorGraph(
        prompt=prompt,
        source=source,
        config=config,
        library=library
    )

    result = script_creator_graph.run()

    return result
|
src/requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
auto_mix_prep==0.2.0
|
| 2 |
+
httpx==0.27.0
|
| 3 |
+
logfire==0.42.0
|
| 4 |
+
pyjsonq==1.0.2
|
| 5 |
+
python-dotenv==1.0.1
|
| 6 |
+
scrapegraphai==1.6.0
|
| 7 |
+
ulid==1.1
|
| 8 |
+
upstash_redis==1.1.0
|
src/search_services/exa.py
ADDED
|
File without changes
|
src/search_services/jina_ai.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dotenv import load_dotenv
|
| 2 |
+
|
| 3 |
+
import httpx
|
| 4 |
+
import urllib.parse
|
| 5 |
+
from typing import Self
|
| 6 |
+
from src.libs.helper_functions import convert_to_snakecase
|
| 7 |
+
from src.databases.redis import REDIS_CACHED
|
| 8 |
+
from src.libs.constants import ONE_MINUTE_IN_SECONDS
|
| 9 |
+
from src.libs.constants import JINA_READER_BASE_ENDPOINT, JINA_SEARCH_BASE_ENDPOINT, ONE_MINUTE_IN_SECONDS
|
| 10 |
+
|
| 11 |
+
load_dotenv()
|
| 12 |
+
|
| 13 |
+
redis_cache = REDIS_CACHED
|
| 14 |
+
|
| 15 |
+
class JinaAI:
    """
    Thin HTTP client for Jina AI's hosted services.

    Wraps two endpoints:
      * search (``s.jina.ai``) — web search for a free-text query.
      * reader (``r.jina.ai``) — extraction of a single web page's content.

    Both calls return the JSON payload with keys normalized to snake_case,
    or ``None`` when the HTTP request fails. Results are cached in Redis
    for one minute via the ``redis_cache`` decorator.
    """

    def __init__(self, search_base_url: str = None, reader_base_url: str = None) -> None:
        # Fall back to the shared constants when no override is supplied.
        self.JINA_SEARCH_BASE_ENDPOINT = search_base_url or JINA_SEARCH_BASE_ENDPOINT
        self.JINA_READER_BASE_ENDPOINT = reader_base_url or JINA_READER_BASE_ENDPOINT

    @redis_cache(ttl=ONE_MINUTE_IN_SECONDS)
    def search_web_with_jina(self, search_query: str) -> dict | None:
        """
        Search the web via Jina AI.

        Args:
            search_query (str): Free-text query; URL-quoted before sending.

        Returns:
            dict | None: Snake_cased JSON payload, or None on HTTP failure.
        """
        target = f"{self.JINA_SEARCH_BASE_ENDPOINT}{urllib.parse.quote(search_query)}"

        try:
            with httpx.Client(timeout=30.0) as client:
                response = client.get(target, headers={"Accept": "application/json"})
                response.raise_for_status()
                payload = response.json()
        except httpx.HTTPError as e:
            print(f"An error occurred: {e}")
            return None
        return convert_to_snakecase(payload)

    @redis_cache(ttl=ONE_MINUTE_IN_SECONDS)
    def read_website_with_jina(self, website_url: str) -> dict | None:
        """
        Retrieve a web page's content via the Jina AI reader.

        Args:
            website_url (str): URL of the page to read (appended verbatim to
                the reader endpoint).

        Returns:
            dict | None: Snake_cased JSON payload, or None on HTTP failure.
        """
        target = f"{self.JINA_READER_BASE_ENDPOINT}{website_url}"

        try:
            with httpx.Client(timeout=30.0) as client:
                response = client.get(target, headers={"Accept": "application/json"})
                response.raise_for_status()
                payload = response.json()
        except httpx.HTTPError as e:
            print(f"An error occurred: {e}")
            return None
        return convert_to_snakecase(payload)
|
src/tools/crypto_coin_price_tool.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import Dict, Optional, Type
|
| 3 |
+
|
| 4 |
+
from src.data_sources.coin_gecko import CoinGecko
|
| 5 |
+
from src.data_sources.cryptocompare import CryptoCompare
|
| 6 |
+
|
| 7 |
+
from langchain.tools.base import BaseTool
|
| 8 |
+
|
| 9 |
+
class CryptoCoinPrice(BaseModel):
    """Represents the prices of a coin in various currencies."""
    # Maps a currency code (e.g. "usd") to the coin's price in that currency.
    prices: Dict[str, float] = Field(..., description="Prices in various currencies")
|
| 12 |
+
|
| 13 |
+
class CryptoCoinPriceData(BaseModel):
    """Encapsulates both CoinGecko and CryptoCompare price data."""
    # Keyed by coin id; each value holds that coin's per-currency prices.
    coingecko_price: Dict[str, CryptoCoinPrice] = Field(..., description="CoinGecko prices for various coins")
    crypto_compare_price: Dict[str, CryptoCoinPrice] = Field(..., description="CryptoCompare prices for various coins")
|
| 17 |
+
|
| 18 |
+
class PriceInput(BaseModel):
    """Arguments accepted by the crypto price lookup tool."""
    coin_id: str = Field(..., description="The ID of the cryptocurrency coin to retrieve prices for")
    # Defaults to "usd" when the caller does not specify a quote currency.
    vs_currency: str = Field("usd", description="The currency to compare against")
|
| 21 |
+
|
| 22 |
+
class CryptoCoinPriceOutput(BaseModel):
    """Wrapper returned by the tool's __call__: the combined price data."""
    price_data: CryptoCoinPriceData
|
| 24 |
+
|
| 25 |
+
class CryptoCoinPriceTool(BaseTool):
    """
    LangChain tool that looks up a coin's price from both CoinGecko and
    CryptoCompare and returns the two results side by side.
    """

    name = "CryptoCoinPriceTool"
    description = "Fetches price data for a given cryptocurrency coin from CoinGecko and CryptoCompare"
    args_schema: Type[BaseModel] = PriceInput
    return_direct: bool = True

    # Declared as fields: BaseTool is a pydantic model, so assigning
    # undeclared attributes in __init__ raises at runtime.
    # NOTE(review): assumes BaseTool's model config permits arbitrary types
    # for the client fields — confirm against the pinned langchain version.
    id: Optional[str] = None
    coingecko: Optional[CoinGecko] = None
    crypto_compare: Optional[CryptoCompare] = None

    def __init__(self, id: Optional[str] = None, **kwargs):
        # Bug fix: the original skipped super().__init__(), leaving the
        # underlying pydantic model uninitialized before attribute writes.
        super().__init__(**kwargs)
        self.id = id
        self.coingecko = CoinGecko()
        self.crypto_compare = CryptoCompare()

    def _run(self, coin_id: str, vs_currency: str = "usd") -> CryptoCoinPriceData:
        """Fetch *coin_id*'s price (quoted in *vs_currency*) from both sources."""
        coingecko_price_data = self.coingecko.get_coin_price(ids=[coin_id], vs_currencies=[vs_currency])
        crypto_compare_price_data = self.crypto_compare.get_coin_price(ids=[coin_id], vs_currencies=[vs_currency])

        coingecko_price = {}
        crypto_compare_price = {}

        if coin_id in coingecko_price_data:
            coingecko_price[coin_id] = CryptoCoinPrice(prices=coingecko_price_data[coin_id])
        else:
            print(f"Warning: CoinGecko data for {coin_id} not found.")

        # CryptoCompare keys its payload by upper-cased symbol.
        if coin_id.upper() in crypto_compare_price_data:
            crypto_compare_price[coin_id] = CryptoCoinPrice(prices=crypto_compare_price_data[coin_id.upper()])
        else:
            print(f"Warning: CryptoCompare data for {coin_id} not found.")

        return CryptoCoinPriceData(
            coingecko_price=coingecko_price,
            crypto_compare_price=crypto_compare_price
        )

    def __call__(self, inputs: PriceInput) -> CryptoCoinPriceOutput:
        """Validated entry point: wraps _run's result in CryptoCoinPriceOutput."""
        price_data = self._run(inputs.coin_id, inputs.vs_currency)
        return CryptoCoinPriceOutput(price_data=price_data)
|
src/xbt-core.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import httpx
|
| 3 |
+
import urllib.parse
|
| 4 |
+
from typing import Self
|
| 5 |
+
from pyjsonq import JsonQ
|
| 6 |
+
from dotenv import load_dotenv
|
| 7 |
+
|
| 8 |
+
load_dotenv()
|
| 9 |
+
|
| 10 |
+
# Jina AI endpoints: s.jina.ai serves search, r.jina.ai serves the page
# reader. The original assignments had the two hosts swapped relative to
# their names (XBTCore.__init__ and src/libs/constants.py use s=search,
# r=reader).
JINA_SEARCH_BASE_ENDPOINT = "s.jina.ai"
JINA_READER_BASE_ENDPOINT = "r.jina.ai"
|
| 12 |
+
|
| 13 |
+
class XBTCore:
    """Minimal client for Jina AI's search (s.jina.ai) and reader (r.jina.ai) services."""

    def __init__(self) -> None:
        self.value = 0
        # Bug fix: scheme added. httpx rejects scheme-less URLs with
        # UnsupportedProtocol, which is an httpx.HTTPError subclass — the
        # handlers below swallowed it, so every call silently returned None.
        self.JINA_SEARCH_BASE_ENDPOINT = "https://s.jina.ai"
        self.JINA_READER_BASE_ENDPOINT = "https://r.jina.ai"

    def search_web_with_jina(self, search_query: str = False) -> dict | None:
        """
        Search the web via Jina AI.

        Args:
            search_query (str): The query text; URL-quoted before sending.
                (The `False` default is kept for interface compatibility;
                callers should always pass a string.)

        Returns:
            dict | None: Parsed JSON response, or None on HTTP failure.
            (Return annotation fixed: the original declared -> Self.)
        """
        url = self.JINA_SEARCH_BASE_ENDPOINT
        encoded_search_query = urllib.parse.quote(search_query)

        try:
            with httpx.Client() as client:
                response = client.get(f"{url}/{encoded_search_query}")
                response.raise_for_status()
                return response.json()
        except httpx.HTTPError as e:
            print(f"An error occurred: {e}")
            return None

    def read_website_with_jina(self, website_url: str = False) -> dict | None:
        """
        Fetch a web page's content via the Jina AI reader.

        Args:
            website_url (str): URL of the page to read (appended verbatim).

        Returns:
            dict | None: Parsed JSON response, or None on HTTP failure.
        """
        url = self.JINA_READER_BASE_ENDPOINT

        try:
            with httpx.Client() as client:
                response = client.get(f"{url}/{website_url}")
                response.raise_for_status()
                return response.json()
        except httpx.HTTPError as e:
            print(f"An error occurred: {e}")
            return None
|
| 43 |
+
|