Spaces:
Sleeping
Sleeping
Create api.py
Browse files
api.py
ADDED
|
@@ -0,0 +1,697 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from typing import Dict, Any, Optional
|
| 3 |
+
import requests
|
| 4 |
+
import os
|
| 5 |
+
import time
|
| 6 |
+
# import gspread # TODO: Install with pip install gspread
|
| 7 |
+
# from slack_sdk import WebClient # TODO: Install with pip install slack-sdk
|
| 8 |
+
import smtplib
|
| 9 |
+
from email.mime.text import MIMEText
|
| 10 |
+
from email.mime.multipart import MIMEMultipart
|
| 11 |
+
from fastapi import FastAPI, Request, HTTPException
|
| 12 |
+
from fastapi.responses import JSONResponse
|
| 13 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 14 |
+
|
| 15 |
+
# FastAPI application wrapping the converted n8n workflow.
app = FastAPI(
    title="n8n2py Converted Workflow",
    description="Converted n8n workflow running as FastAPI service",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)

# Allow cross-origin requests so browser clients can reach the service.
# NOTE(review): wildcard origins together with allow_credentials=True is
# very permissive - restrict allow_origins before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure this for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# ---------------------------------------------------------------------------
# Converted n8n workflow.
#
# NOTE(review): the n8n2py converter emitted raw node-parameter JSON and
# references to an undefined `node` variable directly into the function
# body, which made the module unparseable.  Those artifacts are replaced
# below by comments summarising the untranslated nodes, and the nine
# near-identical HTTP request/response copies are folded into _call_api().
# ---------------------------------------------------------------------------

BLOTATO_POSTS_URL = "https://backend.blotato.com/v2/posts"
BLOTATO_MEDIA_URL = "https://backend.blotato.com/v2/media"

# Social networks published to through Blotato.  Bluesky and Pinterest were
# deactivated in the source workflow (Blotato API support pending), but the
# converter still emitted requests for them, so they are kept here.
_BLOTATO_PLATFORMS = (
    "Instagram",
    "Youtube",
    "Facebook",
    "Threads",
    "Linkedin",
    "Twitter",
    "Tiktok",
    "Bluesky",
    "Pinterest",
)


def _call_api(name: str, method: str, url: str,
              payload: Optional[Dict[str, Any]] = None) -> Optional[Dict[str, Any]]:
    '''
    Perform one HTTP call for a converted workflow node.

    Args:
        name: Human-readable node name, used only for log output.
        method: HTTP method, e.g. "GET" or "POST".
        url: Target endpoint URL.
        payload: Optional JSON request body.

    Returns:
        The decoded JSON response on HTTP 200, otherwise None.
    '''
    headers = {"Content-Type": "application/json"}
    response = requests.request(method, url, headers=headers, json=payload)
    if response.status_code == 200:
        print(f"OK {name} successful: {response.status_code}")
        return response.json()
    print(f"ERROR {name} failed: {response.status_code}")
    return None


def run_workflow(data: Dict[str, Any]) -> Dict[str, Any]:
    '''
    Execute the converted n8n workflow
    Generated from n8n workflow conversion using n8n2py

    Args:
        data: Input data for the workflow

    Returns:
        Dict containing execution results
    '''
    # Local import replaces the original __import__('datetime') hack.
    from datetime import datetime

    print("Starting workflow execution with data:", data)

    try:
        # ------------------------------------------------------------------
        # Nodes the converter could not translate (still TODO):
        #   * n8n-nodes-base.scheduleTrigger  - daily trigger at 10:00
        #   * @n8n/n8n-nodes-langchain.agent  - fetch HN stories, write script
        #   * @n8n/n8n-nodes-langchain.openAi - long/short captions + image
        #   * @n8n/n8n-nodes-langchain.lmChatOpenAi - gpt-4o-mini chat model
        #   * n8n-nodes-base.hackerNewsTool   - fetch HN front page / article
        #   * n8n-nodes-base.wait             - 8 minute render pause
        #   (stickyNote nodes are documentation-only and need no code)
        # ------------------------------------------------------------------

        # Heygen: request avatar-video generation.
        # NOTE(review): both Heygen calls read the same HTTP_BASE_URL env var
        # with different defaults, as in the generated original - confirm
        # whether a single override for both endpoints is really intended.
        create_url = os.getenv("HTTP_BASE_URL", "https://api.heygen.com/v2/video/generate")
        create_avatar_video_data = _call_api("Create Avatar Video", "POST", create_url)

        # The source workflow waits 8 minutes here for rendering; re-enable
        # once the Wait node is implemented:
        # time.sleep(8 * 60)

        status_url = os.getenv("HTTP_BASE_URL", "https://api.heygen.com/v1/video_status.get")
        get_avatar_video_data = _call_api("Get Avatar Video", "GET", status_url)

        # Blotato: upload the rendered video and the generated image.
        # TODO: the converter emitted no request payloads - the media URL
        # from the Heygen response must be supplied in the JSON body.
        upload_to_blotato_data = _call_api("Upload to Blotato", "POST", BLOTATO_MEDIA_URL)
        upload_to_blotato_image_data = _call_api(
            "Upload to Blotato - Image", "POST", BLOTATO_MEDIA_URL
        )

        # Blotato: publish to every configured platform.  One loop replaces
        # the nine near-identical request/response blocks the converter
        # generated.  TODO: add per-post payloads (media id + caption).
        publish_results = {
            platform: _call_api(f"[{platform}] Publish via Blotato", "POST", BLOTATO_POSTS_URL)
            for platform in _BLOTATO_PLATFORMS
        }

        # Return the workflow results.
        result = {
            "status": "success",
            "message": "Workflow executed successfully",
            "data": data,
            "timestamp": json.dumps({"executed_at": str(datetime.now())}),
        }
        print("Workflow completed successfully!")
        return result

    except Exception as e:
        error_msg = f"Workflow execution failed: {e}"
        print(f"ERROR {error_msg}")
        import traceback
        traceback.print_exc()

        return {
            "status": "error",
            "message": error_msg,
            "error_type": type(e).__name__,
            "data": data,
            "timestamp": json.dumps({"executed_at": str(datetime.now())}),
        }
+
@app.get("/")
async def root():
    '''Root endpoint describing the service and the routes it exposes.'''
    # Route catalogue returned to callers probing the service root.
    endpoint_map = {
        "POST /run": "Execute the workflow",
        "GET /docs": "Interactive API documentation",
        "GET /redoc": "Alternative API documentation",
        "GET /health": "Health check endpoint",
    }
    return {
        "message": "n8n2py converted workflow is running",
        "status": "healthy",
        "service": "n8n2py-workflow",
        "endpoints": endpoint_map,
        "version": "1.0.0",
    }
|
| 614 |
+
|
| 615 |
+
@app.get("/health")
async def health_check():
    '''Liveness probe: reports the service name and the current wall-clock time.'''
    import datetime

    # Same payload shape the monitoring side expects: status, service, timestamp.
    return {
        "status": "healthy",
        "service": "n8n2py-workflow",
        "timestamp": str(datetime.datetime.now()),
    }
|
| 623 |
+
|
| 624 |
+
@app.post("/run")
async def execute_workflow(request: Request):
    '''
    Execute the converted n8n workflow

    Send a POST request with JSON data to trigger the workflow execution.
    The workflow will process the input data and return results.

    Example request:
    ```json
    {
        "parameters": {
            "key": "value",
            "another_key": "another_value"
        }
    }
    ```
    '''
    # Guard clause: reject a non-JSON body with a 400 before doing any work.
    try:
        data = await request.json()
    except Exception as parse_err:
        detail = {
            "error": "Invalid JSON in request body",
            "message": str(parse_err),
            "type": "validation_error",
        }
        raise HTTPException(status_code=400, detail=detail)

    # Run the converted workflow with the parsed payload.
    result = run_workflow(data)

    # Map the workflow's own status field onto the HTTP status code:
    # "success" -> 200, anything else -> 500.
    http_status = 200 if result["status"] == "success" else 500
    return JSONResponse(status_code=http_status, content=result)
|
| 665 |
+
|
| 666 |
+
@app.get("/openapi.json")
async def get_openapi():
    '''Get OpenAPI specification'''
    # Function-local import, aliased so it does not shadow this endpoint's
    # own name inside the function body.
    from fastapi.openapi.utils import get_openapi as build_openapi

    return build_openapi(
        title="n8n2py Converted Workflow",
        version="1.0.0",
        description="API for converted n8n workflow",
        routes=app.routes,
    )
|
| 676 |
+
|
| 677 |
+
if __name__ == "__main__":
    import uvicorn
    import os

    # Server configuration, overridable through environment variables.
    host = os.getenv("API_HOST", "0.0.0.0")
    port = int(os.getenv("API_PORT", "8000"))
    debug = os.getenv("API_DEBUG", "false").lower() == "true"

    # Startup banner (message text preserved exactly as in the source).
    print("π Starting n8n2py converted workflow server...")
    for banner_line in (
        f"π API documentation: http://{host}:{port}/docs",
        f"π Health check: http://{host}:{port}/health",
        f"β‘ Execute workflow: POST http://{host}:{port}/run",
    ):
        print(banner_line)

    # NOTE(review): uvicorn's reload option needs an import string
    # ("module:app"), not an app object — with an object it warns and
    # auto-reload is effectively a no-op. Confirm before relying on API_DEBUG.
    uvicorn.run(
        app,
        host=host,
        port=port,
        reload=debug,
        log_level="info",
    )