{
"data": {
"edges": [
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "ChatOutput",
"id": "ChatOutput-xch66",
"name": "message",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "comment",
"id": "Prompt-wV87t",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "xy-edge__ChatOutput-xch66{œdataTypeœ:œChatOutputœ,œidœ:œChatOutput-xch66œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt-wV87t{œfieldNameœ:œcommentœ,œidœ:œPrompt-wV87tœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"selected": false,
"source": "ChatOutput-xch66",
"sourceHandle": "{œdataTypeœ:œChatOutputœ,œidœ:œChatOutput-xch66œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}",
"target": "Prompt-wV87t",
"targetHandle": "{œfieldNameœ:œcommentœ,œidœ:œPrompt-wV87tœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "YouTubeCommentsComponent",
"id": "YouTubeCommentsComponent-nxbZY",
"name": "comments",
"output_types": [
"DataFrame"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-xch66",
"inputTypes": [
"Data",
"DataFrame",
"Message"
],
"type": "other"
}
},
"id": "xy-edge__YouTubeCommentsComponent-nxbZY{œdataTypeœ:œYouTubeCommentsComponentœ,œidœ:œYouTubeCommentsComponent-nxbZYœ,œnameœ:œcommentsœ,œoutput_typesœ:[œDataFrameœ]}-ChatOutput-xch66{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-xch66œ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}",
"selected": false,
"source": "YouTubeCommentsComponent-nxbZY",
"sourceHandle": "{œdataTypeœ:œYouTubeCommentsComponentœ,œidœ:œYouTubeCommentsComponent-nxbZYœ,œnameœ:œcommentsœ,œoutput_typesœ:[œDataFrameœ]}",
"target": "ChatOutput-xch66",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-xch66œ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "Prompt",
"id": "Prompt-wV87t",
"name": "prompt",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "GoogleGenerativeAIModel-kaDHv",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "xy-edge__Prompt-wV87t{œdataTypeœ:œPromptœ,œidœ:œPrompt-wV87tœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-GoogleGenerativeAIModel-kaDHv{œfieldNameœ:œinput_valueœ,œidœ:œGoogleGenerativeAIModel-kaDHvœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"selected": false,
"source": "Prompt-wV87t",
"sourceHandle": "{œdataTypeœ:œPromptœ,œidœ:œPrompt-wV87tœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}",
"target": "GoogleGenerativeAIModel-kaDHv",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œGoogleGenerativeAIModel-kaDHvœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "GoogleGenerativeAIModel",
"id": "GoogleGenerativeAIModel-kaDHv",
"name": "text_output",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-WZN6f",
"inputTypes": [
"Data",
"DataFrame",
"Message"
],
"type": "other"
}
},
"id": "xy-edge__GoogleGenerativeAIModel-kaDHv{œdataTypeœ:œGoogleGenerativeAIModelœ,œidœ:œGoogleGenerativeAIModel-kaDHvœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-WZN6f{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-WZN6fœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}",
"selected": false,
"source": "GoogleGenerativeAIModel-kaDHv",
"sourceHandle": "{œdataTypeœ:œGoogleGenerativeAIModelœ,œidœ:œGoogleGenerativeAIModel-kaDHvœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}",
"target": "ChatOutput-WZN6f",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-WZN6fœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}"
}
],
"nodes": [
{
"data": {
"description": "Retrieves detailed information and statistics about YouTube videos.",
"display_name": "YouTube Video Details",
"id": "YouTubeVideoDetailsComponent-Y4gxm",
"node": {
"base_classes": [
"DataFrame"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Retrieves detailed information and statistics about YouTube videos.",
"display_name": "YouTube Video Details",
"documentation": "",
"edited": false,
"field_order": [
"video_url",
"api_key",
"include_statistics",
"include_content_details",
"include_tags",
"include_thumbnails"
],
"frozen": false,
"icon": "YouTube",
"legacy": false,
"lf_version": "1.3.2",
"metadata": {},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Video Data",
"method": "get_video_details",
"name": "video_data",
"options": null,
"required_inputs": null,
"selected": "DataFrame",
"tool_mode": true,
"types": [
"DataFrame"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "YouTube API Key",
"dynamic": false,
"info": "Your YouTube Data API key.",
"input_types": [
"Message"
],
"load_from_db": false,
"name": "api_key",
"password": true,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "str",
"value": ""
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from contextlib import contextmanager\n\nimport googleapiclient\nimport pandas as pd\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nfrom langflow.custom import Component\nfrom langflow.inputs import BoolInput, MessageTextInput, SecretStrInput\nfrom langflow.schema import DataFrame\nfrom langflow.template import Output\n\n\nclass YouTubeVideoDetailsComponent(Component):\n \"\"\"A component that retrieves detailed information about YouTube videos.\"\"\"\n\n display_name: str = \"YouTube Video Details\"\n description: str = \"Retrieves detailed information and statistics about YouTube videos.\"\n icon: str = \"YouTube\"\n\n inputs = [\n MessageTextInput(\n name=\"video_url\",\n display_name=\"Video URL\",\n info=\"The URL of the YouTube video.\",\n tool_mode=True,\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"YouTube API Key\",\n info=\"Your YouTube Data API key.\",\n required=True,\n ),\n BoolInput(\n name=\"include_statistics\",\n display_name=\"Include Statistics\",\n value=True,\n info=\"Include video statistics (views, likes, comments).\",\n ),\n BoolInput(\n name=\"include_content_details\",\n display_name=\"Include Content Details\",\n value=True,\n info=\"Include video duration, quality, and age restriction info.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_tags\",\n display_name=\"Include Tags\",\n value=True,\n info=\"Include video tags and keywords.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_thumbnails\",\n display_name=\"Include Thumbnails\",\n value=True,\n info=\"Include video thumbnail URLs in different resolutions.\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(name=\"video_data\", display_name=\"Video Data\", method=\"get_video_details\"),\n ]\n\n API_FORBIDDEN = 403\n VIDEO_NOT_FOUND = 404\n\n @contextmanager\n def youtube_client(self):\n \"\"\"Context manager for YouTube API client.\"\"\"\n client = build(\"youtube\", \"v3\", 
developerKey=self.api_key)\n try:\n yield client\n finally:\n client.close()\n\n def _extract_video_id(self, video_url: str) -> str:\n \"\"\"Extracts the video ID from a YouTube URL.\"\"\"\n import re\n\n patterns = [\n r\"(?:youtube\\.com\\/watch\\?v=|youtu.be\\/|youtube.com\\/embed\\/)([^&\\n?#]+)\",\n r\"youtube.com\\/shorts\\/([^&\\n?#]+)\",\n ]\n\n for pattern in patterns:\n match = re.search(pattern, video_url)\n if match:\n return match.group(1)\n\n return video_url.strip()\n\n def _format_duration(self, duration: str) -> str:\n \"\"\"Formats the ISO 8601 duration to a readable format.\"\"\"\n import re\n\n hours = 0\n minutes = 0\n seconds = 0\n\n hours_match = re.search(r\"(\\d+)H\", duration)\n minutes_match = re.search(r\"(\\d+)M\", duration)\n seconds_match = re.search(r\"(\\d+)S\", duration)\n\n if hours_match:\n hours = int(hours_match.group(1))\n if minutes_match:\n minutes = int(minutes_match.group(1))\n if seconds_match:\n seconds = int(seconds_match.group(1))\n\n if hours > 0:\n return f\"{hours:02d}:{minutes:02d}:{seconds:02d}\"\n return f\"{minutes:02d}:{seconds:02d}\"\n\n def get_video_details(self) -> DataFrame:\n \"\"\"Retrieves detailed information about a YouTube video and returns as DataFrame.\"\"\"\n try:\n with self.youtube_client() as youtube:\n # Extract video ID\n video_id = self._extract_video_id(self.video_url)\n\n # Prepare parts for the API request\n parts = [\"snippet\"]\n if self.include_statistics:\n parts.append(\"statistics\")\n if self.include_content_details:\n parts.append(\"contentDetails\")\n\n # Get video information\n video_response = youtube.videos().list(part=\",\".join(parts), id=video_id).execute()\n\n if not video_response[\"items\"]:\n return DataFrame(pd.DataFrame({\"error\": [\"Video not found\"]}))\n\n video_info = video_response[\"items\"][0]\n snippet = video_info[\"snippet\"]\n\n # Build video data dictionary\n video_data = {\n \"video_id\": [video_id],\n \"url\": 
[f\"https://www.youtube.com/watch?v={video_id}\"],\n \"title\": [snippet[\"title\"]],\n \"description\": [snippet[\"description\"]],\n \"published_at\": [snippet[\"publishedAt\"]],\n \"channel_id\": [snippet[\"channelId\"]],\n \"channel_title\": [snippet[\"channelTitle\"]],\n \"category_id\": [snippet.get(\"categoryId\", \"Unknown\")],\n \"live_broadcast_content\": [snippet.get(\"liveBroadcastContent\", \"none\")],\n }\n\n # Add thumbnails if requested\n if self.include_thumbnails:\n for size, thumb in snippet[\"thumbnails\"].items():\n video_data[f\"thumbnail_{size}_url\"] = [thumb[\"url\"]]\n video_data[f\"thumbnail_{size}_width\"] = [thumb.get(\"width\", 0)]\n video_data[f\"thumbnail_{size}_height\"] = [thumb.get(\"height\", 0)]\n\n # Add tags if requested\n if self.include_tags and \"tags\" in snippet:\n video_data[\"tags\"] = [\", \".join(snippet[\"tags\"])]\n video_data[\"tags_count\"] = [len(snippet[\"tags\"])]\n\n # Add statistics if requested\n if self.include_statistics and \"statistics\" in video_info:\n stats = video_info[\"statistics\"]\n video_data.update(\n {\n \"view_count\": [int(stats.get(\"viewCount\", 0))],\n \"like_count\": [int(stats.get(\"likeCount\", 0))],\n \"favorite_count\": [int(stats.get(\"favoriteCount\", 0))],\n \"comment_count\": [int(stats.get(\"commentCount\", 0))],\n }\n )\n\n # Add content details if requested\n if self.include_content_details and \"contentDetails\" in video_info:\n content_details = video_info[\"contentDetails\"]\n video_data.update(\n {\n \"duration\": [self._format_duration(content_details[\"duration\"])],\n \"dimension\": [content_details.get(\"dimension\", \"2d\")],\n \"definition\": [content_details.get(\"definition\", \"hd\").upper()],\n \"has_captions\": [content_details.get(\"caption\", \"false\") == \"true\"],\n \"licensed_content\": [content_details.get(\"licensedContent\", False)],\n \"projection\": [content_details.get(\"projection\", \"rectangular\")],\n \"has_custom_thumbnails\": 
[content_details.get(\"hasCustomThumbnail\", False)],\n }\n )\n\n # Add content rating if available\n if \"contentRating\" in content_details:\n rating_info = content_details[\"contentRating\"]\n video_data[\"content_rating\"] = [str(rating_info)]\n\n # Create DataFrame with organized columns\n video_df = pd.DataFrame(video_data)\n\n # Organize columns in logical groups\n basic_cols = [\n \"video_id\",\n \"title\",\n \"url\",\n \"channel_id\",\n \"channel_title\",\n \"published_at\",\n \"category_id\",\n \"live_broadcast_content\",\n \"description\",\n ]\n\n stat_cols = [\"view_count\", \"like_count\", \"favorite_count\", \"comment_count\"]\n\n content_cols = [\n \"duration\",\n \"dimension\",\n \"definition\",\n \"has_captions\",\n \"licensed_content\",\n \"projection\",\n \"has_custom_thumbnails\",\n \"content_rating\",\n ]\n\n tag_cols = [\"tags\", \"tags_count\"]\n\n thumb_cols = [col for col in video_df.columns if col.startswith(\"thumbnail_\")]\n\n # Reorder columns based on what's included\n ordered_cols = basic_cols.copy()\n\n if self.include_statistics:\n ordered_cols.extend([col for col in stat_cols if col in video_df.columns])\n\n if self.include_content_details:\n ordered_cols.extend([col for col in content_cols if col in video_df.columns])\n\n if self.include_tags:\n ordered_cols.extend([col for col in tag_cols if col in video_df.columns])\n\n if self.include_thumbnails:\n ordered_cols.extend(sorted(thumb_cols))\n\n # Add any remaining columns\n remaining_cols = [col for col in video_df.columns if col not in ordered_cols]\n ordered_cols.extend(remaining_cols)\n\n return DataFrame(video_df[ordered_cols])\n\n except (HttpError, googleapiclient.errors.HttpError) as e:\n error_message = f\"YouTube API error: {e!s}\"\n if e.resp.status == self.API_FORBIDDEN:\n error_message = \"API quota exceeded or access forbidden.\"\n elif e.resp.status == self.VIDEO_NOT_FOUND:\n error_message = \"Video not found.\"\n\n return DataFrame(pd.DataFrame({\"error\": 
[error_message]}))\n\n except KeyError as e:\n return DataFrame(pd.DataFrame({\"error\": [str(e)]}))\n"
},
"include_content_details": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Include Content Details",
"dynamic": false,
"info": "Include video duration, quality, and age restriction info.",
"list": false,
"list_add_label": "Add More",
"name": "include_content_details",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"include_statistics": {
"_input_type": "BoolInput",
"advanced": false,
"display_name": "Include Statistics",
"dynamic": false,
"info": "Include video statistics (views, likes, comments).",
"list": false,
"list_add_label": "Add More",
"name": "include_statistics",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"include_tags": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Include Tags",
"dynamic": false,
"info": "Include video tags and keywords.",
"list": false,
"list_add_label": "Add More",
"name": "include_tags",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"include_thumbnails": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Include Thumbnails",
"dynamic": false,
"info": "Include video thumbnail URLs in different resolutions.",
"list": false,
"list_add_label": "Add More",
"name": "include_thumbnails",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"video_url": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Video URL",
"dynamic": false,
"info": "The URL of the YouTube video.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "video_url",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "https://youtu.be/LLAZUTbc97I?si=hngxho6-_X5oBqlu"
}
},
"tool_mode": false
},
"showNode": true,
"type": "YouTubeVideoDetailsComponent"
},
"dragging": false,
"id": "YouTubeVideoDetailsComponent-Y4gxm",
"measured": {
"height": 372,
"width": 320
},
"position": {
"x": 28.819024012888264,
"y": 91.69462709761478
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "YouTubeCommentsComponent-nxbZY",
"node": {
"base_classes": [
"DataFrame"
],
"beta": false,
"category": "youtube",
"conditional_paths": [],
"custom_fields": {},
"description": "Retrieves and analyzes comments from YouTube videos.",
"display_name": "YouTube Comments",
"documentation": "",
"edited": false,
"field_order": [
"video_url",
"api_key",
"max_results",
"sort_by",
"include_replies",
"include_metrics"
],
"frozen": false,
"icon": "YouTube",
"key": "YouTubeCommentsComponent",
"legacy": false,
"lf_version": "1.3.2",
"metadata": {},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Comments",
"method": "get_video_comments",
"name": "comments",
"selected": "DataFrame",
"tool_mode": true,
"types": [
"DataFrame"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"score": 0.04695651050810526,
"template": {
"_type": "Component",
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "YouTube API Key",
"dynamic": false,
"info": "Your YouTube Data API key.",
"input_types": [
"Message"
],
"load_from_db": false,
"name": "api_key",
"password": true,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "str",
"value": ""
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from contextlib import contextmanager\n\nimport pandas as pd\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nfrom langflow.custom import Component\nfrom langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema import DataFrame\nfrom langflow.template import Output\n\n\nclass YouTubeCommentsComponent(Component):\n \"\"\"A component that retrieves comments from YouTube videos.\"\"\"\n\n display_name: str = \"YouTube Comments\"\n description: str = \"Retrieves and analyzes comments from YouTube videos.\"\n icon: str = \"YouTube\"\n\n # Constants\n COMMENTS_DISABLED_STATUS = 403\n NOT_FOUND_STATUS = 404\n API_MAX_RESULTS = 100\n\n inputs = [\n MessageTextInput(\n name=\"video_url\",\n display_name=\"Video URL\",\n info=\"The URL of the YouTube video to get comments from.\",\n tool_mode=True,\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"YouTube API Key\",\n info=\"Your YouTube Data API key.\",\n required=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n value=20,\n info=\"The maximum number of comments to return.\",\n ),\n DropdownInput(\n name=\"sort_by\",\n display_name=\"Sort By\",\n options=[\"time\", \"relevance\"],\n value=\"relevance\",\n info=\"Sort comments by time or relevance.\",\n ),\n BoolInput(\n name=\"include_replies\",\n display_name=\"Include Replies\",\n value=False,\n info=\"Whether to include replies to comments.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_metrics\",\n display_name=\"Include Metrics\",\n value=True,\n info=\"Include metrics like like count and reply count.\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(name=\"comments\", display_name=\"Comments\", method=\"get_video_comments\"),\n ]\n\n def _extract_video_id(self, video_url: str) -> str:\n \"\"\"Extracts the video ID from a YouTube URL.\"\"\"\n import re\n\n patterns = [\n 
r\"(?:youtube\\.com\\/watch\\?v=|youtu.be\\/|youtube.com\\/embed\\/)([^&\\n?#]+)\",\n r\"youtube.com\\/shorts\\/([^&\\n?#]+)\",\n ]\n\n for pattern in patterns:\n match = re.search(pattern, video_url)\n if match:\n return match.group(1)\n\n return video_url.strip()\n\n def _process_reply(self, reply: dict, parent_id: str, *, include_metrics: bool = True) -> dict:\n \"\"\"Process a single reply comment.\"\"\"\n reply_snippet = reply[\"snippet\"]\n reply_data = {\n \"comment_id\": reply[\"id\"],\n \"parent_comment_id\": parent_id,\n \"author\": reply_snippet[\"authorDisplayName\"],\n \"text\": reply_snippet[\"textDisplay\"],\n \"published_at\": reply_snippet[\"publishedAt\"],\n \"is_reply\": True,\n }\n if include_metrics:\n reply_data[\"like_count\"] = reply_snippet[\"likeCount\"]\n reply_data[\"reply_count\"] = 0 # Replies can't have replies\n\n return reply_data\n\n def _process_comment(\n self, item: dict, *, include_metrics: bool = True, include_replies: bool = False\n ) -> list[dict]:\n \"\"\"Process a single comment thread.\"\"\"\n comment = item[\"snippet\"][\"topLevelComment\"][\"snippet\"]\n comment_id = item[\"snippet\"][\"topLevelComment\"][\"id\"]\n\n # Basic comment data\n processed_comments = [\n {\n \"comment_id\": comment_id,\n \"parent_comment_id\": \"\", # Empty for top-level comments\n \"author\": comment[\"authorDisplayName\"],\n \"author_channel_url\": comment.get(\"authorChannelUrl\", \"\"),\n \"text\": comment[\"textDisplay\"],\n \"published_at\": comment[\"publishedAt\"],\n \"updated_at\": comment[\"updatedAt\"],\n \"is_reply\": False,\n }\n ]\n\n # Add metrics if requested\n if include_metrics:\n processed_comments[0].update(\n {\n \"like_count\": comment[\"likeCount\"],\n \"reply_count\": item[\"snippet\"][\"totalReplyCount\"],\n }\n )\n\n # Add replies if requested\n if include_replies and item[\"snippet\"][\"totalReplyCount\"] > 0 and \"replies\" in item:\n for reply in item[\"replies\"][\"comments\"]:\n reply_data = 
self._process_reply(reply, parent_id=comment_id, include_metrics=include_metrics)\n processed_comments.append(reply_data)\n\n return processed_comments\n\n @contextmanager\n def youtube_client(self):\n \"\"\"Context manager for YouTube API client.\"\"\"\n client = build(\"youtube\", \"v3\", developerKey=self.api_key)\n try:\n yield client\n finally:\n client.close()\n\n def get_video_comments(self) -> DataFrame:\n \"\"\"Retrieves comments from a YouTube video and returns as DataFrame.\"\"\"\n try:\n # Extract video ID from URL\n video_id = self._extract_video_id(self.video_url)\n\n # Use context manager for YouTube API client\n with self.youtube_client() as youtube:\n comments_data = []\n results_count = 0\n request = youtube.commentThreads().list(\n part=\"snippet,replies\",\n videoId=video_id,\n maxResults=min(self.API_MAX_RESULTS, self.max_results),\n order=self.sort_by,\n textFormat=\"plainText\",\n )\n\n while request and results_count < self.max_results:\n response = request.execute()\n\n for item in response.get(\"items\", []):\n if results_count >= self.max_results:\n break\n\n comments = self._process_comment(\n item, include_metrics=self.include_metrics, include_replies=self.include_replies\n )\n comments_data.extend(comments)\n results_count += 1\n\n # Get the next page if available and needed\n if \"nextPageToken\" in response and results_count < self.max_results:\n request = youtube.commentThreads().list(\n part=\"snippet,replies\",\n videoId=video_id,\n maxResults=min(self.API_MAX_RESULTS, self.max_results - results_count),\n order=self.sort_by,\n textFormat=\"plainText\",\n pageToken=response[\"nextPageToken\"],\n )\n else:\n request = None\n\n # Convert to DataFrame\n comments_df = pd.DataFrame(comments_data)\n\n # Add video metadata\n comments_df[\"video_id\"] = video_id\n comments_df[\"video_url\"] = self.video_url\n\n # Sort columns for better organization\n column_order = [\n \"video_id\",\n \"video_url\",\n \"comment_id\",\n 
\"parent_comment_id\",\n \"is_reply\",\n \"author\",\n \"author_channel_url\",\n \"text\",\n \"published_at\",\n \"updated_at\",\n ]\n\n if self.include_metrics:\n column_order.extend([\"like_count\", \"reply_count\"])\n\n comments_df = comments_df[column_order]\n\n return DataFrame(comments_df)\n\n except HttpError as e:\n error_message = f\"YouTube API error: {e!s}\"\n if e.resp.status == self.COMMENTS_DISABLED_STATUS:\n error_message = \"Comments are disabled for this video or API quota exceeded.\"\n elif e.resp.status == self.NOT_FOUND_STATUS:\n error_message = \"Video not found.\"\n\n return DataFrame(pd.DataFrame({\"error\": [error_message]}))\n"
},
"include_metrics": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Include Metrics",
"dynamic": false,
"info": "Include metrics like like count and reply count.",
"list": false,
"list_add_label": "Add More",
"name": "include_metrics",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"include_replies": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Include Replies",
"dynamic": false,
"info": "Whether to include replies to comments.",
"list": false,
"list_add_label": "Add More",
"name": "include_replies",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"max_results": {
"_input_type": "IntInput",
"advanced": false,
"display_name": "Max Results",
"dynamic": false,
"info": "The maximum number of comments to return.",
"list": false,
"list_add_label": "Add More",
"name": "max_results",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 20
},
"sort_by": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Sort By",
"dynamic": false,
"info": "Sort comments by time or relevance.",
"name": "sort_by",
"options": [
"time",
"relevance"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "relevance"
},
"video_url": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Video URL",
"dynamic": false,
"info": "The URL of the YouTube video to get comments from.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "video_url",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "https://youtu.be/LLAZUTbc97I?si=hngxho6-_X5oBqlu"
}
},
"tool_mode": false
},
"showNode": true,
"type": "YouTubeCommentsComponent"
},
"dragging": false,
"id": "YouTubeCommentsComponent-nxbZY",
"measured": {
"height": 492,
"width": 320
},
"position": {
"x": 442.1226546434905,
"y": 34.699977181183414
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "Prompt-wV87t",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {
"template": [
"comment"
]
},
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt",
"documentation": "",
"edited": false,
"error": null,
"field_order": [
"template",
"tool_placeholder"
],
"frozen": false,
"full_path": null,
"icon": "prompts",
"is_composition": null,
"is_input": null,
"is_output": null,
"legacy": false,
"lf_version": "1.3.2",
"metadata": {},
"minimized": false,
"name": "",
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Prompt Message",
"hidden": null,
"method": "build_prompt",
"name": "prompt",
"options": null,
"required_inputs": null,
"selected": "Message",
"tool_mode": true,
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"priority": null,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n 
frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"comment": {
"advanced": false,
"display_name": "comment",
"dynamic": false,
"field_type": "str",
"fileTypes": [],
"file_path": "",
"info": "",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "comment",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": ""
},
"template": {
"_input_type": "PromptInput",
"advanced": false,
"display_name": "Template",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"type": "prompt",
"value": "Analyze the sentiment (Positive, Negative, Neutral) of the following YouTube comment:\n\"{comment}\"\nAlso mention the like count if available.\n"
},
"tool_placeholder": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Tool Placeholder",
"dynamic": false,
"info": "A placeholder input for tool mode.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "tool_placeholder",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"showNode": true,
"type": "Prompt"
},
"dragging": false,
"id": "Prompt-wV87t",
"measured": {
"height": 379,
"width": 320
},
"position": {
"x": 1100.1143846063583,
"y": 35.18781696458764
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "ChatOutput-xch66",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"category": "outputs",
"conditional_paths": [],
"custom_fields": {},
"description": "Display a chat message in the Playground.",
"display_name": "Chat Output",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"should_store_message",
"sender",
"sender_name",
"session_id",
"data_template",
"background_color",
"chat_icon",
"text_color",
"clean_data"
],
"frozen": false,
"icon": "MessagesSquare",
"key": "ChatOutput",
"legacy": false,
"lf_version": "1.3.2",
"metadata": {},
"minimized": true,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Message",
"method": "message_response",
"name": "message",
"selected": "Message",
"tool_mode": true,
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"score": 0.00012027401062119145,
"template": {
"_type": "Component",
"background_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Background Color",
"dynamic": false,
"info": "The background color of the icon.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "background_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"chat_icon": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Icon",
"dynamic": false,
"info": "The icon of the message.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "chat_icon",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"clean_data": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Basic Clean Data",
"dynamic": false,
"info": "Whether to clean the data",
"list": false,
"list_add_label": "Add More",
"name": "clean_data",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from collections.abc import Generator\nfrom typing import Any\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n 
message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def _safe_convert(self, data: Any) -> str:\n \"\"\"Safely convert input data to string.\"\"\"\n try:\n if isinstance(data, str):\n return data\n if isinstance(data, Message):\n return data.get_text()\n if isinstance(data, Data):\n if data.get_text() is None:\n msg = \"Empty Data object\"\n raise ValueError(msg)\n return data.get_text()\n if isinstance(data, DataFrame):\n if self.clean_data:\n # Remove empty rows\n data = data.dropna(how=\"all\")\n # Remove empty lines in each cell\n data = data.replace(r\"^\\s*$\", \"\", regex=True)\n # Replace multiple newlines with a single newline\n data = 
data.replace(r\"\\n+\", \"\\n\", regex=True)\n\n # Replace pipe characters to avoid markdown table issues\n processed_data = data.replace(r\"\\|\", r\"\\\\|\", regex=True)\n\n processed_data = processed_data.map(\n lambda x: str(x).replace(\"\\n\", \"
\") if isinstance(x, str) else x\n )\n\n return processed_data.to_markdown(index=False)\n return str(data)\n except (ValueError, TypeError, AttributeError) as e:\n msg = f\"Error converting data: {e!s}\"\n raise ValueError(msg) from e\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([self._safe_convert(item) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return self._safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Data Template",
"dynamic": false,
"info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "data_template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "{text}"
},
"input_value": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Text",
"dynamic": false,
"info": "Message to be passed as output.",
"input_types": [
"Data",
"DataFrame",
"Message"
],
"list": false,
"list_add_label": "Add More",
"name": "input_value",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Sender Type",
"dynamic": false,
"info": "Type of sender.",
"name": "sender",
"options": [
"Machine",
"User"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Machine"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Name of the sender.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "AI"
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"should_store_message": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Store Messages",
"dynamic": false,
"info": "Store the message in the history.",
"list": false,
"list_add_label": "Add More",
"name": "should_store_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"text_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Text Color",
"dynamic": false,
"info": "The text color of the name",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "text_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"showNode": false,
"type": "ChatOutput"
},
"dragging": false,
"id": "ChatOutput-xch66",
"measured": {
"height": 65,
"width": 192
},
"position": {
"x": 851.9786338622672,
"y": 86.61981411123915
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "ChatOutput-WZN6f",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Display a chat message in the Playground.",
"display_name": "Chat Output",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"should_store_message",
"sender",
"sender_name",
"session_id",
"data_template",
"background_color",
"chat_icon",
"text_color",
"clean_data"
],
"frozen": false,
"icon": "MessagesSquare",
"legacy": false,
"lf_version": "1.3.2",
"metadata": {},
"minimized": true,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Message",
"method": "message_response",
"name": "message",
"selected": "Message",
"tool_mode": true,
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"background_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Background Color",
"dynamic": false,
"info": "The background color of the icon.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "background_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"chat_icon": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Icon",
"dynamic": false,
"info": "The icon of the message.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "chat_icon",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"clean_data": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Basic Clean Data",
"dynamic": false,
"info": "Whether to clean the data",
"list": false,
"list_add_label": "Add More",
"name": "clean_data",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from collections.abc import Generator\nfrom typing import Any\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n 
message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def _safe_convert(self, data: Any) -> str:\n \"\"\"Safely convert input data to string.\"\"\"\n try:\n if isinstance(data, str):\n return data\n if isinstance(data, Message):\n return data.get_text()\n if isinstance(data, Data):\n if data.get_text() is None:\n msg = \"Empty Data object\"\n raise ValueError(msg)\n return data.get_text()\n if isinstance(data, DataFrame):\n if self.clean_data:\n # Remove empty rows\n data = data.dropna(how=\"all\")\n # Remove empty lines in each cell\n data = data.replace(r\"^\\s*$\", \"\", regex=True)\n # Replace multiple newlines with a single newline\n data = 
data.replace(r\"\\n+\", \"\\n\", regex=True)\n\n # Replace pipe characters to avoid markdown table issues\n processed_data = data.replace(r\"\\|\", r\"\\\\|\", regex=True)\n\n processed_data = processed_data.map(\n lambda x: str(x).replace(\"\\n\", \"
\") if isinstance(x, str) else x\n )\n\n return processed_data.to_markdown(index=False)\n return str(data)\n except (ValueError, TypeError, AttributeError) as e:\n msg = f\"Error converting data: {e!s}\"\n raise ValueError(msg) from e\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([self._safe_convert(item) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return self._safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Data Template",
"dynamic": false,
"info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "data_template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "{text}"
},
"input_value": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Text",
"dynamic": false,
"info": "Message to be passed as output.",
"input_types": [
"Data",
"DataFrame",
"Message"
],
"list": false,
"list_add_label": "Add More",
"name": "input_value",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Sender Type",
"dynamic": false,
"info": "Type of sender.",
"name": "sender",
"options": [
"Machine",
"User"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Machine"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Name of the sender.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "AI"
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"should_store_message": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Store Messages",
"dynamic": false,
"info": "Store the message in the history.",
"list": false,
"list_add_label": "Add More",
"name": "should_store_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"text_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Text Color",
"dynamic": false,
"info": "The text color of the name",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "text_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"showNode": false,
"type": "ChatOutput"
},
"dragging": false,
"id": "ChatOutput-WZN6f",
"measured": {
"height": 65,
"width": 192
},
"position": {
"x": 1976.462696772347,
"y": 182.8988442123822
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "GoogleGenerativeAIModel-kaDHv",
"node": {
"base_classes": [
"LanguageModel",
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Generate text using Google Generative AI.",
"display_name": "Google Generative AI",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"system_message",
"stream",
"max_output_tokens",
"model_name",
"api_key",
"top_p",
"temperature",
"n",
"top_k",
"tool_model_enabled"
],
"frozen": false,
"icon": "GoogleGenerativeAI",
"legacy": false,
"lf_version": "1.3.2",
"metadata": {},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Message",
"hidden": null,
"method": "text_response",
"name": "text_output",
"options": null,
"required_inputs": [],
"selected": "Message",
"tool_mode": true,
"types": [
"Message"
],
"value": "__UNDEFINED__"
},
{
"allows_loop": false,
"cache": true,
"display_name": "Language Model",
"hidden": null,
"method": "build_model",
"name": "model_output",
"options": null,
"required_inputs": [
"api_key"
],
"selected": "LanguageModel",
"tool_mode": true,
"types": [
"LanguageModel"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "Google API Key",
"dynamic": false,
"info": "The Google API Key to use for the Google Generative AI.",
"input_types": [
"Message"
],
"load_from_db": false,
"name": "api_key",
"password": true,
"placeholder": "",
"real_time_refresh": true,
"required": true,
"show": true,
"title_case": false,
"type": "str",
"value": "AIzaSyCrsOrY7H2pcozIxeP_goDrwQEO0FBgAAM"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Any\n\nimport requests\nfrom loguru import logger\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIVE_AI_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput, SliderInput\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.schema import dotdict\n\n\nclass GoogleGenerativeAIComponent(LCModelComponent):\n display_name = \"Google Generative AI\"\n description = \"Generate text using Google Generative AI.\"\n icon = \"GoogleGenerativeAI\"\n name = \"GoogleGenerativeAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_output_tokens\", display_name=\"Max Output Tokens\", info=\"The maximum number of tokens to generate.\"\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model\",\n info=\"The name of the model to use.\",\n options=GOOGLE_GENERATIVE_AI_MODELS,\n value=\"gemini-1.5-pro\",\n refresh_button=True,\n combobox=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Google API Key\",\n info=\"The Google API Key to use for the Google Generative AI.\",\n required=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The maximum cumulative probability of tokens to consider when sampling.\",\n advanced=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n info=\"Controls randomness. Lower values are more deterministic, higher values are more creative.\",\n ),\n IntInput(\n name=\"n\",\n display_name=\"N\",\n info=\"Number of chat completions to generate for each prompt. 
\"\n \"Note that the API may not return the full n completions if duplicates are generated.\",\n advanced=True,\n ),\n IntInput(\n name=\"top_k\",\n display_name=\"Top K\",\n info=\"Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.\",\n advanced=True,\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to use the tool model.\",\n value=False,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n try:\n from langchain_google_genai import ChatGoogleGenerativeAI\n except ImportError as e:\n msg = \"The 'langchain_google_genai' package is required to use the Google Generative AI model.\"\n raise ImportError(msg) from e\n\n google_api_key = self.api_key\n model = self.model_name\n max_output_tokens = self.max_output_tokens\n temperature = self.temperature\n top_k = self.top_k\n top_p = self.top_p\n n = self.n\n\n return ChatGoogleGenerativeAI(\n model=model,\n max_output_tokens=max_output_tokens or None,\n temperature=temperature,\n top_k=top_k or None,\n top_p=top_p or None,\n n=n or 1,\n google_api_key=SecretStr(google_api_key).get_secret_value(),\n )\n\n def get_models(self, tool_model_enabled: bool | None = None) -> list[str]:\n try:\n import google.generativeai as genai\n\n genai.configure(api_key=self.api_key)\n model_ids = [\n model.name.replace(\"models/\", \"\")\n for model in genai.list_models()\n if \"generateContent\" in model.supported_generation_methods\n ]\n model_ids.sort(reverse=True)\n except (ImportError, ValueError) as e:\n logger.exception(f\"Error getting model names: {e}\")\n model_ids = GOOGLE_GENERATIVE_AI_MODELS\n if tool_model_enabled:\n try:\n from langchain_google_genai.chat_models import ChatGoogleGenerativeAI\n except ImportError as e:\n msg = \"langchain_google_genai is not installed.\"\n raise ImportError(msg) from e\n for model in model_ids:\n model_with_tool = ChatGoogleGenerativeAI(\n model=self.model_name,\n 
google_api_key=self.api_key,\n )\n if not self.supports_tool_calling(model_with_tool):\n model_ids.remove(model)\n return model_ids\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n if field_name in {\"base_url\", \"model_name\", \"tool_model_enabled\", \"api_key\"} and field_value:\n try:\n if len(self.api_key) == 0:\n ids = GOOGLE_GENERATIVE_AI_MODELS\n else:\n try:\n ids = self.get_models(tool_model_enabled=self.tool_model_enabled)\n except (ImportError, ValueError, requests.exceptions.RequestException) as e:\n logger.exception(f\"Error getting model names: {e}\")\n ids = GOOGLE_GENERATIVE_AI_MODELS\n build_config[\"model_name\"][\"options\"] = ids\n build_config[\"model_name\"][\"value\"] = ids[0]\n except Exception as e:\n msg = f\"Error getting model names: {e}\"\n raise ValueError(msg) from e\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"max_output_tokens": {
"_input_type": "IntInput",
"advanced": false,
"display_name": "Max Output Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate.",
"list": false,
"list_add_label": "Add More",
"name": "max_output_tokens",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": true,
"dialog_inputs": {},
"display_name": "Model",
"dynamic": false,
"info": "The name of the model to use.",
"name": "model_name",
"options": [
"learnlm-1.5-pro-experimental",
"gemma-3-4b-it",
"gemma-3-27b-it",
"gemma-3-1b-it",
"gemma-3-12b-it",
"gemini-pro-vision",
"gemini-exp-1206",
"gemini-2.5-pro-preview-03-25",
"gemini-2.5-pro-exp-03-25",
"gemini-2.0-pro-exp-02-05",
"gemini-2.0-pro-exp",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-lite-preview",
"gemini-2.0-flash-lite-001",
"gemini-2.0-flash-lite",
"gemini-2.0-flash-exp-image-generation",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-001",
"gemini-2.0-flash",
"gemini-1.5-pro-latest",
"gemini-1.5-pro-002",
"gemini-1.5-pro-001",
"gemini-1.5-pro",
"gemini-1.5-flash-latest",
"gemini-1.5-flash-8b-latest",
"gemini-1.5-flash-8b-exp-0924",
"gemini-1.5-flash-8b-exp-0827",
"gemini-1.5-flash-8b-001",
"gemini-1.5-flash-8b",
"gemini-1.5-flash-002",
"gemini-1.5-flash-001-tuning",
"gemini-1.5-flash-001",
"gemini-1.5-flash",
"gemini-1.0-pro-vision-latest"
],
"options_metadata": [],
"placeholder": "",
"refresh_button": true,
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "learnlm-1.5-pro-experimental"
},
"n": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "N",
"dynamic": false,
"info": "Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.",
"list": false,
"list_add_label": "Add More",
"name": "n",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"stream": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Stream",
"dynamic": false,
"info": "Stream the response from the model. Streaming works only in Chat.",
"list": false,
"list_add_label": "Add More",
"name": "stream",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"system_message": {
"_input_type": "MultilineInput",
"advanced": false,
"copy_field": false,
"display_name": "System Message",
"dynamic": false,
"info": "System message to pass to the model.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"multiline": true,
"name": "system_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "You are a sentiment analysis assistant.\n\nInstructions:\n- For each YouTube comment, analyze and classify it as **Positive**, **Negative**, or **Neutral**.\n- Also mention the **number of likes** if available.\n- Format the result as a **Markdown table** with these columns: Comment | Sentiment | Likes.\n- After the table, write a short **summary** including:\n  - Which comment got the **most likes**\n  - Which is the **most positive** comment\n  - Which is the **most negative** comment\n\nComment: {comment}"
},
"temperature": {
"_input_type": "SliderInput",
"advanced": false,
"display_name": "Temperature",
"dynamic": false,
"info": "Controls randomness. Lower values are more deterministic, higher values are more creative.",
"max_label": "",
"max_label_icon": "",
"min_label": "",
"min_label_icon": "",
"name": "temperature",
"placeholder": "",
"range_spec": {
"max": 2,
"min": 0,
"step": 0.01,
"step_type": "float"
},
"required": false,
"show": true,
"slider_buttons": false,
"slider_buttons_options": [],
"slider_input": false,
"title_case": false,
"tool_mode": false,
"type": "slider",
"value": 0.27
},
"tool_model_enabled": {
"_input_type": "BoolInput",
"advanced": false,
"display_name": "Tool Model Enabled",
"dynamic": false,
"info": "Whether to use the tool model.",
"list": false,
"list_add_label": "Add More",
"name": "tool_model_enabled",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"top_k": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Top K",
"dynamic": false,
"info": "Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.",
"list": false,
"list_add_label": "Add More",
"name": "top_k",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"top_p": {
"_input_type": "FloatInput",
"advanced": true,
"display_name": "Top P",
"dynamic": false,
"info": "The maximum cumulative probability of tokens to consider when sampling.",
"list": false,
"list_add_label": "Add More",
"name": "top_p",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "float",
"value": ""
}
},
"tool_mode": false
},
"showNode": true,
"type": "GoogleGenerativeAIModel"
},
"dragging": false,
"id": "GoogleGenerativeAIModel-kaDHv",
"measured": {
"height": 732,
"width": 320
},
"position": {
"x": 1541.8173512233059,
"y": 12.994756719713479
},
"selected": false,
"type": "genericNode"
}
],
"viewport": {
"x": 31.582643982783736,
"y": 96.06522601994814,
"zoom": 0.4040604823841842
}
},
"description": "Flow into the Future of Language.",
"endpoint_name": null,
"id": "3d4f5cc3-d2cf-4732-882e-f6c3460c8aba",
"is_component": false,
"last_tested_version": "1.3.2",
"name": "YOUTUBE_Analysis",
"tags": null
}