name: datapipe
description: Data pipeline orchestrator for connecting sources, applying transformations, and routing to sinks on configurable schedules
binary: datapipe
auth:
  type: env_var
  key: DATAPIPE_API_KEY
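  # Illustrative, not part of the spec: with env_var auth the key is read
  # from the environment, so a shell session would export it first, e.g.
  #   export DATAPIPE_API_KEY="<your-key>"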
commands:
  - name: source list
    description: List all configured data sources
    args:
      - name: type
        type: enum
        required: false
        description: Filter by source type
        values: ["postgres", "mysql", "s3", "api", "kafka", "mongodb"]
    output_format: json
    side_effects: false
    example: "datapipe source list --type postgres"
  - name: source connect
    description: Register a new data source connection
    args:
      - name: name
        type: string
        required: true
        description: Source name (unique identifier)
      - name: type
        type: enum
        required: true
        description: Source type
        values: ["postgres", "mysql", "s3", "api", "kafka", "mongodb"]
      - name: config
        type: json
        required: true
        description: "Connection configuration as JSON (e.g. {\"host\": \"db.example.com\", \"port\": 5432, \"database\": \"analytics\"})"
    output_format: json
    side_effects: true
    example: "datapipe source connect --name prod-db --type postgres --config '{\"host\": \"db.example.com\", \"port\": 5432, \"database\": \"analytics\"}'"
  - name: transform create
    description: Create a named transformation step with SQL logic
    args:
      - name: name
        type: string
        required: true
        description: Transform name (unique identifier)
      - name: sql
        type: string
        required: true
        description: SQL transformation query
      - name: source
        type: string
        required: true
        description: Source name to read data from
      - name: description
        type: string
        required: false
        description: Human-readable description of the transformation
    output_format: json
    side_effects: true
    example: "datapipe transform create --name daily-revenue --sql 'SELECT date, SUM(amount) as revenue FROM orders GROUP BY date' --source prod-db"
  - name: transform run
    description: Execute a transformation step
    args:
      - name: name
        type: string
        required: true
        description: Transform name to execute
      - name: dry-run
        type: bool
        required: false
        description: Preview the output without writing results
      - name: limit
        type: int
        required: false
        description: Limit output rows (useful for preview)
    output_format: json
    side_effects: true
    example: "datapipe transform run --name daily-revenue --dry-run --limit 10"
  - name: sink create
    description: Create a data sink destination
    args:
      - name: name
        type: string
        required: true
        description: Sink name (unique identifier)
      - name: type
        type: enum
        required: true
        description: Sink type
        values: ["s3", "bigquery", "warehouse", "postgres", "elasticsearch"]
      - name: config
        type: json
        required: true
        description: "Sink configuration as JSON (e.g. {\"bucket\": \"analytics-output\", \"prefix\": \"daily/\"})"
    output_format: json
    side_effects: true
    example: "datapipe sink create --name analytics-lake --type s3 --config '{\"bucket\": \"analytics-output\", \"prefix\": \"daily/\"}'"
  - name: pipeline create
    description: Create a complete data pipeline connecting source, transforms, and sink with a schedule
    args:
      - name: name
        type: string
        required: true
        description: Pipeline name (unique identifier)
      - name: source
        type: string
        required: true
        description: Source name
      - name: transforms
        type: string
        required: true
        description: Comma-separated list of transform names in execution order
      - name: sink
        type: string
        required: true
        description: Sink name
      - name: schedule
        type: string
        required: true
        description: Cron expression for scheduled execution
    output_format: json
    side_effects: true
    example: "datapipe pipeline create --name daily-etl --source prod-db --transforms daily-revenue,add-dimensions --sink analytics-lake --schedule '0 2 * * *'"
  - name: pipeline status
    description: Check the status and execution history of a pipeline
    args:
      - name: name
        type: string
        required: true
        description: Pipeline name
    output_format: json
    side_effects: false
    example: "datapipe pipeline status --name daily-etl"