hc99 commited on
Commit
8766bc5
·
verified ·
1 Parent(s): 2b06d1d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. testbed/gradio-app__gradio/.changeset/README.md +8 -0
  2. testbed/gradio-app__gradio/.changeset/changeset.cjs +280 -0
  3. testbed/gradio-app__gradio/.changeset/config.json +11 -0
  4. testbed/gradio-app__gradio/.changeset/fix_changelogs.cjs +122 -0
  5. testbed/gradio-app__gradio/.config/.prettierignore +27 -0
  6. testbed/gradio-app__gradio/.config/.prettierrc.json +7 -0
  7. testbed/gradio-app__gradio/.config/basevite.config.ts +91 -0
  8. testbed/gradio-app__gradio/.config/eslint.config.js +142 -0
  9. testbed/gradio-app__gradio/.config/playwright-ct.config.ts +41 -0
  10. testbed/gradio-app__gradio/.config/playwright-setup.js +151 -0
  11. testbed/gradio-app__gradio/.config/playwright.config.js +9 -0
  12. testbed/gradio-app__gradio/.config/playwright/index.html +12 -0
  13. testbed/gradio-app__gradio/.config/playwright/index.ts +2 -0
  14. testbed/gradio-app__gradio/.config/postcss.config.cjs +8 -0
  15. testbed/gradio-app__gradio/.config/setup_vite_tests.ts +11 -0
  16. testbed/gradio-app__gradio/.config/tailwind.config.cjs +12 -0
  17. testbed/gradio-app__gradio/.config/vitest.config.ts +3 -0
  18. testbed/gradio-app__gradio/.devcontainer/devcontainer.json +41 -0
  19. testbed/gradio-app__gradio/README.md +323 -0
  20. testbed/gradio-app__gradio/client/js/CHANGELOG.md +80 -0
  21. testbed/gradio-app__gradio/client/js/README.md +339 -0
  22. testbed/gradio-app__gradio/client/js/package.json +33 -0
  23. testbed/gradio-app__gradio/client/js/src/client.node-test.ts +172 -0
  24. testbed/gradio-app__gradio/client/js/src/client.ts +1367 -0
  25. testbed/gradio-app__gradio/client/js/src/globals.d.ts +29 -0
  26. testbed/gradio-app__gradio/client/js/src/index.ts +8 -0
  27. testbed/gradio-app__gradio/client/js/src/types.ts +116 -0
  28. testbed/gradio-app__gradio/client/js/src/utils.ts +212 -0
  29. testbed/gradio-app__gradio/client/js/tsconfig.json +14 -0
  30. testbed/gradio-app__gradio/client/js/vite.config.js +23 -0
  31. testbed/gradio-app__gradio/client/python/CHANGELOG.md +402 -0
  32. testbed/gradio-app__gradio/client/python/README.md +143 -0
  33. testbed/gradio-app__gradio/client/python/build_pypi.sh +9 -0
  34. testbed/gradio-app__gradio/client/python/gradio_client/CHANGELOG.md +402 -0
  35. testbed/gradio-app__gradio/client/python/gradio_client/__init__.py +7 -0
  36. testbed/gradio-app__gradio/client/python/gradio_client/cli/__init__.py +3 -0
  37. testbed/gradio-app__gradio/client/python/gradio_client/cli/deploy_discord.py +58 -0
  38. testbed/gradio-app__gradio/client/python/gradio_client/client.py +1251 -0
  39. testbed/gradio-app__gradio/client/python/gradio_client/data_classes.py +17 -0
  40. testbed/gradio-app__gradio/client/python/gradio_client/documentation.py +266 -0
  41. testbed/gradio-app__gradio/client/python/gradio_client/media_data.py +0 -0
  42. testbed/gradio-app__gradio/client/python/gradio_client/package.json +7 -0
  43. testbed/gradio-app__gradio/client/python/gradio_client/serializing.py +582 -0
  44. testbed/gradio-app__gradio/client/python/gradio_client/templates/discord_chat.py +193 -0
  45. testbed/gradio-app__gradio/client/python/gradio_client/types.json +199 -0
  46. testbed/gradio-app__gradio/client/python/gradio_client/utils.py +598 -0
  47. testbed/gradio-app__gradio/client/python/pyproject.toml +70 -0
  48. testbed/gradio-app__gradio/client/python/requirements.txt +7 -0
  49. testbed/gradio-app__gradio/client/python/scripts/build_pypi.sh +9 -0
  50. testbed/gradio-app__gradio/client/python/scripts/check_pypi.py +17 -0
testbed/gradio-app__gradio/.changeset/README.md ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Changesets
2
+
3
+ Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works
4
+ with multi-package repos, or single-package repos to help you version and publish your code. You can
5
+ find the full documentation for it [in our repository](https://github.com/changesets/changesets)
6
+
7
+ We have a quick list of common questions to get you started engaging with this project in
8
+ [our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md)
testbed/gradio-app__gradio/.changeset/changeset.cjs ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ const { getPackagesSync } = require("@manypkg/get-packages");
2
+ const gh = require("@changesets/get-github-info");
3
+ const { existsSync, readFileSync, writeFileSync } = require("fs");
4
+ const { join } = require("path");
5
+
6
+ const { getInfo, getInfoFromPullRequest } = gh;
7
+ const { packages, rootDir } = getPackagesSync(process.cwd());
8
+
9
+ /**
10
+ * @typedef {{packageJson: {name: string, python?: boolean}, dir: string}} Package
11
+ */
12
+
13
+ /**
14
+ * @typedef {{summary: string, id: string, commit: string, releases: {name: string}}} Changeset
15
+ */
16
+
17
+ /**
18
+ *
19
+ * @param {string} package_name The name of the package to find the directories for
20
+ * @returns {string[]} The directories for the package
21
+ */
22
+ function find_packages_dirs(package_name) {
23
+ /** @type {string[]} */
24
+ let package_dirs = [];
25
+
26
+ /** @type {Package | undefined} */
27
+ const _package = packages.find((p) => p.packageJson.name === package_name);
28
+ if (!_package) throw new Error(`Package ${package_name} not found`);
29
+
30
+ package_dirs.push(_package.dir);
31
+ if (_package.packageJson.python) {
32
+ package_dirs.push(join(_package.dir, ".."));
33
+ }
34
+ return package_dirs;
35
+ }
36
+
37
+ const changelogFunctions = {
38
+ /**
39
+ *
40
+ * @param {Changeset[]} changesets The changesets that have been created
41
+ * @param {any} dependenciesUpdated The dependencies that have been updated
42
+ * @param {any} options The options passed to the changelog generator
43
+ * @returns {Promise<string>} The release line for the dependencies
44
+ */
45
+ getDependencyReleaseLine: async (
46
+ changesets,
47
+ dependenciesUpdated,
48
+ options
49
+ ) => {
50
+ if (!options.repo) {
51
+ throw new Error(
52
+ 'Please provide a repo to this changelog generator like this:\n"changelog": ["@changesets/changelog-github", { "repo": "org/repo" }]'
53
+ );
54
+ }
55
+ if (dependenciesUpdated.length === 0) return "";
56
+
57
+ const changesetLink = `- Updated dependencies [${(
58
+ await Promise.all(
59
+ changesets.map(async (cs) => {
60
+ if (cs.commit) {
61
+ let { links } = await getInfo({
62
+ repo: options.repo,
63
+ commit: cs.commit
64
+ });
65
+ return links.commit;
66
+ }
67
+ })
68
+ )
69
+ )
70
+ .filter((_) => _)
71
+ .join(", ")}]:`;
72
+
73
+ const updatedDepenenciesList = dependenciesUpdated.map(
74
+ /**
75
+ *
76
+ * @param {any} dependency The dependency that has been updated
77
+ * @returns {string} The formatted dependency
78
+ */
79
+ (dependency) => ` - ${dependency.name}@${dependency.newVersion}`
80
+ );
81
+
82
+ return [changesetLink, ...updatedDepenenciesList].join("\n");
83
+ },
84
+ /**
85
+ *
86
+ * @param {{summary: string, id: string, commit: string, releases: {name: string}[]}} changeset The changeset that has been created
87
+ * @param {any} type The type of changeset
88
+ * @param {any} options The options passed to the changelog generator
89
+ * @returns {Promise<string>} The release line for the changeset
90
+ */
91
+ getReleaseLine: async (changeset, type, options) => {
92
+ if (!options || !options.repo) {
93
+ throw new Error(
94
+ 'Please provide a repo to this changelog generator like this:\n"changelog": ["@changesets/changelog-github", { "repo": "org/repo" }]'
95
+ );
96
+ }
97
+
98
+ let prFromSummary;
99
+ let commitFromSummary;
100
+ /**
101
+ * @type {string[]}
102
+ */
103
+ let usersFromSummary = [];
104
+
105
+ const replacedChangelog = changeset.summary
106
+ .replace(/^\s*(?:pr|pull|pull\s+request):\s*#?(\d+)/im, (_, pr) => {
107
+ let num = Number(pr);
108
+ if (!isNaN(num)) prFromSummary = num;
109
+ return "";
110
+ })
111
+ .replace(/^\s*commit:\s*([^\s]+)/im, (_, commit) => {
112
+ commitFromSummary = commit;
113
+ return "";
114
+ })
115
+ .replace(/^\s*(?:author|user):\s*@?([^\s]+)/gim, (_, user) => {
116
+ usersFromSummary.push(user);
117
+ return "";
118
+ })
119
+ .trim();
120
+
121
+ const [firstLine, ...futureLines] = replacedChangelog
122
+ .split("\n")
123
+ .map((l) => l.trimRight());
124
+
125
+ const links = await (async () => {
126
+ if (prFromSummary !== undefined) {
127
+ let { links } = await getInfoFromPullRequest({
128
+ repo: options.repo,
129
+ pull: prFromSummary
130
+ });
131
+ if (commitFromSummary) {
132
+ links = {
133
+ ...links,
134
+ commit: `[\`${commitFromSummary}\`](https://github.com/${options.repo}/commit/${commitFromSummary})`
135
+ };
136
+ }
137
+ return links;
138
+ }
139
+ const commitToFetchFrom = commitFromSummary || changeset.commit;
140
+ if (commitToFetchFrom) {
141
+ let { links } = await getInfo({
142
+ repo: options.repo,
143
+ commit: commitToFetchFrom
144
+ });
145
+ return links;
146
+ }
147
+ return {
148
+ commit: null,
149
+ pull: null,
150
+ user: null
151
+ };
152
+ })();
153
+
154
+ const users =
155
+ usersFromSummary && usersFromSummary.length
156
+ ? usersFromSummary
157
+ .map(
158
+ (userFromSummary) =>
159
+ `[@${userFromSummary}](https://github.com/${userFromSummary})`
160
+ )
161
+ .join(", ")
162
+ : links.user;
163
+
164
+ const prefix = [
165
+ links.pull === null ? "" : `${links.pull}`,
166
+ links.commit === null ? "" : `${links.commit}`
167
+ ]
168
+ .join(" ")
169
+ .trim();
170
+
171
+ const suffix = users === null ? "" : ` Thanks ${users}!`;
172
+
173
+ /**
174
+ * @typedef {{[key: string]: string[] | {dirs: string[], current_changelog: string, feat: {summary: string}[], fix: {summary: string}[], highlight: {summary: string}[]}}} ChangesetMeta
175
+ */
176
+
177
+ /**
178
+ * @type { ChangesetMeta & { _handled: string[] } }}
179
+ */
180
+ let lines;
181
+ if (existsSync(join(rootDir, ".changeset", "_changelog.json"))) {
182
+ lines = JSON.parse(
183
+ readFileSync(join(rootDir, ".changeset", "_changelog.json"), "utf-8")
184
+ );
185
+ } else {
186
+ lines = {
187
+ _handled: []
188
+ };
189
+ }
190
+
191
+ if (lines._handled.includes(changeset.id)) {
192
+ return "done";
193
+ }
194
+ lines._handled.push(changeset.id);
195
+
196
+ changeset.releases.forEach((release) => {
197
+ if (!lines[release.name])
198
+ lines[release.name] = {
199
+ dirs: find_packages_dirs(release.name),
200
+ current_changelog: "",
201
+ feat: [],
202
+ fix: [],
203
+ highlight: []
204
+ };
205
+
206
+ const changelog_path = join(
207
+ //@ts-ignore
208
+ lines[release.name].dirs[1] || lines[release.name].dirs[0],
209
+ "CHANGELOG.md"
210
+ );
211
+
212
+ if (existsSync(changelog_path)) {
213
+ //@ts-ignore
214
+ lines[release.name].current_changelog = readFileSync(
215
+ changelog_path,
216
+ "utf-8"
217
+ )
218
+ .replace(`# ${release.name}`, "")
219
+ .trim();
220
+ }
221
+
222
+ const [, _type, summary] = changeset.summary
223
+ .trim()
224
+ .match(/^(feat|fix|highlight)\s*:\s*([^]*)/im) || [
225
+ ,
226
+ false,
227
+ changeset.summary
228
+ ];
229
+
230
+ let formatted_summary = "";
231
+
232
+ if (_type === "highlight") {
233
+ const [heading, ...rest] = summary.trim().split("\n");
234
+ const _heading = `${heading} ${prefix ? `(${prefix})` : ""}`;
235
+ const _rest = rest.concat(["", suffix]);
236
+
237
+ formatted_summary = `${_heading}\n${_rest.join("\n")}`;
238
+ } else {
239
+ formatted_summary = handle_line(summary, prefix, suffix);
240
+ }
241
+
242
+ //@ts-ignore
243
+ lines[release.name][_type].push({
244
+ summary: formatted_summary
245
+ });
246
+ });
247
+
248
+ writeFileSync(
249
+ join(rootDir, ".changeset", "_changelog.json"),
250
+ JSON.stringify(lines, null, 2)
251
+ );
252
+
253
+ return `\n\n-${prefix ? `${prefix} -` : ""} ${firstLine}\n${futureLines
254
+ .map((l) => ` ${l}`)
255
+ .join("\n")}`;
256
+ }
257
+ };
258
+
259
+ /**
260
+ * @param {string} str The changelog entry
261
+ * @param {string} prefix The prefix to add to the first line
262
+ * @param {string} suffix The suffix to add to the last line
263
+ * @returns {string} The formatted changelog entry
264
+ */
265
+ function handle_line(str, prefix, suffix) {
266
+ const [_s, ...lines] = str.split("\n").filter(Boolean);
267
+
268
+ const desc = `${prefix ? `${prefix} -` : ""} ${_s.replace(
269
+ /[\s\.]$/,
270
+ ""
271
+ )}. ${suffix}`;
272
+
273
+ if (_s.length === 1) {
274
+ return desc;
275
+ }
276
+
277
+ return [desc, ...lines.map((l) => ` ${l}`)].join("/n");
278
+ }
279
+
280
+ module.exports = changelogFunctions;
testbed/gradio-app__gradio/.changeset/config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "$schema": "https://unpkg.com/@changesets/config@2.3.0/schema.json",
3
+ "changelog": ["./changeset.cjs", { "repo": "gradio-app/gradio" }],
4
+ "commit": false,
5
+ "fixed": [],
6
+ "linked": [],
7
+ "access": "public",
8
+ "baseBranch": "main",
9
+ "updateInternalDependencies": "patch",
10
+ "ignore": ["@gradio/spaces-test", "@gradio/cdn-test"]
11
+ }
testbed/gradio-app__gradio/.changeset/fix_changelogs.cjs ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ const { join } = require("path");
2
+ const { readFileSync, existsSync, writeFileSync, unlinkSync } = require("fs");
3
+ const { getPackagesSync } = require("@manypkg/get-packages");
4
+
5
+ const RE_PKG_NAME = /^[\w-]+\b/;
6
+ const pkg_meta = getPackagesSync(process.cwd());
7
+
8
+ /**
9
+ * @typedef {{dirs: string[], highlight: {summary: string}[], feat: {summary: string}[], fix: {summary: string}[], current_changelog: string}} ChangesetMeta
10
+ */
11
+
12
+ /**
13
+ * @typedef {{[key: string]: ChangesetMeta}} ChangesetMetaCollection
14
+ */
15
+
16
+ function run() {
17
+ if (!existsSync(join(pkg_meta.rootDir, ".changeset", "_changelog.json"))) {
18
+ console.warn("No changesets to process");
19
+ return;
20
+ }
21
+
22
+ /**
23
+ * @type { ChangesetMetaCollection & { _handled: string[] } }}
24
+ */
25
+ const { _handled, ...packages } = JSON.parse(
26
+ readFileSync(
27
+ join(pkg_meta.rootDir, ".changeset", "_changelog.json"),
28
+ "utf-8"
29
+ )
30
+ );
31
+
32
+ /**
33
+ * @typedef { {packageJson: {name: string, version: string, python: boolean}, dir: string} } PackageMeta
34
+ */
35
+
36
+ /**
37
+ * @type { {[key:string]: PackageMeta} }
38
+ */
39
+ const all_packages = pkg_meta.packages.reduce((acc, pkg) => {
40
+ acc[pkg.packageJson.name] = /**@type {PackageMeta} */ (
41
+ /** @type {unknown} */ (pkg)
42
+ );
43
+ return acc;
44
+ }, /** @type {{[key:string] : PackageMeta}} */ ({}));
45
+
46
+ for (const pkg_name in packages) {
47
+ const { dirs, highlight, feat, fix, current_changelog } =
48
+ /**@type {ChangesetMeta} */ (packages[pkg_name]);
49
+
50
+ const { version, python } = all_packages[pkg_name].packageJson;
51
+
52
+ const highlights = highlight.map((h) => `${h.summary}`);
53
+ const features = feat.map((f) => `- ${f.summary}`);
54
+ const fixes = fix.map((f) => `- ${f.summary}`);
55
+
56
+ const release_notes = /** @type {[string[], string][]} */ ([
57
+ [highlights, "### Highlights"],
58
+ [features, "### Features"],
59
+ [fixes, "### Fixes"]
60
+ ])
61
+ .filter(([s], i) => s.length > 0)
62
+ .map(([lines, title]) => {
63
+ if (title === "### Highlights") {
64
+ return `${title}\n\n${lines.join("\n\n")}`;
65
+ }
66
+
67
+ return `${title}\n\n${lines.join("\n")}`;
68
+ })
69
+ .join("\n\n");
70
+
71
+ const new_changelog = `# ${pkg_name}
72
+
73
+ ## ${version}
74
+
75
+ ${release_notes}
76
+
77
+ ${current_changelog.replace(`# ${pkg_name}`, "").trim()}
78
+ `.trim();
79
+
80
+ dirs.forEach((dir) => {
81
+ writeFileSync(join(dir, "CHANGELOG.md"), new_changelog);
82
+ });
83
+
84
+ if (python) {
85
+ bump_local_dependents(pkg_name, version);
86
+ }
87
+ }
88
+
89
+ unlinkSync(join(pkg_meta.rootDir, ".changeset", "_changelog.json"));
90
+
91
+ /**
92
+ * @param {string} pkg_to_bump The name of the package to bump
93
+ * @param {string} version The version to bump to
94
+ * @returns {void}
95
+ * */
96
+ function bump_local_dependents(pkg_to_bump, version) {
97
+ for (const pkg_name in all_packages) {
98
+ const {
99
+ dir,
100
+ packageJson: { python }
101
+ } = all_packages[pkg_name];
102
+
103
+ if (!python) continue;
104
+
105
+ const requirements_path = join(dir, "..", "requirements.txt");
106
+ const requirements = readFileSync(requirements_path, "utf-8").split("\n");
107
+
108
+ const pkg_index = requirements.findIndex((line) => {
109
+ const m = line.trim().match(RE_PKG_NAME);
110
+ if (!m) return false;
111
+ return m[0] === pkg_to_bump;
112
+ });
113
+
114
+ if (pkg_index !== -1) {
115
+ requirements[pkg_index] = `${pkg_to_bump}==${version}`;
116
+ writeFileSync(requirements_path, requirements.join("\n"));
117
+ }
118
+ }
119
+ }
120
+ }
121
+
122
+ run();
testbed/gradio-app__gradio/.config/.prettierignore ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ **/js/app/public/**
2
+ **/pnpm-workspace.yaml
3
+ **/js/app/dist/**
4
+ **/js/wasm/dist/**
5
+ **/client/js/dist/**
6
+ **/js/lite/dist/**
7
+ **/pnpm-lock.yaml
8
+ **/js/plot/src/Plot.svelte
9
+ **/.svelte-kit/**
10
+ **/demo/**
11
+ **/gradio/**
12
+ **/.pnpm-store/**
13
+ **/.venv/**
14
+ **/.github/**
15
+ /guides/**
16
+ **/.mypy_cache/**
17
+ !test-strategy.md
18
+ **/js/_space-test/**
19
+ ../js/app/src/lite/theme.css
20
+ ../js/storybook/theme.css
21
+ **/gradio_cached_examples/**
22
+ **/storybook-static/**
23
+ **/.vscode/**
24
+ sweep.yaml
25
+ **/.vercel/**
26
+ **/build/**
27
+ **/*.md
testbed/gradio-app__gradio/.config/.prettierrc.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "useTabs": true,
3
+ "singleQuote": false,
4
+ "trailingComma": "none",
5
+ "printWidth": 80,
6
+ "plugins": ["prettier-plugin-svelte"]
7
+ }
testbed/gradio-app__gradio/.config/basevite.config.ts ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { defineConfig } from "vite";
2
+ import { svelte } from "@sveltejs/vite-plugin-svelte";
3
+ import sveltePreprocess from "svelte-preprocess";
4
+ // @ts-ignore
5
+ import custom_media from "postcss-custom-media";
6
+ import global_data from "@csstools/postcss-global-data";
7
+ // @ts-ignore
8
+ import prefixer from "postcss-prefix-selector";
9
+ import { readFileSync } from "fs";
10
+ import { join } from "path";
11
+ import { fileURLToPath } from "url";
12
+
13
+ const __dirname = fileURLToPath(new URL(".", import.meta.url));
14
+ const version_path = join(__dirname, "..", "gradio", "package.json");
15
+ const theme_token_path = join(
16
+ __dirname,
17
+ "..",
18
+ "js",
19
+ "theme",
20
+ "src",
21
+ "tokens.css"
22
+ );
23
+
24
+ const version = JSON.parse(readFileSync(version_path, { encoding: 'utf-8' })).version.trim().replace(/\./g, '-');
25
+
26
+ //@ts-ignore
27
+ export default defineConfig(({ mode }) => {
28
+ const production =
29
+ mode === "production:cdn" ||
30
+ mode === "production:local" ||
31
+ mode === "production:website";
32
+
33
+ return {
34
+ server: {
35
+ port: 9876
36
+ },
37
+
38
+ build: {
39
+ sourcemap: false,
40
+ target: "esnext",
41
+ minify: production
42
+ },
43
+ define: {
44
+ BUILD_MODE: production ? JSON.stringify("prod") : JSON.stringify("dev"),
45
+ BACKEND_URL: production
46
+ ? JSON.stringify("")
47
+ : JSON.stringify("http://localhost:7860/"),
48
+ GRADIO_VERSION: JSON.stringify(version)
49
+ },
50
+ css: {
51
+ postcss: {
52
+ plugins: [
53
+ prefixer({
54
+ prefix: `.gradio-container-${version}`,
55
+ // @ts-ignore
56
+ transform(prefix, selector, prefixedSelector, fileName) {
57
+ if (selector.indexOf("gradio-container") > -1) {
58
+ return prefix;
59
+ } else if (
60
+ selector.indexOf(":root") > -1 ||
61
+ selector.indexOf("dark") > -1 ||
62
+ fileName.indexOf(".svelte") > -1
63
+ ) {
64
+ return selector;
65
+ }
66
+ return prefixedSelector;
67
+ }
68
+ }),
69
+ custom_media()
70
+ ]
71
+ }
72
+ },
73
+ plugins: [
74
+ svelte({
75
+ inspector: true,
76
+ compilerOptions: {
77
+ dev: !production
78
+ },
79
+ hot: !process.env.VITEST && !production,
80
+ preprocess: sveltePreprocess({
81
+ postcss: {
82
+ plugins: [
83
+ global_data({ files: [theme_token_path] }),
84
+ custom_media()
85
+ ]
86
+ }
87
+ })
88
+ })
89
+ ]
90
+ };
91
+ });
testbed/gradio-app__gradio/.config/eslint.config.js ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import globals from "globals";
2
+ import ts_plugin from "@typescript-eslint/eslint-plugin";
3
+ import js_plugin from "@eslint/js";
4
+
5
+ import typescriptParser from "@typescript-eslint/parser";
6
+ import sveltePlugin from "eslint-plugin-svelte";
7
+ import svelteParser from "svelte-eslint-parser";
8
+
9
+ const ts_rules_disabled = Object.fromEntries(
10
+ Object.keys(ts_plugin.rules).map((rule) => [
11
+ `@typescript-eslint/${rule}`,
12
+ "off"
13
+ ])
14
+ );
15
+ const js_rules_disabled = Object.fromEntries(
16
+ Object.keys(js_plugin.configs.all.rules).map((rule) => [rule, "off"])
17
+ );
18
+
19
+ const js_rules = {
20
+ ...js_rules_disabled,
21
+ "no-console": ["error", { allow: ["warn", "error", "debug"] }],
22
+ "no-constant-condition": "error",
23
+ "no-dupe-args": "error",
24
+ "no-extra-boolean-cast": "error",
25
+ "no-unexpected-multiline": "error",
26
+ "no-unreachable": "error",
27
+ "valid-jsdoc": "error",
28
+ "array-callback-return": "error",
29
+ complexity: "error",
30
+ "no-else-return": "error",
31
+ "no-useless-return": "error",
32
+ "no-undef": "error"
33
+ };
34
+
35
+ const ts_rules = {
36
+ ...ts_rules_disabled,
37
+ "@typescript-eslint/adjacent-overload-signatures": "error",
38
+ "@typescript-eslint/explicit-function-return-type": [
39
+ "error",
40
+ { allowExpressions: true }
41
+ ],
42
+ "@typescript-eslint/consistent-type-exports": "error",
43
+ "@typescript-eslint/ban-types": "error",
44
+ "@typescript-eslint/array-type": "error",
45
+ "@typescript-eslint/no-inferrable-types": "error"
46
+ };
47
+
48
+ const { browser, es2021, node } = globals;
49
+
50
+ export default [
51
+ {
52
+ ignores: [
53
+ ".svelte-kit/**/*",
54
+ "**/node_modules/**",
55
+ "**/dist/**",
56
+ "**/.config/*",
57
+ "**/*.spec.ts",
58
+ "**/*.test.ts",
59
+ "**/*.node-test.ts",
60
+ "js/app/test/**/*",
61
+ "**/*vite.config.ts",
62
+ "**/_website/**/*",
63
+ "**/_spaces-test/**/*"
64
+ ]
65
+ },
66
+ {
67
+ files: ["**/*.js", "**/*.cjs"],
68
+ languageOptions: {
69
+ globals: {
70
+ ...browser,
71
+ ...es2021,
72
+ ...node
73
+ }
74
+ },
75
+
76
+ plugins: {
77
+ "eslint:recommended": js_plugin
78
+ },
79
+ rules: js_rules
80
+ },
81
+
82
+ {
83
+ files: ["**/*.ts"],
84
+ languageOptions: {
85
+ parser: typescriptParser,
86
+ parserOptions: {
87
+ project: "./tsconfig.json",
88
+ extraFileExtensions: [".svelte"]
89
+ },
90
+ globals: {
91
+ ...browser,
92
+ ...es2021,
93
+ ...node
94
+ }
95
+ },
96
+
97
+ plugins: {
98
+ "@typescript-eslint": ts_plugin,
99
+ "eslint:recommended": js_plugin
100
+ },
101
+ rules: {
102
+ ...ts_rules,
103
+ ...js_rules,
104
+ "no-undef": "off"
105
+ }
106
+ },
107
+ {
108
+ files: ["**/client/js/**"],
109
+ languageOptions: {
110
+ parserOptions: {
111
+ project: "./client/js/tsconfig.json"
112
+ }
113
+ }
114
+ },
115
+ {
116
+ files: ["**/*.svelte"],
117
+ languageOptions: {
118
+ parser: svelteParser,
119
+ parserOptions: {
120
+ parser: typescriptParser,
121
+ project: "./tsconfig.json",
122
+ extraFileExtensions: [".svelte"]
123
+ },
124
+ globals: {
125
+ ...browser,
126
+ ...es2021
127
+ }
128
+ },
129
+ plugins: {
130
+ svelte: sveltePlugin,
131
+ "@typescript-eslint": ts_plugin,
132
+ "eslint:recommended": js_plugin
133
+ },
134
+ rules: {
135
+ ...ts_rules,
136
+ ...js_rules,
137
+ ...sveltePlugin.configs.recommended.rules,
138
+ "svelte/no-at-html-tags": "off",
139
+ "no-undef": "off"
140
+ }
141
+ }
142
+ ];
testbed/gradio-app__gradio/.config/playwright-ct.config.ts ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { defineConfig, devices } from "@playwright/experimental-ct-svelte";
2
+ import config from "./basevite.config";
3
+
4
+ /**
5
+ * See https://playwright.dev/docs/test-configuration.
6
+ */
7
+ export default defineConfig({
8
+ testDir: "../",
9
+ /* The base directory, relative to the config file, for snapshot files created with toMatchSnapshot and toHaveScreenshot. */
10
+ snapshotDir: "./__snapshots__",
11
+ /* Maximum time one test can run for. */
12
+ timeout: 10 * 1000,
13
+ /* Run tests in files in parallel */
14
+ fullyParallel: true,
15
+ /* Fail the build on CI if you accidentally left test.only in the source code. */
16
+ forbidOnly: !!process.env.CI,
17
+ /* Retry on CI only */
18
+ retries: process.env.CI ? 2 : 0,
19
+ /* Opt out of parallel tests on CI. */
20
+ workers: process.env.CI ? 1 : undefined,
21
+ /* Reporter to use. See https://playwright.dev/docs/test-reporters */
22
+ reporter: "html",
23
+ /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
24
+ use: {
25
+ /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
26
+ trace: "on-first-retry",
27
+
28
+ /* Port to use for Playwright component endpoint. */
29
+ ctPort: 3100,
30
+ ctViteConfig: config({ mode: "development" })
31
+ },
32
+ testMatch: "*.component.spec.ts",
33
+
34
+ /* Configure projects for major browsers */
35
+ projects: [
36
+ {
37
+ name: "chromium",
38
+ use: { ...devices["Desktop Chrome"] }
39
+ }
40
+ ]
41
+ });
testbed/gradio-app__gradio/.config/playwright-setup.js ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { spawn } from "node:child_process";
2
+ import { join, basename } from "path";
3
+ import { fileURLToPath } from "url";
4
+ import { readdirSync, writeFileSync } from "fs";
5
+ import net from "net";
6
+
7
+ import kl from "kleur";
8
+
9
+ const __dirname = fileURLToPath(new URL(".", import.meta.url));
10
+ const TEST_APP_PATH = join(__dirname, "./test.py");
11
+ const TEST_FILES_PATH = join(__dirname, "..", "js", "app", "test");
12
+ const ROOT = join(__dirname, "..");
13
+
14
+ const test_files = readdirSync(TEST_FILES_PATH)
15
+ .filter(
16
+ (f) =>
17
+ f.endsWith("spec.ts") &&
18
+ !f.endsWith(".skip.spec.ts") &&
19
+ !f.endsWith(".component.spec.ts")
20
+ )
21
+ .map((f) => basename(f, ".spec.ts"));
22
+
23
+ export default async function global_setup() {
24
+ const verbose = process.env.GRADIO_TEST_VERBOSE;
25
+
26
+ const port = await find_free_port(7860, 8860);
27
+ process.env.GRADIO_E2E_TEST_PORT = port;
28
+
29
+ process.stdout.write(kl.yellow("\nCreating test gradio app.\n\n"));
30
+
31
+ const test_app = make_app(test_files, port);
32
+ process.stdout.write(kl.yellow("App created. Starting test server.\n\n"));
33
+
34
+ process.stdout.write(kl.bgBlue(" =========================== \n"));
35
+ process.stdout.write(kl.bgBlue(" === PYTHON STARTUP LOGS === \n"));
36
+ process.stdout.write(kl.bgBlue(" =========================== \n\n"));
37
+
38
+ writeFileSync(TEST_APP_PATH, test_app);
39
+
40
+ const app = await spawn_gradio_app(TEST_APP_PATH, port, verbose);
41
+
42
+ process.stdout.write(
43
+ kl.green(`\n\nServer started. Running tests on port ${port}.\n`)
44
+ );
45
+
46
+ return () => {
47
+ process.stdout.write(kl.green(`\nTests complete, cleaning up!\n`));
48
+
49
+ kill_process(app);
50
+ };
51
+ }
52
+ const INFO_RE = /^INFO:/;
53
+
54
+ function spawn_gradio_app(app, port, verbose) {
55
+ const PORT_RE = new RegExp(`:${port}`);
56
+
57
+ return new Promise((res, rej) => {
58
+ const _process = spawn(`python`, [app], {
59
+ shell: true,
60
+ stdio: "pipe",
61
+ cwd: ROOT,
62
+ env: {
63
+ ...process.env,
64
+ GRADIO_SERVER_PORT: `7879`,
65
+ PYTHONUNBUFFERED: "true"
66
+ }
67
+ });
68
+ _process.stdout.setEncoding("utf8");
69
+
70
+ function std_out(data) {
71
+ const _data = data.toString();
72
+ const is_info = INFO_RE.test(_data);
73
+
74
+ if (is_info) {
75
+ process.stdout.write(kl.yellow(_data));
76
+ }
77
+
78
+ if (!is_info) {
79
+ process.stdout.write(`${_data}\n`);
80
+ }
81
+
82
+ if (PORT_RE.test(_data)) {
83
+ process.stdout.write(kl.bgBlue("\n =========== END =========== "));
84
+
85
+ res(_process);
86
+
87
+ if (!verbose) {
88
+ _process.stdout.off("data", std_out);
89
+ _process.stderr.off("data", std_out);
90
+ }
91
+ }
92
+ }
93
+
94
+ _process.stdout.on("data", std_out);
95
+ _process.stderr.on("data", std_out);
96
+ _process.on("exit", () => kill_process(_process));
97
+ _process.on("close", () => kill_process(_process));
98
+ _process.on("disconnect", () => kill_process(_process));
99
+ });
100
+ }
101
+
102
+ function kill_process(process) {
103
+ process.kill("SIGKILL");
104
+ }
105
+
106
+ function make_app(demos, port) {
107
+ return `import gradio as gr
108
+ import uvicorn
109
+ from fastapi import FastAPI
110
+ import gradio as gr
111
+ ${demos.map((d) => `from demo.${d}.run import demo as ${d}`).join("\n")}
112
+
113
+ app = FastAPI()
114
+ ${demos
115
+ .map((d) => `app = gr.mount_gradio_app(app, ${d}, path="/${d}")`)
116
+ .join("\n")}
117
+
118
+ config = uvicorn.Config(app, port=${port}, log_level="info")
119
+ server = uvicorn.Server(config=config)
120
+ server.run()`;
121
+ }
122
+
123
+ export async function find_free_port(start_port, end_port) {
124
+ for (let port = start_port; port < end_port; port++) {
125
+ if (await is_free_port(port)) {
126
+ return port;
127
+ }
128
+ }
129
+
130
+ throw new Error(
131
+ `Could not find free ports: there were not enough ports available.`
132
+ );
133
+ }
134
+
135
+ export function is_free_port(port) {
136
+ return new Promise((accept, reject) => {
137
+ const sock = net.createConnection(port, "127.0.0.1");
138
+ sock.once("connect", () => {
139
+ sock.end();
140
+ accept(false);
141
+ });
142
+ sock.once("error", (e) => {
143
+ sock.destroy();
144
+ if (e.code === "ECONNREFUSED") {
145
+ accept(true);
146
+ } else {
147
+ reject(e);
148
+ }
149
+ });
150
+ });
151
+ }
testbed/gradio-app__gradio/.config/playwright.config.js ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ export default {
2
+ use: {
3
+ screenshot: "only-on-failure",
4
+ trace: "retain-on-failure"
5
+ },
6
+ testMatch: /.*.spec.ts/,
7
+ testDir: "..",
8
+ globalSetup: "./playwright-setup.js"
9
+ };
testbed/gradio-app__gradio/.config/playwright/index.html ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6
+ <title>Testing Page</title>
7
+ </head>
8
+ <body>
9
+ <div id="root"></div>
10
+ <script type="module" src="./index.ts"></script>
11
+ </body>
12
+ </html>
testbed/gradio-app__gradio/.config/playwright/index.ts ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ // Import styles, initialize component theme here.
2
+ // import '../src/common.css';
testbed/gradio-app__gradio/.config/postcss.config.cjs ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ const tailwindcss = require("tailwindcss");
2
+ const autoprefixer = require("autoprefixer");
3
+ const nested = require("tailwindcss/nesting");
4
+ const tw_config = require("./tailwind.config.cjs");
5
+
6
+ module.exports = {
7
+ plugins: [nested, tailwindcss(tw_config), autoprefixer]
8
+ };
testbed/gradio-app__gradio/.config/setup_vite_tests.ts ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import type { TestingLibraryMatchers } from "@testing-library/jest-dom/matchers";
2
+ import matchers from "@testing-library/jest-dom/matchers";
3
+ import { expect } from "vitest";
4
+
5
+ declare module "vitest" {
6
+ interface Assertion<T = any>
7
+ extends jest.Matchers<void, T>,
8
+ TestingLibraryMatchers<T, void> {}
9
+ }
10
+
11
+ expect.extend(matchers);
testbed/gradio-app__gradio/.config/tailwind.config.cjs ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ module.exports = {
2
+ content: [
3
+ "./src/**/*.{html,js,svelte,ts}",
4
+ "**/@gradio/**/*.{html,js,svelte,ts}"
5
+ ],
6
+
7
+ theme: {
8
+ extend: {}
9
+ },
10
+
11
+ plugins: [require("@tailwindcss/forms")]
12
+ };
testbed/gradio-app__gradio/.config/vitest.config.ts ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ import config from "../js/app/vite.config";
2
+
3
+ export default config;
testbed/gradio-app__gradio/.devcontainer/devcontainer.json ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // See https://containers.dev
2
+ {
3
+ "name": "Python 3",
4
+ "image": "mcr.microsoft.com/devcontainers/python:0-3.9",
5
+
6
+ // See https://containers.dev/features
7
+ "features": {
8
+ "ghcr.io/devcontainers/features/git:1": {},
9
+ "ghcr.io/devcontainers/features/node:1": {},
10
+ "ghcr.io/devcontainers-contrib/features/ffmpeg-apt-get:1": {}
11
+ },
12
+
13
+ "hostRequirements": {
14
+ "cpus": 4,
15
+ "memory": "8gb",
16
+ "storage": "32gb"
17
+ },
18
+
19
+ "customizations": {
20
+ "vscode": {
21
+ "extensions": [
22
+ "ms-python.python",
23
+ "ms-python.vscode-pylance",
24
+ "ms-python.black-formatter",
25
+ "ms-toolsai.jupyter",
26
+ "esbenp.prettier-vscode",
27
+ "svelte.svelte-vscode",
28
+ "phoenisx.cssvar"
29
+ ],
30
+ "remote.autoForwardPorts": false
31
+ }
32
+ },
33
+
34
+ "forwardPorts": [7860, 9876],
35
+ "portsAttributes": {
36
+ "7860": { "label": "gradio port" },
37
+ "9876": { "label": "gradio dev port" }
38
+ },
39
+
40
+ "postCreateCommand": "export NODE_OPTIONS=\"--max-old-space-size=8192\" && chmod +x scripts/install_gradio.sh scripts/install_test_requirements.sh scripts/build_frontend.sh && ./scripts/install_gradio.sh && ./scripts/install_test_requirements.sh && ./scripts/build_frontend.sh"
41
+ }
testbed/gradio-app__gradio/README.md ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!-- DO NOT EDIT THIS FILE DIRECTLY. INSTEAD EDIT THE `readme_template.md` OR `guides/1)getting_started/1)quickstart.md` TEMPLATES AND THEN RUN `render_readme.py` SCRIPT. -->
2
+
3
+ <div align="center">
4
+
5
+ [<img src="readme_files/gradio.svg" alt="gradio" width=300>](https://gradio.app)<br>
6
+ <em>Build & share delightful machine learning apps easily</em>
7
+
8
+ [![gradio-backend](https://github.com/gradio-app/gradio/actions/workflows/backend.yml/badge.svg)](https://github.com/gradio-app/gradio/actions/workflows/backend.yml)
9
+ [![gradio-ui](https://github.com/gradio-app/gradio/actions/workflows/ui.yml/badge.svg)](https://github.com/gradio-app/gradio/actions/workflows/ui.yml)
10
+ [![PyPI](https://img.shields.io/pypi/v/gradio)](https://pypi.org/project/gradio/)
11
+ [![PyPI downloads](https://img.shields.io/pypi/dm/gradio)](https://pypi.org/project/gradio/)
12
+ ![Python version](https://img.shields.io/badge/python-3.8+-important)
13
+ [![Twitter follow](https://img.shields.io/twitter/follow/gradio?style=social&label=follow)](https://twitter.com/gradio)
14
+
15
+ [Website](https://gradio.app)
16
+ | [Documentation](https://gradio.app/docs/)
17
+ | [Guides](https://gradio.app/guides/)
18
+ | [Getting Started](https://gradio.app/getting_started/)
19
+ | [Examples](demo/)
20
+ | [中文](readme_files/zh-cn#readme)
21
+
22
+ </div>
23
+
24
+ # Gradio: Build Machine Learning Web Apps — in Python
25
+
26
+ Gradio is an open-source Python library that is used to build machine learning and data science demos and web applications.
27
+
28
+ With Gradio, you can quickly create a beautiful user interface around your machine learning models or data science workflow and let people "try it out" by dragging-and-dropping in their own images,
29
+ pasting text, recording their own voice, and interacting with your demo, all through the browser.
30
+
31
+ ![Interface montage](readme_files/header-image.jpg)
32
+
33
+ Gradio is useful for:
34
+
35
+ - **Demoing** your machine learning models for clients/collaborators/users/students.
36
+
37
+ - **Deploying** your models quickly with automatic shareable links and getting feedback on model performance.
38
+
39
+ - **Debugging** your model interactively during development using built-in manipulation and interpretation tools.
40
+
41
+ ## Quickstart
42
+
43
+ **Prerequisite**: Gradio requires Python 3.8 or higher, that's all!
44
+
45
+ ### What Does Gradio Do?
46
+
47
+ One of the _best ways to share_ your machine learning model, API, or data science workflow with others is to create an **interactive app** that allows your users or colleagues to try out the demo in their browsers.
48
+
49
+ Gradio allows you to **build demos and share them, all in Python.** And usually in just a few lines of code! So let's get started.
50
+
51
+ ### Hello, World
52
+
53
+ To get Gradio running with a simple "Hello, World" example, follow these three steps:
54
+
55
+ 1\. Install Gradio using pip:
56
+
57
+ ```bash
58
+ pip install gradio
59
+ ```
60
+
61
+ 2\. Run the code below as a Python script or in a Jupyter Notebook (or [Google Colab](https://colab.research.google.com/drive/18ODkJvyxHutTN0P5APWyGFO_xwNcgHDZ?usp=sharing)):
62
+
63
+ ```python
64
+ import gradio as gr
65
+
66
+ def greet(name):
67
+ return "Hello " + name + "!"
68
+
69
+ demo = gr.Interface(fn=greet, inputs="text", outputs="text")
70
+
71
+ demo.launch()
72
+ ```
73
+
74
+
75
+ We shorten the imported name to `gr` for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.
76
+
77
+ 3\. The demo below will appear automatically within the Jupyter Notebook, or pop in a browser on [http://localhost:7860](http://localhost:7860) if running from a script:
78
+
79
+ ![`hello_world` demo](demo/hello_world/screenshot.gif)
80
+
81
+ When developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application **in reload mode**, which will provide seamless and fast development. Learn more about reloading in the [Auto-Reloading Guide](https://gradio.app/developing-faster-with-reload-mode/).
82
+
83
+ ```bash
84
+ gradio app.py
85
+ ```
86
+
87
+ Note: you can also do `python app.py`, but it won't provide the automatic reload mechanism.
88
+
89
+ ### The `Interface` Class
90
+
91
+ You'll notice that in order to make the demo, we created a `gr.Interface`. This `Interface` class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from music generator to a tax calculator to the prediction function of a pretrained machine learning model.
92
+
93
+ The core `Interface` class is initialized with three required parameters:
94
+
95
+ - `fn`: the function to wrap a UI around
96
+ - `inputs`: which component(s) to use for the input (e.g. `"text"`, `"image"` or `"audio"`)
97
+ - `outputs`: which component(s) to use for the output (e.g. `"text"`, `"image"` or `"label"`)
98
+
99
+ Let's take a closer look at these components used to provide input and output.
100
+
101
+ ### Components Attributes
102
+
103
+ We saw some simple `Textbox` components in the previous examples, but what if you want to change how the UI components look or behave?
104
+
105
+ Let's say you want to customize the input text field — for example, you wanted it to be larger and have a text placeholder. If we use the actual class for `Textbox` instead of using the string shortcut, you have access to much more customizability through component attributes.
106
+
107
+ ```python
108
+ import gradio as gr
109
+
110
+ def greet(name):
111
+ return "Hello " + name + "!"
112
+
113
+ demo = gr.Interface(
114
+ fn=greet,
115
+ inputs=gr.Textbox(lines=2, placeholder="Name Here..."),
116
+ outputs="text",
117
+ )
118
+ demo.launch()
119
+ ```
120
+
121
+ ![`hello_world_2` demo](demo/hello_world_2/screenshot.gif)
122
+
123
+ ### Multiple Input and Output Components
124
+
125
+ Suppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look how you pass a list of input and output components.
126
+
127
+ ```python
128
+ import gradio as gr
129
+
130
+ def greet(name, is_morning, temperature):
131
+ salutation = "Good morning" if is_morning else "Good evening"
132
+ greeting = f"{salutation} {name}. It is {temperature} degrees today"
133
+ celsius = (temperature - 32) * 5 / 9
134
+ return greeting, round(celsius, 2)
135
+
136
+ demo = gr.Interface(
137
+ fn=greet,
138
+ inputs=["text", "checkbox", gr.Slider(0, 100)],
139
+ outputs=["text", "number"],
140
+ )
141
+ demo.launch()
142
+ ```
143
+
144
+ ![`hello_world_3` demo](demo/hello_world_3/screenshot.gif)
145
+
146
+ You simply wrap the components in a list. Each component in the `inputs` list corresponds to one of the parameters of the function, in order. Each component in the `outputs` list corresponds to one of the values returned by the function, again in order.
147
+
148
+ ### An Image Example
149
+
150
+ Gradio supports many types of components, such as `Image`, `DataFrame`, `Video`, or `Label`. Let's try an image-to-image function to get a feel for these!
151
+
152
+ ```python
153
+ import numpy as np
154
+ import gradio as gr
155
+
156
+ def sepia(input_img):
157
+ sepia_filter = np.array([
158
+ [0.393, 0.769, 0.189],
159
+ [0.349, 0.686, 0.168],
160
+ [0.272, 0.534, 0.131]
161
+ ])
162
+ sepia_img = input_img.dot(sepia_filter.T)
163
+ sepia_img /= sepia_img.max()
164
+ return sepia_img
165
+
166
+ demo = gr.Interface(sepia, gr.Image(shape=(200, 200)), "image")
167
+ demo.launch()
168
+ ```
169
+
170
+ ![`sepia_filter` demo](demo/sepia_filter/screenshot.gif)
171
+
172
+ When using the `Image` component as input, your function will receive a NumPy array with the shape `(height, width, 3)`, where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.
173
+
174
+ You can also set the datatype used by the component with the `type=` keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input `Image` component could be written as:
175
+
176
+ ```python
177
+ gr.Image(type="filepath", shape=...)
178
+ ```
179
+
180
+ Also note that our input `Image` component comes with an edit button 🖉, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!
181
+
182
+ You can read more about the many components and how to use them in the [Gradio docs](https://gradio.app/docs).
183
+
184
+ ### Chatbots
185
+
186
+ Gradio includes a high-level class, `gr.ChatInterface`, which is similar to `gr.Interface`, but is specifically designed for chatbot UIs. The `gr.ChatInterface` class also wraps a function but this function must have a specific signature. The function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order)
187
+
188
+ - `message`: a `str` representing the user's input
189
+ - `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`.
190
+
191
+ Your function should return a single string response, which is the bot's response to the particular user input `message`.
192
+
193
+ Other than that, `gr.ChatInterface` has no required parameters (though several are available for customization of the UI).
194
+
195
+ Here's a toy example:
196
+
197
+ ```python
198
+ import random
199
+ import gradio as gr
200
+
201
+ def random_response(message, history):
202
+ return random.choice(["Yes", "No"])
203
+
204
+ demo = gr.ChatInterface(random_response)
205
+
206
+ demo.launch()
207
+ ```
208
+
209
+ ![`chatinterface_random_response` demo](demo/chatinterface_random_response/screenshot.gif)
210
+
211
+ You can [read more about `gr.ChatInterface` here](https://gradio.app/guides/creating-a-chatbot-fast).
212
+
213
+ ### Blocks: More Flexibility and Control
214
+
215
+ Gradio offers two approaches to build apps:
216
+
217
+ 1\. **Interface** and **ChatInterface**, which provide a high-level abstraction for creating demos that we've been discussing so far.
218
+
219
+ 2\. **Blocks**, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction — still all in Python. If this customizability is what you need, try `Blocks` instead!
220
+
221
+ ### Hello, Blocks
222
+
223
+ Let's take a look at a simple example. Note how the API here differs from `Interface`.
224
+
225
+ ```python
226
+ import gradio as gr
227
+
228
+ def greet(name):
229
+ return "Hello " + name + "!"
230
+
231
+ with gr.Blocks() as demo:
232
+ name = gr.Textbox(label="Name")
233
+ output = gr.Textbox(label="Output Box")
234
+ greet_btn = gr.Button("Greet")
235
+ greet_btn.click(fn=greet, inputs=name, outputs=output, api_name="greet")
236
+
237
+ demo.launch()
238
+ ```
239
+
240
+ ![`hello_blocks` demo](demo/hello_blocks/screenshot.gif)
241
+
242
+ Things to note:
243
+
244
+ - `Blocks` are made with a `with` clause, and any component created inside this clause is automatically added to the app.
245
+ - Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)
246
+ - A `Button` was created, and then a `click` event-listener was added to this button. The API for this should look familiar! Like an `Interface`, the `click` method takes a Python function, input components, and output components.
247
+
248
+ ### More Complexity
249
+
250
+ Here's an app to give you a taste of what's possible with `Blocks`:
251
+
252
+ ```python
253
+ import numpy as np
254
+ import gradio as gr
255
+
256
+
257
+ def flip_text(x):
258
+ return x[::-1]
259
+
260
+
261
+ def flip_image(x):
262
+ return np.fliplr(x)
263
+
264
+
265
+ with gr.Blocks() as demo:
266
+ gr.Markdown("Flip text or image files using this demo.")
267
+ with gr.Tab("Flip Text"):
268
+ text_input = gr.Textbox()
269
+ text_output = gr.Textbox()
270
+ text_button = gr.Button("Flip")
271
+ with gr.Tab("Flip Image"):
272
+ with gr.Row():
273
+ image_input = gr.Image()
274
+ image_output = gr.Image()
275
+ image_button = gr.Button("Flip")
276
+
277
+ with gr.Accordion("Open for More!"):
278
+ gr.Markdown("Look at me...")
279
+
280
+ text_button.click(flip_text, inputs=text_input, outputs=text_output)
281
+ image_button.click(flip_image, inputs=image_input, outputs=image_output)
282
+
283
+ demo.launch()
284
+ ```
285
+
286
+ ![`blocks_flipper` demo](demo/blocks_flipper/screenshot.gif)
287
+
288
+ A lot more going on here! We'll cover how to create complex `Blocks` apps like this in the [building with blocks](https://gradio.app/blocks-and-event-listeners) section for you.
289
+
290
+ Congrats, you're now familiar with the basics of Gradio! 🥳 Go to our [next guide](https://gradio.app/key_features) to learn more about the key features of Gradio.
291
+
292
+
293
+ ## Open Source Stack
294
+
295
+ Gradio is built with many wonderful open-source libraries, please support them as well!
296
+
297
+ [<img src="readme_files/huggingface_mini.svg" alt="huggingface" height=40>](https://huggingface.co)
298
+ [<img src="readme_files/python.svg" alt="python" height=40>](https://www.python.org)
299
+ [<img src="readme_files/fastapi.svg" alt="fastapi" height=40>](https://fastapi.tiangolo.com)
300
+ [<img src="readme_files/encode.svg" alt="encode" height=40>](https://www.encode.io)
301
+ [<img src="readme_files/svelte.svg" alt="svelte" height=40>](https://svelte.dev)
302
+ [<img src="readme_files/vite.svg" alt="vite" height=40>](https://vitejs.dev)
303
+ [<img src="readme_files/pnpm.svg" alt="pnpm" height=40>](https://pnpm.io)
304
+ [<img src="readme_files/tailwind.svg" alt="tailwind" height=40>](https://tailwindcss.com)
305
+ [<img src="readme_files/storybook.svg" alt="storybook" height=40>](https://storybook.js.org/)
306
+ [<img src="readme_files/chromatic.svg" alt="chromatic" height=40>](https://www.chromatic.com/)
307
+
308
+ ## License
309
+
310
+ Gradio is licensed under the Apache License 2.0 found in the [LICENSE](LICENSE) file in the root directory of this repository.
311
+
312
+ ## Citation
313
+
314
+ Also check out the paper _[Gradio: Hassle-Free Sharing and Testing of ML Models in the Wild](https://arxiv.org/abs/1906.02569), ICML HILL 2019_, and please cite it if you use Gradio in your work.
315
+
316
+ ```
317
+ @article{abid2019gradio,
318
+ title = {Gradio: Hassle-Free Sharing and Testing of ML Models in the Wild},
319
+ author = {Abid, Abubakar and Abdalla, Ali and Abid, Ali and Khan, Dawood and Alfozan, Abdulrahman and Zou, James},
320
+ journal = {arXiv preprint arXiv:1906.02569},
321
+ year = {2019},
322
+ }
323
+ ```
testbed/gradio-app__gradio/client/js/CHANGELOG.md ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @gradio/client
2
+
3
+ ## 0.4.0
4
+
5
+ ### Features
6
+
7
+ - [#5682](https://github.com/gradio-app/gradio/pull/5682) [`c57f1b75e`](https://github.com/gradio-app/gradio/commit/c57f1b75e272c76b0af4d6bd0c7f44743ff34f26) - Fix functional tests. Thanks [@abidlabs](https://github.com/abidlabs)!
8
+ - [#5681](https://github.com/gradio-app/gradio/pull/5681) [`40de3d217`](https://github.com/gradio-app/gradio/commit/40de3d2178b61ebe424b6f6228f94c0c6f679bea) - add query parameters to the `gr.Request` object through the `query_params` attribute. Thanks [@DarhkVoyd](https://github.com/DarhkVoyd)!
9
+ - [#5653](https://github.com/gradio-app/gradio/pull/5653) [`ea0e00b20`](https://github.com/gradio-app/gradio/commit/ea0e00b207b4b90a10e9d054c4202d4e705a29ba) - Prevent Clients from accessing API endpoints that set `api_name=False`. Thanks [@abidlabs](https://github.com/abidlabs)!
10
+
11
+ ## 0.3.1
12
+
13
+ ### Fixes
14
+
15
+ - [#5412](https://github.com/gradio-app/gradio/pull/5412) [`26fef8c7`](https://github.com/gradio-app/gradio/commit/26fef8c7f85a006c7e25cdbed1792df19c512d02) - Skip view_api request in js client when auth enabled. Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
16
+
17
+ ## 0.3.0
18
+
19
+ ### Features
20
+
21
+ - [#5267](https://github.com/gradio-app/gradio/pull/5267) [`119c8343`](https://github.com/gradio-app/gradio/commit/119c834331bfae60d4742c8f20e9cdecdd67e8c2) - Faster reload mode. Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
22
+
23
+ ## 0.2.1
24
+
25
+ ### Features
26
+
27
+ - [#5173](https://github.com/gradio-app/gradio/pull/5173) [`730f0c1d`](https://github.com/gradio-app/gradio/commit/730f0c1d54792eb11359e40c9f2326e8a6e39203) - Ensure gradio client works as expected for functions that return nothing. Thanks [@raymondtri](https://github.com/raymondtri)!
28
+
29
+ ## 0.2.0
30
+
31
+ ### Features
32
+
33
+ - [#5133](https://github.com/gradio-app/gradio/pull/5133) [`61129052`](https://github.com/gradio-app/gradio/commit/61129052ed1391a75c825c891d57fa0ad6c09fc8) - Update dependency esbuild to ^0.19.0. Thanks [@renovate](https://github.com/apps/renovate)!
34
+ - [#5035](https://github.com/gradio-app/gradio/pull/5035) [`8b4eb8ca`](https://github.com/gradio-app/gradio/commit/8b4eb8cac9ea07bde31b44e2006ca2b7b5f4de36) - JS Client: Fixes cannot read properties of null (reading 'is_file'). Thanks [@raymondtri](https://github.com/raymondtri)!
35
+
36
+ ### Fixes
37
+
38
+ - [#5075](https://github.com/gradio-app/gradio/pull/5075) [`67265a58`](https://github.com/gradio-app/gradio/commit/67265a58027ef1f9e4c0eb849a532f72eaebde48) - Allow supporting >1000 files in `gr.File()` and `gr.UploadButton()`. Thanks [@abidlabs](https://github.com/abidlabs)!
39
+
40
+ ## 0.1.4
41
+
42
+ ### Patch Changes
43
+
44
+ - [#4717](https://github.com/gradio-app/gradio/pull/4717) [`ab5d1ea0`](https://github.com/gradio-app/gradio/commit/ab5d1ea0de87ed888779b66fd2a705583bd29e02) Thanks [@whitphx](https://github.com/whitphx)! - Fix the package description
45
+
46
+ ## 0.1.3
47
+
48
+ ### Patch Changes
49
+
50
+ - [#4357](https://github.com/gradio-app/gradio/pull/4357) [`0dbd8f7f`](https://github.com/gradio-app/gradio/commit/0dbd8f7fee4b4877f783fa7bc493f98bbfc3d01d) Thanks [@pngwn](https://github.com/pngwn)! - Various internal refactors and cleanups.
51
+
52
+ ## 0.1.2
53
+
54
+ ### Patch Changes
55
+
56
+ - [#4273](https://github.com/gradio-app/gradio/pull/4273) [`1d0f0a9d`](https://github.com/gradio-app/gradio/commit/1d0f0a9db096552e67eb2197c932342587e9e61e) Thanks [@pngwn](https://github.com/pngwn)! - Ensure websocket error messages are correctly handled.
57
+
58
+ - [#4315](https://github.com/gradio-app/gradio/pull/4315) [`b525b122`](https://github.com/gradio-app/gradio/commit/b525b122dd8569bbaf7e06db5b90d622d2e9073d) Thanks [@whitphx](https://github.com/whitphx)! - Refacor types.
59
+
60
+ - [#4271](https://github.com/gradio-app/gradio/pull/4271) [`1151c525`](https://github.com/gradio-app/gradio/commit/1151c5253554cb87ebd4a44a8a470ac215ff782b) Thanks [@pngwn](https://github.com/pngwn)! - Ensure the full root path is always respected when making requests to a gradio app server.
61
+
62
+ ## 0.1.1
63
+
64
+ ### Patch Changes
65
+
66
+ - [#4201](https://github.com/gradio-app/gradio/pull/4201) [`da5b4ee1`](https://github.com/gradio-app/gradio/commit/da5b4ee11721175858ded96e5710225369097f74) Thanks [@pngwn](https://github.com/pngwn)! - Ensure semiver is bundled so CDN links work correctly.
67
+
68
+ - [#4202](https://github.com/gradio-app/gradio/pull/4202) [`a26e9afd`](https://github.com/gradio-app/gradio/commit/a26e9afde319382993e6ddc77cc4e56337a31248) Thanks [@pngwn](https://github.com/pngwn)! - Ensure all URLs returned by the client are complete URLs with the correct host instead of an absolute path relative to a server.
69
+
70
+ ## 0.1.0
71
+
72
+ ### Minor Changes
73
+
74
+ - [#4185](https://github.com/gradio-app/gradio/pull/4185) [`67239ca9`](https://github.com/gradio-app/gradio/commit/67239ca9b2fe3796853fbf7bf865c9e4b383200d) Thanks [@pngwn](https://github.com/pngwn)! - Update client for initial release
75
+
76
+ ### Patch Changes
77
+
78
+ - [#3692](https://github.com/gradio-app/gradio/pull/3692) [`48e8b113`](https://github.com/gradio-app/gradio/commit/48e8b113f4b55e461d9da4f153bf72aeb4adf0f1) Thanks [@pngwn](https://github.com/pngwn)! - Ensure client works in node, create ESM bundle and generate typescript declaration files.
79
+
80
+ - [#3605](https://github.com/gradio-app/gradio/pull/3605) [`ae4277a9`](https://github.com/gradio-app/gradio/commit/ae4277a9a83d49bdadfe523b0739ba988128e73b) Thanks [@pngwn](https://github.com/pngwn)! - Update readme.
testbed/gradio-app__gradio/client/js/README.md ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## JavaScript Client Library
2
+
3
+ A javascript (and typescript) client to call Gradio APIs.
4
+
5
+ ## Installation
6
+
7
+ The Gradio JavaScript client is available on npm as `@gradio/client`. You can install it as below:
8
+
9
+ ```sh
10
+ npm i @gradio/client
11
+ ```
12
+
13
+ ## Usage
14
+
15
+ The JavaScript Gradio Client exposes two named imports, `client` and `duplicate`.
16
+
17
+ ### `client`
18
+
19
+ The client function connects to the API of a hosted Gradio space and returns an object that allows you to make calls to that API.
20
+
21
+ The simplest example looks like this:
22
+
23
+ ```ts
24
+ import { client } from "@gradio/client";
25
+
26
+ const app = await client("user/space-name");
27
+ const result = await app.predict("/predict");
28
+ ```
29
+
30
+ This function accepts two arguments: `source` and `options`:
31
+
32
+ #### `source`
33
+
34
+ This is the url or name of the gradio app whose API you wish to connect to. This parameter is required and should always be a string. For example:
35
+
36
+ ```ts
37
+ client("user/space-name");
38
+ ```
39
+
40
+ #### `options`
41
+
42
+ The options object can optionally be passed a second parameter. This object has two properties, `hf_token` and `status_callback`.
43
+
44
+ ##### `hf_token`
45
+
46
+ This should be a Hugging Face personal access token and is required if you wish to make calls to a private gradio api. This option is optional and should be a string starting with `"hf_"`.
47
+
48
+ Example:
49
+
50
+ ```ts
51
+ import { client } from "@gradio/client";
52
+
53
+ const app = await client("user/space-name", { hf_token: "hf_..." });
54
+ ```
55
+
56
+ ##### `status_callback`
57
+
58
+ This should be a function which will notify you of the status of a space if it is not running. If the gradio API you are connecting to is awake and running or is not hosted on a Hugging Face space then this function will do nothing.
59
+
60
+ **Additional context**
61
+
62
+ Applications hosted on Hugging Face spaces can be in a number of different states. As spaces are a GitOps tool and will rebuild when new changes are pushed to the repository, they have various building, running and error states. If a space is not 'running' then the function passed as the `status_callback` will notify you of the current state of the space and the status of the space as it changes. Spaces that are building or sleeping can take longer than usual to respond, so you can use this information to give users feedback about the progress of their action.
63
+
64
+ ```ts
65
+ import { client, type SpaceStatus } from "@gradio/client";
66
+
67
+ const app = await client("user/space-name", {
68
+ // The space_status parameter does not need to be manually annotated, this is just for illustration.
69
+ space_status: (space_status: SpaceStatus) => console.log(space_status)
70
+ });
71
+ ```
72
+
73
+ ```ts
74
+ interface SpaceStatusNormal {
75
+ status: "sleeping" | "running" | "building" | "error" | "stopped";
76
+ detail:
77
+ | "SLEEPING"
78
+ | "RUNNING"
79
+ | "RUNNING_BUILDING"
80
+ | "BUILDING"
81
+ | "NOT_FOUND";
82
+ load_status: "pending" | "error" | "complete" | "generating";
83
+ message: string;
84
+ }
85
+
86
+ interface SpaceStatusError {
87
+ status: "space_error";
88
+ detail: "NO_APP_FILE" | "CONFIG_ERROR" | "BUILD_ERROR" | "RUNTIME_ERROR";
89
+ load_status: "error";
90
+ message: string;
91
+ discussions_enabled: boolean;
92
+ }
+
93
+ type SpaceStatus = SpaceStatusNormal | SpaceStatusError;
94
+ ```
95
+
96
+ The gradio client returns an object with a number of methods and properties:
97
+
98
+ #### `predict`
99
+
100
+ The `predict` method allows you to call an api endpoint and get a prediction result:
101
+
102
+ ```ts
103
+ import { client } from "@gradio/client";
104
+
105
+ const app = await client("user/space-name");
106
+ const result = await app.predict("/predict");
107
+ ```
108
+
109
+ `predict` accepts two parameters, `endpoint` and `payload`. It returns a promise that resolves to the prediction result.
110
+
111
+ ##### `endpoint`
112
+
113
+ This is the endpoint for an api request and is required. The default endpoint for a `gradio.Interface` is `"/predict"`. Explicitly named endpoints have a custom name. The endpoint names can be found on the "View API" page of a space.
114
+
115
+ ```ts
116
+ import { client } from "@gradio/client";
117
+
118
+ const app = await client("user/space-name");
119
+ const result = await app.predict("/predict");
120
+ ```
121
+
122
+ ##### `payload`
123
+
124
+ The `payload` argument is generally optional but this depends on the API itself. If the API endpoint depends on values being passed in then it is required for the API request to succeed. The data that should be passed in is detailed on the "View API" page of a space, or accessible via the `view_api()` method of the client.
125
+
126
+ ```ts
127
+ import { client } from "@gradio/client";
128
+
129
+ const app = await client("user/space-name");
130
+ const result = await app.predict("/predict", [1, "Hello", "friends"]);
131
+ ```
132
+
133
+ #### `submit`
134
+
135
+ The `submit` method provides a more flexible way to call an API endpoint, providing you with status updates about the current progress of the prediction as well as supporting more complex endpoint types.
136
+
137
+ ```ts
138
+ import { client } from "@gradio/client";
139
+
140
+ const app = await client("user/space-name");
141
+ const submission = app.submit("/predict", payload);
142
+ ```
143
+
144
+ The `submit` method accepts the same [`endpoint`](#endpoint) and [`payload`](#payload) arguments as `predict`.
145
+
146
+ The `submit` method does not return a promise and should not be awaited; instead it returns an object with `on`, `off`, and `cancel` methods.
147
+
148
+ ##### `on`
149
+
150
+ The `on` method allows you to subscribe to events related to the submitted API request. There are two types of event that can be subscribed to: `"data"` updates and `"status"` updates.
151
+
152
+ `"data"` updates are issued when the API computes a value; the callback provided as the second argument will be called when such a value is sent to the client. The shape of the data depends on the way the API itself is constructed. This event may fire more than once if that endpoint supports emitting new values over time.
153
+
154
+ `"status"` updates are issued when the status of a request changes. This information allows you to offer feedback to users when the queue position of the request changes, or when the request changes from queued to processing.
155
+
156
+ The status payload looks like this:
157
+
158
+ ```ts
159
+ interface Status {
160
+ queue: boolean;
161
+ code?: string;
162
+ success?: boolean;
163
+ stage: "pending" | "error" | "complete" | "generating";
164
+ size?: number;
165
+ position?: number;
166
+ eta?: number;
167
+ message?: string;
168
+ progress_data?: Array<{
169
+ progress: number | null;
170
+ index: number | null;
171
+ length: number | null;
172
+ unit: string | null;
173
+ desc: string | null;
174
+ }>;
175
+ time?: Date;
176
+ }
177
+ ```
178
+
179
+ Usage of these subscription callbacks looks like this:
180
+
181
+ ```ts
182
+ import { client } from "@gradio/client";
183
+
184
+ const app = await client("user/space-name");
185
+ const submission = app
186
+ .submit("/predict", payload)
187
+ .on("data", (data) => console.log(data))
188
+ .on("status", (status: Status) => console.log(status));
189
+ ```
190
+
191
+ ##### `off`
192
+
193
+ The `off` method unsubscribes from a specific event of the submitted job and works similarly to `document.removeEventListener`; both the event name and the original callback must be passed in to successfully unsubscribe:
194
+
195
+ ```ts
196
+ import { client } from "@gradio/client";
197
+
198
+ const app = await client("user/space-name");
199
+ const handle_data = (data) => console.log(data);
200
+
201
+ const submission = app.submit("/predict", payload).on("data", handle_data);
202
+
203
+ // later
204
+ submission.off("data", handle_data);
205
+ ```
206
+
207
+ ##### `destroy`
208
+
209
+ The `destroy` method will remove all subscriptions to a job, regardless of whether or not they are `"data"` or `"status"` events. This is a convenience method for when you do not want to unsubscribe from each event individually using the `off` method.
210
+
211
+ ```js
212
+ import { client } from "@gradio/client";
213
+
214
+ const app = await client("user/space-name");
215
+ const handle_data = (data) => console.log(data);
216
+
217
+ const submission = app.submit("/predict", payload).on("data", handle_data);
218
+
219
+ // later
220
+ submission.destroy();
221
+ ```
222
+
223
+ ##### `cancel`
224
+
225
+ Certain types of Gradio function can run repeatedly and in some cases indefinitely. The `cancel` method will stop such an endpoint and prevent the API from issuing additional updates.
226
+
227
+ ```ts
228
+ import { client } from "@gradio/client";
229
+
230
+ const app = await client("user/space-name");
231
+ const submission = app
232
+ .submit("/predict", payload)
233
+ .on("data", (data) => console.log(data));
234
+
235
+ // later
236
+
237
+ submission.cancel();
238
+ ```
239
+
240
+ #### `view_api`
241
+
242
+ The `view_api` method provides details about the API you are connected to. It returns a JavaScript object of all named endpoints, unnamed endpoints and what values they accept and return. This method does not accept arguments.
243
+
244
+ ```ts
245
+ import { client } from "@gradio/client";
246
+
247
+ const app = await client("user/space-name");
248
+ const api_info = await app.view_api();
249
+
250
+ console.log(api_info);
251
+ ```
252
+
253
+ #### `config`
254
+
255
+ The `config` property contains the configuration for the gradio application you are connected to. This object may contain useful meta information about the application.
256
+
257
+ ```ts
258
+ import { client } from "@gradio/client";
259
+
260
+ const app = await client("user/space-name");
261
+ console.log(app.config);
262
+ ```
263
+
264
+ ### `duplicate`
265
+
266
+ The duplicate function will attempt to duplicate the space that is referenced and return an instance of `client` connected to that space. If the space has already been duplicated then it will not create a new duplicate and will instead connect to the existing duplicated space. The huggingface token that is passed in will dictate the user under which the space is created.
267
+
268
+ `duplicate` accepts the same arguments as `client` with the addition of a `private` options property dictating whether the duplicated space should be private or public. A huggingface token is required for duplication to work.
269
+
270
+ ```ts
271
+ import { duplicate } from "@gradio/client";
272
+
273
+ const app = await duplicate("user/space-name", {
274
+ hf_token: "hf_..."
275
+ });
276
+ ```
277
+
278
+ This function accepts two arguments: `source` and `options`:
279
+
280
+ #### `source`
281
+
282
+ The space to duplicate and connect to. [See `client`'s `source` parameter](#source).
283
+
284
+ #### `options`
285
+
286
+ Accepts all options that `client` accepts, except `hf_token` is required. [See `client`'s `options` parameter](#options).
287
+
288
+ `duplicate` also accepts one additional `options` property.
289
+
290
+ ##### `private`
291
+
292
+ This is an optional property specific to `duplicate`'s options object and will determine whether the space should be public or private. Spaces duplicated via the `duplicate` method are public by default.
293
+
294
+ ```ts
295
+ import { duplicate } from "@gradio/client";
296
+
297
+ const app = await duplicate("user/space-name", {
298
+ hf_token: "hf_...",
299
+ private: true
300
+ });
301
+ ```
302
+
303
+ ##### `timeout`
304
+
305
+ This is an optional property specific to `duplicate`'s options object and will set the timeout in minutes before the duplicated space will go to sleep.
306
+
307
+ ```ts
308
+ import { duplicate } from "@gradio/client";
309
+
310
+ const app = await duplicate("user/space-name", {
311
+ hf_token: "hf_...",
312
+ private: true,
313
+ timeout: 5
314
+ });
315
+ ```
316
+
317
+ ##### `hardware`
318
+
319
+ This is an optional property specific to `duplicate`'s options object and will set the hardware for the duplicated space. By default the hardware used will match that of the original space. If this cannot be obtained it will default to `"cpu-basic"`. For hardware upgrades (beyond the basic CPU tier), you may be required to provide [billing information on Hugging Face](https://huggingface.co/settings/billing).
320
+
321
+ Possible hardware options are:
322
+
323
+ - `"cpu-basic"`
324
+ - `"cpu-upgrade"`
325
+ - `"t4-small"`
326
+ - `"t4-medium"`
327
+ - `"a10g-small"`
328
+ - `"a10g-large"`
329
+ - `"a100-large"`
330
+
331
+ ```ts
332
+ import { duplicate } from "@gradio/client";
333
+
334
+ const app = await duplicate("user/space-name", {
335
+ hf_token: "hf_...",
336
+ private: true,
337
+ hardware: "a10g-small"
338
+ });
339
+ ```
testbed/gradio-app__gradio/client/js/package.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "@gradio/client",
3
+ "version": "0.4.0",
4
+ "description": "Gradio API client",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "author": "",
8
+ "license": "ISC",
9
+ "exports": {
10
+ ".": {
11
+ "import": "./dist/index.js"
12
+ },
13
+ "./package.json": "./package.json"
14
+ },
15
+ "dependencies": {
16
+ "bufferutil": "^4.0.7",
17
+ "semiver": "^1.1.0",
18
+ "ws": "^8.13.0"
19
+ },
20
+ "devDependencies": {
21
+ "@types/ws": "^8.5.4",
22
+ "esbuild": "^0.19.0"
23
+ },
24
+ "scripts": {
25
+ "bundle": "vite build --ssr",
26
+ "generate_types": "tsc",
27
+ "build": "pnpm bundle && pnpm generate_types"
28
+ },
29
+ "engines": {
30
+ "node": ">=18.0.0"
31
+ },
32
+ "main_changeset": true
33
+ }
testbed/gradio-app__gradio/client/js/src/client.node-test.ts ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { test, describe, assert } from "vitest";
2
+ import { readFileSync } from "fs";
3
+ import { join, dirname } from "path";
4
+ import { fileURLToPath } from "url";
5
+ import { Blob } from "node:buffer";
6
+
7
+ const __dirname = dirname(fileURLToPath(import.meta.url));
8
+ const image_path = join(
9
+ __dirname,
10
+ "..",
11
+ "..",
12
+ "..",
13
+ "demo",
14
+ "kitchen_sink",
15
+ "files",
16
+ "lion.jpg"
17
+ );
18
+
19
+ import { walk_and_store_blobs, client, handle_blob } from "./client";
20
+
21
+ describe.skip("extract blob parts", () => {
22
+ test("convert Buffer to Blob", async () => {
23
+ const image = readFileSync(image_path);
24
+ await client("gradio/hello_world_main");
25
+ const parts = walk_and_store_blobs({
26
+ data: {
27
+ image
28
+ }
29
+ });
30
+
31
+ assert.isTrue(parts[0].blob instanceof Blob);
32
+ });
33
+
34
+ test("leave node Blob as Blob", async () => {
35
+ const image = new Blob([readFileSync(image_path)]);
36
+
37
+ await client("gradio/hello_world_main");
38
+ const parts = walk_and_store_blobs({
39
+ data: {
40
+ image
41
+ }
42
+ });
43
+
44
+ assert.isTrue(parts[0].blob instanceof Blob);
45
+ });
46
+
47
+ test("handle deep structures", async () => {
48
+ const image = new Blob([readFileSync(image_path)]);
49
+
50
+ await client("gradio/hello_world_main");
51
+ const parts = walk_and_store_blobs({
52
+ a: {
53
+ b: {
54
+ data: {
55
+ image
56
+ }
57
+ }
58
+ }
59
+ });
60
+
61
+ assert.isTrue(parts[0].blob instanceof Blob);
62
+ });
63
+
64
+ test("handle deep structures with arrays", async () => {
65
+ const image = new Blob([readFileSync(image_path)]);
66
+
67
+ await client("gradio/hello_world_main");
68
+ const parts = walk_and_store_blobs({
69
+ a: [
70
+ {
71
+ b: [
72
+ {
73
+ data: [
74
+ {
75
+ image
76
+ }
77
+ ]
78
+ }
79
+ ]
80
+ }
81
+ ]
82
+ });
83
+
84
+ assert.isTrue(parts[0].blob instanceof Blob);
85
+ });
86
+
87
+ test("handle deep structures with arrays 2", async () => {
88
+ const image = new Blob([readFileSync(image_path)]);
89
+
90
+ await client("gradio/hello_world_main");
91
+ const obj = {
92
+ a: [
93
+ {
94
+ b: [
95
+ {
96
+ data: [[image], image, [image, [image]]]
97
+ }
98
+ ]
99
+ }
100
+ ]
101
+ };
102
+ const parts = walk_and_store_blobs(obj);
103
+
104
+ function map_path(
105
+ obj: Record<string, any>,
106
+ parts: { path: string[]; blob: any }[]
107
+ ) {
108
+ const { path, blob } = parts[parts.length - 1];
109
+ let ref = obj;
110
+ path.forEach((p) => (ref = ref[p]));
111
+
112
+ return ref === blob;
113
+ }
114
+
115
+ assert.isTrue(parts[0].blob instanceof Blob);
116
+ // assert.isTrue(map_path(obj, parts));
117
+ });
118
+ });
119
+
120
+ describe("handle_blob", () => {
121
+ test("handle blobs", async () => {
122
+ const image = new Blob([readFileSync(image_path)]);
123
+
124
+ const app = await client("gradio/hello_world_main");
125
+ const obj = [
126
+ {
127
+ a: [
128
+ {
129
+ b: [
130
+ {
131
+ data: [[image], image, [image, [image]]]
132
+ }
133
+ ]
134
+ }
135
+ ]
136
+ }
137
+ ];
138
+
139
+ const parts = await handle_blob(app.config.root, obj, undefined);
140
+ //@ts-ignore
141
+ // assert.isString(parts.data[0].a[0].b[0].data[0][0]);
142
+ });
143
+ });
144
+
145
+ describe.skip("private space", () => {
146
+ test("can access a private space", async () => {
147
+ const image = new Blob([readFileSync(image_path)]);
148
+
149
+ const app = await client("pngwn/hello_world", {
150
+ hf_token: "hf_"
151
+ });
152
+
153
+ console.log(app);
154
+ const obj = [
155
+ {
156
+ a: [
157
+ {
158
+ b: [
159
+ {
160
+ data: [[image], image, [image, [image]]]
161
+ }
162
+ ]
163
+ }
164
+ ]
165
+ }
166
+ ];
167
+
168
+ const parts = await handle_blob(app.config.root, obj, "hf_");
169
+ //@ts-ignore
170
+ assert.isString(parts.data[0].a[0].b[0].data[0][0]);
171
+ });
172
+ });
testbed/gradio-app__gradio/client/js/src/client.ts ADDED
@@ -0,0 +1,1367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import semiver from "semiver";
2
+
3
+ import {
4
+ process_endpoint,
5
+ RE_SPACE_NAME,
6
+ map_names_to_ids,
7
+ discussions_enabled,
8
+ get_space_hardware,
9
+ set_space_hardware,
10
+ set_space_timeout,
11
+ hardware_types
12
+ } from "./utils.js";
13
+
14
+ import type {
15
+ EventType,
16
+ EventListener,
17
+ ListenerMap,
18
+ Event,
19
+ Payload,
20
+ PostResponse,
21
+ UploadResponse,
22
+ Status,
23
+ SpaceStatus,
24
+ SpaceStatusCallback,
25
+ FileData
26
+ } from "./types.js";
27
+
28
+ import type { Config } from "./types.js";
29
+
30
+ type event = <K extends EventType>(
31
+ eventType: K,
32
+ listener: EventListener<K>
33
+ ) => SubmitReturn;
34
+ type predict = (
35
+ endpoint: string | number,
36
+ data?: unknown[],
37
+ event_data?: unknown
38
+ ) => Promise<unknown>;
39
+
40
+ type client_return = {
41
+ predict: predict;
42
+ config: Config;
43
+ submit: (
44
+ endpoint: string | number,
45
+ data?: unknown[],
46
+ event_data?: unknown
47
+ ) => SubmitReturn;
48
+ view_api: (c?: Config) => Promise<ApiInfo<JsApiData>>;
49
+ };
50
+
51
+ type SubmitReturn = {
52
+ on: event;
53
+ off: event;
54
+ cancel: () => Promise<void>;
55
+ destroy: () => void;
56
+ };
57
+
58
+ const QUEUE_FULL_MSG = "This application is too busy. Keep trying!";
59
+ const BROKEN_CONNECTION_MSG = "Connection errored out.";
60
+
61
+ export let NodeBlob;
62
+
63
+ export async function duplicate(
64
+ app_reference: string,
65
+ options: {
66
+ hf_token: `hf_${string}`;
67
+ private?: boolean;
68
+ status_callback: SpaceStatusCallback;
69
+ hardware?: (typeof hardware_types)[number];
70
+ timeout?: number;
71
+ }
72
+ ): Promise<client_return> {
73
+ const { hf_token, private: _private, hardware, timeout } = options;
74
+
75
+ if (hardware && !hardware_types.includes(hardware)) {
76
+ throw new Error(
77
+ `Invalid hardware type provided. Valid types are: ${hardware_types
78
+ .map((v) => `"${v}"`)
79
+ .join(",")}.`
80
+ );
81
+ }
82
+ const headers = {
83
+ Authorization: `Bearer ${hf_token}`
84
+ };
85
+
86
+ const user = (
87
+ await (
88
+ await fetch(`https://huggingface.co/api/whoami-v2`, {
89
+ headers
90
+ })
91
+ ).json()
92
+ ).name;
93
+
94
+ const space_name = app_reference.split("/")[1];
95
+ const body: {
96
+ repository: string;
97
+ private?: boolean;
98
+ } = {
99
+ repository: `${user}/${space_name}`
100
+ };
101
+
102
+ if (_private) {
103
+ body.private = true;
104
+ }
105
+
106
+ try {
107
+ const response = await fetch(
108
+ `https://huggingface.co/api/spaces/${app_reference}/duplicate`,
109
+ {
110
+ method: "POST",
111
+ headers: { "Content-Type": "application/json", ...headers },
112
+ body: JSON.stringify(body)
113
+ }
114
+ );
115
+
116
+ if (response.status === 409) {
117
+ return client(`${user}/${space_name}`, options);
118
+ }
119
+ const duplicated_space = await response.json();
120
+
121
+ let original_hardware;
122
+
123
+ if (!hardware) {
124
+ original_hardware = await get_space_hardware(app_reference, hf_token);
125
+ }
126
+
127
+ const requested_hardware = hardware || original_hardware || "cpu-basic";
128
+ await set_space_hardware(
129
+ `${user}/${space_name}`,
130
+ requested_hardware,
131
+ hf_token
132
+ );
133
+
134
+ await set_space_timeout(`${user}/${space_name}`, timeout || 300, hf_token);
135
+ return client(duplicated_space.url, options);
136
+ } catch (e: any) {
137
+ throw new Error(e);
138
+ }
139
+ }
140
+
141
+ interface Client {
142
+ post_data: (
143
+ url: string,
144
+ body: unknown,
145
+ token?: `hf_${string}`
146
+ ) => Promise<[PostResponse, number]>;
147
+ upload_files: (
148
+ root: string,
149
+ files: File[],
150
+ token?: `hf_${string}`
151
+ ) => Promise<UploadResponse>;
152
+ client: (
153
+ app_reference: string,
154
+ options: {
155
+ hf_token?: `hf_${string}`;
156
+ status_callback?: SpaceStatusCallback;
157
+ normalise_files?: boolean;
158
+ }
159
+ ) => Promise<client_return>;
160
+ handle_blob: (
161
+ endpoint: string,
162
+ data: unknown[],
163
+ api_info: ApiInfo<JsApiData>,
164
+ token?: `hf_${string}`
165
+ ) => Promise<unknown[]>;
166
+ }
167
+
168
+ export function api_factory(fetch_implementation: typeof fetch): Client {
169
+ return { post_data, upload_files, client, handle_blob };
170
+
171
+ async function post_data(
172
+ url: string,
173
+ body: unknown,
174
+ token?: `hf_${string}`
175
+ ): Promise<[PostResponse, number]> {
176
+ const headers: {
177
+ Authorization?: string;
178
+ "Content-Type": "application/json";
179
+ } = { "Content-Type": "application/json" };
180
+ if (token) {
181
+ headers.Authorization = `Bearer ${token}`;
182
+ }
183
+ try {
184
+ var response = await fetch_implementation(url, {
185
+ method: "POST",
186
+ body: JSON.stringify(body),
187
+ headers
188
+ });
189
+ } catch (e) {
190
+ return [{ error: BROKEN_CONNECTION_MSG }, 500];
191
+ }
192
+ const output: PostResponse = await response.json();
193
+ return [output, response.status];
194
+ }
195
+
196
+ async function upload_files(
197
+ root: string,
198
+ files: (Blob | File)[],
199
+ token?: `hf_${string}`
200
+ ): Promise<UploadResponse> {
201
+ const headers: {
202
+ Authorization?: string;
203
+ } = {};
204
+ if (token) {
205
+ headers.Authorization = `Bearer ${token}`;
206
+ }
207
+ const chunkSize = 1000;
208
+ const uploadResponses = [];
209
+ for (let i = 0; i < files.length; i += chunkSize) {
210
+ const chunk = files.slice(i, i + chunkSize);
211
+ const formData = new FormData();
212
+ chunk.forEach((file) => {
213
+ formData.append("files", file);
214
+ });
215
+ try {
216
+ var response = await fetch_implementation(`${root}/upload`, {
217
+ method: "POST",
218
+ body: formData,
219
+ headers
220
+ });
221
+ } catch (e) {
222
+ return { error: BROKEN_CONNECTION_MSG };
223
+ }
224
+ const output: UploadResponse["files"] = await response.json();
225
+ uploadResponses.push(...output);
226
+ }
227
+ return { files: uploadResponses };
228
+ }
229
+
230
+ async function client(
231
+ app_reference: string,
232
+ options: {
233
+ hf_token?: `hf_${string}`;
234
+ status_callback?: SpaceStatusCallback;
235
+ normalise_files?: boolean;
236
+ } = { normalise_files: true }
237
+ ): Promise<client_return> {
238
+ return new Promise(async (res) => {
239
+ const { status_callback, hf_token, normalise_files } = options;
240
+ const return_obj = {
241
+ predict,
242
+ submit,
243
+ view_api
244
+ // duplicate
245
+ };
246
+
247
+ const transform_files = normalise_files ?? true;
248
+ if (typeof window === "undefined" || !("WebSocket" in window)) {
249
+ const ws = await import("ws");
250
+ NodeBlob = (await import("node:buffer")).Blob;
251
+ //@ts-ignore
252
+ global.WebSocket = ws.WebSocket;
253
+ }
254
+
255
+ const { ws_protocol, http_protocol, host, space_id } =
256
+ await process_endpoint(app_reference, hf_token);
257
+
258
+ const session_hash = Math.random().toString(36).substring(2);
259
+ const last_status: Record<string, Status["stage"]> = {};
260
+ let config: Config;
261
+ let api_map: Record<string, number> = {};
262
+
263
+ let jwt: false | string = false;
264
+
265
+ if (hf_token && space_id) {
266
+ jwt = await get_jwt(space_id, hf_token);
267
+ }
268
+
269
+ async function config_success(_config: Config): Promise<client_return> {
270
+ config = _config;
271
+ api_map = map_names_to_ids(_config?.dependencies || []);
272
+ if (config.auth_required) {
273
+ return {
274
+ config,
275
+ ...return_obj
276
+ };
277
+ }
278
+ try {
279
+ api = await view_api(config);
280
+ } catch (e) {
281
+ console.error(`Could not get api details: ${e.message}`);
282
+ }
283
+
284
+ return {
285
+ config,
286
+ ...return_obj
287
+ };
288
+ }
289
+ let api: ApiInfo<JsApiData>;
290
+ async function handle_space_sucess(status: SpaceStatus): Promise<void> {
291
+ if (status_callback) status_callback(status);
292
+ if (status.status === "running")
293
+ try {
294
+ config = await resolve_config(
295
+ fetch_implementation,
296
+ `${http_protocol}//${host}`,
297
+ hf_token
298
+ );
299
+
300
+ const _config = await config_success(config);
301
+ res(_config);
302
+ } catch (e) {
303
+ console.error(e);
304
+ if (status_callback) {
305
+ status_callback({
306
+ status: "error",
307
+ message: "Could not load this space.",
308
+ load_status: "error",
309
+ detail: "NOT_FOUND"
310
+ });
311
+ }
312
+ }
313
+ }
314
+
315
+ try {
316
+ config = await resolve_config(
317
+ fetch_implementation,
318
+ `${http_protocol}//${host}`,
319
+ hf_token
320
+ );
321
+
322
+ const _config = await config_success(config);
323
+ res(_config);
324
+ } catch (e) {
325
+ console.error(e);
326
+ if (space_id) {
327
+ check_space_status(
328
+ space_id,
329
+ RE_SPACE_NAME.test(space_id) ? "space_name" : "subdomain",
330
+ handle_space_sucess
331
+ );
332
+ } else {
333
+ if (status_callback)
334
+ status_callback({
335
+ status: "error",
336
+ message: "Could not load this space.",
337
+ load_status: "error",
338
+ detail: "NOT_FOUND"
339
+ });
340
+ }
341
+ }
342
+
343
+ function predict(
344
+ endpoint: string,
345
+ data: unknown[],
346
+ event_data?: unknown
347
+ ): Promise<unknown> {
348
+ let data_returned = false;
349
+ let status_complete = false;
350
+ let dependency;
351
+ if (typeof endpoint === "number") {
352
+ dependency = config.dependencies[endpoint];
353
+ } else {
354
+ const trimmed_endpoint = endpoint.replace(/^\//, "");
355
+ dependency = config.dependencies[api_map[trimmed_endpoint]];
356
+ }
357
+
358
+ if (dependency.types.continuous) {
359
+ throw new Error(
360
+ "Cannot call predict on this function as it may run forever. Use submit instead"
361
+ );
362
+ }
363
+
364
+ return new Promise((res, rej) => {
365
+ const app = submit(endpoint, data, event_data);
366
+ let result;
367
+
368
+ app
369
+ .on("data", (d) => {
370
+ // if complete message comes before data, resolve here
371
+ if (status_complete) {
372
+ app.destroy();
373
+ res(d);
374
+ }
375
+ data_returned = true;
376
+ result = d;
377
+ })
378
+ .on("status", (status) => {
379
+ if (status.stage === "error") rej(status);
380
+ if (status.stage === "complete") {
381
+ status_complete = true;
382
+ app.destroy();
383
+ // if complete message comes after data, resolve here
384
+ if (data_returned) {
385
+ res(result);
386
+ }
387
+ }
388
+ });
389
+ });
390
+ }
391
+
392
+ function submit(
393
+ endpoint: string | number,
394
+ data: unknown[],
395
+ event_data?: unknown
396
+ ): SubmitReturn {
397
+ let fn_index: number;
398
+ let api_info;
399
+
400
+ if (typeof endpoint === "number") {
401
+ fn_index = endpoint;
402
+ api_info = api.unnamed_endpoints[fn_index];
403
+ } else {
404
+ const trimmed_endpoint = endpoint.replace(/^\//, "");
405
+
406
+ fn_index = api_map[trimmed_endpoint];
407
+ api_info = api.named_endpoints[endpoint.trim()];
408
+ }
409
+
410
+ if (typeof fn_index !== "number") {
411
+ throw new Error(
412
+ "There is no endpoint matching that name of fn_index matching that number."
413
+ );
414
+ }
415
+
416
+ let websocket: WebSocket;
417
+
418
+ const _endpoint = typeof endpoint === "number" ? "/predict" : endpoint;
419
+ let payload: Payload;
420
+ let complete: false | Record<string, any> = false;
421
+ const listener_map: ListenerMap<EventType> = {};
422
+ const url_params = new URLSearchParams(
423
+ window.location.search
424
+ ).toString();
425
+
426
+ handle_blob(
427
+ `${http_protocol}//${host + config.path}`,
428
+ data,
429
+ api_info,
430
+ hf_token
431
+ ).then((_payload) => {
432
+ payload = { data: _payload || [], event_data, fn_index };
433
+ if (skip_queue(fn_index, config)) {
434
+ fire_event({
435
+ type: "status",
436
+ endpoint: _endpoint,
437
+ stage: "pending",
438
+ queue: false,
439
+ fn_index,
440
+ time: new Date()
441
+ });
442
+
443
+ post_data(
444
+ `${http_protocol}//${host + config.path}/run${
445
+ _endpoint.startsWith("/") ? _endpoint : `/${_endpoint}`
446
+ }${url_params ? "?" + url_params : ""}`,
447
+ {
448
+ ...payload,
449
+ session_hash
450
+ },
451
+ hf_token
452
+ )
453
+ .then(([output, status_code]) => {
454
+ const data = transform_files
455
+ ? transform_output(
456
+ output.data,
457
+ api_info,
458
+ config.root,
459
+ config.root_url
460
+ )
461
+ : output.data;
462
+ if (status_code == 200) {
463
+ fire_event({
464
+ type: "data",
465
+ endpoint: _endpoint,
466
+ fn_index,
467
+ data: data,
468
+ time: new Date()
469
+ });
470
+
471
+ fire_event({
472
+ type: "status",
473
+ endpoint: _endpoint,
474
+ fn_index,
475
+ stage: "complete",
476
+ eta: output.average_duration,
477
+ queue: false,
478
+ time: new Date()
479
+ });
480
+ } else {
481
+ fire_event({
482
+ type: "status",
483
+ stage: "error",
484
+ endpoint: _endpoint,
485
+ fn_index,
486
+ message: output.error,
487
+ queue: false,
488
+ time: new Date()
489
+ });
490
+ }
491
+ })
492
+ .catch((e) => {
493
+ fire_event({
494
+ type: "status",
495
+ stage: "error",
496
+ message: e.message,
497
+ endpoint: _endpoint,
498
+ fn_index,
499
+ queue: false,
500
+ time: new Date()
501
+ });
502
+ });
503
+ } else {
504
+ fire_event({
505
+ type: "status",
506
+ stage: "pending",
507
+ queue: true,
508
+ endpoint: _endpoint,
509
+ fn_index,
510
+ time: new Date()
511
+ });
512
+
513
+ let url = new URL(`${ws_protocol}://${host}${config.path}
514
+ /queue/join${url_params ? "?" + url_params : ""}`);
515
+
516
+ if (jwt) {
517
+ url.searchParams.set("__sign", jwt);
518
+ }
519
+
520
+ websocket = new WebSocket(url);
521
+
522
+ websocket.onclose = (evt) => {
523
+ if (!evt.wasClean) {
524
+ fire_event({
525
+ type: "status",
526
+ stage: "error",
527
+ broken: true,
528
+ message: BROKEN_CONNECTION_MSG,
529
+ queue: true,
530
+ endpoint: _endpoint,
531
+ fn_index,
532
+ time: new Date()
533
+ });
534
+ }
535
+ };
536
+
537
+ websocket.onmessage = function (event) {
538
+ const _data = JSON.parse(event.data);
539
+ const { type, status, data } = handle_message(
540
+ _data,
541
+ last_status[fn_index]
542
+ );
543
+
544
+ if (type === "update" && status && !complete) {
545
+ // call 'status' listeners
546
+ fire_event({
547
+ type: "status",
548
+ endpoint: _endpoint,
549
+ fn_index,
550
+ time: new Date(),
551
+ ...status
552
+ });
553
+ if (status.stage === "error") {
554
+ websocket.close();
555
+ }
556
+ } else if (type === "hash") {
557
+ websocket.send(JSON.stringify({ fn_index, session_hash }));
558
+ return;
559
+ } else if (type === "data") {
560
+ websocket.send(JSON.stringify({ ...payload, session_hash }));
561
+ } else if (type === "complete") {
562
+ complete = status;
563
+ } else if (type === "log") {
564
+ fire_event({
565
+ type: "log",
566
+ log: data.log,
567
+ level: data.level,
568
+ endpoint: _endpoint,
569
+ fn_index
570
+ });
571
+ } else if (type === "generating") {
572
+ fire_event({
573
+ type: "status",
574
+ time: new Date(),
575
+ ...status,
576
+ stage: status?.stage!,
577
+ queue: true,
578
+ endpoint: _endpoint,
579
+ fn_index
580
+ });
581
+ }
582
+ if (data) {
583
+ fire_event({
584
+ type: "data",
585
+ time: new Date(),
586
+ data: transform_files
587
+ ? transform_output(
588
+ data.data,
589
+ api_info,
590
+ config.root,
591
+ config.root_url
592
+ )
593
+ : data.data,
594
+ endpoint: _endpoint,
595
+ fn_index
596
+ });
597
+
598
+ if (complete) {
599
+ fire_event({
600
+ type: "status",
601
+ time: new Date(),
602
+ ...complete,
603
+ stage: status?.stage!,
604
+ queue: true,
605
+ endpoint: _endpoint,
606
+ fn_index
607
+ });
608
+ websocket.close();
609
+ }
610
+ }
611
+ };
612
+
613
+ // different ws contract for gradio versions older than 3.6.0
614
+ //@ts-ignore
615
+ if (semiver(config.version || "2.0.0", "3.6") < 0) {
616
+ addEventListener("open", () =>
617
+ websocket.send(JSON.stringify({ hash: session_hash }))
618
+ );
619
+ }
620
+ }
621
+ });
622
+
623
+ function fire_event<K extends EventType>(event: Event<K>): void {
624
+ const narrowed_listener_map: ListenerMap<K> = listener_map;
625
+ const listeners = narrowed_listener_map[event.type] || [];
626
+ listeners?.forEach((l) => l(event));
627
+ }
628
+
629
+ function on<K extends EventType>(
630
+ eventType: K,
631
+ listener: EventListener<K>
632
+ ): SubmitReturn {
633
+ const narrowed_listener_map: ListenerMap<K> = listener_map;
634
+ const listeners = narrowed_listener_map[eventType] || [];
635
+ narrowed_listener_map[eventType] = listeners;
636
+ listeners?.push(listener);
637
+
638
+ return { on, off, cancel, destroy };
639
+ }
640
+
641
+ function off<K extends EventType>(
642
+ eventType: K,
643
+ listener: EventListener<K>
644
+ ): SubmitReturn {
645
+ const narrowed_listener_map: ListenerMap<K> = listener_map;
646
+ let listeners = narrowed_listener_map[eventType] || [];
647
+ listeners = listeners?.filter((l) => l !== listener);
648
+ narrowed_listener_map[eventType] = listeners;
649
+
650
+ return { on, off, cancel, destroy };
651
+ }
652
+
653
// Cancel the in-flight submission: report a terminal "complete" status to
// listeners, close the websocket, and ask the server to reset this fn.
async function cancel(): Promise<void> {
	const _status: Status = {
		stage: "complete",
		queue: false,
		time: new Date()
	};
	complete = _status;
	fire_event({
		..._status,
		type: "status",
		endpoint: _endpoint,
		fn_index: fn_index
	});

	// readyState 0 === CONNECTING: the socket cannot be closed yet, so
	// defer the close until it opens; otherwise close immediately.
	if (websocket && websocket.readyState === 0) {
		websocket.addEventListener("open", () => {
			websocket.close();
		});
	} else {
		websocket.close();
	}

	try {
		// Tell the backend to drop any queued work for this fn/session.
		await fetch_implementation(
			`${http_protocol}//${host + config.path}/reset`,
			{
				headers: { "Content-Type": "application/json" },
				method: "POST",
				body: JSON.stringify({ fn_index, session_hash })
			}
		);
	} catch (e) {
		// Best-effort: a failed reset is only logged; cancel() still resolves.
		console.warn(
			"The `/reset` endpoint could not be called. Subsequent endpoint results may be unreliable."
		);
	}
}
690
+
691
+ function destroy(): void {
692
+ for (const event_type in listener_map) {
693
+ listener_map[event_type as "data" | "status"].forEach((fn) => {
694
+ off(event_type as "data" | "status", fn);
695
+ });
696
+ }
697
+ }
698
+
699
+ return {
700
+ on,
701
+ off,
702
+ cancel,
703
+ destroy
704
+ };
705
+ }
706
+
707
/**
 * Fetch the API signature of the connected app and transform it into the
 * JS-facing shape. Returns the cached `api` value when one has already
 * been resolved.
 *
 * For gradio < 3.30 the config is posted to the hosted
 * `gradio-space-api-fetcher-v2` service (old apps expose no `/info`
 * route); newer apps are queried directly at `${config.root}/info`.
 *
 * @param config - the resolved app config.
 *   NOTE(review): declared optional but dereferenced unconditionally
 *   (`config.version`, `config.root`) — calling without a config throws a
 *   TypeError. Confirm callers always pass one.
 * @throws Error(BROKEN_CONNECTION_MSG) when the request fails.
 */
async function view_api(config?: Config): Promise<ApiInfo<JsApiData>> {
	if (api) return api;

	const headers: {
		Authorization?: string;
		"Content-Type": "application/json";
	} = { "Content-Type": "application/json" };
	if (hf_token) {
		headers.Authorization = `Bearer ${hf_token}`;
	}
	let response: Response;
	// @ts-ignore
	if (semiver(config.version || "2.0.0", "3.30") < 0) {
		// Legacy apps: delegate signature extraction to the fetcher space.
		response = await fetch_implementation(
			"https://gradio-space-api-fetcher-v2.hf.space/api",
			{
				method: "POST",
				body: JSON.stringify({
					serialize: false,
					config: JSON.stringify(config)
				}),
				headers
			}
		);
	} else {
		response = await fetch_implementation(`${config.root}/info`, {
			headers
		});
	}

	if (!response.ok) {
		throw new Error(BROKEN_CONNECTION_MSG);
	}

	// The fetcher space wraps the payload in an `api` key; unwrap it.
	let api_info = (await response.json()) as
		| ApiInfo<ApiData>
		| { api: ApiInfo<ApiData> };
	if ("api" in api_info) {
		api_info = api_info.api;
	}

	// Mirror "/predict" as unnamed endpoint 0 for positional access.
	if (
		api_info.named_endpoints["/predict"] &&
		!api_info.unnamed_endpoints["0"]
	) {
		api_info.unnamed_endpoints[0] = api_info.named_endpoints["/predict"];
	}

	const x = transform_api_info(api_info, config, api_map);
	return x;
}
758
+ });
759
+ }
760
+
761
/**
 * Serialise binary values inside `data` before submission: blob-like
 * leaves are uploaded via the app's upload endpoint and replaced in
 * place with server file references; image data is inlined as base64.
 *
 * @param endpoint - root URL of the app to upload against
 * @param data - positional payload; MUTATED in place
 * @param api_info - endpoint signature used to decide per-slot handling
 * @param token - optional HF token forwarded to the upload request
 * @returns the same `data` array with blobs replaced
 */
async function handle_blob(
	endpoint: string,
	data: unknown[],
	api_info: ApiInfo<JsApiData>,
	token?: `hf_${string}`
): Promise<unknown[]> {
	// Collect every blob-like leaf together with its path into `data`.
	const blob_refs = await walk_and_store_blobs(
		data,
		undefined,
		[],
		true,
		api_info
	);

	return Promise.all(
		blob_refs.map(async ({ path, blob, data, type }) => {
			if (blob) {
				// Upload the binary part; keep only the returned server path.
				const file_url = (await upload_files(endpoint, [blob], token))
					.files[0];
				return { path, file_url, type };
			}
			return { path, base64: data, type };
		})
	).then((r) => {
		r.forEach(({ path, file_url, base64, type }) => {
			if (base64) {
				// Inlined (base64) data is written back verbatim.
				update_object(data, base64, path);
			} else if (type === "Gallery") {
				// Galleries take the raw URL rather than a file object.
				update_object(data, file_url, path);
			} else if (file_url) {
				// Other uploads become `is_file` references resolved server-side.
				const o = {
					is_file: true,
					name: `${file_url}`,
					data: null
					// orig_name: "file.csv"
				};
				update_object(data, o, path);
			}
		});

		return data;
	});
}
804
+ }
805
+
806
// Default client helpers bound to the environment's native `fetch`, so
// consumers can use them without going through `api_factory` themselves.
export const { post_data, upload_files, client, handle_blob } =
	api_factory(fetch);
808
+
809
+ function transform_output(
810
+ data: any[],
811
+ api_info: any,
812
+ root_url: string,
813
+ remote_url?: string
814
+ ): unknown[] {
815
+ return data.map((d, i) => {
816
+ if (api_info?.returns?.[i]?.component === "File") {
817
+ return normalise_file(d, root_url, remote_url);
818
+ } else if (api_info?.returns?.[i]?.component === "Gallery") {
819
+ return d.map((img) => {
820
+ return Array.isArray(img)
821
+ ? [normalise_file(img[0], root_url, remote_url), img[1]]
822
+ : [normalise_file(img, root_url, remote_url), null];
823
+ });
824
+ } else if (typeof d === "object" && d?.is_file) {
825
+ return normalise_file(d, root_url, remote_url);
826
+ }
827
+ return d;
828
+ });
829
+ }
830
+
831
+ function normalise_file(
832
+ file: FileData[],
833
+ root: string,
834
+ root_url: string | null
835
+ ): FileData[];
836
+ function normalise_file(
837
+ file: FileData | string,
838
+ root: string,
839
+ root_url: string | null
840
+ ): FileData;
841
+ function normalise_file(
842
+ file: null,
843
+ root: string,
844
+ root_url: string | null
845
+ ): null;
846
+ function normalise_file(file, root, root_url): FileData[] | FileData | null {
847
+ if (file == null) return null;
848
+ if (typeof file === "string") {
849
+ return {
850
+ name: "file_data",
851
+ data: file
852
+ };
853
+ } else if (Array.isArray(file)) {
854
+ const normalized_file: (FileData | null)[] = [];
855
+
856
+ for (const x of file) {
857
+ if (x === null) {
858
+ normalized_file.push(null);
859
+ } else {
860
+ normalized_file.push(normalise_file(x, root, root_url));
861
+ }
862
+ }
863
+
864
+ return normalized_file as FileData[];
865
+ } else if (file.is_file) {
866
+ if (!root_url) {
867
+ file.data = root + "/file=" + file.name;
868
+ } else {
869
+ file.data = "/proxy=" + root_url + "file=" + file.name;
870
+ }
871
+ }
872
+ return file;
873
+ }
874
+
875
// Raw endpoint slot description as sent by the python backend.
interface ApiData {
	label: string;
	type: {
		// JSON-schema-ish type info; shape varies by component.
		type: any;
		description: string;
	};
	component: string;
	example_input?: any;
}

// Endpoint slot description after transformation for JS consumers:
// `type` becomes a TypeScript signature string (see get_type).
interface JsApiData {
	label: string;
	type: string;
	component: string;
	example_input: any;
}

// Input/output signature of a single endpoint.
interface EndpointInfo<T extends ApiData | JsApiData> {
	parameters: T[];
	returns: T[];
}
// Full API description: endpoints addressed by api_name or by index.
interface ApiInfo<T extends ApiData | JsApiData> {
	named_endpoints: {
		[key: string]: EndpointInfo<T>;
	};
	unnamed_endpoints: {
		[key: string]: EndpointInfo<T>;
	};
}
904
+
905
+ function get_type(
906
+ type: { [key: string]: any },
907
+ component: string,
908
+ serializer: string,
909
+ signature_type: "return" | "parameter"
910
+ ): string {
911
+ switch (type.type) {
912
+ case "string":
913
+ return "string";
914
+ case "boolean":
915
+ return "boolean";
916
+ case "number":
917
+ return "number";
918
+ }
919
+
920
+ if (
921
+ serializer === "JSONSerializable" ||
922
+ serializer === "StringSerializable"
923
+ ) {
924
+ return "any";
925
+ } else if (serializer === "ListStringSerializable") {
926
+ return "string[]";
927
+ } else if (component === "Image") {
928
+ return signature_type === "parameter" ? "Blob | File | Buffer" : "string";
929
+ } else if (serializer === "FileSerializable") {
930
+ if (type?.type === "array") {
931
+ return signature_type === "parameter"
932
+ ? "(Blob | File | Buffer)[]"
933
+ : `{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}[]`;
934
+ }
935
+ return signature_type === "parameter"
936
+ ? "Blob | File | Buffer"
937
+ : `{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}`;
938
+ } else if (serializer === "GallerySerializable") {
939
+ return signature_type === "parameter"
940
+ ? "[(Blob | File | Buffer), (string | null)][]"
941
+ : `[{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}, (string | null))][]`;
942
+ }
943
+ }
944
+
945
+ function get_description(
946
+ type: { type: any; description: string },
947
+ serializer: string
948
+ ): string {
949
+ if (serializer === "GallerySerializable") {
950
+ return "array of [file, label] tuples";
951
+ } else if (serializer === "ListStringSerializable") {
952
+ return "array of strings";
953
+ } else if (serializer === "FileSerializable") {
954
+ return "array of files or single file";
955
+ }
956
+ return type.description;
957
+ }
958
+
959
/**
 * Convert raw backend API info into the JS-facing shape: each slot's
 * structured type becomes a TS signature string plus a description, and
 * each endpoint is annotated with its dependency's `types` flags.
 *
 * @param api_info - raw named/unnamed endpoint map from the backend
 * @param config - resolved app config (dependency list)
 * @param api_map - api_name -> dependency-index lookup
 */
function transform_api_info(
	api_info: ApiInfo<ApiData>,
	config: Config,
	api_map: Record<string, number>
): ApiInfo<JsApiData> {
	const new_data = {
		named_endpoints: {},
		unnamed_endpoints: {}
	};
	// key iterates "named_endpoints" / "unnamed_endpoints".
	for (const key in api_info) {
		const cat = api_info[key];

		for (const endpoint in cat) {
			// Unnamed endpoints are already dependency indices; named ones
			// ("/foo") are resolved through api_map.
			// NOTE(review): if neither lookup matches, dep_index is undefined
			// and the `.types` access below throws — confirm inputs upstream.
			const dep_index = config.dependencies[endpoint]
				? endpoint
				: api_map[endpoint.replace("/", "")];

			const info = cat[endpoint];
			new_data[key][endpoint] = {};
			new_data[key][endpoint].parameters = {};
			new_data[key][endpoint].returns = {};
			new_data[key][endpoint].type = config.dependencies[dep_index].types;
			new_data[key][endpoint].parameters = info.parameters.map(
				({ label, component, type, serializer }) => ({
					label,
					component,
					type: get_type(type, component, serializer, "parameter"),
					description: get_description(type, serializer)
				})
			);

			new_data[key][endpoint].returns = info.returns.map(
				({ label, component, type, serializer }) => ({
					label,
					component,
					type: get_type(type, component, serializer, "return"),
					description: get_description(type, serializer)
				})
			);
		}
	}

	return new_data;
}
1003
+
1004
+ async function get_jwt(
1005
+ space: string,
1006
+ token: `hf_${string}`
1007
+ ): Promise<string | false> {
1008
+ try {
1009
+ const r = await fetch(`https://huggingface.co/api/spaces/${space}/jwt`, {
1010
+ headers: {
1011
+ Authorization: `Bearer ${token}`
1012
+ }
1013
+ });
1014
+
1015
+ const jwt = (await r.json()).token;
1016
+
1017
+ return jwt || false;
1018
+ } catch (e) {
1019
+ console.error(e);
1020
+ return false;
1021
+ }
1022
+ }
1023
+
1024
+ function update_object(object, newValue, stack): void {
1025
+ while (stack.length > 1) {
1026
+ object = object[stack.shift()];
1027
+ }
1028
+
1029
+ object[stack.shift()] = newValue;
1030
+ }
1031
+
1032
/**
 * Recursively walk `param`, collecting every blob-like leaf (Buffer,
 * Blob, File) together with its key path, so handle_blob can upload or
 * inline it and write the result back with update_object.
 *
 * @param param - value to walk (array, object, blob-like, or scalar)
 * @param type - component type inherited from the endpoint signature;
 *   "Image" leaves are base64-inlined instead of uploaded
 * @param path - key path accumulated so far
 * @param root - true only for the top-level call, where array indices
 *   correspond to endpoint parameters
 * @param api_info - endpoint signature used to look up parameter types
 * @returns flat list of { path, data (base64 | false), type, blob }
 */
export async function walk_and_store_blobs(
	param,
	type = undefined,
	path = [],
	root = false,
	api_info = undefined
): Promise<
	{
		path: string[];
		data: string | false;
		type: string;
		blob: Blob | false;
	}[]
> {
	if (Array.isArray(param)) {
		let blob_refs = [];

		await Promise.all(
			param.map(async (v, i) => {
				let new_path = path.slice();
				new_path.push(i);

				// At the root, index i addresses endpoint parameter i, so pull
				// the component type from the signature.
				const array_refs = await walk_and_store_blobs(
					param[i],
					root ? api_info?.parameters[i]?.component || undefined : type,
					new_path,
					false,
					api_info
				);

				blob_refs = blob_refs.concat(array_refs);
			})
		);

		return blob_refs;
	} else if (globalThis.Buffer && param instanceof globalThis.Buffer) {
		// Node-only branch: images become base64, everything else a Blob.
		const is_image = type === "Image";
		return [
			{
				path: path,
				blob: is_image ? false : new NodeBlob([param]),
				data: is_image ? `${param.toString("base64")}` : false,
				type
			}
		];
	} else if (
		param instanceof Blob ||
		(typeof window !== "undefined" && param instanceof File)
	) {
		if (type === "Image") {
			let data;

			if (typeof window !== "undefined") {
				// browser
				data = await image_to_data_uri(param);
			} else {
				const buffer = await param.arrayBuffer();
				data = Buffer.from(buffer).toString("base64");
			}

			return [{ path, data, type, blob: false }];
		}
		return [{ path: path, blob: param, type, data: false }];
	} else if (typeof param === "object") {
		// Plain object: recurse into own enumerable keys.
		let blob_refs = [];
		for (let key in param) {
			if (param.hasOwnProperty(key)) {
				let new_path = path.slice();
				new_path.push(key);
				blob_refs = blob_refs.concat(
					await walk_and_store_blobs(
						param[key],
						undefined,
						new_path,
						false,
						api_info
					)
				);
			}
		}
		return blob_refs;
	}
	// Scalars contain no blobs.
	return [];
}
1116
+
1117
+ function image_to_data_uri(blob: Blob): Promise<string | ArrayBuffer> {
1118
+ return new Promise((resolve, _) => {
1119
+ const reader = new FileReader();
1120
+ reader.onloadend = () => resolve(reader.result);
1121
+ reader.readAsDataURL(blob);
1122
+ });
1123
+ }
1124
+
1125
+ function skip_queue(id: number, config: Config): boolean {
1126
+ return (
1127
+ !(config?.dependencies?.[id]?.queue === null
1128
+ ? config.enable_queue
1129
+ : config?.dependencies?.[id]?.queue) || false
1130
+ );
1131
+ }
1132
+
1133
/**
 * Resolve the app config, preferring the config embedded in the page
 * (`window.gradio_config`, when not in dev mode and not on the local
 * dev server) and otherwise fetching `${endpoint}/config`.
 *
 * @param fetch_implementation - fetch to use for the network fallback
 * @param endpoint - app root URL; required when no embedded config exists
 * @param token - optional HF token sent as a Bearer header
 * @throws Error when the config request fails or no source is available
 */
async function resolve_config(
	fetch_implementation: typeof fetch,
	endpoint?: string,
	token?: `hf_${string}`
): Promise<Config> {
	const headers: { Authorization?: string } = {};
	if (token) {
		headers.Authorization = `Bearer ${token}`;
	}
	if (
		typeof window !== "undefined" &&
		window.gradio_config &&
		location.origin !== "http://localhost:9876" &&
		!window.gradio_config.dev_mode
	) {
		// Embedded config: rebase `root` onto the endpoint and keep the
		// original root as `path`.
		const path = window.gradio_config.root;
		const config = window.gradio_config;
		config.root = endpoint + config.root;
		return { ...config, path: path };
	} else if (endpoint) {
		let response = await fetch_implementation(`${endpoint}/config`, {
			headers
		});

		if (response.status === 200) {
			const config = await response.json();
			config.path = config.path ?? "";
			config.root = endpoint;
			return config;
		}
		throw new Error("Could not get config.");
	}

	throw new Error("No config or app endpoint found");
}
1168
+
1169
/**
 * Poll a HF Space's runtime status and report progress through
 * `status_callback`. Re-schedules itself every second while the space is
 * sleeping or building; terminal states (running, paused, error) are
 * reported once.
 *
 * @param id - space name ("user/space") or subdomain, per `type`
 * @param type - which HF API route to query
 * @param status_callback - receives each status transition
 */
async function check_space_status(
	id: string,
	type: "subdomain" | "space_name",
	status_callback: SpaceStatusCallback
): Promise<void> {
	let endpoint =
		type === "subdomain"
			? `https://huggingface.co/api/spaces/by-subdomain/${id}`
			: `https://huggingface.co/api/spaces/${id}`;
	let response;
	let _status;
	try {
		response = await fetch(endpoint);
		_status = response.status;
		if (_status !== 200) {
			throw new Error();
		}
		response = await response.json();
	} catch (e) {
		// Network failure or non-200: report and stop polling.
		status_callback({
			status: "error",
			load_status: "error",
			message: "Could not get space status",
			detail: "NOT_FOUND"
		});
		return;
	}

	if (!response || _status !== 200) return;
	const {
		runtime: { stage },
		id: space_name
	} = response;

	switch (stage) {
		case "STOPPED":
		case "SLEEPING":
			status_callback({
				status: "sleeping",
				load_status: "pending",
				message: "Space is asleep. Waking it up...",
				detail: stage
			});

			// Keep polling until the space wakes.
			setTimeout(() => {
				check_space_status(id, type, status_callback);
			}, 1000); // poll for status
			break;
		case "PAUSED":
			status_callback({
				status: "paused",
				load_status: "error",
				message:
					"This space has been paused by the author. If you would like to try this demo, consider duplicating the space.",
				detail: stage,
				discussions_enabled: await discussions_enabled(space_name)
			});
			break;
		case "RUNNING":
		case "RUNNING_BUILDING":
			status_callback({
				status: "running",
				load_status: "complete",
				message: "",
				detail: stage
			});
			// load_config(source);
			// launch
			break;
		case "BUILDING":
			status_callback({
				status: "building",
				load_status: "pending",
				message: "Space is building...",
				detail: stage
			});

			// Keep polling until the build finishes.
			setTimeout(() => {
				check_space_status(id, type, status_callback);
			}, 1000);
			break;
		default:
			// Any other runtime stage is treated as a space-side error.
			status_callback({
				status: "space_error",
				load_status: "error",
				message: "This space is experiencing an issue.",
				detail: stage,
				discussions_enabled: await discussions_enabled(space_name)
			});
			break;
	}
}
1261
+
1262
/**
 * Translate a raw queue/websocket message into an action for the client.
 *
 * @param data - parsed server message; `data.msg` discriminates the kind
 * @param last_status - previously reported stage, reused by "estimation"
 * @returns `type` tells the caller what to do ("hash"/"data" → respond on
 *   the socket; "update"/"generating"/"complete"/"log" → fire events;
 *   "none" → unknown message, reported as an error status)
 */
function handle_message(
	data: any,
	last_status: Status["stage"]
): {
	type: "hash" | "data" | "update" | "complete" | "generating" | "log" | "none";
	data?: any;
	status?: Status;
} {
	const queue = true;
	switch (data.msg) {
		case "send_data":
			// Server is ready to receive the payload.
			return { type: "data" };
		case "send_hash":
			// Server wants the session hash (queue handshake).
			return { type: "hash" };
		case "queue_full":
			return {
				type: "update",
				status: {
					queue,
					message: QUEUE_FULL_MSG,
					stage: "error",
					code: data.code,
					success: data.success
				}
			};
		case "estimation":
			// Queue position/ETA update; does not change the current stage.
			return {
				type: "update",
				status: {
					queue,
					stage: last_status || "pending",
					code: data.code,
					size: data.queue_size,
					position: data.rank,
					eta: data.rank_eta,
					success: data.success
				}
			};
		case "progress":
			return {
				type: "update",
				status: {
					queue,
					stage: "pending",
					code: data.code,
					progress_data: data.progress_data,
					success: data.success
				}
			};
		case "log":
			return { type: "log", data: data };
		case "process_generating":
			// Intermediate output from a generator endpoint.
			return {
				type: "generating",
				status: {
					queue,
					message: !data.success ? data.output.error : null,
					stage: data.success ? "generating" : "error",
					code: data.code,
					progress_data: data.progress_data,
					eta: data.average_duration
				},
				data: data.success ? data.output : null
			};
		case "process_completed":
			// An `error` key in the output signals failure even when the
			// process nominally completed.
			if ("error" in data.output) {
				return {
					type: "update",
					status: {
						queue,
						message: data.output.error as string,
						stage: "error",
						code: data.code,
						success: data.success
					}
				};
			}
			return {
				type: "complete",
				status: {
					queue,
					message: !data.success ? data.output.error : undefined,
					stage: data.success ? "complete" : "error",
					code: data.code,
					progress_data: data.progress_data,
					eta: data.output.average_duration
				},
				data: data.success ? data.output : null
			};

		case "process_starts":
			return {
				type: "update",
				status: {
					queue,
					stage: "pending",
					code: data.code,
					size: data.rank,
					position: 0,
					success: data.success
				}
			};
	}

	// Unknown message type: surface a generic error status.
	return { type: "none", status: { stage: "error", queue } };
}
testbed/gradio-app__gradio/client/js/src/globals.d.ts ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Ambient declarations for the browser build: gradio mounts state on
// `window` when the client runs inside an app page.
declare global {
	interface Window {
		// Whether the client is embedded in the gradio app or the website.
		__gradio_mode__: "app" | "website";
		// Server-rendered app config injected into the page at load time.
		gradio_config: Config;
		__is_colab__: boolean;
		__gradio_space__: string | null;
	}
}

// Shape of the server-provided app configuration.
// NOTE(review): near-duplicate of Config in src/types.ts (which adds
// `root_url?`) — keep the two in sync.
export interface Config {
	auth_required: boolean | undefined;
	auth_message: string;
	components: any[];
	css: string | null;
	dependencies: any[];
	dev_mode: boolean;
	enable_queue: boolean;
	layout: any;
	mode: "blocks" | "interface";
	root: string;
	theme: string;
	title: string;
	version: string;
	space_id: string | null;
	is_colab: boolean;
	show_api: boolean;
	stylesheets: string[];
	path: string;
}
testbed/gradio-app__gradio/client/js/src/index.ts ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
// Public entry point of the package: re-export the fetch-bound client
// helpers and the space status type.
export {
	client,
	post_data,
	upload_files,
	duplicate,
	api_factory
} from "./client.js";
export type { SpaceStatus } from "./types.js";
testbed/gradio-app__gradio/client/js/src/types.ts ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// App configuration as served by `${root}/config` (or embedded in the
// page). `root_url` is set when the app is proxied through another host.
export interface Config {
	auth_required: boolean | undefined;
	auth_message: string;
	components: any[];
	css: string | null;
	dependencies: any[];
	dev_mode: boolean;
	enable_queue: boolean;
	layout: any;
	mode: "blocks" | "interface";
	root: string;
	root_url?: string;
	theme: string;
	title: string;
	version: string;
	space_id: string | null;
	is_colab: boolean;
	show_api: boolean;
	stylesheets: string[];
	path: string;
}

// Positional payload sent to / received from an endpoint.
export interface Payload {
	data: unknown[];
	fn_index?: number;
	event_data?: unknown;
	time?: Date;
}

// Response of a generic POST; extra fields are endpoint-specific.
export interface PostResponse {
	error?: string;
	[x: string]: any;
}
// Response of the upload endpoint: server paths of the stored files.
export interface UploadResponse {
	error?: string;
	files?: string[];
}

// Progress/status snapshot fired on every "status" event.
export interface Status {
	queue: boolean;
	code?: string;
	success?: boolean;
	stage: "pending" | "error" | "complete" | "generating";
	broken?: boolean;
	// Queue size and this job's position, when queued.
	size?: number;
	position?: number;
	eta?: number;
	message?: string;
	progress_data?: {
		progress: number | null;
		index: number | null;
		length: number | null;
		unit: string | null;
		desc: string | null;
	}[];
	time?: Date;
}

// Server-side log line forwarded over the "log" event.
export interface LogMessage {
	log: string;
	level: "warning" | "info";
}

// Space runtime status for non-error stages.
export interface SpaceStatusNormal {
	status: "sleeping" | "running" | "building" | "error" | "stopped";
	detail:
		| "SLEEPING"
		| "RUNNING"
		| "RUNNING_BUILDING"
		| "BUILDING"
		| "NOT_FOUND";
	load_status: "pending" | "error" | "complete" | "generating";
	message: string;
}
// Space runtime status for error/paused stages; includes whether the
// space's discussions tab is available for reporting issues.
export interface SpaceStatusError {
	status: "space_error" | "paused";
	detail:
		| "NO_APP_FILE"
		| "CONFIG_ERROR"
		| "BUILD_ERROR"
		| "RUNTIME_ERROR"
		| "PAUSED";
	load_status: "error";
	message: string;
	discussions_enabled: boolean;
}
export type SpaceStatus = SpaceStatusNormal | SpaceStatusError;

export type status_callback_function = (a: Status) => void;
export type SpaceStatusCallback = (a: SpaceStatus) => void;

// Event types a submission handle can emit.
export type EventType = "data" | "status" | "log";

export interface EventMap {
	data: Payload;
	status: Status;
	log: LogMessage;
}

// An event payload enriched with its type and originating endpoint.
export type Event<K extends EventType> = {
	[P in K]: EventMap[P] & { type: P; endpoint: string; fn_index: number };
}[K];
export type EventListener<K extends EventType> = (event: Event<K>) => void;
// Listener registry keyed by event type.
export type ListenerMap<K extends EventType> = {
	[P in K]?: EventListener<K>[];
};
// File value exchanged with the server; `data` is a URL or base64 blob,
// `is_file` marks server-side files addressed by `name`.
export interface FileData {
	name: string;
	orig_name?: string;
	size?: number;
	data: string;
	blob?: File;
	is_file?: boolean;
	mime_type?: string;
	alt_text?: string;
}
testbed/gradio-app__gradio/client/js/src/utils.ts ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import type { Config } from "./types.js";
2
+
3
+ export function determine_protocol(endpoint: string): {
4
+ ws_protocol: "ws" | "wss";
5
+ http_protocol: "http:" | "https:";
6
+ host: string;
7
+ } {
8
+ if (endpoint.startsWith("http")) {
9
+ const { protocol, host } = new URL(endpoint);
10
+
11
+ if (host.endsWith("hf.space")) {
12
+ return {
13
+ ws_protocol: "wss",
14
+ host: host,
15
+ http_protocol: protocol as "http:" | "https:"
16
+ };
17
+ }
18
+ return {
19
+ ws_protocol: protocol === "https:" ? "wss" : "ws",
20
+ http_protocol: protocol as "http:" | "https:",
21
+ host
22
+ };
23
+ }
24
+
25
+ // default to secure if no protocol is provided
26
+ return {
27
+ ws_protocol: "wss",
28
+ http_protocol: "https:",
29
+ host: endpoint
30
+ };
31
+ }
32
+
33
// "user/space" identifiers (exactly one slash, no slashes in either part).
export const RE_SPACE_NAME = /^[^\/]*\/[^\/]*$/;
// Bare *.hf.space hostnames, with optional trailing slash.
export const RE_SPACE_DOMAIN = /.*hf\.space\/{0,1}$/;
/**
 * Resolve an app reference — a "user/space" id, a *.hf.space domain, or
 * a raw URL — into connection details (host + protocols) plus the space
 * id when the target is a HF Space.
 *
 * @param app_reference - space id, space domain, or app URL
 * @param token - optional HF token used when querying space metadata
 * @throws Error when space metadata cannot be fetched for a space id
 */
export async function process_endpoint(
	app_reference: string,
	token?: `hf_${string}`
): Promise<{
	space_id: string | false;
	host: string;
	ws_protocol: "ws" | "wss";
	http_protocol: "http:" | "https:";
}> {
	const headers: { Authorization?: string } = {};
	if (token) {
		headers.Authorization = `Bearer ${token}`;
	}

	const _app_reference = app_reference.trim();

	if (RE_SPACE_NAME.test(_app_reference)) {
		// "user/space": look the actual host up via the HF API.
		try {
			const res = await fetch(
				`https://huggingface.co/api/spaces/${_app_reference}/host`,
				{ headers }
			);

			if (res.status !== 200)
				throw new Error("Space metadata could not be loaded.");
			const _host = (await res.json()).host;

			return {
				space_id: app_reference,
				...determine_protocol(_host)
			};
		} catch (e) {
			throw new Error("Space metadata could not be loaded." + e.message);
		}
	}

	if (RE_SPACE_DOMAIN.test(_app_reference)) {
		// Direct *.hf.space domain: derive the space id from the subdomain.
		const { ws_protocol, http_protocol, host } =
			determine_protocol(_app_reference);

		return {
			space_id: host.replace(".hf.space", ""),
			ws_protocol,
			http_protocol,
			host
		};
	}

	// Anything else is treated as a plain (non-space) app URL.
	return {
		space_id: false,
		...determine_protocol(_app_reference)
	};
}
88
+
89
+ export function map_names_to_ids(
90
+ fns: Config["dependencies"]
91
+ ): Record<string, number> {
92
+ let apis: Record<string, number> = {};
93
+
94
+ fns.forEach(({ api_name }, i) => {
95
+ if (api_name) apis[api_name] = i;
96
+ });
97
+
98
+ return apis;
99
+ }
100
+
101
// Matches the HF error message announcing that discussions are disabled
// (any text containing both "discussion(s)" and "disabled", any case).
const RE_DISABLED_DISCUSSION =
	/^(?=[^]*\b[dD]iscussions{0,1}\b)(?=[^]*\b[dD]isabled\b)[^]*$/;
/**
 * Whether a space's discussions tab is available. Uses a HEAD request
 * and inspects the `x-error-message` header; network failures are
 * treated as "disabled".
 */
export async function discussions_enabled(space_id: string): Promise<boolean> {
	try {
		const r = await fetch(
			`https://huggingface.co/api/spaces/${space_id}/discussions`,
			{
				method: "HEAD"
			}
		);
		const error = r.headers.get("x-error-message");

		if (error && RE_DISABLED_DISCUSSION.test(error)) return false;
		return true;
	} catch (e) {
		return false;
	}
}
119
+
120
/**
 * Look up the hardware tier a space is currently running on.
 *
 * @param space_id - "user/space" id
 * @param token - HF token with read access to the space
 * @returns one of `hardware_types`
 * @throws Error when the runtime info cannot be fetched
 */
export async function get_space_hardware(
	space_id: string,
	token: `hf_${string}`
): Promise<(typeof hardware_types)[number]> {
	const headers: { Authorization?: string } = {};
	if (token) {
		headers.Authorization = `Bearer ${token}`;
	}

	try {
		const res = await fetch(
			`https://huggingface.co/api/spaces/${space_id}/runtime`,
			{ headers }
		);

		if (res.status !== 200)
			throw new Error("Space hardware could not be obtained.");

		const { hardware } = await res.json();

		return hardware;
	} catch (e) {
		throw new Error(e.message);
	}
}
145
+
146
+ export async function set_space_hardware(
147
+ space_id: string,
148
+ new_hardware: (typeof hardware_types)[number],
149
+ token: `hf_${string}`
150
+ ): Promise<(typeof hardware_types)[number]> {
151
+ const headers: { Authorization?: string } = {};
152
+ if (token) {
153
+ headers.Authorization = `Bearer ${token}`;
154
+ }
155
+
156
+ try {
157
+ const res = await fetch(
158
+ `https://huggingface.co/api/spaces/${space_id}/hardware`,
159
+ { headers, body: JSON.stringify(new_hardware) }
160
+ );
161
+
162
+ if (res.status !== 200)
163
+ throw new Error(
164
+ "Space hardware could not be set. Please ensure the space hardware provided is valid and that a Hugging Face token is passed in."
165
+ );
166
+
167
+ const { hardware } = await res.json();
168
+
169
+ return hardware;
170
+ } catch (e) {
171
+ throw new Error(e.message);
172
+ }
173
+ }
174
+
175
+ export async function set_space_timeout(
176
+ space_id: string,
177
+ timeout: number,
178
+ token: `hf_${string}`
179
+ ): Promise<number> {
180
+ const headers: { Authorization?: string } = {};
181
+ if (token) {
182
+ headers.Authorization = `Bearer ${token}`;
183
+ }
184
+
185
+ try {
186
+ const res = await fetch(
187
+ `https://huggingface.co/api/spaces/${space_id}/hardware`,
188
+ { headers, body: JSON.stringify({ seconds: timeout }) }
189
+ );
190
+
191
+ if (res.status !== 200)
192
+ throw new Error(
193
+ "Space hardware could not be set. Please ensure the space hardware provided is valid and that a Hugging Face token is passed in."
194
+ );
195
+
196
+ const { hardware } = await res.json();
197
+
198
+ return hardware;
199
+ } catch (e) {
200
+ throw new Error(e.message);
201
+ }
202
+ }
203
+
204
// Hardware tiers selectable for a Hugging Face Space, as accepted by the
// HF hardware API (see get_space_hardware / set_space_hardware).
export const hardware_types = [
	"cpu-basic",
	"cpu-upgrade",
	"t4-small",
	"t4-medium",
	"a10g-small",
	"a10g-large",
	"a100-large"
] as const;
testbed/gradio-app__gradio/client/js/tsconfig.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "include": ["src/**/*"],
3
+ "exclude": ["src/**/*.test.ts", "src/**/*.node-test.ts"],
4
+ "compilerOptions": {
5
+ "allowJs": true,
6
+ "declaration": true,
7
+ "emitDeclarationOnly": true,
8
+ "outDir": "dist",
9
+ "declarationMap": true,
10
+ "module": "es2020",
11
+ "moduleResolution": "node16",
12
+ "skipDefaultLibCheck": true
13
+ }
14
+ }
testbed/gradio-app__gradio/client/js/vite.config.js ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { defineConfig } from "vite";
2
+
3
export default defineConfig({
	build: {
		// minify: true,
		// Build the client as an ES-module library from the single entry.
		lib: {
			entry: "src/index.ts",
			formats: ["es"]
		},
		rollupOptions: {
			input: "src/index.ts",
			output: {
				dir: "dist"
			}
		}
	},

	// Node (SSR) build settings: bundle `ws` and `semiver` so the package
	// works without requiring consumers to install them.
	ssr: {
		target: "node",
		format: "esm",
		noExternal: ["ws", "semiver"]
	}
});
testbed/gradio-app__gradio/client/python/CHANGELOG.md ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # gradio_client
2
+
3
+ ## 0.5.2
4
+
5
+ ### Features
6
+
7
+ - [#5653](https://github.com/gradio-app/gradio/pull/5653) [`ea0e00b20`](https://github.com/gradio-app/gradio/commit/ea0e00b207b4b90a10e9d054c4202d4e705a29ba) - Prevent Clients from accessing API endpoints that set `api_name=False`. Thanks [@abidlabs](https://github.com/abidlabs)!
8
+
9
+ ## 0.5.1
10
+
11
+ ### Features
12
+
13
+ - [#5514](https://github.com/gradio-app/gradio/pull/5514) [`52f783175`](https://github.com/gradio-app/gradio/commit/52f7831751b432411e109bd41add4ab286023a8e) - refactor: Use package.json for version management. Thanks [@DarhkVoyd](https://github.com/DarhkVoyd)!
14
+
15
+ ## 0.5.0
16
+
17
+ ### Highlights
18
+
19
+ #### Enable streaming audio in python client ([#5248](https://github.com/gradio-app/gradio/pull/5248) [`390624d8`](https://github.com/gradio-app/gradio/commit/390624d8ad2b1308a5bf8384435fd0db98d8e29e))
20
+
21
+ The `gradio_client` now supports streaming file outputs 🌊
22
+
23
+ No new syntax! Connect to a gradio demo that supports streaming file outputs and call `predict` or `submit` as you normally would.
24
+
25
+ ```python
26
+ import gradio_client as grc
27
+ client = grc.Client("gradio/stream_audio_out")
28
+
29
+ # Get the entire generated audio as a local file
30
+ client.predict("/Users/freddy/Pictures/bark_demo.mp4", api_name="/predict")
31
+
32
+ job = client.submit("/Users/freddy/Pictures/bark_demo.mp4", api_name="/predict")
33
+
34
+ # Get the entire generated audio as a local file
35
+ job.result()
36
+
37
+ # Each individual chunk
38
+ job.outputs()
39
+ ```
40
+
41
+ Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
42
+
43
+ ### Fixes
44
+
45
+ - [#5295](https://github.com/gradio-app/gradio/pull/5295) [`7b8fa8aa`](https://github.com/gradio-app/gradio/commit/7b8fa8aa58f95f5046b9add64b40368bd3f1b700) - Allow caching examples with streamed output. Thanks [@aliabid94](https://github.com/aliabid94)!
46
+
47
+ ## 0.4.0
48
+
49
+ ### Highlights
50
+
51
+ #### Client.predict will now return the final output for streaming endpoints ([#5057](https://github.com/gradio-app/gradio/pull/5057) [`35856f8b`](https://github.com/gradio-app/gradio/commit/35856f8b54548cae7bd3b8d6a4de69e1748283b2))
52
+
53
+ ### This is a breaking change (for gradio_client only)!
54
+
55
+ Previously, `Client.predict` would only return the first output of an endpoint that streamed results. This was causing confusion for developers that wanted to call these streaming demos via the client.
56
+
57
+ We realize that developers using the client don't know the internals of whether a demo streams or not, so we're changing the behavior of predict to match developer expectations.
58
+
59
+ Using `Client.predict` will now return the final output of a streaming endpoint. This will make it even easier to use gradio apps via the client.
60
+
61
+ Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
62
+
63
+ ### Features
64
+
65
+ - [#5076](https://github.com/gradio-app/gradio/pull/5076) [`2745075a`](https://github.com/gradio-app/gradio/commit/2745075a26f80e0e16863d483401ff1b6c5ada7a) - Add deploy_discord to docs. Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
66
+
67
+ ### Fixes
68
+
69
+ - [#5061](https://github.com/gradio-app/gradio/pull/5061) [`136adc9c`](https://github.com/gradio-app/gradio/commit/136adc9ccb23e5cb4d02d2e88f23f0b850041f98) - Ensure `gradio_client` is backwards compatible with `gradio==3.24.1`. Thanks [@abidlabs](https://github.com/abidlabs)!
70
+
71
+ ## 0.3.0
72
+
73
+ ### Highlights
74
+
75
+ #### Create Discord Bots from Gradio Apps 🤖 ([#4960](https://github.com/gradio-app/gradio/pull/4960) [`46e4ef67`](https://github.com/gradio-app/gradio/commit/46e4ef67d287dd68a91473b73172b29cbad064bc))
76
+
77
+ We're excited to announce that Gradio can now automatically create a discord bot from any `gr.ChatInterface` app.
78
+
79
+ It's as easy as importing `gradio_client`, connecting to the app, and calling `deploy_discord`!
80
+
81
+ _🦙 Turning Llama 2 70b into a discord bot 🦙_
82
+
83
+ ```python
84
+ import gradio_client as grc
85
+ grc.Client("ysharma/Explore_llamav2_with_TGI").deploy_discord(to_id="llama2-70b-discord-bot")
86
+ ```
87
+
88
+ <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/guide/llama_chat.gif">
89
+
90
+ #### Getting started with template spaces
91
+
92
+ To help get you started, we have created an organization on Hugging Face called [gradio-discord-bots](https://huggingface.co/gradio-discord-bots) with template spaces you can use to turn state of the art LLMs powered by Gradio to discord bots.
93
+
94
+ Currently we have template spaces for:
95
+
96
+ - [Llama-2-70b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-70b-chat-hf) powered by a FREE Hugging Face Inference Endpoint!
97
+ - [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-13b-chat-hf) powered by Hugging Face Inference Endpoints.
98
+ - [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/llama-2-13b-chat-transformers) powered by Hugging Face transformers.
99
+ - [falcon-7b-instruct](https://huggingface.co/spaces/gradio-discord-bots/falcon-7b-instruct) powered by Hugging Face Inference Endpoints.
100
+ - [gpt-3.5-turbo](https://huggingface.co/spaces/gradio-discord-bots/gpt-35-turbo), powered by openai. Requires an OpenAI key.
101
+
102
+ But once again, you can deploy ANY `gr.ChatInterface` app exposed on the internet! So don't hesitate to try it on your own Chatbots.
103
+
104
+ ❗️ Additional Note ❗️: Technically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. But `gr.ChatInterface` apps naturally lend themselves to discord's chat functionality so we suggest you start with those.
105
+
106
+ Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
107
+
108
+ ### New Features:
109
+
110
+ - Endpoints that return layout components are now properly handled in the `submit` and `view_api` methods. Output layout components are not returned by the API but all other components are (excluding `gr.State`). By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4871](https://github.com/gradio-app/gradio/pull/4871)
111
+
112
+ ### Bug Fixes:
113
+
114
+ No changes to highlight
115
+
116
+ ### Breaking Changes:
117
+
118
+ No changes to highlight.
119
+
120
+ ### Full Changelog:
121
+
122
+ No changes to highlight.
123
+
124
+ # 0.2.9
125
+
126
+ ### New Features:
127
+
128
+ No changes to highlight
129
+
130
+ ### Bug Fixes:
131
+
132
+ - Fix bug determining the api name when a demo has `api_name=False` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4886](https://github.com/gradio-app/gradio/pull/4886)
133
+
134
+ ### Breaking Changes:
135
+
136
+ No changes to highlight.
137
+
138
+ ### Full Changelog:
139
+
140
+ - Pinned dependencies to major versions to reduce the likelihood of a broken `gradio_client` due to changes in downstream dependencies by [@abidlabs](https://github.com/abidlabs) in [PR 4885](https://github.com/gradio-app/gradio/pull/4885)
141
+
142
+ # 0.2.8
143
+
144
+ ### New Features:
145
+
146
+ - Support loading gradio apps where `api_name=False` by [@abidlabs](https://github.com/abidlabs) in [PR 4683](https://github.com/gradio-app/gradio/pull/4683)
147
+
148
+ ### Bug Fixes:
149
+
150
+ - Fix bug where space duplication would error if the demo has cpu-basic hardware by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4583](https://github.com/gradio-app/gradio/pull/4583)
151
+ - Fixes and optimizations to URL/download functions by [@akx](https://github.com/akx) in [PR 4695](https://github.com/gradio-app/gradio/pull/4695)
152
+
153
+ ### Breaking Changes:
154
+
155
+ No changes to highlight.
156
+
157
+ ### Full Changelog:
158
+
159
+ No changes to highlight.
160
+
161
+ # 0.2.7
162
+
163
+ ### New Features:
164
+
165
+ - The output directory for files downloaded via the Client can now be set by the `output_dir` parameter in `Client` by [@abidlabs](https://github.com/abidlabs) in [PR 4501](https://github.com/gradio-app/gradio/pull/4501)
166
+
167
+ ### Bug Fixes:
168
+
169
+ - The output directory for files downloaded via the Client are now set to a temporary directory by default (instead of the working directory in some cases) by [@abidlabs](https://github.com/abidlabs) in [PR 4501](https://github.com/gradio-app/gradio/pull/4501)
170
+
171
+ ### Breaking Changes:
172
+
173
+ No changes to highlight.
174
+
175
+ ### Full Changelog:
176
+
177
+ No changes to highlight.
178
+
179
+ # 0.2.6
180
+
181
+ ### New Features:
182
+
183
+ No changes to highlight.
184
+
185
+ ### Bug Fixes:
186
+
187
+ - Fixed bug where file deserialization didn't preserve all file extensions by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4440](https://github.com/gradio-app/gradio/pull/4440)
188
+ - Fixed bug where mounted apps could not be called via the client by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4435](https://github.com/gradio-app/gradio/pull/4435)
189
+
190
+ ### Breaking Changes:
191
+
192
+ No changes to highlight.
193
+
194
+ ### Full Changelog:
195
+
196
+ No changes to highlight.
197
+
198
+ # 0.2.5
199
+
200
+ ### New Features:
201
+
202
+ No changes to highlight.
203
+
204
+ ### Bug Fixes:
205
+
206
+ - Fixes parameter names not showing underscores by [@abidlabs](https://github.com/abidlabs) in [PR 4230](https://github.com/gradio-app/gradio/pull/4230)
207
+ - Fixes issue in which state was not handled correctly if `serialize=False` by [@abidlabs](https://github.com/abidlabs) in [PR 4230](https://github.com/gradio-app/gradio/pull/4230)
208
+
209
+ ### Breaking Changes:
210
+
211
+ No changes to highlight.
212
+
213
+ ### Full Changelog:
214
+
215
+ No changes to highlight.
216
+
217
+ # 0.2.4
218
+
219
+ ### Bug Fixes:
220
+
221
+ - Fixes missing serialization classes for several components: `Barplot`, `Lineplot`, `Scatterplot`, `AnnotatedImage`, `Interpretation` by [@abidlabs](https://github.com/abidlabs) in [PR 4167](https://github.com/gradio-app/gradio/pull/4167)
222
+
223
+ ### Documentation Changes:
224
+
225
+ No changes to highlight.
226
+
227
+ ### Testing and Infrastructure Changes:
228
+
229
+ No changes to highlight.
230
+
231
+ ### Breaking Changes:
232
+
233
+ No changes to highlight.
234
+
235
+ ### Full Changelog:
236
+
237
+ No changes to highlight.
238
+
239
+ ### Contributors Shoutout:
240
+
241
+ No changes to highlight.
242
+
243
+ # 0.2.3
244
+
245
+ ### New Features:
246
+
247
+ No changes to highlight.
248
+
249
+ ### Bug Fixes:
250
+
251
+ - Fix example inputs for `gr.File(file_count='multiple')` output components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4153](https://github.com/gradio-app/gradio/pull/4153)
252
+
253
+ ### Documentation Changes:
254
+
255
+ No changes to highlight.
256
+
257
+ ### Testing and Infrastructure Changes:
258
+
259
+ No changes to highlight.
260
+
261
+ ### Breaking Changes:
262
+
263
+ No changes to highlight.
264
+
265
+ ### Full Changelog:
266
+
267
+ No changes to highlight.
268
+
269
+ ### Contributors Shoutout:
270
+
271
+ No changes to highlight.
272
+
273
+ # 0.2.2
274
+
275
+ ### New Features:
276
+
277
+ No changes to highlight.
278
+
279
+ ### Bug Fixes:
280
+
281
+ - Only send request to `/info` route if demo version is above `3.28.3` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4109](https://github.com/gradio-app/gradio/pull/4109)
282
+
283
+ ### Other Changes:
284
+
285
+ - Fix bug in test from gradio 3.29.0 refactor by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4138](https://github.com/gradio-app/gradio/pull/4138)
286
+
287
+ ### Breaking Changes:
288
+
289
+ No changes to highlight.
290
+
291
+ # 0.2.1
292
+
293
+ ### New Features:
294
+
295
+ No changes to highlight.
296
+
297
+ ### Bug Fixes:
298
+
299
+ Removes extraneous `State` component info from the `Client.view_api()` method by [@abidlabs](https://github.com/abidlabs) in [PR 4107](https://github.com/gradio-app/gradio/pull/4107)
300
+
301
+ ### Documentation Changes:
302
+
303
+ No changes to highlight.
304
+
305
+ ### Testing and Infrastructure Changes:
306
+
307
+ Separates flaky tests from non-flaky tests by [@abidlabs](https://github.com/abidlabs) in [PR 4107](https://github.com/gradio-app/gradio/pull/4107)
308
+
309
+ ### Breaking Changes:
310
+
311
+ No changes to highlight.
312
+
313
+ ### Full Changelog:
314
+
315
+ No changes to highlight.
316
+
317
+ ### Contributors Shoutout:
318
+
319
+ No changes to highlight.
320
+
321
+ # 0.1.4
322
+
323
+ ### New Features:
324
+
325
+ - Progress Updates from `gr.Progress()` can be accessed via `job.status().progress_data` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3924](https://github.com/gradio-app/gradio/pull/3924)
326
+
327
+ ### Bug Fixes:
328
+
329
+ - Fixed bug where unnamed routes where displayed with `api_name` instead of `fn_index` in `view_api` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3972](https://github.com/gradio-app/gradio/pull/3972)
330
+
331
+ ### Documentation Changes:
332
+
333
+ No changes to highlight.
334
+
335
+ ### Testing and Infrastructure Changes:
336
+
337
+ No changes to highlight.
338
+
339
+ ### Breaking Changes:
340
+
341
+ No changes to highlight.
342
+
343
+ ### Full Changelog:
344
+
345
+ No changes to highlight.
346
+
347
+ ### Contributors Shoutout:
348
+
349
+ No changes to highlight.
350
+
351
+ # 0.1.3
352
+
353
+ ### New Features:
354
+
355
+ No changes to highlight.
356
+
357
+ ### Bug Fixes:
358
+
359
+ - Fixed bug where `Video` components in latest gradio were not able to be deserialized by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3860](https://github.com/gradio-app/gradio/pull/3860)
360
+
361
+ ### Documentation Changes:
362
+
363
+ No changes to highlight.
364
+
365
+ ### Testing and Infrastructure Changes:
366
+
367
+ No changes to highlight.
368
+
369
+ ### Breaking Changes:
370
+
371
+ No changes to highlight.
372
+
373
+ ### Full Changelog:
374
+
375
+ No changes to highlight.
376
+
377
+ ### Contributors Shoutout:
378
+
379
+ No changes to highlight.
380
+
381
+ # 0.1.2
382
+
383
+ First public release of the Gradio Client library! The `gradio_client` Python library makes it very easy to use any Gradio app as an API.
384
+
385
+ As an example, consider this [Hugging Face Space that transcribes audio files](https://huggingface.co/spaces/abidlabs/whisper) that are recorded from the microphone.
386
+
387
+ ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/whisper-screenshot.jpg)
388
+
389
+ Using the `gradio_client` library, we can easily use the Gradio app as an API to transcribe audio files programmatically.
390
+
391
+ Here's the entire code to do it:
392
+
393
+ ```python
394
+ from gradio_client import Client
395
+
396
+ client = Client("abidlabs/whisper")
397
+ client.predict("audio_sample.wav")
398
+
399
+ >> "This is a test of the whisper speech recognition model."
400
+ ```
401
+
402
+ Read more about how to use the `gradio_client` library here: https://gradio.app/getting-started-with-the-python-client/
testbed/gradio-app__gradio/client/python/README.md ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # `gradio_client`: Use a Gradio app as an API -- in 3 lines of Python
2
+
3
+ This directory contains the source code for `gradio_client`, a lightweight Python library that makes it very easy to use any Gradio app as an API.
4
+
5
+ As an example, consider this [Hugging Face Space that transcribes audio files](https://huggingface.co/spaces/abidlabs/whisper) that are recorded from the microphone.
6
+
7
+ ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/whisper-screenshot.jpg)
8
+
9
+ Using the `gradio_client` library, we can easily use the Gradio app as an API to transcribe audio files programmatically.
10
+
11
+ Here's the entire code to do it:
12
+
13
+ ```python
14
+ from gradio_client import Client
15
+
16
+ client = Client("abidlabs/whisper")
17
+ client.predict("audio_sample.wav")
18
+
19
+ >> "This is a test of the whisper speech recognition model."
20
+ ```
21
+
22
+ The Gradio client works with any Gradio Space, whether it be an image generator, a stateful chatbot, or a tax calculator.
23
+
24
+ ## Installation
25
+
26
+ If you already have a recent version of `gradio`, then the `gradio_client` is included as a dependency.
27
+
28
+ Otherwise, the lightweight `gradio_client` package can be installed from pip (or pip3) and works with Python versions 3.8 or higher:
29
+
30
+ ```bash
31
+ $ pip install gradio_client
32
+ ```
33
+
34
+ ## Basic Usage
35
+
36
+ ### Connecting to a Space or a Gradio app
37
+
38
+ Start by instantiating a `Client` object and connecting it to a Gradio app that is running on Spaces (or anywhere else)!
39
+
40
+ **Connecting to a Space**
41
+
42
+ ```python
43
+ from gradio_client import Client
44
+
45
+ client = Client("abidlabs/en2fr") # a Space that translates from English to French
46
+ ```
47
+
48
+ You can also connect to private Spaces by passing in your HF token with the `hf_token` parameter. You can get your HF token here: https://huggingface.co/settings/tokens
49
+
50
+ ```python
51
+ from gradio_client import Client
52
+
53
+ client = Client("abidlabs/my-private-space", hf_token="...")
54
+ ```
55
+
56
+ **Duplicating a Space for private use**
57
+
58
+ While you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. For unlimited usage of a Space, simply duplicate the Space to create a private Space,
59
+ and then use it to make as many requests as you'd like!
60
+
61
+ The `gradio_client` includes a class method: `Client.duplicate()` to make this process simple:
62
+
63
+ ```python
64
+ from gradio_client import Client
65
+
66
+ client = Client.duplicate("abidlabs/whisper")
67
+ client.predict("audio_sample.wav")
68
+
69
+ >> "This is a test of the whisper speech recognition model."
70
+ ```
71
+
72
+ If you have previously duplicated a Space, re-running `duplicate()` will _not_ create a new Space. Instead, the Client will attach to the previously-created Space. So it is safe to re-run the `Client.duplicate()` method multiple times.
73
+
74
+ **Note:** if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 1 hour of inactivity. You can also set the hardware using the `hardware` parameter of `duplicate()`.
75
+
76
+ **Connecting a general Gradio app**
77
+
78
+ If your app is running somewhere else, just provide the full URL instead, including the "http://" or "https://". Here's an example of making predictions to a Gradio app that is running on a share URL:
79
+
80
+ ```python
81
+ from gradio_client import Client
82
+
83
+ client = Client("https://bec81a83-5b5c-471e.gradio.live")
84
+ ```
85
+
86
+ ### Inspecting the API endpoints
87
+
88
+ Once you have connected to a Gradio app, you can view the APIs that are available to you by calling the `.view_api()` method. For the Whisper Space, we see the following:
89
+
90
+ ```
91
+ Client.predict() Usage Info
92
+ ---------------------------
93
+ Named API endpoints: 1
94
+
95
+ - predict(input_audio, api_name="/predict") -> value_0
96
+ Parameters:
97
+ - [Audio] input_audio: str (filepath or URL)
98
+ Returns:
99
+ - [Textbox] value_0: str (value)
100
+ ```
101
+
102
+ This shows us that we have 1 API endpoint in this space, and shows us how to use the API endpoint to make a prediction: we should call the `.predict()` method, providing a parameter `input_audio` of type `str`, which is a `filepath or URL`.
103
+
104
+ We should also provide the `api_name='/predict'` argument. Although this isn't necessary if a Gradio app has a single named endpoint, it does allow us to call different endpoints in a single app if they are available. If an app has unnamed API endpoints, these can also be displayed by running `.view_api(all_endpoints=True)`.
105
+
106
+ ### Making a prediction
107
+
108
+ The simplest way to make a prediction is simply to call the `.predict()` function with the appropriate arguments:
109
+
110
+ ```python
111
+ from gradio_client import Client
112
+
113
+ client = Client("abidlabs/en2fr")
114
+ client.predict("Hello")
115
+
116
+ >> Bonjour
117
+ ```
118
+
119
+ If there are multiple parameters, then you should pass them as separate arguments to `.predict()`, like this:
120
+
121
+ ```python
122
+ from gradio_client import Client
123
+
124
+ client = Client("gradio/calculator")
125
+ client.predict(4, "add", 5)
126
+
127
+ >> 9.0
128
+ ```
129
+
130
+ For certain inputs, such as images, you should pass in the filepath or URL to the file. Likewise, for the corresponding output types, you will get a filepath or URL returned.
131
+
132
+ ```python
133
+ from gradio_client import Client
134
+
135
+ client = Client("abidlabs/whisper")
136
+ client.predict("https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3")
137
+
138
+ >> "My thought I have nobody by a beauty and will as you poured. Mr. Rochester is serve in that so don't find simpus, and devoted abode, to at might in a r—"
139
+ ```
140
+
141
+ ## Advanced Usage
142
+
143
+ For more ways to use the Gradio Python Client, check out our dedicated Guide on the Python client, available here: https://www.gradio.app/getting-started-with-the-python-client/
testbed/gradio-app__gradio/client/python/build_pypi.sh ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ cd "$(dirname ${0})"
5
+
6
+ python3 -m pip install build
7
+ rm -rf dist/*
8
+ rm -rf build/*
9
+ python3 -m build
testbed/gradio-app__gradio/client/python/gradio_client/CHANGELOG.md ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # gradio_client
2
+
3
+ ## 0.5.2
4
+
5
+ ### Features
6
+
7
+ - [#5653](https://github.com/gradio-app/gradio/pull/5653) [`ea0e00b20`](https://github.com/gradio-app/gradio/commit/ea0e00b207b4b90a10e9d054c4202d4e705a29ba) - Prevent Clients from accessing API endpoints that set `api_name=False`. Thanks [@abidlabs](https://github.com/abidlabs)!
8
+
9
+ ## 0.5.1
10
+
11
+ ### Features
12
+
13
+ - [#5514](https://github.com/gradio-app/gradio/pull/5514) [`52f783175`](https://github.com/gradio-app/gradio/commit/52f7831751b432411e109bd41add4ab286023a8e) - refactor: Use package.json for version management. Thanks [@DarhkVoyd](https://github.com/DarhkVoyd)!
14
+
15
+ ## 0.5.0
16
+
17
+ ### Highlights
18
+
19
+ #### Enable streaming audio in python client ([#5248](https://github.com/gradio-app/gradio/pull/5248) [`390624d8`](https://github.com/gradio-app/gradio/commit/390624d8ad2b1308a5bf8384435fd0db98d8e29e))
20
+
21
+ The `gradio_client` now supports streaming file outputs 🌊
22
+
23
+ No new syntax! Connect to a gradio demo that supports streaming file outputs and call `predict` or `submit` as you normally would.
24
+
25
+ ```python
26
+ import gradio_client as grc
27
+ client = grc.Client("gradio/stream_audio_out")
28
+
29
+ # Get the entire generated audio as a local file
30
+ client.predict("/Users/freddy/Pictures/bark_demo.mp4", api_name="/predict")
31
+
32
+ job = client.submit("/Users/freddy/Pictures/bark_demo.mp4", api_name="/predict")
33
+
34
+ # Get the entire generated audio as a local file
35
+ job.result()
36
+
37
+ # Each individual chunk
38
+ job.outputs()
39
+ ```
40
+
41
+ Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
42
+
43
+ ### Fixes
44
+
45
+ - [#5295](https://github.com/gradio-app/gradio/pull/5295) [`7b8fa8aa`](https://github.com/gradio-app/gradio/commit/7b8fa8aa58f95f5046b9add64b40368bd3f1b700) - Allow caching examples with streamed output. Thanks [@aliabid94](https://github.com/aliabid94)!
46
+
47
+ ## 0.4.0
48
+
49
+ ### Highlights
50
+
51
+ #### Client.predict will now return the final output for streaming endpoints ([#5057](https://github.com/gradio-app/gradio/pull/5057) [`35856f8b`](https://github.com/gradio-app/gradio/commit/35856f8b54548cae7bd3b8d6a4de69e1748283b2))
52
+
53
+ ### This is a breaking change (for gradio_client only)!
54
+
55
+ Previously, `Client.predict` would only return the first output of an endpoint that streamed results. This was causing confusion for developers that wanted to call these streaming demos via the client.
56
+
57
+ We realize that developers using the client don't know the internals of whether a demo streams or not, so we're changing the behavior of predict to match developer expectations.
58
+
59
+ Using `Client.predict` will now return the final output of a streaming endpoint. This will make it even easier to use gradio apps via the client.
60
+
61
+ Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
62
+
63
+ ### Features
64
+
65
+ - [#5076](https://github.com/gradio-app/gradio/pull/5076) [`2745075a`](https://github.com/gradio-app/gradio/commit/2745075a26f80e0e16863d483401ff1b6c5ada7a) - Add deploy_discord to docs. Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
66
+
67
+ ### Fixes
68
+
69
+ - [#5061](https://github.com/gradio-app/gradio/pull/5061) [`136adc9c`](https://github.com/gradio-app/gradio/commit/136adc9ccb23e5cb4d02d2e88f23f0b850041f98) - Ensure `gradio_client` is backwards compatible with `gradio==3.24.1`. Thanks [@abidlabs](https://github.com/abidlabs)!
70
+
71
+ ## 0.3.0
72
+
73
+ ### Highlights
74
+
75
+ #### Create Discord Bots from Gradio Apps 🤖 ([#4960](https://github.com/gradio-app/gradio/pull/4960) [`46e4ef67`](https://github.com/gradio-app/gradio/commit/46e4ef67d287dd68a91473b73172b29cbad064bc))
76
+
77
+ We're excited to announce that Gradio can now automatically create a discord bot from any `gr.ChatInterface` app.
78
+
79
+ It's as easy as importing `gradio_client`, connecting to the app, and calling `deploy_discord`!
80
+
81
+ _🦙 Turning Llama 2 70b into a discord bot 🦙_
82
+
83
+ ```python
84
+ import gradio_client as grc
85
+ grc.Client("ysharma/Explore_llamav2_with_TGI").deploy_discord(to_id="llama2-70b-discord-bot")
86
+ ```
87
+
88
+ <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/guide/llama_chat.gif">
89
+
90
+ #### Getting started with template spaces
91
+
92
+ To help get you started, we have created an organization on Hugging Face called [gradio-discord-bots](https://huggingface.co/gradio-discord-bots) with template spaces you can use to turn state of the art LLMs powered by Gradio to discord bots.
93
+
94
+ Currently we have template spaces for:
95
+
96
+ - [Llama-2-70b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-70b-chat-hf) powered by a FREE Hugging Face Inference Endpoint!
97
+ - [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-13b-chat-hf) powered by Hugging Face Inference Endpoints.
98
+ - [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/llama-2-13b-chat-transformers) powered by Hugging Face transformers.
99
+ - [falcon-7b-instruct](https://huggingface.co/spaces/gradio-discord-bots/falcon-7b-instruct) powered by Hugging Face Inference Endpoints.
100
+ - [gpt-3.5-turbo](https://huggingface.co/spaces/gradio-discord-bots/gpt-35-turbo), powered by openai. Requires an OpenAI key.
101
+
102
+ But once again, you can deploy ANY `gr.ChatInterface` app exposed on the internet! So don't hesitate to try it on your own Chatbots.
103
+
104
+ ❗️ Additional Note ❗️: Technically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. But `gr.ChatInterface` apps naturally lend themselves to discord's chat functionality so we suggest you start with those.
105
+
106
+ Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
107
+
108
+ ### New Features:
109
+
110
+ - Endpoints that return layout components are now properly handled in the `submit` and `view_api` methods. Output layout components are not returned by the API but all other components are (excluding `gr.State`). By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4871](https://github.com/gradio-app/gradio/pull/4871)
111
+
112
+ ### Bug Fixes:
113
+
114
+ No changes to highlight
115
+
116
+ ### Breaking Changes:
117
+
118
+ No changes to highlight.
119
+
120
+ ### Full Changelog:
121
+
122
+ No changes to highlight.
123
+
124
+ # 0.2.9
125
+
126
+ ### New Features:
127
+
128
+ No changes to highlight
129
+
130
+ ### Bug Fixes:
131
+
132
+ - Fix bug determining the api name when a demo has `api_name=False` by [@freddyboulton](https://github.com/freddyaboulton) in [PR 4886](https://github.com/gradio-app/gradio/pull/4886)
133
+
134
+ ### Breaking Changes:
135
+
136
+ No changes to highlight.
137
+
138
+ ### Full Changelog:
139
+
140
+ - Pinned dependencies to major versions to reduce the likelihood of a broken `gradio_client` due to changes in downstream dependencies by [@abidlabs](https://github.com/abidlabs) in [PR 4885](https://github.com/gradio-app/gradio/pull/4885)
141
+
142
+ # 0.2.8
143
+
144
+ ### New Features:
145
+
146
+ - Support loading gradio apps where `api_name=False` by [@abidlabs](https://github.com/abidlabs) in [PR 4683](https://github.com/gradio-app/gradio/pull/4683)
147
+
148
+ ### Bug Fixes:
149
+
150
+ - Fix bug where space duplication would error if the demo has cpu-basic hardware by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4583](https://github.com/gradio-app/gradio/pull/4583)
151
+ - Fixes and optimizations to URL/download functions by [@akx](https://github.com/akx) in [PR 4695](https://github.com/gradio-app/gradio/pull/4695)
152
+
153
+ ### Breaking Changes:
154
+
155
+ No changes to highlight.
156
+
157
+ ### Full Changelog:
158
+
159
+ No changes to highlight.
160
+
161
+ # 0.2.7
162
+
163
+ ### New Features:
164
+
165
+ - The output directory for files downloaded via the Client can now be set by the `output_dir` parameter in `Client` by [@abidlabs](https://github.com/abidlabs) in [PR 4501](https://github.com/gradio-app/gradio/pull/4501)
166
+
167
+ ### Bug Fixes:
168
+
169
+ - The output directory for files downloaded via the Client are now set to a temporary directory by default (instead of the working directory in some cases) by [@abidlabs](https://github.com/abidlabs) in [PR 4501](https://github.com/gradio-app/gradio/pull/4501)
170
+
171
+ ### Breaking Changes:
172
+
173
+ No changes to highlight.
174
+
175
+ ### Full Changelog:
176
+
177
+ No changes to highlight.
178
+
179
+ # 0.2.6
180
+
181
+ ### New Features:
182
+
183
+ No changes to highlight.
184
+
185
+ ### Bug Fixes:
186
+
187
+ - Fixed bug file deserialization didn't preserve all file extensions by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4440](https://github.com/gradio-app/gradio/pull/4440)
188
+ - Fixed bug where mounted apps could not be called via the client by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4435](https://github.com/gradio-app/gradio/pull/4435)
189
+
190
+ ### Breaking Changes:
191
+
192
+ No changes to highlight.
193
+
194
+ ### Full Changelog:
195
+
196
+ No changes to highlight.
197
+
198
+ # 0.2.5
199
+
200
+ ### New Features:
201
+
202
+ No changes to highlight.
203
+
204
+ ### Bug Fixes:
205
+
206
+ - Fixes parameter names not showing underscores by [@abidlabs](https://github.com/abidlabs) in [PR 4230](https://github.com/gradio-app/gradio/pull/4230)
207
+ - Fixes issue in which state was not handled correctly if `serialize=False` by [@abidlabs](https://github.com/abidlabs) in [PR 4230](https://github.com/gradio-app/gradio/pull/4230)
208
+
209
+ ### Breaking Changes:
210
+
211
+ No changes to highlight.
212
+
213
+ ### Full Changelog:
214
+
215
+ No changes to highlight.
216
+
217
+ # 0.2.4
218
+
219
+ ### Bug Fixes:
220
+
221
+ - Fixes missing serialization classes for several components: `Barplot`, `Lineplot`, `Scatterplot`, `AnnotatedImage`, `Interpretation` by [@abidlabs](https://github.com/abidlabs) in [PR 4167](https://github.com/gradio-app/gradio/pull/4167)
222
+
223
+ ### Documentation Changes:
224
+
225
+ No changes to highlight.
226
+
227
+ ### Testing and Infrastructure Changes:
228
+
229
+ No changes to highlight.
230
+
231
+ ### Breaking Changes:
232
+
233
+ No changes to highlight.
234
+
235
+ ### Full Changelog:
236
+
237
+ No changes to highlight.
238
+
239
+ ### Contributors Shoutout:
240
+
241
+ No changes to highlight.
242
+
243
+ # 0.2.3
244
+
245
+ ### New Features:
246
+
247
+ No changes to highlight.
248
+
249
+ ### Bug Fixes:
250
+
251
+ - Fix example inputs for `gr.File(file_count='multiple')` output components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4153](https://github.com/gradio-app/gradio/pull/4153)
252
+
253
+ ### Documentation Changes:
254
+
255
+ No changes to highlight.
256
+
257
+ ### Testing and Infrastructure Changes:
258
+
259
+ No changes to highlight.
260
+
261
+ ### Breaking Changes:
262
+
263
+ No changes to highlight.
264
+
265
+ ### Full Changelog:
266
+
267
+ No changes to highlight.
268
+
269
+ ### Contributors Shoutout:
270
+
271
+ No changes to highlight.
272
+
273
+ # 0.2.2
274
+
275
+ ### New Features:
276
+
277
+ No changes to highlight.
278
+
279
+ ### Bug Fixes:
280
+
281
+ - Only send request to `/info` route if demo version is above `3.28.3` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4109](https://github.com/gradio-app/gradio/pull/4109)
282
+
283
+ ### Other Changes:
284
+
285
+ - Fix bug in test from gradio 3.29.0 refactor by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4138](https://github.com/gradio-app/gradio/pull/4138)
286
+
287
+ ### Breaking Changes:
288
+
289
+ No changes to highlight.
290
+
291
+ # 0.2.1
292
+
293
+ ### New Features:
294
+
295
+ No changes to highlight.
296
+
297
+ ### Bug Fixes:
298
+
299
+ - Removes extraneous `State` component info from the `Client.view_api()` method by [@abidlabs](https://github.com/abidlabs) in [PR 4107](https://github.com/gradio-app/gradio/pull/4107)
300
+
301
+ ### Documentation Changes:
302
+
303
+ No changes to highlight.
304
+
305
+ ### Testing and Infrastructure Changes:
306
+
307
+ - Separates flaky tests from non-flaky tests by [@abidlabs](https://github.com/abidlabs) in [PR 4107](https://github.com/gradio-app/gradio/pull/4107)
308
+
309
+ ### Breaking Changes:
310
+
311
+ No changes to highlight.
312
+
313
+ ### Full Changelog:
314
+
315
+ No changes to highlight.
316
+
317
+ ### Contributors Shoutout:
318
+
319
+ No changes to highlight.
320
+
321
+ # 0.1.4
322
+
323
+ ### New Features:
324
+
325
+ - Progress Updates from `gr.Progress()` can be accessed via `job.status().progress_data` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3924](https://github.com/gradio-app/gradio/pull/3924)
326
+
327
+ ### Bug Fixes:
328
+
329
+ - Fixed bug where unnamed routes were displayed with `api_name` instead of `fn_index` in `view_api` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3972](https://github.com/gradio-app/gradio/pull/3972)
330
+
331
+ ### Documentation Changes:
332
+
333
+ No changes to highlight.
334
+
335
+ ### Testing and Infrastructure Changes:
336
+
337
+ No changes to highlight.
338
+
339
+ ### Breaking Changes:
340
+
341
+ No changes to highlight.
342
+
343
+ ### Full Changelog:
344
+
345
+ No changes to highlight.
346
+
347
+ ### Contributors Shoutout:
348
+
349
+ No changes to highlight.
350
+
351
+ # 0.1.3
352
+
353
+ ### New Features:
354
+
355
+ No changes to highlight.
356
+
357
+ ### Bug Fixes:
358
+
359
+ - Fixed bug where `Video` components in latest gradio were not able to be deserialized by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3860](https://github.com/gradio-app/gradio/pull/3860)
360
+
361
+ ### Documentation Changes:
362
+
363
+ No changes to highlight.
364
+
365
+ ### Testing and Infrastructure Changes:
366
+
367
+ No changes to highlight.
368
+
369
+ ### Breaking Changes:
370
+
371
+ No changes to highlight.
372
+
373
+ ### Full Changelog:
374
+
375
+ No changes to highlight.
376
+
377
+ ### Contributors Shoutout:
378
+
379
+ No changes to highlight.
380
+
381
+ # 0.1.2
382
+
383
+ First public release of the Gradio Client library! The `gradio_client` Python library that makes it very easy to use any Gradio app as an API.
384
+
385
+ As an example, consider this [Hugging Face Space that transcribes audio files](https://huggingface.co/spaces/abidlabs/whisper) that are recorded from the microphone.
386
+
387
+ ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/whisper-screenshot.jpg)
388
+
389
+ Using the `gradio_client` library, we can easily use the Gradio as an API to transcribe audio files programmatically.
390
+
391
+ Here's the entire code to do it:
392
+
393
+ ```python
394
+ from gradio_client import Client
395
+
396
+ client = Client("abidlabs/whisper")
397
+ client.predict("audio_sample.wav")
398
+
399
+ >> "This is a test of the whisper speech recognition model."
400
+ ```
401
+
402
+ Read more about how to use the `gradio_client` library here: https://gradio.app/getting-started-with-the-python-client/
testbed/gradio-app__gradio/client/python/gradio_client/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
from gradio_client.client import Client
from gradio_client.utils import __version__

# Public API of the gradio_client package: the Client class and the
# package version string.
__all__ = [
    "Client",
    "__version__",
]
testbed/gradio-app__gradio/client/python/gradio_client/cli/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
from gradio_client.cli import deploy_discord

# Expose the deploy_discord CLI submodule as the package's public surface.
__all__ = ["deploy_discord"]
testbed/gradio-app__gradio/client/python/gradio_client/cli/deploy_discord.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import argparse

from gradio_client import Client


def main():
    """CLI entrypoint: deploy a Space (or hosted gradio app) as a Discord bot.

    Parses command-line flags and forwards them to
    ``Client.deploy_discord`` on a client connected to ``--src``.
    """
    parser = argparse.ArgumentParser(description="Deploy Space as Discord Bot.")
    # Positional placeholder so the command is invoked as `... deploy-discord`.
    parser.add_argument("deploy-discord")
    parser.add_argument(
        "--src",
        type=str,
        help="The space id or url or gradio app you want to deploy as a gradio bot.",
    )
    parser.add_argument(
        "--discord-bot-token",
        type=str,
        help="Discord bot token. Get one on the discord website.",
    )
    parser.add_argument(
        "--api-names",
        nargs="*",
        help="Api names to turn into discord bots",
        default=[],
    )
    parser.add_argument(
        "--to-id",
        type=str,
        help="Name of the space used to host the discord bot",
        default=None,
    )
    parser.add_argument(
        "--hf-token",
        type=str,
        help=(
            # Typo fix: "ommitted" -> "omitted" in user-facing help text.
            "Hugging Face token. Can be omitted if you are logged in via huggingface_hub cli. "
            "Must be provided if upstream space is private."
        ),
        default=None,
    )
    parser.add_argument(
        "--private",
        type=bool,
        nargs="?",
        help="Whether the discord bot space is private.",
        const=True,
        default=False,
    )
    args = parser.parse_args()
    # Entries of the form "api_name,alias" are converted into
    # (api_name, alias) tuples before being passed along.
    for i, name in enumerate(args.api_names):
        if "," in name:
            args.api_names[i] = tuple(name.split(","))
    Client(args.src).deploy_discord(
        discord_bot_token=args.discord_bot_token,
        api_names=args.api_names,
        to_id=args.to_id,
        hf_token=args.hf_token,
        private=args.private,
    )
testbed/gradio-app__gradio/client/python/gradio_client/client.py ADDED
@@ -0,0 +1,1251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """The main Client class for the Python client."""
2
+ from __future__ import annotations
3
+
4
+ import concurrent.futures
5
+ import json
6
+ import os
7
+ import re
8
+ import secrets
9
+ import tempfile
10
+ import threading
11
+ import time
12
+ import urllib.parse
13
+ import uuid
14
+ import warnings
15
+ from concurrent.futures import Future
16
+ from datetime import datetime
17
+ from pathlib import Path
18
+ from threading import Lock
19
+ from typing import Any, Callable, Literal
20
+
21
+ import huggingface_hub
22
+ import requests
23
+ import websockets
24
+ from huggingface_hub import CommitOperationAdd, SpaceHardware, SpaceStage
25
+ from huggingface_hub.utils import (
26
+ RepositoryNotFoundError,
27
+ build_hf_headers,
28
+ send_telemetry,
29
+ )
30
+ from packaging import version
31
+
32
+ from gradio_client import serializing, utils
33
+ from gradio_client.documentation import document, set_documentation_group
34
+ from gradio_client.serializing import Serializable
35
+ from gradio_client.utils import (
36
+ Communicator,
37
+ JobStatus,
38
+ Status,
39
+ StatusUpdate,
40
+ )
41
+
42
set_documentation_group("py-client")


# Default directory for files downloaded by the client: honors the
# GRADIO_TEMP_DIR environment variable if set, otherwise a "gradio"
# folder under the system temp directory.
DEFAULT_TEMP_DIR = os.environ.get("GRADIO_TEMP_DIR") or str(
    Path(tempfile.gettempdir()) / "gradio"
)
48
+
49
+
50
+ @document("predict", "submit", "view_api", "duplicate", "deploy_discord")
51
+ class Client:
52
+ """
53
+ The main Client class for the Python client. This class is used to connect to a remote Gradio app and call its API endpoints.
54
+
55
+ Example:
56
+ from gradio_client import Client
57
+
58
+ client = Client("abidlabs/whisper-large-v2") # connecting to a Hugging Face Space
59
+ client.predict("test.mp4", api_name="/predict")
60
+ >> What a nice recording! # returns the result of the remote API call
61
+
62
+ client = Client("https://bec81a83-5b5c-471e.gradio.live") # connecting to a temporary Gradio share URL
63
+ job = client.submit("hello", api_name="/predict") # runs the prediction in a background thread
64
+ job.result()
65
+ >> 49 # returns the result of the remote API call (blocking call)
66
+ """
67
+
68
+ def __init__(
69
+ self,
70
+ src: str,
71
+ hf_token: str | None = None,
72
+ max_workers: int = 40,
73
+ serialize: bool = True,
74
+ output_dir: str | Path | None = DEFAULT_TEMP_DIR,
75
+ verbose: bool = True,
76
+ ):
77
+ """
78
+ Parameters:
79
+ src: Either the name of the Hugging Face Space to load, (e.g. "abidlabs/whisper-large-v2") or the full URL (including "http" or "https") of the hosted Gradio app to load (e.g. "http://mydomain.com/app" or "https://bec81a83-5b5c-471e.gradio.live/").
80
+ hf_token: The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token
81
+ max_workers: The maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously.
82
+ serialize: Whether the client should serialize the inputs and deserialize the outputs of the remote API. If set to False, the client will pass the inputs and outputs as-is, without serializing/deserializing them. E.g. you if you set this to False, you'd submit an image in base64 format instead of a filepath, and you'd get back an image in base64 format from the remote API instead of a filepath.
83
+ output_dir: The directory to save files that are downloaded from the remote API. If None, reads from the GRADIO_TEMP_DIR environment variable. Defaults to a temporary directory on your machine.
84
+ verbose: Whether the client should print statements to the console.
85
+ """
86
+ self.verbose = verbose
87
+ self.hf_token = hf_token
88
+ self.serialize = serialize
89
+ self.headers = build_hf_headers(
90
+ token=hf_token,
91
+ library_name="gradio_client",
92
+ library_version=utils.__version__,
93
+ )
94
+ self.space_id = None
95
+ self.output_dir = output_dir
96
+
97
+ if src.startswith("http://") or src.startswith("https://"):
98
+ _src = src if src.endswith("/") else src + "/"
99
+ else:
100
+ _src = self._space_name_to_src(src)
101
+ if _src is None:
102
+ raise ValueError(
103
+ f"Could not find Space: {src}. If it is a private Space, please provide an hf_token."
104
+ )
105
+ self.space_id = src
106
+ self.src = _src
107
+ state = self._get_space_state()
108
+ if state == SpaceStage.BUILDING:
109
+ if self.verbose:
110
+ print("Space is still building. Please wait...")
111
+ while self._get_space_state() == SpaceStage.BUILDING:
112
+ time.sleep(2) # so we don't get rate limited by the API
113
+ pass
114
+ if state in utils.INVALID_RUNTIME:
115
+ raise ValueError(
116
+ f"The current space is in the invalid state: {state}. "
117
+ "Please contact the owner to fix this."
118
+ )
119
+ if self.verbose:
120
+ print(f"Loaded as API: {self.src} ✔")
121
+
122
+ self.api_url = urllib.parse.urljoin(self.src, utils.API_URL)
123
+ self.ws_url = urllib.parse.urljoin(
124
+ self.src.replace("http", "ws", 1), utils.WS_URL
125
+ )
126
+ self.upload_url = urllib.parse.urljoin(self.src, utils.UPLOAD_URL)
127
+ self.reset_url = urllib.parse.urljoin(self.src, utils.RESET_URL)
128
+ self.config = self._get_config()
129
+ self.session_hash = str(uuid.uuid4())
130
+
131
+ self.endpoints = [
132
+ Endpoint(self, fn_index, dependency)
133
+ for fn_index, dependency in enumerate(self.config["dependencies"])
134
+ ]
135
+
136
+ # Create a pool of threads to handle the requests
137
+ self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
138
+
139
+ # Disable telemetry by setting the env variable HF_HUB_DISABLE_TELEMETRY=1
140
+ threading.Thread(target=self._telemetry_thread).start()
141
+
142
+ @classmethod
143
+ def duplicate(
144
+ cls,
145
+ from_id: str,
146
+ to_id: str | None = None,
147
+ hf_token: str | None = None,
148
+ private: bool = True,
149
+ hardware: Literal[
150
+ "cpu-basic",
151
+ "cpu-upgrade",
152
+ "t4-small",
153
+ "t4-medium",
154
+ "a10g-small",
155
+ "a10g-large",
156
+ "a100-large",
157
+ ]
158
+ | SpaceHardware
159
+ | None = None,
160
+ secrets: dict[str, str] | None = None,
161
+ sleep_timeout: int = 5,
162
+ max_workers: int = 40,
163
+ verbose: bool = True,
164
+ ):
165
+ """
166
+ Duplicates a Hugging Face Space under your account and returns a Client object
167
+ for the new Space. No duplication is created if the Space already exists in your
168
+ account (to override this, provide a new name for the new Space using `to_id`).
169
+ To use this method, you must provide an `hf_token` or be logged in via the Hugging
170
+ Face Hub CLI.
171
+
172
+ The new Space will be private by default and use the same hardware as the original
173
+ Space. This can be changed by using the `private` and `hardware` parameters. For
174
+ hardware upgrades (beyond the basic CPU tier), you may be required to provide
175
+ billing information on Hugging Face: https://huggingface.co/settings/billing
176
+
177
+ Parameters:
178
+ from_id: The name of the Hugging Face Space to duplicate in the format "{username}/{space_id}", e.g. "gradio/whisper".
179
+ to_id: The name of the new Hugging Face Space to create, e.g. "abidlabs/whisper-duplicate". If not provided, the new Space will be named "{your_HF_username}/{space_id}".
180
+ hf_token: The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token
181
+ private: Whether the new Space should be private (True) or public (False). Defaults to True.
182
+ hardware: The hardware tier to use for the new Space. Defaults to the same hardware tier as the original Space. Options include "cpu-basic", "cpu-upgrade", "t4-small", "t4-medium", "a10g-small", "a10g-large", "a100-large", subject to availability.
183
+ secrets: A dictionary of (secret key, secret value) to pass to the new Space. Defaults to None. Secrets are only used when the Space is duplicated for the first time, and are not updated if the duplicated Space already exists.
184
+ sleep_timeout: The number of minutes after which the duplicate Space will be puased if no requests are made to it (to minimize billing charges). Defaults to 5 minutes.
185
+ max_workers: The maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously.
186
+ verbose: Whether the client should print statements to the console.
187
+ Example:
188
+ import os
189
+ from gradio_client import Client
190
+ HF_TOKEN = os.environ.get("HF_TOKEN")
191
+ client = Client.duplicate("abidlabs/whisper", hf_token=HF_TOKEN)
192
+ client.predict("audio_sample.wav")
193
+ >> "This is a test of the whisper speech recognition model."
194
+ """
195
+ try:
196
+ original_info = huggingface_hub.get_space_runtime(from_id, token=hf_token)
197
+ except RepositoryNotFoundError as rnfe:
198
+ raise ValueError(
199
+ f"Could not find Space: {from_id}. If it is a private Space, please provide an `hf_token`."
200
+ ) from rnfe
201
+ if to_id:
202
+ if "/" in to_id:
203
+ to_id = to_id.split("/")[1]
204
+ space_id = huggingface_hub.get_full_repo_name(to_id, token=hf_token)
205
+ else:
206
+ space_id = huggingface_hub.get_full_repo_name(
207
+ from_id.split("/")[1], token=hf_token
208
+ )
209
+ try:
210
+ huggingface_hub.get_space_runtime(space_id, token=hf_token)
211
+ if verbose:
212
+ print(
213
+ f"Using your existing Space: {utils.SPACE_URL.format(space_id)} 🤗"
214
+ )
215
+ if secrets is not None:
216
+ warnings.warn(
217
+ "Secrets are only used when the Space is duplicated for the first time, and are not updated if the duplicated Space already exists."
218
+ )
219
+ except RepositoryNotFoundError:
220
+ if verbose:
221
+ print(f"Creating a duplicate of {from_id} for your own use... 🤗")
222
+ huggingface_hub.duplicate_space(
223
+ from_id=from_id,
224
+ to_id=space_id,
225
+ token=hf_token,
226
+ exist_ok=True,
227
+ private=private,
228
+ )
229
+ if secrets is not None:
230
+ for key, value in secrets.items():
231
+ huggingface_hub.add_space_secret(
232
+ space_id, key, value, token=hf_token
233
+ )
234
+ if verbose:
235
+ print(f"Created new Space: {utils.SPACE_URL.format(space_id)}")
236
+ current_info = huggingface_hub.get_space_runtime(space_id, token=hf_token)
237
+ current_hardware = (
238
+ current_info.hardware or huggingface_hub.SpaceHardware.CPU_BASIC
239
+ )
240
+ hardware = hardware or original_info.hardware
241
+ if current_hardware != hardware:
242
+ huggingface_hub.request_space_hardware(space_id, hardware) # type: ignore
243
+ print(
244
+ f"-------\nNOTE: this Space uses upgraded hardware: {hardware}... see billing info at https://huggingface.co/settings/billing\n-------"
245
+ )
246
+ # Setting a timeout only works if the hardware is not basic
247
+ # so set it here after the hardware has been requested
248
+ if hardware != huggingface_hub.SpaceHardware.CPU_BASIC:
249
+ utils.set_space_timeout(
250
+ space_id, hf_token=hf_token, timeout_in_seconds=sleep_timeout * 60
251
+ )
252
+ if verbose:
253
+ print("")
254
+ client = cls(
255
+ space_id, hf_token=hf_token, max_workers=max_workers, verbose=verbose
256
+ )
257
+ return client
258
+
259
+ def _get_space_state(self):
260
+ if not self.space_id:
261
+ return None
262
+ info = huggingface_hub.get_space_runtime(self.space_id, token=self.hf_token)
263
+ return info.stage
264
+
265
+ def predict(
266
+ self,
267
+ *args,
268
+ api_name: str | None = None,
269
+ fn_index: int | None = None,
270
+ ) -> Any:
271
+ """
272
+ Calls the Gradio API and returns the result (this is a blocking call).
273
+
274
+ Parameters:
275
+ args: The arguments to pass to the remote API. The order of the arguments must match the order of the inputs in the Gradio app.
276
+ api_name: The name of the API endpoint to call starting with a leading slash, e.g. "/predict". Does not need to be provided if the Gradio app has only one named API endpoint.
277
+ fn_index: As an alternative to api_name, this parameter takes the index of the API endpoint to call, e.g. 0. Both api_name and fn_index can be provided, but if they conflict, api_name will take precedence.
278
+ Returns:
279
+ The result of the API call. Will be a Tuple if the API has multiple outputs.
280
+ Example:
281
+ from gradio_client import Client
282
+ client = Client(src="gradio/calculator")
283
+ client.predict(5, "add", 4, api_name="/predict")
284
+ >> 9.0
285
+ """
286
+ inferred_fn_index = self._infer_fn_index(api_name, fn_index)
287
+ if self.endpoints[inferred_fn_index].is_continuous:
288
+ raise ValueError(
289
+ "Cannot call predict on this function as it may run forever. Use submit instead."
290
+ )
291
+ return self.submit(*args, api_name=api_name, fn_index=fn_index).result()
292
+
293
+ def submit(
294
+ self,
295
+ *args,
296
+ api_name: str | None = None,
297
+ fn_index: int | None = None,
298
+ result_callbacks: Callable | list[Callable] | None = None,
299
+ ) -> Job:
300
+ """
301
+ Creates and returns a Job object which calls the Gradio API in a background thread. The job can be used to retrieve the status and result of the remote API call.
302
+
303
+ Parameters:
304
+ args: The arguments to pass to the remote API. The order of the arguments must match the order of the inputs in the Gradio app.
305
+ api_name: The name of the API endpoint to call starting with a leading slash, e.g. "/predict". Does not need to be provided if the Gradio app has only one named API endpoint.
306
+ fn_index: As an alternative to api_name, this parameter takes the index of the API endpoint to call, e.g. 0. Both api_name and fn_index can be provided, but if they conflict, api_name will take precedence.
307
+ result_callbacks: A callback function, or list of callback functions, to be called when the result is ready. If a list of functions is provided, they will be called in order. The return values from the remote API are provided as separate parameters into the callback. If None, no callback will be called.
308
+ Returns:
309
+ A Job object that can be used to retrieve the status and result of the remote API call.
310
+ Example:
311
+ from gradio_client import Client
312
+ client = Client(src="gradio/calculator")
313
+ job = client.submit(5, "add", 4, api_name="/predict")
314
+ job.status()
315
+ >> <Status.STARTING: 'STARTING'>
316
+ job.result() # blocking call
317
+ >> 9.0
318
+ """
319
+ inferred_fn_index = self._infer_fn_index(api_name, fn_index)
320
+
321
+ helper = None
322
+ if self.endpoints[inferred_fn_index].use_ws:
323
+ helper = Communicator(
324
+ Lock(),
325
+ JobStatus(),
326
+ self.endpoints[inferred_fn_index].process_predictions,
327
+ self.reset_url,
328
+ )
329
+ end_to_end_fn = self.endpoints[inferred_fn_index].make_end_to_end_fn(helper)
330
+ future = self.executor.submit(end_to_end_fn, *args)
331
+
332
+ job = Job(
333
+ future, communicator=helper, verbose=self.verbose, space_id=self.space_id
334
+ )
335
+
336
+ if result_callbacks:
337
+ if isinstance(result_callbacks, Callable):
338
+ result_callbacks = [result_callbacks]
339
+
340
+ def create_fn(callback) -> Callable:
341
+ def fn(future):
342
+ if isinstance(future.result(), tuple):
343
+ callback(*future.result())
344
+ else:
345
+ callback(future.result())
346
+
347
+ return fn
348
+
349
+ for callback in result_callbacks:
350
+ job.add_done_callback(create_fn(callback))
351
+
352
+ return job
353
+
354
+ def view_api(
355
+ self,
356
+ all_endpoints: bool | None = None,
357
+ print_info: bool = True,
358
+ return_format: Literal["dict", "str"] | None = None,
359
+ ) -> dict | str | None:
360
+ """
361
+ Prints the usage info for the API. If the Gradio app has multiple API endpoints, the usage info for each endpoint will be printed separately. If return_format="dict" the info is returned in dictionary format, as shown in the example below.
362
+
363
+ Parameters:
364
+ all_endpoints: If True, prints information for both named and unnamed endpoints in the Gradio app. If False, will only print info about named endpoints. If None (default), will print info about named endpoints, unless there aren't any -- in which it will print info about unnamed endpoints.
365
+ print_info: If True, prints the usage info to the console. If False, does not print the usage info.
366
+ return_format: If None, nothing is returned. If "str", returns the same string that would be printed to the console. If "dict", returns the usage info as a dictionary that can be programmatically parsed, and *all endpoints are returned in the dictionary* regardless of the value of `all_endpoints`. The format of the dictionary is in the docstring of this method.
367
+ Example:
368
+ from gradio_client import Client
369
+ client = Client(src="gradio/calculator")
370
+ client.view_api(return_format="dict")
371
+ >> {
372
+ 'named_endpoints': {
373
+ '/predict': {
374
+ 'parameters': [
375
+ {
376
+ 'label': 'num1',
377
+ 'type_python': 'int | float',
378
+ 'type_description': 'numeric value',
379
+ 'component': 'Number',
380
+ 'example_input': '5'
381
+ },
382
+ {
383
+ 'label': 'operation',
384
+ 'type_python': 'str',
385
+ 'type_description': 'string value',
386
+ 'component': 'Radio',
387
+ 'example_input': 'add'
388
+ },
389
+ {
390
+ 'label': 'num2',
391
+ 'type_python': 'int | float',
392
+ 'type_description': 'numeric value',
393
+ 'component': 'Number',
394
+ 'example_input': '5'
395
+ },
396
+ ],
397
+ 'returns': [
398
+ {
399
+ 'label': 'output',
400
+ 'type_python': 'int | float',
401
+ 'type_description': 'numeric value',
402
+ 'component': 'Number',
403
+ },
404
+ ]
405
+ },
406
+ '/flag': {
407
+ 'parameters': [
408
+ ...
409
+ ],
410
+ 'returns': [
411
+ ...
412
+ ]
413
+ }
414
+ }
415
+ 'unnamed_endpoints': {
416
+ 2: {
417
+ 'parameters': [
418
+ ...
419
+ ],
420
+ 'returns': [
421
+ ...
422
+ ]
423
+ }
424
+ }
425
+ }
426
+ }
427
+
428
+ """
429
+ if self.serialize:
430
+ api_info_url = urllib.parse.urljoin(self.src, utils.API_INFO_URL)
431
+ else:
432
+ api_info_url = urllib.parse.urljoin(self.src, utils.RAW_API_INFO_URL)
433
+
434
+ # Versions of Gradio older than 3.29.0 returned format of the API info
435
+ # from the /info endpoint
436
+ if version.parse(self.config.get("version", "2.0")) > version.Version("3.36.1"):
437
+ r = requests.get(api_info_url, headers=self.headers)
438
+ if r.ok:
439
+ info = r.json()
440
+ else:
441
+ raise ValueError(f"Could not fetch api info for {self.src}")
442
+ else:
443
+ fetch = requests.post(
444
+ utils.SPACE_FETCHER_URL,
445
+ json={"config": json.dumps(self.config), "serialize": self.serialize},
446
+ )
447
+ if fetch.ok:
448
+ info = fetch.json()["api"]
449
+ else:
450
+ raise ValueError(f"Could not fetch api info for {self.src}")
451
+ num_named_endpoints = len(info["named_endpoints"])
452
+ num_unnamed_endpoints = len(info["unnamed_endpoints"])
453
+ if num_named_endpoints == 0 and all_endpoints is None:
454
+ all_endpoints = True
455
+
456
+ human_info = "Client.predict() Usage Info\n---------------------------\n"
457
+ human_info += f"Named API endpoints: {num_named_endpoints}\n"
458
+
459
+ for api_name, endpoint_info in info["named_endpoints"].items():
460
+ human_info += self._render_endpoints_info(api_name, endpoint_info)
461
+
462
+ if all_endpoints:
463
+ human_info += f"\nUnnamed API endpoints: {num_unnamed_endpoints}\n"
464
+ for fn_index, endpoint_info in info["unnamed_endpoints"].items():
465
+ # When loading from json, the fn_indices are read as strings
466
+ # because json keys can only be strings
467
+ human_info += self._render_endpoints_info(int(fn_index), endpoint_info)
468
+ else:
469
+ if num_unnamed_endpoints > 0:
470
+ human_info += f"\nUnnamed API endpoints: {num_unnamed_endpoints}, to view, run Client.view_api(all_endpoints=True)\n"
471
+
472
+ if print_info:
473
+ print(human_info)
474
+ if return_format == "str":
475
+ return human_info
476
+ elif return_format == "dict":
477
+ return info
478
+
479
+ def reset_session(self) -> None:
480
+ self.session_hash = str(uuid.uuid4())
481
+
482
def _render_endpoints_info(
    self,
    name_or_index: str | int,
    endpoints_info: dict[str, list[dict[str, Any]]],
) -> str:
    """
    Renders a human-readable usage snippet for a single API endpoint.
    Parameters:
        name_or_index: the endpoint's api_name (str) or fn_index (int); decides whether the snippet shows api_name=... or fn_index=....
        endpoints_info: dict with "parameters" and "returns" lists of component info dicts (each with "label", "component" and "python_type").
    Returns:
        A multi-line string showing the predict() call signature plus per-parameter and per-return type details.
    """
    parameter_names = [
        utils.sanitize_parameter_names(p["label"])
        for p in endpoints_info["parameters"]
    ]
    rendered_parameters = ", ".join(parameter_names)
    if rendered_parameters:
        rendered_parameters = rendered_parameters + ", "
    return_values = [
        utils.sanitize_parameter_names(r["label"]) for r in endpoints_info["returns"]
    ]
    rendered_return_values = ", ".join(return_values)
    if len(return_values) > 1:
        # Multiple outputs come back as a tuple, so render them parenthesized.
        rendered_return_values = f"({rendered_return_values})"

    if isinstance(name_or_index, str):
        final_param = f'api_name="{name_or_index}"'
    elif isinstance(name_or_index, int):
        final_param = f"fn_index={name_or_index}"
    else:
        raise ValueError("name_or_index must be a string or integer")

    def _render_component_list(components: list[dict[str, Any]]) -> str:
        # Shared formatter for the Parameters and Returns sections (the
        # original duplicated this loop verbatim for both sections).
        if not components:
            return " - None\n"
        section = ""
        for info in components:
            desc = (
                f" ({info['python_type']['description']})"
                if info["python_type"].get("description")
                else ""
            )
            type_ = info["python_type"]["type"]
            section += f" - [{info['component']}] {utils.sanitize_parameter_names(info['label'])}: {type_}{desc} \n"
        return section

    human_info = f"\n - predict({rendered_parameters}{final_param}) -> {rendered_return_values}\n"
    human_info += " Parameters:\n"
    human_info += _render_component_list(endpoints_info["parameters"])
    human_info += " Returns:\n"
    human_info += _render_component_list(endpoints_info["returns"])

    return human_info
533
def __repr__(self):
    """Return the same human-readable endpoint listing produced by view_api()."""
    description = self.view_api(print_info=False, return_format="str")
    return description
536
def __str__(self):
    """Return the same human-readable endpoint listing produced by view_api()."""
    description = self.view_api(print_info=False, return_format="str")
    return description
539
def _telemetry_thread(self) -> None:
    """Fire-and-forget telemetry event recording that a client was initiated."""
    # Disable telemetry by setting the env variable HF_HUB_DISABLE_TELEMETRY=1
    payload = {
        "src": self.src,
    }
    try:
        send_telemetry(
            topic="py_client/initiated",
            library_name="gradio_client",
            library_version=utils.__version__,
            user_agent=payload,
        )
    except Exception:
        # Telemetry must never interfere with normal client operation.
        pass
554
+ def _infer_fn_index(self, api_name: str | None, fn_index: int | None) -> int:
555
+ inferred_fn_index = None
556
+ if api_name is not None:
557
+ for i, d in enumerate(self.config["dependencies"]):
558
+ config_api_name = d.get("api_name")
559
+ if config_api_name is None or config_api_name is False:
560
+ continue
561
+ if "/" + config_api_name == api_name:
562
+ inferred_fn_index = i
563
+ break
564
+ else:
565
+ error_message = f"Cannot find a function with `api_name`: {api_name}."
566
+ if not api_name.startswith("/"):
567
+ error_message += " Did you mean to use a leading slash?"
568
+ raise ValueError(error_message)
569
+ elif fn_index is not None:
570
+ inferred_fn_index = fn_index
571
+ if (
572
+ inferred_fn_index >= len(self.endpoints)
573
+ or not self.endpoints[inferred_fn_index].is_valid
574
+ ):
575
+ raise ValueError(f"Invalid function index: {fn_index}.")
576
+ else:
577
+ valid_endpoints = [
578
+ e for e in self.endpoints if e.is_valid and e.api_name is not None
579
+ ]
580
+ if len(valid_endpoints) == 1:
581
+ inferred_fn_index = valid_endpoints[0].fn_index
582
+ else:
583
+ raise ValueError(
584
+ "This Gradio app might have multiple endpoints. Please specify an `api_name` or `fn_index`"
585
+ )
586
+ return inferred_fn_index
587
+
588
+ def __del__(self):
589
+ if hasattr(self, "executor"):
590
+ self.executor.shutdown(wait=True)
591
+
592
def _space_name_to_src(self, space) -> str | None:
    """Resolve a Hugging Face Space name to the URL where it is hosted."""
    space_info = huggingface_hub.space_info(space, token=self.hf_token)
    return space_info.host  # type: ignore
595
def _get_config(self) -> dict:
    """
    Fetch the app's config JSON from the config endpoint, falling back to
    scraping the page source for Gradio versions that embedded the config inline.
    Raises:
        ValueError: if no config can be extracted, or the app is Gradio 2.x.
    """
    response = requests.get(
        urllib.parse.urljoin(self.src, utils.CONFIG_URL), headers=self.headers
    )
    if response.ok:
        return response.json()
    # to support older versions of Gradio: scrape the config out of the page HTML
    page = requests.get(self.src, headers=self.headers)
    match = re.search(r"window.gradio_config = (.*?);[\s]*</script>", page.text)
    try:
        config = json.loads(match.group(1))  # type: ignore
    except AttributeError as ae:
        # re.search returned None: the page had no embedded config.
        raise ValueError(
            f"Could not get Gradio config from: {self.src}"
        ) from ae
    if "allow_flagging" in config:
        raise ValueError(
            "Gradio 2.x is not supported by this client. Please upgrade your Gradio app to Gradio 3.x or higher."
        )
    return config
617
def deploy_discord(
    self,
    discord_bot_token: str | None = None,
    api_names: list[str | tuple[str, str]] | None = None,
    to_id: str | None = None,
    hf_token: str | None = None,
    private: bool = False,
):
    """
    Deploy the upstream app as a discord bot. Currently only supports gr.ChatInterface.
    Parameters:
        discord_bot_token: This is the "password" needed to be able to launch the bot. Users can get a token by creating a bot app on the discord website. If run the method without specifying a token, the space will explain how to get one. See here: https://huggingface.co/spaces/freddyaboulton/test-discord-bot-v1.
        api_names: The api_names of the app to turn into bot commands. This parameter currently has no effect as ChatInterface only has one api_name ('/chat').
        to_id: The name of the space hosting the discord bot. If None, the name will be gradio-discord-bot-{random-substring}
        hf_token: HF api token with write privileges in order to upload the files to HF space. Can be omitted if logged in via the HuggingFace CLI, unless the upstream space is private. Obtain from: https://huggingface.co/settings/token
        private: Whether the space hosting the discord bot is private. The visibility of the discord bot itself is set via the discord website. See https://huggingface.co/spaces/freddyaboulton/test-discord-bot-v1
    """

    if self.config["mode"] == "chat_interface" and not api_names:
        api_names = [("chat", "chat")]

    # Fix: the original placed a bare generator expression on the right of
    # `and`, which is always truthy, so invalid entries were never rejected.
    # all() is required to actually evaluate the per-entry checks.
    valid_list = isinstance(api_names, list) and all(
        isinstance(n, str)
        or (
            isinstance(n, tuple) and isinstance(n[0], str) and isinstance(n[1], str)
        )
        for n in api_names
    )
    if api_names is None or not valid_list:
        raise ValueError(
            f"Each entry in api_names must be either a string or a tuple of strings. Received {api_names}"
        )
    assert (
        len(api_names) == 1
    ), "Currently only one api_name can be deployed to discord."

    # Normalize every entry to an (api_name, command_name) pair without
    # mutating the caller's list.
    api_names = [(n, n) if isinstance(n, str) else n for n in api_names]

    fn = next(
        (ep for ep in self.endpoints if ep.api_name == f"/{api_names[0][0]}"), None
    )
    if not fn:
        raise ValueError(
            f"api_name {api_names[0][0]} not present in {self.space_id or self.src}"
        )
    # Fix: the original filtered with `if fn not in utils.SKIP_COMPONENTS`
    # (the endpoint object, never a member of that set) and derived `outputs`
    # from input_component_types. Filter on the component type itself and
    # read outputs from output_component_types.
    inputs = [
        inp for inp in fn.input_component_types if inp not in utils.SKIP_COMPONENTS
    ]
    outputs = [
        out for out in fn.output_component_types if out not in utils.SKIP_COMPONENTS
    ]
    # Fix: parenthesize the conjunction. The original
    # `not inputs == [...] and outputs == [...]` only raised when the outputs
    # matched but the inputs did not; the intent is to require both.
    if not (inputs == ["textbox"] and outputs == ["textbox"]):
        raise ValueError(
            "Currently only api_names with a single textbox as input and output are supported. "
            f"Received {inputs} and {outputs}"
        )

    is_private = False
    if self.space_id:
        is_private = huggingface_hub.space_info(self.space_id).private
        if is_private:
            assert hf_token, (
                f"Since {self.space_id} is private, you must explicitly pass in hf_token "
                "so that it can be added as a secret in the discord bot space."
            )

    # Determine the id of the space that will host the bot.
    if to_id:
        if "/" in to_id:
            to_id = to_id.split("/")[1]
        space_id = huggingface_hub.get_full_repo_name(to_id, token=hf_token)
    else:
        if self.space_id:
            space_id = f'{self.space_id.split("/")[1]}-gradio-discord-bot'
        else:
            space_id = f"gradio-discord-bot-{secrets.token_hex(4)}"
        space_id = huggingface_hub.get_full_repo_name(space_id, token=hf_token)

    api = huggingface_hub.HfApi()

    try:
        huggingface_hub.space_info(space_id)
        first_upload = False
    except huggingface_hub.utils.RepositoryNotFoundError:
        first_upload = True

    huggingface_hub.create_repo(
        space_id,
        repo_type="space",
        space_sdk="gradio",
        token=hf_token,
        exist_ok=True,
        private=private,
    )
    if first_upload:
        huggingface_hub.metadata_update(
            repo_id=space_id,
            repo_type="space",
            metadata={"tags": ["gradio-discord-bot"]},
        )

    # Render the bot's app.py from the bundled template.
    with open(str(Path(__file__).parent / "templates" / "discord_chat.py")) as f:
        app = f.read()
    app = app.replace("<<app-src>>", self.src)
    app = app.replace("<<api-name>>", api_names[0][0])
    app = app.replace("<<command-name>>", api_names[0][1])

    # delete=False so the files survive the `with` block for the commit below.
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as app_file:
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as requirements:
            app_file.write(app)
            requirements.write("\n".join(["discord.py==2.3.1"]))

    operations = [
        CommitOperationAdd(path_in_repo="app.py", path_or_fileobj=app_file.name),
        CommitOperationAdd(
            path_in_repo="requirements.txt", path_or_fileobj=requirements.name
        ),
    ]

    api.create_commit(
        repo_id=space_id,
        commit_message="Deploy Discord Bot",
        repo_type="space",
        operations=operations,
        token=hf_token,
    )

    if discord_bot_token:
        huggingface_hub.add_space_secret(
            space_id, "DISCORD_TOKEN", discord_bot_token, token=hf_token
        )
    if is_private:
        # The bot needs the token at runtime to reach the private upstream space.
        huggingface_hub.add_space_secret(
            space_id, "HF_TOKEN", hf_token, token=hf_token
        )

    url = f"https://huggingface.co/spaces/{space_id}"
    print(f"See your discord bot here! {url}")
    return url
758
+
759
class Endpoint:
    """Helper class for storing all the information about a single API endpoint."""

    def __init__(self, client: Client, fn_index: int, dependency: dict):
        """
        Parameters:
            client: the Client that owns this endpoint (supplies src, headers, config, serialize flag).
            fn_index: index of this endpoint's dependency in the app config.
            dependency: the dependency dict from the app config for this endpoint.
        """
        self.client: Client = client
        self.fn_index = fn_index
        self.dependency = dependency
        api_name = dependency.get("api_name")
        # Prefix string api_names with "/"; None (unnamed) and False (disabled)
        # pass through unchanged.
        self.api_name: str | Literal[False] | None = (
            "/" + api_name if isinstance(api_name, str) else api_name
        )
        self.use_ws = self._use_websocket(self.dependency)
        self.input_component_types = []
        self.output_component_types = []
        # Ensure root_url always ends with a trailing slash.
        self.root_url = client.src + "/" if not client.src.endswith("/") else client.src
        self.is_continuous = dependency.get("types", {}).get("continuous", False)
        try:
            # Only a real API endpoint if backend_fn is True (so not just a frontend function), serializers are valid,
            # and api_name is not False (meaning that the developer has explicitly disabled the API endpoint)
            self.serializers, self.deserializers = self._setup_serializers()
            self.is_valid = self.dependency["backend_fn"] and self.api_name is not False
        except AssertionError:
            # _setup_serializers asserts on unknown components/serializers;
            # treat such endpoints as unusable rather than crashing.
            self.is_valid = False

    def __repr__(self):
        return f"Endpoint src: {self.client.src}, api_name: {self.api_name}, fn_index: {self.fn_index}"

    def __str__(self):
        return self.__repr__()

    def make_end_to_end_fn(self, helper: Communicator | None = None):
        """Return a callable that serializes inputs, runs the prediction, and deserializes outputs."""
        _predict = self.make_predict(helper)

        def _inner(*data):
            if not self.is_valid:
                raise utils.InvalidAPIEndpointError()
            data = self.insert_state(*data)
            if self.client.serialize:
                data = self.serialize(*data)
            predictions = _predict(*data)
            predictions = self.process_predictions(*predictions)
            # Append final output only if not already present
            # for consistency between generators and not generators
            if helper:
                with helper.lock:
                    if not helper.job.outputs:
                        helper.job.outputs.append(predictions)
            return predictions

        return _inner

    def make_predict(self, helper: Communicator | None = None):
        """Return the low-level prediction function (websocket or HTTP POST round-trip)."""

        def _predict(*data) -> tuple:
            # Payload for the predict call; hash_data identifies the session
            # for websocket-based queueing.
            data = json.dumps(
                {
                    "data": data,
                    "fn_index": self.fn_index,
                    "session_hash": self.client.session_hash,
                }
            )
            hash_data = json.dumps(
                {
                    "fn_index": self.fn_index,
                    "session_hash": self.client.session_hash,
                }
            )

            if self.use_ws:
                result = utils.synchronize_async(self._ws_fn, data, hash_data, helper)
                if "error" in result:
                    raise ValueError(result["error"])
            else:
                response = requests.post(
                    self.client.api_url, headers=self.client.headers, data=data
                )
                result = json.loads(response.content.decode("utf-8"))
            try:
                output = result["data"]
            except KeyError as ke:
                # No "data" key: surface the server-side error as precisely
                # as possible (rate limiting, generic error, or unknown shape).
                is_public_space = (
                    self.client.space_id
                    and not huggingface_hub.space_info(self.client.space_id).private
                )
                if "error" in result and "429" in result["error"] and is_public_space:
                    raise utils.TooManyRequestsError(
                        f"Too many requests to the API, please try again later. To avoid being rate-limited, "
                        f"please duplicate the Space using Client.duplicate({self.client.space_id}) "
                        f"and pass in your Hugging Face token."
                    ) from None
                elif "error" in result:
                    raise ValueError(result["error"]) from None
                raise KeyError(
                    f"Could not find 'data' key in response. Response received: {result}"
                ) from ke
            return tuple(output)

        return _predict

    def _predict_resolve(self, *data) -> Any:
        """Needed for gradio.load(), which has a slightly different signature for serializing/deserializing"""
        outputs = self.make_predict()(*data)
        if len(self.dependency["outputs"]) == 1:
            return outputs[0]
        return outputs

    def _upload(
        self, file_paths: list[str | list[str]]
    ) -> list[str | list[str]] | list[dict[str, Any] | list[dict[str, Any]]]:
        """
        Upload local files to the app's upload endpoint in a single request.
        Returns a structure mirroring file_paths where each path is replaced by
        an is_file dict; on a non-200 response, returns file_paths unchanged.
        """
        if not file_paths:
            return []
        # Put all the filepaths in one file
        # but then keep track of which index in the
        # original list they came from so we can recreate
        # the original structure
        files = []
        indices = []
        for i, fs in enumerate(file_paths):
            if not isinstance(fs, list):
                fs = [fs]
            for f in fs:
                files.append(("files", (Path(f).name, open(f, "rb"))))  # noqa: SIM115
                indices.append(i)
        r = requests.post(
            self.client.upload_url, headers=self.client.headers, files=files
        )
        if r.status_code != 200:
            uploaded = file_paths
        else:
            uploaded = []
            result = r.json()
            for i, fs in enumerate(file_paths):
                if isinstance(fs, list):
                    # Collect all uploaded names that came from position i.
                    output = [o for ix, o in enumerate(result) if indices[ix] == i]
                    res = [
                        {
                            "is_file": True,
                            "name": o,
                            "orig_name": Path(f).name,
                            "data": None,
                        }
                        for f, o in zip(fs, output)
                    ]
                else:
                    o = next(o for ix, o in enumerate(result) if indices[ix] == i)
                    res = {
                        "is_file": True,
                        "name": o,
                        "orig_name": Path(fs).name,
                        "data": None,
                    }
                uploaded.append(res)
        return uploaded

    def _add_uploaded_files_to_data(
        self,
        files: list[str | list[str]] | list[dict[str, Any] | list[dict[str, Any]]],
        data: list[Any],
    ) -> None:
        """Helper function to modify the input data with the uploaded files."""
        file_counter = 0
        for i, t in enumerate(self.input_component_types):
            if t in ["file", "uploadbutton"]:
                data[i] = files[file_counter]
                file_counter += 1

    def insert_state(self, *data) -> tuple:
        """Insert None placeholders at the positions of state components."""
        data = list(data)
        for i, input_component_type in enumerate(self.input_component_types):
            if input_component_type == utils.STATE_COMPONENT:
                data.insert(i, None)
        return tuple(data)

    def remove_skipped_components(self, *data) -> tuple:
        """Drop outputs whose component type is in utils.SKIP_COMPONENTS."""
        data = [
            d
            for d, oct in zip(data, self.output_component_types)
            if oct not in utils.SKIP_COMPONENTS
        ]
        return tuple(data)

    def reduce_singleton_output(self, *data) -> Any:
        """Unwrap the tuple when exactly one non-skipped output component exists."""
        if (
            len(
                [
                    oct
                    for oct in self.output_component_types
                    if oct not in utils.SKIP_COMPONENTS
                ]
            )
            == 1
        ):
            return data[0]
        else:
            return data

    def serialize(self, *data) -> tuple:
        """Upload any file inputs, then run each value through its component serializer."""
        assert len(data) == len(
            self.serializers
        ), f"Expected {len(self.serializers)} arguments, got {len(data)}"

        files = [
            f
            for f, t in zip(data, self.input_component_types)
            if t in ["file", "uploadbutton"]
        ]
        uploaded_files = self._upload(files)
        data = list(data)
        self._add_uploaded_files_to_data(uploaded_files, data)
        o = tuple([s.serialize(d) for s, d in zip(self.serializers, data)])
        return o

    def deserialize(self, *data) -> tuple:
        """Run each output value through its component deserializer."""
        assert len(data) == len(
            self.deserializers
        ), f"Expected {len(self.deserializers)} outputs, got {len(data)}"
        outputs = tuple(
            [
                s.deserialize(
                    d,
                    save_dir=self.client.output_dir,
                    hf_token=self.client.hf_token,
                    root_url=self.root_url,
                )
                for s, d in zip(self.deserializers, data)
            ]
        )
        return outputs

    def process_predictions(self, *predictions):
        """Deserialize (if enabled), drop skipped components, and unwrap singletons."""
        if self.client.serialize:
            predictions = self.deserialize(*predictions)
        predictions = self.remove_skipped_components(*predictions)
        predictions = self.reduce_singleton_output(*predictions)
        return predictions

    def _setup_serializers(self) -> tuple[list[Serializable], list[Serializable]]:
        """
        Build (serializers, deserializers) for this endpoint's input/output
        components, also populating input_component_types/output_component_types.
        Asserts (caught in __init__) on unknown components or serializers.
        """
        inputs = self.dependency["inputs"]
        serializers = []

        for i in inputs:
            for component in self.client.config["components"]:
                if component["id"] == i:
                    component_name = component["type"]
                    self.input_component_types.append(component_name)
                    if component.get("serializer"):
                        serializer_name = component["serializer"]
                        assert (
                            serializer_name in serializing.SERIALIZER_MAPPING
                        ), f"Unknown serializer: {serializer_name}, you may need to update your gradio_client version."
                        serializer = serializing.SERIALIZER_MAPPING[serializer_name]
                    else:
                        assert (
                            component_name in serializing.COMPONENT_MAPPING
                        ), f"Unknown component: {component_name}, you may need to update your gradio_client version."
                        serializer = serializing.COMPONENT_MAPPING[component_name]
                    serializers.append(serializer())  # type: ignore

        outputs = self.dependency["outputs"]
        deserializers = []
        for i in outputs:
            for component in self.client.config["components"]:
                if component["id"] == i:
                    component_name = component["type"]
                    self.output_component_types.append(component_name)
                    if component.get("serializer"):
                        serializer_name = component["serializer"]
                        assert (
                            serializer_name in serializing.SERIALIZER_MAPPING
                        ), f"Unknown serializer: {serializer_name}, you may need to update your gradio_client version."
                        deserializer = serializing.SERIALIZER_MAPPING[serializer_name]
                    elif component_name in utils.SKIP_COMPONENTS:
                        # Skipped components still need a placeholder deserializer.
                        deserializer = serializing.SimpleSerializable
                    else:
                        assert (
                            component_name in serializing.COMPONENT_MAPPING
                        ), f"Unknown component: {component_name}, you may need to update your gradio_client version."
                        deserializer = serializing.COMPONENT_MAPPING[component_name]
                    deserializers.append(deserializer())  # type: ignore

        return serializers, deserializers

    def _use_websocket(self, dependency: dict) -> bool:
        """True when the app's queue is enabled, the app is new enough (>= 3.2) to queue over websockets, and this dependency uses the queue."""
        queue_enabled = self.client.config.get("enable_queue", False)
        queue_uses_websocket = version.parse(
            self.client.config.get("version", "2.0")
        ) >= version.Version("3.2")
        dependency_uses_queue = dependency.get("queue", False) is not False
        return queue_enabled and queue_uses_websocket and dependency_uses_queue

    async def _ws_fn(self, data, hash_data, helper: Communicator):
        """Open a websocket to the app and run the prediction over it."""
        async with websockets.connect(  # type: ignore
            self.client.ws_url,
            open_timeout=10,
            extra_headers=self.client.headers,
            max_size=1024 * 1024 * 1024,
        ) as websocket:
            return await utils.get_pred_from_ws(websocket, data, hash_data, helper)
1057
+
1058
+ @document("result", "outputs", "status")
1059
+ class Job(Future):
1060
+ """
1061
+ A Job is a wrapper over the Future class that represents a prediction call that has been
1062
+ submitted by the Gradio client. This class is not meant to be instantiated directly, but rather
1063
+ is created by the Client.submit() method.
1064
+
1065
+ A Job object includes methods to get the status of the prediction call, as well to get the outputs of
1066
+ the prediction call. Job objects are also iterable, and can be used in a loop to get the outputs
1067
+ of prediction calls as they become available for generator endpoints.
1068
+ """
1069
+
1070
+ def __init__(
1071
+ self,
1072
+ future: Future,
1073
+ communicator: Communicator | None = None,
1074
+ verbose: bool = True,
1075
+ space_id: str | None = None,
1076
+ ):
1077
+ """
1078
+ Parameters:
1079
+ future: The future object that represents the prediction call, created by the Client.submit() method
1080
+ communicator: The communicator object that is used to communicate between the client and the background thread running the job
1081
+ verbose: Whether to print any status-related messages to the console
1082
+ space_id: The space ID corresponding to the Client object that created this Job object
1083
+ """
1084
+ self.future = future
1085
+ self.communicator = communicator
1086
+ self._counter = 0
1087
+ self.verbose = verbose
1088
+ self.space_id = space_id
1089
+
1090
+ def __iter__(self) -> Job:
1091
+ return self
1092
+
1093
+ def __next__(self) -> tuple | Any:
1094
+ if not self.communicator:
1095
+ raise StopIteration()
1096
+
1097
+ with self.communicator.lock:
1098
+ if self.communicator.job.latest_status.code == Status.FINISHED:
1099
+ raise StopIteration()
1100
+
1101
+ while True:
1102
+ with self.communicator.lock:
1103
+ if len(self.communicator.job.outputs) == self._counter + 1:
1104
+ o = self.communicator.job.outputs[self._counter]
1105
+ self._counter += 1
1106
+ return o
1107
+ if self.communicator.job.latest_status.code == Status.FINISHED:
1108
+ raise StopIteration()
1109
+
1110
+ def result(self, timeout: float | None = None) -> Any:
1111
+ """
1112
+ Return the result of the call that the future represents. Raises CancelledError: If the future was cancelled, TimeoutError: If the future didn't finish executing before the given timeout, and Exception: If the call raised then that exception will be raised.
1113
+
1114
+ Parameters:
1115
+ timeout: The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time.
1116
+ Returns:
1117
+ The result of the call that the future represents. For generator functions, it will return the final iteration.
1118
+ Example:
1119
+ from gradio_client import Client
1120
+ calculator = Client(src="gradio/calculator")
1121
+ job = calculator.submit("foo", "add", 4, fn_index=0)
1122
+ job.result(timeout=5)
1123
+ >> 9
1124
+ """
1125
+ return super().result(timeout=timeout)
1126
+
1127
+ def outputs(self) -> list[tuple | Any]:
1128
+ """
1129
+ Returns a list containing the latest outputs from the Job.
1130
+
1131
+ If the endpoint has multiple output components, the list will contain
1132
+ a tuple of results. Otherwise, it will contain the results without storing them
1133
+ in tuples.
1134
+
1135
+ For endpoints that are queued, this list will contain the final job output even
1136
+ if that endpoint does not use a generator function.
1137
+
1138
+ Example:
1139
+ from gradio_client import Client
1140
+ client = Client(src="gradio/count_generator")
1141
+ job = client.submit(3, api_name="/count")
1142
+ while not job.done():
1143
+ time.sleep(0.1)
1144
+ job.outputs()
1145
+ >> ['0', '1', '2']
1146
+ """
1147
+ if not self.communicator:
1148
+ return []
1149
+ else:
1150
+ with self.communicator.lock:
1151
+ return self.communicator.job.outputs
1152
+
1153
+ def status(self) -> StatusUpdate:
1154
+ """
1155
+ Returns the latest status update from the Job in the form of a StatusUpdate
1156
+ object, which contains the following fields: code, rank, queue_size, success, time, eta, and progress_data.
1157
+
1158
+ progress_data is a list of updates emitted by the gr.Progress() tracker of the event handler. Each element
1159
+ of the list has the following fields: index, length, unit, progress, desc. If the event handler does not have
1160
+ a gr.Progress() tracker, the progress_data field will be None.
1161
+
1162
+ Example:
1163
+ from gradio_client import Client
1164
+ client = Client(src="gradio/calculator")
1165
+ job = client.submit(5, "add", 4, api_name="/predict")
1166
+ job.status()
1167
+ >> <Status.STARTING: 'STARTING'>
1168
+ job.status().eta
1169
+ >> 43.241 # seconds
1170
+ """
1171
+ time = datetime.now()
1172
+ cancelled = False
1173
+ if self.communicator:
1174
+ with self.communicator.lock:
1175
+ cancelled = self.communicator.should_cancel
1176
+ if cancelled:
1177
+ return StatusUpdate(
1178
+ code=Status.CANCELLED,
1179
+ rank=0,
1180
+ queue_size=None,
1181
+ success=False,
1182
+ time=time,
1183
+ eta=None,
1184
+ progress_data=None,
1185
+ )
1186
+ if self.done():
1187
+ if not self.future._exception: # type: ignore
1188
+ return StatusUpdate(
1189
+ code=Status.FINISHED,
1190
+ rank=0,
1191
+ queue_size=None,
1192
+ success=True,
1193
+ time=time,
1194
+ eta=None,
1195
+ progress_data=None,
1196
+ )
1197
+ else:
1198
+ return StatusUpdate(
1199
+ code=Status.FINISHED,
1200
+ rank=0,
1201
+ queue_size=None,
1202
+ success=False,
1203
+ time=time,
1204
+ eta=None,
1205
+ progress_data=None,
1206
+ )
1207
+ else:
1208
+ if not self.communicator:
1209
+ return StatusUpdate(
1210
+ code=Status.PROCESSING,
1211
+ rank=0,
1212
+ queue_size=None,
1213
+ success=None,
1214
+ time=time,
1215
+ eta=None,
1216
+ progress_data=None,
1217
+ )
1218
+ else:
1219
+ with self.communicator.lock:
1220
+ eta = self.communicator.job.latest_status.eta
1221
+ if self.verbose and self.space_id and eta and eta > 30:
1222
+ print(
1223
+ f"Due to heavy traffic on this app, the prediction will take approximately {int(eta)} seconds."
1224
+ f"For faster predictions without waiting in queue, you may duplicate the space using: Client.duplicate({self.space_id})"
1225
+ )
1226
+ return self.communicator.job.latest_status
1227
+
1228
+ def __getattr__(self, name):
1229
+ """Forwards any properties to the Future class."""
1230
+ return getattr(self.future, name)
1231
+
1232
+ def cancel(self) -> bool:
1233
+ """Cancels the job as best as possible.
1234
+
1235
+ If the app you are connecting to has the gradio queue enabled, the job
1236
+ will be cancelled locally as soon as possible. For apps that do not use the
1237
+ queue, the job cannot be cancelled if it's been sent to the local executor
1238
+ (for the time being).
1239
+
1240
+ Note: In general, this DOES not stop the process from running in the upstream server
1241
+ except for the following situations:
1242
+
1243
+ 1. If the job is queued upstream, it will be removed from the queue and the server will not run the job
1244
+ 2. If the job has iterative outputs, the job will finish as soon as the current iteration finishes running
1245
+ 3. If the job has not been picked up by the queue yet, the queue will not pick up the job
1246
+ """
1247
+ if self.communicator:
1248
+ with self.communicator.lock:
1249
+ self.communicator.should_cancel = True
1250
+ return True
1251
+ return self.future.cancel()
testbed/gradio-app__gradio/client/python/gradio_client/data_classes.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import TypedDict
4
+
5
+ from typing_extensions import NotRequired
6
+
7
+
8
class FileData(TypedDict):
    """Typed payload describing a file exchanged with a Gradio app: either a
    reference to a server-side file (is_file=True) or inline base64 data."""

    name: str | None  # filename
    data: str | None  # base64 encoded data
    size: NotRequired[int | None]  # size in bytes
    is_file: NotRequired[
        bool
    ]  # whether the data corresponds to a file or base64 encoded data
    orig_name: NotRequired[str]  # original filename
    mime_type: NotRequired[str]  # MIME type of the file
    is_stream: NotRequired[bool]  # NOTE(review): presumably marks streamed content — confirm against callers
testbed/gradio-app__gradio/client/python/gradio_client/documentation.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Contains methods that generate documentation for Gradio functions and classes."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import inspect
6
+ from typing import Callable
7
+
8
# Mapping from documentation-group name to the list of (cls, fns) pairs
# registered under that group by the @document decorator.
classes_to_document = {}
# Classes registered with @document(inherit=True); used as a set (values are None).
classes_inherit_documentation = {}
# The group that subsequent @document calls register under; set by
# set_documentation_group().
documentation_group = None
11
+
12
+
13
def set_documentation_group(m):
    """Make `m` the active documentation group and ensure it has a registry entry."""
    global documentation_group
    documentation_group = m
    classes_to_document.setdefault(m, [])
+
19
+
20
def extract_instance_attr_doc(cls, attr):
    """
    Extract the docstring placed immediately after the `self.<attr>` assignment
    in cls.__init__ (the docstring must be a multi-line triple-quoted block so
    its delimiters sit on their own lines).
    Parameters:
        cls: class whose __init__ source is scanned.
        attr: name of the instance attribute to document.
    Returns:
        The docstring's lines joined into a single space-separated string.
    """
    code = inspect.getsource(cls.__init__)
    lines = [line.strip() for line in code.split("\n")]
    # Fix: track whether the attribute was actually found. The original loop
    # left `i` at the last index when there was no match, so its
    # `assert i is not None` passed and the failure surfaced later as a
    # confusing ValueError from lines.index instead of this message.
    attr_line = None
    for i, line in enumerate(lines):
        if line.startswith("self." + attr + ":") or line.startswith(
            "self." + attr + " ="
        ):
            attr_line = i
            break
    assert attr_line is not None, f"Could not find {attr} in {cls.__name__}"
    start_line = lines.index('"""', attr_line)
    end_line = lines.index('"""', start_line + 1)
    # Make sure the docstring belongs to this attribute and not a later one.
    for j in range(attr_line + 1, start_line):
        assert not lines[j].startswith("self."), (
            f"Found another attribute before docstring for {attr} in {cls.__name__}: "
            + lines[j]
            + "\n start:"
            + lines[attr_line]
        )
    doc_string = " ".join(lines[start_line + 1 : end_line])
    return doc_string
42
+
43
def document(*fns, inherit=False):
    """
    Defines the @document decorator which adds classes or functions to the Gradio
    documentation at www.gradio.app/docs.

    Usage examples:
    - Put @document() above a class to document the class and its constructor.
    - Put @document("fn1", "fn2") above a class to also document methods fn1 and fn2.
    - Put @document("*fn3") with an asterisk above a class to document the instance attribute methods f3.
    """

    def inner_doc(cls):
        # Record the class (plus the requested member names) under the
        # currently active documentation group, then return it unchanged.
        if inherit:
            classes_inherit_documentation[cls] = None
        classes_to_document[documentation_group].append((cls, fns))
        return cls

    return inner_doc
62
+
63
+
64
+ def document_fn(fn: Callable, cls) -> tuple[str, list[dict], dict, str | None]:
65
+ """
66
+ Generates documentation for any function.
67
+ Parameters:
68
+ fn: Function to document
69
+ Returns:
70
+ description: General description of fn
71
+ parameters: A list of dicts for each parameter, storing data for the parameter name, annotation and doc
72
+ return: A dict storing data for the returned annotation and doc
73
+ example: Code for an example use of the fn
74
+ """
75
+ doc_str = inspect.getdoc(fn) or ""
76
+ doc_lines = doc_str.split("\n")
77
+ signature = inspect.signature(fn)
78
+ description, parameters, returns, examples = [], {}, [], []
79
+ mode = "description"
80
+ for line in doc_lines:
81
+ line = line.rstrip()
82
+ if line == "Parameters:":
83
+ mode = "parameter"
84
+ elif line.startswith("Example:"):
85
+ mode = "example"
86
+ if "(" in line and ")" in line:
87
+ c = line.split("(")[1].split(")")[0]
88
+ if c != cls.__name__:
89
+ mode = "ignore"
90
+ elif line == "Returns:":
91
+ mode = "return"
92
+ else:
93
+ if mode == "description":
94
+ description.append(line if line.strip() else "<br>")
95
+ continue
96
+ if not (line.startswith(" ") or line.strip() == ""):
97
+ print(line)
98
+ assert (
99
+ line.startswith(" ") or line.strip() == ""
100
+ ), f"Documentation format for {fn.__name__} has format error in line: {line}"
101
+ line = line[4:]
102
+ if mode == "parameter":
103
+ colon_index = line.index(": ")
104
+ assert (
105
+ colon_index > -1
106
+ ), f"Documentation format for {fn.__name__} has format error in line: {line}"
107
+ parameter = line[:colon_index]
108
+ parameter_doc = line[colon_index + 2 :]
109
+ parameters[parameter] = parameter_doc
110
+ elif mode == "return":
111
+ returns.append(line)
112
+ elif mode == "example":
113
+ examples.append(line)
114
+ description_doc = " ".join(description)
115
+ parameter_docs = []
116
+ for param_name, param in signature.parameters.items():
117
+ if param_name.startswith("_"):
118
+ continue
119
+ if param_name in ["kwargs", "args"] and param_name not in parameters:
120
+ continue
121
+ parameter_doc = {
122
+ "name": param_name,
123
+ "annotation": param.annotation,
124
+ "doc": parameters.get(param_name),
125
+ }
126
+ if param_name in parameters:
127
+ del parameters[param_name]
128
+ if param.default != inspect.Parameter.empty:
129
+ default = param.default
130
+ if type(default) == str:
131
+ default = '"' + default + '"'
132
+ if default.__class__.__module__ != "builtins":
133
+ default = f"{default.__class__.__name__}()"
134
+ parameter_doc["default"] = default
135
+ elif parameter_doc["doc"] is not None:
136
+ if "kwargs" in parameter_doc["doc"]:
137
+ parameter_doc["kwargs"] = True
138
+ if "args" in parameter_doc["doc"]:
139
+ parameter_doc["args"] = True
140
+ parameter_docs.append(parameter_doc)
141
+ assert (
142
+ len(parameters) == 0
143
+ ), f"Documentation format for {fn.__name__} documents nonexistent parameters: {''.join(parameters.keys())}"
144
+ if len(returns) == 0:
145
+ return_docs = {}
146
+ elif len(returns) == 1:
147
+ return_docs = {"annotation": signature.return_annotation, "doc": returns[0]}
148
+ else:
149
+ return_docs = {}
150
+ # raise ValueError("Does not support multiple returns yet.")
151
+ examples_doc = "\n".join(examples) if len(examples) > 0 else None
152
+ return description_doc, parameter_docs, return_docs, examples_doc
153
+
154
+
155
def document_cls(cls):
    """Parse a class docstring into (description, tag-dict, example-code)."""
    doc_str = inspect.getdoc(cls)
    if doc_str is None:
        return "", {}, ""
    tags = {}
    description_lines = []
    mode = "description"
    for raw in doc_str.split("\n"):
        raw = raw.rstrip()
        if raw.endswith(":") and " " not in raw:
            # Block tag header, e.g. "Example:" — following indented lines belong to it.
            mode = raw[:-1].lower()
            tags[mode] = []
        elif raw.split(" ")[0].endswith(":") and not raw.startswith("    "):
            # Inline tag, e.g. "Demos: calculator"
            tag = raw[: raw.index(":")].lower()
            tags[tag] = raw[raw.index(":") + 2 :]
        elif mode == "description":
            description_lines.append(raw if raw.strip() else "<br>")
        else:
            assert (
                raw.startswith("    ") or not raw.strip()
            ), f"Documentation format for {cls.__name__} has format error in line: {raw}"
            tags[mode].append(raw[4:])
    if "example" in tags:
        example = "\n".join(tags["example"])
        del tags["example"]
    else:
        example = None
    for key, val in tags.items():
        if isinstance(val, list):
            tags[key] = "<br>".join(val)
    description = " ".join(description_lines).replace("\n", "<br>")
    return description, tags, example
189
+
190
+
191
def generate_documentation():
    """
    Compile the registry built up by @document() into a nested dict:
    {group: [{class, name, description, tags, parameters, returns, example, fns}, ...]}.
    Also propagates documented fns from @document(inherit=True) base classes
    down to every registered subclass.
    """
    documentation = {}
    for mode, class_list in classes_to_document.items():
        documentation[mode] = []
        for cls, fns in class_list:
            # Plain functions are documented directly; classes via their constructor.
            fn_to_document = cls if inspect.isfunction(cls) else cls.__init__
            _, parameter_doc, return_doc, _ = document_fn(fn_to_document, cls)
            cls_description, cls_tags, cls_example = document_cls(cls)
            cls_documentation = {
                "class": cls,
                "name": cls.__name__,
                "description": cls_description,
                "tags": cls_tags,
                "parameters": parameter_doc,
                "returns": return_doc,
                "example": cls_example,
                "fns": [],
            }
            for fn_name in fns:
                # A leading "*" marks an instance-attribute callable rather
                # than an ordinary method (see the @document docstring).
                instance_attribute_fn = fn_name.startswith("*")
                if instance_attribute_fn:
                    fn_name = fn_name[1:]
                    # Instance attribute fns are classes
                    # whose __call__ method determines their behavior
                    fn = getattr(cls(), fn_name).__call__
                else:
                    fn = getattr(cls, fn_name)
                if not callable(fn):
                    # Non-callable attribute: document its repr and override
                    # the displayed signature.
                    description_doc = str(fn)
                    parameter_docs = {}
                    return_docs = {}
                    examples_doc = ""
                    override_signature = f"gr.{cls.__name__}.{fn_name}"
                else:
                    (
                        description_doc,
                        parameter_docs,
                        return_docs,
                        examples_doc,
                    ) = document_fn(fn, cls)
                    override_signature = None
                if instance_attribute_fn:
                    # The description lives in the docstring placed after the
                    # attribute assignment in __init__, not on __call__.
                    description_doc = extract_instance_attr_doc(cls, fn_name)
                cls_documentation["fns"].append(
                    {
                        "fn": fn,
                        "name": fn_name,
                        "description": description_doc,
                        "tags": {},
                        "parameters": parameter_docs,
                        "returns": return_docs,
                        "example": examples_doc,
                        "override_signature": override_signature,
                    }
                )
            documentation[mode].append(cls_documentation)
            if cls in classes_inherit_documentation:
                classes_inherit_documentation[cls] = cls_documentation["fns"]
    # Second pass: copy documented fns from inherit=True base classes onto any
    # registered subclass, re-extracting the attribute docstring from the
    # subclass where possible (failures keep the inherited description).
    for mode, class_list in classes_to_document.items():
        for i, (cls, _) in enumerate(class_list):
            for super_class in classes_inherit_documentation:
                if (
                    inspect.isclass(cls)
                    and issubclass(cls, super_class)
                    and cls != super_class
                ):
                    for inherited_fn in classes_inherit_documentation[super_class]:
                        inherited_fn = dict(inherited_fn)
                        try:
                            inherited_fn["description"] = extract_instance_attr_doc(
                                cls, inherited_fn["name"]
                            )
                        except (ValueError, AssertionError):
                            pass
                        documentation[mode][i]["fns"].append(inherited_fn)
    return documentation
testbed/gradio-app__gradio/client/python/gradio_client/media_data.py ADDED
The diff for this file is too large to render. See raw diff
 
testbed/gradio-app__gradio/client/python/gradio_client/package.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "gradio_client",
3
+ "version": "0.5.2",
4
+ "description": "",
5
+ "python": "true",
6
+ "main_changeset": true
7
+ }
testbed/gradio-app__gradio/client/python/gradio_client/serializing.py ADDED
@@ -0,0 +1,582 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ import secrets
6
+ import tempfile
7
+ import uuid
8
+ from pathlib import Path
9
+ from typing import Any
10
+
11
+ from gradio_client import media_data, utils
12
+ from gradio_client.data_classes import FileData
13
+
14
# Load the JSON-schema fragments (shipped next to this module as types.json)
# that each serializer's api_info() looks up by class name.
with open(Path(__file__).parent / "types.json") as f:
    serializer_types = json.load(f)
16
+
17
+
18
+ class Serializable:
19
+ def serialized_info(self):
20
+ """
21
+ The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].
22
+ Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output
23
+ """
24
+ return self.api_info()
25
+
26
+ def api_info(self) -> dict[str, list[str]]:
27
+ """
28
+ The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].
29
+ Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output
30
+ """
31
+ raise NotImplementedError()
32
+
33
+ def example_inputs(self) -> dict[str, Any]:
34
+ """
35
+ The example inputs for this component as a dictionary whose values are example inputs compatible with this component.
36
+ Keys of the dictionary are: raw, serialized
37
+ """
38
+ raise NotImplementedError()
39
+
40
+ # For backwards compatibility
41
+ def input_api_info(self) -> tuple[str, str]:
42
+ api_info = self.api_info()
43
+ types = api_info.get("serialized_input", [api_info["info"]["type"]] * 2) # type: ignore
44
+ return (types[0], types[1])
45
+
46
+ # For backwards compatibility
47
+ def output_api_info(self) -> tuple[str, str]:
48
+ api_info = self.api_info()
49
+ types = api_info.get("serialized_output", [api_info["info"]["type"]] * 2) # type: ignore
50
+ return (types[0], types[1])
51
+
52
+ def serialize(self, x: Any, load_dir: str | Path = "", allow_links: bool = False):
53
+ """
54
+ Convert data from human-readable format to serialized format for a browser.
55
+ """
56
+ return x
57
+
58
+ def deserialize(
59
+ self,
60
+ x: Any,
61
+ save_dir: str | Path | None = None,
62
+ root_url: str | None = None,
63
+ hf_token: str | None = None,
64
+ ):
65
+ """
66
+ Convert data from serialized format for a browser to human-readable format.
67
+ """
68
+ return x
69
+
70
+
71
class SimpleSerializable(Serializable):
    """General class that does not perform any serialization or deserialization."""

    def api_info(self) -> dict[str, bool | dict]:
        schema = serializer_types["SimpleSerializable"]
        return {"info": schema, "serialized_info": False}

    def example_inputs(self) -> dict[str, Any]:
        # Arbitrary passthrough data has no meaningful example.
        return {"raw": None, "serialized": None}
85
+
86
+
87
class StringSerializable(Serializable):
    """Expects a string as input/output but performs no serialization."""

    def api_info(self) -> dict[str, bool | dict]:
        schema = serializer_types["StringSerializable"]
        return {"info": schema, "serialized_info": False}

    def example_inputs(self) -> dict[str, Any]:
        return {"raw": "Howdy!", "serialized": "Howdy!"}
101
+
102
+
103
class ListStringSerializable(Serializable):
    """Expects a list of strings as input/output but performs no serialization."""

    def api_info(self) -> dict[str, bool | dict]:
        schema = serializer_types["ListStringSerializable"]
        return {"info": schema, "serialized_info": False}

    def example_inputs(self) -> dict[str, Any]:
        # Distinct list objects so callers mutating one cannot affect the other.
        return {"raw": ["Howdy!", "Merhaba"], "serialized": ["Howdy!", "Merhaba"]}
117
+
118
+
119
class BooleanSerializable(Serializable):
    """Expects a boolean as input/output but performs no serialization."""

    def api_info(self) -> dict[str, bool | dict]:
        schema = serializer_types["BooleanSerializable"]
        return {"info": schema, "serialized_info": False}

    def example_inputs(self) -> dict[str, Any]:
        return {"raw": True, "serialized": True}
133
+
134
+
135
class NumberSerializable(Serializable):
    """Expects a number (int/float) as input/output but performs no serialization."""

    def api_info(self) -> dict[str, bool | dict]:
        schema = serializer_types["NumberSerializable"]
        return {"info": schema, "serialized_info": False}

    def example_inputs(self) -> dict[str, Any]:
        return {"raw": 5, "serialized": 5}
149
+
150
+
151
class ImgSerializable(Serializable):
    """Expects a base64 string as input/output which is serialized to a filepath."""

    def serialized_info(self):
        return {"type": "string", "description": "filepath or URL to image"}

    def api_info(self) -> dict[str, bool | dict]:
        return {"info": serializer_types["ImgSerializable"], "serialized_info": True}

    def example_inputs(self) -> dict[str, Any]:
        return {
            "raw": media_data.BASE64_IMAGE,
            "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",
        }

    def serialize(
        self,
        x: str | None,
        load_dir: str | Path = "",
        allow_links: bool = False,
    ) -> str | None:
        """
        Convert from human-friendly version of a file (string filepath) to a serialized
        representation (base64).
        Parameters:
            x: String path to file to serialize
            load_dir: Path to directory containing x
        """
        if not x:
            return None
        if utils.is_http_url_like(x):
            return utils.encode_url_to_base64(x)
        local_path = Path(load_dir) / x
        return utils.encode_file_to_base64(local_path)

    def deserialize(
        self,
        x: str | None,
        save_dir: str | Path | None = None,
        root_url: str | None = None,
        hf_token: str | None = None,
    ) -> str | None:
        """
        Convert from serialized representation of a file (base64) to a human-friendly
        version (string filepath). Optionally, save the file to the directory specified by save_dir
        Parameters:
            x: Base64 representation of image to deserialize into a string filepath
            save_dir: Path to directory to save the deserialized image to
            root_url: Ignored
            hf_token: Ignored
        """
        if x is None or x == "":
            return None
        return utils.decode_base64_to_file(x, dir=save_dir).name
205
+
206
+
207
class FileSerializable(Serializable):
    """Expects a dict with base64 representation of object as input/output which is serialized to a filepath."""

    def __init__(self) -> None:
        # State for the is_stream branch of _deserialize_single(): the open
        # byte stream and the server-side name it was opened for.
        self.stream = None
        self.stream_name = None
        super().__init__()

    def serialized_info(self):
        return self._single_file_serialized_info()

    def _single_file_api_info(self):
        return {
            "info": serializer_types["SingleFileSerializable"],
            "serialized_info": True,
        }

    def _single_file_serialized_info(self):
        return {"type": "string", "description": "filepath or URL to file"}

    def _multiple_file_serialized_info(self):
        return {
            "type": "array",
            "description": "List of filepath(s) or URL(s) to files",
            "items": {"type": "string", "description": "filepath or URL to file"},
        }

    def _multiple_file_api_info(self):
        return {
            "info": serializer_types["MultipleFileSerializable"],
            "serialized_info": True,
        }

    def api_info(self) -> dict[str, dict | bool]:
        # Defaults to the single-file schema; components dealing in lists use
        # the _multiple_file_* variants.
        return self._single_file_api_info()

    def example_inputs(self) -> dict[str, Any]:
        return self._single_file_example_inputs()

    def _single_file_example_inputs(self) -> dict[str, Any]:
        return {
            "raw": {"is_file": False, "data": media_data.BASE64_FILE},
            "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf",
        }

    def _multiple_file_example_inputs(self) -> dict[str, Any]:
        return {
            "raw": [{"is_file": False, "data": media_data.BASE64_FILE}],
            "serialized": [
                "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf"
            ],
        }

    def _serialize_single(
        self,
        x: str | FileData | None,
        load_dir: str | Path = "",
        allow_links: bool = False,
    ) -> FileData | None:
        # Already-serialized dicts (and None) pass through untouched.
        if x is None or isinstance(x, dict):
            return x
        if utils.is_http_url_like(x):
            filename = x
            size = None
        else:
            filename = str(Path(load_dir) / x)
            size = Path(filename).stat().st_size
        return {
            "name": filename,
            # With allow_links, only the path is sent (is_file=True below) and
            # the base64 payload is omitted.
            "data": None
            if allow_links
            else utils.encode_url_or_file_to_base64(filename),
            "orig_name": Path(filename).name,
            "is_file": allow_links,
            "size": size,
        }

    def _setup_stream(self, url, hf_token):
        return utils.download_byte_stream(url, hf_token)

    def _deserialize_single(
        self,
        x: str | FileData | None,
        save_dir: str | None = None,
        root_url: str | None = None,
        hf_token: str | None = None,
    ) -> str | None:
        if x is None:
            return None
        if isinstance(x, str):
            # Bare string: treated as raw base64 file content.
            file_name = utils.decode_base64_to_file(x, dir=save_dir).name
        elif isinstance(x, dict):
            if x.get("is_file"):
                filepath = x.get("name")
                assert filepath is not None, f"The 'name' field is missing in {x}"
                if root_url is not None:
                    # Remote file: download a temporary copy from the host app.
                    file_name = utils.download_tmp_copy_of_file(
                        root_url + "file=" + filepath,
                        hf_token=hf_token,
                        dir=save_dir,
                    )
                else:
                    # Local file: copy so callers can freely move/delete it.
                    file_name = utils.create_tmp_copy_of_file(filepath, dir=save_dir)
            elif x.get("is_stream"):
                assert x["name"] and root_url and save_dir
                # (Re)open the byte stream when a new stream name arrives.
                if not self.stream or self.stream_name != x["name"]:
                    self.stream = self._setup_stream(
                        root_url + "stream/" + x["name"], hf_token=hf_token
                    )
                    self.stream_name = x["name"]
                chunk = next(self.stream)
                path = Path(save_dir or tempfile.gettempdir()) / secrets.token_hex(20)
                path.mkdir(parents=True, exist_ok=True)
                path = path / x.get("orig_name", "output")
                path.write_bytes(chunk)
                file_name = str(path)
            else:
                data = x.get("data")
                assert data is not None, f"The 'data' field is missing in {x}"
                file_name = utils.decode_base64_to_file(data, dir=save_dir).name
        else:
            raise ValueError(
                f"A FileSerializable component can only deserialize a string or a dict, not a {type(x)}: {x}"
            )
        return file_name

    def serialize(
        self,
        x: str | FileData | None | list[str | FileData | None],
        load_dir: str | Path = "",
        allow_links: bool = False,
    ) -> FileData | None | list[FileData | None]:
        """
        Convert from human-friendly version of a file (string filepath) to a
        serialized representation (base64)
        Parameters:
            x: String path to file to serialize
            load_dir: Path to directory containing x
            allow_links: Will allow path returns instead of raw file content
        """
        if x is None or x == "":
            return None
        if isinstance(x, list):
            return [self._serialize_single(f, load_dir, allow_links) for f in x]
        else:
            return self._serialize_single(x, load_dir, allow_links)

    def deserialize(
        self,
        x: str | FileData | None | list[str | FileData | None],
        save_dir: Path | str | None = None,
        root_url: str | None = None,
        hf_token: str | None = None,
    ) -> str | None | list[str | None]:
        """
        Convert from serialized representation of a file (base64) to a human-friendly
        version (string filepath). Optionally, save the file to the directory specified by `save_dir`
        Parameters:
            x: Base64 representation of file to deserialize into a string filepath
            save_dir: Path to directory to save the deserialized file to
            root_url: If this component is loaded from an external Space, this is the URL of the Space.
            hf_token: If this component is loaded from an external private Space, this is the access token for the Space
        """
        if x is None:
            return None
        if isinstance(save_dir, Path):
            save_dir = str(save_dir)
        if isinstance(x, list):
            return [
                self._deserialize_single(
                    f, save_dir=save_dir, root_url=root_url, hf_token=hf_token
                )
                for f in x
            ]
        else:
            return self._deserialize_single(
                x, save_dir=save_dir, root_url=root_url, hf_token=hf_token
            )
385
+
386
+
387
class VideoSerializable(FileSerializable):
    # Serializes videos as (video, subtitle) pairs; the subtitle slot is
    # always None on the serialize side and dropped on deserialize.
    def serialized_info(self):
        return {"type": "string", "description": "filepath or URL to video file"}

    def api_info(self) -> dict[str, dict | bool]:
        return {"info": serializer_types["FileSerializable"], "serialized_info": True}

    def example_inputs(self) -> dict[str, Any]:
        return {
            "raw": {"is_file": False, "data": media_data.BASE64_VIDEO},
            "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/video_sample.mp4",
        }

    def serialize(
        self, x: str | None, load_dir: str | Path = "", allow_links: bool = False
    ) -> tuple[FileData | None, None]:
        return (super().serialize(x, load_dir, allow_links), None)  # type: ignore

    def deserialize(
        self,
        x: tuple[FileData | None, FileData | None] | None,
        save_dir: Path | str | None = None,
        root_url: str | None = None,
        hf_token: str | None = None,
    ) -> str | tuple[str | None, str | None] | None:
        """
        Convert from serialized representation of a file (base64) to a human-friendly
        version (string filepath). Optionally, save the file to the directory specified by `save_dir`
        """
        if isinstance(x, (tuple, list)):
            assert len(x) == 2, f"Expected tuple of length 2. Received: {x}"
            x_as_list = [x[0], x[1]]
        else:
            raise ValueError(f"Expected tuple of length 2. Received: {x}")
        deserialized_file = super().deserialize(x_as_list, save_dir, root_url, hf_token)  # type: ignore
        if isinstance(deserialized_file, list):
            return deserialized_file[0]  # ignore subtitles
        # NOTE(review): implicitly returns None when the parent call does not
        # yield a list — since the input passed up is always a list, this path
        # looks unreachable; confirm before relying on it.
424
+
425
+
426
class JSONSerializable(Serializable):
    """Moves JSON-compatible data between a filepath and its parsed payload."""

    def serialized_info(self):
        return {"type": "string", "description": "filepath to JSON file"}

    def api_info(self) -> dict[str, dict | bool]:
        return {"info": serializer_types["JSONSerializable"], "serialized_info": True}

    def example_inputs(self) -> dict[str, Any]:
        return {"raw": {"a": 1, "b": 2}, "serialized": None}

    def serialize(
        self,
        x: str | None,
        load_dir: str | Path = "",
        allow_links: bool = False,
    ) -> dict | list | None:
        """
        Convert from the human-friendly form (string path to a JSON file) to
        the serialized form (the parsed JSON payload).
        Parameters:
            x: String path to json file to read to get json string
            load_dir: Path to directory containing x
            allow_links: Unused for JSON data
        """
        if x is None or x == "":
            return None
        json_path = Path(load_dir) / x
        return utils.file_to_json(json_path)

    def deserialize(
        self,
        x: str | dict | list,
        save_dir: str | Path | None = None,
        root_url: str | None = None,
        hf_token: str | None = None,
    ) -> str | None:
        """
        Convert from the serialized form (JSON payload) to the human-friendly
        form (string path to a JSON file written under `save_dir`).
        Parameters:
            x: Json string
            save_dir: Path to save the deserialized json file to
            root_url: Ignored
            hf_token: Ignored
        """
        if x is None:
            return None
        return utils.dict_or_str_to_json_file(x, dir=save_dir).name
475
+
476
+
477
class GallerySerializable(Serializable):
    """Moves a gallery between a directory of images (plus captions.json) and
    a list of [image, caption] pairs."""

    def serialized_info(self):
        return {
            "type": "string",
            "description": "path to directory with images and a file associating images with captions called captions.json",
        }

    def api_info(self) -> dict[str, dict | bool]:
        return {
            "info": serializer_types["GallerySerializable"],
            "serialized_info": True,
        }

    def example_inputs(self) -> dict[str, Any]:
        bus_url = "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
        return {
            "raw": [media_data.BASE64_IMAGE] * 2,
            "serialized": [bus_url] * 2,
        }

    def serialize(
        self, x: str | None, load_dir: str | Path = "", allow_links: bool = False
    ) -> list[list[str | None]] | None:
        if x is None or x == "":
            return None
        # captions.json maps image filename -> caption for every gallery entry.
        with (Path(x) / "captions.json").open("r") as fp:
            captions = json.load(fp)
        entries = []
        for file_name, caption in captions.items():
            img = FileSerializable().serialize(file_name, allow_links=allow_links)
            entries.append([img, caption])
        return entries

    def deserialize(
        self,
        x: list[list[str | None]] | None,
        save_dir: str = "",
        root_url: str | None = None,
        hf_token: str | None = None,
    ) -> None | str:
        if x is None:
            return None
        gallery_dir = Path(save_dir) / str(uuid.uuid4())
        gallery_dir.mkdir(exist_ok=True, parents=True)
        captions = {}
        for entry in x:
            # Entries may be bare images or [image, caption] pairs.
            if isinstance(entry, (list, tuple)):
                img_data, caption = entry
            else:
                img_data, caption = entry, None
            name = FileSerializable().deserialize(
                img_data, gallery_dir, root_url=root_url, hf_token=hf_token
            )
            captions[name] = caption
        with (gallery_dir / "captions.json").open("w") as fp:
            json.dump(captions, fp)
        return os.path.abspath(gallery_dir)
538
+
539
+
540
# Lookup from serializer class name -> class. Covers direct subclasses of
# Serializable plus one further level (e.g. VideoSerializable under
# FileSerializable); deeper nesting would not be picked up.
SERIALIZER_MAPPING = {}
for cls in Serializable.__subclasses__():
    SERIALIZER_MAPPING[cls.__name__] = cls
    for subcls in cls.__subclasses__():
        SERIALIZER_MAPPING[subcls.__name__] = subcls

# Extra aliases — presumably kept for backwards compatibility with names used
# by older server payloads (TODO confirm against callers).
SERIALIZER_MAPPING["Serializable"] = SimpleSerializable
SERIALIZER_MAPPING["File"] = FileSerializable
SERIALIZER_MAPPING["UploadButton"] = FileSerializable

# Maps lower-cased Gradio component names to the serializer that handles them.
COMPONENT_MAPPING: dict[str, type] = {
    "textbox": StringSerializable,
    "number": NumberSerializable,
    "slider": NumberSerializable,
    "checkbox": BooleanSerializable,
    "checkboxgroup": ListStringSerializable,
    "radio": StringSerializable,
    "dropdown": SimpleSerializable,
    "image": ImgSerializable,
    "video": FileSerializable,
    "audio": FileSerializable,
    "file": FileSerializable,
    "dataframe": JSONSerializable,
    "timeseries": JSONSerializable,
    "state": SimpleSerializable,
    "button": StringSerializable,
    "uploadbutton": FileSerializable,
    "colorpicker": StringSerializable,
    "label": JSONSerializable,
    "highlightedtext": JSONSerializable,
    "json": JSONSerializable,
    "html": StringSerializable,
    "gallery": GallerySerializable,
    "chatbot": JSONSerializable,
    "model3d": FileSerializable,
    "plot": JSONSerializable,
    "barplot": JSONSerializable,
    "lineplot": JSONSerializable,
    "scatterplot": JSONSerializable,
    "markdown": StringSerializable,
    "code": StringSerializable,
    "annotatedimage": JSONSerializable,
}
testbed/gradio-app__gradio/client/python/gradio_client/templates/discord_chat.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import os
3
+ import threading
4
+ from threading import Event
5
+ from typing import Optional
6
+
7
+ import discord
8
+ import gradio as gr
9
+ from discord import Permissions
10
+ from discord.ext import commands
11
+ from discord.utils import oauth_url
12
+
13
+ import gradio_client as grc
14
+ from gradio_client.utils import QueueError
15
+
16
# Set once the bot has logged in (on_ready) — or immediately when no token is
# configured — so the main thread can proceed to build the Gradio UI.
event = Event()

# Discord bot token; when unset the app renders setup instructions instead.
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
19
+
20
+
21
async def wait(job):
    """Poll ``job`` until it reports done, yielding control between checks."""
    while True:
        if job.done():
            return
        await asyncio.sleep(0.2)
24
+
25
+
26
def get_client(session: Optional[str] = None) -> grc.Client:
    """Create a fresh gradio client for the target Space, reusing `session` as
    the session hash when one is supplied."""
    hf_token = os.getenv("HF_TOKEN")
    client = grc.Client("<<app-src>>", hf_token=hf_token)
    if session:
        client.session_hash = session
    return client
31
+
32
+
33
def truncate_response(response: str) -> str:
    """Clamp a reply to Discord's 2000-character message limit, appending a
    notice when truncation happens."""
    ending = "...\nTruncating response to 2000 characters due to discord api limits."
    if len(response) <= 2000:
        return response
    return response[: 2000 - len(ending)] + ending
39
+
40
+
41
# The bot needs the message-content intent to read replies inside threads.
intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="/", intents=intents)


@bot.event
async def on_ready():
    # Sync slash commands with Discord, then unblock the main thread via `event`.
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    event.set()
    print("------")


# Per-thread session state: the gradio client serving the thread, and the
# user who started it (the only one allowed to continue the conversation).
thread_to_client = {}
thread_to_user = {}
57
+
58
+
59
@bot.hybrid_command(
    name="<<command-name>>",
    description="Enter some text to chat with the bot! Like this: /<<command-name>> Hello, how are you?",
)
async def chat(ctx, prompt: str):
    # Entry point for a new conversation: creates a thread named after the
    # prompt, runs the prediction on the backing gradio Space, and posts the
    # (possibly truncated) reply into the thread.
    if ctx.author.id == bot.user.id:
        return
    try:
        message = await ctx.send("Creating thread...")

        thread = await message.create_thread(name=prompt)
        loop = asyncio.get_running_loop()
        # Client construction does blocking I/O, so build it off the event loop.
        client = await loop.run_in_executor(None, get_client, None)
        job = client.submit(prompt, api_name="/<<api-name>>")
        await wait(job)

        try:
            job.result()
            response = job.outputs()[-1]
            await thread.send(truncate_response(response))
            # Remember the client and initiating user so follow-up messages
            # in this thread continue the same session (see on_message).
            thread_to_client[thread.id] = client
            thread_to_user[thread.id] = ctx.author.id
        except QueueError:
            await thread.send(
                "The gradio space powering this bot is really busy! Please try again later!"
            )

    except Exception as e:
        print(f"{e}")
88
+
89
+
90
async def continue_chat(message):
    """Continue an existing thread's conversation using the session client
    that chat() stored for this thread."""
    try:
        client = thread_to_client[message.channel.id]
        prompt = message.content
        job = client.submit(prompt, api_name="/<<api-name>>")
        await wait(job)
        try:
            job.result()
            response = job.outputs()[-1]
            await message.reply(truncate_response(response))
        except QueueError:
            await message.reply(
                "The gradio space powering this bot is really busy! Please try again later!"
            )

    except Exception as e:
        print(f"Error: {e}")
108
+
109
+
110
@bot.event
async def on_message(message):
    """Route incoming messages: continue tracked threads, or fall back to
    normal command processing."""
    # Ignore bot-authored messages; inside a tracked thread, only the user
    # who started it may continue the conversation.
    try:
        if not message.author.bot:
            if message.channel.id in thread_to_user:
                if thread_to_user[message.channel.id] == message.author.id:
                    await continue_chat(message)
            else:
                await bot.process_commands(message)

    except Exception as e:
        print(f"Error: {e}")
123
+
124
+
125
# running in thread
def run_bot():
    # Without a token the bot cannot start; unblock the main thread anyway so
    # the Gradio app can render the setup instructions below.
    if not DISCORD_TOKEN:
        print("DISCORD_TOKEN NOT SET")
        event.set()
    else:
        bot.run(DISCORD_TOKEN)


# bot.run() blocks, so it gets a dedicated thread; the main thread waits
# until on_ready (or the missing-token branch above) sets the event.
threading.Thread(target=run_bot).start()

event.wait()
137
+
138
# Build the Markdown shown on the Space's page: setup instructions when no
# token is configured, otherwise an invite link plus usage notes.
# Fixes vs. previous revision: "does not have the same name" (the "not" was
# missing, inverting the warning) and "Privileged" (was misspelled
# "Priviledged" — it names a section of the Discord developer UI).
if not DISCORD_TOKEN:
    welcome_message = """

## You have not specified a DISCORD_TOKEN, which means you have not created a bot account. Please follow these steps:

### 1. Go to https://discord.com/developers/applications and click 'New Application'

### 2. Give your bot a name 🤖

![](https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/BotName.png)

## 3. In Settings > Bot, click the 'Reset Token' button to get a new token. Write it down and keep it safe 🔐

![](https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/ResetToken.png)

## 4. Optionally make the bot public if you want anyone to be able to add it to their servers

## 5. Scroll down and enable 'Message Content Intent' under 'Privileged Gateway Intents'

![](https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/MessageContentIntent.png)

## 6. Save your changes!

## 7. The token from step 3 is the DISCORD_TOKEN. Rerun the deploy_discord command, e.g client.deploy_discord(discord_bot_token=DISCORD_TOKEN, ...), or add the token as a space secret manually.
"""
else:
    # 326417525824 encodes the permission set the bot requests in its invite
    # link (NOTE(review): confirm the intended permission bits).
    permissions = Permissions(326417525824)
    url = oauth_url(bot.user.id, permissions=permissions)
    welcome_message = f"""
## Add this bot to your server by clicking this link:

{url}

## How to use it?

The bot can be triggered via `/<<command-name>>` followed by your text prompt.

This will create a thread with the bot's response to your text prompt.
You can reply in the thread (without `/<<command-name>>`) to continue the conversation.
In the thread, the bot will only reply to the original author of the command.

⚠️ Note ⚠️: Please make sure this bot's command does not have the same name as another command in your server.

⚠️ Note ⚠️: Bot commands do not work in DMs with the bot as of now.
"""
183
+
184
+
185
# Minimal Gradio UI: the Space's page just shows the welcome/setup text
# assembled above.
with gr.Blocks() as demo:
    gr.Markdown(
        f"""
    # Discord bot of <<app-src>>
    {welcome_message}
    """
    )

demo.launch()
testbed/gradio-app__gradio/client/python/gradio_client/types.json ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "SimpleSerializable": {
3
+ "type": {},
4
+ "description": "any valid value"
5
+ },
6
+ "StringSerializable": {
7
+ "type": "string"
8
+ },
9
+ "ListStringSerializable": {
10
+ "type": "array",
11
+ "items": {
12
+ "type": "string"
13
+ }
14
+ },
15
+ "BooleanSerializable": {
16
+ "type": "boolean"
17
+ },
18
+ "NumberSerializable": {
19
+ "type": "number"
20
+ },
21
+ "ImgSerializable": {
22
+ "type": "string",
23
+ "description": "base64 representation of an image"
24
+ },
25
+ "FileSerializable": {
26
+ "oneOf": [
27
+ {
28
+ "type": "string",
29
+ "description": "filepath or URL to file"
30
+ },
31
+ {
32
+ "type": "object",
33
+ "properties": {
34
+ "name": { "type": "string", "description": "name of file" },
35
+ "data": {
36
+ "type": "string",
37
+ "description": "base64 representation of file"
38
+ },
39
+ "size": {
40
+ "type": "integer",
41
+ "description": "size of file in bytes"
42
+ },
43
+ "is_file": {
44
+ "type": "boolean",
45
+ "description": "true if the file has been uploaded to the server"
46
+ },
47
+ "orig_name": {
48
+ "type": "string",
49
+ "description": "original name of the file"
50
+ }
51
+ },
52
+ "required": ["name", "data"]
53
+ },
54
+ {
55
+ "type": "array",
56
+ "items": {
57
+ "anyOf": [
58
+ {
59
+ "type": "string",
60
+ "description": "filepath or URL to file"
61
+ },
62
+ {
63
+ "type": "object",
64
+ "properties": {
65
+ "name": { "type": "string", "description": "name of file" },
66
+ "data": {
67
+ "type": "string",
68
+ "description": "base64 representation of file"
69
+ },
70
+ "size": {
71
+ "type": "integer",
72
+ "description": "size of file in bytes"
73
+ },
74
+ "is_file": {
75
+ "type": "boolean",
76
+ "description": "true if the file has been uploaded to the server"
77
+ },
78
+ "orig_name": {
79
+ "type": "string",
80
+ "description": "original name of the file"
81
+ }
82
+ },
83
+ "required": ["name", "data"]
84
+ }
85
+ ]
86
+ }
87
+ }
88
+ ]
89
+ },
90
+ "SingleFileSerializable": {
91
+ "oneOf": [
92
+ {
93
+ "type": "string",
94
+ "description": "filepath or URL to file"
95
+ },
96
+ {
97
+ "type": "object",
98
+ "properties": {
99
+ "name": { "type": "string", "description": "name of file" },
100
+ "data": {
101
+ "type": "string",
102
+ "description": "base64 representation of file"
103
+ },
104
+ "size": {
105
+ "type": "integer",
106
+ "description": "size of file in bytes"
107
+ },
108
+ "is_file": {
109
+ "type": "boolean",
110
+ "description": "true if the file has been uploaded to the server"
111
+ },
112
+ "orig_name": {
113
+ "type": "string",
114
+ "description": "original name of the file"
115
+ }
116
+ },
117
+ "required": ["name", "data"]
118
+ }
119
+ ]
120
+ },
121
+ "MultipleFileSerializable": {
122
+ "type": "array",
123
+ "items": {
124
+ "anyOf": [
125
+ {
126
+ "type": "string",
127
+ "description": "filepath or URL to file"
128
+ },
129
+ {
130
+ "type": "object",
131
+ "properties": {
132
+ "name": { "type": "string", "description": "name of file" },
133
+ "data": {
134
+ "type": "string",
135
+ "description": "base64 representation of file"
136
+ },
137
+ "size": {
138
+ "type": "integer",
139
+ "description": "size of file in bytes"
140
+ },
141
+ "is_file": {
142
+ "type": "boolean",
143
+ "description": "true if the file has been uploaded to the server"
144
+ },
145
+ "orig_name": {
146
+ "type": "string",
147
+ "description": "original name of the file"
148
+ }
149
+ },
150
+ "required": ["name", "data"]
151
+ }
152
+ ]
153
+ }
154
+ },
155
+ "JSONSerializable": {
156
+ "type": {},
157
+ "description": "any valid json"
158
+ },
159
+ "GallerySerializable": {
160
+ "type": "array",
161
+ "items": {
162
+ "type": "array",
163
+ "items": false,
164
+ "maxSize": 2,
165
+ "minSize": 2,
166
+ "prefixItems": [
167
+ {
168
+ "type": "object",
169
+ "properties": {
170
+ "name": { "type": "string", "description": "name of file" },
171
+ "data": {
172
+ "type": "string",
173
+ "description": "base64 representation of file"
174
+ },
175
+ "size": {
176
+ "type": "integer",
177
+ "description": "size of image in bytes"
178
+ },
179
+ "is_file": {
180
+ "type": "boolean",
181
+ "description": "true if the file has been uploaded to the server"
182
+ },
183
+ "orig_name": {
184
+ "type": "string",
185
+ "description": "original name of the file"
186
+ }
187
+ },
188
+ "required": ["name", "data"]
189
+ },
190
+ {
191
+ "oneOf": [
192
+ { "type": "string", "description": "caption of image" },
193
+ { "type": "null" }
194
+ ]
195
+ }
196
+ ]
197
+ }
198
+ }
199
+ }
testbed/gradio-app__gradio/client/python/gradio_client/utils.py ADDED
@@ -0,0 +1,598 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import base64
5
+ import json
6
+ import mimetypes
7
+ import os
8
+ import pkgutil
9
+ import secrets
10
+ import shutil
11
+ import tempfile
12
+ import warnings
13
+ from concurrent.futures import CancelledError
14
+ from dataclasses import dataclass, field
15
+ from datetime import datetime
16
+ from enum import Enum
17
+ from pathlib import Path
18
+ from threading import Lock
19
+ from typing import Any, Callable, Optional
20
+
21
+ import fsspec.asyn
22
+ import httpx
23
+ import huggingface_hub
24
+ import requests
25
+ from huggingface_hub import SpaceStage
26
+ from websockets.legacy.protocol import WebSocketCommonProtocol
27
+
28
# Route suffixes appended to a gradio app's base URL.
API_URL = "api/predict/"
WS_URL = "queue/join"
UPLOAD_URL = "upload"
CONFIG_URL = "config"
API_INFO_URL = "info"
RAW_API_INFO_URL = "info?serialize=False"
# Hosted service that computes API info for a Space.
SPACE_FETCHER_URL = "https://gradio-space-api-fetcher-v2.hf.space/api"
RESET_URL = "reset"
SPACE_URL = "https://hf.space/{}"

# Layout-only component types that carry no data payload and are skipped
# when mapping endpoints to components.
SKIP_COMPONENTS = {
    "state",
    "row",
    "column",
    "tabs",
    "tab",
    "tabitem",
    "box",
    "form",
    "accordion",
    "group",
    "interpretation",
    "dataset",
}
# Component type whose payload is server-side session state.
STATE_COMPONENT = "state"
# Space runtime stages in which the app cannot serve predictions.
INVALID_RUNTIME = [
    SpaceStage.NO_APP_FILE,
    SpaceStage.CONFIG_ERROR,
    SpaceStage.BUILD_ERROR,
    SpaceStage.RUNTIME_ERROR,
    SpaceStage.PAUSED,
]
60
+
61
+
62
def get_package_version() -> str:
    """Read the client version out of the bundled ``package.json``.

    Returns:
        The "version" field of the package metadata, or "" if the file
        cannot be located or parsed.
    """
    try:
        raw = pkgutil.get_data(__name__, "package.json")
        metadata = json.loads(raw.decode("utf-8").strip())  # type: ignore
        return metadata.get("version", "")
    except Exception:
        return ""


__version__ = get_package_version()
75
+
76
+
77
class TooManyRequestsError(Exception):
    """Raised when the API returns a 429 status code."""


class QueueError(Exception):
    """Raised when the queue is full or there is an issue adding a job to the queue."""


class InvalidAPIEndpointError(Exception):
    """Raised when the API endpoint is invalid."""


class SpaceDuplicationError(Exception):
    """Raised when something goes wrong with a Space Duplication."""
99
+
100
+
101
class Status(Enum):
    """Status codes presented to client users."""

    STARTING = "STARTING"
    JOINING_QUEUE = "JOINING_QUEUE"
    QUEUE_FULL = "QUEUE_FULL"
    IN_QUEUE = "IN_QUEUE"
    SENDING_DATA = "SENDING_DATA"
    PROCESSING = "PROCESSING"
    ITERATING = "ITERATING"
    PROGRESS = "PROGRESS"
    FINISHED = "FINISHED"
    CANCELLED = "CANCELLED"

    @staticmethod
    def ordering(status: Status) -> int:
        """Rank of ``status`` within the job lifecycle. Helpful for testing."""
        lifecycle = (
            Status.STARTING,
            Status.JOINING_QUEUE,
            Status.QUEUE_FULL,
            Status.IN_QUEUE,
            Status.SENDING_DATA,
            Status.PROCESSING,
            Status.PROGRESS,
            Status.ITERATING,
            Status.FINISHED,
            Status.CANCELLED,
        )
        return lifecycle.index(status)

    def __lt__(self, other: Status):
        # Comparison follows lifecycle order, not enum declaration order.
        return Status.ordering(self) < Status.ordering(other)

    @staticmethod
    def msg_to_status(msg: str) -> Status:
        """Map the raw message from the backend to the status code presented to users."""
        lookup = {
            "send_hash": Status.JOINING_QUEUE,
            "queue_full": Status.QUEUE_FULL,
            "estimation": Status.IN_QUEUE,
            "send_data": Status.SENDING_DATA,
            "process_starts": Status.PROCESSING,
            "process_generating": Status.ITERATING,
            "process_completed": Status.FINISHED,
            "progress": Status.PROGRESS,
        }
        return lookup[msg]
148
+
149
+
150
@dataclass
class ProgressUnit:
    """One unit of progress reported by the backend; all fields are optional."""

    index: Optional[int]
    length: Optional[int]
    unit: Optional[str]
    progress: Optional[float]
    desc: Optional[str]

    @classmethod
    def from_ws_msg(cls, data: list[dict]) -> list[ProgressUnit]:
        """Build units from the raw ``progress_data`` payload of a ws message."""
        units = []
        for entry in data:
            units.append(
                cls(
                    index=entry.get("index"),
                    length=entry.get("length"),
                    unit=entry.get("unit"),
                    progress=entry.get("progress"),
                    desc=entry.get("desc"),
                )
            )
        return units
170
+
171
+
172
@dataclass
class StatusUpdate:
    """Update message sent from the worker thread to the Job on the main thread."""

    code: Status  # current lifecycle stage
    rank: int | None  # position in the queue, if queued
    queue_size: int | None  # total queue length, if reported
    eta: float | None  # estimated seconds until processing, if reported
    success: bool | None  # outcome flag once finished
    time: datetime | None  # wall-clock time of this update
    progress_data: list[ProgressUnit] | None  # granular progress, if reported
183
+
184
+
185
def create_initial_status_update():
    """Return the StatusUpdate a freshly created job starts out with:
    STARTING, timestamped now, with no queue statistics yet."""
    return StatusUpdate(
        code=Status.STARTING,
        rank=None,
        queue_size=None,
        eta=None,
        success=None,
        time=datetime.now(),
        progress_data=None,
    )
195
+
196
+
197
@dataclass
class JobStatus:
    """The job status.

    Keeps track of the latest status update and intermediate outputs (not yet implemented).
    """

    latest_status: StatusUpdate = field(default_factory=create_initial_status_update)
    outputs: list[Any] = field(default_factory=list)
206
+
207
+
208
@dataclass
class Communicator:
    """Helper class to help communicate between the worker thread and main thread."""

    lock: Lock  # guards concurrent access to ``job``
    job: JobStatus  # shared, mutable job state
    prediction_processor: Callable[..., tuple]  # applied to raw output data
    reset_url: str  # endpoint used to reset iterator state on cancel
    should_cancel: bool = False  # set by the main thread to request cancellation
217
+
218
+
219
+ ########################
220
+ # Network utils
221
+ ########################
222
+
223
+
224
def is_http_url_like(possible_url: str) -> bool:
    """
    Check if the given string looks like an HTTP(S) URL.
    """
    for scheme in ("http://", "https://"):
        if possible_url.startswith(scheme):
            return True
    return False
229
+
230
+
231
def probe_url(possible_url: str) -> bool:
    """
    Probe the given URL to see if it responds with a 200 status code (to HEAD, then to GET).
    """
    headers = {"User-Agent": "gradio (https://gradio.app/; team@gradio.app)"}
    try:
        with requests.session() as sess:
            head = sess.head(possible_url, headers=headers)
            if head.status_code != 405:
                return head.ok
            # Some servers reject HEAD outright; retry with a plain GET.
            return sess.get(possible_url, headers=headers).ok
    except Exception:
        # Any network/DNS/protocol failure counts as "not reachable".
        return False
244
+
245
+
246
def is_valid_url(possible_url: str) -> bool:
    """
    Check if the given string is a valid URL.

    Deprecated: prefer is_http_url_like() / probe_url().
    """
    warnings.warn(
        "is_valid_url should not be used. "
        "Use is_http_url_like() and probe_url(), as suitable, instead.",
    )
    if not is_http_url_like(possible_url):
        return False
    return probe_url(possible_url)
255
+
256
+
257
async def get_pred_from_ws(
    websocket: WebSocketCommonProtocol,
    data: str,
    hash_data: str,
    helper: Communicator | None = None,
) -> dict[str, Any]:
    # Drives one prediction over the queue websocket protocol: answers the
    # server's "send_hash"/"send_data" prompts, relays status updates to the
    # (optional) Communicator, and returns the final "process_completed" output.
    completed = False
    resp = {}
    while not completed:
        # Receive message in the background so that we can
        # cancel even while running a long pred
        task = asyncio.create_task(websocket.recv())
        while not task.done():
            if helper:
                with helper.lock:
                    if helper.should_cancel:
                        # Need to reset the iterator state since the client
                        # will not reset the session
                        async with httpx.AsyncClient() as http:
                            reset = http.post(
                                helper.reset_url, json=json.loads(hash_data)
                            )
                            # Retrieve cancel exception from task
                            # otherwise will get nasty warning in console
                            task.cancel()
                            await asyncio.gather(task, reset, return_exceptions=True)
                        raise CancelledError()
            # Need to suspend this coroutine so that task actually runs
            await asyncio.sleep(0.01)
        msg = task.result()
        resp = json.loads(msg)
        if helper:
            with helper.lock:
                # Translate the raw ws message into a StatusUpdate for the Job.
                has_progress = "progress_data" in resp
                status_update = StatusUpdate(
                    code=Status.msg_to_status(resp["msg"]),
                    queue_size=resp.get("queue_size"),
                    rank=resp.get("rank", None),
                    success=resp.get("success"),
                    time=datetime.now(),
                    eta=resp.get("rank_eta"),
                    progress_data=ProgressUnit.from_ws_msg(resp["progress_data"])
                    if has_progress
                    else None,
                )
                # Intermediate (generator) outputs are collected as they stream in;
                # the final FINISHED payload is returned below instead.
                output = resp.get("output", {}).get("data", [])
                if output and status_update.code != Status.FINISHED:
                    try:
                        result = helper.prediction_processor(*output)
                    except Exception as e:
                        result = [e]
                    helper.job.outputs.append(result)
                helper.job.latest_status = status_update
        if resp["msg"] == "queue_full":
            raise QueueError("Queue is full! Please try again.")
        if resp["msg"] == "send_hash":
            await websocket.send(hash_data)
        elif resp["msg"] == "send_data":
            await websocket.send(data)
        completed = resp["msg"] == "process_completed"
    return resp["output"]
318
+
319
+
320
+ ########################
321
+ # Data processing utils
322
+ ########################
323
+
324
+
325
def download_tmp_copy_of_file(
    url_path: str, hf_token: str | None = None, dir: str | None = None
) -> str:
    """Download ``url_path`` into a fresh temp directory and return the local path.

    The file lands in ``<dir or tempdir>/<random hex>/<basename>``; ``hf_token``,
    if given, is sent as a Bearer token.
    """
    if dir is not None:
        os.makedirs(dir, exist_ok=True)
    headers = {"Authorization": "Bearer " + hf_token} if hf_token else {}
    target_dir = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
    target_dir.mkdir(exist_ok=True, parents=True)
    target = target_dir / Path(url_path).name

    # Stream the body straight to disk so large files never sit in memory.
    with requests.get(url_path, headers=headers, stream=True) as r:
        r.raise_for_status()
        with open(target, "wb") as f:
            shutil.copyfileobj(r.raw, f)
    return str(target.resolve())
340
+
341
+
342
def create_tmp_copy_of_file(file_path: str, dir: str | None = None) -> str:
    """Copy ``file_path`` (metadata included) into a fresh random temp
    directory and return the absolute path of the copy."""
    target_dir = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
    target_dir.mkdir(exist_ok=True, parents=True)
    target = target_dir / Path(file_path).name
    shutil.copy2(file_path, target)
    return str(target.resolve())
348
+
349
+
350
def get_mimetype(filename: str) -> str | None:
    """Guess a file's mimetype from its name, normalizing a few audio types."""
    # mimetypes does not know about WebVTT subtitles.
    if filename.endswith(".vtt"):
        return "text/vtt"
    guessed = mimetypes.guess_type(filename)[0]
    if guessed is None:
        return None
    # Prefer the unprefixed names for wav/flac audio.
    return guessed.replace("x-wav", "wav").replace("x-flac", "flac")
357
+
358
+
359
def get_extension(encoding: str) -> str | None:
    """Derive a file extension (without the dot) from a data-URL-style string."""
    # mimetypes only knows wav under its x- prefixed name.
    encoding = encoding.replace("audio/wav", "audio/x-wav")
    guessed = mimetypes.guess_type(encoding)[0]
    if guessed == "audio/flac":  # flac is not supported by mimetypes
        return "flac"
    if guessed is None:
        return None
    ext = mimetypes.guess_extension(guessed)
    if ext is not None and ext.startswith("."):
        ext = ext[1:]
    return ext
370
+
371
+
372
def encode_file_to_base64(f: str | Path):
    """Encode a local file as a ``data:<mime>;base64,...`` string."""
    with open(f, "rb") as fh:
        payload = str(base64.b64encode(fh.read()), "utf-8")
    mimetype = get_mimetype(str(f))
    mime_part = mimetype if mimetype is not None else ""
    return f"data:{mime_part};base64,{payload}"
383
+
384
+
385
def encode_url_to_base64(url: str):
    """Fetch ``url`` and encode the response body as a ``data:`` base64 string."""
    resp = requests.get(url)
    resp.raise_for_status()
    payload = str(base64.b64encode(resp.content), "utf-8")
    mimetype = get_mimetype(url)
    mime_part = mimetype if mimetype is not None else ""
    return f"data:{mime_part};base64,{payload}"
394
+
395
+
396
def encode_url_or_file_to_base64(path: str | Path):
    """Dispatch to URL or file base64 encoding depending on what ``path`` looks like."""
    as_str = str(path)
    if is_http_url_like(as_str):
        return encode_url_to_base64(as_str)
    return encode_file_to_base64(as_str)
401
+
402
+
403
def download_byte_stream(url: str, hf_token=None):
    """Stream ``url``, yielding each ``bytes`` chunk as it arrives and, after
    the stream is exhausted, one trailing ``bytearray`` with the whole body."""
    buffer = bytearray()
    headers = {"Authorization": "Bearer " + hf_token} if hf_token else {}
    with httpx.stream("GET", url, headers=headers) as r:
        for chunk in r.iter_bytes():
            buffer += chunk
            yield chunk
    yield buffer
411
+
412
+
413
def decode_base64_to_binary(encoding: str) -> tuple[bytes, str | None]:
    """Split a data-URL into ``(raw bytes, guessed extension or None)``."""
    extension = get_extension(encoding)
    # Everything after the last comma is the base64 payload.
    payload = encoding.rsplit(",", 1)[-1]
    return base64.b64decode(payload), extension
417
+
418
+
419
def strip_invalid_filename_characters(filename: str, max_bytes: int = 200) -> str:
    """Strips invalid characters from a filename and ensures that the file_length is less than `max_bytes` bytes."""
    allowed = "._- "
    cleaned = "".join(ch for ch in filename if ch.isalnum() or ch in allowed)
    # Trim one character at a time so multi-byte characters never get split.
    while cleaned and len(cleaned.encode()) > max_bytes:
        cleaned = cleaned[:-1]
    return cleaned
430
+
431
+
432
def sanitize_parameter_names(original_name: str) -> str:
    """Cleans up a Python parameter name to make the API info more readable."""
    kept = [ch for ch in original_name if ch.isalnum() or ch in " _"]
    return "".join(kept).replace(" ", "_").lower()
439
+
440
+
441
def decode_base64_to_file(
    encoding: str,
    file_path: str | None = None,
    dir: str | Path | None = None,
    prefix: str | None = None,
):
    """Write a data-URL payload to a NamedTemporaryFile and return the (open) file object.

    When ``file_path`` is given and ``prefix`` is not, the original filename
    seeds the temp file's prefix and suffix; otherwise the extension is
    guessed from the encoding itself.
    """
    target_dir = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
    target_dir.mkdir(exist_ok=True, parents=True)
    data, extension = decode_base64_to_binary(encoding)
    if file_path is not None and prefix is None:
        filename = Path(file_path).name
        prefix = filename
        if "." in filename:
            # Split the original filename into stem (prefix) and extension (suffix).
            dot = filename.index(".")
            prefix = filename[:dot]
            extension = filename[dot + 1 :]

    if prefix is not None:
        prefix = strip_invalid_filename_characters(prefix)

    if extension is None:
        file_obj = tempfile.NamedTemporaryFile(
            delete=False, prefix=prefix, dir=target_dir
        )
    else:
        file_obj = tempfile.NamedTemporaryFile(
            delete=False,
            prefix=prefix,
            suffix="." + extension,
            dir=target_dir,
        )
    file_obj.write(data)
    file_obj.flush()
    return file_obj
474
+
475
+
476
def dict_or_str_to_json_file(jsn: str | dict | list, dir: str | Path | None = None):
    """Serialize ``jsn`` (already-parsed, or a JSON string) into a temp
    ``.json`` file and return the (open) file object."""
    if dir is not None:
        os.makedirs(dir, exist_ok=True)

    file_obj = tempfile.NamedTemporaryFile(
        delete=False, suffix=".json", dir=dir, mode="w+"
    )
    parsed = json.loads(jsn) if isinstance(jsn, str) else jsn
    json.dump(parsed, file_obj)
    file_obj.flush()
    return file_obj
488
+
489
+
490
def file_to_json(file_path: str | Path) -> dict | list:
    """Parse the JSON document stored at ``file_path``."""
    with open(file_path) as fh:
        return json.load(fh)
493
+
494
+
495
+ ###########################
496
+ # HuggingFace Hub API Utils
497
+ ###########################
498
def set_space_timeout(
    space_id: str,
    hf_token: str | None = None,
    timeout_in_seconds: int = 300,
):
    """Set a Space's sleep timeout via the HF Hub API.

    Raises:
        SpaceDuplicationError: chained to the HTTP error if the request fails.
    """
    headers = huggingface_hub.utils.build_hf_headers(
        token=hf_token,
        library_name="gradio_client",
        library_version=__version__,
    )
    response = requests.post(
        f"https://huggingface.co/api/spaces/{space_id}/sleeptime",
        json={"seconds": timeout_in_seconds},
        headers=headers,
    )
    try:
        huggingface_hub.utils.hf_raise_for_status(response)
    except huggingface_hub.utils.HfHubHTTPError as err:
        raise SpaceDuplicationError(
            f"Could not set sleep timeout on duplicated Space. Please visit {SPACE_URL.format(space_id)} "
            "to set a timeout manually to reduce billing charges."
        ) from err
520
+
521
+
522
+ ########################
523
+ # Misc utils
524
+ ########################
525
+
526
+
527
def synchronize_async(func: Callable, *args, **kwargs) -> Any:
    """
    Runs async functions in sync scopes. Can be used in any scope.

    Example:
        if inspect.iscoroutinefunction(block_fn.fn):
            predictions = utils.synchronize_async(block_fn.fn, *processed_input)

    Args:
        func: the async (coroutine) function to run
        *args: positional arguments forwarded to ``func``
        **kwargs: keyword arguments forwarded to ``func``
    """
    # Schedules the coroutine on fsspec's dedicated background event loop and
    # blocks the calling thread until it completes.
    return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs)  # type: ignore
541
+
542
+
543
class APIInfoParseError(ValueError):
    """Raised when an API info/schema payload cannot be interpreted."""


def get_type(schema: dict):
    """Return the top-level "type" of a JSON-schema fragment.

    Returns the value of the "type" key when present, otherwise "oneOf" or
    "anyOf" for union schemas.

    Raises:
        APIInfoParseError: if none of the recognized keys is present.
    """
    if "type" in schema:
        return schema["type"]
    elif schema.get("oneOf"):
        return "oneOf"
    elif schema.get("anyOf"):
        return "anyOf"
    else:
        raise APIInfoParseError(f"Cannot parse type for {schema}")


def json_schema_to_python_type(schema: Any) -> str:
    """Convert the json schema into a python type hint.

    Raises:
        APIInfoParseError: if the schema cannot be interpreted.
    """
    type_ = get_type(schema)
    if type_ == {}:
        # Serializer advertises "anything"; the description distinguishes
        # arbitrary JSON from an arbitrary Python value. Use .get() so a
        # missing description yields "Any" instead of a KeyError.
        if "json" in schema.get("description", ""):
            return "Dict[Any, Any]"
        else:
            return "Any"
    elif type_ == "null":
        return "None"
    elif type_ == "integer":
        return "int"
    elif type_ == "string":
        return "str"
    elif type_ == "boolean":
        return "bool"
    elif type_ == "number":
        return "int | float"
    elif type_ == "array":
        # "items" may be absent or a bool per the JSON Schema spec; default to
        # {} so that neither case raises a TypeError below.
        items = schema.get("items", {})
        if isinstance(items, dict) and "prefixItems" in items:
            # Fixed-length tuple schema.
            elements = ", ".join(
                [json_schema_to_python_type(i) for i in items["prefixItems"]]
            )
            return f"Tuple[{elements}]"
        else:
            elements = json_schema_to_python_type(items)
            return f"List[{elements}]"
    elif type_ == "object":
        des = ", ".join(
            [
                f"{n}: {json_schema_to_python_type(v)} ({v.get('description')})"
                for n, v in schema["properties"].items()
            ]
        )
        return f"Dict({des})"
    elif type_ in ["oneOf", "anyOf"]:
        return " | ".join([json_schema_to_python_type(i) for i in schema[type_]])
    else:
        raise APIInfoParseError(f"Cannot parse schema {schema}")
testbed/gradio-app__gradio/client/python/pyproject.toml ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [build-system]
2
+ requires = ["hatchling", "hatch-requirements-txt", "hatch-fancy-pypi-readme>=22.5.0"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "gradio_client"
7
+ dynamic = ["version", "dependencies", "readme"]
8
+ description = "Python library for easily interacting with trained machine learning models"
9
+ license = "Apache-2.0"
10
+ requires-python = ">=3.8"
11
+ authors = [
12
+ { name = "Abubakar Abid", email = "team@gradio.app" },
13
+ { name = "Ali Abid", email = "team@gradio.app" },
14
+ { name = "Ali Abdalla", email = "team@gradio.app" },
15
+ { name = "Dawood Khan", email = "team@gradio.app" },
16
+ { name = "Ahsen Khaliq", email = "team@gradio.app" },
17
+ { name = "Pete Allen", email = "team@gradio.app" },
18
+ { name = "Freddy Boulton", email = "team@gradio.app" },
19
+ ]
20
+ keywords = ["machine learning", "client", "API"]
21
+
22
+ classifiers = [
23
+ 'Development Status :: 4 - Beta',
24
+ 'License :: OSI Approved :: Apache Software License',
25
+ 'Operating System :: OS Independent',
26
+ 'Programming Language :: Python :: 3',
27
+ 'Programming Language :: Python :: 3 :: Only',
28
+ 'Programming Language :: Python :: 3.8',
29
+ 'Programming Language :: Python :: 3.9',
30
+ 'Programming Language :: Python :: 3.10',
31
+ 'Programming Language :: Python :: 3.11',
32
+ 'Topic :: Scientific/Engineering',
33
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
34
+ 'Topic :: Software Development :: User Interfaces',
35
+ ]
36
+
37
+ [project.urls]
38
+ Homepage = "https://github.com/gradio-app/gradio"
39
+
40
+ [tool.hatch.version]
41
+ path = "gradio_client/package.json"
42
+ pattern = ".*\"version\":\\s*\"(?P<version>[^\"]+)\""
43
+
44
+ [tool.hatch.metadata.hooks.requirements_txt]
45
+ filename = "requirements.txt"
46
+
47
+ [tool.hatch.metadata.hooks.fancy-pypi-readme]
48
+ content-type = "text/markdown"
49
+ fragments = [
50
+ { path = "README.md" },
51
+ ]
52
+
53
+ [tool.hatch.build.targets.sdist]
54
+ include = [
55
+ "/gradio_client",
56
+ "/README.md",
57
+ "/requirements.txt",
58
+ ]
59
+
60
+ [tool.ruff]
61
+ extend = "../../pyproject.toml"
62
+
63
+ [tool.ruff.isort]
64
+ known-first-party = [
65
+ "gradio_client"
66
+ ]
67
+
68
+ [tool.pytest.ini_options]
69
+ GRADIO_ANALYTICS_ENABLED = "False"
70
+ HF_HUB_DISABLE_TELEMETRY = "1"
testbed/gradio-app__gradio/client/python/requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ fsspec
2
+ httpx
3
+ huggingface_hub>=0.13.0
4
+ packaging
5
+ requests~=2.0
6
+ typing_extensions~=4.0
7
+ websockets>=10.0,<12.0
testbed/gradio-app__gradio/client/python/scripts/build_pypi.sh ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Build sdist/wheel for the gradio_client package from a clean slate.
set -e

# Run from the client/python package root regardless of invocation directory.
cd "$(dirname "$0")/.."

python3 -m pip install build
rm -rf dist/* build/*
python3 -m build
testbed/gradio-app__gradio/client/python/scripts/check_pypi.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import sys
import urllib.request
from pathlib import Path

# Read the version we are about to publish from the client's package.json.
version_file = Path(__file__).parent.parent / "gradio_client" / "package.json"
with version_file.open() as f:
    version = json.load(f)["version"]

# Compare against the releases already published to PyPI; exit non-zero (so
# CI can skip publishing) when this version already exists.
with urllib.request.urlopen("https://pypi.org/pypi/gradio_client/json") as url:
    releases = json.load(url)["releases"]

if version in releases:
    print(f"Version {version} already exists on PyPI")
    sys.exit(1)
else:
    print(f"Version {version} does not exist on PyPI")