diff --git a/data/node_modules/.bin/mime b/data/node_modules/.bin/mime new file mode 120000 index 0000000000000000000000000000000000000000..fbb7ee0eed8d1dd0fe3b5a9d6ff41d1c4f044675 --- /dev/null +++ b/data/node_modules/.bin/mime @@ -0,0 +1 @@ +../mime/cli.js \ No newline at end of file diff --git a/data/node_modules/.package-lock.json b/data/node_modules/.package-lock.json new file mode 100644 index 0000000000000000000000000000000000000000..0bc72acb3408d8cd7e33d3287ae608fae7edd400 --- /dev/null +++ b/data/node_modules/.package-lock.json @@ -0,0 +1,782 @@ +{ + "name": "data", + "lockfileVersion": 3, + "requires": true, + "packages": { + "node_modules/@huggingface/inference": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/@huggingface/inference/-/inference-2.8.0.tgz", + "integrity": "sha512-Ti681P1qckcCAqgzmL53jBnluPuZGelmMIuXNjgAwC5+RIjF4S0SDQu6oy44ZTwekwNp2ETaZ2sXsOk+45aC4w==", + "license": "MIT", + "dependencies": { + "@huggingface/tasks": "^0.11.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@huggingface/tasks": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/@huggingface/tasks/-/tasks-0.11.4.tgz", + "integrity": "sha512-DW12G7Ae643U8vwitfaZOtvuk0+3W0GfQFcxxwVkFi07neZdBnSalxeKQsoPW7tZNcUdIZJSxhrnVNFGSXeC5Q==", + "license": "MIT" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + "node_modules/body-parser": { + 
"version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": 
"sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": 
"sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/dotenv": { + "version": "16.4.5", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz", + "integrity": "sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": 
"sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express": { + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.2", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.6.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + 
"node": ">= 0.8" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": 
"https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==", + "license": "MIT" + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + 
"license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/object-inspect": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": 
"1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==", + "license": "MIT" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": 
"MIT", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": 
"sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "license": "MIT", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + } + } +} diff --git a/data/node_modules/@huggingface/inference/LICENSE b/data/node_modules/@huggingface/inference/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..7b15760427589d074b7a776846b62bf84e806372 --- /dev/null +++ b/data/node_modules/@huggingface/inference/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Tim Mikeladze + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated 
documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/data/node_modules/@huggingface/inference/README.md b/data/node_modules/@huggingface/inference/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f97a8312e57cdf3deb2eea19b368d0a94dbed8b5 --- /dev/null +++ b/data/node_modules/@huggingface/inference/README.md @@ -0,0 +1,633 @@ +# 🤗 Hugging Face Inference Endpoints + +A Typescript powered wrapper for the Hugging Face Inference Endpoints API. Learn more about Inference Endpoints at [Hugging Face](https://huggingface.co/inference-endpoints). +It works with both [Inference API (serverless)](https://huggingface.co/docs/api-inference/index) and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index). + +Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README). 
+ +You can also try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference), see some demos on [hf.co/huggingfacejs](https://huggingface.co/huggingfacejs), or watch a [Scrimba tutorial that explains how Inference Endpoints works](https://scrimba.com/scrim/cod8248f5adfd6e129582c523). + +## Getting Started + +### Install + +#### Node + +```console +npm install @huggingface/inference + +pnpm add @huggingface/inference + +yarn add @huggingface/inference +``` + +#### Deno + +```ts +// esm.sh +import { HfInference } from "https://esm.sh/@huggingface/inference" +// or npm: +import { HfInference } from "npm:@huggingface/inference" +``` + +### Initialize + +```typescript +import { HfInference } from '@huggingface/inference' + +const hf = new HfInference('your access token') +``` + +❗**Important note:** Using an access token is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**. + +Your access token should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the access token. + +#### Tree-shaking + +You can import the functions you need directly from the module instead of using the `HfInference` class. + +```ts +import { textGeneration } from "@huggingface/inference"; + +await textGeneration({ + accessToken: "hf_...", + model: "model_or_endpoint", + inputs: ..., + parameters: ... +}) +``` + +This will enable tree-shaking by your bundler. + +## Natural Language Processing + +### Text Generation + +Generates text from an input prompt. 
+ +[Demo](https://huggingface.co/spaces/huggingfacejs/streaming-text-generation) + +```typescript +await hf.textGeneration({ + model: 'gpt2', + inputs: 'The answer to the universe is' +}) + +for await (const output of hf.textGenerationStream({ + model: "google/flan-t5-xxl", + inputs: 'repeat "one two three four"', + parameters: { max_new_tokens: 250 } +})) { + console.log(output.token.text, output.generated_text); +} +``` + +### Text Generation (Chat Completion API Compatible) + +Using the `chatCompletion` method, you can generate text with models compatible with the OpenAI Chat Completion API. All models served by [TGI](https://api-inference.huggingface.co/framework/text-generation-inference) on Hugging Face support Messages API. + +[Demo](https://huggingface.co/spaces/huggingfacejs/streaming-chat-completion) + +```typescript +// Non-streaming API +const out = await hf.chatCompletion({ + model: "mistralai/Mistral-7B-Instruct-v0.2", + messages: [{ role: "user", content: "Complete the this sentence with words one plus one is equal " }], + max_tokens: 500, + temperature: 0.1, + seed: 0, +}); + +// Streaming API +let out = ""; +for await (const chunk of hf.chatCompletionStream({ + model: "mistralai/Mistral-7B-Instruct-v0.2", + messages: [ + { role: "user", content: "Complete the equation 1+1= ,just the answer" }, + ], + max_tokens: 500, + temperature: 0.1, + seed: 0, +})) { + if (chunk.choices && chunk.choices.length > 0) { + out += chunk.choices[0].delta.content; + } +} +``` + +It's also possible to call Mistral or OpenAI endpoints directly: + +```typescript +const openai = new HfInference(OPENAI_TOKEN).endpoint("https://api.openai.com"); + +let out = ""; +for await (const chunk of openai.chatCompletionStream({ + model: "gpt-3.5-turbo", + messages: [ + { role: "user", content: "Complete the equation 1+1= ,just the answer" }, + ], + max_tokens: 500, + temperature: 0.1, + seed: 0, +})) { + if (chunk.choices && chunk.choices.length > 0) { + out += 
chunk.choices[0].delta.content; + } +} + +// For mistral AI: +// endpointUrl: "https://api.mistral.ai" +// model: "mistral-tiny" +``` + +### Fill Mask + +Tries to fill in a hole with a missing word (token to be precise). + +```typescript +await hf.fillMask({ + model: 'bert-base-uncased', + inputs: '[MASK] world!' +}) +``` + +### Summarization + +Summarizes longer text into shorter text. Be careful, some models have a maximum length of input. + +```typescript +await hf.summarization({ + model: 'facebook/bart-large-cnn', + inputs: + 'The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930.', + parameters: { + max_length: 100 + } +}) +``` + +### Question Answering + +Answers questions based on the context you provide. + +```typescript +await hf.questionAnswering({ + model: 'deepset/roberta-base-squad2', + inputs: { + question: 'What is the capital of France?', + context: 'The capital of France is Paris.' + } +}) +``` + +### Table Question Answering + +```typescript +await hf.tableQuestionAnswering({ + model: 'google/tapas-base-finetuned-wtq', + inputs: { + query: 'How many stars does the transformers repository have?', + table: { + Repository: ['Transformers', 'Datasets', 'Tokenizers'], + Stars: ['36542', '4512', '3934'], + Contributors: ['651', '77', '34'], + 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS'] + } + } +}) +``` + +### Text Classification + +Often used for sentiment analysis, this method will assign labels to the given text along with a probability score of that label. 
+ +```typescript +await hf.textClassification({ + model: 'distilbert-base-uncased-finetuned-sst-2-english', + inputs: 'I like you. I love you.' +}) +``` + +### Token Classification + +Used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. + +```typescript +await hf.tokenClassification({ + model: 'dbmdz/bert-large-cased-finetuned-conll03-english', + inputs: 'My name is Sarah Jessica Parker but you can call me Jessica' +}) +``` + +### Translation + +Converts text from one language to another. + +```typescript +await hf.translation({ + model: 't5-base', + inputs: 'My name is Wolfgang and I live in Berlin' +}) + +await hf.translation({ + model: 'facebook/mbart-large-50-many-to-many-mmt', + inputs: textToTranslate, + parameters: { + "src_lang": "en_XX", + "tgt_lang": "fr_XX" + } +}) +``` + +### Zero-Shot Classification + +Checks how well an input text fits into a set of labels you provide. + +```typescript +await hf.zeroShotClassification({ + model: 'facebook/bart-large-mnli', + inputs: [ + 'Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!' + ], + parameters: { candidate_labels: ['refund', 'legal', 'faq'] } +}) +``` + +### Conversational + +This task corresponds to any chatbot-like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long-range dependency or not. + +```typescript +await hf.conversational({ + model: 'microsoft/DialoGPT-large', + inputs: { + past_user_inputs: ['Which movie is the best ?'], + generated_responses: ['It is Die Hard for sure.'], + text: 'Can you explain why ?' + } +}) +``` + +### Sentence Similarity + +Calculate the semantic similarity between one text and a list of other sentences. 
+ +```typescript +await hf.sentenceSimilarity({ + model: 'sentence-transformers/paraphrase-xlm-r-multilingual-v1', + inputs: { + source_sentence: 'That is a happy person', + sentences: [ + 'That is a happy dog', + 'That is a very happy person', + 'Today is a sunny day' + ] + } +}) +``` + +## Audio + +### Automatic Speech Recognition + +Transcribes speech from an audio file. + +[Demo](https://huggingface.co/spaces/huggingfacejs/speech-recognition-vue) + +```typescript +await hf.automaticSpeechRecognition({ + model: 'facebook/wav2vec2-large-960h-lv60-self', + data: readFileSync('test/sample1.flac') +}) +``` + +### Audio Classification + +Assigns labels to the given audio along with a probability score of that label. + +[Demo](https://huggingface.co/spaces/huggingfacejs/audio-classification-vue) + +```typescript +await hf.audioClassification({ + model: 'superb/hubert-large-superb-er', + data: readFileSync('test/sample1.flac') +}) +``` + +### Text To Speech + +Generates natural-sounding speech from text input. + +[Interactive tutorial](https://scrimba.com/scrim/co8da4d23b49b648f77f4848a?pl=pkVnrP7uP) + +```typescript +await hf.textToSpeech({ + model: 'espnet/kan-bayashi_ljspeech_vits', + inputs: 'Hello world!' +}) +``` + +### Audio To Audio + +Outputs one or multiple generated audios from an input audio, commonly used for speech enhancement and source separation. + +```typescript +await hf.audioToAudio({ + model: 'speechbrain/sepformer-wham', + data: readFileSync('test/sample1.flac') +}) +``` + +## Computer Vision + +### Image Classification + +Assigns labels to a given image along with a probability score of that label. + +[Demo](https://huggingface.co/spaces/huggingfacejs/image-classification-vue) + +```typescript +await hf.imageClassification({ + data: readFileSync('test/cheetah.png'), + model: 'google/vit-base-patch16-224' +}) +``` + +### Object Detection + +Detects objects within an image and returns labels with corresponding bounding boxes and probability scores. 
+ +[Demo](https://huggingface.co/spaces/huggingfacejs/object-detection-vue) + +```typescript +await hf.objectDetection({ + data: readFileSync('test/cats.png'), + model: 'facebook/detr-resnet-50' +}) +``` + +### Image Segmentation + +Detects segments within an image and returns labels with corresponding bounding boxes and probability scores. + +```typescript +await hf.imageSegmentation({ + data: readFileSync('test/cats.png'), + model: 'facebook/detr-resnet-50-panoptic' +}) +``` + +### Image To Text + +Outputs text from a given image, commonly used for captioning or optical character recognition. + +```typescript +await hf.imageToText({ + data: readFileSync('test/cats.png'), + model: 'nlpconnect/vit-gpt2-image-captioning' +}) +``` + +### Text To Image + +Creates an image from a text prompt. + +[Demo](https://huggingface.co/spaces/huggingfacejs/image-to-text) + +```typescript +await hf.textToImage({ + inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]', + model: 'stabilityai/stable-diffusion-2', + parameters: { + negative_prompt: 'blurry', + } +}) +``` + +### Image To Image + +Image-to-image is the task of transforming a source image to match the characteristics of a target image or a target image domain. + +[Interactive tutorial](https://scrimba.com/scrim/co4834bf9a91cc81cfab07969?pl=pkVnrP7uP) + +```typescript +await hf.imageToImage({ + inputs: new Blob([readFileSync("test/stormtrooper_depth.png")]), + parameters: { + prompt: "elmo's lecture", + }, + model: "lllyasviel/sd-controlnet-depth", +}); +``` + +### Zero Shot Image Classification + +Checks how well an input image fits into a set of labels you provide. 
+ +```typescript +await hf.zeroShotImageClassification({ + model: 'openai/clip-vit-large-patch14-336', + inputs: { + image: await (await fetch('https://placekitten.com/300/300')).blob() + }, + parameters: { + candidate_labels: ['cat', 'dog'] + } +}) +``` + +## Multimodal + +### Feature Extraction + +This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search. + +```typescript +await hf.featureExtraction({ + model: "sentence-transformers/distilbert-base-nli-mean-tokens", + inputs: "That is a happy person", +}); +``` + +### Visual Question Answering + +Visual Question Answering is the task of answering open-ended questions based on an image. They output natural language responses to natural language questions. + +[Demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa) + +```typescript +await hf.visualQuestionAnswering({ + model: 'dandelin/vilt-b32-finetuned-vqa', + inputs: { + question: 'How many cats are lying down?', + image: await (await fetch('https://placekitten.com/300/300')).blob() + } +}) +``` + +### Document Question Answering + +Document question answering models take a (document, question) pair as input and return an answer in natural language. + +[Demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa) + +```typescript +await hf.documentQuestionAnswering({ + model: 'impira/layoutlm-document-qa', + inputs: { + question: 'Invoice number?', + image: await (await fetch('https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')).blob(), + } +}) +``` + +## Tabular + +### Tabular Regression + +Tabular regression is the task of predicting a numerical value given a set of attributes. 
+ +```typescript +await hf.tabularRegression({ + model: "scikit-learn/Fish-Weight", + inputs: { + data: { + "Height": ["11.52", "12.48", "12.3778"], + "Length1": ["23.2", "24", "23.9"], + "Length2": ["25.4", "26.3", "26.5"], + "Length3": ["30", "31.2", "31.1"], + "Species": ["Bream", "Bream", "Bream"], + "Width": ["4.02", "4.3056", "4.6961"] + }, + }, +}) +``` + +### Tabular Classification + +Tabular classification is the task of classifying a target category (a group) based on set of attributes. + +```typescript +await hf.tabularClassification({ + model: "vvmnnnkv/wine-quality", + inputs: { + data: { + "fixed_acidity": ["7.4", "7.8", "10.3"], + "volatile_acidity": ["0.7", "0.88", "0.32"], + "citric_acid": ["0", "0", "0.45"], + "residual_sugar": ["1.9", "2.6", "6.4"], + "chlorides": ["0.076", "0.098", "0.073"], + "free_sulfur_dioxide": ["11", "25", "5"], + "total_sulfur_dioxide": ["34", "67", "13"], + "density": ["0.9978", "0.9968", "0.9976"], + "pH": ["3.51", "3.2", "3.23"], + "sulphates": ["0.56", "0.68", "0.82"], + "alcohol": ["9.4", "9.8", "12.6"] + }, + }, +}) +``` + +## Custom Calls + +For models with custom parameters / outputs. + +```typescript +await hf.request({ + model: 'my-custom-model', + inputs: 'hello world', + parameters: { + custom_param: 'some magic', + } +}) + +// Custom streaming call, for models with custom parameters / outputs +for await (const output of hf.streamingRequest({ + model: 'my-custom-model', + inputs: 'hello world', + parameters: { + custom_param: 'some magic', + } +})) { + ... +} +``` + +You can use any Chat Completion API-compatible provider with the `chatCompletion` method. 
+ +```typescript +// Chat Completion Example +const MISTRAL_KEY = process.env.MISTRAL_KEY; +const hf = new HfInference(MISTRAL_KEY); +const ep = hf.endpoint("https://api.mistral.ai"); +const stream = ep.chatCompletionStream({ + model: "mistral-tiny", + messages: [{ role: "user", content: "Complete the equation one + one = , just the answer" }], +}); +let out = ""; +for await (const chunk of stream) { + if (chunk.choices && chunk.choices.length > 0) { + out += chunk.choices[0].delta.content; + console.log(out); + } +} +``` + +## Custom Inference Endpoints + +Learn more about using your own inference endpoints [here](https://hf.co/docs/inference-endpoints/) + +```typescript +const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2'); +const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'}); + +// Chat Completion Example +const ep = hf.endpoint( + "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2" +); +const stream = ep.chatCompletionStream({ + model: "tgi", + messages: [{ role: "user", content: "Complete the equation 1+1= ,just the answer" }], + max_tokens: 500, + temperature: 0.1, + seed: 0, +}); +let out = ""; +for await (const chunk of stream) { + if (chunk.choices && chunk.choices.length > 0) { + out += chunk.choices[0].delta.content; + console.log(out); + } +} +``` + +By default, all calls to the inference endpoint will wait until the model is +loaded. When [scaling to +0](https://huggingface.co/docs/inference-endpoints/en/autoscaling#scaling-to-0) +is enabled on the endpoint, this can result in non-trivial waiting time. 
If +you'd rather disable this behavior and handle the endpoint's returned 500 HTTP +errors yourself, you can do so like so: + +```typescript +const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2'); +const { generated_text } = await gpt2.textGeneration( + {inputs: 'The answer to the universe is'}, + {retry_on_error: false}, +); +``` + +## Running tests + +```console +HF_TOKEN="your access token" pnpm run test +``` + +## Finding appropriate models + +We have an informative documentation project called [Tasks](https://huggingface.co/tasks) to list available models for each task and explain how each task works in detail. + +It also contains demos, example outputs, and other resources should you want to dig deeper into the ML side of things. + +## Dependencies + +- `@huggingface/tasks` : Typings only diff --git a/data/node_modules/@huggingface/inference/dist/index.cjs b/data/node_modules/@huggingface/inference/dist/index.cjs new file mode 100644 index 0000000000000000000000000000000000000000..38460de8d56ee6720aefb8efe1e194cda217a4ca --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/index.cjs @@ -0,0 +1,999 @@ +"use strict"; +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); + +// src/index.ts +var 
src_exports = {}; +__export(src_exports, { + HfInference: () => HfInference, + HfInferenceEndpoint: () => HfInferenceEndpoint, + InferenceOutputError: () => InferenceOutputError, + audioClassification: () => audioClassification, + audioToAudio: () => audioToAudio, + automaticSpeechRecognition: () => automaticSpeechRecognition, + chatCompletion: () => chatCompletion, + chatCompletionStream: () => chatCompletionStream, + documentQuestionAnswering: () => documentQuestionAnswering, + featureExtraction: () => featureExtraction, + fillMask: () => fillMask, + imageClassification: () => imageClassification, + imageSegmentation: () => imageSegmentation, + imageToImage: () => imageToImage, + imageToText: () => imageToText, + objectDetection: () => objectDetection, + questionAnswering: () => questionAnswering, + request: () => request, + sentenceSimilarity: () => sentenceSimilarity, + streamingRequest: () => streamingRequest, + summarization: () => summarization, + tableQuestionAnswering: () => tableQuestionAnswering, + tabularClassification: () => tabularClassification, + tabularRegression: () => tabularRegression, + textClassification: () => textClassification, + textGeneration: () => textGeneration, + textGenerationStream: () => textGenerationStream, + textToImage: () => textToImage, + textToSpeech: () => textToSpeech, + tokenClassification: () => tokenClassification, + translation: () => translation, + visualQuestionAnswering: () => visualQuestionAnswering, + zeroShotClassification: () => zeroShotClassification, + zeroShotImageClassification: () => zeroShotImageClassification +}); +module.exports = __toCommonJS(src_exports); + +// src/tasks/index.ts +var tasks_exports = {}; +__export(tasks_exports, { + audioClassification: () => audioClassification, + audioToAudio: () => audioToAudio, + automaticSpeechRecognition: () => automaticSpeechRecognition, + chatCompletion: () => chatCompletion, + chatCompletionStream: () => chatCompletionStream, + documentQuestionAnswering: () => 
documentQuestionAnswering, + featureExtraction: () => featureExtraction, + fillMask: () => fillMask, + imageClassification: () => imageClassification, + imageSegmentation: () => imageSegmentation, + imageToImage: () => imageToImage, + imageToText: () => imageToText, + objectDetection: () => objectDetection, + questionAnswering: () => questionAnswering, + request: () => request, + sentenceSimilarity: () => sentenceSimilarity, + streamingRequest: () => streamingRequest, + summarization: () => summarization, + tableQuestionAnswering: () => tableQuestionAnswering, + tabularClassification: () => tabularClassification, + tabularRegression: () => tabularRegression, + textClassification: () => textClassification, + textGeneration: () => textGeneration, + textGenerationStream: () => textGenerationStream, + textToImage: () => textToImage, + textToSpeech: () => textToSpeech, + tokenClassification: () => tokenClassification, + translation: () => translation, + visualQuestionAnswering: () => visualQuestionAnswering, + zeroShotClassification: () => zeroShotClassification, + zeroShotImageClassification: () => zeroShotImageClassification +}); + +// src/utils/pick.ts +function pick(o, props) { + return Object.assign( + {}, + ...props.map((prop) => { + if (o[prop] !== void 0) { + return { [prop]: o[prop] }; + } + }) + ); +} + +// src/utils/typedInclude.ts +function typedInclude(arr, v) { + return arr.includes(v); +} + +// src/utils/omit.ts +function omit(o, props) { + const propsArr = Array.isArray(props) ? 
props : [props]; + const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop)); + return pick(o, letsKeep); +} + +// src/lib/isUrl.ts +function isUrl(modelOrUrl) { + return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/"); +} + +// src/lib/getDefaultTask.ts +var taskCache = /* @__PURE__ */ new Map(); +var CACHE_DURATION = 10 * 60 * 1e3; +var MAX_CACHE_ITEMS = 1e3; +var HF_HUB_URL = "https://huggingface.co"; +async function getDefaultTask(model, accessToken, options) { + if (isUrl(model)) { + return null; + } + const key = `${model}:${accessToken}`; + let cachedTask = taskCache.get(key); + if (cachedTask && cachedTask.date < new Date(Date.now() - CACHE_DURATION)) { + taskCache.delete(key); + cachedTask = void 0; + } + if (cachedTask === void 0) { + const modelTask = await (options?.fetch ?? fetch)(`${HF_HUB_URL}/api/models/${model}?expand[]=pipeline_tag`, { + headers: accessToken ? { Authorization: `Bearer ${accessToken}` } : {} + }).then((resp) => resp.json()).then((json) => json.pipeline_tag).catch(() => null); + if (!modelTask) { + return null; + } + cachedTask = { task: modelTask, date: /* @__PURE__ */ new Date() }; + taskCache.set(key, { task: modelTask, date: /* @__PURE__ */ new Date() }); + if (taskCache.size > MAX_CACHE_ITEMS) { + taskCache.delete(taskCache.keys().next().value); + } + } + return cachedTask.task; +} + +// src/lib/makeRequestOptions.ts +var HF_INFERENCE_API_BASE_URL = "https://api-inference.huggingface.co"; +var tasks = null; +async function makeRequestOptions(args, options) { + const { accessToken, endpointUrl, ...otherArgs } = args; + let { model } = args; + const { + forceTask: task, + includeCredentials, + taskHint, + wait_for_model, + use_cache, + dont_load_model, + chatCompletion: chatCompletion2 + } = options ?? 
{}; + const headers = {}; + if (accessToken) { + headers["Authorization"] = `Bearer ${accessToken}`; + } + if (!model && !tasks && taskHint) { + const res = await fetch(`${HF_HUB_URL}/api/tasks`); + if (res.ok) { + tasks = await res.json(); + } + } + if (!model && tasks && taskHint) { + const taskInfo = tasks[taskHint]; + if (taskInfo) { + model = taskInfo.models[0].id; + } + } + if (!model) { + throw new Error("No model provided, and no default model found for this task"); + } + const binary = "data" in args && !!args.data; + if (!binary) { + headers["Content-Type"] = "application/json"; + } + if (wait_for_model) { + headers["X-Wait-For-Model"] = "true"; + } + if (use_cache === false) { + headers["X-Use-Cache"] = "false"; + } + if (dont_load_model) { + headers["X-Load-Model"] = "0"; + } + let url = (() => { + if (endpointUrl && isUrl(model)) { + throw new TypeError("Both model and endpointUrl cannot be URLs"); + } + if (isUrl(model)) { + console.warn("Using a model URL is deprecated, please use the `endpointUrl` parameter instead"); + return model; + } + if (endpointUrl) { + return endpointUrl; + } + if (task) { + return `${HF_INFERENCE_API_BASE_URL}/pipeline/${task}/${model}`; + } + return `${HF_INFERENCE_API_BASE_URL}/models/${model}`; + })(); + if (chatCompletion2 && !url.endsWith("/chat/completions")) { + url += "/v1/chat/completions"; + } + let credentials; + if (typeof includeCredentials === "string") { + credentials = includeCredentials; + } else if (includeCredentials === true) { + credentials = "include"; + } + const info = { + headers, + method: "POST", + body: binary ? args.data : JSON.stringify({ + ...otherArgs.model && isUrl(otherArgs.model) ? 
omit(otherArgs, "model") : otherArgs + }), + ...credentials && { credentials }, + signal: options?.signal + }; + return { url, info }; +} + +// src/tasks/custom/request.ts +async function request(args, options) { + const { url, info } = await makeRequestOptions(args, options); + const response = await (options?.fetch ?? fetch)(url, info); + if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) { + return request(args, { + ...options, + wait_for_model: true + }); + } + if (!response.ok) { + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + const output = await response.json(); + if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) { + throw new Error(`Server ${args.model} does not seem to support chat completion. Error: ${output.error}`); + } + if (output.error) { + throw new Error(output.error); + } + } + throw new Error("An error occurred while fetching the blob"); + } + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + return await response.json(); + } + return await response.blob(); +} + +// src/vendor/fetch-event-source/parse.ts +function getLines(onLine) { + let buffer; + let position; + let fieldLength; + let discardTrailingNewline = false; + return function onChunk(arr) { + if (buffer === void 0) { + buffer = arr; + position = 0; + fieldLength = -1; + } else { + buffer = concat(buffer, arr); + } + const bufLength = buffer.length; + let lineStart = 0; + while (position < bufLength) { + if (discardTrailingNewline) { + if (buffer[position] === 10 /* NewLine */) { + lineStart = ++position; + } + discardTrailingNewline = false; + } + let lineEnd = -1; + for (; position < bufLength && lineEnd === -1; ++position) { + switch (buffer[position]) { + case 58 /* Colon */: + if (fieldLength === -1) { + fieldLength = position - lineStart; + } + break; + case 13 /* CarriageReturn */: + discardTrailingNewline = true; + case 10 /* NewLine */: + lineEnd = 
position; + break; + } + } + if (lineEnd === -1) { + break; + } + onLine(buffer.subarray(lineStart, lineEnd), fieldLength); + lineStart = position; + fieldLength = -1; + } + if (lineStart === bufLength) { + buffer = void 0; + } else if (lineStart !== 0) { + buffer = buffer.subarray(lineStart); + position -= lineStart; + } + }; +} +function getMessages(onId, onRetry, onMessage) { + let message = newMessage(); + const decoder = new TextDecoder(); + return function onLine(line, fieldLength) { + if (line.length === 0) { + onMessage?.(message); + message = newMessage(); + } else if (fieldLength > 0) { + const field = decoder.decode(line.subarray(0, fieldLength)); + const valueOffset = fieldLength + (line[fieldLength + 1] === 32 /* Space */ ? 2 : 1); + const value = decoder.decode(line.subarray(valueOffset)); + switch (field) { + case "data": + message.data = message.data ? message.data + "\n" + value : value; + break; + case "event": + message.event = value; + break; + case "id": + onId(message.id = value); + break; + case "retry": + const retry = parseInt(value, 10); + if (!isNaN(retry)) { + onRetry(message.retry = retry); + } + break; + } + } + }; +} +function concat(a, b) { + const res = new Uint8Array(a.length + b.length); + res.set(a); + res.set(b, a.length); + return res; +} +function newMessage() { + return { + data: "", + event: "", + id: "", + retry: void 0 + }; +} + +// src/tasks/custom/streamingRequest.ts +async function* streamingRequest(args, options) { + const { url, info } = await makeRequestOptions({ ...args, stream: true }, options); + const response = await (options?.fetch ?? 
fetch)(url, info); + if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) { + return yield* streamingRequest(args, { + ...options, + wait_for_model: true + }); + } + if (!response.ok) { + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + const output = await response.json(); + if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) { + throw new Error(`Server ${args.model} does not seem to support chat completion. Error: ${output.error}`); + } + if (output.error) { + throw new Error(output.error); + } + } + throw new Error(`Server response contains error: ${response.status}`); + } + if (!response.headers.get("content-type")?.startsWith("text/event-stream")) { + throw new Error( + `Server does not support event stream content type, it returned ` + response.headers.get("content-type") + ); + } + if (!response.body) { + return; + } + const reader = response.body.getReader(); + let events = []; + const onEvent = (event) => { + events.push(event); + }; + const onChunk = getLines( + getMessages( + () => { + }, + () => { + }, + onEvent + ) + ); + try { + while (true) { + const { done, value } = await reader.read(); + if (done) + return; + onChunk(value); + for (const event of events) { + if (event.data.length > 0) { + if (event.data === "[DONE]") { + return; + } + const data = JSON.parse(event.data); + if (typeof data === "object" && data !== null && "error" in data) { + throw new Error(data.error); + } + yield data; + } + } + events = []; + } + } finally { + reader.releaseLock(); + } +} + +// src/lib/InferenceOutputError.ts +var InferenceOutputError = class extends TypeError { + constructor(message) { + super( + `Invalid inference output: ${message}. 
Use the 'request' method with the same parameters to do a custom call with no type checking.` + ); + this.name = "InferenceOutputError"; + } +}; + +// src/tasks/audio/audioClassification.ts +async function audioClassification(args, options) { + const res = await request(args, { + ...options, + taskHint: "audio-classification" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} + +// src/tasks/audio/automaticSpeechRecognition.ts +async function automaticSpeechRecognition(args, options) { + const res = await request(args, { + ...options, + taskHint: "automatic-speech-recognition" + }); + const isValidOutput = typeof res?.text === "string"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected {text: string}"); + } + return res; +} + +// src/tasks/audio/textToSpeech.ts +async function textToSpeech(args, options) { + const res = await request(args, { + ...options, + taskHint: "text-to-speech" + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} + +// src/tasks/audio/audioToAudio.ts +async function audioToAudio(args, options) { + const res = await request(args, { + ...options, + taskHint: "audio-to-audio" + }); + const isValidOutput = Array.isArray(res) && res.every( + (x) => typeof x.label === "string" && typeof x.blob === "string" && typeof x["content-type"] === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, blob: string, content-type: string}>"); + } + return res; +} + +// src/tasks/cv/imageClassification.ts +async function imageClassification(args, options) { + const res = await request(args, { + ...options, + taskHint: "image-classification" + }); + const isValidOutput = Array.isArray(res) && 
res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} + +// src/tasks/cv/imageSegmentation.ts +async function imageSegmentation(args, options) { + const res = await request(args, { + ...options, + taskHint: "image-segmentation" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.mask === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, mask: string, score: number}>"); + } + return res; +} + +// src/tasks/cv/imageToText.ts +async function imageToText(args, options) { + const res = (await request(args, { + ...options, + taskHint: "image-to-text" + }))?.[0]; + if (typeof res?.generated_text !== "string") { + throw new InferenceOutputError("Expected {generated_text: string}"); + } + return res; +} + +// src/tasks/cv/objectDetection.ts +async function objectDetection(args, options) { + const res = await request(args, { + ...options, + taskHint: "object-detection" + }); + const isValidOutput = Array.isArray(res) && res.every( + (x) => typeof x.label === "string" && typeof x.score === "number" && typeof x.box.xmin === "number" && typeof x.box.ymin === "number" && typeof x.box.xmax === "number" && typeof x.box.ymax === "number" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>" + ); + } + return res; +} + +// src/tasks/cv/textToImage.ts +async function textToImage(args, options) { + const res = await request(args, { + ...options, + taskHint: "text-to-image" + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} + +// src/utils/base64FromBytes.ts +function 
base64FromBytes(arr) { + if (globalThis.Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin = []; + arr.forEach((byte) => { + bin.push(String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} + +// src/tasks/cv/imageToImage.ts +async function imageToImage(args, options) { + let reqArgs; + if (!args.parameters) { + reqArgs = { + accessToken: args.accessToken, + model: args.model, + data: args.inputs + }; + } else { + reqArgs = { + ...args, + inputs: base64FromBytes( + new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer()) + ) + }; + } + const res = await request(reqArgs, { + ...options, + taskHint: "image-to-image" + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} + +// src/tasks/cv/zeroShotImageClassification.ts +async function zeroShotImageClassification(args, options) { + const reqArgs = { + ...args, + inputs: { + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ) + } + }; + const res = await request(reqArgs, { + ...options, + taskHint: "zero-shot-image-classification" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} + +// src/tasks/nlp/featureExtraction.ts +async function featureExtraction(args, options) { + const defaultTask = args.model ? 
await getDefaultTask(args.model, args.accessToken, options) : void 0; + const res = await request(args, { + ...options, + taskHint: "feature-extraction", + ...defaultTask === "sentence-similarity" && { forceTask: "feature-extraction" } + }); + let isValidOutput = true; + const isNumArrayRec = (arr, maxDepth, curDepth = 0) => { + if (curDepth > maxDepth) + return false; + if (arr.every((x) => Array.isArray(x))) { + return arr.every((x) => isNumArrayRec(x, maxDepth, curDepth + 1)); + } else { + return arr.every((x) => typeof x === "number"); + } + }; + isValidOutput = Array.isArray(res) && isNumArrayRec(res, 3, 0); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array"); + } + return res; +} + +// src/tasks/nlp/fillMask.ts +async function fillMask(args, options) { + const res = await request(args, { + ...options, + taskHint: "fill-mask" + }); + const isValidOutput = Array.isArray(res) && res.every( + (x) => typeof x.score === "number" && typeof x.sequence === "string" && typeof x.token === "number" && typeof x.token_str === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{score: number, sequence: string, token: number, token_str: string}>" + ); + } + return res; +} + +// src/tasks/nlp/questionAnswering.ts +async function questionAnswering(args, options) { + const res = await request(args, { + ...options, + taskHint: "question-answering" + }); + const isValidOutput = typeof res === "object" && !!res && typeof res.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected {answer: string, end: number, score: number, start: number}"); + } + return res; +} + +// src/tasks/nlp/sentenceSimilarity.ts +async function sentenceSimilarity(args, options) { + const defaultTask = args.model ? 
await getDefaultTask(args.model, args.accessToken, options) : void 0; + const res = await request(args, { + ...options, + taskHint: "sentence-similarity", + ...defaultTask === "feature-extraction" && { forceTask: "sentence-similarity" } + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} + +// src/tasks/nlp/summarization.ts +async function summarization(args, options) { + const res = await request(args, { + ...options, + taskHint: "summarization" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.summary_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{summary_text: string}>"); + } + return res?.[0]; +} + +// src/tasks/nlp/tableQuestionAnswering.ts +async function tableQuestionAnswering(args, options) { + const res = await request(args, { + ...options, + taskHint: "table-question-answering" + }); + const isValidOutput = typeof res?.aggregator === "string" && typeof res.answer === "string" && Array.isArray(res.cells) && res.cells.every((x) => typeof x === "string") && Array.isArray(res.coordinates) && res.coordinates.every((coord) => Array.isArray(coord) && coord.every((x) => typeof x === "number")); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}" + ); + } + return res; +} + +// src/tasks/nlp/textClassification.ts +async function textClassification(args, options) { + const res = (await request(args, { + ...options, + taskHint: "text-classification" + }))?.[0]; + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} + +// src/utils/toArray.ts +function 
toArray(obj) { + if (Array.isArray(obj)) { + return obj; + } + return [obj]; +} + +// src/tasks/nlp/textGeneration.ts +async function textGeneration(args, options) { + const res = toArray( + await request(args, { + ...options, + taskHint: "text-generation" + }) + ); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.generated_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{generated_text: string}>"); + } + return res?.[0]; +} + +// src/tasks/nlp/textGenerationStream.ts +async function* textGenerationStream(args, options) { + yield* streamingRequest(args, { + ...options, + taskHint: "text-generation" + }); +} + +// src/tasks/nlp/tokenClassification.ts +async function tokenClassification(args, options) { + const res = toArray( + await request(args, { + ...options, + taskHint: "token-classification" + }) + ); + const isValidOutput = Array.isArray(res) && res.every( + (x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>" + ); + } + return res; +} + +// src/tasks/nlp/translation.ts +async function translation(args, options) { + const res = await request(args, { + ...options, + taskHint: "translation" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.translation_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected type Array<{translation_text: string}>"); + } + return res?.length === 1 ? 
res?.[0] : res; +} + +// src/tasks/nlp/zeroShotClassification.ts +async function zeroShotClassification(args, options) { + const res = toArray( + await request(args, { + ...options, + taskHint: "zero-shot-classification" + }) + ); + const isValidOutput = Array.isArray(res) && res.every( + (x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>"); + } + return res; +} + +// src/tasks/nlp/chatCompletion.ts +async function chatCompletion(args, options) { + const res = await request(args, { + ...options, + taskHint: "text-generation", + chatCompletion: true + }); + const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && typeof res?.system_fingerprint === "string" && typeof res?.usage === "object"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected ChatCompletionOutput"); + } + return res; +} + +// src/tasks/nlp/chatCompletionStream.ts +async function* chatCompletionStream(args, options) { + yield* streamingRequest(args, { + ...options, + taskHint: "text-generation", + chatCompletion: true + }); +} + +// src/tasks/multimodal/documentQuestionAnswering.ts +async function documentQuestionAnswering(args, options) { + const reqArgs = { + ...args, + inputs: { + question: args.inputs.question, + // convert Blob or ArrayBuffer to base64 + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? 
args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ) + } + }; + const res = toArray( + await request(reqArgs, { + ...options, + taskHint: "document-question-answering" + }) + )?.[0]; + const isValidOutput = typeof res?.answer === "string" && (typeof res.end === "number" || typeof res.end === "undefined") && (typeof res.score === "number" || typeof res.score === "undefined") && (typeof res.start === "number" || typeof res.start === "undefined"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>"); + } + return res; +} + +// src/tasks/multimodal/visualQuestionAnswering.ts +async function visualQuestionAnswering(args, options) { + const reqArgs = { + ...args, + inputs: { + question: args.inputs.question, + // convert Blob or ArrayBuffer to base64 + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ) + } + }; + const res = (await request(reqArgs, { + ...options, + taskHint: "visual-question-answering" + }))?.[0]; + const isValidOutput = typeof res?.answer === "string" && typeof res.score === "number"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{answer: string, score: number}>"); + } + return res; +} + +// src/tasks/tabular/tabularRegression.ts +async function tabularRegression(args, options) { + const res = await request(args, { + ...options, + taskHint: "tabular-regression" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} + +// src/tasks/tabular/tabularClassification.ts +async function tabularClassification(args, options) { + const res = await request(args, { + ...options, + taskHint: "tabular-classification" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); 
+ if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} + +// src/HfInference.ts +var HfInference = class { + accessToken; + defaultOptions; + constructor(accessToken = "", defaultOptions = {}) { + this.accessToken = accessToken; + this.defaultOptions = defaultOptions; + for (const [name, fn] of Object.entries(tasks_exports)) { + Object.defineProperty(this, name, { + enumerable: false, + value: (params, options) => ( + // eslint-disable-next-line @typescript-eslint/no-explicit-any + fn({ ...params, accessToken }, { ...defaultOptions, ...options }) + ) + }); + } + } + /** + * Returns copy of HfInference tied to a specified endpoint. + */ + endpoint(endpointUrl) { + return new HfInferenceEndpoint(endpointUrl, this.accessToken, this.defaultOptions); + } +}; +var HfInferenceEndpoint = class { + constructor(endpointUrl, accessToken = "", defaultOptions = {}) { + accessToken; + defaultOptions; + for (const [name, fn] of Object.entries(tasks_exports)) { + Object.defineProperty(this, name, { + enumerable: false, + value: (params, options) => ( + // eslint-disable-next-line @typescript-eslint/no-explicit-any + fn({ ...params, accessToken, endpointUrl }, { ...defaultOptions, ...options }) + ) + }); + } + } +}; +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + HfInference, + HfInferenceEndpoint, + InferenceOutputError, + audioClassification, + audioToAudio, + automaticSpeechRecognition, + chatCompletion, + chatCompletionStream, + documentQuestionAnswering, + featureExtraction, + fillMask, + imageClassification, + imageSegmentation, + imageToImage, + imageToText, + objectDetection, + questionAnswering, + request, + sentenceSimilarity, + streamingRequest, + summarization, + tableQuestionAnswering, + tabularClassification, + tabularRegression, + textClassification, + textGeneration, + textGenerationStream, + textToImage, + textToSpeech, + tokenClassification, + translation, + 
visualQuestionAnswering, + zeroShotClassification, + zeroShotImageClassification +}); diff --git a/data/node_modules/@huggingface/inference/dist/index.js b/data/node_modules/@huggingface/inference/dist/index.js new file mode 100644 index 0000000000000000000000000000000000000000..4896cd7768c0cb1ddc0e80f15e99bd0845a930a5 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/index.js @@ -0,0 +1,945 @@ +var __defProp = Object.defineProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; + +// src/tasks/index.ts +var tasks_exports = {}; +__export(tasks_exports, { + audioClassification: () => audioClassification, + audioToAudio: () => audioToAudio, + automaticSpeechRecognition: () => automaticSpeechRecognition, + chatCompletion: () => chatCompletion, + chatCompletionStream: () => chatCompletionStream, + documentQuestionAnswering: () => documentQuestionAnswering, + featureExtraction: () => featureExtraction, + fillMask: () => fillMask, + imageClassification: () => imageClassification, + imageSegmentation: () => imageSegmentation, + imageToImage: () => imageToImage, + imageToText: () => imageToText, + objectDetection: () => objectDetection, + questionAnswering: () => questionAnswering, + request: () => request, + sentenceSimilarity: () => sentenceSimilarity, + streamingRequest: () => streamingRequest, + summarization: () => summarization, + tableQuestionAnswering: () => tableQuestionAnswering, + tabularClassification: () => tabularClassification, + tabularRegression: () => tabularRegression, + textClassification: () => textClassification, + textGeneration: () => textGeneration, + textGenerationStream: () => textGenerationStream, + textToImage: () => textToImage, + textToSpeech: () => textToSpeech, + tokenClassification: () => tokenClassification, + translation: () => translation, + visualQuestionAnswering: () => visualQuestionAnswering, + zeroShotClassification: () => 
zeroShotClassification, + zeroShotImageClassification: () => zeroShotImageClassification +}); + +// src/utils/pick.ts +function pick(o, props) { + return Object.assign( + {}, + ...props.map((prop) => { + if (o[prop] !== void 0) { + return { [prop]: o[prop] }; + } + }) + ); +} + +// src/utils/typedInclude.ts +function typedInclude(arr, v) { + return arr.includes(v); +} + +// src/utils/omit.ts +function omit(o, props) { + const propsArr = Array.isArray(props) ? props : [props]; + const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop)); + return pick(o, letsKeep); +} + +// src/lib/isUrl.ts +function isUrl(modelOrUrl) { + return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/"); +} + +// src/lib/getDefaultTask.ts +var taskCache = /* @__PURE__ */ new Map(); +var CACHE_DURATION = 10 * 60 * 1e3; +var MAX_CACHE_ITEMS = 1e3; +var HF_HUB_URL = "https://huggingface.co"; +async function getDefaultTask(model, accessToken, options) { + if (isUrl(model)) { + return null; + } + const key = `${model}:${accessToken}`; + let cachedTask = taskCache.get(key); + if (cachedTask && cachedTask.date < new Date(Date.now() - CACHE_DURATION)) { + taskCache.delete(key); + cachedTask = void 0; + } + if (cachedTask === void 0) { + const modelTask = await (options?.fetch ?? fetch)(`${HF_HUB_URL}/api/models/${model}?expand[]=pipeline_tag`, { + headers: accessToken ? 
{ Authorization: `Bearer ${accessToken}` } : {} + }).then((resp) => resp.json()).then((json) => json.pipeline_tag).catch(() => null); + if (!modelTask) { + return null; + } + cachedTask = { task: modelTask, date: /* @__PURE__ */ new Date() }; + taskCache.set(key, { task: modelTask, date: /* @__PURE__ */ new Date() }); + if (taskCache.size > MAX_CACHE_ITEMS) { + taskCache.delete(taskCache.keys().next().value); + } + } + return cachedTask.task; +} + +// src/lib/makeRequestOptions.ts +var HF_INFERENCE_API_BASE_URL = "https://api-inference.huggingface.co"; +var tasks = null; +async function makeRequestOptions(args, options) { + const { accessToken, endpointUrl, ...otherArgs } = args; + let { model } = args; + const { + forceTask: task, + includeCredentials, + taskHint, + wait_for_model, + use_cache, + dont_load_model, + chatCompletion: chatCompletion2 + } = options ?? {}; + const headers = {}; + if (accessToken) { + headers["Authorization"] = `Bearer ${accessToken}`; + } + if (!model && !tasks && taskHint) { + const res = await fetch(`${HF_HUB_URL}/api/tasks`); + if (res.ok) { + tasks = await res.json(); + } + } + if (!model && tasks && taskHint) { + const taskInfo = tasks[taskHint]; + if (taskInfo) { + model = taskInfo.models[0].id; + } + } + if (!model) { + throw new Error("No model provided, and no default model found for this task"); + } + const binary = "data" in args && !!args.data; + if (!binary) { + headers["Content-Type"] = "application/json"; + } + if (wait_for_model) { + headers["X-Wait-For-Model"] = "true"; + } + if (use_cache === false) { + headers["X-Use-Cache"] = "false"; + } + if (dont_load_model) { + headers["X-Load-Model"] = "0"; + } + let url = (() => { + if (endpointUrl && isUrl(model)) { + throw new TypeError("Both model and endpointUrl cannot be URLs"); + } + if (isUrl(model)) { + console.warn("Using a model URL is deprecated, please use the `endpointUrl` parameter instead"); + return model; + } + if (endpointUrl) { + return endpointUrl; + } + if 
(task) { + return `${HF_INFERENCE_API_BASE_URL}/pipeline/${task}/${model}`; + } + return `${HF_INFERENCE_API_BASE_URL}/models/${model}`; + })(); + if (chatCompletion2 && !url.endsWith("/chat/completions")) { + url += "/v1/chat/completions"; + } + let credentials; + if (typeof includeCredentials === "string") { + credentials = includeCredentials; + } else if (includeCredentials === true) { + credentials = "include"; + } + const info = { + headers, + method: "POST", + body: binary ? args.data : JSON.stringify({ + ...otherArgs.model && isUrl(otherArgs.model) ? omit(otherArgs, "model") : otherArgs + }), + ...credentials && { credentials }, + signal: options?.signal + }; + return { url, info }; +} + +// src/tasks/custom/request.ts +async function request(args, options) { + const { url, info } = await makeRequestOptions(args, options); + const response = await (options?.fetch ?? fetch)(url, info); + if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) { + return request(args, { + ...options, + wait_for_model: true + }); + } + if (!response.ok) { + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + const output = await response.json(); + if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) { + throw new Error(`Server ${args.model} does not seem to support chat completion. 
Error: ${output.error}`); + } + if (output.error) { + throw new Error(output.error); + } + } + throw new Error("An error occurred while fetching the blob"); + } + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + return await response.json(); + } + return await response.blob(); +} + +// src/vendor/fetch-event-source/parse.ts +function getLines(onLine) { + let buffer; + let position; + let fieldLength; + let discardTrailingNewline = false; + return function onChunk(arr) { + if (buffer === void 0) { + buffer = arr; + position = 0; + fieldLength = -1; + } else { + buffer = concat(buffer, arr); + } + const bufLength = buffer.length; + let lineStart = 0; + while (position < bufLength) { + if (discardTrailingNewline) { + if (buffer[position] === 10 /* NewLine */) { + lineStart = ++position; + } + discardTrailingNewline = false; + } + let lineEnd = -1; + for (; position < bufLength && lineEnd === -1; ++position) { + switch (buffer[position]) { + case 58 /* Colon */: + if (fieldLength === -1) { + fieldLength = position - lineStart; + } + break; + case 13 /* CarriageReturn */: + discardTrailingNewline = true; + case 10 /* NewLine */: + lineEnd = position; + break; + } + } + if (lineEnd === -1) { + break; + } + onLine(buffer.subarray(lineStart, lineEnd), fieldLength); + lineStart = position; + fieldLength = -1; + } + if (lineStart === bufLength) { + buffer = void 0; + } else if (lineStart !== 0) { + buffer = buffer.subarray(lineStart); + position -= lineStart; + } + }; +} +function getMessages(onId, onRetry, onMessage) { + let message = newMessage(); + const decoder = new TextDecoder(); + return function onLine(line, fieldLength) { + if (line.length === 0) { + onMessage?.(message); + message = newMessage(); + } else if (fieldLength > 0) { + const field = decoder.decode(line.subarray(0, fieldLength)); + const valueOffset = fieldLength + (line[fieldLength + 1] === 32 /* Space */ ? 
2 : 1); + const value = decoder.decode(line.subarray(valueOffset)); + switch (field) { + case "data": + message.data = message.data ? message.data + "\n" + value : value; + break; + case "event": + message.event = value; + break; + case "id": + onId(message.id = value); + break; + case "retry": + const retry = parseInt(value, 10); + if (!isNaN(retry)) { + onRetry(message.retry = retry); + } + break; + } + } + }; +} +function concat(a, b) { + const res = new Uint8Array(a.length + b.length); + res.set(a); + res.set(b, a.length); + return res; +} +function newMessage() { + return { + data: "", + event: "", + id: "", + retry: void 0 + }; +} + +// src/tasks/custom/streamingRequest.ts +async function* streamingRequest(args, options) { + const { url, info } = await makeRequestOptions({ ...args, stream: true }, options); + const response = await (options?.fetch ?? fetch)(url, info); + if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) { + return yield* streamingRequest(args, { + ...options, + wait_for_model: true + }); + } + if (!response.ok) { + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + const output = await response.json(); + if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) { + throw new Error(`Server ${args.model} does not seem to support chat completion. 
Error: ${output.error}`); + } + if (output.error) { + throw new Error(output.error); + } + } + throw new Error(`Server response contains error: ${response.status}`); + } + if (!response.headers.get("content-type")?.startsWith("text/event-stream")) { + throw new Error( + `Server does not support event stream content type, it returned ` + response.headers.get("content-type") + ); + } + if (!response.body) { + return; + } + const reader = response.body.getReader(); + let events = []; + const onEvent = (event) => { + events.push(event); + }; + const onChunk = getLines( + getMessages( + () => { + }, + () => { + }, + onEvent + ) + ); + try { + while (true) { + const { done, value } = await reader.read(); + if (done) + return; + onChunk(value); + for (const event of events) { + if (event.data.length > 0) { + if (event.data === "[DONE]") { + return; + } + const data = JSON.parse(event.data); + if (typeof data === "object" && data !== null && "error" in data) { + throw new Error(data.error); + } + yield data; + } + } + events = []; + } + } finally { + reader.releaseLock(); + } +} + +// src/lib/InferenceOutputError.ts +var InferenceOutputError = class extends TypeError { + constructor(message) { + super( + `Invalid inference output: ${message}. 
Use the 'request' method with the same parameters to do a custom call with no type checking.` + ); + this.name = "InferenceOutputError"; + } +}; + +// src/tasks/audio/audioClassification.ts +async function audioClassification(args, options) { + const res = await request(args, { + ...options, + taskHint: "audio-classification" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} + +// src/tasks/audio/automaticSpeechRecognition.ts +async function automaticSpeechRecognition(args, options) { + const res = await request(args, { + ...options, + taskHint: "automatic-speech-recognition" + }); + const isValidOutput = typeof res?.text === "string"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected {text: string}"); + } + return res; +} + +// src/tasks/audio/textToSpeech.ts +async function textToSpeech(args, options) { + const res = await request(args, { + ...options, + taskHint: "text-to-speech" + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} + +// src/tasks/audio/audioToAudio.ts +async function audioToAudio(args, options) { + const res = await request(args, { + ...options, + taskHint: "audio-to-audio" + }); + const isValidOutput = Array.isArray(res) && res.every( + (x) => typeof x.label === "string" && typeof x.blob === "string" && typeof x["content-type"] === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, blob: string, content-type: string}>"); + } + return res; +} + +// src/tasks/cv/imageClassification.ts +async function imageClassification(args, options) { + const res = await request(args, { + ...options, + taskHint: "image-classification" + }); + const isValidOutput = Array.isArray(res) && 
res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} + +// src/tasks/cv/imageSegmentation.ts +async function imageSegmentation(args, options) { + const res = await request(args, { + ...options, + taskHint: "image-segmentation" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.mask === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, mask: string, score: number}>"); + } + return res; +} + +// src/tasks/cv/imageToText.ts +async function imageToText(args, options) { + const res = (await request(args, { + ...options, + taskHint: "image-to-text" + }))?.[0]; + if (typeof res?.generated_text !== "string") { + throw new InferenceOutputError("Expected {generated_text: string}"); + } + return res; +} + +// src/tasks/cv/objectDetection.ts +async function objectDetection(args, options) { + const res = await request(args, { + ...options, + taskHint: "object-detection" + }); + const isValidOutput = Array.isArray(res) && res.every( + (x) => typeof x.label === "string" && typeof x.score === "number" && typeof x.box.xmin === "number" && typeof x.box.ymin === "number" && typeof x.box.xmax === "number" && typeof x.box.ymax === "number" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>" + ); + } + return res; +} + +// src/tasks/cv/textToImage.ts +async function textToImage(args, options) { + const res = await request(args, { + ...options, + taskHint: "text-to-image" + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} + +// src/utils/base64FromBytes.ts +function 
base64FromBytes(arr) { + if (globalThis.Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin = []; + arr.forEach((byte) => { + bin.push(String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} + +// src/tasks/cv/imageToImage.ts +async function imageToImage(args, options) { + let reqArgs; + if (!args.parameters) { + reqArgs = { + accessToken: args.accessToken, + model: args.model, + data: args.inputs + }; + } else { + reqArgs = { + ...args, + inputs: base64FromBytes( + new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer()) + ) + }; + } + const res = await request(reqArgs, { + ...options, + taskHint: "image-to-image" + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} + +// src/tasks/cv/zeroShotImageClassification.ts +async function zeroShotImageClassification(args, options) { + const reqArgs = { + ...args, + inputs: { + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ) + } + }; + const res = await request(reqArgs, { + ...options, + taskHint: "zero-shot-image-classification" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} + +// src/tasks/nlp/featureExtraction.ts +async function featureExtraction(args, options) { + const defaultTask = args.model ? 
await getDefaultTask(args.model, args.accessToken, options) : void 0; + const res = await request(args, { + ...options, + taskHint: "feature-extraction", + ...defaultTask === "sentence-similarity" && { forceTask: "feature-extraction" } + }); + let isValidOutput = true; + const isNumArrayRec = (arr, maxDepth, curDepth = 0) => { + if (curDepth > maxDepth) + return false; + if (arr.every((x) => Array.isArray(x))) { + return arr.every((x) => isNumArrayRec(x, maxDepth, curDepth + 1)); + } else { + return arr.every((x) => typeof x === "number"); + } + }; + isValidOutput = Array.isArray(res) && isNumArrayRec(res, 3, 0); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array"); + } + return res; +} + +// src/tasks/nlp/fillMask.ts +async function fillMask(args, options) { + const res = await request(args, { + ...options, + taskHint: "fill-mask" + }); + const isValidOutput = Array.isArray(res) && res.every( + (x) => typeof x.score === "number" && typeof x.sequence === "string" && typeof x.token === "number" && typeof x.token_str === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{score: number, sequence: string, token: number, token_str: string}>" + ); + } + return res; +} + +// src/tasks/nlp/questionAnswering.ts +async function questionAnswering(args, options) { + const res = await request(args, { + ...options, + taskHint: "question-answering" + }); + const isValidOutput = typeof res === "object" && !!res && typeof res.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected {answer: string, end: number, score: number, start: number}"); + } + return res; +} + +// src/tasks/nlp/sentenceSimilarity.ts +async function sentenceSimilarity(args, options) { + const defaultTask = args.model ? 
await getDefaultTask(args.model, args.accessToken, options) : void 0; + const res = await request(args, { + ...options, + taskHint: "sentence-similarity", + ...defaultTask === "feature-extraction" && { forceTask: "sentence-similarity" } + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} + +// src/tasks/nlp/summarization.ts +async function summarization(args, options) { + const res = await request(args, { + ...options, + taskHint: "summarization" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.summary_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{summary_text: string}>"); + } + return res?.[0]; +} + +// src/tasks/nlp/tableQuestionAnswering.ts +async function tableQuestionAnswering(args, options) { + const res = await request(args, { + ...options, + taskHint: "table-question-answering" + }); + const isValidOutput = typeof res?.aggregator === "string" && typeof res.answer === "string" && Array.isArray(res.cells) && res.cells.every((x) => typeof x === "string") && Array.isArray(res.coordinates) && res.coordinates.every((coord) => Array.isArray(coord) && coord.every((x) => typeof x === "number")); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}" + ); + } + return res; +} + +// src/tasks/nlp/textClassification.ts +async function textClassification(args, options) { + const res = (await request(args, { + ...options, + taskHint: "text-classification" + }))?.[0]; + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} + +// src/utils/toArray.ts +function 
toArray(obj) { + if (Array.isArray(obj)) { + return obj; + } + return [obj]; +} + +// src/tasks/nlp/textGeneration.ts +async function textGeneration(args, options) { + const res = toArray( + await request(args, { + ...options, + taskHint: "text-generation" + }) + ); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.generated_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{generated_text: string}>"); + } + return res?.[0]; +} + +// src/tasks/nlp/textGenerationStream.ts +async function* textGenerationStream(args, options) { + yield* streamingRequest(args, { + ...options, + taskHint: "text-generation" + }); +} + +// src/tasks/nlp/tokenClassification.ts +async function tokenClassification(args, options) { + const res = toArray( + await request(args, { + ...options, + taskHint: "token-classification" + }) + ); + const isValidOutput = Array.isArray(res) && res.every( + (x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>" + ); + } + return res; +} + +// src/tasks/nlp/translation.ts +async function translation(args, options) { + const res = await request(args, { + ...options, + taskHint: "translation" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.translation_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected type Array<{translation_text: string}>"); + } + return res?.length === 1 ? 
res?.[0] : res; +} + +// src/tasks/nlp/zeroShotClassification.ts +async function zeroShotClassification(args, options) { + const res = toArray( + await request(args, { + ...options, + taskHint: "zero-shot-classification" + }) + ); + const isValidOutput = Array.isArray(res) && res.every( + (x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>"); + } + return res; +} + +// src/tasks/nlp/chatCompletion.ts +async function chatCompletion(args, options) { + const res = await request(args, { + ...options, + taskHint: "text-generation", + chatCompletion: true + }); + const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && typeof res?.system_fingerprint === "string" && typeof res?.usage === "object"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected ChatCompletionOutput"); + } + return res; +} + +// src/tasks/nlp/chatCompletionStream.ts +async function* chatCompletionStream(args, options) { + yield* streamingRequest(args, { + ...options, + taskHint: "text-generation", + chatCompletion: true + }); +} + +// src/tasks/multimodal/documentQuestionAnswering.ts +async function documentQuestionAnswering(args, options) { + const reqArgs = { + ...args, + inputs: { + question: args.inputs.question, + // convert Blob or ArrayBuffer to base64 + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? 
args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ) + } + }; + const res = toArray( + await request(reqArgs, { + ...options, + taskHint: "document-question-answering" + }) + )?.[0]; + const isValidOutput = typeof res?.answer === "string" && (typeof res.end === "number" || typeof res.end === "undefined") && (typeof res.score === "number" || typeof res.score === "undefined") && (typeof res.start === "number" || typeof res.start === "undefined"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>"); + } + return res; +} + +// src/tasks/multimodal/visualQuestionAnswering.ts +async function visualQuestionAnswering(args, options) { + const reqArgs = { + ...args, + inputs: { + question: args.inputs.question, + // convert Blob or ArrayBuffer to base64 + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ) + } + }; + const res = (await request(reqArgs, { + ...options, + taskHint: "visual-question-answering" + }))?.[0]; + const isValidOutput = typeof res?.answer === "string" && typeof res.score === "number"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{answer: string, score: number}>"); + } + return res; +} + +// src/tasks/tabular/tabularRegression.ts +async function tabularRegression(args, options) { + const res = await request(args, { + ...options, + taskHint: "tabular-regression" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} + +// src/tasks/tabular/tabularClassification.ts +async function tabularClassification(args, options) { + const res = await request(args, { + ...options, + taskHint: "tabular-classification" + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); 
+ if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} + +// src/HfInference.ts +var HfInference = class { + accessToken; + defaultOptions; + constructor(accessToken = "", defaultOptions = {}) { + this.accessToken = accessToken; + this.defaultOptions = defaultOptions; + for (const [name, fn] of Object.entries(tasks_exports)) { + Object.defineProperty(this, name, { + enumerable: false, + value: (params, options) => ( + // eslint-disable-next-line @typescript-eslint/no-explicit-any + fn({ ...params, accessToken }, { ...defaultOptions, ...options }) + ) + }); + } + } + /** + * Returns copy of HfInference tied to a specified endpoint. + */ + endpoint(endpointUrl) { + return new HfInferenceEndpoint(endpointUrl, this.accessToken, this.defaultOptions); + } +}; +var HfInferenceEndpoint = class { + constructor(endpointUrl, accessToken = "", defaultOptions = {}) { + accessToken; + defaultOptions; + for (const [name, fn] of Object.entries(tasks_exports)) { + Object.defineProperty(this, name, { + enumerable: false, + value: (params, options) => ( + // eslint-disable-next-line @typescript-eslint/no-explicit-any + fn({ ...params, accessToken, endpointUrl }, { ...defaultOptions, ...options }) + ) + }); + } + } +}; +export { + HfInference, + HfInferenceEndpoint, + InferenceOutputError, + audioClassification, + audioToAudio, + automaticSpeechRecognition, + chatCompletion, + chatCompletionStream, + documentQuestionAnswering, + featureExtraction, + fillMask, + imageClassification, + imageSegmentation, + imageToImage, + imageToText, + objectDetection, + questionAnswering, + request, + sentenceSimilarity, + streamingRequest, + summarization, + tableQuestionAnswering, + tabularClassification, + tabularRegression, + textClassification, + textGeneration, + textGenerationStream, + textToImage, + textToSpeech, + tokenClassification, + translation, + visualQuestionAnswering, + zeroShotClassification, + zeroShotImageClassification +}; diff --git 
a/data/node_modules/@huggingface/inference/dist/src/HfInference.d.ts b/data/node_modules/@huggingface/inference/dist/src/HfInference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..c49b37dc4843a58c3bdd13553126c7153555e21e --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/HfInference.d.ts @@ -0,0 +1,28 @@ +import * as tasks from "./tasks"; +import type { Options } from "./types"; +import type { DistributiveOmit } from "./utils/distributive-omit"; +type Task = typeof tasks; +type TaskWithNoAccessToken = { + [key in keyof Task]: (args: DistributiveOmit[0], "accessToken">, options?: Parameters[1]) => ReturnType; +}; +type TaskWithNoAccessTokenNoEndpointUrl = { + [key in keyof Task]: (args: DistributiveOmit[0], "accessToken" | "endpointUrl">, options?: Parameters[1]) => ReturnType; +}; +export declare class HfInference { + private readonly accessToken; + private readonly defaultOptions; + constructor(accessToken?: string, defaultOptions?: Options); + /** + * Returns copy of HfInference tied to a specified endpoint. 
+ */ + endpoint(endpointUrl: string): HfInferenceEndpoint; +} +export declare class HfInferenceEndpoint { + constructor(endpointUrl: string, accessToken?: string, defaultOptions?: Options); +} +export interface HfInference extends TaskWithNoAccessToken { +} +export interface HfInferenceEndpoint extends TaskWithNoAccessTokenNoEndpointUrl { +} +export {}; +//# sourceMappingURL=HfInference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/HfInference.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/HfInference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..d66db8de983dba20c53dcc5a5d689afa23bd878e --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/HfInference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"HfInference.d.ts","sourceRoot":"","sources":["../../src/HfInference.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,KAAK,MAAM,SAAS,CAAC;AACjC,OAAO,KAAK,EAAE,OAAO,EAAe,MAAM,SAAS,CAAC;AACpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,2BAA2B,CAAC;AAKlE,KAAK,IAAI,GAAG,OAAO,KAAK,CAAC;AAEzB,KAAK,qBAAqB,GAAG;KAC3B,GAAG,IAAI,MAAM,IAAI,GAAG,CACpB,IAAI,EAAE,gBAAgB,CAAC,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,aAAa,CAAC,EAC/D,OAAO,CAAC,EAAE,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAC9B,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;CAC1B,CAAC;AAEF,KAAK,kCAAkC,GAAG;KACxC,GAAG,IAAI,MAAM,IAAI,GAAG,CACpB,IAAI,EAAE,gBAAgB,CAAC,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,EAAE,aAAa,GAAG,aAAa,CAAC,EAC/E,OAAO,CAAC,EAAE,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,KAC9B,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;CAC1B,CAAC;AAEF,qBAAa,WAAW;IACvB,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAS;IACrC,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAU;gBAE7B,WAAW,SAAK,EAAE,cAAc,GAAE,OAAY;IAc1D;;OAEG;IACI,QAAQ,CAAC,WAAW,EAAE,MAAM,GAAG,mBAAmB;CAGzD;AAED,qBAAa,mBAAmB;gBACnB,WAAW,EAAE,MAAM,EAAE,WAAW,SAAK,EAAE,cAAc,GAAE,OAAY;CAa/E;AAED,MAAM,WAAW,WAAY,SAAQ,qBAAqB;CAAG;AAE7D,MAAM,WAAW,mBAAoB,SAAQ,kCAAkC;CAAG"} \ No newline at end 
of file diff --git a/data/node_modules/@huggingface/inference/dist/src/index.d.ts b/data/node_modules/@huggingface/inference/dist/src/index.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..9a0a684a6b4ad63f139b6529d402baa5684adc0b --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/index.d.ts @@ -0,0 +1,5 @@ +export { HfInference, HfInferenceEndpoint } from "./HfInference"; +export { InferenceOutputError } from "./lib/InferenceOutputError"; +export * from "./types"; +export * from "./tasks"; +//# sourceMappingURL=index.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/index.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/index.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..dc15cb61390c01baeb330839c18110d66513410a --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/index.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,mBAAmB,EAAE,MAAM,eAAe,CAAC;AACjE,OAAO,EAAE,oBAAoB,EAAE,MAAM,4BAA4B,CAAC;AAClE,cAAc,SAAS,CAAC;AACxB,cAAc,SAAS,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/lib/InferenceOutputError.d.ts b/data/node_modules/@huggingface/inference/dist/src/lib/InferenceOutputError.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..89881cfee8cede0970e6db3385a12c40e8b4b571 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/lib/InferenceOutputError.d.ts @@ -0,0 +1,4 @@ +export declare class InferenceOutputError extends TypeError { + constructor(message: string); +} +//# sourceMappingURL=InferenceOutputError.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/lib/InferenceOutputError.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/lib/InferenceOutputError.d.ts.map new 
file mode 100644 index 0000000000000000000000000000000000000000..d903b220febc3b1269d0c034b0bd7bf4c6d3a774 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/lib/InferenceOutputError.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"InferenceOutputError.d.ts","sourceRoot":"","sources":["../../../src/lib/InferenceOutputError.ts"],"names":[],"mappings":"AAAA,qBAAa,oBAAqB,SAAQ,SAAS;gBACtC,OAAO,EAAE,MAAM;CAM3B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/lib/getDefaultTask.d.ts b/data/node_modules/@huggingface/inference/dist/src/lib/getDefaultTask.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..c5eda9a6500fa118081bb7302f592285201773af --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/lib/getDefaultTask.d.ts @@ -0,0 +1,12 @@ +export declare const HF_HUB_URL = "https://huggingface.co"; +export interface DefaultTaskOptions { + fetch?: typeof fetch; +} +/** + * Get the default task. Use a LRU cache of 1000 items with 10 minutes expiration + * to avoid making too many calls to the HF hub. 
+ * + * @returns The default task for the model, or `null` if it was impossible to get it + */ +export declare function getDefaultTask(model: string, accessToken: string | undefined, options?: DefaultTaskOptions): Promise; +//# sourceMappingURL=getDefaultTask.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/lib/getDefaultTask.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/lib/getDefaultTask.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..9cd929ce02620a45e5946e5da4401a04c4211505 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/lib/getDefaultTask.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"getDefaultTask.d.ts","sourceRoot":"","sources":["../../../src/lib/getDefaultTask.ts"],"names":[],"mappings":"AAUA,eAAO,MAAM,UAAU,2BAA2B,CAAC;AAEnD,MAAM,WAAW,kBAAkB;IAClC,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;CACrB;AAED;;;;;GAKG;AACH,wBAAsB,cAAc,CACnC,KAAK,EAAE,MAAM,EACb,WAAW,EAAE,MAAM,GAAG,SAAS,EAC/B,OAAO,CAAC,EAAE,kBAAkB,GAC1B,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAkCxB"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/lib/isUrl.d.ts b/data/node_modules/@huggingface/inference/dist/src/lib/isUrl.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..6e336b6bd48851e87ab814f607312bae03051403 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/lib/isUrl.d.ts @@ -0,0 +1,2 @@ +export declare function isUrl(modelOrUrl: string): boolean; +//# sourceMappingURL=isUrl.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/lib/isUrl.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/lib/isUrl.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..217d6458f62c95f405239fb027e6b6ee484726ca --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/lib/isUrl.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"isUrl.d.ts","sourceRoot":"","sources":["../../../src/lib/isUrl.ts"],"names":[],"mappings":"AAAA,wBAAgB,KAAK,CAAC,UAAU,EAAE,MAAM,GAAG,OAAO,CAEjD"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/lib/makeRequestOptions.d.ts b/data/node_modules/@huggingface/inference/dist/src/lib/makeRequestOptions.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..59023e85a99c65cc84afdb1c1a56bb47dcf53735 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/lib/makeRequestOptions.d.ts @@ -0,0 +1,18 @@ +import type { InferenceTask, Options, RequestArgs } from "../types"; +/** + * Helper that prepares request arguments + */ +export declare function makeRequestOptions(args: RequestArgs & { + data?: Blob | ArrayBuffer; + stream?: boolean; +}, options?: Options & { + /** When a model can be used for multiple tasks, and we want to run a non-default task */ + forceTask?: string | InferenceTask; + /** To load default model if needed */ + taskHint?: InferenceTask; + chatCompletion?: boolean; +}): Promise<{ + url: string; + info: RequestInit; +}>; +//# sourceMappingURL=makeRequestOptions.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/lib/makeRequestOptions.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/lib/makeRequestOptions.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..7ca279e4f98b0168c35777619710ddc158b997fc --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/lib/makeRequestOptions.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAYpE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAkG7C"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioClassification.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioClassification.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..4418f0d101b6707d935348c5a7574b41721d7cac --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioClassification.d.ts @@ -0,0 +1,24 @@ +import type { BaseArgs, Options } from "../../types"; +export type AudioClassificationArgs = BaseArgs & { + /** + * Binary audio data + */ + data: Blob | ArrayBuffer; +}; +export interface AudioClassificationOutputValue { + /** + * The label for the class (model specific) + */ + label: string; + /** + * A float that represents how likely it is that the audio file belongs to this class. + */ + score: number; +} +export type AudioClassificationReturn = AudioClassificationOutputValue[]; +/** + * This task reads some audio input and outputs the likelihood of classes. 
+ * Recommended model: superb/hubert-large-superb-er + */ +export declare function audioClassification(args: AudioClassificationArgs, options?: Options): Promise; +//# sourceMappingURL=audioClassification.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioClassification.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioClassification.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..20a24b98a3ed21309b86ba7bbb0bb8c91c825db7 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioClassification.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"audioClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/audioClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,uBAAuB,GAAG,QAAQ,GAAG;IAChD;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,yBAAyB,GAAG,8BAA8B,EAAE,CAAC;AAEzE;;;GAGG;AACH,wBAAsB,mBAAmB,CACxC,IAAI,EAAE,uBAAuB,EAC7B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,yBAAyB,CAAC,CAWpC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioToAudio.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioToAudio.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..cda6f2e1ae0c9616c0f2625cc1bc4ddfec28e059 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioToAudio.d.ts @@ -0,0 +1,28 @@ +import type { BaseArgs, Options } from "../../types"; +export type AudioToAudioArgs = BaseArgs & { + /** + * Binary audio data + */ + data: Blob | ArrayBuffer; +}; +export interface AudioToAudioOutputValue { + /** + * The label for the audio output (model specific) + */ + label: string; + /** + * Base64 encoded audio output. 
+ */ + blob: string; + /** + * Content-type for blob, e.g. audio/flac + */ + "content-type": string; +} +export type AudioToAudioReturn = AudioToAudioOutputValue[]; +/** + * This task reads some audio input and outputs one or multiple audio files. + * Example model: speechbrain/sepformer-wham does audio source separation. + */ +export declare function audioToAudio(args: AudioToAudioArgs, options?: Options): Promise; +//# sourceMappingURL=audioToAudio.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioToAudio.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioToAudio.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..e98788521a632740c5435d3b25576170ba265fd6 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/audioToAudio.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"audioToAudio.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/audioToAudio.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,gBAAgB,GAAG,QAAQ,GAAG;IACzC;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,uBAAuB;IACvC;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IAEb;;OAEG;IACH,cAAc,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,MAAM,kBAAkB,GAAG,uBAAuB,EAAE,CAAC;AAE3D;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,kBAAkB,CAAC,CAczG"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/audio/automaticSpeechRecognition.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/automaticSpeechRecognition.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..bf845c2ff09e9ec8dc4b5c684b67b41e2a38ed8f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/automaticSpeechRecognition.d.ts @@ -0,0 +1,19 @@ +import type { BaseArgs, Options } from 
"../../types"; +export type AutomaticSpeechRecognitionArgs = BaseArgs & { + /** + * Binary audio data + */ + data: Blob | ArrayBuffer; +}; +export interface AutomaticSpeechRecognitionOutput { + /** + * The text that was recognized from the audio + */ + text: string; +} +/** + * This task reads some audio input and outputs the said words within the audio files. + * Recommended model (english language): facebook/wav2vec2-large-960h-lv60-self + */ +export declare function automaticSpeechRecognition(args: AutomaticSpeechRecognitionArgs, options?: Options): Promise; +//# sourceMappingURL=automaticSpeechRecognition.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..0d714b31ec95017b66563322109eab940766ac34 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"automaticSpeechRecognition.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/automaticSpeechRecognition.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,8BAA8B,GAAG,QAAQ,GAAG;IACvD;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;CACb;AAED;;;GAGG;AACH,wBAAsB,0BAA0B,CAC/C,IAAI,EAAE,8BAA8B,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,gCAAgC,CAAC,CAU3C"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/audio/textToSpeech.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/textToSpeech.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..85f0de59c2d9666d0cc3e4be78aee1150e63dbef --- /dev/null +++ 
b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/textToSpeech.d.ts @@ -0,0 +1,14 @@ +import type { BaseArgs, Options } from "../../types"; +export type TextToSpeechArgs = BaseArgs & { + /** + * The text to generate an audio from + */ + inputs: string; +}; +export type TextToSpeechOutput = Blob; +/** + * This task synthesize an audio of a voice pronouncing a given text. + * Recommended model: espnet/kan-bayashi_ljspeech_vits + */ +export declare function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise; +//# sourceMappingURL=textToSpeech.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/audio/textToSpeech.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/textToSpeech.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..40486c131ae65f86dbe17910da8d59235f65ad10 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/audio/textToSpeech.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"textToSpeech.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/textToSpeech.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,gBAAgB,GAAG,QAAQ,GAAG;IACzC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;CACf,CAAC;AAEF,MAAM,MAAM,kBAAkB,GAAG,IAAI,CAAC;AAEtC;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,kBAAkB,CAAC,CAUzG"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/custom/request.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/custom/request.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..592cc611dcd44abdd7d1d8b4d7912fbfaa52775e --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/custom/request.d.ts @@ -0,0 +1,13 @@ +import type { InferenceTask, Options, RequestArgs } from "../../types"; +/** + * Primitive to make custom calls to Inference 
Endpoints + */ +export declare function request(args: RequestArgs, options?: Options & { + /** When a model can be used for multiple tasks, and we want to run a non-default task */ + task?: string | InferenceTask; + /** To load default model if needed */ + taskHint?: InferenceTask; + /** Is chat completion compatible */ + chatCompletion?: boolean; +}): Promise; +//# sourceMappingURL=request.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/custom/request.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/custom/request.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..ae2888ade903ea7c8e424aa0a0d9ccca332b8331 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/custom/request.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/request.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAGvE;;GAEG;AACH,wBAAsB,OAAO,CAAC,CAAC,EAC9B,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC,CAAC,CAAC,CA6BZ"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/custom/streamingRequest.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/custom/streamingRequest.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..c927a1575d019dc57a92338f42617cee468aa537 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/custom/streamingRequest.d.ts @@ -0,0 +1,13 @@ +import type { InferenceTask, Options, RequestArgs } from "../../types"; +/** + * Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator + */ +export declare function streamingRequest(args: 
RequestArgs, options?: Options & { + /** When a model can be used for multiple tasks, and we want to run a non-default task */ + task?: string | InferenceTask; + /** To load default model if needed */ + taskHint?: InferenceTask; + /** Is chat completion compatible */ + chatCompletion?: boolean; +}): AsyncGenerator; +//# sourceMappingURL=streamingRequest.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/custom/streamingRequest.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/custom/streamingRequest.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..63ee5ac48b94454249e783c6d50b05792502ae68 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/custom/streamingRequest.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"streamingRequest.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/streamingRequest.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAKvE;;GAEG;AACH,wBAAuB,gBAAgB,CAAC,CAAC,EACxC,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,cAAc,CAAC,CAAC,CAAC,CAuEnB"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageClassification.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageClassification.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..62d74f92c8cf08d42b5a742ac55f9f8ce6a50116 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageClassification.d.ts @@ -0,0 +1,24 @@ +import type { BaseArgs, Options } from "../../types"; +export type ImageClassificationArgs = BaseArgs & { + /** + * Binary image data + */ + data: Blob | ArrayBuffer; +}; +export interface ImageClassificationOutputValue { + /** + * The label for the 
class (model specific) + */ + label: string; + /** + * A float that represents how likely it is that the image file belongs to this class. + */ + score: number; +} +export type ImageClassificationOutput = ImageClassificationOutputValue[]; +/** + * This task reads some image input and outputs the likelihood of classes. + * Recommended model: google/vit-base-patch16-224 + */ +export declare function imageClassification(args: ImageClassificationArgs, options?: Options): Promise; +//# sourceMappingURL=imageClassification.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageClassification.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageClassification.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..6f0df0defb93ccc3dd4c8157095859a65171b278 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageClassification.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"imageClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,uBAAuB,GAAG,QAAQ,GAAG;IAChD;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,yBAAyB,GAAG,8BAA8B,EAAE,CAAC;AAEzE;;;GAGG;AACH,wBAAsB,mBAAmB,CACxC,IAAI,EAAE,uBAAuB,EAC7B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,yBAAyB,CAAC,CAWpC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageSegmentation.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageSegmentation.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..e00f4e6dc2f699c34b0f0c68ccca8c3c8e3c8af3 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageSegmentation.d.ts @@ -0,0 +1,28 @@ +import type { 
BaseArgs, Options } from "../../types"; +export type ImageSegmentationArgs = BaseArgs & { + /** + * Binary image data + */ + data: Blob | ArrayBuffer; +}; +export interface ImageSegmentationOutputValue { + /** + * The label for the class (model specific) of a segment. + */ + label: string; + /** + * A str (base64 str of a single channel black-and-white img) representing the mask of a segment. + */ + mask: string; + /** + * A float that represents how likely it is that the detected object belongs to the given class. + */ + score: number; +} +export type ImageSegmentationOutput = ImageSegmentationOutputValue[]; +/** + * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects. + * Recommended model: facebook/detr-resnet-50-panoptic + */ +export declare function imageSegmentation(args: ImageSegmentationArgs, options?: Options): Promise; +//# sourceMappingURL=imageSegmentation.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageSegmentation.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageSegmentation.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..96066229ab6c789219190ccb648379d124925d9b --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageSegmentation.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"imageSegmentation.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageSegmentation.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG;IAC9C;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,4BAA4B;IAC5C;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,uBAAuB,GAAG,4BAA4B,EAAE,CAAC;AAErE;;;GAGG;AACH,wBAAsB,iBAAiB,CACtC,IAAI,EAAE,qBAAqB,EAC3B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,uBAAuB,CAAC,CAYlC"} \ No newline at 
end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToImage.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToImage.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..0eec6d1027491ec6a55da6eaeee79600f27cf9cc --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToImage.d.ts @@ -0,0 +1,55 @@ +import type { BaseArgs, Options } from "../../types"; +export type ImageToImageArgs = BaseArgs & { + /** + * The initial image condition + * + **/ + inputs: Blob | ArrayBuffer; + parameters?: { + /** + * The text prompt to guide the image generation. + */ + prompt?: string; + /** + * strengh param only works for SD img2img and alt diffusion img2img models + * Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + * will be used as a starting point, adding more noise to it the larger the `strength`. The number of + * denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + * be maximum and the denoising process will run for the full number of iterations specified in + * `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + **/ + strength?: number; + /** + * An optional negative prompt for the image generation + */ + negative_prompt?: string; + /** + * The height in pixels of the generated image + */ + height?: number; + /** + * The width in pixels of the generated image + */ + width?: number; + /** + * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. + */ + num_inference_steps?: number; + /** + * Guidance scale: Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
+ */ + guidance_scale?: number; + /** + * guess_mode only works for ControlNet models, defaults to False In this mode, the ControlNet encoder will try best to recognize the content of the input image even if + * you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. + */ + guess_mode?: boolean; + }; +}; +export type ImageToImageOutput = Blob; +/** + * This task reads some text input and outputs an image. + * Recommended model: lllyasviel/sd-controlnet-depth + */ +export declare function imageToImage(args: ImageToImageArgs, options?: Options): Promise; +//# sourceMappingURL=imageToImage.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToImage.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToImage.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..cbbed19b95a8f81d48cac65be2f5390b75e69b7a --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToImage.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"imageToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageToImage.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAe,MAAM,aAAa,CAAC;AAIlE,MAAM,MAAM,gBAAgB,GAAG,QAAQ,GAAG;IACzC;;;QAGI;IACJ,MAAM,EAAE,IAAI,GAAG,WAAW,CAAC;IAE3B,UAAU,CAAC,EAAE;QACZ;;WAEG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB;;;;;;;YAOI;QACJ,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB;;WAEG;QACH,eAAe,CAAC,EAAE,MAAM,CAAC;QACzB;;WAEG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;QACf;;WAEG;QACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;QAC7B;;WAEG;QACH,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB;;;WAGG;QACH,UAAU,CAAC,EAAE,OAAO,CAAC;KACrB,CAAC;CACF,CAAC;AAEF,MAAM,MAAM,kBAAkB,GAAG,IAAI,CAAC;AAEtC;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,kBAAkB,CAAC,CAyBzG"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToText.d.ts 
b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToText.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..42b82d45c0feeb749d8f7cd4cfb2e9dbd1c3c1a0 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToText.d.ts @@ -0,0 +1,18 @@ +import type { BaseArgs, Options } from "../../types"; +export type ImageToTextArgs = BaseArgs & { + /** + * Binary image data + */ + data: Blob | ArrayBuffer; +}; +export interface ImageToTextOutput { + /** + * The generated caption + */ + generated_text: string; +} +/** + * This task reads some image input and outputs the text caption. + */ +export declare function imageToText(args: ImageToTextArgs, options?: Options): Promise; +//# sourceMappingURL=imageToText.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToText.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToText.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..aa15daf17b91e4cb34d8ff054a6860ddaf78e0f9 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/imageToText.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"imageToText.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageToText.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG;IACxC;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,cAAc,EAAE,MAAM,CAAC;CACvB;AAED;;GAEG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAatG"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/objectDetection.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/objectDetection.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..c9971e216d5f4c2d2f40ce9ef08079cd07092202 --- /dev/null +++ 
b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/objectDetection.d.ts @@ -0,0 +1,33 @@ +import type { BaseArgs, Options } from "../../types"; +export type ObjectDetectionArgs = BaseArgs & { + /** + * Binary image data + */ + data: Blob | ArrayBuffer; +}; +export interface ObjectDetectionOutputValue { + /** + * A dict (with keys [xmin,ymin,xmax,ymax]) representing the bounding box of a detected object. + */ + box: { + xmax: number; + xmin: number; + ymax: number; + ymin: number; + }; + /** + * The label for the class (model specific) of a detected object. + */ + label: string; + /** + * A float that represents how likely it is that the detected object belongs to the given class. + */ + score: number; +} +export type ObjectDetectionOutput = ObjectDetectionOutputValue[]; +/** + * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects. + * Recommended model: facebook/detr-resnet-50 + */ +export declare function objectDetection(args: ObjectDetectionArgs, options?: Options): Promise; +//# sourceMappingURL=objectDetection.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/objectDetection.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/objectDetection.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..bf6e2726b1c40fc752cf8f527dc0de149ae59415 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/objectDetection.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"objectDetection.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/objectDetection.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,mBAAmB,GAAG,QAAQ,GAAG;IAC5C;;OAEG;IACH,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;CACzB,CAAC;AAEF,MAAM,WAAW,0BAA0B;IAC1C;;OAEG;IACH,GAAG,EAAE;QACJ,IAAI,EAAE,MAAM,CAAC;QACb,IAAI,EAAE,MAAM,CAAC;QACb,IAAI,EAAE,MAAM,CAAC;QACb,IAAI,EAAE,MAAM,CAAC;KACb,CAAC;IACF;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,qBAAqB,GAAG,0BAA0B,EAAE,CAAC;AAEjE;;;GAGG;AACH,wBAAsB,eAAe,CAAC,IAAI,EAAE,mBAAmB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,qBAAqB,CAAC,CAsBlH"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/textToImage.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/textToImage.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..6130d2ec86402bf45b37db763ea70bc4992aca49 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/textToImage.d.ts @@ -0,0 +1,36 @@ +import type { BaseArgs, Options } from "../../types"; +export type TextToImageArgs = BaseArgs & { + /** + * The text to generate an image from + */ + inputs: string; + parameters?: { + /** + * An optional negative prompt for the image generation + */ + negative_prompt?: string; + /** + * The height in pixels of the generated image + */ + height?: number; + /** + * The width in pixels of the generated image + */ + width?: number; + /** + * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. + */ + num_inference_steps?: number; + /** + * Guidance scale: Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
+ */ + guidance_scale?: number; + }; +}; +export type TextToImageOutput = Blob; +/** + * This task reads some text input and outputs an image. + * Recommended model: stabilityai/stable-diffusion-2 + */ +export declare function textToImage(args: TextToImageArgs, options?: Options): Promise; +//# sourceMappingURL=textToImage.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/textToImage.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/textToImage.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..d7b8f28739a7b8d9ff45fe2be6281f1068b8f429 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/textToImage.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"textToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/textToImage.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG;IACxC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IAEf,UAAU,CAAC,EAAE;QACZ;;WAEG;QACH,eAAe,CAAC,EAAE,MAAM,CAAC;QACzB;;WAEG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;QACf;;WAEG;QACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;QAC7B;;WAEG;QACH,cAAc,CAAC,EAAE,MAAM,CAAC;KACxB,CAAC;CACF,CAAC;AAEF,MAAM,MAAM,iBAAiB,GAAG,IAAI,CAAC;AAErC;;;GAGG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAUtG"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/zeroShotImageClassification.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/zeroShotImageClassification.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..862ea62a29621291633cae43b2f0fd1ee138bfa2 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/zeroShotImageClassification.d.ts @@ -0,0 +1,26 @@ +import type { BaseArgs, Options } from "../../types"; +export type ZeroShotImageClassificationArgs = BaseArgs & 
{ + inputs: { + /** + * Binary image data + */ + image: Blob | ArrayBuffer; + }; + parameters: { + /** + * A list of strings that are potential classes for inputs. (max 10) + */ + candidate_labels: string[]; + }; +}; +export interface ZeroShotImageClassificationOutputValue { + label: string; + score: number; +} +export type ZeroShotImageClassificationOutput = ZeroShotImageClassificationOutputValue[]; +/** + * Classify an image to specified classes. + * Recommended model: openai/clip-vit-large-patch14-336 + */ +export declare function zeroShotImageClassification(args: ZeroShotImageClassificationArgs, options?: Options): Promise; +//# sourceMappingURL=zeroShotImageClassification.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..e1e8b59bed447933868070e3e930f063ace8494d --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"zeroShotImageClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/zeroShotImageClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAKrD,MAAM,MAAM,+BAA+B,GAAG,QAAQ,GAAG;IACxD,MAAM,EAAE;QACP;;WAEG;QACH,KAAK,EAAE,IAAI,GAAG,WAAW,CAAC;KAC1B,CAAC;IACF,UAAU,EAAE;QACX;;WAEG;QACH,gBAAgB,EAAE,MAAM,EAAE,CAAC;KAC3B,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,sCAAsC;IACtD,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,MAAM,CAAC;CACd;AAED,MAAM,MAAM,iCAAiC,GAAG,sCAAsC,EAAE,CAAC;AAEzF;;;GAGG;AACH,wBAAsB,2BAA2B,CAChD,IAAI,EAAE,+BAA+B,EACrC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,iCAAiC,CAAC,CAsB5C"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/index.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/index.d.ts new 
file mode 100644 index 0000000000000000000000000000000000000000..c6a16f32fcde3ad5162037a413183ed234a2dce3 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/index.d.ts @@ -0,0 +1,32 @@ +export * from "./custom/request"; +export * from "./custom/streamingRequest"; +export * from "./audio/audioClassification"; +export * from "./audio/automaticSpeechRecognition"; +export * from "./audio/textToSpeech"; +export * from "./audio/audioToAudio"; +export * from "./cv/imageClassification"; +export * from "./cv/imageSegmentation"; +export * from "./cv/imageToText"; +export * from "./cv/objectDetection"; +export * from "./cv/textToImage"; +export * from "./cv/imageToImage"; +export * from "./cv/zeroShotImageClassification"; +export * from "./nlp/featureExtraction"; +export * from "./nlp/fillMask"; +export * from "./nlp/questionAnswering"; +export * from "./nlp/sentenceSimilarity"; +export * from "./nlp/summarization"; +export * from "./nlp/tableQuestionAnswering"; +export * from "./nlp/textClassification"; +export * from "./nlp/textGeneration"; +export * from "./nlp/textGenerationStream"; +export * from "./nlp/tokenClassification"; +export * from "./nlp/translation"; +export * from "./nlp/zeroShotClassification"; +export * from "./nlp/chatCompletion"; +export * from "./nlp/chatCompletionStream"; +export * from "./multimodal/documentQuestionAnswering"; +export * from "./multimodal/visualQuestionAnswering"; +export * from "./tabular/tabularRegression"; +export * from "./tabular/tabularClassification"; +//# sourceMappingURL=index.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/index.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/index.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..2f38482f94c1bed870b1224a6e6fe7cab3ce8444 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/index.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/tasks/index.ts"],"names":[],"mappings":"AACA,cAAc,kBAAkB,CAAC;AACjC,cAAc,2BAA2B,CAAC;AAG1C,cAAc,6BAA6B,CAAC;AAC5C,cAAc,oCAAoC,CAAC;AACnD,cAAc,sBAAsB,CAAC;AACrC,cAAc,sBAAsB,CAAC;AAGrC,cAAc,0BAA0B,CAAC;AACzC,cAAc,wBAAwB,CAAC;AACvC,cAAc,kBAAkB,CAAC;AACjC,cAAc,sBAAsB,CAAC;AACrC,cAAc,kBAAkB,CAAC;AACjC,cAAc,mBAAmB,CAAC;AAClC,cAAc,kCAAkC,CAAC;AAGjD,cAAc,yBAAyB,CAAC;AACxC,cAAc,gBAAgB,CAAC;AAC/B,cAAc,yBAAyB,CAAC;AACxC,cAAc,0BAA0B,CAAC;AACzC,cAAc,qBAAqB,CAAC;AACpC,cAAc,8BAA8B,CAAC;AAC7C,cAAc,0BAA0B,CAAC;AACzC,cAAc,sBAAsB,CAAC;AACrC,cAAc,4BAA4B,CAAC;AAC3C,cAAc,2BAA2B,CAAC;AAC1C,cAAc,mBAAmB,CAAC;AAClC,cAAc,8BAA8B,CAAC;AAC7C,cAAc,sBAAsB,CAAC;AACrC,cAAc,4BAA4B,CAAC;AAG3C,cAAc,wCAAwC,CAAC;AACvD,cAAc,sCAAsC,CAAC;AAGrD,cAAc,6BAA6B,CAAC;AAC5C,cAAc,iCAAiC,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..f70ac007011f20eb41a164c68c03991761f75856 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts @@ -0,0 +1,35 @@ +import type { BaseArgs, Options } from "../../types"; +export type DocumentQuestionAnsweringArgs = BaseArgs & { + inputs: { + /** + * Raw image + * + * You can use native `File` in browsers, or `new Blob([buffer])` in node, or for a base64 image `new Blob([btoa(base64String)])`, or even `await (await fetch('...)).blob()` + **/ + image: Blob | ArrayBuffer; + question: string; + }; +}; +export interface DocumentQuestionAnsweringOutput { + /** + * A string that’s the answer within the document. + */ + answer: string; + /** + * ? + */ + end?: number; + /** + * A float that represents how likely that the answer is correct + */ + score?: number; + /** + * ? 
+ */ + start?: number; +} +/** + * Answers a question on a document image. Recommended model: impira/layoutlm-document-qa. + */ +export declare function documentQuestionAnswering(args: DocumentQuestionAnsweringArgs, options?: Options): Promise; +//# sourceMappingURL=documentQuestionAnswering.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..87f5c49d6e0b1ab9eadf9c368fb8140a570c206f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"documentQuestionAnswering.d.ts","sourceRoot":"","sources":["../../../../src/tasks/multimodal/documentQuestionAnswering.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAMrD,MAAM,MAAM,6BAA6B,GAAG,QAAQ,GAAG;IACtD,MAAM,EAAE;QACP;;;;YAII;QACJ,KAAK,EAAE,IAAI,GAAG,WAAW,CAAC;QAC1B,QAAQ,EAAE,MAAM,CAAC;KACjB,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,+BAA+B;IAC/C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,GAAG,CAAC,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;CACf;AAED;;GAEG;AACH,wBAAsB,yBAAyB,CAC9C,IAAI,EAAE,6BAA6B,EACnC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,+BAA+B,CAAC,CA4B1C"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..b2c042bc401d4548df31b273972ec7af507f101f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts @@ -0,0 +1,27 @@ +import type { BaseArgs, Options } from 
"../../types"; +export type VisualQuestionAnsweringArgs = BaseArgs & { + inputs: { + /** + * Raw image + * + * You can use native `File` in browsers, or `new Blob([buffer])` in node, or for a base64 image `new Blob([btoa(base64String)])`, or even `await (await fetch('...)).blob()` + **/ + image: Blob | ArrayBuffer; + question: string; + }; +}; +export interface VisualQuestionAnsweringOutput { + /** + * A string that’s the answer to a visual question. + */ + answer: string; + /** + * Answer correctness score. + */ + score: number; +} +/** + * Answers a question on an image. Recommended model: dandelin/vilt-b32-finetuned-vqa. + */ +export declare function visualQuestionAnswering(args: VisualQuestionAnsweringArgs, options?: Options): Promise; +//# sourceMappingURL=visualQuestionAnswering.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..fc569cf62366569cdb1b33431baa650b5082c0bb --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"visualQuestionAnswering.d.ts","sourceRoot":"","sources":["../../../../src/tasks/multimodal/visualQuestionAnswering.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAe,MAAM,aAAa,CAAC;AAIlE,MAAM,MAAM,2BAA2B,GAAG,QAAQ,GAAG;IACpD,MAAM,EAAE;QACP;;;;YAII;QACJ,KAAK,EAAE,IAAI,GAAG,WAAW,CAAC;QAC1B,QAAQ,EAAE,MAAM,CAAC;KACjB,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,6BAA6B;IAC7C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED;;GAEG;AACH,wBAAsB,uBAAuB,CAC5C,IAAI,EAAE,2BAA2B,EACjC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,6BAA6B,CAAC,CAwBxC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletion.d.ts 
b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletion.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..e0c555b1374cadc63f97d62310b5a21ea9bca8f8 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletion.d.ts @@ -0,0 +1,7 @@ +import type { BaseArgs, Options } from "../../types"; +import type { ChatCompletionInput, ChatCompletionOutput } from "@huggingface/tasks"; +/** + * Use the chat completion endpoint to generate a response to a prompt, using OpenAI message completion API no stream + */ +export declare function chatCompletion(args: BaseArgs & ChatCompletionInput, options?: Options): Promise; +//# sourceMappingURL=chatCompletion.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletion.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletion.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..a61e02e40e8a698811998130848be6a365d11019 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletion.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"chatCompletion.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/chatCompletion.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEpF;;GAEG;AAEH,wBAAsB,cAAc,CACnC,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,oBAAoB,CAAC,CAmB/B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletionStream.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletionStream.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..c4d129d6ff3671f4d5b9e0a45bc69d70904a6c87 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletionStream.d.ts @@ -0,0 +1,7 @@ 
+import type { BaseArgs, Options } from "../../types"; +import type { ChatCompletionInput, ChatCompletionStreamOutput } from "@huggingface/tasks"; +/** + * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time + */ +export declare function chatCompletionStream(args: BaseArgs & ChatCompletionInput, options?: Options): AsyncGenerator; +//# sourceMappingURL=chatCompletionStream.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletionStream.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletionStream.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..650be14992ffda8a70670cd11c00fc1c51e56c15 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/chatCompletionStream.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"chatCompletionStream.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/chatCompletionStream.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,mBAAmB,EAAE,0BAA0B,EAAE,MAAM,oBAAoB,CAAC;AAE1F;;GAEG;AACH,wBAAuB,oBAAoB,CAC1C,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,cAAc,CAAC,0BAA0B,CAAC,CAM5C"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/featureExtraction.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/featureExtraction.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..63115a5856f1ffa77dcac13ce4b901287983272e --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/featureExtraction.d.ts @@ -0,0 +1,19 @@ +import type { BaseArgs, Options } from "../../types"; +export type FeatureExtractionArgs = BaseArgs & { + /** + * The inputs is a string or a list of strings to get the features from. 
+ * + * inputs: "That is a happy person", + * + */ + inputs: string | string[]; +}; +/** + * Returned values are a multidimensional array of floats (dimension depending on if you sent a string or a list of string, and if the automatic reduction, usually mean_pooling for instance was applied for you or not. This should be explained on the model's README). + */ +export type FeatureExtractionOutput = (number | number[] | number[][])[]; +/** + * This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search. + */ +export declare function featureExtraction(args: FeatureExtractionArgs, options?: Options): Promise; +//# sourceMappingURL=featureExtraction.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/featureExtraction.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/featureExtraction.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..f123037d2641088fc992c46a1d4eca54cc9e434d --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/featureExtraction.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"featureExtraction.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/featureExtraction.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG;IAC9C;;;;;OAKG;IACH,MAAM,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC1B,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,uBAAuB,GAAG,CAAC,MAAM,GAAG,MAAM,EAAE,GAAG,MAAM,EAAE,EAAE,CAAC,EAAE,CAAC;AAEzE;;GAEG;AACH,wBAAsB,iBAAiB,CACtC,IAAI,EAAE,qBAAqB,EAC3B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,uBAAuB,CAAC,CAyBlC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/fillMask.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/fillMask.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..d0be32e9e429f1dc710a1d5e4e986f5ffc51686e 
--- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/fillMask.d.ts @@ -0,0 +1,27 @@ +import type { BaseArgs, Options } from "../../types"; +export type FillMaskArgs = BaseArgs & { + inputs: string; +}; +export type FillMaskOutput = { + /** + * The probability for this token. + */ + score: number; + /** + * The actual sequence of tokens that ran against the model (may contain special tokens) + */ + sequence: string; + /** + * The id of the token + */ + token: number; + /** + * The string representation of the token + */ + token_str: string; +}[]; +/** + * Tries to fill in a hole with a missing word (token to be precise). That’s the base task for BERT models. + */ +export declare function fillMask(args: FillMaskArgs, options?: Options): Promise; +//# sourceMappingURL=fillMask.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/fillMask.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/fillMask.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..75724a706157bdbd2acb9a50b9cd04c7c037b095 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/fillMask.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"fillMask.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/fillMask.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,YAAY,GAAG,QAAQ,GAAG;IACrC,MAAM,EAAE,MAAM,CAAC;CACf,CAAC;AAEF,MAAM,MAAM,cAAc,GAAG;IAC5B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,SAAS,EAAE,MAAM,CAAC;CAClB,EAAE,CAAC;AAEJ;;GAEG;AACH,wBAAsB,QAAQ,CAAC,IAAI,EAAE,YAAY,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,cAAc,CAAC,CAoB7F"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/questionAnswering.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/questionAnswering.d.ts 
new file mode 100644 index 0000000000000000000000000000000000000000..32f55dbc6ec8ee2282d734eb4ec78ba5d1cab403 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/questionAnswering.d.ts @@ -0,0 +1,30 @@ +import type { BaseArgs, Options } from "../../types"; +export type QuestionAnsweringArgs = BaseArgs & { + inputs: { + context: string; + question: string; + }; +}; +export interface QuestionAnsweringOutput { + /** + * A string that’s the answer within the text. + */ + answer: string; + /** + * The index (string wise) of the stop of the answer within context. + */ + end: number; + /** + * A float that represents how likely that the answer is correct + */ + score: number; + /** + * The index (string wise) of the start of the answer within context. + */ + start: number; +} +/** + * Want to have a nice know-it-all bot that can answer any question?. Recommended model: deepset/roberta-base-squad2 + */ +export declare function questionAnswering(args: QuestionAnsweringArgs, options?: Options): Promise; +//# sourceMappingURL=questionAnswering.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/questionAnswering.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/questionAnswering.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..647e090117f21ce8dca13a7a71a3a684108f68c6 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/questionAnswering.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"questionAnswering.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/questionAnswering.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG;IAC9C,MAAM,EAAE;QACP,OAAO,EAAE,MAAM,CAAC;QAChB,QAAQ,EAAE,MAAM,CAAC;KACjB,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,uBAAuB;IACvC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,GAAG,EAAE,MAAM,CAAC;IACZ;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED;;GAEG;AACH,wBAAsB,iBAAiB,CACtC,IAAI,EAAE,qBAAqB,EAC3B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,uBAAuB,CAAC,CAgBlC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/sentenceSimilarity.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/sentenceSimilarity.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..8c51e062041d9c85b825b35fba560d92cd56f5dd --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/sentenceSimilarity.d.ts @@ -0,0 +1,19 @@ +import type { BaseArgs, Options } from "../../types"; +export type SentenceSimilarityArgs = BaseArgs & { + /** + * The inputs vary based on the model. + * + * For example when using sentence-transformers/paraphrase-xlm-r-multilingual-v1 the inputs will have a `source_sentence` string and + * a `sentences` array of strings + */ + inputs: Record | Record[]; +}; +/** + * Returned values are a list of floats + */ +export type SentenceSimilarityOutput = number[]; +/** + * Calculate the semantic similarity between one text and a list of other sentences by comparing their embeddings. 
+ */ +export declare function sentenceSimilarity(args: SentenceSimilarityArgs, options?: Options): Promise; +//# sourceMappingURL=sentenceSimilarity.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8f1ab3df6e24034b227613a52e744a3835367d1e --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"sentenceSimilarity.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/sentenceSimilarity.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,sBAAsB,GAAG,QAAQ,GAAG;IAC/C;;;;;OAKG;IACH,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,EAAE,CAAC;CAC5D,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,wBAAwB,GAAG,MAAM,EAAE,CAAC;AAEhD;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,sBAAsB,EAC5B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,wBAAwB,CAAC,CAanC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/summarization.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/summarization.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..a8b64d4f70bfdb21d963f9f194174641ea501040 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/summarization.d.ts @@ -0,0 +1,48 @@ +import type { BaseArgs, Options } from "../../types"; +export type SummarizationArgs = BaseArgs & { + /** + * A string to be summarized + */ + inputs: string; + parameters?: { + /** + * (Default: None). Integer to define the maximum length in tokens of the output summary. + */ + max_length?: number; + /** + * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. 
Network can cause some overhead so it will be a soft limit. + */ + max_time?: number; + /** + * (Default: None). Integer to define the minimum length in tokens of the output summary. + */ + min_length?: number; + /** + * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. + */ + repetition_penalty?: number; + /** + * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. + */ + temperature?: number; + /** + * (Default: None). Integer to define the top tokens considered within the sample operation to create new text. + */ + top_k?: number; + /** + * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. + */ + top_p?: number; + }; +}; +export interface SummarizationOutput { + /** + * The string after translation + */ + summary_text: string; +} +/** + * This task is well known to summarize longer text into shorter text. Be careful, some models have a maximum length of input. That means that the summary cannot handle full books for instance. Be careful when choosing your model. 
+ */ +export declare function summarization(args: SummarizationArgs, options?: Options): Promise; +//# sourceMappingURL=summarization.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/summarization.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/summarization.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..68ef512013adb6825f93319ffe4a8b9b5cad6c1f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/summarization.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"summarization.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/summarization.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,iBAAiB,GAAG,QAAQ,GAAG;IAC1C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf,UAAU,CAAC,EAAE;QACZ;;WAEG;QACH,UAAU,CAAC,EAAE,MAAM,CAAC;QACpB;;WAEG;QACH,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB;;WAEG;QACH,UAAU,CAAC,EAAE,MAAM,CAAC;QACpB;;WAEG;QACH,kBAAkB,CAAC,EAAE,MAAM,CAAC;QAC5B;;WAEG;QACH,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;QACf;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;KACf,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,mBAAmB;IACnC;;OAEG;IACH,YAAY,EAAE,MAAM,CAAC;CACrB;AAED;;GAEG;AACH,wBAAsB,aAAa,CAAC,IAAI,EAAE,iBAAiB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,mBAAmB,CAAC,CAU5G"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tableQuestionAnswering.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tableQuestionAnswering.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..2bcc95669986b93b79ba5990479b1a9cffb970d8 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tableQuestionAnswering.d.ts @@ -0,0 +1,36 @@ +import type { BaseArgs, Options } from "../../types"; +export type TableQuestionAnsweringArgs = BaseArgs & { + inputs: { + /** + * The query in plain text that you want to ask the table + 
*/ + query: string; + /** + * A table of data represented as a dict of list where entries are headers and the lists are all the values, all lists must have the same size. + */ + table: Record; + }; +}; +export interface TableQuestionAnsweringOutput { + /** + * The aggregator used to get the answer + */ + aggregator: string; + /** + * The plaintext answer + */ + answer: string; + /** + * A list of coordinates of the cells contents + */ + cells: string[]; + /** + * a list of coordinates of the cells referenced in the answer + */ + coordinates: number[][]; +} +/** + * Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain english! Recommended model: google/tapas-base-finetuned-wtq. + */ +export declare function tableQuestionAnswering(args: TableQuestionAnsweringArgs, options?: Options): Promise; +//# sourceMappingURL=tableQuestionAnswering.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tableQuestionAnswering.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tableQuestionAnswering.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..d1f3de30a01afffb0b941a965b4efdf64f23adc3 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tableQuestionAnswering.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"tableQuestionAnswering.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/tableQuestionAnswering.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,0BAA0B,GAAG,QAAQ,GAAG;IACnD,MAAM,EAAE;QACP;;WAEG;QACH,KAAK,EAAE,MAAM,CAAC;QACd;;WAEG;QACH,KAAK,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC;KAChC,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,4BAA4B;IAC5C;;OAEG;IACH,UAAU,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB;;OAEG;IACH,WAAW,EAAE,MAAM,EAAE,EAAE,CAAC;CACxB;AAED;;GAEG;AACH,wBAAsB,sBAAsB,CAC3C,IAAI,EAAE,0BAA0B,EAChC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,4BAA4B,CAAC,CAkBvC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textClassification.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textClassification.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..047c345b59b3f140dfa529c8577ecc8292ed8a0f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textClassification.d.ts @@ -0,0 +1,22 @@ +import type { BaseArgs, Options } from "../../types"; +export type TextClassificationArgs = BaseArgs & { + /** + * A string to be classified + */ + inputs: string; +}; +export type TextClassificationOutput = { + /** + * The label for the class (model specific) + */ + label: string; + /** + * A floats that represents how likely is that the text belongs to this class. + */ + score: number; +}[]; +/** + * Usually used for sentiment-analysis this will output the likelihood of classes of an input. 
Recommended model: distilbert-base-uncased-finetuned-sst-2-english + */ +export declare function textClassification(args: TextClassificationArgs, options?: Options): Promise; +//# sourceMappingURL=textClassification.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textClassification.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textClassification.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..00fcc76651012c720c6228f8ae73d57d79b410e7 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textClassification.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"textClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/textClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,sBAAsB,GAAG,QAAQ,GAAG;IAC/C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;CACf,CAAC;AAEF,MAAM,MAAM,wBAAwB,GAAG;IACtC;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd,EAAE,CAAC;AAEJ;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,sBAAsB,EAC5B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,wBAAwB,CAAC,CAanC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGeneration.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGeneration.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..c23b636368b4f702f65a289eb8851b10dbd0d847 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGeneration.d.ts @@ -0,0 +1,8 @@ +import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks"; +import type { BaseArgs, Options } from "../../types"; +export type { TextGenerationInput, TextGenerationOutput }; +/** + * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with). 
+ */ +export declare function textGeneration(args: BaseArgs & TextGenerationInput, options?: Options): Promise; +//# sourceMappingURL=textGeneration.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGeneration.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGeneration.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..33f8cf4511924c9c63e3f12ac1e81dd3d0761553 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGeneration.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"textGeneration.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/textGeneration.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEpF,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAIrD,YAAY,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,CAAC;AAE1D;;GAEG;AACH,wBAAsB,cAAc,CACnC,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,oBAAoB,CAAC,CAY/B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGenerationStream.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGenerationStream.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..4022b94836c20c2cb1408b584b413880fddd362c --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGenerationStream.d.ts @@ -0,0 +1,81 @@ +import type { TextGenerationInput } from "@huggingface/tasks"; +import type { BaseArgs, Options } from "../../types"; +export interface TextGenerationStreamToken { + /** Token ID from the model tokenizer */ + id: number; + /** Token text */ + text: string; + /** Logprob */ + logprob: number; + /** + * Is the token a special token + * Can be used to ignore tokens when concatenating + */ + special: boolean; +} +export interface TextGenerationStreamPrefillToken { + /** Token ID from the model tokenizer */ + id: number; + /** 
Token text */ + text: string; + /** + * Logprob + * Optional since the logprob of the first token cannot be computed + */ + logprob?: number; +} +export interface TextGenerationStreamBestOfSequence { + /** Generated text */ + generated_text: string; + /** Generation finish reason */ + finish_reason: TextGenerationStreamFinishReason; + /** Number of generated tokens */ + generated_tokens: number; + /** Sampling seed if sampling was activated */ + seed?: number; + /** Prompt tokens */ + prefill: TextGenerationStreamPrefillToken[]; + /** Generated tokens */ + tokens: TextGenerationStreamToken[]; +} +export type TextGenerationStreamFinishReason = +/** number of generated tokens == `max_new_tokens` */ +"length" +/** the model generated its end of sequence token */ + | "eos_token" +/** the model generated a text included in `stop_sequences` */ + | "stop_sequence"; +export interface TextGenerationStreamDetails { + /** Generation finish reason */ + finish_reason: TextGenerationStreamFinishReason; + /** Number of generated tokens */ + generated_tokens: number; + /** Sampling seed if sampling was activated */ + seed?: number; + /** Prompt tokens */ + prefill: TextGenerationStreamPrefillToken[]; + /** */ + tokens: TextGenerationStreamToken[]; + /** Additional sequences when using the `best_of` parameter */ + best_of_sequences?: TextGenerationStreamBestOfSequence[]; +} +export interface TextGenerationStreamOutput { + index?: number; + /** Generated token, one at a time */ + token: TextGenerationStreamToken; + /** + * Complete generated text + * Only available when the generation is finished + */ + generated_text: string | null; + /** + * Generation details + * Only available when the generation is finished + */ + details: TextGenerationStreamDetails | null; +} +/** + * Use to continue text from a prompt. 
Same as `textGeneration` but returns generator that can be read one token at a time + */ +export declare function textGenerationStream(args: BaseArgs & TextGenerationInput, options?: Options): AsyncGenerator; +//# sourceMappingURL=textGenerationStream.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGenerationStream.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGenerationStream.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..1a9a9965df5b8084f4f4e39fdf93f4b158557c8f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/textGenerationStream.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"textGenerationStream.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/textGenerationStream.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAC9D,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,WAAW,yBAAyB;IACzC,wCAAwC;IACxC,EAAE,EAAE,MAAM,CAAC;IACX,iBAAiB;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,cAAc;IACd,OAAO,EAAE,MAAM,CAAC;IAChB;;;OAGG;IACH,OAAO,EAAE,OAAO,CAAC;CACjB;AAED,MAAM,WAAW,gCAAgC;IAChD,wCAAwC;IACxC,EAAE,EAAE,MAAM,CAAC;IACX,iBAAiB;IACjB,IAAI,EAAE,MAAM,CAAC;IACb;;;OAGG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,kCAAkC;IAClD,qBAAqB;IACrB,cAAc,EAAE,MAAM,CAAC;IACvB,+BAA+B;IAC/B,aAAa,EAAE,gCAAgC,CAAC;IAChD,iCAAiC;IACjC,gBAAgB,EAAE,MAAM,CAAC;IACzB,8CAA8C;IAC9C,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,oBAAoB;IACpB,OAAO,EAAE,gCAAgC,EAAE,CAAC;IAC5C,uBAAuB;IACvB,MAAM,EAAE,yBAAyB,EAAE,CAAC;CACpC;AAED,MAAM,MAAM,gCAAgC;AAC3C,qDAAqD;AACnD,QAAQ;AACV,oDAAoD;GAClD,WAAW;AACb,8DAA8D;GAC5D,eAAe,CAAC;AAEnB,MAAM,WAAW,2BAA2B;IAC3C,+BAA+B;IAC/B,aAAa,EAAE,gCAAgC,CAAC;IAChD,iCAAiC;IACjC,gBAAgB,EAAE,MAAM,CAAC;IACzB,8CAA8C;IAC9C,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,oBAAoB;IACpB,OAAO,EAAE,gCAAgC,EAAE,CAAC;IAC5C,MAAM;IACN,MAAM,EAAE,yBAAyB,EAAE,CAAC;IACpC,8DAA8D;IAC9D,iBAAiB,CAAC,EAAE,kCAAkC,EAAE,CAAC;CACzD;AAED,MAAM,WAAW,0BAA0B;
IAC1C,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,qCAAqC;IACrC,KAAK,EAAE,yBAAyB,CAAC;IACjC;;;OAGG;IACH,cAAc,EAAE,MAAM,GAAG,IAAI,CAAC;IAC9B;;;OAGG;IACH,OAAO,EAAE,2BAA2B,GAAG,IAAI,CAAC;CAC5C;AAED;;GAEG;AACH,wBAAuB,oBAAoB,CAC1C,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,cAAc,CAAC,0BAA0B,CAAC,CAK5C"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tokenClassification.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tokenClassification.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..19a3e55ebea33fc05fee2c2235697e46ab3e4a0f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tokenClassification.d.ts @@ -0,0 +1,51 @@ +import type { BaseArgs, Options } from "../../types"; +export type TokenClassificationArgs = BaseArgs & { + /** + * A string to be classified + */ + inputs: string; + parameters?: { + /** + * (Default: simple). There are several aggregation strategies: + * + * none: Every token gets classified without further aggregation. + * + * simple: Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar). + * + * first: Same as the simple strategy except words cannot end up with different tags. Words will use the tag of the first token when there is ambiguity. + * + * average: Same as the simple strategy except words cannot end up with different tags. Scores are averaged across tokens and then the maximum label is applied. + * + * max: Same as the simple strategy except words cannot end up with different tags. Word entity will be the token with the maximum score. + */ + aggregation_strategy?: "none" | "simple" | "first" | "average" | "max"; + }; +}; +export interface TokenClassificationOutputValue { + /** + * The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times. + */ + end: number; + /** + * The type for the entity being recognized (model specific). 
+ */ + entity_group: string; + /** + * How likely the entity was recognized. + */ + score: number; + /** + * The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times. + */ + start: number; + /** + * The string that was captured + */ + word: string; +} +export type TokenClassificationOutput = TokenClassificationOutputValue[]; +/** + * Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english + */ +export declare function tokenClassification(args: TokenClassificationArgs, options?: Options): Promise; +//# sourceMappingURL=tokenClassification.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tokenClassification.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tokenClassification.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..e294475892bf5cd32931e211e8224f5a35d3737d --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/tokenClassification.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"tokenClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/tokenClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAIrD,MAAM,MAAM,uBAAuB,GAAG,QAAQ,GAAG;IAChD;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf,UAAU,CAAC,EAAE;QACZ;;;;;;;;;;;;WAYG;QACH,oBAAoB,CAAC,EAAE,MAAM,GAAG,QAAQ,GAAG,OAAO,GAAG,SAAS,GAAG,KAAK,CAAC;KACvE,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,GAAG,EAAE,MAAM,CAAC;IACZ;;OAEG;IACH,YAAY,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;CACb;AAED,MAAM,MAAM,yBAAyB,GAAG,8BAA8B,EAAE,CAAC;AAEzE;;GAEG;AACH,wBAAsB,mBAAmB,CACxC,IAAI,EAAE,uBAAuB,EAC7B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,yBAAyB,CAAC,CAuBpC"} \ No newline at 
end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/translation.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/translation.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..1f92216db1ac2f4cfee2071f77063e7f04b57500 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/translation.d.ts @@ -0,0 +1,19 @@ +import type { BaseArgs, Options } from "../../types"; +export type TranslationArgs = BaseArgs & { + /** + * A string to be translated + */ + inputs: string | string[]; +}; +export interface TranslationOutputValue { + /** + * The string after translation + */ + translation_text: string; +} +export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[]; +/** + * This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en. + */ +export declare function translation(args: TranslationArgs, options?: Options): Promise; +//# sourceMappingURL=translation.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/translation.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/translation.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8c810731e8b7893932d0d6dbc60cc7effb0b7775 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/translation.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"translation.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/translation.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG;IACxC;;OAEG;IACH,MAAM,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC1B,CAAC;AAEF,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,gBAAgB,EAAE,MAAM,CAAC;CACzB;AAED,MAAM,MAAM,iBAAiB,GAAG,sBAAsB,GAAG,sBAAsB,EAAE,CAAC;AAElF;;GAEG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAUtG"} \ No 
newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/zeroShotClassification.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/zeroShotClassification.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..28e98c0fb1003e6ae0da1e491e52c60feefa8773 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/zeroShotClassification.d.ts @@ -0,0 +1,28 @@ +import type { BaseArgs, Options } from "../../types"; +export type ZeroShotClassificationArgs = BaseArgs & { + /** + * a string or list of strings + */ + inputs: string | string[]; + parameters: { + /** + * a list of strings that are potential classes for inputs. (max 10 candidate_labels, for more, simply run multiple requests, results are going to be misleading if using too many candidate_labels anyway. If you want to keep the exact same, you can simply run multi_label=True and do the scaling on your end. + */ + candidate_labels: string[]; + /** + * (Default: false) Boolean that is set to True if classes can overlap + */ + multi_label?: boolean; + }; +}; +export interface ZeroShotClassificationOutputValue { + labels: string[]; + scores: number[]; + sequence: string; +} +export type ZeroShotClassificationOutput = ZeroShotClassificationOutputValue[]; +/** + * This task is super useful to try out classification with zero code, you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli. 
+ */ +export declare function zeroShotClassification(args: ZeroShotClassificationArgs, options?: Options): Promise; +//# sourceMappingURL=zeroShotClassification.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/zeroShotClassification.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/zeroShotClassification.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8edbc913d1ae01053fdedbf80a46cb16146d50a1 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/nlp/zeroShotClassification.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"zeroShotClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/zeroShotClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAIrD,MAAM,MAAM,0BAA0B,GAAG,QAAQ,GAAG;IACnD;;OAEG;IACH,MAAM,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;IAC1B,UAAU,EAAE;QACX;;WAEG;QACH,gBAAgB,EAAE,MAAM,EAAE,CAAC;QAC3B;;WAEG;QACH,WAAW,CAAC,EAAE,OAAO,CAAC;KACtB,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,iCAAiC;IACjD,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,MAAM,4BAA4B,GAAG,iCAAiC,EAAE,CAAC;AAE/E;;GAEG;AACH,wBAAsB,sBAAsB,CAC3C,IAAI,EAAE,0BAA0B,EAChC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,4BAA4B,CAAC,CAqBvC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularClassification.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularClassification.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..5fdfec224a3dbc04e33c76ec830bedb07b7b0375 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularClassification.d.ts @@ -0,0 +1,20 @@ +import type { BaseArgs, Options } from "../../types"; +export type TabularClassificationArgs = BaseArgs & { + inputs: { + /** + * A table of data represented as a dict of list where entries are headers 
and the lists are all the values, all lists must have the same size. + */ + data: Record; + }; +}; +/** + * A list of predicted labels for each row + */ +export type TabularClassificationOutput = number[]; +/** + * Predicts target label for a given set of features in tabular form. + * Typically, you will want to train a classification model on your training data and use it with your new data of the same format. + * Example model: vvmnnnkv/wine-quality + */ +export declare function tabularClassification(args: TabularClassificationArgs, options?: Options): Promise; +//# sourceMappingURL=tabularClassification.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularClassification.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularClassification.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..dc1eb46485580eeca95462ac9356d164e2667ca8 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularClassification.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"tabularClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/tabular/tabularClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,yBAAyB,GAAG,QAAQ,GAAG;IAClD,MAAM,EAAE;QACP;;WAEG;QACH,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC;KAC/B,CAAC;CACF,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,2BAA2B,GAAG,MAAM,EAAE,CAAC;AAEnD;;;;GAIG;AACH,wBAAsB,qBAAqB,CAC1C,IAAI,EAAE,yBAAyB,EAC/B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,2BAA2B,CAAC,CAUtC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularRegression.d.ts b/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularRegression.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..37c39125b8ca89d920333b278d1b18e970129cf0 --- /dev/null +++ 
b/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularRegression.d.ts @@ -0,0 +1,20 @@ +import type { BaseArgs, Options } from "../../types"; +export type TabularRegressionArgs = BaseArgs & { + inputs: { + /** + * A table of data represented as a dict of list where entries are headers and the lists are all the values, all lists must have the same size. + */ + data: Record; + }; +}; +/** + * a list of predicted values for each row + */ +export type TabularRegressionOutput = number[]; +/** + * Predicts target value for a given set of features in tabular form. + * Typically, you will want to train a regression model on your training data and use it with your new data of the same format. + * Example model: scikit-learn/Fish-Weight + */ +export declare function tabularRegression(args: TabularRegressionArgs, options?: Options): Promise; +//# sourceMappingURL=tabularRegression.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularRegression.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularRegression.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..cefad99d0c129ce8227f23347a09ed93cf561e05 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/tasks/tabular/tabularRegression.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"tabularRegression.d.ts","sourceRoot":"","sources":["../../../../src/tasks/tabular/tabularRegression.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG;IAC9C,MAAM,EAAE;QACP;;WAEG;QACH,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC;KAC/B,CAAC;CACF,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,uBAAuB,GAAG,MAAM,EAAE,CAAC;AAE/C;;;;GAIG;AACH,wBAAsB,iBAAiB,CACtC,IAAI,EAAE,qBAAqB,EAC3B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,uBAAuB,CAAC,CAUlC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/types.d.ts 
b/data/node_modules/@huggingface/inference/dist/src/types.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..0f885f368a01ae7268c5cbcfa1bca7560574f7b8 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/types.d.ts @@ -0,0 +1,69 @@ +import type { PipelineType } from "@huggingface/tasks"; +import type { ChatCompletionInput } from "@huggingface/tasks"; +export interface Options { + /** + * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true. + */ + retry_on_error?: boolean; + /** + * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query. + */ + use_cache?: boolean; + /** + * (Default: false). Boolean. Do not load the model if it's not already available. + */ + dont_load_model?: boolean; + /** + * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least). + */ + use_gpu?: boolean; + /** + * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places. + */ + wait_for_model?: boolean; + /** + * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers. + */ + fetch?: typeof fetch; + /** + * Abort Controller signal to use for request interruption. + */ + signal?: AbortSignal; + /** + * (Default: "same-origin"). String | Boolean. Credentials to use for the request. 
If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all. + */ + includeCredentials?: string | boolean; +} +export type InferenceTask = Exclude; +export interface BaseArgs { + /** + * The access token to use. Without it, you'll get rate-limited quickly. + * + * Can be created for free in hf.co/settings/token + */ + accessToken?: string; + /** + * The model to use. + * + * If not specified, will call huggingface.co/api/tasks to get the default model for the task. + * + * /!\ Legacy behavior allows this to be an URL, but this is deprecated and will be removed in the future. + * Use the `endpointUrl` parameter instead. + */ + model?: string; + /** + * The URL of the endpoint to use. If not specified, will call huggingface.co/api/tasks to get the default endpoint for the task. + * + * If specified, will use this URL instead of the default one. + */ + endpointUrl?: string; +} +export type RequestArgs = BaseArgs & ({ + data: Blob | ArrayBuffer; +} | { + inputs: unknown; +} | ChatCompletionInput) & { + parameters?: Record; + accessToken?: string; +}; +//# sourceMappingURL=types.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/types.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/types.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..bdf5297ed3f5df24ba08a77b6e020d6e552e5c1c --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/types.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AACvD,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAE9D,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,MAAM,WAAW,QAAQ;IACxB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IAEf;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CAAC;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAAG;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GAAG,mBAAmB,CAAC,GAAG;IAC5E,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/base64FromBytes.d.ts b/data/node_modules/@huggingface/inference/dist/src/utils/base64FromBytes.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..4cbee918664971dbe986ef21375120c44924979a --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/base64FromBytes.d.ts @@ -0,0 +1,2 @@ +export declare function base64FromBytes(arr: Uint8Array): string; +//# sourceMappingURL=base64FromBytes.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/base64FromBytes.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/utils/base64FromBytes.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..6f58a178c9da0fcdd6e1fd27282e2b1bc612489e --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/base64FromBytes.d.ts.map @@ -0,0 
+1 @@ +{"version":3,"file":"base64FromBytes.d.ts","sourceRoot":"","sources":["../../../src/utils/base64FromBytes.ts"],"names":[],"mappings":"AAAA,wBAAgB,eAAe,CAAC,GAAG,EAAE,UAAU,GAAG,MAAM,CAUvD"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/distributive-omit.d.ts b/data/node_modules/@huggingface/inference/dist/src/utils/distributive-omit.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..51d799f9ea8ede3413e0c6ed2fc5bdb3934e2efe --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/distributive-omit.d.ts @@ -0,0 +1,9 @@ +/** + * This allows omitting keys from objects inside unions, without merging the individual components of the union. + */ +type Omit_ = Omit>; +export type DistributiveOmit = T extends unknown ? keyof Omit_ extends never ? never : { + [P in keyof Omit_]: Omit_[P]; +} : never; +export {}; +//# sourceMappingURL=distributive-omit.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/distributive-omit.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/utils/distributive-omit.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..deb6a7aee905b44fb14b8dc0e2c7e05da03c8c58 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/distributive-omit.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"distributive-omit.d.ts","sourceRoot":"","sources":["../../../src/utils/distributive-omit.ts"],"names":[],"mappings":"AAEA;;GAEG;AAEH,KAAK,KAAK,CAAC,CAAC,EAAE,CAAC,IAAI,IAAI,CAAC,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAEhD,MAAM,MAAM,gBAAgB,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,SAAS,OAAO,GACnD,MAAM,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,SAAS,KAAK,GAC9B,KAAK,GACL;KAAG,CAAC,IAAI,MAAM,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;CAAE,GAC7C,KAAK,CAAC"} \ No newline at end of file diff --git 
a/data/node_modules/@huggingface/inference/dist/src/utils/isBackend.d.ts b/data/node_modules/@huggingface/inference/dist/src/utils/isBackend.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..069d8727ffbc8bb2c70bae99fe1413db4bf0e7a2 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/isBackend.d.ts @@ -0,0 +1,2 @@ +export declare const isBackend: boolean; +//# sourceMappingURL=isBackend.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/isBackend.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/utils/isBackend.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..56291c33b8f5490fd2cc3d194f43bd791f599280 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/isBackend.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"isBackend.d.ts","sourceRoot":"","sources":["../../../src/utils/isBackend.ts"],"names":[],"mappings":"AAKA,eAAO,MAAM,SAAS,SAA6B,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/isFrontend.d.ts b/data/node_modules/@huggingface/inference/dist/src/utils/isFrontend.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..f48051727350420cea4a870a0df61fd629c8f47a --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/isFrontend.d.ts @@ -0,0 +1,2 @@ +export declare const isFrontend: boolean; +//# sourceMappingURL=isFrontend.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/isFrontend.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/utils/isFrontend.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..eb97bede136119de84b957123d53836f5de4ea2f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/isFrontend.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"isFrontend.d.ts","sourceRoot":"","sources":["../../../src/utils/isFrontend.ts"],"names":[],"mappings":"AAEA,eAAO,MAAM,UAAU,SAAa,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/omit.d.ts b/data/node_modules/@huggingface/inference/dist/src/utils/omit.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..505630a1e566878746e9b7ad8c20a8e7b6c3cda0 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/omit.d.ts @@ -0,0 +1,5 @@ +/** + * Return copy of object, omitting blocklisted array of props + */ +export declare function omit(o: T, props: K[] | K): Pick>; +//# sourceMappingURL=omit.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/omit.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/utils/omit.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8043245b125d1bdde89af25489c71b5a8bf21289 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/omit.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"omit.d.ts","sourceRoot":"","sources":["../../../src/utils/omit.ts"],"names":[],"mappings":"AAGA;;GAEG;AACH,wBAAgB,IAAI,CAAC,CAAC,SAAS,MAAM,EAAE,CAAC,SAAS,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE,GAAG,CAAC,GAAG,IAAI,CAAC,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAI5G"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/pick.d.ts b/data/node_modules/@huggingface/inference/dist/src/utils/pick.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..fa60bf48ad2dd91409d967173baf4274ea3329b5 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/pick.d.ts @@ -0,0 +1,5 @@ +/** + * Return copy of object, only keeping allowlisted properties. 
+ */ +export declare function pick(o: T, props: K[] | ReadonlyArray): Pick; +//# sourceMappingURL=pick.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/pick.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/utils/pick.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..0ea81b4da845f65ccce0ceb28d60fff5e33ba7bf --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/pick.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"pick.d.ts","sourceRoot":"","sources":["../../../src/utils/pick.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,wBAAgB,IAAI,CAAC,CAAC,EAAE,CAAC,SAAS,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE,GAAG,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAS1F"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/toArray.d.ts b/data/node_modules/@huggingface/inference/dist/src/utils/toArray.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..e02ed23088723b50e1b144a1f51809fb88f6a818 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/toArray.d.ts @@ -0,0 +1,2 @@ +export declare function toArray(obj: T): T extends unknown[] ? 
T : T[]; +//# sourceMappingURL=toArray.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/toArray.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/utils/toArray.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..a2a6b94859813f05b8d5be84c5bddd566978d330 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/toArray.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"toArray.d.ts","sourceRoot":"","sources":["../../../src/utils/toArray.ts"],"names":[],"mappings":"AAAA,wBAAgB,OAAO,CAAC,CAAC,EAAE,GAAG,EAAE,CAAC,GAAG,CAAC,SAAS,OAAO,EAAE,GAAG,CAAC,GAAG,CAAC,EAAE,CAKhE"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/typedInclude.d.ts b/data/node_modules/@huggingface/inference/dist/src/utils/typedInclude.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..18fcc761228d7347ac431d408cd967cdb1f60c47 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/typedInclude.d.ts @@ -0,0 +1,2 @@ +export declare function typedInclude(arr: readonly T[], v: V): v is T; +//# sourceMappingURL=typedInclude.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/utils/typedInclude.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/utils/typedInclude.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..4512a06ceb2dca2c2c3207dc352faef3e3099512 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/utils/typedInclude.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"typedInclude.d.ts","sourceRoot":"","sources":["../../../src/utils/typedInclude.ts"],"names":[],"mappings":"AAAA,wBAAgB,YAAY,CAAC,CAAC,EAAE,CAAC,SAAS,CAAC,EAAE,GAAG,EAAE,SAAS,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,CAE5E"} \ No newline at end of file diff --git 
a/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.d.ts b/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..6b8db283a6572d370b8435a72fd12930ec505f73 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.d.ts @@ -0,0 +1,69 @@ +/** + This file is a part of fetch-event-source package (as of v2.0.1) + https://github.com/Azure/fetch-event-source/blob/v2.0.1/src/parse.ts + + Full package can be used after it is made compatible with nodejs: + https://github.com/Azure/fetch-event-source/issues/20 + + Below is the fetch-event-source package license: + + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + + */ +/** + * Represents a message sent in an event stream + * https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format + */ +export interface EventSourceMessage { + /** The event ID to set the EventSource object's last event ID value. */ + id: string; + /** A string identifying the type of event described. */ + event: string; + /** The event data */ + data: string; + /** The reconnection interval (in milliseconds) to wait before retrying the connection */ + retry?: number; +} +/** + * Converts a ReadableStream into a callback pattern. + * @param stream The input ReadableStream. + * @param onChunk A function that will be called on each new byte chunk in the stream. + * @returns {Promise} A promise that will be resolved when the stream closes. + */ +export declare function getBytes(stream: ReadableStream, onChunk: (arr: Uint8Array) => void): Promise; +/** + * Parses arbitary byte chunks into EventSource line buffers. + * Each line should be of the format "field: value" and ends with \r, \n, or \r\n. + * @param onLine A function that will be called on each new EventSource line. + * @returns A function that should be called for each incoming byte chunk. + */ +export declare function getLines(onLine: (line: Uint8Array, fieldLength: number) => void): (arr: Uint8Array) => void; +/** + * Parses line buffers into EventSourceMessages. + * @param onId A function that will be called on each `id` field. + * @param onRetry A function that will be called on each `retry` field. + * @param onMessage A function that will be called on each message. + * @returns A function that should be called for each incoming line buffer. 
+ */ +export declare function getMessages(onId: (id: string) => void, onRetry: (retry: number) => void, onMessage?: (msg: EventSourceMessage) => void): (line: Uint8Array, fieldLength: number) => void; +//# sourceMappingURL=parse.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..1a9c56d7252d365ab676018b8c0b7a56689fd5fa --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"parse.d.ts","sourceRoot":"","sources":["../../../../src/vendor/fetch-event-source/parse.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA+BG;AAEH;;;GAGG;AACH,MAAM,WAAW,kBAAkB;IAC/B,wEAAwE;IACxE,EAAE,EAAE,MAAM,CAAC;IACX,wDAAwD;IACxD,KAAK,EAAE,MAAM,CAAC;IACd,qBAAqB;IACrB,IAAI,EAAE,MAAM,CAAC;IACb,yFAAyF;IACzF,KAAK,CAAC,EAAE,MAAM,CAAC;CAClB;AAED;;;;;GAKG;AACH,wBAAsB,QAAQ,CAAC,MAAM,EAAE,cAAc,CAAC,UAAU,CAAC,EAAE,OAAO,EAAE,CAAC,GAAG,EAAE,UAAU,KAAK,IAAI,iBAMpG;AASD;;;;;GAKG;AACH,wBAAgB,QAAQ,CAAC,MAAM,EAAE,CAAC,IAAI,EAAE,UAAU,EAAE,WAAW,EAAE,MAAM,KAAK,IAAI,SAO/C,UAAU,UA4D1C;AAED;;;;;;GAMG;AACH,wBAAgB,WAAW,CACvB,IAAI,EAAE,CAAC,EAAE,EAAE,MAAM,KAAK,IAAI,EAC1B,OAAO,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,EAChC,SAAS,CAAC,EAAE,CAAC,GAAG,EAAE,kBAAkB,KAAK,IAAI,UAMhB,UAAU,eAAe,MAAM,UAmC/D"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.spec.d.ts b/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.spec.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..7d3b140b1faf504609cfb71b4270aabd39c18e61 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.spec.d.ts @@ -0,0 +1,2 @@ +export {}; +//# 
sourceMappingURL=parse.spec.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.spec.d.ts.map b/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.spec.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..07a554a541e244e159bced295ee04f226af8b60f --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/src/vendor/fetch-event-source/parse.spec.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"parse.spec.d.ts","sourceRoot":"","sources":["../../../../src/vendor/fetch-event-source/parse.spec.ts"],"names":[],"mappings":""} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/test/HfInference.spec.d.ts b/data/node_modules/@huggingface/inference/dist/test/HfInference.spec.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..932b6b83d75fc056145f478ccb3e3539805838e3 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/test/HfInference.spec.d.ts @@ -0,0 +1,2 @@ +import "./vcr"; +//# sourceMappingURL=HfInference.spec.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/test/HfInference.spec.d.ts.map b/data/node_modules/@huggingface/inference/dist/test/HfInference.spec.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..c12568827422ee1c580b0891268837720ee05eeb --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/test/HfInference.spec.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"HfInference.spec.d.ts","sourceRoot":"","sources":["../../test/HfInference.spec.ts"],"names":[],"mappings":"AAKA,OAAO,OAAO,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/test/expect-closeto.d.ts b/data/node_modules/@huggingface/inference/dist/test/expect-closeto.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..74ad38d96e9de587f1995c9e93361e25fdf65a90 
--- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/test/expect-closeto.d.ts @@ -0,0 +1,2 @@ +export {}; +//# sourceMappingURL=expect-closeto.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/test/expect-closeto.d.ts.map b/data/node_modules/@huggingface/inference/dist/test/expect-closeto.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..42b92b5515b6e1ba9e9b3da559f43436889be288 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/test/expect-closeto.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"expect-closeto.d.ts","sourceRoot":"","sources":["../../test/expect-closeto.ts"],"names":[],"mappings":""} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/test/test-files.d.ts b/data/node_modules/@huggingface/inference/dist/test/test-files.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..08ed0e2f7d49c662c04a860e75b48cc2fba28abf --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/test/test-files.d.ts @@ -0,0 +1,2 @@ +export declare const readTestFile: (filename: string) => Uint8Array; +//# sourceMappingURL=test-files.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/test/test-files.d.ts.map b/data/node_modules/@huggingface/inference/dist/test/test-files.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..db30f1f09cfc2b0f50ae1421710ad49e5a7e2b54 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/test/test-files.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"test-files.d.ts","sourceRoot":"","sources":["../../test/test-files.ts"],"names":[],"mappings":"AAGA,eAAO,MAAM,YAAY,aAAc,MAAM,KAAG,UAK/C,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/test/vcr.d.ts b/data/node_modules/@huggingface/inference/dist/test/vcr.d.ts new file mode 100644 index 
0000000000000000000000000000000000000000..0466501c53233eddf72302a84472e8c879eeb403 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/test/vcr.d.ts @@ -0,0 +1,2 @@ +export {}; +//# sourceMappingURL=vcr.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/dist/test/vcr.d.ts.map b/data/node_modules/@huggingface/inference/dist/test/vcr.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..4c56143226e9bd7dc90de7633fcaad3287ee2625 --- /dev/null +++ b/data/node_modules/@huggingface/inference/dist/test/vcr.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"vcr.d.ts","sourceRoot":"","sources":["../../test/vcr.ts"],"names":[],"mappings":""} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/package.json b/data/node_modules/@huggingface/inference/package.json new file mode 100644 index 0000000000000000000000000000000000000000..b5bd1634e26e5ce63c8e97b8559cace0403934fb --- /dev/null +++ b/data/node_modules/@huggingface/inference/package.json @@ -0,0 +1,59 @@ +{ + "name": "@huggingface/inference", + "version": "2.8.0", + "packageManager": "pnpm@8.10.5", + "license": "MIT", + "author": "Tim Mikeladze ", + "description": "Typescript wrapper for the Hugging Face Inference Endpoints & Inference API", + "repository": { + "type": "git", + "url": "https://github.com/huggingface/huggingface.js.git" + }, + "publishConfig": { + "access": "public" + }, + "keywords": [ + "hugging face", + "hugging face typescript", + "huggingface", + "huggingface-inference-api", + "huggingface-inference-api-typescript", + "inference", + "ai" + ], + "engines": { + "node": ">=18" + }, + "files": [ + "dist", + "src" + ], + "source": "src/index.ts", + "types": "./dist/src/index.d.ts", + "main": "./dist/index.cjs", + "module": "./dist/index.js", + "exports": { + "types": "./dist/src/index.d.ts", + "require": "./dist/index.cjs", + "import": "./dist/index.js" + }, + "type": "module", + "dependencies": { + 
"@huggingface/tasks": "^0.11.2" + }, + "devDependencies": { + "@types/node": "18.13.0" + }, + "resolutions": {}, + "scripts": { + "build": "tsup src/index.ts --format cjs,esm --clean && tsc --emitDeclarationOnly --declaration", + "dts": "tsx scripts/generate-dts.ts && tsc --noEmit dist/index.d.ts", + "lint": "eslint --quiet --fix --ext .cjs,.ts .", + "lint:check": "eslint --ext .cjs,.ts .", + "format": "prettier --write .", + "format:check": "prettier --check .", + "test": "vitest run --config vitest.config.mts", + "test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.mts", + "check": "tsc" + } +} \ No newline at end of file diff --git a/data/node_modules/@huggingface/inference/src/HfInference.ts b/data/node_modules/@huggingface/inference/src/HfInference.ts new file mode 100644 index 0000000000000000000000000000000000000000..6cc268cf294ff571f5ebd9b5a5b5c365b6614232 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/HfInference.ts @@ -0,0 +1,68 @@ +import * as tasks from "./tasks"; +import type { Options, RequestArgs } from "./types"; +import type { DistributiveOmit } from "./utils/distributive-omit"; + +/* eslint-disable @typescript-eslint/no-empty-interface */ +/* eslint-disable @typescript-eslint/no-unsafe-declaration-merging */ + +type Task = typeof tasks; + +type TaskWithNoAccessToken = { + [key in keyof Task]: ( + args: DistributiveOmit[0], "accessToken">, + options?: Parameters[1] + ) => ReturnType; +}; + +type TaskWithNoAccessTokenNoEndpointUrl = { + [key in keyof Task]: ( + args: DistributiveOmit[0], "accessToken" | "endpointUrl">, + options?: Parameters[1] + ) => ReturnType; +}; + +export class HfInference { + private readonly accessToken: string; + private readonly defaultOptions: Options; + + constructor(accessToken = "", defaultOptions: Options = {}) { + this.accessToken = accessToken; + this.defaultOptions = defaultOptions; + + for (const [name, fn] of Object.entries(tasks)) { + 
Object.defineProperty(this, name, { + enumerable: false, + value: (params: RequestArgs, options: Options) => + // eslint-disable-next-line @typescript-eslint/no-explicit-any + fn({ ...params, accessToken } as any, { ...defaultOptions, ...options }), + }); + } + } + + /** + * Returns copy of HfInference tied to a specified endpoint. + */ + public endpoint(endpointUrl: string): HfInferenceEndpoint { + return new HfInferenceEndpoint(endpointUrl, this.accessToken, this.defaultOptions); + } +} + +export class HfInferenceEndpoint { + constructor(endpointUrl: string, accessToken = "", defaultOptions: Options = {}) { + accessToken; + defaultOptions; + + for (const [name, fn] of Object.entries(tasks)) { + Object.defineProperty(this, name, { + enumerable: false, + value: (params: RequestArgs, options: Options) => + // eslint-disable-next-line @typescript-eslint/no-explicit-any + fn({ ...params, accessToken, endpointUrl } as any, { ...defaultOptions, ...options }), + }); + } + } +} + +export interface HfInference extends TaskWithNoAccessToken {} + +export interface HfInferenceEndpoint extends TaskWithNoAccessTokenNoEndpointUrl {} diff --git a/data/node_modules/@huggingface/inference/src/index.ts b/data/node_modules/@huggingface/inference/src/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..566b5c1abacdce892f1776f174bca7d1d852c7dd --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/index.ts @@ -0,0 +1,4 @@ +export { HfInference, HfInferenceEndpoint } from "./HfInference"; +export { InferenceOutputError } from "./lib/InferenceOutputError"; +export * from "./types"; +export * from "./tasks"; diff --git a/data/node_modules/@huggingface/inference/src/lib/InferenceOutputError.ts b/data/node_modules/@huggingface/inference/src/lib/InferenceOutputError.ts new file mode 100644 index 0000000000000000000000000000000000000000..0765b99944b97ae261b5bd931574382b063307c3 --- /dev/null +++ 
b/data/node_modules/@huggingface/inference/src/lib/InferenceOutputError.ts @@ -0,0 +1,8 @@ +export class InferenceOutputError extends TypeError { + constructor(message: string) { + super( + `Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.` + ); + this.name = "InferenceOutputError"; + } +} diff --git a/data/node_modules/@huggingface/inference/src/lib/getDefaultTask.ts b/data/node_modules/@huggingface/inference/src/lib/getDefaultTask.ts new file mode 100644 index 0000000000000000000000000000000000000000..3149998d6e44939173ffc1a083e508e8a337a627 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/lib/getDefaultTask.ts @@ -0,0 +1,61 @@ +import { isUrl } from "./isUrl"; + +/** + * We want to make calls to the huggingface hub the least possible, eg if + * someone is calling Inference Endpoints 1000 times per second, we don't want + * to make 1000 calls to the hub to get the task name. + */ +const taskCache = new Map(); +const CACHE_DURATION = 10 * 60 * 1000; +const MAX_CACHE_ITEMS = 1000; +export const HF_HUB_URL = "https://huggingface.co"; + +export interface DefaultTaskOptions { + fetch?: typeof fetch; +} + +/** + * Get the default task. Use a LRU cache of 1000 items with 10 minutes expiration + * to avoid making too many calls to the HF hub. + * + * @returns The default task for the model, or `null` if it was impossible to get it + */ +export async function getDefaultTask( + model: string, + accessToken: string | undefined, + options?: DefaultTaskOptions +): Promise { + if (isUrl(model)) { + return null; + } + + const key = `${model}:${accessToken}`; + let cachedTask = taskCache.get(key); + + if (cachedTask && cachedTask.date < new Date(Date.now() - CACHE_DURATION)) { + taskCache.delete(key); + cachedTask = undefined; + } + + if (cachedTask === undefined) { + const modelTask = await (options?.fetch ?? 
fetch)(`${HF_HUB_URL}/api/models/${model}?expand[]=pipeline_tag`, { + headers: accessToken ? { Authorization: `Bearer ${accessToken}` } : {}, + }) + .then((resp) => resp.json()) + .then((json) => json.pipeline_tag) + .catch(() => null); + + if (!modelTask) { + return null; + } + + cachedTask = { task: modelTask, date: new Date() }; + taskCache.set(key, { task: modelTask, date: new Date() }); + + if (taskCache.size > MAX_CACHE_ITEMS) { + taskCache.delete(taskCache.keys().next().value); + } + } + + return cachedTask.task; +} diff --git a/data/node_modules/@huggingface/inference/src/lib/isUrl.ts b/data/node_modules/@huggingface/inference/src/lib/isUrl.ts new file mode 100644 index 0000000000000000000000000000000000000000..b97c250e061b634daa175a8ee710e537c77f139c --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/lib/isUrl.ts @@ -0,0 +1,3 @@ +export function isUrl(modelOrUrl: string): boolean { + return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/"); +} diff --git a/data/node_modules/@huggingface/inference/src/lib/makeRequestOptions.ts b/data/node_modules/@huggingface/inference/src/lib/makeRequestOptions.ts new file mode 100644 index 0000000000000000000000000000000000000000..50eff494347362c9f0f1d4194a114b1ea47c1ee9 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/lib/makeRequestOptions.ts @@ -0,0 +1,126 @@ +import type { InferenceTask, Options, RequestArgs } from "../types"; +import { omit } from "../utils/omit"; +import { HF_HUB_URL } from "./getDefaultTask"; +import { isUrl } from "./isUrl"; + +const HF_INFERENCE_API_BASE_URL = "https://api-inference.huggingface.co"; + +/** + * Loaded from huggingface.co/api/tasks if needed + */ +let tasks: Record | null = null; + +/** + * Helper that prepares request arguments + */ +export async function makeRequestOptions( + args: RequestArgs & { + data?: Blob | ArrayBuffer; + stream?: boolean; + }, + options?: Options & { + /** When a model can be used for multiple tasks, and we want to 
run a non-default task */ + forceTask?: string | InferenceTask; + /** To load default model if needed */ + taskHint?: InferenceTask; + chatCompletion?: boolean; + } +): Promise<{ url: string; info: RequestInit }> { + const { accessToken, endpointUrl, ...otherArgs } = args; + let { model } = args; + const { + forceTask: task, + includeCredentials, + taskHint, + wait_for_model, + use_cache, + dont_load_model, + chatCompletion, + } = options ?? {}; + + const headers: Record = {}; + if (accessToken) { + headers["Authorization"] = `Bearer ${accessToken}`; + } + + if (!model && !tasks && taskHint) { + const res = await fetch(`${HF_HUB_URL}/api/tasks`); + + if (res.ok) { + tasks = await res.json(); + } + } + + if (!model && tasks && taskHint) { + const taskInfo = tasks[taskHint]; + if (taskInfo) { + model = taskInfo.models[0].id; + } + } + + if (!model) { + throw new Error("No model provided, and no default model found for this task"); + } + + const binary = "data" in args && !!args.data; + + if (!binary) { + headers["Content-Type"] = "application/json"; + } + + if (wait_for_model) { + headers["X-Wait-For-Model"] = "true"; + } + if (use_cache === false) { + headers["X-Use-Cache"] = "false"; + } + if (dont_load_model) { + headers["X-Load-Model"] = "0"; + } + + let url = (() => { + if (endpointUrl && isUrl(model)) { + throw new TypeError("Both model and endpointUrl cannot be URLs"); + } + if (isUrl(model)) { + console.warn("Using a model URL is deprecated, please use the `endpointUrl` parameter instead"); + return model; + } + if (endpointUrl) { + return endpointUrl; + } + if (task) { + return `${HF_INFERENCE_API_BASE_URL}/pipeline/${task}/${model}`; + } + + return `${HF_INFERENCE_API_BASE_URL}/models/${model}`; + })(); + + if (chatCompletion && !url.endsWith("/chat/completions")) { + url += "/v1/chat/completions"; + } + + /** + * For edge runtimes, leave 'credentials' undefined, otherwise cloudflare workers will error + */ + let credentials: RequestCredentials | undefined; 
+ if (typeof includeCredentials === "string") { + credentials = includeCredentials as RequestCredentials; + } else if (includeCredentials === true) { + credentials = "include"; + } + + const info: RequestInit = { + headers, + method: "POST", + body: binary + ? args.data + : JSON.stringify({ + ...(otherArgs.model && isUrl(otherArgs.model) ? omit(otherArgs, "model") : otherArgs), + }), + ...(credentials && { credentials }), + signal: options?.signal, + }; + + return { url, info }; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/audio/audioClassification.ts b/data/node_modules/@huggingface/inference/src/tasks/audio/audioClassification.ts new file mode 100644 index 0000000000000000000000000000000000000000..5d7e274e5a722a1846f574313bfcb6e4dbe38d97 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/audio/audioClassification.ts @@ -0,0 +1,44 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type AudioClassificationArgs = BaseArgs & { + /** + * Binary audio data + */ + data: Blob | ArrayBuffer; +}; + +export interface AudioClassificationOutputValue { + /** + * The label for the class (model specific) + */ + label: string; + + /** + * A float that represents how likely it is that the audio file belongs to this class. + */ + score: number; +} + +export type AudioClassificationReturn = AudioClassificationOutputValue[]; + +/** + * This task reads some audio input and outputs the likelihood of classes. 
+ * Recommended model: superb/hubert-large-superb-er + */ +export async function audioClassification( + args: AudioClassificationArgs, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "audio-classification", + }); + const isValidOutput = + Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/audio/audioToAudio.ts b/data/node_modules/@huggingface/inference/src/tasks/audio/audioToAudio.ts new file mode 100644 index 0000000000000000000000000000000000000000..c339cdf61a535a25c1a5b80e30d271d61dcb8a3f --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/audio/audioToAudio.ts @@ -0,0 +1,49 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type AudioToAudioArgs = BaseArgs & { + /** + * Binary audio data + */ + data: Blob | ArrayBuffer; +}; + +export interface AudioToAudioOutputValue { + /** + * The label for the audio output (model specific) + */ + label: string; + + /** + * Base64 encoded audio output. + */ + blob: string; + + /** + * Content-type for blob, e.g. audio/flac + */ + "content-type": string; +} + +export type AudioToAudioReturn = AudioToAudioOutputValue[]; + +/** + * This task reads some audio input and outputs one or multiple audio files. + * Example model: speechbrain/sepformer-wham does audio source separation. 
+ */ +export async function audioToAudio(args: AudioToAudioArgs, options?: Options): Promise { + const res = await request(args, { + ...options, + taskHint: "audio-to-audio", + }); + const isValidOutput = + Array.isArray(res) && + res.every( + (x) => typeof x.label === "string" && typeof x.blob === "string" && typeof x["content-type"] === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, blob: string, content-type: string}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/audio/automaticSpeechRecognition.ts b/data/node_modules/@huggingface/inference/src/tasks/audio/automaticSpeechRecognition.ts new file mode 100644 index 0000000000000000000000000000000000000000..600d5b6c740706e276c1b9bf8fee7f68574e58b1 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/audio/automaticSpeechRecognition.ts @@ -0,0 +1,36 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type AutomaticSpeechRecognitionArgs = BaseArgs & { + /** + * Binary audio data + */ + data: Blob | ArrayBuffer; +}; + +export interface AutomaticSpeechRecognitionOutput { + /** + * The text that was recognized from the audio + */ + text: string; +} + +/** + * This task reads some audio input and outputs the said words within the audio files. 
+ * Recommended model (english language): facebook/wav2vec2-large-960h-lv60-self + */ +export async function automaticSpeechRecognition( + args: AutomaticSpeechRecognitionArgs, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "automatic-speech-recognition", + }); + const isValidOutput = typeof res?.text === "string"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected {text: string}"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/audio/textToSpeech.ts b/data/node_modules/@huggingface/inference/src/tasks/audio/textToSpeech.ts new file mode 100644 index 0000000000000000000000000000000000000000..3c466110f5d5713b47025c9e0b2ae9cd69ce4bfd --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/audio/textToSpeech.ts @@ -0,0 +1,28 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type TextToSpeechArgs = BaseArgs & { + /** + * The text to generate an audio from + */ + inputs: string; +}; + +export type TextToSpeechOutput = Blob; + +/** + * This task synthesize an audio of a voice pronouncing a given text. 
+ * Recommended model: espnet/kan-bayashi_ljspeech_vits + */ +export async function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise { + const res = await request(args, { + ...options, + taskHint: "text-to-speech", + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/custom/request.ts b/data/node_modules/@huggingface/inference/src/tasks/custom/request.ts new file mode 100644 index 0000000000000000000000000000000000000000..05a31a237231d4933ef2f3b04e98435ca1fe3da6 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/custom/request.ts @@ -0,0 +1,46 @@ +import type { InferenceTask, Options, RequestArgs } from "../../types"; +import { makeRequestOptions } from "../../lib/makeRequestOptions"; + +/** + * Primitive to make custom calls to Inference Endpoints + */ +export async function request( + args: RequestArgs, + options?: Options & { + /** When a model can be used for multiple tasks, and we want to run a non-default task */ + task?: string | InferenceTask; + /** To load default model if needed */ + taskHint?: InferenceTask; + /** Is chat completion compatible */ + chatCompletion?: boolean; + } +): Promise { + const { url, info } = await makeRequestOptions(args, options); + const response = await (options?.fetch ?? fetch)(url, info); + + if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) { + return request(args, { + ...options, + wait_for_model: true, + }); + } + + if (!response.ok) { + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + const output = await response.json(); + if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) { + throw new Error(`Server ${args.model} does not seem to support chat completion. 
Error: ${output.error}`); + } + if (output.error) { + throw new Error(output.error); + } + } + throw new Error("An error occurred while fetching the blob"); + } + + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + return await response.json(); + } + + return (await response.blob()) as T; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/custom/streamingRequest.ts b/data/node_modules/@huggingface/inference/src/tasks/custom/streamingRequest.ts new file mode 100644 index 0000000000000000000000000000000000000000..7f41798118779e67fbacdb0dc5259fe73b723a4f --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/custom/streamingRequest.ts @@ -0,0 +1,90 @@ +import type { InferenceTask, Options, RequestArgs } from "../../types"; +import { makeRequestOptions } from "../../lib/makeRequestOptions"; +import type { EventSourceMessage } from "../../vendor/fetch-event-source/parse"; +import { getLines, getMessages } from "../../vendor/fetch-event-source/parse"; + +/** + * Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator + */ +export async function* streamingRequest( + args: RequestArgs, + options?: Options & { + /** When a model can be used for multiple tasks, and we want to run a non-default task */ + task?: string | InferenceTask; + /** To load default model if needed */ + taskHint?: InferenceTask; + /** Is chat completion compatible */ + chatCompletion?: boolean; + } +): AsyncGenerator { + const { url, info } = await makeRequestOptions({ ...args, stream: true }, options); + const response = await (options?.fetch ?? 
fetch)(url, info); + + if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) { + return yield* streamingRequest(args, { + ...options, + wait_for_model: true, + }); + } + if (!response.ok) { + if (response.headers.get("Content-Type")?.startsWith("application/json")) { + const output = await response.json(); + if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) { + throw new Error(`Server ${args.model} does not seem to support chat completion. Error: ${output.error}`); + } + if (output.error) { + throw new Error(output.error); + } + } + + throw new Error(`Server response contains error: ${response.status}`); + } + if (!response.headers.get("content-type")?.startsWith("text/event-stream")) { + throw new Error( + `Server does not support event stream content type, it returned ` + response.headers.get("content-type") + ); + } + + if (!response.body) { + return; + } + + const reader = response.body.getReader(); + let events: EventSourceMessage[] = []; + + const onEvent = (event: EventSourceMessage) => { + // accumulate events in array + events.push(event); + }; + + const onChunk = getLines( + getMessages( + () => {}, + () => {}, + onEvent + ) + ); + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) return; + onChunk(value); + for (const event of events) { + if (event.data.length > 0) { + if (event.data === "[DONE]") { + return; + } + const data = JSON.parse(event.data); + if (typeof data === "object" && data !== null && "error" in data) { + throw new Error(data.error); + } + yield data as T; + } + } + events = []; + } + } finally { + reader.releaseLock(); + } +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/cv/imageClassification.ts b/data/node_modules/@huggingface/inference/src/tasks/cv/imageClassification.ts new file mode 100644 index 0000000000000000000000000000000000000000..2ae725870421af898492d6bf3a069ff7a59587f9 --- /dev/null +++ 
b/data/node_modules/@huggingface/inference/src/tasks/cv/imageClassification.ts @@ -0,0 +1,43 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type ImageClassificationArgs = BaseArgs & { + /** + * Binary image data + */ + data: Blob | ArrayBuffer; +}; + +export interface ImageClassificationOutputValue { + /** + * The label for the class (model specific) + */ + label: string; + /** + * A float that represents how likely it is that the image file belongs to this class. + */ + score: number; +} + +export type ImageClassificationOutput = ImageClassificationOutputValue[]; + +/** + * This task reads some image input and outputs the likelihood of classes. + * Recommended model: google/vit-base-patch16-224 + */ +export async function imageClassification( + args: ImageClassificationArgs, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "image-classification", + }); + const isValidOutput = + Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/cv/imageSegmentation.ts b/data/node_modules/@huggingface/inference/src/tasks/cv/imageSegmentation.ts new file mode 100644 index 0000000000000000000000000000000000000000..171f065260f8d0f45de707fbe8b94a9e2207340e --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/cv/imageSegmentation.ts @@ -0,0 +1,48 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type ImageSegmentationArgs = BaseArgs & { + /** + * Binary image data + */ + data: Blob | ArrayBuffer; +}; + +export 
interface ImageSegmentationOutputValue { + /** + * The label for the class (model specific) of a segment. + */ + label: string; + /** + * A str (base64 str of a single channel black-and-white img) representing the mask of a segment. + */ + mask: string; + /** + * A float that represents how likely it is that the detected object belongs to the given class. + */ + score: number; +} + +export type ImageSegmentationOutput = ImageSegmentationOutputValue[]; + +/** + * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects. + * Recommended model: facebook/detr-resnet-50-panoptic + */ +export async function imageSegmentation( + args: ImageSegmentationArgs, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "image-segmentation", + }); + const isValidOutput = + Array.isArray(res) && + res.every((x) => typeof x.label === "string" && typeof x.mask === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, mask: string, score: number}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/cv/imageToImage.ts b/data/node_modules/@huggingface/inference/src/tasks/cv/imageToImage.ts new file mode 100644 index 0000000000000000000000000000000000000000..5c18ccb111120f8fdc5461967a3df8de0c0e0ac3 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/cv/imageToImage.ts @@ -0,0 +1,86 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options, RequestArgs } from "../../types"; +import { base64FromBytes } from "../../utils/base64FromBytes"; +import { request } from "../custom/request"; + +export type ImageToImageArgs = BaseArgs & { + /** + * The initial image condition + * + **/ + inputs: Blob | ArrayBuffer; + + parameters?: { + /** + * The text prompt to guide the image generation. 
+ */ + prompt?: string; + /** + * strengh param only works for SD img2img and alt diffusion img2img models + * Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + * will be used as a starting point, adding more noise to it the larger the `strength`. The number of + * denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + * be maximum and the denoising process will run for the full number of iterations specified in + * `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + **/ + strength?: number; + /** + * An optional negative prompt for the image generation + */ + negative_prompt?: string; + /** + * The height in pixels of the generated image + */ + height?: number; + /** + * The width in pixels of the generated image + */ + width?: number; + /** + * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. + */ + num_inference_steps?: number; + /** + * Guidance scale: Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. + */ + guidance_scale?: number; + /** + * guess_mode only works for ControlNet models, defaults to False In this mode, the ControlNet encoder will try best to recognize the content of the input image even if + * you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. + */ + guess_mode?: boolean; + }; +}; + +export type ImageToImageOutput = Blob; + +/** + * This task reads some text input and outputs an image. 
+ * Recommended model: lllyasviel/sd-controlnet-depth + */ +export async function imageToImage(args: ImageToImageArgs, options?: Options): Promise { + let reqArgs: RequestArgs; + if (!args.parameters) { + reqArgs = { + accessToken: args.accessToken, + model: args.model, + data: args.inputs, + }; + } else { + reqArgs = { + ...args, + inputs: base64FromBytes( + new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer()) + ), + }; + } + const res = await request(reqArgs, { + ...options, + taskHint: "image-to-image", + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/cv/imageToText.ts b/data/node_modules/@huggingface/inference/src/tasks/cv/imageToText.ts new file mode 100644 index 0000000000000000000000000000000000000000..9dd3ae8c20ae02da28bb7f81439599f975115858 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/cv/imageToText.ts @@ -0,0 +1,35 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type ImageToTextArgs = BaseArgs & { + /** + * Binary image data + */ + data: Blob | ArrayBuffer; +}; + +export interface ImageToTextOutput { + /** + * The generated caption + */ + generated_text: string; +} + +/** + * This task reads some image input and outputs the text caption. 
+ */ +export async function imageToText(args: ImageToTextArgs, options?: Options): Promise { + const res = ( + await request<[ImageToTextOutput]>(args, { + ...options, + taskHint: "image-to-text", + }) + )?.[0]; + + if (typeof res?.generated_text !== "string") { + throw new InferenceOutputError("Expected {generated_text: string}"); + } + + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/cv/objectDetection.ts b/data/node_modules/@huggingface/inference/src/tasks/cv/objectDetection.ts new file mode 100644 index 0000000000000000000000000000000000000000..5bec721156ef917cb0b5ea9842bb65365844b627 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/cv/objectDetection.ts @@ -0,0 +1,61 @@ +import { request } from "../custom/request"; +import type { BaseArgs, Options } from "../../types"; +import { InferenceOutputError } from "../../lib/InferenceOutputError"; + +export type ObjectDetectionArgs = BaseArgs & { + /** + * Binary image data + */ + data: Blob | ArrayBuffer; +}; + +export interface ObjectDetectionOutputValue { + /** + * A dict (with keys [xmin,ymin,xmax,ymax]) representing the bounding box of a detected object. + */ + box: { + xmax: number; + xmin: number; + ymax: number; + ymin: number; + }; + /** + * The label for the class (model specific) of a detected object. + */ + label: string; + + /** + * A float that represents how likely it is that the detected object belongs to the given class. + */ + score: number; +} + +export type ObjectDetectionOutput = ObjectDetectionOutputValue[]; + +/** + * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects. 
+ * Recommended model: facebook/detr-resnet-50 + */ +export async function objectDetection(args: ObjectDetectionArgs, options?: Options): Promise { + const res = await request(args, { + ...options, + taskHint: "object-detection", + }); + const isValidOutput = + Array.isArray(res) && + res.every( + (x) => + typeof x.label === "string" && + typeof x.score === "number" && + typeof x.box.xmin === "number" && + typeof x.box.ymin === "number" && + typeof x.box.xmax === "number" && + typeof x.box.ymax === "number" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>" + ); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/cv/textToImage.ts b/data/node_modules/@huggingface/inference/src/tasks/cv/textToImage.ts new file mode 100644 index 0000000000000000000000000000000000000000..677b3bc5c73c1ee1d47fdf266f419c3dcda26582 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/cv/textToImage.ts @@ -0,0 +1,51 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type TextToImageArgs = BaseArgs & { + /** + * The text to generate an image from + */ + inputs: string; + + parameters?: { + /** + * An optional negative prompt for the image generation + */ + negative_prompt?: string; + /** + * The height in pixels of the generated image + */ + height?: number; + /** + * The width in pixels of the generated image + */ + width?: number; + /** + * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. + */ + num_inference_steps?: number; + /** + * Guidance scale: Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
+ */ + guidance_scale?: number; + }; +}; + +export type TextToImageOutput = Blob; + +/** + * This task reads some text input and outputs an image. + * Recommended model: stabilityai/stable-diffusion-2 + */ +export async function textToImage(args: TextToImageArgs, options?: Options): Promise { + const res = await request(args, { + ...options, + taskHint: "text-to-image", + }); + const isValidOutput = res && res instanceof Blob; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Blob"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/cv/zeroShotImageClassification.ts b/data/node_modules/@huggingface/inference/src/tasks/cv/zeroShotImageClassification.ts new file mode 100644 index 0000000000000000000000000000000000000000..062b86b2114c3327c0976c50dc9ed67f05f95218 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/cv/zeroShotImageClassification.ts @@ -0,0 +1,58 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; +import type { RequestArgs } from "../../types"; +import { base64FromBytes } from "../../utils/base64FromBytes"; + +export type ZeroShotImageClassificationArgs = BaseArgs & { + inputs: { + /** + * Binary image data + */ + image: Blob | ArrayBuffer; + }; + parameters: { + /** + * A list of strings that are potential classes for inputs. (max 10) + */ + candidate_labels: string[]; + }; +}; + +export interface ZeroShotImageClassificationOutputValue { + label: string; + score: number; +} + +export type ZeroShotImageClassificationOutput = ZeroShotImageClassificationOutputValue[]; + +/** + * Classify an image to specified classes. 
+ * Recommended model: openai/clip-vit-large-patch14-336 + */ +export async function zeroShotImageClassification( + args: ZeroShotImageClassificationArgs, + options?: Options +): Promise { + const reqArgs: RequestArgs = { + ...args, + inputs: { + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ), + }, + } as RequestArgs; + + const res = await request(reqArgs, { + ...options, + taskHint: "zero-shot-image-classification", + }); + const isValidOutput = + Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/index.ts b/data/node_modules/@huggingface/inference/src/tasks/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..ba3108446cc76447b1c1bdb92f80c047064b50d2 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/index.ts @@ -0,0 +1,42 @@ +// Custom tasks with arbitrary inputs and outputs +export * from "./custom/request"; +export * from "./custom/streamingRequest"; + +// Audio tasks +export * from "./audio/audioClassification"; +export * from "./audio/automaticSpeechRecognition"; +export * from "./audio/textToSpeech"; +export * from "./audio/audioToAudio"; + +// Computer Vision tasks +export * from "./cv/imageClassification"; +export * from "./cv/imageSegmentation"; +export * from "./cv/imageToText"; +export * from "./cv/objectDetection"; +export * from "./cv/textToImage"; +export * from "./cv/imageToImage"; +export * from "./cv/zeroShotImageClassification"; + +// Natural Language Processing tasks +export * from "./nlp/featureExtraction"; +export * from "./nlp/fillMask"; +export * from "./nlp/questionAnswering"; +export * from "./nlp/sentenceSimilarity"; +export * from 
"./nlp/summarization"; +export * from "./nlp/tableQuestionAnswering"; +export * from "./nlp/textClassification"; +export * from "./nlp/textGeneration"; +export * from "./nlp/textGenerationStream"; +export * from "./nlp/tokenClassification"; +export * from "./nlp/translation"; +export * from "./nlp/zeroShotClassification"; +export * from "./nlp/chatCompletion"; +export * from "./nlp/chatCompletionStream"; + +// Multimodal tasks +export * from "./multimodal/documentQuestionAnswering"; +export * from "./multimodal/visualQuestionAnswering"; + +// Tabular tasks +export * from "./tabular/tabularRegression"; +export * from "./tabular/tabularClassification"; diff --git a/data/node_modules/@huggingface/inference/src/tasks/multimodal/documentQuestionAnswering.ts b/data/node_modules/@huggingface/inference/src/tasks/multimodal/documentQuestionAnswering.ts new file mode 100644 index 0000000000000000000000000000000000000000..205e956b9d8f5807ff1a29439e6f606efa02ed3f --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/multimodal/documentQuestionAnswering.ts @@ -0,0 +1,73 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; +import type { RequestArgs } from "../../types"; +import { toArray } from "../../utils/toArray"; +import { base64FromBytes } from "../../utils/base64FromBytes"; + +export type DocumentQuestionAnsweringArgs = BaseArgs & { + inputs: { + /** + * Raw image + * + * You can use native `File` in browsers, or `new Blob([buffer])` in node, or for a base64 image `new Blob([btoa(base64String)])`, or even `await (await fetch('...)).blob()` + **/ + image: Blob | ArrayBuffer; + question: string; + }; +}; + +export interface DocumentQuestionAnsweringOutput { + /** + * A string that’s the answer within the document. + */ + answer: string; + /** + * ? 
+ */ + end?: number; + /** + * A float that represents how likely that the answer is correct + */ + score?: number; + /** + * ? + */ + start?: number; +} + +/** + * Answers a question on a document image. Recommended model: impira/layoutlm-document-qa. + */ +export async function documentQuestionAnswering( + args: DocumentQuestionAnsweringArgs, + options?: Options +): Promise { + const reqArgs: RequestArgs = { + ...args, + inputs: { + question: args.inputs.question, + // convert Blob or ArrayBuffer to base64 + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ), + }, + } as RequestArgs; + const res = toArray( + await request<[DocumentQuestionAnsweringOutput] | DocumentQuestionAnsweringOutput>(reqArgs, { + ...options, + taskHint: "document-question-answering", + }) + )?.[0]; + const isValidOutput = + typeof res?.answer === "string" && + (typeof res.end === "number" || typeof res.end === "undefined") && + (typeof res.score === "number" || typeof res.score === "undefined") && + (typeof res.start === "number" || typeof res.start === "undefined"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/multimodal/visualQuestionAnswering.ts b/data/node_modules/@huggingface/inference/src/tasks/multimodal/visualQuestionAnswering.ts new file mode 100644 index 0000000000000000000000000000000000000000..80e8a9a15c19fd34c5c5906441b9650c59d03092 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/multimodal/visualQuestionAnswering.ts @@ -0,0 +1,59 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options, RequestArgs } from "../../types"; +import { base64FromBytes } from "../../utils/base64FromBytes"; +import { request } from "../custom/request"; 
+ +export type VisualQuestionAnsweringArgs = BaseArgs & { + inputs: { + /** + * Raw image + * + * You can use native `File` in browsers, or `new Blob([buffer])` in node, or for a base64 image `new Blob([btoa(base64String)])`, or even `await (await fetch('...)).blob()` + **/ + image: Blob | ArrayBuffer; + question: string; + }; +}; + +export interface VisualQuestionAnsweringOutput { + /** + * A string that’s the answer to a visual question. + */ + answer: string; + /** + * Answer correctness score. + */ + score: number; +} + +/** + * Answers a question on an image. Recommended model: dandelin/vilt-b32-finetuned-vqa. + */ +export async function visualQuestionAnswering( + args: VisualQuestionAnsweringArgs, + options?: Options +): Promise { + const reqArgs: RequestArgs = { + ...args, + inputs: { + question: args.inputs.question, + // convert Blob or ArrayBuffer to base64 + image: base64FromBytes( + new Uint8Array( + args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer() + ) + ), + }, + } as RequestArgs; + const res = ( + await request<[VisualQuestionAnsweringOutput]>(reqArgs, { + ...options, + taskHint: "visual-question-answering", + }) + )?.[0]; + const isValidOutput = typeof res?.answer === "string" && typeof res.score === "number"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{answer: string, score: number}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/chatCompletion.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/chatCompletion.ts new file mode 100644 index 0000000000000000000000000000000000000000..fbc7e0cc11e91a1d2f161792381249b56f8c5dcd --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/chatCompletion.ts @@ -0,0 +1,32 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; +import type { 
ChatCompletionInput, ChatCompletionOutput } from "@huggingface/tasks"; + +/** + * Use the chat completion endpoint to generate a response to a prompt, using OpenAI message completion API no stream + */ + +export async function chatCompletion( + args: BaseArgs & ChatCompletionInput, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "text-generation", + chatCompletion: true, + }); + const isValidOutput = + typeof res === "object" && + Array.isArray(res?.choices) && + typeof res?.created === "number" && + typeof res?.id === "string" && + typeof res?.model === "string" && + typeof res?.system_fingerprint === "string" && + typeof res?.usage === "object"; + + if (!isValidOutput) { + throw new InferenceOutputError("Expected ChatCompletionOutput"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/chatCompletionStream.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/chatCompletionStream.ts new file mode 100644 index 0000000000000000000000000000000000000000..e8c19780116d2a2db92e9552779f028f1e1a87fa --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/chatCompletionStream.ts @@ -0,0 +1,17 @@ +import type { BaseArgs, Options } from "../../types"; +import { streamingRequest } from "../custom/streamingRequest"; +import type { ChatCompletionInput, ChatCompletionStreamOutput } from "@huggingface/tasks"; + +/** + * Use to continue text from a prompt. 
Same as `textGeneration` but returns generator that can be read one token at a time + */ +export async function* chatCompletionStream( + args: BaseArgs & ChatCompletionInput, + options?: Options +): AsyncGenerator { + yield* streamingRequest(args, { + ...options, + taskHint: "text-generation", + chatCompletion: true, + }); +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/featureExtraction.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/featureExtraction.ts new file mode 100644 index 0000000000000000000000000000000000000000..fef6ccc6145d9eff9eda089981bf58ef6722432f --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/featureExtraction.ts @@ -0,0 +1,52 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import { getDefaultTask } from "../../lib/getDefaultTask"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type FeatureExtractionArgs = BaseArgs & { + /** + * The inputs is a string or a list of strings to get the features from. + * + * inputs: "That is a happy person", + * + */ + inputs: string | string[]; +}; + +/** + * Returned values are a multidimensional array of floats (dimension depending on if you sent a string or a list of string, and if the automatic reduction, usually mean_pooling for instance was applied for you or not. This should be explained on the model's README). + */ +export type FeatureExtractionOutput = (number | number[] | number[][])[]; + +/** + * This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search. + */ +export async function featureExtraction( + args: FeatureExtractionArgs, + options?: Options +): Promise { + const defaultTask = args.model ? 
await getDefaultTask(args.model, args.accessToken, options) : undefined; + + const res = await request(args, { + ...options, + taskHint: "feature-extraction", + ...(defaultTask === "sentence-similarity" && { forceTask: "feature-extraction" }), + }); + let isValidOutput = true; + + const isNumArrayRec = (arr: unknown[], maxDepth: number, curDepth = 0): boolean => { + if (curDepth > maxDepth) return false; + if (arr.every((x) => Array.isArray(x))) { + return arr.every((x) => isNumArrayRec(x as unknown[], maxDepth, curDepth + 1)); + } else { + return arr.every((x) => typeof x === "number"); + } + }; + + isValidOutput = Array.isArray(res) && isNumArrayRec(res, 3, 0); + + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/fillMask.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/fillMask.ts new file mode 100644 index 0000000000000000000000000000000000000000..b8a2af12862d8ad03a527cb8784a338e8e7963ee --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/fillMask.ts @@ -0,0 +1,51 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type FillMaskArgs = BaseArgs & { + inputs: string; +}; + +export type FillMaskOutput = { + /** + * The probability for this token. + */ + score: number; + /** + * The actual sequence of tokens that ran against the model (may contain special tokens) + */ + sequence: string; + /** + * The id of the token + */ + token: number; + /** + * The string representation of the token + */ + token_str: string; +}[]; + +/** + * Tries to fill in a hole with a missing word (token to be precise). That’s the base task for BERT models. 
+ */ +export async function fillMask(args: FillMaskArgs, options?: Options): Promise { + const res = await request(args, { + ...options, + taskHint: "fill-mask", + }); + const isValidOutput = + Array.isArray(res) && + res.every( + (x) => + typeof x.score === "number" && + typeof x.sequence === "string" && + typeof x.token === "number" && + typeof x.token_str === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{score: number, sequence: string, token: number, token_str: string}>" + ); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/questionAnswering.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/questionAnswering.ts new file mode 100644 index 0000000000000000000000000000000000000000..58074eb9c96196876d64fd72f42c2c4043681aea --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/questionAnswering.ts @@ -0,0 +1,53 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type QuestionAnsweringArgs = BaseArgs & { + inputs: { + context: string; + question: string; + }; +}; + +export interface QuestionAnsweringOutput { + /** + * A string that’s the answer within the text. + */ + answer: string; + /** + * The index (string wise) of the stop of the answer within context. + */ + end: number; + /** + * A float that represents how likely that the answer is correct + */ + score: number; + /** + * The index (string wise) of the start of the answer within context. + */ + start: number; +} + +/** + * Want to have a nice know-it-all bot that can answer any question?. 
Recommended model: deepset/roberta-base-squad2 + */ +export async function questionAnswering( + args: QuestionAnsweringArgs, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "question-answering", + }); + const isValidOutput = + typeof res === "object" && + !!res && + typeof res.answer === "string" && + typeof res.end === "number" && + typeof res.score === "number" && + typeof res.start === "number"; + if (!isValidOutput) { + throw new InferenceOutputError("Expected {answer: string, end: number, score: number, start: number}"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/sentenceSimilarity.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/sentenceSimilarity.ts new file mode 100644 index 0000000000000000000000000000000000000000..ec5c173ca21871c110500eab48d9033a32201716 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/sentenceSimilarity.ts @@ -0,0 +1,40 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import { getDefaultTask } from "../../lib/getDefaultTask"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type SentenceSimilarityArgs = BaseArgs & { + /** + * The inputs vary based on the model. + * + * For example when using sentence-transformers/paraphrase-xlm-r-multilingual-v1 the inputs will have a `source_sentence` string and + * a `sentences` array of strings + */ + inputs: Record | Record[]; +}; + +/** + * Returned values are a list of floats + */ +export type SentenceSimilarityOutput = number[]; + +/** + * Calculate the semantic similarity between one text and a list of other sentences by comparing their embeddings. + */ +export async function sentenceSimilarity( + args: SentenceSimilarityArgs, + options?: Options +): Promise { + const defaultTask = args.model ? 
await getDefaultTask(args.model, args.accessToken, options) : undefined; + const res = await request(args, { + ...options, + taskHint: "sentence-similarity", + ...(defaultTask === "feature-extraction" && { forceTask: "sentence-similarity" }), + }); + + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/summarization.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/summarization.ts new file mode 100644 index 0000000000000000000000000000000000000000..71efd1c3b9d0af3a1bb0b76da10556f71eb9ec81 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/summarization.ts @@ -0,0 +1,62 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type SummarizationArgs = BaseArgs & { + /** + * A string to be summarized + */ + inputs: string; + parameters?: { + /** + * (Default: None). Integer to define the maximum length in tokens of the output summary. + */ + max_length?: number; + /** + * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. + */ + max_time?: number; + /** + * (Default: None). Integer to define the minimum length in tokens of the output summary. + */ + min_length?: number; + /** + * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. + */ + repetition_penalty?: number; + /** + * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. 
+ */ + temperature?: number; + /** + * (Default: None). Integer to define the top tokens considered within the sample operation to create new text. + */ + top_k?: number; + /** + * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. + */ + top_p?: number; + }; +}; + +export interface SummarizationOutput { + /** + * The string after translation + */ + summary_text: string; +} + +/** + * This task is well known to summarize longer text into shorter text. Be careful, some models have a maximum length of input. That means that the summary cannot handle full books for instance. Be careful when choosing your model. + */ +export async function summarization(args: SummarizationArgs, options?: Options): Promise { + const res = await request(args, { + ...options, + taskHint: "summarization", + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.summary_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{summary_text: string}>"); + } + return res?.[0]; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/tableQuestionAnswering.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/tableQuestionAnswering.ts new file mode 100644 index 0000000000000000000000000000000000000000..a0cf6925121d75db5d13f614f00d3e068c4c03a9 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/tableQuestionAnswering.ts @@ -0,0 +1,61 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type TableQuestionAnsweringArgs = BaseArgs & { + inputs: { + /** + * The query in plain text that you want to ask the table + */ + query: string; + /** + * A table of data represented as a dict of list where entries 
are headers and the lists are all the values, all lists must have the same size. + */ + table: Record; + }; +}; + +export interface TableQuestionAnsweringOutput { + /** + * The aggregator used to get the answer + */ + aggregator: string; + /** + * The plaintext answer + */ + answer: string; + /** + * A list of coordinates of the cells contents + */ + cells: string[]; + /** + * a list of coordinates of the cells referenced in the answer + */ + coordinates: number[][]; +} + +/** + * Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain english! Recommended model: google/tapas-base-finetuned-wtq. + */ +export async function tableQuestionAnswering( + args: TableQuestionAnsweringArgs, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "table-question-answering", + }); + const isValidOutput = + typeof res?.aggregator === "string" && + typeof res.answer === "string" && + Array.isArray(res.cells) && + res.cells.every((x) => typeof x === "string") && + Array.isArray(res.coordinates) && + res.coordinates.every((coord) => Array.isArray(coord) && coord.every((x) => typeof x === "number")); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}" + ); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/textClassification.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/textClassification.ts new file mode 100644 index 0000000000000000000000000000000000000000..41ced40571f31cb795bbe79643e70c18823507d2 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/textClassification.ts @@ -0,0 +1,42 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type TextClassificationArgs = BaseArgs & { + /** + * A string to be 
classified + */ + inputs: string; +}; + +export type TextClassificationOutput = { + /** + * The label for the class (model specific) + */ + label: string; + /** + * A floats that represents how likely is that the text belongs to this class. + */ + score: number; +}[]; + +/** + * Usually used for sentiment-analysis this will output the likelihood of classes of an input. Recommended model: distilbert-base-uncased-finetuned-sst-2-english + */ +export async function textClassification( + args: TextClassificationArgs, + options?: Options +): Promise { + const res = ( + await request(args, { + ...options, + taskHint: "text-classification", + }) + )?.[0]; + const isValidOutput = + Array.isArray(res) && res.every((x) => typeof x?.label === "string" && typeof x.score === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{label: string, score: number}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/textGeneration.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/textGeneration.ts new file mode 100644 index 0000000000000000000000000000000000000000..bda33cee596e7ab7a0ee00aa483c09b2f1b625dd --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/textGeneration.ts @@ -0,0 +1,27 @@ +import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks"; +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { toArray } from "../../utils/toArray"; +import { request } from "../custom/request"; + +export type { TextGenerationInput, TextGenerationOutput }; + +/** + * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with). 
+ */ +export async function textGeneration( + args: BaseArgs & TextGenerationInput, + options?: Options +): Promise { + const res = toArray( + await request(args, { + ...options, + taskHint: "text-generation", + }) + ); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.generated_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{generated_text: string}>"); + } + return res?.[0]; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/textGenerationStream.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/textGenerationStream.ts new file mode 100644 index 0000000000000000000000000000000000000000..029026c2e9794eb1653e2088a2c5cd9c48c7c2fd --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/textGenerationStream.ts @@ -0,0 +1,96 @@ +import type { TextGenerationInput } from "@huggingface/tasks"; +import type { BaseArgs, Options } from "../../types"; +import { streamingRequest } from "../custom/streamingRequest"; + +export interface TextGenerationStreamToken { + /** Token ID from the model tokenizer */ + id: number; + /** Token text */ + text: string; + /** Logprob */ + logprob: number; + /** + * Is the token a special token + * Can be used to ignore tokens when concatenating + */ + special: boolean; +} + +export interface TextGenerationStreamPrefillToken { + /** Token ID from the model tokenizer */ + id: number; + /** Token text */ + text: string; + /** + * Logprob + * Optional since the logprob of the first token cannot be computed + */ + logprob?: number; +} + +export interface TextGenerationStreamBestOfSequence { + /** Generated text */ + generated_text: string; + /** Generation finish reason */ + finish_reason: TextGenerationStreamFinishReason; + /** Number of generated tokens */ + generated_tokens: number; + /** Sampling seed if sampling was activated */ + seed?: number; + /** Prompt tokens */ + prefill: TextGenerationStreamPrefillToken[]; + /** Generated 
tokens */ + tokens: TextGenerationStreamToken[]; +} + +export type TextGenerationStreamFinishReason = + /** number of generated tokens == `max_new_tokens` */ + | "length" + /** the model generated its end of sequence token */ + | "eos_token" + /** the model generated a text included in `stop_sequences` */ + | "stop_sequence"; + +export interface TextGenerationStreamDetails { + /** Generation finish reason */ + finish_reason: TextGenerationStreamFinishReason; + /** Number of generated tokens */ + generated_tokens: number; + /** Sampling seed if sampling was activated */ + seed?: number; + /** Prompt tokens */ + prefill: TextGenerationStreamPrefillToken[]; + /** */ + tokens: TextGenerationStreamToken[]; + /** Additional sequences when using the `best_of` parameter */ + best_of_sequences?: TextGenerationStreamBestOfSequence[]; +} + +export interface TextGenerationStreamOutput { + index?: number; + /** Generated token, one at a time */ + token: TextGenerationStreamToken; + /** + * Complete generated text + * Only available when the generation is finished + */ + generated_text: string | null; + /** + * Generation details + * Only available when the generation is finished + */ + details: TextGenerationStreamDetails | null; +} + +/** + * Use to continue text from a prompt. 
Same as `textGeneration` but returns generator that can be read one token at a time + */ +export async function* textGenerationStream( + args: BaseArgs & TextGenerationInput, + options?: Options +): AsyncGenerator { + yield* streamingRequest(args, { + ...options, + taskHint: "text-generation", + }); +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/tokenClassification.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/tokenClassification.ts new file mode 100644 index 0000000000000000000000000000000000000000..eeee58d4c641de80429d7e990c06f9e5c124d974 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/tokenClassification.ts @@ -0,0 +1,83 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { toArray } from "../../utils/toArray"; +import { request } from "../custom/request"; + +export type TokenClassificationArgs = BaseArgs & { + /** + * A string to be classified + */ + inputs: string; + parameters?: { + /** + * (Default: simple). There are several aggregation strategies: + * + * none: Every token gets classified without further aggregation. + * + * simple: Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar). + * + * first: Same as the simple strategy except words cannot end up with different tags. Words will use the tag of the first token when there is ambiguity. + * + * average: Same as the simple strategy except words cannot end up with different tags. Scores are averaged across tokens and then the maximum label is applied. + * + * max: Same as the simple strategy except words cannot end up with different tags. Word entity will be the token with the maximum score. + */ + aggregation_strategy?: "none" | "simple" | "first" | "average" | "max"; + }; +}; + +export interface TokenClassificationOutputValue { + /** + * The offset stringwise where the answer is located. 
Useful to disambiguate if word occurs multiple times. + */ + end: number; + /** + * The type for the entity being recognized (model specific). + */ + entity_group: string; + /** + * How likely the entity was recognized. + */ + score: number; + /** + * The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times. + */ + start: number; + /** + * The string that was captured + */ + word: string; +} + +export type TokenClassificationOutput = TokenClassificationOutputValue[]; + +/** + * Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english + */ +export async function tokenClassification( + args: TokenClassificationArgs, + options?: Options +): Promise { + const res = toArray( + await request(args, { + ...options, + taskHint: "token-classification", + }) + ); + const isValidOutput = + Array.isArray(res) && + res.every( + (x) => + typeof x.end === "number" && + typeof x.entity_group === "string" && + typeof x.score === "number" && + typeof x.start === "number" && + typeof x.word === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError( + "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>" + ); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/translation.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/translation.ts new file mode 100644 index 0000000000000000000000000000000000000000..ea7a3054c0a20a7dbc51fed988a25adab8dc8ee8 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/translation.ts @@ -0,0 +1,34 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type TranslationArgs = BaseArgs & { + /** + * A string to be 
translated + */ + inputs: string | string[]; +}; + +export interface TranslationOutputValue { + /** + * The string after translation + */ + translation_text: string; +} + +export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[]; + +/** + * This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en. + */ +export async function translation(args: TranslationArgs, options?: Options): Promise { + const res = await request(args, { + ...options, + taskHint: "translation", + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.translation_text === "string"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected type Array<{translation_text: string}>"); + } + return res?.length === 1 ? res?.[0] : res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/nlp/zeroShotClassification.ts b/data/node_modules/@huggingface/inference/src/tasks/nlp/zeroShotClassification.ts new file mode 100644 index 0000000000000000000000000000000000000000..2552489c363b1fec32921e2eb87565d76c22b0e8 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/nlp/zeroShotClassification.ts @@ -0,0 +1,58 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { toArray } from "../../utils/toArray"; +import { request } from "../custom/request"; + +export type ZeroShotClassificationArgs = BaseArgs & { + /** + * a string or list of strings + */ + inputs: string | string[]; + parameters: { + /** + * a list of strings that are potential classes for inputs. (max 10 candidate_labels, for more, simply run multiple requests, results are going to be misleading if using too many candidate_labels anyway. If you want to keep the exact same, you can simply run multi_label=True and do the scaling on your end. 
+ */ + candidate_labels: string[]; + /** + * (Default: false) Boolean that is set to True if classes can overlap + */ + multi_label?: boolean; + }; +}; + +export interface ZeroShotClassificationOutputValue { + labels: string[]; + scores: number[]; + sequence: string; +} + +export type ZeroShotClassificationOutput = ZeroShotClassificationOutputValue[]; + +/** + * This task is super useful to try out classification with zero code, you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli. + */ +export async function zeroShotClassification( + args: ZeroShotClassificationArgs, + options?: Options +): Promise { + const res = toArray( + await request(args, { + ...options, + taskHint: "zero-shot-classification", + }) + ); + const isValidOutput = + Array.isArray(res) && + res.every( + (x) => + Array.isArray(x.labels) && + x.labels.every((_label) => typeof _label === "string") && + Array.isArray(x.scores) && + x.scores.every((_score) => typeof _score === "number") && + typeof x.sequence === "string" + ); + if (!isValidOutput) { + throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/tabular/tabularClassification.ts b/data/node_modules/@huggingface/inference/src/tasks/tabular/tabularClassification.ts new file mode 100644 index 0000000000000000000000000000000000000000..f53e926e94344cff705968618f5390e95e331f57 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/tabular/tabularClassification.ts @@ -0,0 +1,37 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type TabularClassificationArgs = BaseArgs & { + inputs: { + /** + * A table of data represented as a dict of list where entries are headers and the 
lists are all the values, all lists must have the same size. + */ + data: Record; + }; +}; + +/** + * A list of predicted labels for each row + */ +export type TabularClassificationOutput = number[]; + +/** + * Predicts target label for a given set of features in tabular form. + * Typically, you will want to train a classification model on your training data and use it with your new data of the same format. + * Example model: vvmnnnkv/wine-quality + */ +export async function tabularClassification( + args: TabularClassificationArgs, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "tabular-classification", + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/tasks/tabular/tabularRegression.ts b/data/node_modules/@huggingface/inference/src/tasks/tabular/tabularRegression.ts new file mode 100644 index 0000000000000000000000000000000000000000..e6bd9e3de1117fe10c7da42cabee58af48fb5829 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/tasks/tabular/tabularRegression.ts @@ -0,0 +1,37 @@ +import { InferenceOutputError } from "../../lib/InferenceOutputError"; +import type { BaseArgs, Options } from "../../types"; +import { request } from "../custom/request"; + +export type TabularRegressionArgs = BaseArgs & { + inputs: { + /** + * A table of data represented as a dict of list where entries are headers and the lists are all the values, all lists must have the same size. + */ + data: Record; + }; +}; + +/** + * a list of predicted values for each row + */ +export type TabularRegressionOutput = number[]; + +/** + * Predicts target value for a given set of features in tabular form. + * Typically, you will want to train a regression model on your training data and use it with your new data of the same format. 
+ * Example model: scikit-learn/Fish-Weight + */ +export async function tabularRegression( + args: TabularRegressionArgs, + options?: Options +): Promise { + const res = await request(args, { + ...options, + taskHint: "tabular-regression", + }); + const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number"); + if (!isValidOutput) { + throw new InferenceOutputError("Expected number[]"); + } + return res; +} diff --git a/data/node_modules/@huggingface/inference/src/types.ts b/data/node_modules/@huggingface/inference/src/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..66490bf2c2fea4e58d18f9854b860ae7d971bf5d --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/types.ts @@ -0,0 +1,72 @@ +import type { PipelineType } from "@huggingface/tasks"; +import type { ChatCompletionInput } from "@huggingface/tasks"; + +export interface Options { + /** + * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true. + */ + retry_on_error?: boolean; + /** + * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query. + */ + use_cache?: boolean; + /** + * (Default: false). Boolean. Do not load the model if it's not already available. + */ + dont_load_model?: boolean; + /** + * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least). + */ + use_gpu?: boolean; + + /** + * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. 
It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places. + */ + wait_for_model?: boolean; + /** + * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers. + */ + fetch?: typeof fetch; + /** + * Abort Controller signal to use for request interruption. + */ + signal?: AbortSignal; + + /** + * (Default: "same-origin"). String | Boolean. Credentials to use for the request. If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all. + */ + includeCredentials?: string | boolean; +} + +export type InferenceTask = Exclude; + +export interface BaseArgs { + /** + * The access token to use. Without it, you'll get rate-limited quickly. + * + * Can be created for free in hf.co/settings/token + */ + accessToken?: string; + /** + * The model to use. + * + * If not specified, will call huggingface.co/api/tasks to get the default model for the task. + * + * /!\ Legacy behavior allows this to be an URL, but this is deprecated and will be removed in the future. + * Use the `endpointUrl` parameter instead. + */ + model?: string; + + /** + * The URL of the endpoint to use. If not specified, will call huggingface.co/api/tasks to get the default endpoint for the task. + * + * If specified, will use this URL instead of the default one. 
+ */ + endpointUrl?: string; +} + +export type RequestArgs = BaseArgs & + ({ data: Blob | ArrayBuffer } | { inputs: unknown } | ChatCompletionInput) & { + parameters?: Record; + accessToken?: string; + }; diff --git a/data/node_modules/@huggingface/inference/src/utils/base64FromBytes.ts b/data/node_modules/@huggingface/inference/src/utils/base64FromBytes.ts new file mode 100644 index 0000000000000000000000000000000000000000..5327bbfe25838372cc7e25123d1cc9beaf80ceda --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/utils/base64FromBytes.ts @@ -0,0 +1,11 @@ +export function base64FromBytes(arr: Uint8Array): string { + if (globalThis.Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} diff --git a/data/node_modules/@huggingface/inference/src/utils/distributive-omit.ts b/data/node_modules/@huggingface/inference/src/utils/distributive-omit.ts new file mode 100644 index 0000000000000000000000000000000000000000..1f2536c3ed5ff5e16b0d0abfcf9ca065fc4e7051 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/utils/distributive-omit.ts @@ -0,0 +1,13 @@ +// https://dev.to/safareli/pick-omit-and-union-types-in-typescript-4nd9 +// https://github.com/microsoft/TypeScript/issues/28339#issuecomment-467393437 +/** + * This allows omitting keys from objects inside unions, without merging the individual components of the union. + */ + +type Omit_ = Omit>; + +export type DistributiveOmit = T extends unknown + ? keyof Omit_ extends never + ? 
never + : { [P in keyof Omit_]: Omit_[P] } + : never; diff --git a/data/node_modules/@huggingface/inference/src/utils/isBackend.ts b/data/node_modules/@huggingface/inference/src/utils/isBackend.ts new file mode 100644 index 0000000000000000000000000000000000000000..1e6f27998645f2971dcdd92503d78de521273a26 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/utils/isBackend.ts @@ -0,0 +1,6 @@ +const isBrowser = typeof window !== "undefined" && typeof window.document !== "undefined"; + +const isWebWorker = + typeof self === "object" && self.constructor && self.constructor.name === "DedicatedWorkerGlobalScope"; + +export const isBackend = !isBrowser && !isWebWorker; diff --git a/data/node_modules/@huggingface/inference/src/utils/isFrontend.ts b/data/node_modules/@huggingface/inference/src/utils/isFrontend.ts new file mode 100644 index 0000000000000000000000000000000000000000..0b9bab392e71f315704c210bc0e8ff210379703d --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/utils/isFrontend.ts @@ -0,0 +1,3 @@ +import { isBackend } from "./isBackend"; + +export const isFrontend = !isBackend; diff --git a/data/node_modules/@huggingface/inference/src/utils/omit.ts b/data/node_modules/@huggingface/inference/src/utils/omit.ts new file mode 100644 index 0000000000000000000000000000000000000000..6515f18df60026616ecebcb45bda9da46b66f818 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/utils/omit.ts @@ -0,0 +1,11 @@ +import { pick } from "./pick"; +import { typedInclude } from "./typedInclude"; + +/** + * Return copy of object, omitting blocklisted array of props + */ +export function omit(o: T, props: K[] | K): Pick> { + const propsArr = Array.isArray(props) ? 
props : [props]; + const letsKeep = (Object.keys(o) as (keyof T)[]).filter((prop) => !typedInclude(propsArr, prop)); + return pick(o, letsKeep); +} diff --git a/data/node_modules/@huggingface/inference/src/utils/pick.ts b/data/node_modules/@huggingface/inference/src/utils/pick.ts new file mode 100644 index 0000000000000000000000000000000000000000..7bb040cbba9ae7a4eb80c9e8a94d06fbd0ef8b4e --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/utils/pick.ts @@ -0,0 +1,13 @@ +/** + * Return copy of object, only keeping allowlisted properties. + */ +export function pick(o: T, props: K[] | ReadonlyArray): Pick { + return Object.assign( + {}, + ...props.map((prop) => { + if (o[prop] !== undefined) { + return { [prop]: o[prop] }; + } + }) + ); +} diff --git a/data/node_modules/@huggingface/inference/src/utils/toArray.ts b/data/node_modules/@huggingface/inference/src/utils/toArray.ts new file mode 100644 index 0000000000000000000000000000000000000000..e918f06216563afc58ace3a1781adb53ceba6df9 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/utils/toArray.ts @@ -0,0 +1,6 @@ +export function toArray(obj: T): T extends unknown[] ? T : T[] { + if (Array.isArray(obj)) { + return obj as T extends unknown[] ? T : T[]; + } + return [obj] as T extends unknown[] ? 
T : T[]; +} diff --git a/data/node_modules/@huggingface/inference/src/utils/typedInclude.ts b/data/node_modules/@huggingface/inference/src/utils/typedInclude.ts new file mode 100644 index 0000000000000000000000000000000000000000..71e2f7a7e111995a744589dd34cb090d9743ea16 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/utils/typedInclude.ts @@ -0,0 +1,3 @@ +export function typedInclude(arr: readonly T[], v: V): v is T { + return arr.includes(v as T); +} diff --git a/data/node_modules/@huggingface/inference/src/vendor/fetch-event-source/parse.spec.ts b/data/node_modules/@huggingface/inference/src/vendor/fetch-event-source/parse.spec.ts new file mode 100644 index 0000000000000000000000000000000000000000..be9752335af0b8610eacb94da4591543f1c17fdf --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/vendor/fetch-event-source/parse.spec.ts @@ -0,0 +1,389 @@ +import { expect, it, describe } from "vitest"; +const fail = (msg: string) => { throw new Error(msg) }; + +/** + This file is a part of fetch-event-source package (as of v2.0.1) + https://github.com/Azure/fetch-event-source/blob/v2.0.1/src/parse.spec.ts + + Full package can be used after it is made compatible with nodejs: + https://github.com/Azure/fetch-event-source/issues/20 + + Below is the fetch-event-source package license: + + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + + */ + +import * as parse from './parse'; + +describe('parse', () => { + const encoder = new TextEncoder(); + const decoder = new TextDecoder(); + + describe('getLines', () => { + it('single line', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual('id: abc'); + expect(fieldLength).toEqual(2); + }); + + // act: + next(encoder.encode('id: abc\n')); + + // assert: + expect(lineNum).toBe(1); + }); + + it('multiple lines', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual(lineNum === 1 ? 'id: abc' : 'data: def'); + expect(fieldLength).toEqual(lineNum === 1 ? 2 : 4); + }); + + // act: + next(encoder.encode('id: abc\n')); + next(encoder.encode('data: def\n')); + + // assert: + expect(lineNum).toBe(2); + }); + + it('single line split across multiple arrays', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual('id: abc'); + expect(fieldLength).toEqual(2); + }); + + // act: + next(encoder.encode('id: a')); + next(encoder.encode('bc\n')); + + // assert: + expect(lineNum).toBe(1); + }); + + it('multiple lines split across multiple arrays', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual(lineNum === 1 ? 
'id: abc' : 'data: def'); + expect(fieldLength).toEqual(lineNum === 1 ? 2 : 4); + }); + + // act: + next(encoder.encode('id: ab')); + next(encoder.encode('c\nda')); + next(encoder.encode('ta: def\n')); + + // assert: + expect(lineNum).toBe(2); + }); + + it('new line', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual(''); + expect(fieldLength).toEqual(-1); + }); + + // act: + next(encoder.encode('\n')); + + // assert: + expect(lineNum).toBe(1); + }); + + it('comment line', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual(': this is a comment'); + expect(fieldLength).toEqual(0); + }); + + // act: + next(encoder.encode(': this is a comment\n')); + + // assert: + expect(lineNum).toBe(1); + }); + + it('line with no field', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual('this is an invalid line'); + expect(fieldLength).toEqual(-1); + }); + + // act: + next(encoder.encode('this is an invalid line\n')); + + // assert: + expect(lineNum).toBe(1); + }); + + it('line with multiple colons', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual('id: abc: def'); + expect(fieldLength).toEqual(2); + }); + + // act: + next(encoder.encode('id: abc: def\n')); + + // assert: + expect(lineNum).toBe(1); + }); + + it('single byte array with multiple lines separated by \\n', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual(lineNum === 1 ? 'id: abc' : 'data: def'); + expect(fieldLength).toEqual(lineNum === 1 ? 
2 : 4); + }); + + // act: + next(encoder.encode('id: abc\ndata: def\n')); + + // assert: + expect(lineNum).toBe(2); + }); + + it('single byte array with multiple lines separated by \\r', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual(lineNum === 1 ? 'id: abc' : 'data: def'); + expect(fieldLength).toEqual(lineNum === 1 ? 2 : 4); + }); + + // act: + next(encoder.encode('id: abc\rdata: def\r')); + + // assert: + expect(lineNum).toBe(2); + }); + + it('single byte array with multiple lines separated by \\r\\n', () => { + // arrange: + let lineNum = 0; + const next = parse.getLines((line, fieldLength) => { + ++lineNum; + expect(decoder.decode(line)).toEqual(lineNum === 1 ? 'id: abc' : 'data: def'); + expect(fieldLength).toEqual(lineNum === 1 ? 2 : 4); + }); + + // act: + next(encoder.encode('id: abc\r\ndata: def\r\n')); + + // assert: + expect(lineNum).toBe(2); + }); + }); + + describe('getMessages', () => { + it('happy path', () => { + // arrange: + let msgNum = 0; + const next = parse.getMessages(id => { + expect(id).toEqual('abc'); + }, retry => { + expect(retry).toEqual(42); + }, msg => { + ++msgNum; + expect(msg).toEqual({ + retry: 42, + id: 'abc', + event: 'def', + data: 'ghi' + }); + }); + + // act: + next(encoder.encode('retry: 42'), 5); + next(encoder.encode('id: abc'), 2); + next(encoder.encode('event:def'), 5); + next(encoder.encode('data:ghi'), 4); + next(encoder.encode(''), -1); + + // assert: + expect(msgNum).toBe(1); + }); + + it('skip unknown fields', () => { + let msgNum = 0; + const next = parse.getMessages(id => { + expect(id).toEqual('abc'); + }, _retry => { + fail('retry should not be called'); + }, msg => { + ++msgNum; + expect(msg).toEqual({ + id: 'abc', + data: '', + event: '', + retry: undefined, + }); + }); + + // act: + next(encoder.encode('id: abc'), 2); + next(encoder.encode('foo: null'), 3); + next(encoder.encode(''), -1); + + // assert: + 
expect(msgNum).toBe(1); + }); + + it('ignore non-integer retry', () => { + let msgNum = 0; + const next = parse.getMessages(_id => { + fail('id should not be called'); + }, _retry => { + fail('retry should not be called'); + }, msg => { + ++msgNum; + expect(msg).toEqual({ + id: '', + data: '', + event: '', + retry: undefined, + }); + }); + + // act: + next(encoder.encode('retry: def'), 5); + next(encoder.encode(''), -1); + + // assert: + expect(msgNum).toBe(1); + }); + + it('skip comment-only messages', () => { + // arrange: + let msgNum = 0; + const next = parse.getMessages(id => { + expect(id).toEqual('123'); + }, _retry => { + fail('retry should not be called'); + }, msg => { + ++msgNum; + expect(msg).toEqual({ + retry: undefined, + id: '123', + event: 'foo ', + data: '', + }); + }); + + // act: + next(encoder.encode('id:123'), 2); + next(encoder.encode(':'), 0); + next(encoder.encode(': '), 0); + next(encoder.encode('event: foo '), 5); + next(encoder.encode(''), -1); + + // assert: + expect(msgNum).toBe(1); + }); + + it('should append data split across multiple lines', () => { + // arrange: + let msgNum = 0; + const next = parse.getMessages(_id => { + fail('id should not be called'); + }, _retry => { + fail('retry should not be called'); + }, msg => { + ++msgNum; + expect(msg).toEqual({ + data: 'YHOO\n+2\n\n10', + id: '', + event: '', + retry: undefined, + }); + }); + + // act: + next(encoder.encode('data:YHOO'), 4); + next(encoder.encode('data: +2'), 4); + next(encoder.encode('data'), 4); + next(encoder.encode('data: 10'), 4); + next(encoder.encode(''), -1); + + // assert: + expect(msgNum).toBe(1); + }); + + it('should reset id if sent multiple times', () => { + // arrange: + const expectedIds = ['foo', '']; + let idsIdx = 0; + let msgNum = 0; + const next = parse.getMessages(id => { + expect(id).toEqual(expectedIds[idsIdx]); + ++idsIdx; + }, _retry => { + fail('retry should not be called'); + }, msg => { + ++msgNum; + expect(msg).toEqual({ + data: '', + id: 
'', + event: '', + retry: undefined, + }); + }); + + // act: + next(encoder.encode('id: foo'), 2); + next(encoder.encode('id'), 2); + next(encoder.encode(''), -1); + + // assert: + expect(idsIdx).toBe(2); + expect(msgNum).toBe(1); + }); + }); +}); diff --git a/data/node_modules/@huggingface/inference/src/vendor/fetch-event-source/parse.ts b/data/node_modules/@huggingface/inference/src/vendor/fetch-event-source/parse.ts new file mode 100644 index 0000000000000000000000000000000000000000..3915348e44a407c3e4e411c2091737248a789bf9 --- /dev/null +++ b/data/node_modules/@huggingface/inference/src/vendor/fetch-event-source/parse.ts @@ -0,0 +1,216 @@ +/** + This file is a part of fetch-event-source package (as of v2.0.1) + https://github.com/Azure/fetch-event-source/blob/v2.0.1/src/parse.ts + + Full package can be used after it is made compatible with nodejs: + https://github.com/Azure/fetch-event-source/issues/20 + + Below is the fetch-event-source package license: + + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + + */ + +/** + * Represents a message sent in an event stream + * https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format + */ +export interface EventSourceMessage { + /** The event ID to set the EventSource object's last event ID value. */ + id: string; + /** A string identifying the type of event described. */ + event: string; + /** The event data */ + data: string; + /** The reconnection interval (in milliseconds) to wait before retrying the connection */ + retry?: number; +} + +/** + * Converts a ReadableStream into a callback pattern. + * @param stream The input ReadableStream. + * @param onChunk A function that will be called on each new byte chunk in the stream. + * @returns {Promise} A promise that will be resolved when the stream closes. + */ +export async function getBytes(stream: ReadableStream, onChunk: (arr: Uint8Array) => void) { + const reader = stream.getReader(); + let result: ReadableStreamReadResult; + while (!(result = await reader.read()).done) { + onChunk(result.value); + } +} + +const enum ControlChars { + NewLine = 10, + CarriageReturn = 13, + Space = 32, + Colon = 58, +} + +/** + * Parses arbitary byte chunks into EventSource line buffers. + * Each line should be of the format "field: value" and ends with \r, \n, or \r\n. + * @param onLine A function that will be called on each new EventSource line. + * @returns A function that should be called for each incoming byte chunk. 
+ */ +export function getLines(onLine: (line: Uint8Array, fieldLength: number) => void) { + let buffer: Uint8Array | undefined; + let position: number; // current read position + let fieldLength: number; // length of the `field` portion of the line + let discardTrailingNewline = false; + + // return a function that can process each incoming byte chunk: + return function onChunk(arr: Uint8Array) { + if (buffer === undefined) { + buffer = arr; + position = 0; + fieldLength = -1; + } else { + // we're still parsing the old line. Append the new bytes into buffer: + buffer = concat(buffer, arr); + } + + const bufLength = buffer.length; + let lineStart = 0; // index where the current line starts + while (position < bufLength) { + if (discardTrailingNewline) { + if (buffer[position] === ControlChars.NewLine) { + lineStart = ++position; // skip to next char + } + + discardTrailingNewline = false; + } + + // start looking forward till the end of line: + let lineEnd = -1; // index of the \r or \n char + for (; position < bufLength && lineEnd === -1; ++position) { + switch (buffer[position]) { + case ControlChars.Colon: + if (fieldLength === -1) { // first colon in line + fieldLength = position - lineStart; + } + break; + // @ts-ignore:7029 \r case below should fallthrough to \n: + case ControlChars.CarriageReturn: + discardTrailingNewline = true; + case ControlChars.NewLine: + lineEnd = position; + break; + } + } + + if (lineEnd === -1) { + // We reached the end of the buffer but the line hasn't ended. 
+ // Wait for the next arr and then continue parsing: + break; + } + + // we've reached the line end, send it out: + onLine(buffer.subarray(lineStart, lineEnd), fieldLength); + lineStart = position; // we're now on the next line + fieldLength = -1; + } + + if (lineStart === bufLength) { + buffer = undefined; // we've finished reading it + } else if (lineStart !== 0) { + // Create a new view into buffer beginning at lineStart so we don't + // need to copy over the previous lines when we get the new arr: + buffer = buffer.subarray(lineStart); + position -= lineStart; + } + } +} + +/** + * Parses line buffers into EventSourceMessages. + * @param onId A function that will be called on each `id` field. + * @param onRetry A function that will be called on each `retry` field. + * @param onMessage A function that will be called on each message. + * @returns A function that should be called for each incoming line buffer. + */ +export function getMessages( + onId: (id: string) => void, + onRetry: (retry: number) => void, + onMessage?: (msg: EventSourceMessage) => void +) { + let message = newMessage(); + const decoder = new TextDecoder(); + + // return a function that can process each incoming line buffer: + return function onLine(line: Uint8Array, fieldLength: number) { + if (line.length === 0) { + // empty line denotes end of message. Trigger the callback and start a new message: + onMessage?.(message); + message = newMessage(); + } else if (fieldLength > 0) { // exclude comments and lines with no values + // line is of format ":" or ": " + // https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation + const field = decoder.decode(line.subarray(0, fieldLength)); + const valueOffset = fieldLength + (line[fieldLength + 1] === ControlChars.Space ? 2 : 1); + const value = decoder.decode(line.subarray(valueOffset)); + + switch (field) { + case 'data': + // if this message already has data, append the new value to the old. 
+ // otherwise, just set to the new value: + message.data = message.data + ? message.data + '\n' + value + : value; // otherwise, + break; + case 'event': + message.event = value; + break; + case 'id': + onId(message.id = value); + break; + case 'retry': + const retry = parseInt(value, 10); + if (!isNaN(retry)) { // per spec, ignore non-integers + onRetry(message.retry = retry); + } + break; + } + } + } +} + +function concat(a: Uint8Array, b: Uint8Array) { + const res = new Uint8Array(a.length + b.length); + res.set(a); + res.set(b, a.length); + return res; +} + +function newMessage(): EventSourceMessage { + // data, event, and id must be initialized to empty strings: + // https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation + // retry should be initialized to undefined so we return a consistent shape + // to the js engine all the time: https://mathiasbynens.be/notes/shapes-ics#takeaways + return { + data: '', + event: '', + id: '', + retry: undefined, + }; +} diff --git a/data/node_modules/@huggingface/tasks/LICENSE b/data/node_modules/@huggingface/tasks/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..1e25cde938b8abaa64a25aba817a01b7aba6472d --- /dev/null +++ b/data/node_modules/@huggingface/tasks/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Hugging Face + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/README.md b/data/node_modules/@huggingface/tasks/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3d49eb3a8a963177742bf062d844fdf2c7f7890a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/README.md @@ -0,0 +1,32 @@ +# Tasks + +This package contains the definition files (written in Typescript) for the huggingface.co hub's: + +- **pipeline types** (a.k.a. **task types**) - used to determine which widget to display on the model page, and which inference API to run. +- **default widget inputs** - when they aren't provided in the model card. +- definitions and UI elements for **model and dataset libraries**. + +Please add any missing ones to these definitions by opening a PR. Thanks 🔥 + +⚠️ The hub's definitive doc is at https://huggingface.co/docs/hub. + +## Definition of Tasks + +This package also contains data used to define https://huggingface.co/tasks. + +The Task pages are made to lower the barrier of entry to understand a task that can be solved with machine learning and use or train a model to accomplish it. It's a collaborative documentation effort made to help out software developers, social scientists, or anyone with no background in machine learning that is interested in understanding how machine learning models can be used to solve a problem. 
+ +The task pages avoid jargon to let everyone understand the documentation, and if specific terminology is needed, it is explained on the most basic level possible. This is important to understand before contributing to Tasks: at the end of every task page, the user is expected to be able to find and pull a model from the Hub and use it on their data and see if it works for their use case to come up with a proof of concept. + +## How to Contribute +You can open a pull request to contribute a new documentation about a new task. Under `src/tasks` we have a folder for every task that contains two files, `about.md` and `data.ts`. `about.md` contains the markdown part of the page, use cases, resources and minimal code block to infer a model that belongs to the task. `data.ts` contains redirections to canonical models and datasets, metrics, the schema of the task and the information the inference widget needs. + +![Anatomy of a Task Page](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/contribution-guide/anatomy.png) + +We have a [`dataset`](https://huggingface.co/datasets/huggingfacejs/tasks) that contains data used in the inference widget. The last file is `const.ts`, which has the task to library mapping (e.g. spacy to token-classification) where you can add a library. They will look in the top right corner like below. + +![Libraries of a Task](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/contribution-guide/libraries.png) + +This might seem overwhelming, but you don't necessarily need to add all of these in one pull request or on your own, you can simply contribute one section. Feel free to ask for help whenever you need. + +## Feedback (feature requests, bugs, etc.) 
is super welcome 💙💚💛💜♥️🧡 diff --git a/data/node_modules/@huggingface/tasks/dist/index.cjs b/data/node_modules/@huggingface/tasks/dist/index.cjs new file mode 100644 index 0000000000000000000000000000000000000000..8ba2e254465b9b03a4e9c4d9351295e293046577 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/index.cjs @@ -0,0 +1,6669 @@ +"use strict"; +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); + +// src/index.ts +var src_exports = {}; +__export(src_exports, { + ALL_DISPLAY_MODEL_LIBRARY_KEYS: () => ALL_DISPLAY_MODEL_LIBRARY_KEYS, + ALL_MODEL_LIBRARY_KEYS: () => ALL_MODEL_LIBRARY_KEYS, + DATASET_LIBRARIES_UI_ELEMENTS: () => DATASET_LIBRARIES_UI_ELEMENTS, + DEFAULT_MEMORY_OPTIONS: () => DEFAULT_MEMORY_OPTIONS, + LIBRARY_TASK_MAPPING: () => LIBRARY_TASK_MAPPING, + LOCAL_APPS: () => LOCAL_APPS, + MAPPING_DEFAULT_WIDGET: () => MAPPING_DEFAULT_WIDGET, + MODALITIES: () => MODALITIES, + MODALITY_LABELS: () => MODALITY_LABELS, + MODEL_LIBRARIES_UI_ELEMENTS: () => MODEL_LIBRARIES_UI_ELEMENTS, + PIPELINE_DATA: () => PIPELINE_DATA, + PIPELINE_TYPES: () => PIPELINE_TYPES, + PIPELINE_TYPES_SET: () => PIPELINE_TYPES_SET, + SKUS: () => SKUS, + SPECIAL_TOKENS_ATTRIBUTES: () => SPECIAL_TOKENS_ATTRIBUTES, + SUBTASK_TYPES: () => SUBTASK_TYPES, + 
TASKS_DATA: () => TASKS_DATA, + TASKS_MODEL_LIBRARIES: () => TASKS_MODEL_LIBRARIES, + snippets: () => snippets_exports +}); +module.exports = __toCommonJS(src_exports); + +// src/library-to-tasks.ts +var LIBRARY_TASK_MAPPING = { + "adapter-transformers": ["question-answering", "text-classification", "token-classification"], + allennlp: ["question-answering"], + asteroid: [ + // "audio-source-separation", + "audio-to-audio" + ], + bertopic: ["text-classification"], + diffusers: ["image-to-image", "text-to-image"], + doctr: ["object-detection"], + espnet: ["text-to-speech", "automatic-speech-recognition"], + fairseq: ["text-to-speech", "audio-to-audio"], + fastai: ["image-classification"], + fasttext: ["feature-extraction", "text-classification"], + flair: ["token-classification"], + k2: ["automatic-speech-recognition"], + keras: ["image-classification"], + nemo: ["automatic-speech-recognition"], + open_clip: ["zero-shot-classification", "zero-shot-image-classification"], + paddlenlp: ["fill-mask", "summarization", "zero-shot-classification"], + peft: ["text-generation"], + "pyannote-audio": ["automatic-speech-recognition"], + "sentence-transformers": ["feature-extraction", "sentence-similarity"], + setfit: ["text-classification"], + sklearn: ["tabular-classification", "tabular-regression", "text-classification"], + spacy: ["token-classification", "text-classification", "sentence-similarity"], + "span-marker": ["token-classification"], + speechbrain: [ + "audio-classification", + "audio-to-audio", + "automatic-speech-recognition", + "text-to-speech", + "text2text-generation" + ], + stanza: ["token-classification"], + timm: ["image-classification"], + transformers: [ + "audio-classification", + "automatic-speech-recognition", + "depth-estimation", + "document-question-answering", + "feature-extraction", + "fill-mask", + "image-classification", + "image-segmentation", + "image-to-image", + "image-to-text", + "object-detection", + "question-answering", + 
"summarization", + "table-question-answering", + "text2text-generation", + "text-classification", + "text-generation", + "text-to-audio", + "text-to-speech", + "token-classification", + "translation", + "video-classification", + "visual-question-answering", + "zero-shot-classification", + "zero-shot-image-classification", + "zero-shot-object-detection" + ], + mindspore: ["image-classification"] +}; + +// src/default-widget-inputs.ts +var MAPPING_EN = /* @__PURE__ */ new Map([ + ["text-classification", [`I like you. I love you`]], + [ + "token-classification", + [ + `My name is Wolfgang and I live in Berlin`, + `My name is Sarah and I live in London`, + `My name is Clara and I live in Berkeley, California.` + ] + ], + [ + "table-question-answering", + [ + { + text: `How many stars does the transformers repository have?`, + table: { + Repository: ["Transformers", "Datasets", "Tokenizers"], + Stars: [36542, 4512, 3934], + Contributors: [651, 77, 34], + "Programming language": ["Python", "Python", "Rust, Python and NodeJS"] + } + } + ] + ], + [ + "question-answering", + [ + { + text: `Where do I live?`, + context: `My name is Wolfgang and I live in Berlin` + }, + { + text: `Where do I live?`, + context: `My name is Sarah and I live in London` + }, + { + text: `What's my name?`, + context: `My name is Clara and I live in Berkeley.` + }, + { + text: `Which name is also used to describe the Amazon rainforest in English?`, + context: `The Amazon rainforest (Portuguese: Floresta Amaz\xF4nica or Amaz\xF4nia; Spanish: Selva Amaz\xF3nica, Amazon\xEDa or usually Amazonia; French: For\xEAt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. 
The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.` + } + ] + ], + [ + "zero-shot-classification", + [ + { + text: "I have a problem with my iphone that needs to be resolved asap!!", + candidate_labels: "urgent, not urgent, phone, tablet, computer", + multi_class: true + }, + { + text: "Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.", + candidate_labels: "mobile, website, billing, account access", + multi_class: false + }, + { + text: "A new model offers an explanation for how the Galilean satellites formed around the solar system\u2019s largest world. Konstantin Batygin did not set out to solve one of the solar system\u2019s most puzzling mysteries when he went for a run up a hill in Nice, France. Dr. Batygin, a Caltech researcher, best known for his contributions to the search for the solar system\u2019s missing \u201CPlanet Nine,\u201D spotted a beer bottle. At a steep, 20 degree grade, he wondered why it wasn\u2019t rolling down the hill. He realized there was a breeze at his back holding the bottle in place. Then he had a thought that would only pop into the mind of a theoretical astrophysicist: \u201COh! This is how Europa formed.\u201D Europa is one of Jupiter\u2019s four large Galilean moons. And in a paper published Monday in the Astrophysical Journal, Dr. 
Batygin and a co-author, Alessandro Morbidelli, a planetary scientist at the C\xF4te d\u2019Azur Observatory in France, present a theory explaining how some moons form around gas giants like Jupiter and Saturn, suggesting that millimeter-sized grains of hail produced during the solar system\u2019s formation became trapped around these massive worlds, taking shape one at a time into the potentially habitable moons we know today.", + candidate_labels: "space & cosmos, scientific discovery, microbiology, robots, archeology", + multi_class: true + } + ] + ], + ["translation", [`My name is Wolfgang and I live in Berlin`, `My name is Sarah and I live in London`]], + [ + "summarization", + [ + `The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.` + ] + ], + [ + "conversational", + [ + `Hey my name is Julien! How are you?`, + `Hey my name is Thomas! How are you?`, + `Hey my name is Mariama! How are you?`, + `Hey my name is Clara! How are you?`, + `Hey my name is Julien! 
How are you?`, + `Hi.` + ] + ], + [ + "text-generation", + [ + `My name is Julien and I like to`, + `My name is Thomas and my main`, + `My name is Mariama, my favorite`, + `My name is Clara and I am`, + `My name is Lewis and I like to`, + `My name is Merve and my favorite`, + `My name is Teven and I am`, + `Once upon a time,` + ] + ], + ["fill-mask", [`Paris is the of France.`, `The goal of life is .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "That is a happy person", + sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"] + } + ] + ] +]); +var MAPPING_ZH = /* @__PURE__ */ new Map([ + ["text-classification", [`\u6211\u559C\u6B22\u4F60\u3002 \u6211\u7231\u4F60`]], + ["token-classification", [`\u6211\u53EB\u6C83\u5C14\u592B\u5188\uFF0C\u6211\u4F4F\u5728\u67CF\u6797\u3002`, `\u6211\u53EB\u8428\u62C9\uFF0C\u6211\u4F4F\u5728\u4F26\u6566\u3002`, `\u6211\u53EB\u514B\u62C9\u62C9\uFF0C\u6211\u4F4F\u5728\u52A0\u5DDE\u4F2F\u514B\u5229\u3002`]], + [ + "question-answering", + [ + { + text: `\u6211\u4F4F\u5728\u54EA\u91CC\uFF1F`, + context: `\u6211\u53EB\u6C83\u5C14\u592B\u5188\uFF0C\u6211\u4F4F\u5728\u67CF\u6797\u3002` + }, + { + text: `\u6211\u4F4F\u5728\u54EA\u91CC\uFF1F`, + context: `\u6211\u53EB\u8428\u62C9\uFF0C\u6211\u4F4F\u5728\u4F26\u6566\u3002` + }, + { + text: `\u6211\u7684\u540D\u5B57\u662F\u4EC0\u4E48\uFF1F`, + context: `\u6211\u53EB\u514B\u62C9\u62C9\uFF0C\u6211\u4F4F\u5728\u4F2F\u514B\u5229\u3002` + } + ] + ], + ["translation", [`\u6211\u53EB\u6C83\u5C14\u592B\u5188\uFF0C\u6211\u4F4F\u5728\u67CF\u6797\u3002`, `\u6211\u53EB\u8428\u62C9\uFF0C\u6211\u4F4F\u5728\u4F26\u6566\u3002`]], + [ + "zero-shot-classification", + [ + { + text: "\u623F\u95F4\u5E72\u51C0\u660E\u4EAE\uFF0C\u975E\u5E38\u4E0D\u9519", + candidate_labels: "\u8FD9\u662F\u4E00\u6761\u5DEE\u8BC4, \u8FD9\u662F\u4E00\u6761\u597D\u8BC4" + } + ] + ], + [ + "summarization", + [ + 
`\u8BE5\u5854\u9AD8324\u7C73\uFF081063\u82F1\u5C3A\uFF09\uFF0C\u4E0E\u4E00\u5E6281\u5C42\u7684\u5EFA\u7B51\u7269\u4E00\u6837\u9AD8\uFF0C\u662F\u5DF4\u9ECE\u6700\u9AD8\u7684\u5EFA\u7B51\u7269\u3002 \u5B83\u7684\u5E95\u5EA7\u662F\u65B9\u5F62\u7684\uFF0C\u6BCF\u8FB9\u957F125\u7C73\uFF08410\u82F1\u5C3A\uFF09\u3002 \u5728\u5EFA\u9020\u8FC7\u7A0B\u4E2D\uFF0C\u827E\u83F2\u5C14\u94C1\u5854\u8D85\u8FC7\u4E86\u534E\u76DB\u987F\u7EAA\u5FF5\u7891\uFF0C\u6210\u4E3A\u4E16\u754C\u4E0A\u6700\u9AD8\u7684\u4EBA\u9020\u7ED3\u6784\uFF0C\u5B83\u4FDD\u6301\u4E8641\u5E74\u7684\u5934\u8854\uFF0C\u76F4\u52301930\u5E74\u7EBD\u7EA6\u5E02\u7684\u514B\u83B1\u65AF\u52D2\u5927\u697C\u7AE3\u5DE5\u3002\u8FD9\u662F\u7B2C\u4E00\u4E2A\u5230\u8FBE300\u7C73\u9AD8\u5EA6\u7684\u7ED3\u6784\u3002 \u7531\u4E8E1957\u5E74\u5728\u5854\u9876\u589E\u52A0\u4E86\u5E7F\u64AD\u5929\u7EBF\uFF0C\u56E0\u6B64\u5B83\u73B0\u5728\u6BD4\u514B\u83B1\u65AF\u52D2\u5927\u53A6\u9AD85.2\u7C73\uFF0817\u82F1\u5C3A\uFF09\u3002 \u9664\u53D1\u5C04\u5668\u5916\uFF0C\u827E\u83F2\u5C14\u94C1\u5854\u662F\u6CD5\u56FD\u7B2C\u4E8C\u9AD8\u7684\u72EC\u7ACB\u5F0F\u5EFA\u7B51\uFF0C\u4EC5\u6B21\u4E8E\u7C73\u52B3\u9AD8\u67B6\u6865\u3002` + ] + ], + [ + "text-generation", + [`\u6211\u53EB\u6731\u5229\u5B89\uFF0C\u6211\u559C\u6B22`, `\u6211\u53EB\u6258\u9A6C\u65AF\uFF0C\u6211\u7684\u4E3B\u8981`, `\u6211\u53EB\u739B\u4E3D\u4E9A\uFF0C\u6211\u6700\u559C\u6B22\u7684`, `\u6211\u53EB\u514B\u62C9\u62C9\uFF0C\u6211\u662F`, `\u4ECE\u524D\uFF0C`] + ], + ["fill-mask", [`\u5DF4\u9ECE\u662F\u56FD\u7684\u9996\u90FD\u3002`, `\u751F\u6D3B\u7684\u771F\u8C1B\u662F\u3002`]], + [ + "sentence-similarity", + [ + { + source_sentence: "\u90A3\u662F \u500B\u5FEB\u6A02\u7684\u4EBA", + sentences: ["\u90A3\u662F \u689D\u5FEB\u6A02\u7684\u72D7", "\u90A3\u662F \u500B\u975E\u5E38\u5E78\u798F\u7684\u4EBA", "\u4ECA\u5929\u662F\u6674\u5929"] + } + ] + ] +]); +var MAPPING_FR = /* @__PURE__ */ new Map([ + ["text-classification", [`Je t'appr\xE9cie beaucoup. 
Je t'aime.`]], + ["token-classification", [`Mon nom est Wolfgang et je vis \xE0 Berlin`]], + [ + "question-answering", + [ + { + text: `O\xF9 est-ce que je vis?`, + context: `Mon nom est Wolfgang et je vis \xE0 Berlin` + } + ] + ], + ["translation", [`Mon nom est Wolfgang et je vis \xE0 Berlin`]], + [ + "summarization", + [ + `La tour fait 324 m\xE8tres (1,063 pieds) de haut, environ la m\xEAme hauteur qu'un immeuble de 81 \xE9tages, et est la plus haute structure de Paris. Sa base est carr\xE9e, mesurant 125 m\xE8tres (410 pieds) sur chaque c\xF4t\xE9. Durant sa construction, la tour Eiffel surpassa le Washington Monument pour devenir la plus haute structure construite par l'homme dans le monde, un titre qu'elle conserva pendant 41 ans jusqu'\xE0 l'ach\xE8vement du Chrysler Building \xE0 New-York City en 1930. Ce fut la premi\xE8re structure \xE0 atteindre une hauteur de 300 m\xE8tres. Avec l'ajout d'une antenne de radiodiffusion au sommet de la tour Eiffel en 1957, celle-ci redevint plus haute que le Chrysler Building de 5,2 m\xE8tres (17 pieds). En excluant les transmetteurs, elle est la seconde plus haute stucture autoportante de France apr\xE8s le viaduc de Millau.` + ] + ], + ["text-generation", [`Mon nom est Julien et j'aime`, `Mon nom est Thomas et mon principal`, `Il \xE9tait une fois`]], + ["fill-mask", [`Paris est la de la France.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "C'est une personne heureuse", + sentences: [ + "C'est un chien heureux", + "C'est une personne tr\xE8s heureuse", + "Aujourd'hui est une journ\xE9e ensoleill\xE9e" + ] + } + ] + ] +]); +var MAPPING_ES = /* @__PURE__ */ new Map([ + ["text-classification", [`Te quiero. 
Te amo.`]], + ["token-classification", [`Me llamo Wolfgang y vivo en Berlin`]], + [ + "question-answering", + [ + { + text: `\xBFD\xF3nde vivo?`, + context: `Me llamo Wolfgang y vivo en Berlin` + }, + { + text: `\xBFQui\xE9n invent\xF3 el submarino?`, + context: `Isaac Peral fue un murciano que invent\xF3 el submarino` + }, + { + text: `\xBFCu\xE1ntas personas hablan espa\xF1ol?`, + context: `El espa\xF1ol es el segundo idioma m\xE1s hablado del mundo con m\xE1s de 442 millones de hablantes` + } + ] + ], + [ + "translation", + [ + `Me llamo Wolfgang y vivo en Berlin`, + `Los ingredientes de una tortilla de patatas son: huevos, patatas y cebolla` + ] + ], + [ + "summarization", + [ + `La torre tiene 324 metros (1.063 pies) de altura, aproximadamente la misma altura que un edificio de 81 pisos y la estructura m\xE1s alta de Par\xEDs. Su base es cuadrada, mide 125 metros (410 pies) a cada lado. Durante su construcci\xF3n, la Torre Eiffel super\xF3 al Washington Monument para convertirse en la estructura artificial m\xE1s alta del mundo, un t\xEDtulo que mantuvo durante 41 a\xF1os hasta que el Chrysler Building en la ciudad de Nueva York se termin\xF3 en 1930. Fue la primera estructura en llegar Una altura de 300 metros. Debido a la adici\xF3n de una antena de transmisi\xF3n en la parte superior de la torre en 1957, ahora es m\xE1s alta que el Chrysler Building en 5,2 metros (17 pies). 
Excluyendo los transmisores, la Torre Eiffel es la segunda estructura independiente m\xE1s alta de Francia despu\xE9s del Viaducto de Millau.` + ] + ], + [ + "text-generation", + [ + `Me llamo Julien y me gusta`, + `Me llamo Thomas y mi principal`, + `Me llamo Manuel y trabajo en`, + `\xC9rase una vez,`, + `Si t\xFA me dices ven, ` + ] + ], + ["fill-mask", [`Mi nombre es y vivo en Nueva York.`, `El espa\xF1ol es un idioma muy en el mundo.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Esa es una persona feliz", + sentences: ["Ese es un perro feliz", "Esa es una persona muy feliz", "Hoy es un d\xEDa soleado"] + } + ] + ] +]); +var MAPPING_RU = /* @__PURE__ */ new Map([ + ["text-classification", [`\u0422\u044B \u043C\u043D\u0435 \u043D\u0440\u0430\u0432\u0438\u0448\u044C\u0441\u044F. \u042F \u0442\u0435\u0431\u044F \u043B\u044E\u0431\u043B\u044E`]], + ["token-classification", [`\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435`]], + [ + "question-answering", + [ + { + text: `\u0413\u0434\u0435 \u0436\u0438\u0432\u0443?`, + context: `\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435` + } + ] + ], + ["translation", [`\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435`]], + [ + "summarization", + [ + `\u0412\u044B\u0441\u043E\u0442\u0430 \u0431\u0430\u0448\u043D\u0438 \u0441\u043E\u0441\u0442\u0430\u0432\u043B\u044F\u0435\u0442 324 \u043C\u0435\u0442\u0440\u0430 (1063 \u0444\u0443\u0442\u0430), \u043F\u0440\u0438\u043C\u0435\u0440\u043D\u043E \u0442\u0430\u043A\u0430\u044F \u0436\u0435 
\u0432\u044B\u0441\u043E\u0442\u0430, \u043A\u0430\u043A \u0443 81-\u044D\u0442\u0430\u0436\u043D\u043E\u0433\u043E \u0437\u0434\u0430\u043D\u0438\u044F, \u0438 \u0441\u0430\u043C\u043E\u0435 \u0432\u044B\u0441\u043E\u043A\u043E\u0435 \u0441\u043E\u043E\u0440\u0443\u0436\u0435\u043D\u0438\u0435 \u0432 \u041F\u0430\u0440\u0438\u0436\u0435. \u0415\u0433\u043E \u043E\u0441\u043D\u043E\u0432\u0430\u043D\u0438\u0435 \u043A\u0432\u0430\u0434\u0440\u0430\u0442\u043D\u043E, \u0440\u0430\u0437\u043C\u0435\u0440\u043E\u043C 125 \u043C\u0435\u0442\u0440\u043E\u0432 (410 \u0444\u0443\u0442\u043E\u0432) \u0441 \u043B\u044E\u0431\u043E\u0439 \u0441\u0442\u043E\u0440\u043E\u043D\u044B. \u0412\u043E \u0432\u0440\u0435\u043C\u044F \u0441\u0442\u0440\u043E\u0438\u0442\u0435\u043B\u044C\u0441\u0442\u0432\u0430 \u042D\u0439\u0444\u0435\u043B\u0435\u0432\u0430 \u0431\u0430\u0448\u043D\u044F \u043F\u0440\u0435\u0432\u0437\u043E\u0448\u043B\u0430 \u043C\u043E\u043D\u0443\u043C\u0435\u043D\u0442 \u0412\u0430\u0448\u0438\u043D\u0433\u0442\u043E\u043D\u0430, \u0441\u0442\u0430\u0432 \u0441\u0430\u043C\u044B\u043C \u0432\u044B\u0441\u043E\u043A\u0438\u043C \u0438\u0441\u043A\u0443\u0441\u0441\u0442\u0432\u0435\u043D\u043D\u044B\u043C \u0441\u043E\u043E\u0440\u0443\u0436\u0435\u043D\u0438\u0435\u043C \u0432 \u043C\u0438\u0440\u0435, \u0438 \u044D\u0442\u043E\u0442 \u0442\u0438\u0442\u0443\u043B \u043E\u043D\u0430 \u0443\u0434\u0435\u0440\u0436\u0438\u0432\u0430\u043B\u0430 \u0432 \u0442\u0435\u0447\u0435\u043D\u0438\u0435 41 \u0433\u043E\u0434\u0430 \u0434\u043E \u0437\u0430\u0432\u0435\u0440\u0448\u0435\u043D\u0438\u044F \u0441\u0442\u0440\u043E\u0438\u0442\u0435\u043B\u044C\u0441\u0442\u0432\u043E \u0437\u0434\u0430\u043D\u0438\u044F \u041A\u0440\u0430\u0439\u0441\u043B\u0435\u0440 \u0432 \u041D\u044C\u044E-\u0419\u043E\u0440\u043A\u0435 \u0432 1930 \u0433\u043E\u0434\u0443. 
\u042D\u0442\u043E \u043F\u0435\u0440\u0432\u043E\u0435 \u0441\u043E\u043E\u0440\u0443\u0436\u0435\u043D\u0438\u0435 \u043A\u043E\u0442\u043E\u0440\u043E\u0435 \u0434\u043E\u0441\u0442\u0438\u0433\u043B\u043E \u0432\u044B\u0441\u043E\u0442\u044B 300 \u043C\u0435\u0442\u0440\u043E\u0432. \u0418\u0437-\u0437\u0430 \u0434\u043E\u0431\u0430\u0432\u043B\u0435\u043D\u0438\u044F \u0432\u0435\u0449\u0430\u0442\u0435\u043B\u044C\u043D\u043E\u0439 \u0430\u043D\u0442\u0435\u043D\u043D\u044B \u043D\u0430 \u0432\u0435\u0440\u0448\u0438\u043D\u0435 \u0431\u0430\u0448\u043D\u0438 \u0432 1957 \u0433\u043E\u0434\u0443 \u043E\u043D\u0430 \u0441\u0435\u0439\u0447\u0430\u0441 \u0432\u044B\u0448\u0435 \u0437\u0434\u0430\u043D\u0438\u044F \u041A\u0440\u0430\u0439\u0441\u043B\u0435\u0440 \u043D\u0430 5,2 \u043C\u0435\u0442\u0440\u0430 (17 \u0444\u0443\u0442\u043E\u0432). \u0417\u0430 \u0438\u0441\u043A\u043B\u044E\u0447\u0435\u043D\u0438\u0435\u043C \u043F\u0435\u0440\u0435\u0434\u0430\u0442\u0447\u0438\u043A\u043E\u0432, \u042D\u0439\u0444\u0435\u043B\u0435\u0432\u0430 \u0431\u0430\u0448\u043D\u044F \u044F\u0432\u043B\u044F\u0435\u0442\u0441\u044F \u0432\u0442\u043E\u0440\u043E\u0439 \u0441\u0430\u043C\u043E\u0439 \u0432\u044B\u0441\u043E\u043A\u043E\u0439 \u043E\u0442\u0434\u0435\u043B\u044C\u043D\u043E \u0441\u0442\u043E\u044F\u0449\u0435\u0439 \u0441\u0442\u0440\u0443\u043A\u0442\u0443\u0440\u043E\u0439 \u0432\u043E \u0424\u0440\u0430\u043D\u0446\u0438\u0438 \u043F\u043E\u0441\u043B\u0435 \u0432\u0438\u0430\u0434\u0443\u043A\u0430 \u041C\u0438\u0439\u043E.` + ] + ], + ["text-generation", [`\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0416\u044E\u043B\u044C\u0435\u043D \u0438`, `\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0422\u043E\u043C\u0430\u0441 \u0438 \u043C\u043E\u0439 \u043E\u0441\u043D\u043E\u0432\u043D\u043E\u0439`, `\u041E\u0434\u043D\u0430\u0436\u0434\u044B`]], + ["fill-mask", [`\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0438 
\u044F \u0438\u043D\u0436\u0435\u043D\u0435\u0440 \u0436\u0438\u0432\u0443\u0449\u0438\u0439 \u0432 \u041D\u044C\u044E-\u0419\u043E\u0440\u043A\u0435.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "\u042D\u0442\u043E \u0441\u0447\u0430\u0441\u0442\u043B\u0438\u0432\u044B\u0439 \u0447\u0435\u043B\u043E\u0432\u0435\u043A", + sentences: ["\u042D\u0442\u043E \u0441\u0447\u0430\u0441\u0442\u043B\u0438\u0432\u0430\u044F \u0441\u043E\u0431\u0430\u043A\u0430", "\u042D\u0442\u043E \u043E\u0447\u0435\u043D\u044C \u0441\u0447\u0430\u0441\u0442\u043B\u0438\u0432\u044B\u0439 \u0447\u0435\u043B\u043E\u0432\u0435\u043A", "\u0421\u0435\u0433\u043E\u0434\u043D\u044F \u0441\u043E\u043B\u043D\u0435\u0447\u043D\u044B\u0439 \u0434\u0435\u043D\u044C"] + } + ] + ] +]); +var MAPPING_UK = /* @__PURE__ */ new Map([ + ["translation", [`\u041C\u0435\u043D\u0435 \u0437\u0432\u0430\u0442\u0438 \u0412\u043E\u043B\u044C\u0444\u0491\u0430\u043D\u0491 \u0456 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0456\u043D\u0456.`]], + ["fill-mask", [`\u041C\u0435\u043D\u0435 \u0437\u0432\u0430\u0442\u0438 .`]] +]); +var MAPPING_IT = /* @__PURE__ */ new Map([ + ["text-classification", [`Mi piaci. 
Ti amo`]], + [ + "token-classification", + [ + `Mi chiamo Wolfgang e vivo a Berlino`, + `Mi chiamo Sarah e vivo a Londra`, + `Mi chiamo Clara e vivo a Berkeley in California.` + ] + ], + [ + "question-answering", + [ + { + text: `Dove vivo?`, + context: `Mi chiamo Wolfgang e vivo a Berlino` + }, + { + text: `Dove vivo?`, + context: `Mi chiamo Sarah e vivo a Londra` + }, + { + text: `Come mio chiamo?`, + context: `Mi chiamo Clara e vivo a Berkeley.` + } + ] + ], + ["translation", [`Mi chiamo Wolfgang e vivo a Berlino`, `Mi chiamo Sarah e vivo a Londra`]], + [ + "summarization", + [ + `La torre degli Asinelli \xE8 una delle cosiddette due torri di Bologna, simbolo della citt\xE0, situate in piazza di porta Ravegnana, all'incrocio tra le antiche strade San Donato (ora via Zamboni), San Vitale, Maggiore e Castiglione. Eretta, secondo la tradizione, fra il 1109 e il 1119 dal nobile Gherardo Asinelli, la torre \xE8 alta 97,20 metri, pende verso ovest per 2,23 metri e presenta all'interno una scalinata composta da 498 gradini. Ancora non si pu\xF2 dire con certezza quando e da chi fu costruita la torre degli Asinelli. 
Si presume che la torre debba il proprio nome a Gherardo Asinelli, il nobile cavaliere di fazione ghibellina al quale se ne attribuisce la costruzione, iniziata secondo una consolidata tradizione l'11 ottobre 1109 e terminata dieci anni dopo, nel 1119.` + ] + ], + [ + "text-generation", + [ + `Mi chiamo Loreto e mi piace`, + `Mi chiamo Thomas e il mio principale`, + `Mi chiamo Marianna, la mia cosa preferita`, + `Mi chiamo Clara e sono`, + `C'era una volta` + ] + ], + ["fill-mask", [`Roma \xE8 la d'Italia.`, `Lo scopo della vita \xE8 .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Questa \xE8 una persona felice", + sentences: ["Questo \xE8 un cane felice", "Questa \xE8 una persona molto felice", "Oggi \xE8 una giornata di sole"] + } + ] + ] +]); +var MAPPING_FA = /* @__PURE__ */ new Map([ + [ + "text-classification", + [`\u067E\u0631\u0648\u0698\u0647 \u0628\u0647 \u0645\u0648\u0642\u0639 \u062A\u062D\u0648\u06CC\u0644 \u0634\u062F \u0648 \u0647\u0645\u0647 \u0686\u06CC\u0632 \u062E\u0648\u0628 \u0628\u0648\u062F.`, `\u0633\u06CC\u0628\u200C\u0632\u0645\u06CC\u0646\u06CC \u0628\u06CC\u200C\u06A9\u06CC\u0641\u06CC\u062A \u0628\u0648\u062F.`, `\u0642\u06CC\u0645\u062A \u0648 \u06A9\u06CC\u0641\u06CC\u062A \u0639\u0627\u0644\u06CC`, `\u062E\u0648\u0628 \u0646\u0628\u0648\u062F \u0627\u0635\u0644\u0627`] + ], + [ + "token-classification", + [ + `\u0627\u06CC\u0646 \u0633\u0631\u06CC\u0627\u0644 \u0628\u0647 \u0635\u0648\u0631\u062A \u0631\u0633\u0645\u06CC \u062F\u0631 \u062A\u0627\u0631\u06CC\u062E \u062F\u0647\u0645 \u0645\u06CC \u06F2\u06F0\u06F1\u06F1 \u062A\u0648\u0633\u0637 \u0634\u0628\u06A9\u0647 \u0641\u0627\u06A9\u0633 \u0628\u0631\u0627\u06CC \u067E\u062E\u0634 \u0631\u0632\u0631\u0648 \u0634\u062F.`, + `\u062F\u0641\u062A\u0631 \u0645\u0631\u06A9\u0632\u06CC \u0634\u0631\u06A9\u062A \u067E\u0627\u0631\u0633\u200C\u0645\u06CC\u0646\u0648 \u062F\u0631 \u0634\u0647\u0631 \u0627\u0631\u0627\u06A9 \u062F\u0631 \u0627\u0633\u062A\u0627\u0646 
\u0645\u0631\u06A9\u0632\u06CC \u0642\u0631\u0627\u0631 \u062F\u0627\u0631\u062F.`, + `\u0648\u06CC \u062F\u0631 \u0633\u0627\u0644 \u06F2\u06F0\u06F1\u06F3 \u062F\u0631\u06AF\u0630\u0634\u062A \u0648 \u0645\u0633\u0626\u0648\u0644 \u062E\u0627\u06A9\u0633\u067E\u0627\u0631\u06CC \u0648 \u0627\u0642\u0648\u0627\u0645\u0634 \u0628\u0631\u0627\u06CC \u0627\u0648 \u0645\u0631\u0627\u0633\u0645 \u06CC\u0627\u062F\u0628\u0648\u062F \u06AF\u0631\u0641\u062A\u0646\u062F.` + ] + ], + [ + "question-answering", + [ + { + text: `\u0645\u0646 \u06A9\u062C\u0627 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u06A9\u0646\u0645\u061F`, + context: `\u0646\u0627\u0645 \u0645\u0646 \u067E\u0698\u0645\u0627\u0646 \u0627\u0633\u062A \u0648 \u062F\u0631 \u06AF\u0631\u06AF\u0627\u0646 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u06A9\u0646\u0645.` + }, + { + text: `\u0646\u0627\u0645\u0645 \u0686\u06CC\u0633\u062A \u0648 \u06A9\u062C\u0627 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u200C\u06A9\u0646\u0645\u061F`, + context: `\u0627\u0633\u0645\u0645 \u0633\u0627\u0631\u0627 \u0627\u0633\u062A \u0648 \u062F\u0631 \u0622\u0641\u0631\u06CC\u0642\u0627\u06CC \u062C\u0646\u0648\u0628\u06CC \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u06A9\u0646\u0645.` + }, + { + text: `\u0646\u0627\u0645 \u0645\u0646 \u0686\u06CC\u0633\u062A\u061F`, + context: `\u0645\u0646 \u0645\u0631\u06CC\u0645 \u0647\u0633\u062A\u0645 \u0648 \u062F\u0631 \u062A\u0628\u0631\u06CC\u0632 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u200C\u06A9\u0646\u0645.` + }, + { + text: `\u0628\u06CC\u0634\u062A\u0631\u06CC\u0646 \u0645\u0633\u0627\u062D\u062A \u062C\u0646\u06AF\u0644 \u0622\u0645\u0627\u0632\u0648\u0646 \u062F\u0631 \u06A9\u062F\u0627\u0645 \u06A9\u0634\u0648\u0631 \u0627\u0633\u062A\u061F`, + context: [ + "\u0622\u0645\u0627\u0632\u0648\u0646 \u0646\u0627\u0645 \u0628\u0632\u0631\u06AF\u200C\u062A\u0631\u06CC\u0646 \u062C\u0646\u06AF\u0644 \u0628\u0627\u0631\u0627\u0646\u06CC \u062C\u0647\u0627\u0646 \u0627\u0633\u062A 
\u06A9\u0647 \u062F\u0631 \u0634\u0645\u0627\u0644 \u0622\u0645\u0631\u06CC\u06A9\u0627\u06CC \u062C\u0646\u0648\u0628\u06CC \u0642\u0631\u0627\u0631 \u06AF\u0631\u0641\u062A\u0647 \u0648 \u0628\u06CC\u0634\u062A\u0631 \u0622\u0646 \u062F\u0631 \u062E\u0627\u06A9 \u0628\u0631\u0632\u06CC\u0644 \u0648 \u067E\u0631\u0648", + "\u062C\u0627\u06CC \u062F\u0627\u0631\u062F. \u0628\u06CC\u0634 \u0627\u0632 \u0646\u06CC\u0645\u06CC \u0627\u0632 \u0647\u0645\u0647 \u062C\u0646\u06AF\u0644\u200C\u0647\u0627\u06CC \u0628\u0627\u0631\u0627\u0646\u06CC \u0628\u0627\u0642\u06CC\u200C\u0645\u0627\u0646\u062F\u0647 \u062F\u0631 \u062C\u0647\u0627\u0646 \u062F\u0631 \u0622\u0645\u0627\u0632\u0648\u0646 \u0642\u0631\u0627\u0631 \u062F\u0627\u0631\u062F.", + "\u0645\u0633\u0627\u062D\u062A \u062C\u0646\u06AF\u0644\u200C\u0647\u0627\u06CC \u0622\u0645\u0627\u0632\u0648\u0646 \u06F5\u066B\u06F5 \u0645\u06CC\u0644\u06CC\u0648\u0646 \u06A9\u06CC\u0644\u0648\u0645\u062A\u0631 \u0645\u0631\u0628\u0639 \u0627\u0633\u062A \u06A9\u0647 \u0628\u06CC\u0646 \u06F9 \u06A9\u0634\u0648\u0631 \u062A\u0642\u0633\u06CC\u0645 \u0634\u062F\u0647\u200C\u0627\u0633\u062A." 
+ ].join("\n") + } + ] + ], + [ + "translation", + [ + "\u0628\u06CC\u0634\u062A\u0631 \u0645\u0633\u0627\u062D\u062A \u062C\u0646\u06AF\u0644\u200C\u0647\u0627\u06CC \u0622\u0645\u0627\u0632\u0648\u0646 \u062F\u0631 \u062D\u0648\u0636\u0647 \u0622\u0628\u0631\u06CC\u0632 \u0631\u0648\u062F \u0622\u0645\u0627\u0632\u0648\u0646 \u0648 \u06F1\u06F1\u06F0\u06F0 \u0634\u0627\u062E\u0647 \u0622\u0646 \u0648\u0627\u0642\u0639 \u0634\u062F\u0647\u200C\u0627\u0633\u062A.", + "\u0645\u0631\u062F\u0645\u0627\u0646 \u0646\u064E\u0628\u064E\u0637\u06CC \u0627\u0632 \u0647\u0632\u0627\u0631\u0647\u200C\u0647\u0627\u06CC \u06CC\u06A9\u0645 \u0648 \u062F\u0648\u0645 \u067E\u06CC\u0634 \u0627\u0632 \u0645\u06CC\u0644\u0627\u062F \u062F\u0631 \u0627\u06CC\u0646 \u0645\u0646\u0637\u0642\u0647 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u200C\u06A9\u0631\u062F\u0646\u062F." + ] + ], + [ + "summarization", + [ + [ + "\u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0627\u062B\u0631 \u062D\u06A9\u06CC\u0645 \u0627\u0628\u0648\u0627\u0644\u0642\u0627\u0633\u0645 \u0641\u0631\u062F\u0648\u0633\u06CC \u062A\u0648\u0633\u06CC\u060C \u062D\u0645\u0627\u0633\u0647\u200C\u0627\u06CC \u0645\u0646\u0638\u0648\u0645\u060C \u0628\u0631 \u062D\u0633\u0628 \u062F\u0633\u062A \u0646\u0648\u0634\u062A\u0647\u200C\u0647\u0627\u06CC ", + "\u0645\u0648\u062C\u0648\u062F \u062F\u0631\u0628\u0631\u06AF\u06CC\u0631\u0646\u062F\u0647 \u0646\u0632\u062F\u06CC\u06A9 \u0628\u0647 \u06F5\u06F0\u066C\u06F0\u06F0\u06F0 \u0628\u06CC\u062A \u062A\u0627 \u0646\u0632\u062F\u06CC\u06A9 \u0628\u0647 \u06F6\u06F1\u066C\u06F0\u06F0\u06F0 \u0628\u06CC\u062A \u0648 \u06CC\u06A9\u06CC \u0627\u0632 ", + "\u0628\u0632\u0631\u06AF\u200C\u062A\u0631\u06CC\u0646 \u0648 \u0628\u0631\u062C\u0633\u062A\u0647\u200C\u062A\u0631\u06CC\u0646 \u0633\u0631\u0648\u062F\u0647\u200C\u0647\u0627\u06CC \u062D\u0645\u0627\u0633\u06CC \u062C\u0647\u0627\u0646 \u0627\u0633\u062A \u06A9\u0647 \u0633\u0631\u0627\u06CC\u0634 \u0622\u0646 
\u062F\u0633\u062A\u200C\u0622\u0648\u0631\u062F\u0650 ", + "\u062F\u0633\u062A\u200C\u06A9\u0645 \u0633\u06CC \u0633\u0627\u0644 \u06A9\u0627\u0631\u0650 \u067E\u06CC\u0648\u0633\u062A\u0647\u0654 \u0627\u06CC\u0646 \u0633\u062E\u0646\u200C\u0633\u0631\u0627\u06CC \u0646\u0627\u0645\u062F\u0627\u0631 \u0627\u06CC\u0631\u0627\u0646\u06CC \u0627\u0633\u062A. \u0645\u0648\u0636\u0648\u0639 \u0627\u06CC\u0646 \u0634\u0627\u0647\u06A9\u0627\u0631 \u0627\u062F\u0628\u06CC\u060C", + " \u0627\u0641\u0633\u0627\u0646\u0647\u200C\u0647\u0627 \u0648 \u062A\u0627\u0631\u06CC\u062E \u0627\u06CC\u0631\u0627\u0646 \u0627\u0632 \u0622\u063A\u0627\u0632 \u062A\u0627 \u062D\u0645\u0644\u0647\u0654 \u0639\u0631\u0628\u200C\u0647\u0627 \u0628\u0647 \u0627\u06CC\u0631\u0627\u0646 \u062F\u0631 \u0633\u062F\u0647\u0654 \u0647\u0641\u062A\u0645 \u0645\u06CC\u0644\u0627\u062F\u06CC \u0627\u0633\u062A", + " (\u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0627\u0632 \u0633\u0647 \u0628\u062E\u0634 \u0627\u0633\u0637\u0648\u0631\u0647\u060C \u067E\u0647\u0644\u0648\u0627\u0646\u06CC \u0648 \u062A\u0627\u0631\u06CC\u062E\u06CC \u062A\u0634\u06A9\u06CC\u0644 \u0634\u062F\u0647\u200C\u0627\u0633\u062A) \u06A9\u0647 \u062F\u0631 \u0686\u0647\u0627\u0631", + " \u062F\u0648\u062F\u0645\u0627\u0646 \u067E\u0627\u062F\u0634\u0627\u0647\u06CC\u0650 \u067E\u06CC\u0634\u062F\u0627\u062F\u06CC\u0627\u0646\u060C \u06A9\u06CC\u0627\u0646\u06CC\u0627\u0646\u060C \u0627\u0634\u06A9\u0627\u0646\u06CC\u0627\u0646 \u0648 \u0633\u0627\u0633\u0627\u0646\u06CC\u0627\u0646 \u06AF\u0646\u062C\u0627\u0646\u062F\u0647 \u0645\u06CC\u200C\u0634\u0648\u062F.", + " \u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0628\u0631 \u0648\u0632\u0646 \xAB\u0641\u064E\u0639\u0648\u0644\u064F\u0646 \u0641\u0639\u0648\u0644\u0646 \u0641\u0639\u0648\u0644\u0646 \u0641\u064E\u0639\u064E\u0644\u0652\xBB\u060C \u062F\u0631 \u0628\u062D\u0631\u0650 \u0645\u064F\u062A\u064E\u0642\u0627\u0631\u0650\u0628\u0650 
\u0645\u062B\u0645\u064E\u0651\u0646\u0650 \u0645\u062D\u0630\u0648\u0641 \u0646\u06AF\u0627\u0634\u062A\u0647 \u0634\u062F\u0647\u200C\u0627\u0633\u062A.", + "\u0647\u0646\u06AF\u0627\u0645\u06CC \u06A9\u0647 \u0632\u0628\u0627\u0646 \u062F\u0627\u0646\u0634 \u0648 \u0627\u062F\u0628\u06CC\u0627\u062A \u062F\u0631 \u0627\u06CC\u0631\u0627\u0646 \u0632\u0628\u0627\u0646 \u0639\u0631\u0628\u06CC \u0628\u0648\u062F\u060C \u0641\u0631\u062F\u0648\u0633\u06CC\u060C \u0628\u0627 \u0633\u0631\u0648\u062F\u0646 \u0634\u0627\u0647\u0646\u0627\u0645\u0647", + " \u0628\u0627 \u0648\u06CC\u0698\u06AF\u06CC\u200C\u0647\u0627\u06CC \u0647\u062F\u0641\u200C\u0645\u0646\u062F\u06CC \u06A9\u0647 \u062F\u0627\u0634\u062A\u060C \u0632\u0628\u0627\u0646 \u067E\u0627\u0631\u0633\u06CC \u0631\u0627 \u0632\u0646\u062F\u0647 \u0648 \u067E\u0627\u06CC\u062F\u0627\u0631 \u06A9\u0631\u062F. \u06CC\u06A9\u06CC \u0627\u0632 ", + " \u0628\u0646\u200C\u0645\u0627\u06CC\u0647\u200C\u0647\u0627\u06CC \u0645\u0647\u0645\u06CC \u06A9\u0647 \u0641\u0631\u062F\u0648\u0633\u06CC \u0628\u0631\u0627\u06CC \u0633\u0631\u0648\u062F\u0646 \u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0627\u0632 \u0622\u0646 \u0627\u0633\u062A\u0641\u0627\u062F\u0647 \u06A9\u0631\u062F\u060C", + " \u0634\u0627\u0647\u0646\u0627\u0645\u0647\u0654 \u0627\u0628\u0648\u0645\u0646\u0635\u0648\u0631\u06CC \u0628\u0648\u062F. 
\u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0646\u0641\u0648\u0630 \u0628\u0633\u06CC\u0627\u0631\u06CC \u062F\u0631 \u062C\u0647\u062A\u200C\u06AF\u06CC\u0631\u06CC ", + " \u0641\u0631\u0647\u0646\u06AF \u0641\u0627\u0631\u0633\u06CC \u0648 \u0646\u06CC\u0632 \u0628\u0627\u0632\u062A\u0627\u0628\u200C\u0647\u0627\u06CC \u0634\u06A9\u0648\u0647\u200C\u0645\u0646\u062F\u06CC \u062F\u0631 \u0627\u062F\u0628\u06CC\u0627\u062A \u062C\u0647\u0627\u0646 \u062F\u0627\u0634\u062A\u0647\u200C\u0627\u0633\u062A \u0648 \u0634\u0627\u0639\u0631\u0627\u0646 ", + " \u0628\u0632\u0631\u06AF\u06CC \u0645\u0627\u0646\u0646\u062F \u06AF\u0648\u062A\u0647 \u0648 \u0648\u06CC\u06A9\u062A\u0648\u0631 \u0647\u0648\u06AF\u0648 \u0627\u0632 \u0622\u0646 \u0628\u0647 \u0646\u06CC\u06A9\u06CC \u06CC\u0627\u062F \u06A9\u0631\u062F\u0647\u200C\u0627\u0646\u062F." + ].join("\n") + ] + ], + ["text-generation", ["\u0627\u0633\u0645 \u0645\u0646 \u0646\u0627\u0632\u0646\u06CC\u0646 \u0627\u0633\u062A \u0648 \u0645\u0646", "\u0631\u0648\u0632\u06CC \u0631\u0648\u0632\u06AF\u0627\u0631\u06CC"]], + [ + "fill-mask", + [ + `\u0632\u0646\u062F\u06AF\u06CC \u06CC\u06A9 \u0633\u0648\u0627\u0644 \u0627\u0633\u062A \u0648 \u0627\u06CC\u0646 \u06A9\u0647 \u0686\u06AF\u0648\u0646\u0647 \u06A9\u0646\u06CC\u0645 \u067E\u0627\u0633\u062E \u0627\u06CC\u0646 \u0633\u0648\u0627\u0644!`, + `\u0632\u0646\u062F\u06AF\u06CC \u0627\u0632 \u0645\u0631\u06AF \u067E\u0631\u0633\u06CC\u062F: \u0686\u0631\u0627 \u0647\u0645\u0647 \u0645\u0646 \u0631\u0627 \u062F\u0627\u0631\u0646\u062F \u0627\u0645\u0627 \u0627\u0632 \u062A\u0648 \u0645\u062A\u0646\u0641\u0631\u0646\u062F\u061F` + ] + ] +]); +var MAPPING_AR = /* @__PURE__ */ new Map([ + ["text-classification", [`\u0623\u062D\u0628\u0643. 
\u0623\u0647\u0648\u0627\u0643`]], + [ + "token-classification", + [`\u0625\u0633\u0645\u064A \u0645\u062D\u0645\u062F \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0628\u0631\u0644\u064A\u0646`, `\u0625\u0633\u0645\u064A \u0633\u0627\u0631\u0647 \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0644\u0646\u062F\u0646`, `\u0625\u0633\u0645\u064A \u0633\u0627\u0645\u064A \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0627\u0644\u0642\u062F\u0633 \u0641\u064A \u0641\u0644\u0633\u0637\u064A\u0646.`] + ], + [ + "question-answering", + [ + { + text: `\u0623\u064A\u0646 \u0623\u0633\u0643\u0646\u061F`, + context: `\u0625\u0633\u0645\u064A \u0645\u062D\u0645\u062F \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0628\u064A\u0631\u0648\u062A` + }, + { + text: `\u0623\u064A\u0646 \u0623\u0633\u0643\u0646\u061F`, + context: `\u0625\u0633\u0645\u064A \u0633\u0627\u0631\u0647 \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0644\u0646\u062F\u0646` + }, + { + text: `\u0645\u0627 \u0627\u0633\u0645\u064A\u061F`, + context: `\u0627\u0633\u0645\u064A \u0633\u0639\u064A\u062F \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u062D\u064A\u0641\u0627.` + }, + { + text: `\u0645\u0627 \u0644\u0642\u0628 \u062E\u0627\u0644\u062F \u0628\u0646 \u0627\u0644\u0648\u0644\u064A\u062F \u0628\u0627\u0644\u0639\u0631\u0628\u064A\u0629\u061F`, + context: `\u062E\u0627\u0644\u062F \u0628\u0646 \u0627\u0644\u0648\u0644\u064A\u062F \u0645\u0646 \u0623\u0628\u0637\u0627\u0644 \u0648\u0642\u0627\u062F\u0629 \u0627\u0644\u0641\u062A\u062D \u0627\u0644\u0625\u0633\u0644\u0627\u0645\u064A \u0648\u0642\u062F \u062A\u062D\u062F\u062B\u062A \u0639\u0646\u0647 \u0627\u0644\u0644\u063A\u0627\u062A \u0627\u0644\u0625\u0646\u062C\u0644\u064A\u0632\u064A\u0629 \u0648\u0627\u0644\u0641\u0631\u0646\u0633\u064A\u0629 \u0648\u0627\u0644\u0625\u0633\u0628\u0627\u0646\u064A\u0629 \u0648\u0644\u0642\u0628 \u0628\u0633\u064A\u0641 \u0627\u0644\u0644\u0647 \u0627\u0644\u0645\u0633\u0644\u0648\u0644.` + } + ] + ], + ["translation", 
[`\u0625\u0633\u0645\u064A \u0645\u062D\u0645\u062F \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0628\u0631\u0644\u064A\u0646`, `\u0625\u0633\u0645\u064A \u0633\u0627\u0631\u0647 \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0644\u0646\u062F\u0646`]], + [ + "summarization", + [ + `\u062A\u0642\u0639 \u0627\u0644\u0623\u0647\u0631\u0627\u0645\u0627\u062A \u0641\u064A \u0627\u0644\u062C\u064A\u0632\u0629 \u0642\u0631\u0628 \u0627\u0644\u0642\u0627\u0647\u0631\u0629 \u0641\u064A \u0645\u0635\u0631 \u0648\u0642\u062F \u0628\u0646\u064A\u062A \u0645\u0646\u0630 \u0639\u062F\u0629 \u0642\u0631\u0648\u0646\u060C \u0648\u0642\u064A\u0644 \u0625\u0646\u0647\u0627 \u0643\u0627\u0646\u062A \u0642\u0628\u0648\u0631\u0627 \u0644\u0644\u0641\u0631\u0627\u0639\u0646\u0629 \u0648\u062A\u0645 \u0628\u0646\u0627\u0624\u0647\u0627 \u0628\u0639\u0645\u0644\u064A\u0629 \u0647\u0646\u062F\u0633\u064A\u0629 \u0631\u0627\u0626\u0639\u0629 \u0648\u0627\u0633\u062A\u0642\u062F\u0645\u062A \u062D\u062C\u0627\u0631\u062A\u0647\u0627 \u0645\u0646 \u062C\u0628\u0644 \u0627\u0644\u0645\u0642\u0637\u0645 \u0648\u062A\u0645 \u0646\u0642\u0644\u0647\u0627 \u0628\u0627\u0644\u0633\u0641\u0646 \u0623\u0648 \u0639\u0644\u0649 \u0627\u0644\u0631\u0645\u0644\u060C \u0648\u0645\u0627 \u062A\u0632\u0627\u0644 \u0634\u0627\u0645\u062E\u0629 \u0648\u064A\u0642\u0635\u062F\u0647\u0627 \u0627\u0644\u0633\u064A\u0627\u062D \u0645\u0646 \u0643\u0627\u0641\u0629 \u0623\u0631\u062C\u0627\u0621 \u0627\u0644\u0645\u0639\u0645\u0648\u0631\u0629.` + ] + ], + [ + "text-generation", + [ + `\u0625\u0633\u0645\u064A \u0645\u062D\u0645\u062F \u0648\u0623\u062D\u0628 \u0623\u0646`, + `\u062F\u0639 \u0627\u0644\u0645\u0643\u0627\u0631\u0645 \u0644\u0627 \u062A\u0631\u062D\u0644 \u0644\u0628\u063A\u064A\u062A\u0647\u0627 - \u0648\u0627\u0642\u0639\u062F \u0641\u0625\u0646\u0643 \u0623\u0646\u062A \u0627\u0644\u0637\u0627\u0639\u0645 \u0627\u0644\u0643\u0627\u0633\u064A.`, + `\u0644\u0645\u0627\u0630\u0627 \u0646\u062D\u0646 
\u0647\u0646\u0627\u061F`, + `\u0627\u0644\u0642\u062F\u0633 \u0645\u062F\u064A\u0646\u0629 \u062A\u0627\u0631\u064A\u062E\u064A\u0629\u060C \u0628\u0646\u0627\u0647\u0627 \u0627\u0644\u0643\u0646\u0639\u0627\u0646\u064A\u0648\u0646 \u0641\u064A`, + `\u0643\u0627\u0646 \u064A\u0627 \u0645\u0627 \u0643\u0627\u0646 \u0641\u064A \u0642\u062F\u064A\u0645 \u0627\u0644\u0632\u0645\u0627\u0646` + ] + ], + ["fill-mask", [`\u0628\u0627\u0631\u064A\u0633 \u0641\u0631\u0646\u0633\u0627.`, `\u0641\u0644\u0633\u0641\u0629 \u0627\u0644\u062D\u064A\u0627\u0629 \u0647\u064A .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "\u0647\u0630\u0627 \u0634\u062E\u0635 \u0633\u0639\u064A\u062F", + sentences: ["\u0647\u0630\u0627 \u0643\u0644\u0628 \u0633\u0639\u064A\u062F", "\u0647\u0630\u0627 \u0634\u062E\u0635 \u0633\u0639\u064A\u062F \u062C\u062F\u0627", "\u0627\u0644\u064A\u0648\u0645 \u0647\u0648 \u064A\u0648\u0645 \u0645\u0634\u0645\u0633"] + } + ] + ] +]); +var MAPPING_BN = /* @__PURE__ */ new Map([ + ["text-classification", [`\u09AC\u09BE\u0999\u09BE\u09B2\u09BF\u09B0 \u0998\u09B0\u09C7 \u0998\u09B0\u09C7 \u0986\u099C \u09A8\u09AC\u09BE\u09A8\u09CD\u09A8 \u0989\u09CE\u09B8\u09AC\u0964`]], + [ + "token-classification", + [`\u0986\u09AE\u09BE\u09B0 \u09A8\u09BE\u09AE \u099C\u09BE\u09B9\u09BF\u09A6 \u098F\u09AC\u0982 \u0986\u09AE\u09BF \u09A2\u09BE\u0995\u09BE\u09DF \u09AC\u09BE\u09B8 \u0995\u09B0\u09BF\u0964`, `\u09A4\u09BF\u09A8\u09BF \u0997\u09C1\u0997\u09B2\u09C7 \u099A\u09BE\u0995\u09B0\u09C0 \u0995\u09B0\u09C7\u09A8\u0964`, `\u0986\u09AE\u09BE\u09B0 \u09A8\u09BE\u09AE \u09B8\u09C1\u09B8\u09CD\u09AE\u09BF\u09A4\u09BE \u098F\u09AC\u0982 \u0986\u09AE\u09BF \u0995\u09B2\u0995\u09BE\u09A4\u09BE\u09DF \u09AC\u09BE\u09B8 \u0995\u09B0\u09BF\u0964`] + ], + ["translation", [`\u0986\u09AE\u09BE\u09B0 \u09A8\u09BE\u09AE \u099C\u09BE\u09B9\u09BF\u09A6, \u0986\u09AE\u09BF \u09B0\u0982\u09AA\u09C1\u09B0\u09C7 \u09AC\u09BE\u09B8 \u0995\u09B0\u09BF\u0964`, `\u0986\u09AA\u09A8\u09BF 
\u0995\u09C0 \u0986\u099C\u0995\u09C7 \u09AC\u09BE\u09B8\u09BE\u09DF \u0986\u09B8\u09AC\u09C7\u09A8?`]], + [ + "summarization", + [ + `\u2018\u0987\u0995\u09CB\u09A8\u09AE\u09BF\u09B8\u09CD\u099F\u2019 \u09B2\u09BF\u0996\u09C7\u099B\u09C7, \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF\u09B0 \u099A\u09BE\u09B0 \u09AE\u09BE\u09B8 \u09B8\u09CD\u09A5\u09BE\u09DF\u09C0 \u09B9\u0993\u09DF\u09BE\u09B0 \u0996\u09AC\u09B0\u099F\u09BF \u09A6\u09C1\u0987 \u0995\u09BE\u09B0\u09A3\u09C7 \u0986\u09A8\u09A8\u09CD\u09A6\u09C7\u09B0\u0964 \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF \u09AF\u09A4 \u09A6\u09BF\u09A8 \u09AA\u09B0\u09CD\u09AF\u09A8\u09CD\u09A4 \u09B6\u09B0\u09C0\u09B0\u09C7 \u099F\u09BF\u0995\u09AC\u09C7, \u09A4\u09A4 \u09A6\u09BF\u09A8 \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09A3 \u09A5\u09C7\u0995\u09C7 \u09B8\u09C1\u09B0\u0995\u09CD\u09B7\u09BF\u09A4 \u09A5\u09BE\u0995\u09BE \u09B8\u09AE\u09CD\u09AD\u09AC\u0964 \u0985\u09B0\u09CD\u09A5\u09BE\u09CE, \u098F\u09AE\u09A8 \u098F\u0995 \u099F\u09BF\u0995\u09BE\u09B0 \u09AA\u09CD\u09B0\u09DF\u09CB\u099C\u09A8 \u09B9\u09AC\u09C7, \u09AF\u09BE \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF\u09B0 \u0989\u09A4\u09CD\u09AA\u09BE\u09A6\u09A8\u0995\u09C7 \u09AA\u09CD\u09B0\u09B0\u09CB\u099A\u09BF\u09A4 \u0995\u09B0\u09A4\u09C7 \u09AA\u09BE\u09B0\u09C7 \u098F\u09AC\u0982 \u09A6\u09C0\u09B0\u09CD\u0998\u09B8\u09CD\u09A5\u09BE\u09DF\u09C0 \u09B8\u09C1\u09B0\u0995\u09CD\u09B7\u09BE \u09A6\u09BF\u09A4\u09C7 \u09AA\u09BE\u09B0\u09C7\u0964 \u098F\u0997\u09C1\u09B2\u09CB \u0996\u09C1\u0981\u099C\u09C7 \u09AC\u09C7\u09B0 \u0995\u09B0\u09BE\u0993 \u09B8\u09B9\u099C\u0964 \u098F\u099F\u09BF \u0986\u09AD\u09BE\u09B8 \u09A6\u09C7\u09DF, \u09AC\u09CD\u09AF\u09BE\u09AA\u0995 \u09B9\u09BE\u09B0\u09C7 \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF \u09B6\u09A8\u09BE\u0995\u09CD\u09A4\u0995\u09B0\u09A3 \u09AB\u09B2\u09BE\u09AB\u09B2 
\u09AE\u09CB\u099F\u09BE\u09AE\u09C1\u099F\u09BF \u09A8\u09BF\u09B0\u09CD\u09AD\u09C1\u09B2 \u09B9\u0993\u09DF\u09BE \u0989\u099A\u09BF\u09A4\u0964 \u09A6\u09CD\u09AC\u09BF\u09A4\u09C0\u09DF \u0986\u09B0\u09C7\u0995\u099F\u09BF \u0997\u09AC\u09C7\u09B7\u09A3\u09BE\u09B0 \u09A8\u09C7\u09A4\u09C3\u09A4\u09CD\u09AC \u09A6\u09BF\u09DF\u09C7\u099B\u09C7\u09A8 \u09AF\u09C1\u0995\u09CD\u09A4\u09B0\u09BE\u099C\u09CD\u09AF\u09C7\u09B0 \u09AE\u09C7\u09A1\u09BF\u0995\u09C7\u09B2 \u09B0\u09BF\u09B8\u09BE\u09B0\u09CD\u099A \u0995\u09BE\u0989\u09A8\u09CD\u09B8\u09BF\u09B2\u09C7\u09B0 (\u098F\u09AE\u0986\u09B0\u09B8\u09BF) \u0987\u09AE\u09BF\u0989\u09A8\u09CB\u09B2\u099C\u09BF\u09B8\u09CD\u099F \u09A4\u09BE\u0993 \u09A6\u0982\u0964 \u09A4\u09BF\u09A8\u09BF \u099F\u09BF-\u09B8\u09C7\u09B2 \u09B6\u09A8\u09BE\u0995\u09CD\u09A4\u0995\u09B0\u09A3\u09C7 \u0995\u09BE\u099C \u0995\u09B0\u09C7\u099B\u09C7\u09A8\u0964 \u099F\u09BF-\u09B8\u09C7\u09B2 \u09B6\u09A8\u09BE\u0995\u09CD\u09A4\u0995\u09B0\u09A3\u09C7\u09B0 \u09AA\u09CD\u09B0\u0995\u09CD\u09B0\u09BF\u09DF\u09BE \u0985\u09AC\u09B6\u09CD\u09AF \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF\u09B0 \u09AE\u09A4\u09CB \u098F\u09A4 \u0986\u09B2\u09CB\u099A\u09BF\u09A4 \u09A8\u09DF\u0964 \u09A4\u09AC\u09C7 \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09A3\u09C7\u09B0 \u09AC\u09BF\u09B0\u09C1\u09A6\u09CD\u09A7\u09C7 \u09B2\u09DC\u09BE\u0987 \u098F\u09AC\u0982 \u09A6\u09C0\u09B0\u09CD\u0998\u09AE\u09C7\u09DF\u09BE\u09A6\u09BF \u09B8\u09C1\u09B0\u0995\u09CD\u09B7\u09BE\u09DF \u09B8\u09AE\u09BE\u09A8 \u0997\u09C1\u09B0\u09C1\u09A4\u09CD\u09AC\u09AA\u09C2\u09B0\u09CD\u09A3 \u09AD\u09C2\u09AE\u09BF\u0995\u09BE \u09AA\u09BE\u09B2\u09A8 \u0995\u09B0\u09C7\u0964 \u0997\u09AC\u09C7\u09B7\u09A3\u09BE\u09B8\u0982\u0995\u09CD\u09B0\u09BE\u09A8\u09CD\u09A4 \u09A8\u09BF\u09AC\u09A8\u09CD\u09A7 \u09AA\u09CD\u09B0\u0995\u09BE\u09B6\u09BF\u09A4 \u09B9\u09DF\u09C7\u099B\u09C7 \u2018\u09A8\u09C7\u099A\u09BE\u09B0 
\u0987\u09AE\u09BF\u0989\u09A8\u09CB\u09B2\u099C\u09BF\u2019 \u09B8\u09BE\u09AE\u09DF\u09BF\u0995\u09C0\u09A4\u09C7\u0964 \u09A4\u09BE\u0981\u09B0\u09BE \u09AC\u09B2\u099B\u09C7\u09A8, \u0997\u09AC\u09C7\u09B7\u09A3\u09BE\u09B0 \u0995\u09CD\u09B7\u09C7\u09A4\u09CD\u09B0\u09C7 \u0995\u09CB\u09AD\u09BF\u09A1-\u09E7\u09EF \u09AE\u09C3\u09A6\u09C1 \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09A3\u09C7\u09B0 \u09B6\u09BF\u0995\u09BE\u09B0 \u09E8\u09EE \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF\u09B0 \u09B0\u0995\u09CD\u09A4\u09C7\u09B0 \u09A8\u09AE\u09C1\u09A8\u09BE, \u09E7\u09EA \u099C\u09A8 \u0997\u09C1\u09B0\u09C1\u09A4\u09B0 \u0985\u09B8\u09C1\u09B8\u09CD\u09A5 \u0993 \u09E7\u09EC \u099C\u09A8 \u09B8\u09C1\u09B8\u09CD\u09A5 \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF\u09B0 \u09B0\u0995\u09CD\u09A4\u09C7\u09B0 \u09A8\u09AE\u09C1\u09A8\u09BE \u09AA\u09B0\u09C0\u0995\u09CD\u09B7\u09BE \u0995\u09B0\u09C7\u099B\u09C7\u09A8\u0964 \u0997\u09AC\u09C7\u09B7\u09A3\u09BE \u09A8\u09BF\u09AC\u09A8\u09CD\u09A7\u09C7 \u09AC\u09B2\u09BE \u09B9\u09DF, \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09BF\u09A4 \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF\u09A6\u09C7\u09B0 \u0995\u09CD\u09B7\u09C7\u09A4\u09CD\u09B0\u09C7 \u099F\u09BF-\u09B8\u09C7\u09B2\u09C7\u09B0 \u09A4\u09C0\u09AC\u09CD\u09B0 \u09AA\u09CD\u09B0\u09A4\u09BF\u0995\u09CD\u09B0\u09BF\u09DF\u09BE \u09A4\u09BE\u0981\u09B0\u09BE \u09A6\u09C7\u0996\u09C7\u099B\u09C7\u09A8\u0964 \u098F \u0995\u09CD\u09B7\u09C7\u09A4\u09CD\u09B0\u09C7 \u09AE\u09C3\u09A6\u09C1 \u0993 \u0997\u09C1\u09B0\u09C1\u09A4\u09B0 \u0985\u09B8\u09C1\u09B8\u09CD\u09A5 \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF\u09A6\u09C7\u09B0 \u0995\u09CD\u09B7\u09C7\u09A4\u09CD\u09B0\u09C7 \u09AA\u09CD\u09B0\u09A4\u09BF\u0995\u09CD\u09B0\u09BF\u09DF\u09BE\u09B0 \u09AD\u09BF\u09A8\u09CD\u09A8\u09A4\u09BE \u09AA\u09BE\u0993\u09DF\u09BE \u0997\u09C7\u099B\u09C7\u0964` + ] + ], + ["text-generation", [`\u0986\u09AE\u09BF \u09B0\u09A4\u09A8 \u098F\u09AC\u0982 \u0986\u09AE\u09BF`, 
`\u09A4\u09C1\u09AE\u09BF \u09AF\u09A6\u09BF \u099A\u09BE\u0993 \u09A4\u09AC\u09C7`, `\u09AE\u09BF\u09A5\u09BF\u09B2\u09BE \u0986\u099C\u0995\u09C7 \u09AC\u09A1\u09CD\u09A1`]], + ["fill-mask", [`\u0986\u09AE\u09BF \u09AC\u09BE\u0982\u09B2\u09BE\u09DF \u0997\u09BE\u0987\u0964`, `\u0986\u09AE\u09BF \u0996\u09C1\u09AC \u09AD\u09BE\u09B2\u09CB\u09AC\u09BE\u09B8\u09BF\u0964 `]], + [ + "question-answering", + [ + { + text: `\u09AA\u09CD\u09B0\u09A5\u09AE \u098F\u09B6\u09BF\u09AF\u09BC\u09BE \u0995\u09BE\u09AA \u0995\u09CD\u09B0\u09BF\u0995\u09C7\u099F \u099F\u09C1\u09B0\u09CD\u09A8\u09BE\u09AE\u09C7\u09A8\u09CD\u099F \u0995\u09CB\u09A5\u09BE\u09DF \u0985\u09A8\u09C1\u09B7\u09CD\u09A0\u09BF\u09A4 \u09B9\u09DF ?`, + context: `\u09AA\u09CD\u09B0\u09A5\u09AE \u099F\u09C1\u09B0\u09CD\u09A8\u09BE\u09AE\u09C7\u09A8\u09CD\u099F \u0985\u09A8\u09C1\u09B7\u09CD\u09A0\u09BF\u09A4 \u09B9\u09AF\u09BC \u09E7\u09EF\u09EE\u09EA \u09B8\u09BE\u09B2\u09C7 \u09B8\u0982\u09AF\u09C1\u0995\u09CD\u09A4 \u0986\u09B0\u09AC \u0986\u09AE\u09BF\u09B0\u09BE\u09A4 \u098F\u09B0 \u09B6\u09BE\u09B0\u099C\u09BE\u09B9 \u09A4\u09C7 \u09AF\u09C7\u0996\u09BE\u09A8\u09C7 \u0995\u09BE\u0989\u09A8\u09CD\u09B8\u09BF\u09B2\u09C7\u09B0 \u09AE\u09C2\u09B2 \u0985\u09AB\u09BF\u09B8 \u099B\u09BF\u09B2 (\u09E7\u09EF\u09EF\u09EB \u09AA\u09B0\u09CD\u09AF\u09A8\u09CD\u09A4)\u0964 \u09AD\u09BE\u09B0\u09A4 \u09B6\u09CD\u09B0\u09C0\u09B2\u0999\u09CD\u0995\u09BE\u09B0 \u09B8\u09BE\u09A5\u09C7 \u0986\u09A8\u09CD\u09A4\u09B0\u09BF\u0995\u09A4\u09BE\u09B9\u09C0\u09A8 \u0995\u09CD\u09B0\u09BF\u0995\u09C7\u099F \u09B8\u09AE\u09CD\u09AA\u09B0\u09CD\u0995\u09C7\u09B0 \u0995\u09BE\u09B0\u09A3\u09C7 \u09E7\u09EF\u09EE\u09EC \u09B8\u09BE\u09B2\u09C7\u09B0 \u099F\u09C1\u09B0\u09CD\u09A8\u09BE\u09AE\u09C7\u09A8\u09CD\u099F \u09AC\u09B0\u09CD\u099C\u09A8 \u0995\u09B0\u09C7\u0964 \u09E7\u09EF\u09EF\u09E9 \u09B8\u09BE\u09B2\u09C7 \u09AD\u09BE\u09B0\u09A4 \u0993 \u09AA\u09BE\u0995\u09BF\u09B8\u09CD\u09A4\u09BE\u09A8 \u098F\u09B0 
\u09AE\u09A7\u09CD\u09AF\u09C7 \u09B0\u09BE\u099C\u09A8\u09C8\u09A4\u09BF\u0995 \u0985\u09B8\u09CD\u09A5\u09BF\u09B0\u09A4\u09BE\u09B0 \u0995\u09BE\u09B0\u09A3\u09C7 \u098F\u099F\u09BF \u09AC\u09BE\u09A4\u09BF\u09B2 \u09B9\u09AF\u09BC\u09C7 \u09AF\u09BE\u09AF\u09BC\u0964 \u09B6\u09CD\u09B0\u09C0\u09B2\u0999\u09CD\u0995\u09BE \u098F\u09B6\u09BF\u09AF\u09BC\u09BE \u0995\u09BE\u09AA \u09B6\u09C1\u09B0\u09C1 \u09A5\u09C7\u0995\u09C7 \u0985\u0982\u09B6 \u0997\u09CD\u09B0\u09B9\u09A3 \u0995\u09B0\u09C7 \u0986\u09B8\u099B\u09C7\u0964 \u0986\u09A8\u09CD\u09A4\u09B0\u09CD\u099C\u09BE\u09A4\u09BF\u0995 \u0995\u09CD\u09B0\u09BF\u0995\u09C7\u099F \u0995\u09BE\u0989\u09A8\u09CD\u09B8\u09BF\u09B2 \u09A8\u09BF\u09AF\u09BC\u09AE \u0995\u09B0\u09C7 \u09A6\u09BF\u09AF\u09BC\u09C7\u099B\u09C7 \u09AF\u09C7 \u098F\u09B6\u09BF\u09AF\u09BC\u09BE \u0995\u09BE\u09AA\u09C7\u09B0 \u09B8\u0995\u09B2 \u0996\u09C7\u09B2\u09BE \u0985\u09A8\u09C1\u09B7\u09CD\u09A0\u09BF\u09A4 \u09B9\u09AC\u09C7 \u0985\u09AB\u09BF\u09B8\u09BF\u09AF\u09BC\u09BE\u09B2 \u098F\u0995\u09A6\u09BF\u09A8\u09C7\u09B0 \u0986\u09A8\u09CD\u09A4\u09B0\u09CD\u099C\u09BE\u09A4\u09BF\u0995 \u0995\u09CD\u09B0\u09BF\u0995\u09C7\u099F \u09B9\u09BF\u09B8\u09C7\u09AC\u09C7\u0964 \u098F\u09B8\u09BF\u09B8\u09BF \u0998\u09CB\u09B7\u09A8\u09BE \u0985\u09A8\u09C1\u09AF\u09BE\u09AF\u09BC\u09C0 \u09AA\u09CD\u09B0\u09A4\u09BF \u09A6\u09C1\u0987 \u09AC\u099B\u09B0 \u09AA\u09B0 \u09AA\u09B0 \u099F\u09C1\u09B0\u09CD\u09A8\u09BE\u09AE\u09C7\u09A8\u09CD\u099F \u0985\u09A8\u09C1\u09B7\u09CD\u09A0\u09BF\u09A4 \u09B9\u09AF\u09BC \u09E8\u09E6\u09E6\u09EE \u09B8\u09BE\u09B2 \u09A5\u09C7\u0995\u09C7\u0964` + }, + { + text: `\u09AD\u09BE\u09B0\u09A4\u09C0\u09AF\u09BC \u09AC\u09BE\u0999\u09BE\u09B2\u09BF \u0995\u09A5\u09BE\u09B8\u09BE\u09B9\u09BF\u09A4\u09CD\u09AF\u09BF\u0995 \u09AE\u09B9\u09BE\u09B6\u09CD\u09AC\u09C7\u09A4\u09BE \u09A6\u09C7\u09AC\u09C0\u09B0 \u09AE\u09C3\u09A4\u09CD\u09AF\u09C1 \u0995\u09AC\u09C7 \u09B9\u09DF ?`, + context: 
`\u09E8\u09E6\u09E7\u09EC \u09B8\u09BE\u09B2\u09C7\u09B0 \u09E8\u09E9 \u099C\u09C1\u09B2\u09BE\u0987 \u09B9\u09C3\u09A6\u09B0\u09CB\u0997\u09C7 \u0986\u0995\u09CD\u09B0\u09BE\u09A8\u09CD\u09A4 \u09B9\u09AF\u09BC\u09C7 \u09AE\u09B9\u09BE\u09B6\u09CD\u09AC\u09C7\u09A4\u09BE \u09A6\u09C7\u09AC\u09C0 \u0995\u09B2\u0995\u09BE\u09A4\u09BE\u09B0 \u09AC\u09C7\u09B2 \u09AD\u09BF\u0989 \u0995\u09CD\u09B2\u09BF\u09A8\u09BF\u0995\u09C7 \u09AD\u09B0\u09CD\u09A4\u09BF \u09B9\u09A8\u0964 \u09B8\u09C7\u0987 \u09AC\u099B\u09B0\u0987 \u09E8\u09EE \u099C\u09C1\u09B2\u09BE\u0987 \u098F\u0995\u09BE\u09A7\u09BF\u0995 \u0985\u0999\u09CD\u0997 \u09AC\u09BF\u0995\u09B2 \u09B9\u09AF\u09BC\u09C7 \u09A4\u09BE\u0981\u09B0 \u09AE\u09C3\u09A4\u09CD\u09AF\u09C1 \u0998\u099F\u09C7\u0964 \u09A4\u09BF\u09A8\u09BF \u09AE\u09A7\u09C1\u09AE\u09C7\u09B9, \u09B8\u09C7\u09AA\u09CD\u099F\u09BF\u09B8\u09C7\u09AE\u09BF\u09AF\u09BC\u09BE \u0993 \u09AE\u09C2\u09A4\u09CD\u09B0 \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09A3 \u09B0\u09CB\u0997\u09C7\u0993 \u09AD\u09C1\u0997\u099B\u09BF\u09B2\u09C7\u09A8\u0964` + }, + { + text: `\u09AE\u09BE\u09B8\u09CD\u099F\u09BE\u09B0\u09A6\u09BE \u09B8\u09C2\u09B0\u09CD\u09AF\u0995\u09C1\u09AE\u09BE\u09B0 \u09B8\u09C7\u09A8\u09C7\u09B0 \u09AC\u09BE\u09AC\u09BE\u09B0 \u09A8\u09BE\u09AE \u0995\u09C0 \u099B\u09BF\u09B2 ?`, + context: `\u09B8\u09C2\u09B0\u09CD\u09AF \u09B8\u09C7\u09A8 \u09E7\u09EE\u09EF\u09EA \u09B8\u09BE\u09B2\u09C7\u09B0 \u09E8\u09E8 \u09AE\u09BE\u09B0\u09CD\u099A \u099A\u099F\u09CD\u099F\u0997\u09CD\u09B0\u09BE\u09AE\u09C7\u09B0 \u09B0\u09BE\u0989\u099C\u09BE\u09A8 \u09A5\u09BE\u09A8\u09BE\u09B0 \u09A8\u09CB\u09AF\u09BC\u09BE\u09AA\u09BE\u09A1\u09BC\u09BE\u09AF\u09BC \u0985\u09B0\u09CD\u09A5\u09A8\u09C8\u09A4\u09BF\u0995 \u09AD\u09BE\u09AC\u09C7 \u0985\u09B8\u09CD\u09AC\u099A\u09CD\u099B\u09B2 \u09AA\u09B0\u09BF\u09AC\u09BE\u09B0\u09C7 \u099C\u09A8\u09CD\u09AE\u0997\u09CD\u09B0\u09B9\u09A3 \u0995\u09B0\u09C7\u09A8\u0964 \u09A4\u09BE\u0981\u09B0 
\u09AA\u09BF\u09A4\u09BE\u09B0 \u09A8\u09BE\u09AE \u09B0\u09BE\u099C\u09AE\u09A8\u09BF \u09B8\u09C7\u09A8 \u098F\u09AC\u0982 \u09AE\u09BE\u09A4\u09BE\u09B0 \u09A8\u09BE\u09AE \u09B6\u09B6\u09C0 \u09AC\u09BE\u09B2\u09BE \u09B8\u09C7\u09A8\u0964 \u09B0\u09BE\u099C\u09AE\u09A8\u09BF \u09B8\u09C7\u09A8\u09C7\u09B0 \u09A6\u09C1\u0987 \u099B\u09C7\u09B2\u09C7 \u0986\u09B0 \u099A\u09BE\u09B0 \u09AE\u09C7\u09AF\u09BC\u09C7\u0964 \u09B8\u09C2\u09B0\u09CD\u09AF \u09B8\u09C7\u09A8 \u09A4\u09BE\u0981\u09A6\u09C7\u09B0 \u09AA\u09B0\u09BF\u09AC\u09BE\u09B0\u09C7\u09B0 \u099A\u09A4\u09C1\u09B0\u09CD\u09A5 \u09B8\u09A8\u09CD\u09A4\u09BE\u09A8\u0964 \u09A6\u09C1\u0987 \u099B\u09C7\u09B2\u09C7\u09B0 \u09A8\u09BE\u09AE \u09B8\u09C2\u09B0\u09CD\u09AF \u0993 \u0995\u09AE\u09B2\u0964 \u099A\u09BE\u09B0 \u09AE\u09C7\u09AF\u09BC\u09C7\u09B0 \u09A8\u09BE\u09AE \u09AC\u09B0\u09A6\u09BE\u09B8\u09C1\u09A8\u09CD\u09A6\u09B0\u09C0, \u09B8\u09BE\u09AC\u09BF\u09A4\u09CD\u09B0\u09C0, \u09AD\u09BE\u09A8\u09C1\u09AE\u09A4\u09C0 \u0993 \u09AA\u09CD\u09B0\u09AE\u09BF\u09B2\u09BE\u0964 \u09B6\u09C8\u09B6\u09AC\u09C7 \u09AA\u09BF\u09A4\u09BE \u09AE\u09BE\u09A4\u09BE\u0995\u09C7 \u09B9\u09BE\u09B0\u09BE\u09A8\u09CB \u09B8\u09C2\u09B0\u09CD\u09AF \u09B8\u09C7\u09A8 \u0995\u09BE\u0995\u09BE \u0997\u09CC\u09B0\u09AE\u09A8\u09BF \u09B8\u09C7\u09A8\u09C7\u09B0 \u0995\u09BE\u099B\u09C7 \u09AE\u09BE\u09A8\u09C1\u09B7 \u09B9\u09AF\u09BC\u09C7\u099B\u09C7\u09A8\u0964 \u09B8\u09C2\u09B0\u09CD\u09AF \u09B8\u09C7\u09A8 \u099B\u09C7\u09B2\u09C7\u09AC\u09C7\u09B2\u09BE \u09A5\u09C7\u0995\u09C7\u0987 \u0996\u09C1\u09AC \u09AE\u09A8\u09CB\u09AF\u09CB\u0997\u09C0 \u09AD\u09BE\u09B2 \u099B\u09BE\u09A4\u09CD\u09B0 \u099B\u09BF\u09B2\u09C7\u09A8 \u098F\u09AC\u0982 \u09A7\u09B0\u09CD\u09AE\u09AD\u09BE\u09AC\u09BE\u09AA\u09A8\u09CD\u09A8 \u0997\u09AE\u09CD\u09AD\u09C0\u09B0 \u09AA\u09CD\u09B0\u0995\u09C3\u09A4\u09BF\u09B0 \u099B\u09BF\u09B2\u09C7\u09A8\u0964` + } + ] + ], + [ + "sentence-similarity", + [ + { + 
source_sentence: "\u09B8\u09C7 \u098F\u0995\u099C\u09A8 \u09B8\u09C1\u0996\u09C0 \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF", + sentences: ["\u09B8\u09C7 \u09B9\u09CD\u09AF\u09BE\u09AA\u09BF \u0995\u09C1\u0995\u09C1\u09B0", "\u09B8\u09C7 \u0996\u09C1\u09AC \u09B8\u09C1\u0996\u09C0 \u09AE\u09BE\u09A8\u09C1\u09B7", "\u0986\u099C \u098F\u0995\u099F\u09BF \u09B0\u09CC\u09A6\u09CD\u09B0\u09CB\u099C\u09CD\u099C\u09CD\u09AC\u09B2 \u09A6\u09BF\u09A8"] + } + ] + ] +]); +var MAPPING_MN = /* @__PURE__ */ new Map([ + ["text-classification", [`\u0411\u0438 \u0447\u0430\u043C\u0434 \u0445\u0430\u0439\u0440\u0442\u0430\u0439`]], + [ + "token-classification", + [ + `\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440\u0442 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433.`, + `\u041D\u0430\u043C\u0430\u0439\u0433 \u0413\u0430\u043D\u0431\u0430\u0442 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u0432\u0441 \u0430\u0439\u043C\u0430\u0433\u0442 \u0442\u04E9\u0440\u0441\u04E9\u043D.`, + `\u041C\u0430\u043D\u0430\u0439 \u0443\u043B\u0441 \u0442\u0430\u0432\u0430\u043D \u0445\u043E\u0448\u0443\u0443 \u043C\u0430\u043B\u0442\u0430\u0439.` + ] + ], + [ + "question-answering", + [ + { + text: `\u0422\u0430 \u0445\u0430\u0430\u043D\u0430 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433 \u0432\u044D?`, + context: `\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440\u0442 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433.` + }, + { + text: `\u0422\u0430\u043D\u044B\u0433 \u0445\u044D\u043D \u0433\u044D\u0434\u044D\u0433 \u0432\u044D?`, + context: `\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. 
\u0411\u0438 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440\u0442 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433.` + }, + { + text: `\u041C\u0438\u043D\u0438\u0439 \u043D\u044D\u0440\u0438\u0439\u0433 \u0445\u044D\u043D \u0433\u044D\u0434\u044D\u0433 \u0432\u044D?`, + context: `\u041D\u0430\u043C\u0430\u0439\u0433 \u0413\u0430\u043D\u0431\u0430\u0442 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u0432\u0441 \u0430\u0439\u043C\u0430\u0433\u0442 \u0442\u04E9\u0440\u0441\u04E9\u043D.` + } + ] + ], + ["translation", [`\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440\u0442 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433.`, `\u041D\u0430\u043C\u0430\u0439\u0433 \u0413\u0430\u043D\u0431\u0430\u0442 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u0432\u0441 \u0430\u0439\u043C\u0430\u0433\u0442 \u0442\u04E9\u0440\u0441\u04E9\u043D.`]], + [ + "summarization", + [ + `\u041C\u043E\u043D\u0433\u043E\u043B \u0423\u043B\u0441 (1992 \u043E\u043D\u043E\u043E\u0441 \u0445\u043E\u0439\u0448) \u2014 \u0434\u043E\u0440\u043D\u043E \u0431\u043E\u043B\u043E\u043D \u0442\u04E9\u0432 \u0410\u0437\u0438\u0434 \u043E\u0440\u0448\u0434\u043E\u0433 \u0431\u04AF\u0440\u044D\u043D \u044D\u0440\u0445\u0442 \u0443\u043B\u0441. \u0425\u043E\u0439\u0434 \u0442\u0430\u043B\u0430\u0430\u0440\u0430\u0430 \u041E\u0440\u043E\u0441, \u0431\u0443\u0441\u0430\u0434 \u0442\u0430\u043B\u0430\u0430\u0440\u0430\u0430 \u0425\u044F\u0442\u0430\u0434 \u0443\u043B\u0441\u0442\u0430\u0439 \u0445\u0438\u043B\u043B\u044D\u0434\u044D\u0433 \u0434\u0430\u043B\u0430\u0439\u0434 \u0433\u0430\u0440\u0446\u0433\u04AF\u0439 \u043E\u0440\u043E\u043D. \u041D\u0438\u0439\u0441\u043B\u044D\u043B \u2014 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440 \u0445\u043E\u0442. 
\u0410\u043B\u0442\u0430\u0439\u043D \u043D\u0443\u0440\u0443\u0443\u043D\u0430\u0430\u0441 \u0425\u044F\u043D\u0433\u0430\u043D, \u0421\u043E\u0451\u043D\u043E\u043E\u0441 \u0413\u043E\u0432\u044C \u0445\u04AF\u0440\u0441\u044D\u043D 1 \u0441\u0430\u044F 566 \u043C\u044F\u043D\u0433\u0430\u043D \u043A\u043C2 \u0443\u0443\u0434\u0430\u043C \u043D\u0443\u0442\u0430\u0433\u0442\u0430\u0439, \u0434\u044D\u043B\u0445\u0438\u0439\u0434 \u043D\u0443\u0442\u0430\u0433 \u0434\u044D\u0432\u0441\u0433\u044D\u0440\u0438\u0439\u043D \u0445\u044D\u043C\u0436\u044D\u044D\u0433\u044D\u044D\u0440 19-\u0440\u0442 \u0436\u0430\u0433\u0441\u0434\u0430\u0433. 2015 \u043E\u043D\u044B \u044D\u0445\u044D\u043D\u0434 \u041C\u043E\u043D\u0433\u043E\u043B \u0423\u043B\u0441\u044B\u043D \u0445\u04AF\u043D \u0430\u043C 3 \u0441\u0430\u044F \u0445\u04AF\u0440\u0441\u044D\u043D (135-\u0440 \u043E\u043B\u043E\u043D). \u04AE\u043D\u0434\u0441\u044D\u043D\u0434\u044D\u044D \u043C\u043E\u043D\u0433\u043E\u043B \u04AF\u043D\u0434\u044D\u0441\u0442\u044D\u043D (95 \u0445\u0443\u0432\u044C), \u043C\u04E9\u043D \u0445\u0430\u0441\u0430\u0433, \u0442\u0443\u0432\u0430 \u0445\u04AF\u043D \u0431\u0430\u0439\u043D\u0430. 16-\u0440 \u0437\u0443\u0443\u043D\u0430\u0430\u0441 \u0445\u043E\u0439\u0448 \u0431\u0443\u0434\u0434\u044B\u043D \u0448\u0430\u0448\u0438\u043D, 20-\u0440 \u0437\u0443\u0443\u043D\u0430\u0430\u0441 \u0448\u0430\u0448\u0438\u043D\u0433\u04AF\u0439 \u0431\u0430\u0439\u0434\u0430\u043B \u0434\u044D\u043B\u0433\u044D\u0440\u0441\u044D\u043D \u0431\u0430 \u0430\u043B\u0431\u0430\u043D \u0445\u044D\u0440\u044D\u0433\u0442 \u043C\u043E\u043D\u0433\u043E\u043B \u0445\u044D\u043B\u044D\u044D\u0440 \u0445\u0430\u0440\u0438\u043B\u0446\u0430\u043D\u0430.` + ] + ], + [ + "text-generation", + [`\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. 
\u0411\u0438`, `\u0425\u0430\u043C\u0433\u0438\u0439\u043D \u0441\u0430\u0439\u043D \u0434\u0443\u0443\u0447\u0438\u043D \u0431\u043E\u043B`, `\u041C\u0438\u043D\u0438\u0439 \u0434\u0443\u0440\u0442\u0430\u0439 \u0445\u0430\u043C\u0442\u043B\u0430\u0433 \u0431\u043E\u043B`, `\u042D\u0440\u0442 \u0443\u0440\u044C\u0434\u044B\u043D \u0446\u0430\u0433\u0442`] + ], + ["fill-mask", [`\u041C\u043E\u043D\u0433\u043E\u043B \u0443\u043B\u0441\u044B\u043D \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440 \u0445\u043E\u0442\u043E\u043E\u0441 \u044F\u0440\u044C\u0436 \u0431\u0430\u0439\u043D\u0430.`, `\u041C\u0438\u043D\u0438\u0439 \u0430\u043C\u044C\u0434\u0440\u0430\u043B\u044B\u043D \u0437\u043E\u0440\u0438\u043B\u0433\u043E \u0431\u043E\u043B .`]], + [ + "automatic-speech-recognition", + [ + { + label: `Common Voice Train Example`, + src: `https://cdn-media.huggingface.co/common_voice/train/common_voice_mn_18577472.wav` + }, + { + label: `Common Voice Test Example`, + src: `https://cdn-media.huggingface.co/common_voice/test/common_voice_mn_18577346.wav` + } + ] + ], + [ + "text-to-speech", + [ + `\u0411\u0438 \u041C\u043E\u043D\u0433\u043E\u043B \u0443\u043B\u0441\u044B\u043D \u0438\u0440\u0433\u044D\u043D.`, + `\u042D\u043D\u044D\u0445\u04AF\u04AF \u0436\u0438\u0448\u044D\u044D \u043D\u044C \u0446\u0430\u0430\u043D\u0430\u0430 \u044F\u043C\u0430\u0440 \u0447 \u0443\u0442\u0433\u0430 \u0430\u0433\u0443\u0443\u043B\u0430\u0430\u0433\u04AF\u0439 \u0431\u043E\u043B\u043D\u043E`, + `\u0421\u0430\u0440 \u0448\u0438\u043D\u044D\u0434\u044D\u044D \u0441\u0430\u0439\u0445\u0430\u043D \u0448\u0438\u043D\u044D\u043B\u044D\u0436 \u0431\u0430\u0439\u043D\u0430 \u0443\u0443?` + ] + ], + [ + "sentence-similarity", + [ + { + source_sentence: "\u042D\u043D\u044D \u0431\u043E\u043B \u0430\u0437 \u0436\u0430\u0440\u0433\u0430\u043B\u0442\u0430\u0439 \u0445\u04AF\u043D \u044E\u043C", + sentences: ["\u042D\u043D\u044D \u0431\u043E\u043B \u0430\u0437 
\u0436\u0430\u0440\u0433\u0430\u043B\u0442\u0430\u0439 \u043D\u043E\u0445\u043E\u0439 \u044E\u043C", "\u042D\u043D\u044D \u0431\u043E\u043B \u043C\u0430\u0448 \u0438\u0445 \u0430\u0437 \u0436\u0430\u0440\u0433\u0430\u043B\u0442\u0430\u0439 \u0445\u04AF\u043D \u044E\u043C", "\u04E8\u043D\u04E9\u04E9\u0434\u04E9\u0440 \u043D\u0430\u0440\u043B\u0430\u0433 \u04E9\u0434\u04E9\u0440 \u0431\u0430\u0439\u043D\u0430"] + } + ] + ] +]); +var MAPPING_SI = /* @__PURE__ */ new Map([ + ["translation", [`\u0DC3\u0DD2\u0D82\u0DC4\u0DBD \u0D89\u0DAD\u0DCF \u0D85\u0DBD\u0D82\u0D9A\u0DCF\u0DBB \u0DB7\u0DCF\u0DC2\u0DCF\u0DC0\u0D9A\u0DD2.`, `\u0DB8\u0DD9\u0DB8 \u0DAD\u0DCF\u0D9A\u0DCA\u0DC2\u0DAB\u0DBA \u0DB7\u0DCF\u0DC0\u0DD2\u0DAD\u0DCF \u0D9A\u0DBB\u0DB1 \u0D94\u0DB6\u0DA7 \u0DC3\u0DCA\u0DAD\u0DD6\u0DAD\u0DD2\u0DBA\u0DD2.`]], + ["fill-mask", [`\u0DB8\u0DB8 \u0D9C\u0DD9\u0DAF\u0DBB .`, ` \u0D89\u0D9C\u0DD9\u0DB1\u0DD3\u0DB8\u0DA7 \u0D9C\u0DD2\u0DBA\u0DCF\u0DBA.`]] +]); +var MAPPING_DE = /* @__PURE__ */ new Map([ + [ + "question-answering", + [ + { + text: `Wo wohne ich?`, + context: `Mein Name ist Wolfgang und ich lebe in Berlin` + }, + { + text: `Welcher Name wird auch verwendet, um den Amazonas-Regenwald auf Englisch zu beschreiben?`, + context: `Der Amazonas-Regenwald, auf Englisch auch als Amazonien oder Amazonas-Dschungel bekannt, ist ein feuchter Laubwald, der den gr\xF6\xDFten Teil des Amazonas-Beckens S\xFCdamerikas bedeckt. Dieses Becken umfasst 7.000.000 Quadratkilometer (2.700.000 Quadratmeilen), von denen 5.500.000 Quadratkilometer (2.100.000 Quadratmeilen) vom Regenwald bedeckt sind. Diese Region umfasst Gebiete von neun Nationen. Der gr\xF6\xDFte Teil des Waldes befindet sich in Brasilien mit 60% des Regenwaldes, gefolgt von Peru mit 13%, Kolumbien mit 10% und geringen Mengen in Venezuela, Ecuador, Bolivien, Guyana, Suriname und Franz\xF6sisch-Guayana. Staaten oder Abteilungen in vier Nationen enthalten "Amazonas" in ihren Namen. 
Der Amazonas repr\xE4sentiert mehr als die H\xE4lfte der verbleibenden Regenw\xE4lder des Planeten und umfasst den gr\xF6\xDFten und artenreichsten tropischen Regenwald der Welt mit gesch\xE4tzten 390 Milliarden Einzelb\xE4umen, die in 16.000 Arten unterteilt sind.` + } + ] + ], + [ + "sentence-similarity", + [ + { + source_sentence: "Das ist eine gl\xFCckliche Person", + sentences: [ + "Das ist ein gl\xFCcklicher Hund", + "Das ist eine sehr gl\xFCckliche Person", + "Heute ist ein sonniger Tag" + ] + } + ] + ] +]); +var MAPPING_DV = /* @__PURE__ */ new Map([ + ["text-classification", [`\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078E\u07A6\u0794\u07A7\u0788\u07AD. \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078D\u07AF\u0784\u07A8\u0788\u07AD`]], + [ + "token-classification", + [ + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0780\u07AA\u0789\u07A6\u078B\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0789\u07A7\u078D\u07AD\u078E\u07A6`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0790\u07A7\u0783\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0787\u07AA\u078C\u07A9\u0789\u07AA\u078E\u07A6`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0787\u07A8\u079D\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u078A\u07AD\u078B\u07AB\u060C \u0787\u07A6\u0787\u07B0\u0791\u07AB\u078E\u07A6` + ] + ], + [ + "question-answering", + [ + { + text: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 
\u0786\u07AE\u0782\u07B0\u078C\u07A7\u0786\u07AA\u061F`, + context: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0780\u07AA\u0789\u07A6\u078B\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0789\u07A7\u078D\u07AD\u078E\u07A6` + }, + { + text: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0786\u07AE\u0782\u07B0\u078C\u07A7\u0786\u07AA\u061F`, + context: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0790\u07A7\u0783\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0787\u07AA\u078C\u07A9\u0789\u07AA\u078E\u07A6` + }, + { + text: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0786\u07AE\u0784\u07A7\u061F`, + context: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0787\u07A8\u079D\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u078A\u07AD\u078B\u07AB\u078E\u07A6` + }, + { + text: `\u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0 \u0790\u07A8\u078A\u07A6\u0786\u07AE\u0781\u07B0\u078B\u07A8\u0782\u07AA\u0789\u07A6\u0781\u07B0 \u0787\u07A8\u0782\u078E\u07A8\u0783\u07AD\u0790\u07A8 \u0784\u07A6\u0780\u07AA\u0782\u07B0 \u0784\u07AD\u0782\u07AA\u0782\u07B0\u0786\u07AA\u0783\u07A7\u0782\u07A9 \u0786\u07AE\u0782\u07B0\u0782\u07A6\u0789\u07AC\u0787\u07B0\u061F`, + context: `\u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0 
(\u0795\u07AF\u0797\u07AA\u0796\u07A9\u0792\u07B0: \u078A\u07B0\u078D\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07A7 \u0787\u07AC\u0789\u07A6\u0790\u07AE\u0782\u07A8\u0786\u07A7 \u0782\u07AA\u0788\u07A6\u078C\u07A6 \u0787\u07AC\u0789\u07A6\u0790\u07AE\u0782\u07A8\u0787\u07A7\u061B \u0790\u07B0\u0795\u07AC\u0782\u07A8\u079D\u07B0: \u0790\u07AC\u078D\u07B0\u0788\u07A7 \u0787\u07AC\u0789\u07A6\u0790\u07AE\u0782\u07A8\u0786\u07A7, \u0787\u07AC\u0789\u07A6\u0790\u07AE\u0782\u07A8\u0787\u07A7 \u0782\u07AB\u0782\u07A9 \u0787\u07A7\u0782\u07B0\u0789\u07AA\u0786\u07AE\u0781\u07B0 \u0787\u07AC\u0789\u07A6\u0792\u07AF\u0782\u07A8\u0787\u07A7\u061B \u078A\u07B0\u0783\u07AC\u0782\u07B0\u0797\u07B0: \u078A\u07AE\u0783\u07AD \u0787\u07AC\u0789\u07AC\u0792\u07AE\u0782\u07A8\u0787\u07AC\u0782\u07B0\u061B \u0791\u07A6\u0797\u07B0: \u0787\u07AC\u0789\u07AC\u0792\u07AF\u0782\u07B0\u0783\u07AD\u078E\u07AC\u0788\u07A6\u0787\u07AA\u0791\u07B0)\u060C \u0787\u07A8\u078E\u07A8\u0783\u07AD\u0790\u07A8 \u0784\u07A6\u0780\u07AA\u0782\u07B0 \u0784\u07AA\u0782\u07A7 \u0787\u07AC\u0789\u07AC\u0792\u07AF\u0782\u07A8\u0787\u07A7 \u0782\u07AA\u0788\u07A6\u078C\u07A6 \u078B\u07A6 \u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0796\u07A6\u0782\u07B0\u078E\u07A6\u078D\u07B0 \u0787\u07A6\u0786\u07A9, \u0790\u07A6\u0787\u07AA\u078C\u07AA \u0787\u07AC\u0789\u07AC\u0783\u07A8\u0786\u07A7\u078E\u07AC \u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0784\u07AD\u0790\u07A8\u0782\u07B0 \u0790\u07A6\u0783\u07A6\u0780\u07A6\u0787\u07B0\u078B\u07AA\u078E\u07AC \u0784\u07AE\u0791\u07AA\u0784\u07A6\u0787\u07AC\u0787\u07B0\u078E\u07A6\u0787\u07A8 \u0780\u07A8\u0789\u07AC\u0782\u07AD \u0789\u07AE\u0787\u07A8\u0790\u07B0\u0793\u07B0 \u0784\u07AE\u0783\u07AF\u0791\u07B0\u078D\u07A9\u078A\u07B0 \u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07AC\u0787\u07AC\u0786\u07AC\u0788\u07AC. 
\u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0784\u07AD\u0790\u07A8\u0782\u07B0 \u0790\u07A6\u0783\u07A6\u0780\u07A6\u0787\u07B0\u078B\u07AA\u078E\u07AC \u0784\u07AE\u0791\u07AA \u0789\u07A8\u0782\u07A6\u0786\u07A9 7 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0787\u07A6\u0786\u07A6 \u0786\u07A8\u078D\u07AF\u0789\u07A9\u0793\u07A6\u0783 (2.7 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0787\u07A6\u0786\u07A6 \u0789\u07A6\u0787\u07A8\u078D\u07B0(. \u0789\u07A9\u078E\u07AC \u078C\u07AC\u0783\u07AC\u0787\u07A8\u0782\u07B0 5.5 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0787\u07A6\u0786\u07A6 \u0786\u07A8\u078D\u07AF\u0789\u07A9\u0793\u07A6\u0783 (2.1 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0787\u07A6\u0786\u07A6 \u0789\u07A6\u0787\u07A8\u078D\u07B0) \u0787\u07A6\u0786\u07A9 \u0789\u07A8 \u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07AC\u0788\u07AC. \u0789\u07A8 \u0790\u07A6\u0783\u07A6\u0780\u07A6\u0787\u07B0\u078B\u07AA\u078E\u07A6\u0787\u07A8 9 \u078E\u07A6\u0787\u07AA\u0789\u07A6\u0786\u07A6\u0781\u07B0 \u0782\u07A8\u0790\u07B0\u0784\u07A6\u078C\u07B0\u0788\u07A7 \u0793\u07AC\u0783\u07A8\u0793\u07A6\u0783\u07A9 \u0780\u07A8\u0789\u07AC\u0782\u07AC\u0787\u07AC\u0788\u07AC. 60% \u0787\u07A7\u0787\u07A8\u0787\u07AC\u0786\u07AC \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0784\u07AE\u0791\u07AA \u0784\u07A6\u0787\u07AC\u0787\u07B0 \u0782\u07A8\u0790\u07B0\u0784\u07A6\u078C\u07B0\u0788\u07A6\u0782\u07A9 \u0784\u07B0\u0783\u07AC\u0792\u07A8\u078D\u07B0\u0787\u07A6\u0781\u07AC\u0788\u07AC. 
\u0787\u07AD\u078E\u07AC \u078A\u07A6\u0780\u07AA\u078C\u07AA\u0782\u07B0 13% \u0787\u07A7\u0787\u07AC\u0786\u07AA \u0795\u07AC\u0783\u07AB \u0787\u07A7\u0787\u07A8 10% \u0787\u07A7\u0787\u07AC\u0786\u07AA \u0786\u07AE\u078D\u07A6\u0789\u07B0\u0784\u07A8\u0787\u07A7 \u0787\u07A6\u078B\u07A8 \u0786\u07AA\u0791\u07A6 \u0784\u07A6\u0787\u07AC\u0787\u07B0 \u0780\u07A8\u0789\u07AC\u0782\u07AD \u078E\u07AE\u078C\u07AA\u0782\u07B0 \u0788\u07AC\u0782\u07AC\u0792\u07AA\u0787\u07AC\u078D\u07A7, \u0787\u07AC\u0786\u07B0\u0787\u07A6\u0791\u07AF, \u0784\u07AE\u078D\u07A8\u0788\u07A8\u0787\u07A7, \u078E\u07AA\u0794\u07A7\u0782\u07A7, \u0790\u07AA\u0783\u07A8\u0782\u07A7\u0789\u07B0 \u0787\u07A6\u078B\u07A8 \u078A\u07B0\u0783\u07AC\u0782\u07B0\u0797\u07B0 \u078E\u07B0\u0787\u07A7\u0782\u07A7 \u0787\u07A6\u0781\u07B0 \u0788\u07AC\u0790\u07B0 \u0782\u07A8\u0790\u07B0\u0784\u07A6\u078C\u07B0\u0788\u07AC\u0787\u07AC\u0788\u07AC. \u0789\u07A9\u078E\u07AC \u078C\u07AC\u0783\u07AC\u0787\u07A8\u0782\u07B0 4 \u078E\u07A6\u0787\u07AA\u0789\u07AC\u0787\u07B0\u078E\u07A6\u0787\u07A8 "\u0787\u07AC\u0789\u07AC\u0792\u07AE\u0782\u07A7\u0790\u07B0" \u0780\u07A8\u0789\u07A6\u0782\u07A6\u0787\u07A8\u078E\u07AC\u0782\u07B0 \u0790\u07B0\u0793\u07AD\u0793\u07B0 \u0782\u07AA\u0788\u07A6\u078C\u07A6 \u0791\u07A8\u0795\u07A7\u0793\u07B0\u0789\u07A6\u0782\u07B0\u0793\u07B0 \u0787\u07A6\u0786\u07A6\u0781\u07B0 \u0782\u07A6\u0782\u07B0\u078B\u07A9\u078A\u07A6\u0787\u07A8\u0788\u07AC\u0787\u07AC\u0788\u07AC. 
\u0789\u07AA\u0785\u07A8 \u078B\u07AA\u0782\u07A8\u0794\u07AD\u078E\u07A6\u0787\u07A8 \u0784\u07A7\u0786\u07A9 \u0780\u07AA\u0783\u07A8 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0\u078E\u07AC \u078C\u07AC\u0783\u07AC\u0787\u07A8\u0782\u07B0 \u078B\u07AC\u0784\u07A6\u0787\u07A8\u0786\u07AA\u0785\u07A6 \u0787\u07AC\u0787\u07B0\u0784\u07A6\u0794\u07A6\u0781\u07B0\u0788\u07AA\u0783\u07AC\u0784\u07AE\u0791\u07AA\u0788\u07A6\u0783\u07AC\u0787\u07B0 \u0787\u07AC\u0789\u07AD\u0792\u07AE\u0782\u07B0 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0 \u0780\u07A8\u0787\u07B0\u0790\u07A7\u0786\u07AA\u0783\u07AC\u0787\u07AC\u0788\u07AC. \u0789\u07A8\u0787\u07A9 \u0789\u07AA\u0785\u07A8 \u078B\u07AA\u0782\u07A8\u0794\u07AC\u0787\u07A8\u0782\u07B0 \u0787\u07AC\u0782\u07B0\u0789\u07AE \u0784\u07AE\u0791\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0784\u07A6\u0787\u07AE\u0791\u07A6\u0787\u07A8\u0788\u07A6\u0783\u0790\u07B0 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0 \u0793\u07B0\u0783\u07AC\u0786\u07B0\u0793\u07AC\u0788\u07AC. 
\u078D\u07A6\u078A\u07A7\u0786\u07AA\u0783\u07AC\u0788\u07AD \u078E\u07AE\u078C\u07AA\u0782\u07B0 16 \u0780\u07A7\u0790\u07B0 \u0790\u07B0\u0795\u07A9\u079D\u07A9\u0790\u07B0\u0787\u07A6\u0781\u07B0 \u0784\u07AC\u0780\u07A8\u078E\u07AC\u0782\u07B0\u0788\u07A7 390 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0788\u07A6\u0787\u07B0\u078C\u07A6\u0783\u07AA\u078E\u07AC \u078E\u07A6\u0790\u07B0 \u0789\u07A8\u078C\u07A7\u078E\u07A6\u0787\u07A8 \u0780\u07A8\u0789\u07AC\u0782\u07AC\u0787\u07AC\u0788\u07AC` + } + ] + ], + [ + "translation", + [ + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0780\u07AA\u0789\u07A6\u078B\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0789\u07A7\u078D\u07AD\u078E\u07A6`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0790\u07A7\u0783\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0787\u07AA\u078C\u07A9\u0789\u07AA\u078E\u07A6` + ] + ], + [ + "summarization", + [ + `\u0793\u07A6\u0788\u07A6\u0783\u07AA\u078E\u07AC \u0787\u07AA\u0790\u07B0\u0789\u07A8\u0782\u07A6\u0786\u07A9 324 \u0789\u07A9\u0793\u07A6\u0783\u07AA\u060C \u0787\u07AC\u0787\u07A9 \u078E\u07A7\u078C\u07B0\u078E\u07A6\u0782\u0791\u07A6\u0786\u07A6\u0781\u07B0 81 \u0784\u07AA\u0783\u07A9\u078E\u07AC \u0787\u07A8\u0789\u07A7\u0783\u07A7\u078C\u07A6\u0786\u07A7\u0787\u07A8 \u0787\u07AC\u0787\u07B0\u0788\u07A6\u0783\u07AC\u0788\u07AC. \u0787\u07AC\u0787\u07A9 \u0795\u07AC\u0783\u07A8\u0790\u07B0\u078E\u07A6\u0787\u07A8 \u0780\u07AA\u0783\u07A8 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0787\u07AA\u0790\u07B0 \u0787\u07A8\u0789\u07A7\u0783\u07A7\u078C\u07AC\u0788\u07AC. 
\u0787\u07AD\u078E\u07AC \u0780\u07A6\u078C\u07A6\u0783\u07AC\u0790\u07B0\u0786\u07A6\u0782\u07A6\u0781\u07B0 \u0780\u07AA\u0783\u07A8 \u0784\u07AA\u0791\u07AA\u078E\u07AC \u078B\u07A8\u078E\u07AA\u0789\u07A8\u0782\u07A6\u0786\u07A9 \u0786\u07AE\u0782\u07B0\u0789\u07AC \u078A\u07A6\u0783\u07A7\u078C\u07A6\u0786\u07AA\u0782\u07B0 125 \u0789\u07A9\u0793\u07A6\u0783\u07AC\u0788\u07AC. (410 \u078A\u07AB\u0793\u07AA) \u0787\u07A6\u0787\u07A8\u078A\u07A8\u078D\u07B0 \u0793\u07A6\u0788\u07A6\u0783\u07AA \u0784\u07A8\u0782\u07A7\u0786\u07AA\u0783\u07A8 \u0787\u07A8\u0783\u07AA\u060C \u0788\u07AE\u079D\u07A8\u0782\u07B0\u078E\u07B0\u0793\u07A6\u0782\u07B0 \u0789\u07AE\u0782\u07A8\u0787\u07AA\u0789\u07AC\u0782\u07B0\u0793\u07B0\u078E\u07AC \u0787\u07AA\u0790\u07B0\u0789\u07A8\u0782\u07B0 \u078A\u07A6\u0780\u07A6\u0782\u07A6\u0787\u07A6\u0785\u07A7 \u078E\u07AE\u0790\u07B0\u060C \u078B\u07AA\u0782\u07A8\u0794\u07AD\u078E\u07A6\u0787\u07A8 \u0789\u07A9\u0780\u07AA\u0782\u07B0 \u0787\u07AA\u078A\u07AC\u0787\u07B0\u078B\u07A8 \u078C\u07A6\u0782\u07B0\u078C\u07A6\u0782\u07AA\u078E\u07AC \u078C\u07AC\u0783\u07AC\u0787\u07A8\u0782\u07B0 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0787\u07AA\u0790\u07B0 \u078C\u07A6\u0782\u07AA\u078E\u07AC \u078D\u07A6\u078E\u07A6\u0784\u07AA \u078D\u07A8\u0784\u07AA\u0782\u07AC\u0788\u07AC. \u0787\u07A6\u078B\u07A8 1930 \u078E\u07A6\u0787\u07A8 \u0782\u07A8\u0787\u07AA \u0794\u07AF\u0786\u07B0\u078E\u07AC \u0786\u07B0\u0783\u07A6\u0787\u07A8\u0790\u07B0\u078D\u07A6\u0783 \u0784\u07A8\u078D\u07B0\u0791\u07A8\u0782\u07B0\u078E\u07B0 \u0784\u07A8\u0782\u07A7\u0786\u07AA\u0783\u07AA\u0789\u07A7\u0787\u07A8 \u0780\u07A6\u0789\u07A6\u0787\u07A6\u0781\u07B0 41 \u0787\u07A6\u0780\u07A6\u0783\u07AA \u0788\u07A6\u0782\u07B0\u078B\u07AC\u0782\u07B0 \u0789\u07A8\u078D\u07A6\u078E\u07A6\u0784\u07AA \u0780\u07A8\u078A\u07AC\u0780\u07AC\u0787\u07B0\u0793\u07A8\u0787\u07AC\u0788\u07AC. 
\u0789\u07A8\u0787\u07A9 300 \u0789\u07A9\u0793\u07A6\u0783\u07A6\u0781\u07B0 \u0788\u07AA\u0783\u07AC \u0787\u07AA\u0790\u07B0\u0786\u07AE\u0781\u07B0 \u0787\u07A8\u0789\u07A7\u0783\u07A7\u078C\u07B0\u0786\u07AA\u0783\u07AC\u0788\u07AA\u0782\u07AA \u078A\u07AA\u0783\u07A6\u078C\u07A6\u0789\u07A6 \u078C\u07A6\u0782\u07AC\u0788\u07AC. 1957 \u078E\u07A6\u0787\u07A8 \u0793\u07A6\u0788\u07A6\u0783\u07AA\u078E\u07AC \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0789\u07A6\u078C\u07A9\u078E\u07A6\u0787\u07A8 \u0780\u07A6\u0783\u07AA\u0786\u07AA\u0783\u07AC\u0788\u07AA\u0782\u07AA \u0784\u07B0\u0783\u07AF\u0791\u07B0\u0786\u07A7\u0790\u07B0\u0793\u07A8\u0782\u07B0\u078E \u0787\u07AD\u0783\u07A8\u0787\u07A6\u078D\u07B0\u078E\u07AC \u0790\u07A6\u0784\u07A6\u0784\u07AA\u0782\u07B0 \u0789\u07A8\u0780\u07A7\u0783\u07AA \u0789\u07A8 \u0793\u07A6\u0788\u07A6\u0783\u07AA \u0786\u07B0\u0783\u07A6\u0787\u07A8\u0790\u07B0\u078D\u07A6\u0783 \u0784\u07A8\u078D\u07B0\u0791\u07A8\u0782\u07B0\u078E\u0787\u07A6\u0781\u07B0 \u0788\u07AA\u0783\u07AC 5.2 \u0789\u07A9\u0793\u07A6\u0783 (17 \u078A\u07AB\u0793\u07AA) \u0787\u07AA\u0780\u07AC\u0788\u07AC. 
\u0789\u07A8 \u0793\u07B0\u0783\u07A7\u0782\u07B0\u0790\u07B0\u0789\u07A8\u0793\u07A6\u0783\u07AA \u0782\u07AA\u078D\u07A7\u060C \u0787\u07A6\u0787\u07A8\u078A\u07A8\u078D\u07B0 \u0793\u07A6\u0788\u07A6\u0783\u07A6\u0786\u07A9\u060C \u0789\u07A8\u078D\u07A7\u0787\u07AA \u0788\u07A8\u0787\u07A7\u0791\u07A6\u0786\u07B0\u0793\u07A6\u0781\u07B0 \u078A\u07A6\u0780\u07AA \u078A\u07B0\u0783\u07A7\u0782\u07B0\u0790\u07B0\u078E\u07A6\u0787\u07A8 \u0780\u07AA\u0783\u07A8 2 \u0788\u07A6\u0782\u07A6\u0787\u07A6\u0781\u07B0 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0787\u07AA\u0790\u07B0 \u078A\u07B0\u0783\u07A9\u0790\u07B0\u0793\u07AD\u0782\u07B0\u0791\u07A8\u0782\u07B0\u078E \u0787\u07A8\u0789\u07A7\u0783\u07A7\u078C\u07AC\u0788\u07AC` + ] + ], + [ + "text-generation", + [ + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0794\u07AB\u0790\u07AA\u078A\u07B0 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0789\u07A6\u0787\u07A8\u078E\u07A6\u0782\u0791\u07AA`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0789\u07A6\u0783\u07A8\u0787\u07A6\u0789\u07B0\u060C \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u078E\u07A6\u0794\u07A7\u0788\u07A7`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u078A\u07A7\u078C\u07AA\u0789\u07A6\u078C\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0`, + `\u060C\u0787\u07AC\u0787\u07B0 \u0792\u07A6\u0789\u07A7\u0782\u07AC\u0787\u07B0\u078E\u07A6\u0787\u07A8` + ] + ], + ["fill-mask", [`. 
\u0789\u07A7\u078D\u07AC \u0787\u07A6\u0786\u07A9 \u078B\u07A8\u0788\u07AC\u0780\u07A8\u0783\u07A7\u0787\u07B0\u0796\u07AD\u078E\u07AC`, `\u078E\u07A6\u0783\u07AA\u078B\u07A8\u0794\u07A6\u0787\u07A6\u0786\u07A9 \u078B\u07A8\u0788\u07AC\u0780\u07A8\u0782\u07B0\u078E\u07AC \u0789\u07AC\u078B\u07AA\u078E\u07A6\u0787\u07A8 \u0786\u07AC\u0787\u07AA\u0789\u07AC\u0787\u07B0.`]] +]); +var MAPPING_DEFAULT_WIDGET = /* @__PURE__ */ new Map([ + ["en", MAPPING_EN], + ["zh", MAPPING_ZH], + ["fr", MAPPING_FR], + ["es", MAPPING_ES], + ["ru", MAPPING_RU], + ["uk", MAPPING_UK], + ["it", MAPPING_IT], + ["fa", MAPPING_FA], + ["ar", MAPPING_AR], + ["bn", MAPPING_BN], + ["mn", MAPPING_MN], + ["si", MAPPING_SI], + ["de", MAPPING_DE], + ["dv", MAPPING_DV] +]); + +// src/pipelines.ts +var MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"]; +var MODALITY_LABELS = { + multimodal: "Multimodal", + nlp: "Natural Language Processing", + audio: "Audio", + cv: "Computer Vision", + rl: "Reinforcement Learning", + tabular: "Tabular", + other: "Other" +}; +var PIPELINE_DATA = { + "text-classification": { + name: "Text Classification", + subtasks: [ + { + type: "acceptability-classification", + name: "Acceptability Classification" + }, + { + type: "entity-linking-classification", + name: "Entity Linking Classification" + }, + { + type: "fact-checking", + name: "Fact Checking" + }, + { + type: "intent-classification", + name: "Intent Classification" + }, + { + type: "language-identification", + name: "Language Identification" + }, + { + type: "multi-class-classification", + name: "Multi Class Classification" + }, + { + type: "multi-label-classification", + name: "Multi Label Classification" + }, + { + type: "multi-input-text-classification", + name: "Multi-input Text Classification" + }, + { + type: "natural-language-inference", + name: "Natural Language Inference" + }, + { + type: "semantic-similarity-classification", + name: "Semantic Similarity Classification" + }, + { + 
type: "sentiment-classification", + name: "Sentiment Classification" + }, + { + type: "topic-classification", + name: "Topic Classification" + }, + { + type: "semantic-similarity-scoring", + name: "Semantic Similarity Scoring" + }, + { + type: "sentiment-scoring", + name: "Sentiment Scoring" + }, + { + type: "sentiment-analysis", + name: "Sentiment Analysis" + }, + { + type: "hate-speech-detection", + name: "Hate Speech Detection" + }, + { + type: "text-scoring", + name: "Text Scoring" + } + ], + modality: "nlp", + color: "orange" + }, + "token-classification": { + name: "Token Classification", + subtasks: [ + { + type: "named-entity-recognition", + name: "Named Entity Recognition" + }, + { + type: "part-of-speech", + name: "Part of Speech" + }, + { + type: "parsing", + name: "Parsing" + }, + { + type: "lemmatization", + name: "Lemmatization" + }, + { + type: "word-sense-disambiguation", + name: "Word Sense Disambiguation" + }, + { + type: "coreference-resolution", + name: "Coreference-resolution" + } + ], + modality: "nlp", + color: "blue" + }, + "table-question-answering": { + name: "Table Question Answering", + modality: "nlp", + color: "green" + }, + "question-answering": { + name: "Question Answering", + subtasks: [ + { + type: "extractive-qa", + name: "Extractive QA" + }, + { + type: "open-domain-qa", + name: "Open Domain QA" + }, + { + type: "closed-domain-qa", + name: "Closed Domain QA" + } + ], + modality: "nlp", + color: "blue" + }, + "zero-shot-classification": { + name: "Zero-Shot Classification", + modality: "nlp", + color: "yellow" + }, + translation: { + name: "Translation", + modality: "nlp", + color: "green" + }, + summarization: { + name: "Summarization", + subtasks: [ + { + type: "news-articles-summarization", + name: "News Articles Summarization" + }, + { + type: "news-articles-headline-generation", + name: "News Articles Headline Generation" + } + ], + modality: "nlp", + color: "indigo" + }, + "feature-extraction": { + name: "Feature 
Extraction", + modality: "nlp", + color: "red" + }, + "text-generation": { + name: "Text Generation", + subtasks: [ + { + type: "dialogue-modeling", + name: "Dialogue Modeling" + }, + { + type: "dialogue-generation", + name: "Dialogue Generation" + }, + { + type: "conversational", + name: "Conversational" + }, + { + type: "language-modeling", + name: "Language Modeling" + } + ], + modality: "nlp", + color: "indigo" + }, + "text2text-generation": { + name: "Text2Text Generation", + subtasks: [ + { + type: "text-simplification", + name: "Text simplification" + }, + { + type: "explanation-generation", + name: "Explanation Generation" + }, + { + type: "abstractive-qa", + name: "Abstractive QA" + }, + { + type: "open-domain-abstractive-qa", + name: "Open Domain Abstractive QA" + }, + { + type: "closed-domain-qa", + name: "Closed Domain QA" + }, + { + type: "open-book-qa", + name: "Open Book QA" + }, + { + type: "closed-book-qa", + name: "Closed Book QA" + } + ], + modality: "nlp", + color: "indigo" + }, + "fill-mask": { + name: "Fill-Mask", + subtasks: [ + { + type: "slot-filling", + name: "Slot Filling" + }, + { + type: "masked-language-modeling", + name: "Masked Language Modeling" + } + ], + modality: "nlp", + color: "red" + }, + "sentence-similarity": { + name: "Sentence Similarity", + modality: "nlp", + color: "yellow" + }, + "text-to-speech": { + name: "Text-to-Speech", + modality: "audio", + color: "yellow" + }, + "text-to-audio": { + name: "Text-to-Audio", + modality: "audio", + color: "yellow" + }, + "automatic-speech-recognition": { + name: "Automatic Speech Recognition", + modality: "audio", + color: "yellow" + }, + "audio-to-audio": { + name: "Audio-to-Audio", + modality: "audio", + color: "blue" + }, + "audio-classification": { + name: "Audio Classification", + subtasks: [ + { + type: "keyword-spotting", + name: "Keyword Spotting" + }, + { + type: "speaker-identification", + name: "Speaker Identification" + }, + { + type: "audio-intent-classification", + 
name: "Audio Intent Classification" + }, + { + type: "audio-emotion-recognition", + name: "Audio Emotion Recognition" + }, + { + type: "audio-language-identification", + name: "Audio Language Identification" + } + ], + modality: "audio", + color: "green" + }, + "voice-activity-detection": { + name: "Voice Activity Detection", + modality: "audio", + color: "red" + }, + "depth-estimation": { + name: "Depth Estimation", + modality: "cv", + color: "yellow" + }, + "image-classification": { + name: "Image Classification", + subtasks: [ + { + type: "multi-label-image-classification", + name: "Multi Label Image Classification" + }, + { + type: "multi-class-image-classification", + name: "Multi Class Image Classification" + } + ], + modality: "cv", + color: "blue" + }, + "object-detection": { + name: "Object Detection", + subtasks: [ + { + type: "face-detection", + name: "Face Detection" + }, + { + type: "vehicle-detection", + name: "Vehicle Detection" + } + ], + modality: "cv", + color: "yellow" + }, + "image-segmentation": { + name: "Image Segmentation", + subtasks: [ + { + type: "instance-segmentation", + name: "Instance Segmentation" + }, + { + type: "semantic-segmentation", + name: "Semantic Segmentation" + }, + { + type: "panoptic-segmentation", + name: "Panoptic Segmentation" + } + ], + modality: "cv", + color: "green" + }, + "text-to-image": { + name: "Text-to-Image", + modality: "cv", + color: "yellow" + }, + "image-to-text": { + name: "Image-to-Text", + subtasks: [ + { + type: "image-captioning", + name: "Image Captioning" + } + ], + modality: "cv", + color: "red" + }, + "image-to-image": { + name: "Image-to-Image", + subtasks: [ + { + type: "image-inpainting", + name: "Image Inpainting" + }, + { + type: "image-colorization", + name: "Image Colorization" + }, + { + type: "super-resolution", + name: "Super Resolution" + } + ], + modality: "cv", + color: "indigo" + }, + "image-to-video": { + name: "Image-to-Video", + modality: "cv", + color: "indigo" + }, + 
"unconditional-image-generation": { + name: "Unconditional Image Generation", + modality: "cv", + color: "green" + }, + "video-classification": { + name: "Video Classification", + modality: "cv", + color: "blue" + }, + "reinforcement-learning": { + name: "Reinforcement Learning", + modality: "rl", + color: "red" + }, + robotics: { + name: "Robotics", + modality: "rl", + subtasks: [ + { + type: "grasping", + name: "Grasping" + }, + { + type: "task-planning", + name: "Task Planning" + } + ], + color: "blue" + }, + "tabular-classification": { + name: "Tabular Classification", + modality: "tabular", + subtasks: [ + { + type: "tabular-multi-class-classification", + name: "Tabular Multi Class Classification" + }, + { + type: "tabular-multi-label-classification", + name: "Tabular Multi Label Classification" + } + ], + color: "blue" + }, + "tabular-regression": { + name: "Tabular Regression", + modality: "tabular", + subtasks: [ + { + type: "tabular-single-column-regression", + name: "Tabular Single Column Regression" + } + ], + color: "blue" + }, + "tabular-to-text": { + name: "Tabular to Text", + modality: "tabular", + subtasks: [ + { + type: "rdf-to-text", + name: "RDF to text" + } + ], + color: "blue", + hideInModels: true + }, + "table-to-text": { + name: "Table to Text", + modality: "nlp", + color: "blue", + hideInModels: true + }, + "multiple-choice": { + name: "Multiple Choice", + subtasks: [ + { + type: "multiple-choice-qa", + name: "Multiple Choice QA" + }, + { + type: "multiple-choice-coreference-resolution", + name: "Multiple Choice Coreference Resolution" + } + ], + modality: "nlp", + color: "blue", + hideInModels: true + }, + "text-retrieval": { + name: "Text Retrieval", + subtasks: [ + { + type: "document-retrieval", + name: "Document Retrieval" + }, + { + type: "utterance-retrieval", + name: "Utterance Retrieval" + }, + { + type: "entity-linking-retrieval", + name: "Entity Linking Retrieval" + }, + { + type: "fact-checking-retrieval", + name: "Fact Checking 
Retrieval" + } + ], + modality: "nlp", + color: "indigo", + hideInModels: true + }, + "time-series-forecasting": { + name: "Time Series Forecasting", + modality: "tabular", + subtasks: [ + { + type: "univariate-time-series-forecasting", + name: "Univariate Time Series Forecasting" + }, + { + type: "multivariate-time-series-forecasting", + name: "Multivariate Time Series Forecasting" + } + ], + color: "blue" + }, + "text-to-video": { + name: "Text-to-Video", + modality: "cv", + color: "green" + }, + "image-text-to-text": { + name: "Image-Text-to-Text", + modality: "multimodal", + color: "red", + hideInDatasets: true + }, + "visual-question-answering": { + name: "Visual Question Answering", + subtasks: [ + { + type: "visual-question-answering", + name: "Visual Question Answering" + } + ], + modality: "multimodal", + color: "red" + }, + "document-question-answering": { + name: "Document Question Answering", + subtasks: [ + { + type: "document-question-answering", + name: "Document Question Answering" + } + ], + modality: "multimodal", + color: "blue", + hideInDatasets: true + }, + "zero-shot-image-classification": { + name: "Zero-Shot Image Classification", + modality: "cv", + color: "yellow" + }, + "graph-ml": { + name: "Graph Machine Learning", + modality: "other", + color: "green" + }, + "mask-generation": { + name: "Mask Generation", + modality: "cv", + color: "indigo" + }, + "zero-shot-object-detection": { + name: "Zero-Shot Object Detection", + modality: "cv", + color: "yellow" + }, + "text-to-3d": { + name: "Text-to-3D", + modality: "cv", + color: "yellow" + }, + "image-to-3d": { + name: "Image-to-3D", + modality: "cv", + color: "green" + }, + "image-feature-extraction": { + name: "Image Feature Extraction", + modality: "cv", + color: "indigo" + }, + other: { + name: "Other", + modality: "other", + color: "blue", + hideInModels: true, + hideInDatasets: true + } +}; +var PIPELINE_TYPES = Object.keys(PIPELINE_DATA); +var SUBTASK_TYPES = 
Object.values(PIPELINE_DATA).flatMap((data) => "subtasks" in data ? data.subtasks : []).map((s) => s.type); +var PIPELINE_TYPES_SET = new Set(PIPELINE_TYPES); + +// src/tasks/audio-classification/data.ts +var taskData = { + datasets: [ + { + description: "A benchmark of 10 different audio tasks.", + id: "superb" + } + ], + demo: { + inputs: [ + { + filename: "audio.wav", + type: "audio" + } + ], + outputs: [ + { + data: [ + { + label: "Up", + score: 0.2 + }, + { + label: "Down", + score: 0.8 + } + ], + type: "chart" + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + description: "An easy-to-use model for Command Recognition.", + id: "speechbrain/google_speech_command_xvector" + }, + { + description: "An Emotion Recognition model.", + id: "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" + }, + { + description: "A language identification model.", + id: "facebook/mms-lid-126" + } + ], + spaces: [ + { + description: "An application that can classify music into different genre.", + id: "kurianbenoy/audioclassification" + } + ], + summary: "Audio classification is the task of assigning a label or class to a given audio. 
It can be used for recognizing which command a user is giving or the emotion of a statement, as well as identifying a speaker.", + widgetModels: ["facebook/mms-lid-126"], + youtubeId: "KWwzcmG98Ds" +}; +var data_default = taskData; + +// src/tasks/audio-to-audio/data.ts +var taskData2 = { + datasets: [ + { + description: "512-element X-vector embeddings of speakers from CMU ARCTIC dataset.", + id: "Matthijs/cmu-arctic-xvectors" + } + ], + demo: { + inputs: [ + { + filename: "input.wav", + type: "audio" + } + ], + outputs: [ + { + filename: "label-0.wav", + type: "audio" + }, + { + filename: "label-1.wav", + type: "audio" + } + ] + }, + metrics: [ + { + description: "The Signal-to-Noise ratio is the relationship between the target signal level and the background noise level. It is calculated as the logarithm of the target signal divided by the background noise, in decibels.", + id: "snri" + }, + { + description: "The Signal-to-Distortion ratio is the relationship between the target signal and the sum of noise, interference, and artifact errors", + id: "sdri" + } + ], + models: [ + { + description: "A solid model of audio source separation.", + id: "speechbrain/sepformer-wham" + }, + { + description: "A speech enhancement model.", + id: "speechbrain/metricgan-plus-voicebank" + } + ], + spaces: [ + { + description: "An application for speech separation.", + id: "younver/speechbrain-speech-separation" + }, + { + description: "An application for audio style transfer.", + id: "nakas/audio-diffusion_style_transfer" + } + ], + summary: "Audio-to-Audio is a family of tasks in which the input is an audio and the output is one or multiple generated audios. 
Some example tasks are speech enhancement and source separation.", + widgetModels: ["speechbrain/sepformer-wham"], + youtubeId: "iohj7nCCYoM" +}; +var data_default2 = taskData2; + +// src/tasks/automatic-speech-recognition/data.ts +var taskData3 = { + datasets: [ + { + description: "31,175 hours of multilingual audio-text dataset in 108 languages.", + id: "mozilla-foundation/common_voice_17_0" + }, + { + description: "An English dataset with 1,000 hours of data.", + id: "librispeech_asr" + }, + { + description: "A multi-lingual audio dataset with 370K hours of audio.", + id: "espnet/yodas" + } + ], + demo: { + inputs: [ + { + filename: "input.flac", + type: "audio" + } + ], + outputs: [ + { + /// GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES I + label: "Transcript", + content: "Going along slushy country roads and speaking to damp audiences in...", + type: "text" + } + ] + }, + metrics: [ + { + description: "", + id: "wer" + }, + { + description: "", + id: "cer" + } + ], + models: [ + { + description: "A powerful ASR model by OpenAI.", + id: "openai/whisper-large-v3" + }, + { + description: "A good generic speech model by MetaAI for fine-tuning.", + id: "facebook/w2v-bert-2.0" + }, + { + description: "An end-to-end model that performs ASR and Speech Translation by MetaAI.", + id: "facebook/seamless-m4t-v2-large" + } + ], + spaces: [ + { + description: "A powerful general-purpose speech recognition application.", + id: "hf-audio/whisper-large-v3" + }, + { + description: "Fastest speech recognition application.", + id: "sanchit-gandhi/whisper-jax" + }, + { + description: "A high quality speech and text translation model by Meta.", + id: "facebook/seamless_m4t" + } + ], + summary: "Automatic Speech Recognition (ASR), also known as Speech to Text (STT), is the task of transcribing a given audio to text. 
It has many applications, such as voice user interfaces.", + widgetModels: ["openai/whisper-large-v3"], + youtubeId: "TksaY_FDgnk" +}; +var data_default3 = taskData3; + +// src/tasks/document-question-answering/data.ts +var taskData4 = { + datasets: [ + { + description: "Largest document understanding dataset.", + id: "HuggingFaceM4/Docmatix" + }, + { + description: "Dataset from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry Documents Library.", + id: "eliolio/docvqa" + } + ], + demo: { + inputs: [ + { + label: "Question", + content: "What is the idea behind the consumer relations efficiency team?", + type: "text" + }, + { + filename: "document-question-answering-input.png", + type: "img" + } + ], + outputs: [ + { + label: "Answer", + content: "Balance cost efficiency with quality customer service", + type: "text" + } + ] + }, + metrics: [ + { + description: "The evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein Similarity (ANLS). This metric is flexible to character regognition errors and compares the predicted answer with the ground truth answer.", + id: "anls" + }, + { + description: "Exact Match is a metric based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. 
Even if only one character is different, Exact Match will be 0", + id: "exact-match" + } + ], + models: [ + { + description: "A LayoutLM model for the document QA task, fine-tuned on DocVQA and SQuAD2.0.", + id: "impira/layoutlm-document-qa" + }, + { + description: "A special model for OCR-free Document QA task.", + id: "microsoft/udop-large" + }, + { + description: "A powerful model for document question answering.", + id: "google/pix2struct-docvqa-large" + } + ], + spaces: [ + { + description: "A robust document question answering application.", + id: "impira/docquery" + }, + { + description: "An application that can answer questions from invoices.", + id: "impira/invoices" + }, + { + description: "An application to compare different document question answering models.", + id: "merve/compare_docvqa_models" + } + ], + summary: "Document Question Answering (also known as Document Visual Question Answering) is the task of answering questions on document images. Document question answering models take a (document, question) pair as input and return an answer in natural language. Models usually rely on multi-modal features, combining text, position of words (bounding-boxes) and image.", + widgetModels: ["impira/layoutlm-document-qa"], + youtubeId: "" +}; +var data_default4 = taskData4; + +// src/tasks/feature-extraction/data.ts +var taskData5 = { + datasets: [ + { + description: "Wikipedia dataset containing cleaned articles of all languages. 
Can be used to train `feature-extraction` models.", + id: "wikipedia" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "India, officially the Republic of India, is a country in South Asia.", + type: "text" + } + ], + outputs: [ + { + table: [ + ["Dimension 1", "Dimension 2", "Dimension 3"], + ["2.583383083343506", "2.757075071334839", "0.9023529887199402"], + ["8.29393482208252", "1.1071064472198486", "2.03399395942688"], + ["-0.7754912972450256", "-1.647324562072754", "-0.6113331913948059"], + ["0.07087723910808563", "1.5942802429199219", "1.4610432386398315"] + ], + type: "tabular" + } + ] + }, + metrics: [], + models: [ + { + description: "A powerful feature extraction model for natural language processing tasks.", + id: "thenlper/gte-large" + }, + { + description: "A strong feature extraction model for retrieval.", + id: "Alibaba-NLP/gte-Qwen1.5-7B-instruct" + } + ], + spaces: [ + { + description: "A leaderboard to rank best feature extraction models..", + id: "mteb/leaderboard" + } + ], + summary: "Feature extraction is the task of extracting features learnt in a model.", + widgetModels: ["facebook/bart-base"] +}; +var data_default5 = taskData5; + +// src/tasks/fill-mask/data.ts +var taskData6 = { + datasets: [ + { + description: "A common dataset that is used to train models for many languages.", + id: "wikipedia" + }, + { + description: "A large English dataset with text crawled from the web.", + id: "c4" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "The barked at me", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "wolf", + score: 0.487 + }, + { + label: "dog", + score: 0.061 + }, + { + label: "cat", + score: 0.058 + }, + { + label: "fox", + score: 0.047 + }, + { + label: "squirrel", + score: 0.025 + } + ] + } + ] + }, + metrics: [ + { + description: "Cross Entropy is a metric that calculates the difference between two probability distributions. 
Each probability distribution is the distribution of predicted words", + id: "cross_entropy" + }, + { + description: "Perplexity is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. Lower perplexity indicates better performance", + id: "perplexity" + } + ], + models: [ + { + description: "A faster and smaller model than the famous BERT model.", + id: "distilbert-base-uncased" + }, + { + description: "A multilingual model trained on 100 languages.", + id: "xlm-roberta-base" + } + ], + spaces: [], + summary: "Masked language modeling is the task of masking some of the words in a sentence and predicting which words should replace those masks. These models are useful when we want to get a statistical understanding of the language in which the model is trained in.", + widgetModels: ["distilroberta-base"], + youtubeId: "mqElG5QJWUg" +}; +var data_default6 = taskData6; + +// src/tasks/image-classification/data.ts +var taskData7 = { + datasets: [ + { + // TODO write proper description + description: "Benchmark dataset used for image classification with images that belong to 100 classes.", + id: "cifar100" + }, + { + // TODO write proper description + description: "Dataset consisting of images of garments.", + id: "fashion_mnist" + } + ], + demo: { + inputs: [ + { + filename: "image-classification-input.jpeg", + type: "img" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Egyptian cat", + score: 0.514 + }, + { + label: "Tabby cat", + score: 0.193 + }, + { + label: "Tiger cat", + score: 0.068 + } + ] + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + description: "A strong image classification model.", + id: "google/vit-base-patch16-224" + }, + { + description: "A robust image classification model.", + id: 
"facebook/deit-base-distilled-patch16-224" + }, + { + description: "A strong image classification model.", + id: "facebook/convnext-large-224" + } + ], + spaces: [ + { + // TO DO: write description + description: "An application that classifies what a given image is about.", + id: "nielsr/perceiver-image-classification" + } + ], + summary: "Image classification is the task of assigning a label or class to an entire image. Images are expected to have only one class for each image. Image classification models take an image as input and return a prediction about which class the image belongs to.", + widgetModels: ["google/vit-base-patch16-224"], + youtubeId: "tjAIM7BOYhw" +}; +var data_default7 = taskData7; + +// src/tasks/image-feature-extraction/data.ts +var taskData8 = { + datasets: [ + { + description: "ImageNet-1K is a image classification dataset in which images are used to train image-feature-extraction models.", + id: "imagenet-1k" + } + ], + demo: { + inputs: [ + { + filename: "mask-generation-input.png", + type: "img" + } + ], + outputs: [ + { + table: [ + ["Dimension 1", "Dimension 2", "Dimension 3"], + ["0.21236686408519745", "1.0919708013534546", "0.8512550592422485"], + ["0.809657871723175", "-0.18544459342956543", "-0.7851548194885254"], + ["1.3103108406066895", "-0.2479034662246704", "-0.9107287526130676"], + ["1.8536205291748047", "-0.36419737339019775", "0.09717650711536407"] + ], + type: "tabular" + } + ] + }, + metrics: [], + models: [ + { + description: "A powerful image feature extraction model.", + id: "timm/vit_large_patch14_dinov2.lvd142m" + }, + { + description: "A strong image feature extraction model.", + id: "google/vit-base-patch16-224-in21k" + }, + { + description: "A robust image feature extraction models.", + id: "facebook/dino-vitb16" + }, + { + description: "Strong image-text-to-text model made for information retrieval from documents.", + id: "vidore/colpali" + } + ], + spaces: [], + summary: "Image feature extraction is the task of 
extracting features learnt in a computer vision model.", + widgetModels: [] +}; +var data_default8 = taskData8; + +// src/tasks/image-to-image/data.ts +var taskData9 = { + datasets: [ + { + description: "Synthetic dataset, for image relighting", + id: "VIDIT" + }, + { + description: "Multiple images of celebrities, used for facial expression translation", + id: "huggan/CelebA-faces" + } + ], + demo: { + inputs: [ + { + filename: "image-to-image-input.jpeg", + type: "img" + } + ], + outputs: [ + { + filename: "image-to-image-output.png", + type: "img" + } + ] + }, + isPlaceholder: false, + metrics: [ + { + description: "Peak Signal to Noise Ratio (PSNR) is an approximation of the human perception, considering the ratio of the absolute intensity with respect to the variations. Measured in dB, a high value indicates a high fidelity.", + id: "PSNR" + }, + { + description: "Structural Similarity Index (SSIM) is a perceptual metric which compares the luminance, contrast and structure of two images. 
The values of SSIM range between -1 and 1, and higher values indicate closer resemblance to the original image.", + id: "SSIM" + }, + { + description: "Inception Score (IS) is an analysis of the labels predicted by an image classification model when presented with a sample of the generated images.", + id: "IS" + } + ], + models: [ + { + description: "A model that enhances images captured in low light conditions.", + id: "keras-io/low-light-image-enhancement" + }, + { + description: "A model that increases the resolution of an image.", + id: "keras-io/super-resolution" + }, + { + description: "A model that creates a set of variations of the input image in the style of DALL-E using Stable Diffusion.", + id: "lambdalabs/sd-image-variations-diffusers" + }, + { + description: "A model that generates images based on segments in the input image and the text prompt.", + id: "mfidabel/controlnet-segment-anything" + }, + { + description: "A model that takes an image and an instruction to edit the image.", + id: "timbrooks/instruct-pix2pix" + } + ], + spaces: [ + { + description: "Image enhancer application for low light.", + id: "keras-io/low-light-image-enhancement" + }, + { + description: "Style transfer application.", + id: "keras-io/neural-style-transfer" + }, + { + description: "An application that generates images based on segment control.", + id: "mfidabel/controlnet-segment-anything" + }, + { + description: "Image generation application that takes image control and text prompt.", + id: "hysts/ControlNet" + }, + { + description: "Colorize any image using this app.", + id: "ioclab/brightness-controlnet" + }, + { + description: "Edit images with instructions.", + id: "timbrooks/instruct-pix2pix" + } + ], + summary: "Image-to-image is the task of transforming a source image to match the characteristics of a target image or a target image domain. 
Any image manipulation and enhancement is possible with image to image models.", + widgetModels: ["lllyasviel/sd-controlnet-canny"], + youtubeId: "" +}; +var data_default9 = taskData9; + +// src/tasks/image-to-text/data.ts +var taskData10 = { + datasets: [ + { + // TODO write proper description + description: "Dataset from 12M image-text of Reddit", + id: "red_caps" + }, + { + // TODO write proper description + description: "Dataset from 3.3M images of Google", + id: "datasets/conceptual_captions" + } + ], + demo: { + inputs: [ + { + filename: "savanna.jpg", + type: "img" + } + ], + outputs: [ + { + label: "Detailed description", + content: "a herd of giraffes and zebras grazing in a field", + type: "text" + } + ] + }, + metrics: [], + models: [ + { + description: "A robust image captioning model.", + id: "Salesforce/blip2-opt-2.7b" + }, + { + description: "A powerful and accurate image-to-text model that can also localize concepts in images.", + id: "microsoft/kosmos-2-patch14-224" + }, + { + description: "A strong optical character recognition model.", + id: "facebook/nougat-base" + }, + { + description: "A powerful model that lets you have a conversation with the image.", + id: "llava-hf/llava-1.5-7b-hf" + } + ], + spaces: [ + { + description: "An application that compares various image captioning models.", + id: "nielsr/comparing-captioning-models" + }, + { + description: "A robust image captioning application.", + id: "flax-community/image-captioning" + }, + { + description: "An application that transcribes handwritings into text.", + id: "nielsr/TrOCR-handwritten" + }, + { + description: "An application that can caption images and answer questions about a given image.", + id: "Salesforce/BLIP" + }, + { + description: "An application that can caption images and answer questions with a conversational agent.", + id: "Salesforce/BLIP2" + }, + { + description: "An image captioning application that demonstrates the effect of noise on captions.", + id: 
"johko/capdec-image-captioning" + } + ], + summary: "Image to text models output a text from a given image. Image captioning or optical character recognition can be considered as the most common applications of image to text.", + widgetModels: ["Salesforce/blip-image-captioning-base"], + youtubeId: "" +}; +var data_default10 = taskData10; + +// src/tasks/image-text-to-text/data.ts +var taskData11 = { + datasets: [ + { + description: "Instructions composed of image and text.", + id: "liuhaotian/LLaVA-Instruct-150K" + }, + { + description: "Conversation turns where questions involve image and text.", + id: "liuhaotian/LLaVA-Pretrain" + }, + { + description: "A collection of datasets made for model fine-tuning.", + id: "HuggingFaceM4/the_cauldron" + }, + { + description: "Screenshots of websites with their HTML/CSS codes.", + id: "HuggingFaceM4/WebSight" + } + ], + demo: { + inputs: [ + { + filename: "image-text-to-text-input.png", + type: "img" + }, + { + label: "Text Prompt", + content: "Describe the position of the bee in detail.", + type: "text" + } + ], + outputs: [ + { + label: "Answer", + content: "The bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned in the center of the flower, with its head and front legs sticking out.", + type: "text" + } + ] + }, + metrics: [], + models: [ + { + description: "Cutting-edge vision language model that can take multiple image inputs.", + id: "facebook/chameleon-7b" + }, + { + description: "Cutting-edge conversational vision language model that can take multiple image inputs.", + id: "HuggingFaceM4/idefics2-8b-chatty" + }, + { + description: "Small yet powerful model.", + id: "vikhyatk/moondream2" + }, + { + description: "Strong image-text-to-text model made to understand documents.", + id: "mPLUG/DocOwl1.5" + }, + { + description: "Strong image-text-to-text model.", + id: "llava-hf/llava-v1.6-mistral-7b-hf" + } + ], + spaces: [ + { + description: "Leaderboard to evaluate vision language models.", + id: "opencompass/open_vlm_leaderboard" + }, + { + description: "Vision language models arena, where models are ranked by votes of users.", + id: "WildVision/vision-arena" + }, + { + description: "Powerful vision-language model assistant.", + id: "liuhaotian/LLaVA-1.6" + }, + { + description: "An application to compare outputs of different vision language models.", + id: "merve/compare_VLMs" + }, + { + description: "An application for document vision language tasks.", + id: "mPLUG/DocOwl" + } + ], + summary: "Image-text-to-text models take in an image and text prompt and output text. These models are also called vision-language models, or VLMs. 
The difference from image-to-text models is that these models take an additional text input, not restricting the model to certain use cases like image captioning, and may also be trained to accept a conversation as input.", + widgetModels: ["microsoft/kosmos-2-patch14-224"], + youtubeId: "" +}; +var data_default11 = taskData11; + +// src/tasks/image-segmentation/data.ts +var taskData12 = { + datasets: [ + { + description: "Scene segmentation dataset.", + id: "scene_parse_150" + } + ], + demo: { + inputs: [ + { + filename: "image-segmentation-input.jpeg", + type: "img" + } + ], + outputs: [ + { + filename: "image-segmentation-output.png", + type: "img" + } + ] + }, + metrics: [ + { + description: "Average Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for each semantic class separately", + id: "Average Precision" + }, + { + description: "Mean Average Precision (mAP) is the overall average of the AP values", + id: "Mean Average Precision" + }, + { + description: "Intersection over Union (IoU) is the overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic classes", + id: "Mean Intersection over Union" + }, + { + description: "AP\u03B1 is the Average Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75", + id: "AP\u03B1" + } + ], + models: [ + { + // TO DO: write description + description: "Solid panoptic segmentation model trained on the COCO 2017 benchmark dataset.", + id: "facebook/detr-resnet-50-panoptic" + }, + { + description: "Semantic segmentation model trained on ADE20k benchmark dataset.", + id: "microsoft/beit-large-finetuned-ade-640-640" + }, + { + description: "Semantic segmentation model trained on ADE20k benchmark dataset with 512x512 resolution.", + id: "nvidia/segformer-b0-finetuned-ade-512-512" + }, + { + description: "Semantic segmentation model trained Cityscapes dataset.", + id: "facebook/mask2former-swin-large-cityscapes-semantic" + }, + { + description: "Panoptic segmentation model trained COCO (common objects) dataset.", + id: "facebook/mask2former-swin-large-coco-panoptic" + } + ], + spaces: [ + { + description: "A semantic segmentation application that can predict unseen instances out of the box.", + id: "facebook/ov-seg" + }, + { + description: "One of the strongest segmentation applications.", + id: "jbrinkma/segment-anything" + }, + { + description: "A semantic segmentation application that predicts human silhouettes.", + id: "keras-io/Human-Part-Segmentation" + }, + { + description: "An instance segmentation application to predict neuronal cell types from microscopy images.", + id: "rashmi/sartorius-cell-instance-segmentation" + }, + { + description: "An application that segments videos.", + id: "ArtGAN/Segment-Anything-Video" + }, + { + description: "An panoptic segmentation application built for outdoor environments.", + id: "segments/panoptic-segment-anything" + } + ], + summary: "Image Segmentation divides an image into segments where each pixel in the image is mapped to an object. 
This task has multiple variants such as instance segmentation, panoptic segmentation and semantic segmentation.", + widgetModels: ["facebook/detr-resnet-50-panoptic"], + youtubeId: "dKE8SIt9C-w" +}; +var data_default12 = taskData12; + +// src/tasks/mask-generation/data.ts +var taskData13 = { + datasets: [], + demo: { + inputs: [ + { + filename: "mask-generation-input.png", + type: "img" + } + ], + outputs: [ + { + filename: "mask-generation-output.png", + type: "img" + } + ] + }, + metrics: [], + models: [ + { + description: "Small yet powerful mask generation model.", + id: "Zigeng/SlimSAM-uniform-50" + }, + { + description: "Very strong mask generation model.", + id: "facebook/sam-vit-huge" + } + ], + spaces: [ + { + description: "An application that combines a mask generation model with an image embedding model for open-vocabulary image segmentation.", + id: "SkalskiP/SAM_and_MetaCLIP" + }, + { + description: "An application that compares the performance of a large and a small mask generation model.", + id: "merve/slimsam" + }, + { + description: "An application based on an improved mask generation model.", + id: "linfanluntan/Grounded-SAM" + }, + { + description: "An application to remove objects from videos using mask generation models.", + id: "SkalskiP/SAM_and_ProPainter" + } + ], + summary: "Mask generation is the task of generating masks that identify a specific object or region of interest in a given image. 
Masks are often used in segmentation tasks, where they provide a precise way to isolate the object of interest for further processing or analysis.", + widgetModels: [], + youtubeId: "" +}; +var data_default13 = taskData13; + +// src/tasks/object-detection/data.ts +var taskData14 = { + datasets: [ + { + description: "Widely used benchmark dataset for multiple vision tasks.", + id: "merve/coco2017" + }, + { + description: "Multi-task computer vision benchmark.", + id: "merve/pascal-voc" + } + ], + demo: { + inputs: [ + { + filename: "object-detection-input.jpg", + type: "img" + } + ], + outputs: [ + { + filename: "object-detection-output.jpg", + type: "img" + } + ] + }, + metrics: [ + { + description: "The Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It is calculated for each class separately", + id: "Average Precision" + }, + { + description: "The Mean Average Precision (mAP) metric is the overall average of the AP values", + id: "Mean Average Precision" + }, + { + description: "The AP\u03B1 metric is the Average Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75", + id: "AP\u03B1" + } + ], + models: [ + { + description: "Solid object detection model trained on the benchmark dataset COCO 2017.", + id: "facebook/detr-resnet-50" + }, + { + description: "Strong object detection model trained on ImageNet-21k dataset.", + id: "microsoft/beit-base-patch16-224-pt22k-ft22k" + }, + { + description: "Fast and accurate object detection model trained on COCO dataset.", + id: "PekingU/rtdetr_r18vd_coco_o365" + } + ], + spaces: [ + { + description: "Leaderboard to compare various object detection models across several metrics.", + id: "hf-vision/object_detection_leaderboard" + }, + { + description: "An application that contains various object detection models to try from.", + id: "Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS" + }, + { + description: "An application that shows multiple cutting edge techniques for object 
detection and tracking.", + id: "kadirnar/torchyolo" + }, + { + description: "An object tracking, segmentation and inpainting application.", + id: "VIPLab/Track-Anything" + }, + { + description: "Very fast object tracking application based on object detection.", + id: "merve/RT-DETR-tracking-coco" + } + ], + summary: "Object Detection models allow users to identify objects of certain defined classes. Object detection models receive an image as input and output the images with bounding boxes and labels on detected objects.", + widgetModels: ["facebook/detr-resnet-50"], + youtubeId: "WdAeKSOpxhw" +}; +var data_default14 = taskData14; + +// src/tasks/depth-estimation/data.ts +var taskData15 = { + datasets: [ + { + description: "NYU Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.", + id: "sayakpaul/nyu_depth_v2" + }, + { + description: "Monocular depth estimation benchmark based without noise and errors.", + id: "depth-anything/DA-2K" + } + ], + demo: { + inputs: [ + { + filename: "depth-estimation-input.jpg", + type: "img" + } + ], + outputs: [ + { + filename: "depth-estimation-output.png", + type: "img" + } + ] + }, + metrics: [], + models: [ + { + description: "Cutting-edge depth estimation model.", + id: "depth-anything/Depth-Anything-V2-Large" + }, + { + description: "A strong monocular depth estimation model.", + id: "Bingxin/Marigold" + }, + { + description: "A metric depth estimation model trained on NYU dataset.", + id: "Intel/zoedepth-nyu" + } + ], + spaces: [ + { + description: "An application that predicts the depth of an image and then reconstruct the 3D model as voxels.", + id: "radames/dpt-depth-estimation-3d-voxels" + }, + { + description: "An application on cutting-edge depth estimation.", + id: "depth-anything/Depth-Anything-V2" + }, + { + description: "An application to try state-of-the-art depth estimation.", + id: "merve/compare_depth_models" + } + ], + summary: "Depth estimation is the task of predicting depth of the 
objects present in an image.", + widgetModels: [""], + youtubeId: "" +}; +var data_default15 = taskData15; + +// src/tasks/placeholder/data.ts +var taskData16 = { + datasets: [], + demo: { + inputs: [], + outputs: [] + }, + isPlaceholder: true, + metrics: [], + models: [], + spaces: [], + summary: "", + widgetModels: [], + youtubeId: void 0, + /// If this is a subtask, link to the most general task ID + /// (eg, text2text-generation is the canonical ID of translation) + canonicalId: void 0 +}; +var data_default16 = taskData16; + +// src/tasks/reinforcement-learning/data.ts +var taskData17 = { + datasets: [ + { + description: "A curation of widely used datasets for Data Driven Deep Reinforcement Learning (D4RL)", + id: "edbeeching/decision_transformer_gym_replay" + } + ], + demo: { + inputs: [ + { + label: "State", + content: "Red traffic light, pedestrians are about to pass.", + type: "text" + } + ], + outputs: [ + { + label: "Action", + content: "Stop the car.", + type: "text" + }, + { + label: "Next State", + content: "Yellow light, pedestrians have crossed.", + type: "text" + } + ] + }, + metrics: [ + { + description: "Accumulated reward across all time steps discounted by a factor that ranges between 0 and 1 and determines how much the agent optimizes for future relative to immediate rewards. Measures how good is the policy ultimately found by a given algorithm considering uncertainty over the future.", + id: "Discounted Total Reward" + }, + { + description: "Average return obtained after running the policy for a certain number of evaluation episodes. As opposed to total reward, mean reward considers how much reward a given algorithm receives while learning.", + id: "Mean Reward" + }, + { + description: "Measures how good a given algorithm is after a predefined time. Some algorithms may be guaranteed to converge to optimal behavior across many time steps. 
However, an agent that reaches an acceptable level of optimality after a given time horizon may be preferable to one that ultimately reaches optimality but takes a long time.", + id: "Level of Performance After Some Time" + } + ], + models: [ + { + description: "A Reinforcement Learning model trained on expert data from the Gym Hopper environment", + id: "edbeeching/decision-transformer-gym-hopper-expert" + }, + { + description: "A PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and the RL Zoo.", + id: "HumanCompatibleAI/ppo-seals-CartPole-v0" + } + ], + spaces: [ + { + description: "An application for a cute puppy agent learning to catch a stick.", + id: "ThomasSimonini/Huggy" + }, + { + description: "An application to play Snowball Fight with a reinforcement learning agent.", + id: "ThomasSimonini/SnowballFight" + } + ], + summary: "Reinforcement learning is the computational approach of learning from action by interacting with an environment through trial and error and receiving rewards (negative or positive) as feedback", + widgetModels: [], + youtubeId: "q0BiUn5LiBc" +}; +var data_default17 = taskData17; + +// src/tasks/question-answering/data.ts +var taskData18 = { + datasets: [ + { + // TODO write proper description + description: "A famous question answering dataset based on English articles from Wikipedia.", + id: "squad_v2" + }, + { + // TODO write proper description + description: "A dataset of aggregated anonymized actual queries issued to the Google search engine.", + id: "natural_questions" + } + ], + demo: { + inputs: [ + { + label: "Question", + content: "Which name is also used to describe the Amazon rainforest in English?", + type: "text" + }, + { + label: "Context", + content: "The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle", + type: "text" + } + ], + outputs: [ + { + label: "Answer", + content: "Amazonia", + type: "text" + } + ] + }, + metrics: [ + { + description: "Exact Match is a metric 
based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. Even if only one character is different, Exact Match will be 0", + id: "exact-match" + }, + { + description: " The F1-Score metric is useful if we value both false positives and false negatives equally. The F1-Score is calculated on each word in the predicted sequence against the correct answer", + id: "f1" + } + ], + models: [ + { + description: "A robust baseline model for most question answering domains.", + id: "deepset/roberta-base-squad2" + }, + { + description: "A special model that can answer questions from tables!", + id: "google/tapas-base-finetuned-wtq" + } + ], + spaces: [ + { + description: "An application that can answer a long question from Wikipedia.", + id: "deepset/wikipedia-assistant" + } + ], + summary: "Question Answering models can retrieve the answer to a question from a given text, which is useful for searching for an answer in a document. 
Some question answering models can generate answers without context!", + widgetModels: ["deepset/roberta-base-squad2"], + youtubeId: "ajPx5LwJD-I" +}; +var data_default18 = taskData18; + +// src/tasks/sentence-similarity/data.ts +var taskData19 = { + datasets: [ + { + description: "Bing queries with relevant passages from various web sources.", + id: "ms_marco" + } + ], + demo: { + inputs: [ + { + label: "Source sentence", + content: "Machine learning is so easy.", + type: "text" + }, + { + label: "Sentences to compare to", + content: "Deep learning is so straightforward.", + type: "text" + }, + { + label: "", + content: "This is so difficult, like rocket science.", + type: "text" + }, + { + label: "", + content: "I can't believe how much I struggled with this.", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Deep learning is so straightforward.", + score: 0.623 + }, + { + label: "This is so difficult, like rocket science.", + score: 0.413 + }, + { + label: "I can't believe how much I struggled with this.", + score: 0.256 + } + ] + } + ] + }, + metrics: [ + { + description: "Reciprocal Rank is a measure used to rank the relevancy of documents given a set of documents. Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal Rank is 1", + id: "Mean Reciprocal Rank" + }, + { + description: "The similarity of the embeddings is evaluated mainly on cosine similarity. It is calculated as the cosine of the angle between two vectors. 
It is particularly useful when your texts are not the same length", + id: "Cosine Similarity" + } + ], + models: [ + { + description: "This model works well for sentences and paragraphs and can be used for clustering/grouping and semantic searches.", + id: "sentence-transformers/all-mpnet-base-v2" + }, + { + description: "A multilingual model trained for FAQ retrieval.", + id: "clips/mfaq" + } + ], + spaces: [ + { + description: "An application that leverages sentence similarity to answer questions from YouTube videos.", + id: "Gradio-Blocks/Ask_Questions_To_YouTube_Videos" + }, + { + description: "An application that retrieves relevant PubMed abstracts for a given online article which can be used as further references.", + id: "Gradio-Blocks/pubmed-abstract-retriever" + }, + { + description: "An application that leverages sentence similarity to summarize text.", + id: "nickmuchi/article-text-summarizer" + }, + { + description: "A guide that explains how Sentence Transformers can be used for semantic search.", + id: "sentence-transformers/Sentence_Transformers_for_semantic_search" + } + ], + summary: "Sentence Similarity is the task of determining how similar two texts are. Sentence similarity models convert input texts into vectors (embeddings) that capture semantic information and calculate how close (similar) they are between them. This task is particularly useful for information retrieval and clustering/grouping.", + widgetModels: ["sentence-transformers/all-MiniLM-L6-v2"], + youtubeId: "VCZq5AkbNEU" +}; +var data_default19 = taskData19; + +// src/tasks/summarization/data.ts +var taskData20 = { + canonicalId: "text2text-generation", + datasets: [ + { + description: "News articles in five different languages along with their summaries. Widely used for benchmarking multilingual summarization models.", + id: "mlsum" + }, + { + description: "English conversations and their summaries. 
Useful for benchmarking conversational agents.", + id: "samsum" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. It was the first structure to reach a height of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.", + type: "text" + } + ], + outputs: [ + { + label: "Output", + content: "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building. It was the first structure to reach a height of 300 metres.", + type: "text" + } + ] + }, + metrics: [ + { + description: "The generated sequence is compared against its summary, and the overlap of tokens are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.", + id: "rouge" + } + ], + models: [ + { + description: "A strong summarization model trained on English news articles. 
Excels at generating factual summaries.", + id: "facebook/bart-large-cnn" + }, + { + description: "A summarization model trained on medical articles.", + id: "google/bigbird-pegasus-large-pubmed" + } + ], + spaces: [ + { + description: "An application that can summarize long paragraphs.", + id: "pszemraj/summarize-long-text" + }, + { + description: "A much needed summarization application for terms and conditions.", + id: "ml6team/distilbart-tos-summarizer-tosdr" + }, + { + description: "An application that summarizes long documents.", + id: "pszemraj/document-summarization" + }, + { + description: "An application that can detect errors in abstractive summarization.", + id: "ml6team/post-processing-summarization" + } + ], + summary: "Summarization is the task of producing a shorter version of a document while preserving its important information. Some models can extract text from the original input, while other models can generate entirely new text.", + widgetModels: ["sshleifer/distilbart-cnn-12-6"], + youtubeId: "yHnr5Dk2zCI" +}; +var data_default20 = taskData20; + +// src/tasks/table-question-answering/data.ts +var taskData21 = { + datasets: [ + { + description: "The WikiTableQuestions dataset is a large-scale dataset for the task of question answering on semi-structured tables.", + id: "wikitablequestions" + }, + { + description: "WikiSQL is a dataset of 80654 hand-annotated examples of questions and SQL queries distributed across 24241 tables from Wikipedia.", + id: "wikisql" + } + ], + demo: { + inputs: [ + { + table: [ + ["Rank", "Name", "No.of reigns", "Combined days"], + ["1", "lou Thesz", "3", "3749"], + ["2", "Ric Flair", "8", "3103"], + ["3", "Harley Race", "7", "1799"] + ], + type: "tabular" + }, + { label: "Question", content: "What is the number of reigns for Harley Race?", type: "text" } + ], + outputs: [{ label: "Result", content: "7", type: "text" }] + }, + metrics: [ + { + description: "Checks whether the predicted answer(s) is the same as the 
ground-truth answer(s).", + id: "Denotation Accuracy" + } + ], + models: [ + { + description: "A table question answering model that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL query on a given table.", + id: "microsoft/tapex-base" + }, + { + description: "A robust table question answering model.", + id: "google/tapas-base-finetuned-wtq" + } + ], + spaces: [ + { + description: "An application that answers questions based on table CSV files.", + id: "katanaml/table-query" + } + ], + summary: "Table Question Answering (Table QA) is the answering a question about an information on a given table.", + widgetModels: ["google/tapas-base-finetuned-wtq"] +}; +var data_default21 = taskData21; + +// src/tasks/tabular-classification/data.ts +var taskData22 = { + datasets: [ + { + description: "A comprehensive curation of datasets covering all benchmarks.", + id: "inria-soda/tabular-benchmark" + } + ], + demo: { + inputs: [ + { + table: [ + ["Glucose", "Blood Pressure ", "Skin Thickness", "Insulin", "BMI"], + ["148", "72", "35", "0", "33.6"], + ["150", "50", "30", "0", "35.1"], + ["141", "60", "29", "1", "39.2"] + ], + type: "tabular" + } + ], + outputs: [ + { + table: [["Diabetes"], ["1"], ["1"], ["0"]], + type: "tabular" + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + description: "Breast cancer prediction model based on decision trees.", + id: "scikit-learn/cancer-prediction-trees" + } + ], + spaces: [ + { + description: "An application that can predict defective products on a production line.", + id: "scikit-learn/tabular-playground" + }, + { + description: "An application that compares various tabular classification techniques on different datasets.", + id: "scikit-learn/classification" + } + ], + summary: "Tabular classification is the task of classifying a target category 
(a group) based on set of attributes.", + widgetModels: ["scikit-learn/tabular-playground"], + youtubeId: "" +}; +var data_default22 = taskData22; + +// src/tasks/tabular-regression/data.ts +var taskData23 = { + datasets: [ + { + description: "A comprehensive curation of datasets covering all benchmarks.", + id: "inria-soda/tabular-benchmark" + } + ], + demo: { + inputs: [ + { + table: [ + ["Car Name", "Horsepower", "Weight"], + ["ford torino", "140", "3,449"], + ["amc hornet", "97", "2,774"], + ["toyota corolla", "65", "1,773"] + ], + type: "tabular" + } + ], + outputs: [ + { + table: [["MPG (miles per gallon)"], ["17"], ["18"], ["31"]], + type: "tabular" + } + ] + }, + metrics: [ + { + description: "", + id: "mse" + }, + { + description: "Coefficient of determination (or R-squared) is a measure of how well the model fits the data. Higher R-squared is considered a better fit.", + id: "r-squared" + } + ], + models: [ + { + description: "Fish weight prediction based on length measurements and species.", + id: "scikit-learn/Fish-Weight" + } + ], + spaces: [ + { + description: "An application that can predict weight of a fish based on set of attributes.", + id: "scikit-learn/fish-weight-prediction" + } + ], + summary: "Tabular regression is the task of predicting a numerical value given a set of attributes.", + widgetModels: ["scikit-learn/Fish-Weight"], + youtubeId: "" +}; +var data_default23 = taskData23; + +// src/tasks/text-to-image/data.ts +var taskData24 = { + datasets: [ + { + description: "RedCaps is a large-scale dataset of 12M image-text pairs collected from Reddit.", + id: "red_caps" + }, + { + description: "Conceptual Captions is a dataset consisting of ~3.3M images annotated with captions.", + id: "conceptual_captions" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "A city above clouds, pastel colors, Victorian style", + type: "text" + } + ], + outputs: [ + { + filename: "image.jpeg", + type: "img" + } + ] + }, + metrics: [ + { + 
description: "The Inception Score (IS) measure assesses diversity and meaningfulness. It uses a generated image sample to predict its label. A higher score signifies more diverse and meaningful images.", + id: "IS" + }, + { + description: "The Fr\xE9chet Inception Distance (FID) calculates the distance between distributions between synthetic and real samples. A lower FID score indicates better similarity between the distributions of real and generated images.", + id: "FID" + }, + { + description: "R-precision assesses how the generated image aligns with the provided text description. It uses the generated images as queries to retrieve relevant text descriptions. The top 'r' relevant descriptions are selected and used to calculate R-precision as r/R, where 'R' is the number of ground truth descriptions associated with the generated images. A higher R-precision value indicates a better model.", + id: "R-Precision" + } + ], + models: [ + { + description: "One of the most powerful image generation models that can generate realistic outputs.", + id: "stabilityai/stable-diffusion-xl-base-1.0" + }, + { + description: "A powerful yet fast image generation model.", + id: "latent-consistency/lcm-lora-sdxl" + }, + { + description: "A very fast text-to-image model.", + id: "ByteDance/SDXL-Lightning" + }, + { + description: "A powerful text-to-image model.", + id: "stabilityai/stable-diffusion-3-medium-diffusers" + } + ], + spaces: [ + { + description: "A powerful text-to-image application.", + id: "stabilityai/stable-diffusion-3-medium" + }, + { + description: "A text-to-image application to generate comics.", + id: "jbilcke-hf/ai-comic-factory" + }, + { + description: "A text-to-image application that can generate coherent text inside the image.", + id: "DeepFloyd/IF" + }, + { + description: "A powerful yet very fast image generation application.", + id: "latent-consistency/lcm-lora-for-sdxl" + }, + { + description: "A gallery to explore various text-to-image models.", + id: 
"multimodalart/LoraTheExplorer" + }, + { + description: "An application for `text-to-image`, `image-to-image` and image inpainting.", + id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI" + }, + { + description: "An application to generate realistic images given photos of a person and a prompt.", + id: "InstantX/InstantID" + } + ], + summary: "Generates images from input text. These models can be used to generate and modify images based on text prompts.", + widgetModels: ["CompVis/stable-diffusion-v1-4"], + youtubeId: "" +}; +var data_default24 = taskData24; + +// src/tasks/text-to-speech/data.ts +var taskData25 = { + canonicalId: "text-to-audio", + datasets: [ + { + description: "10K hours of multi-speaker English dataset.", + id: "parler-tts/mls_eng_10k" + }, + { + description: "Multi-speaker English dataset.", + id: "LibriTTS" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "I love audio models on the Hub!", + type: "text" + } + ], + outputs: [ + { + filename: "audio.wav", + type: "audio" + } + ] + }, + metrics: [ + { + description: "The Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated speech.", + id: "mel cepstral distortion" + } + ], + models: [ + { + description: "A powerful TTS model.", + id: "suno/bark" + }, + { + description: "A massively multi-lingual TTS model.", + id: "facebook/mms-tts" + }, + { + description: "A prompt based, powerful TTS model.", + id: "parler-tts/parler_tts_mini_v0.1" + } + ], + spaces: [ + { + description: "An application for generate highly realistic, multilingual speech.", + id: "suno/bark" + }, + { + description: "XTTS is a Voice generation model that lets you clone voices into different languages.", + id: "coqui/xtts" + }, + { + description: "An application that synthesizes speech for diverse speaker prompts.", + id: "parler-tts/parler_tts_mini" + } + ], + summary: "Text-to-Speech (TTS) is the task of generating natural sounding speech given text input. 
TTS models can be extended to have a single model that generates speech for multiple speakers and multiple languages.", + widgetModels: ["suno/bark"], + youtubeId: "NW62DpzJ274" +}; +var data_default25 = taskData25; + +// src/tasks/token-classification/data.ts +var taskData26 = { + datasets: [ + { + description: "A widely used dataset useful to benchmark named entity recognition models.", + id: "conll2003" + }, + { + description: "A multilingual dataset of Wikipedia articles annotated for named entity recognition in over 150 different languages.", + id: "wikiann" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "My name is Omar and I live in Z\xFCrich.", + type: "text" + } + ], + outputs: [ + { + text: "My name is Omar and I live in Z\xFCrich.", + tokens: [ + { + type: "PERSON", + start: 11, + end: 15 + }, + { + type: "GPE", + start: 30, + end: 36 + } + ], + type: "text-with-tokens" + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + description: "A robust performance model to identify people, locations, organizations and names of miscellaneous entities.", + id: "dslim/bert-base-NER" + }, + { + description: "Flair models are typically the state of the art in named entity recognition tasks.", + id: "flair/ner-english" + } + ], + spaces: [ + { + description: "An application that can recognizes entities, extracts noun chunks and recognizes various linguistic features of each token.", + id: "spacy/gradio_pipeline_visualizer" + } + ], + summary: "Token classification is a natural language understanding task in which a label is assigned to some tokens in a text. Some popular token classification subtasks are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. 
NER models could be trained to identify specific entities in a text, such as dates, individuals and places; and PoS tagging would identify, for example, which words in a text are verbs, nouns, and punctuation marks.", + widgetModels: ["dslim/bert-base-NER"], + youtubeId: "wVHdVlPScxA" +}; +var data_default26 = taskData26; + +// src/tasks/translation/data.ts +var taskData27 = { + canonicalId: "text2text-generation", + datasets: [ + { + description: "A dataset of copyright-free books translated into 16 different languages.", + id: "opus_books" + }, + { + description: "An example of translation between programming languages. This dataset consists of functions in Java and C#.", + id: "code_x_glue_cc_code_to_code_trans" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "My name is Omar and I live in Z\xFCrich.", + type: "text" + } + ], + outputs: [ + { + label: "Output", + content: "Mein Name ist Omar und ich wohne in Z\xFCrich.", + type: "text" + } + ] + }, + metrics: [ + { + description: "BLEU score is calculated by counting the number of shared single or subsequent tokens between the generated sequence and the reference. Subsequent n tokens are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram refers to token pairs and n-grams refer to n subsequent tokens. 
The score ranges from 0 to 1, where 1 means the translation perfectly matched and 0 did not match at all", + id: "bleu" + }, + { + description: "", + id: "sacrebleu" + } + ], + models: [ + { + description: "A model that translates from English to French.", + id: "Helsinki-NLP/opus-mt-en-fr" + }, + { + description: "A general-purpose Transformer that can be used to translate from English to German, French, or Romanian.", + id: "t5-base" + } + ], + spaces: [ + { + description: "An application that can translate between 100 languages.", + id: "Iker/Translate-100-languages" + }, + { + description: "An application that can translate between English, Spanish and Hindi.", + id: "EuroPython2022/Translate-with-Bloom" + } + ], + summary: "Translation is the task of converting text from one language to another.", + widgetModels: ["t5-small"], + youtubeId: "1JvfrvZgi6c" +}; +var data_default27 = taskData27; + +// src/tasks/text-classification/data.ts +var taskData28 = { + datasets: [ + { + description: "A widely used dataset used to benchmark multiple variants of text classification.", + id: "glue" + }, + { + description: "A text classification dataset used to benchmark natural language inference models", + id: "snli" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "I love Hugging Face!", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "POSITIVE", + score: 0.9 + }, + { + label: "NEUTRAL", + score: 0.1 + }, + { + label: "NEGATIVE", + score: 0 + } + ] + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "The F1 metric is the harmonic mean of the precision and recall. 
It can be calculated as: F1 = 2 * (precision * recall) / (precision + recall)", + id: "f1" + } + ], + models: [ + { + description: "A robust model trained for sentiment analysis.", + id: "distilbert-base-uncased-finetuned-sst-2-english" + }, + { + description: "Multi-genre natural language inference model.", + id: "roberta-large-mnli" + } + ], + spaces: [ + { + description: "An application that can classify financial sentiment.", + id: "IoannisTr/Tech_Stocks_Trading_Assistant" + }, + { + description: "A dashboard that contains various text classification tasks.", + id: "miesnerjacob/Multi-task-NLP" + }, + { + description: "An application that analyzes user reviews in healthcare.", + id: "spacy/healthsea-demo" + } + ], + summary: "Text Classification is the task of assigning a label or class to a given text. Some use cases are sentiment analysis, natural language inference, and assessing grammatical correctness.", + widgetModels: ["distilbert-base-uncased-finetuned-sst-2-english"], + youtubeId: "leNG9fN9FQU" +}; +var data_default28 = taskData28; + +// src/tasks/text-generation/data.ts +var taskData29 = { + datasets: [ + { + description: "A large multilingual dataset of text crawled from the web.", + id: "mc4" + }, + { + description: "Diverse open-source data consisting of 22 smaller high-quality datasets. It was used to train GPT-Neo.", + id: "the_pile" + }, + { + description: "Truly open-source, curated and cleaned dialogue dataset.", + id: "HuggingFaceH4/ultrachat_200k" + }, + { + description: "An instruction dataset with preference ratings on responses.", + id: "openbmb/UltraFeedback" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "Once upon a time,", + type: "text" + } + ], + outputs: [ + { + label: "Output", + content: "Once upon a time, we knew that our ancestors were on the verge of extinction. The great explorers and poets of the Old World, from Alexander the Great to Chaucer, are dead and gone. 
A good many of our ancient explorers and poets have", + type: "text" + } + ] + }, + metrics: [ + { + description: "Cross Entropy is a metric that calculates the difference between two probability distributions. Each probability distribution is the distribution of predicted words", + id: "Cross Entropy" + }, + { + description: "The Perplexity metric is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. Lower perplexity indicates better performance", + id: "Perplexity" + } + ], + models: [ + { + description: "A large language model trained for text generation.", + id: "bigscience/bloom-560m" + }, + { + description: "A large code generation model that can generate code in 80+ languages.", + id: "bigcode/starcoder" + }, + { + description: "A very powerful text generation model.", + id: "mistralai/Mixtral-8x7B-Instruct-v0.1" + }, + { + description: "Small yet powerful text generation model.", + id: "microsoft/phi-2" + }, + { + description: "A very powerful model that can chat, do mathematical reasoning and write code.", + id: "openchat/openchat-3.5-0106" + }, + { + description: "Very strong yet small assistant model.", + id: "HuggingFaceH4/zephyr-7b-beta" + }, + { + description: "Very strong open-source large language model.", + id: "meta-llama/Llama-2-70b-hf" + } + ], + spaces: [ + { + description: "A leaderboard to compare different open-source text generation models based on various benchmarks.", + id: "open-llm-leaderboard/open_llm_leaderboard" + }, + { + description: "An text generation based application based on a very powerful LLaMA2 model.", + id: "ysharma/Explore_llamav2_with_TGI" + }, + { + description: "An text generation based application to converse with Zephyr model.", + id: "HuggingFaceH4/zephyr-chat" + }, + { + description: "An text generation application that combines OpenAI and Hugging Face models.", + id: "microsoft/HuggingGPT" + }, + { + description: "An chatbot to converse with a very 
powerful text generation model.", + id: "mlabonne/phixtral-chat" + } + ], + summary: "Generating text is the task of generating new text given another text. These models can, for example, fill in incomplete text or paraphrase.", + widgetModels: ["HuggingFaceH4/zephyr-7b-beta"], + youtubeId: "Vpjb1lu0MDk" +}; +var data_default29 = taskData29; + +// src/tasks/text-to-video/data.ts +var taskData30 = { + datasets: [ + { + description: "Microsoft Research Video to Text is a large-scale dataset for open domain video captioning", + id: "iejMac/CLIP-MSR-VTT" + }, + { + description: "UCF101 Human Actions dataset consists of 13,320 video clips from YouTube, with 101 classes.", + id: "quchenyuan/UCF101-ZIP" + }, + { + description: "A high-quality dataset for human action recognition in YouTube videos.", + id: "nateraw/kinetics" + }, + { + description: "A dataset of video clips of humans performing pre-defined basic actions with everyday objects.", + id: "HuggingFaceM4/something_something_v2" + }, + { + description: "This dataset consists of text-video pairs and contains noisy samples with irrelevant video descriptions", + id: "HuggingFaceM4/webvid" + }, + { + description: "A dataset of short Flickr videos for the temporal localization of events with descriptions.", + id: "iejMac/CLIP-DiDeMo" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "Darth Vader is surfing on the waves.", + type: "text" + } + ], + outputs: [ + { + filename: "text-to-video-output.gif", + type: "img" + } + ] + }, + metrics: [ + { + description: "Inception Score uses an image classification model that predicts class labels and evaluates how distinct and diverse the images are. A higher score indicates better video generation.", + id: "is" + }, + { + description: "Frechet Inception Distance uses an image classification model to obtain image embeddings. The metric compares mean and standard deviation of the embeddings of real and generated images. 
A smaller score indicates better video generation.", + id: "fid" + }, + { + description: "Frechet Video Distance uses a model that captures coherence for changes in frames and the quality of each frame. A smaller score indicates better video generation.", + id: "fvd" + }, + { + description: "CLIPSIM measures similarity between video frames and text using an image-text similarity model. A higher score indicates better video generation.", + id: "clipsim" + } + ], + models: [ + { + description: "A strong model for video generation.", + id: "Vchitect/LaVie" + }, + { + description: "A robust model for text-to-video generation.", + id: "damo-vilab/text-to-video-ms-1.7b" + }, + { + description: "A text-to-video generation model with high quality and smooth outputs.", + id: "hotshotco/Hotshot-XL" + } + ], + spaces: [ + { + description: "An application that generates video from text.", + id: "fffiloni/zeroscope" + }, + { + description: "An application that generates video from image and text.", + id: "Vchitect/LaVie" + }, + { + description: "An application that generates videos from text and provides multi-model support.", + id: "ArtGAN/Video-Diffusion-WebUI" + } + ], + summary: "Text-to-video models can be used in any application that requires generating consistent sequence of images from text. 
", + widgetModels: [], + youtubeId: void 0 +}; +var data_default30 = taskData30; + +// src/tasks/unconditional-image-generation/data.ts +var taskData31 = { + datasets: [ + { + description: "The CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with 600 images per class.", + id: "cifar100" + }, + { + description: "Multiple images of celebrities, used for facial expression translation.", + id: "CelebA" + } + ], + demo: { + inputs: [ + { + label: "Seed", + content: "42", + type: "text" + }, + { + label: "Number of images to generate:", + content: "4", + type: "text" + } + ], + outputs: [ + { + filename: "unconditional-image-generation-output.jpeg", + type: "img" + } + ] + }, + metrics: [ + { + description: "The inception score (IS) evaluates the quality of generated images. It measures the diversity of the generated images (the model predictions are evenly distributed across all possible labels) and their 'distinction' or 'sharpness' (the model confidently predicts a single label for each image).", + id: "Inception score (IS)" + }, + { + description: "The Fr\xE9chet Inception Distance (FID) evaluates the quality of images created by a generative model by calculating the distance between feature vectors for real and generated images.", + id: "Fre\u0107het Inception Distance (FID)" + } + ], + models: [ + { + description: "High-quality image generation model trained on the CIFAR-10 dataset. It synthesizes images of the ten classes presented in the dataset using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.", + id: "google/ddpm-cifar10-32" + }, + { + description: "High-quality image generation model trained on the 256x256 CelebA-HQ dataset. 
It synthesizes images of faces using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.", + id: "google/ddpm-celebahq-256" + } + ], + spaces: [ + { + description: "An application that can generate realistic faces.", + id: "CompVis/celeba-latent-diffusion" + } + ], + summary: "Unconditional image generation is the task of generating images with no condition in any context (like a prompt text or another image). Once trained, the model will create images that resemble its training data distribution.", + widgetModels: [""], + // TODO: Add related video + youtubeId: "" +}; +var data_default31 = taskData31; + +// src/tasks/video-classification/data.ts +var taskData32 = { + datasets: [ + { + // TODO write proper description + description: "Benchmark dataset used for video classification with videos that belong to 400 classes.", + id: "kinetics400" + } + ], + demo: { + inputs: [ + { + filename: "video-classification-input.gif", + type: "img" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Playing Guitar", + score: 0.514 + }, + { + label: "Playing Tennis", + score: 0.193 + }, + { + label: "Cooking", + score: 0.068 + } + ] + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + // TO DO: write description + description: "Strong Video Classification model trained on the Kinects 400 dataset.", + id: "MCG-NJU/videomae-base-finetuned-kinetics" + }, + { + // TO DO: write description + description: "Strong Video Classification model trained on the Kinects 400 dataset.", + id: "microsoft/xclip-base-patch32" + } + ], + spaces: [ + { + description: "An application that classifies video at different timestamps.", + id: "nateraw/lavila" + }, + { + description: "An application that classifies video.", + id: 
"fcakyon/video-classification" + } + ], + summary: "Video classification is the task of assigning a label or class to an entire video. Videos are expected to have only one class for each video. Video classification models take a video as input and return a prediction about which class the video belongs to.", + widgetModels: [], + youtubeId: "" +}; +var data_default32 = taskData32; + +// src/tasks/visual-question-answering/data.ts +var taskData33 = { + datasets: [ + { + description: "A widely used dataset containing questions (with answers) about images.", + id: "Graphcore/vqa" + }, + { + description: "A dataset to benchmark visual reasoning based on text in images.", + id: "textvqa" + } + ], + demo: { + inputs: [ + { + filename: "elephant.jpeg", + type: "img" + }, + { + label: "Question", + content: "What is in this image?", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "elephant", + score: 0.97 + }, + { + label: "elephants", + score: 0.06 + }, + { + label: "animal", + score: 3e-3 + } + ] + } + ] + }, + isPlaceholder: false, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "Measures how much a predicted answer differs from the ground truth based on the difference in their semantic meaning.", + id: "wu-palmer similarity" + } + ], + models: [ + { + description: "A visual question answering model trained to convert charts and plots to text.", + id: "google/deplot" + }, + { + description: "A visual question answering model trained for mathematical reasoning and chart derendering from images.", + id: "google/matcha-base " + }, + { + description: "A strong visual question answering that answers questions from book covers.", + id: "google/pix2struct-ocrvqa-large" + } + ], + spaces: [ + { + description: "An application that compares visual question answering models across different tasks.", + id: "merve/pix2struct" + }, + { + description: "An application that can answer questions based on images.", + id: 
"nielsr/vilt-vqa" + }, + { + description: "An application that can caption images and answer questions about a given image. ", + id: "Salesforce/BLIP" + }, + { + description: "An application that can caption images and answer questions about a given image. ", + id: "vumichien/Img2Prompt" + } + ], + summary: "Visual Question Answering is the task of answering open-ended questions based on an image. They output natural language responses to natural language questions.", + widgetModels: ["dandelin/vilt-b32-finetuned-vqa"], + youtubeId: "" +}; +var data_default33 = taskData33; + +// src/tasks/zero-shot-classification/data.ts +var taskData34 = { + datasets: [ + { + description: "A widely used dataset used to benchmark multiple variants of text classification.", + id: "glue" + }, + { + description: "The Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced collection of 433k sentence pairs annotated with textual entailment information.", + id: "MultiNLI" + }, + { + description: "FEVER is a publicly available dataset for fact extraction and verification against textual sources.", + id: "FEVER" + } + ], + demo: { + inputs: [ + { + label: "Text Input", + content: "Dune is the best movie ever.", + type: "text" + }, + { + label: "Candidate Labels", + content: "CINEMA, ART, MUSIC", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "CINEMA", + score: 0.9 + }, + { + label: "ART", + score: 0.1 + }, + { + label: "MUSIC", + score: 0 + } + ] + } + ] + }, + metrics: [], + models: [ + { + description: "Powerful zero-shot text classification model", + id: "facebook/bart-large-mnli" + } + ], + spaces: [], + summary: "Zero-shot text classification is a task in natural language processing where a model is trained on a set of labeled examples but is then able to classify new examples from previously unseen classes.", + widgetModels: ["facebook/bart-large-mnli"] +}; +var data_default34 = taskData34; + +// 
src/tasks/zero-shot-image-classification/data.ts +var taskData35 = { + datasets: [ + { + // TODO write proper description + description: "", + id: "" + } + ], + demo: { + inputs: [ + { + filename: "image-classification-input.jpeg", + type: "img" + }, + { + label: "Classes", + content: "cat, dog, bird", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Cat", + score: 0.664 + }, + { + label: "Dog", + score: 0.329 + }, + { + label: "Bird", + score: 8e-3 + } + ] + } + ] + }, + metrics: [ + { + description: "Computes the number of times the correct label appears in top K labels predicted", + id: "top-K accuracy" + } + ], + models: [ + { + description: "Robust image classification model trained on publicly available image-caption data.", + id: "openai/clip-vit-base-patch16" + }, + { + description: "Strong zero-shot image classification model.", + id: "google/siglip-base-patch16-224" + }, + { + description: "Small yet powerful zero-shot image classification model that can run on edge devices.", + id: "apple/MobileCLIP-S1-OpenCLIP" + }, + { + description: "Strong image classification model for biomedical domain.", + id: "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224" + } + ], + spaces: [ + { + description: "An application that leverages zero-shot image classification to find best captions to generate an image. ", + id: "pharma/CLIP-Interrogator" + }, + { + description: "An application to compare different zero-shot image classification models. 
", + id: "merve/compare_clip_siglip" + } + ], + summary: "Zero-shot image classification is the task of classifying previously unseen classes during training of a model.", + widgetModels: ["openai/clip-vit-large-patch14-336"], + youtubeId: "" +}; +var data_default35 = taskData35; + +// src/tasks/zero-shot-object-detection/data.ts +var taskData36 = { + datasets: [], + demo: { + inputs: [ + { + filename: "zero-shot-object-detection-input.jpg", + type: "img" + }, + { + label: "Classes", + content: "cat, dog, bird", + type: "text" + } + ], + outputs: [ + { + filename: "zero-shot-object-detection-output.jpg", + type: "img" + } + ] + }, + metrics: [ + { + description: "The Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It is calculated for each class separately", + id: "Average Precision" + }, + { + description: "The Mean Average Precision (mAP) metric is the overall average of the AP values", + id: "Mean Average Precision" + }, + { + description: "The AP\u03B1 metric is the Average Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75", + id: "AP\u03B1" + } + ], + models: [ + { + description: "Solid zero-shot object detection model.", + id: "IDEA-Research/grounding-dino-base" + }, + { + description: "Cutting-edge zero-shot object detection model.", + id: "google/owlv2-base-patch16-ensemble" + } + ], + spaces: [ + { + description: "A demo to try the state-of-the-art zero-shot object detection model, OWLv2.", + id: "merve/owlv2" + }, + { + description: "A demo that combines a zero-shot object detection and mask generation model for zero-shot segmentation.", + id: "merve/OWLSAM" + } + ], + summary: "Zero-shot object detection is a computer vision task to detect objects and their classes in images, without any prior training or knowledge of the classes. 
Zero-shot object detection models receive an image as input, as well as a list of candidate classes, and output the bounding boxes and labels where the objects have been detected.", + widgetModels: [], + youtubeId: "" +}; +var data_default36 = taskData36; + +// src/tasks/image-to-3d/data.ts +var taskData37 = { + datasets: [ + { + description: "A large dataset of over 10 million 3D objects.", + id: "allenai/objaverse-xl" + }, + { + description: "A dataset of isolated object images for evaluating image-to-3D models.", + id: "dylanebert/iso3d" + } + ], + demo: { + inputs: [ + { + filename: "image-to-3d-image-input.png", + type: "img" + } + ], + outputs: [ + { + label: "Result", + content: "image-to-3d-3d-output-filename.glb", + type: "text" + } + ] + }, + metrics: [], + models: [ + { + description: "Fast image-to-3D mesh model by Tencent.", + id: "TencentARC/InstantMesh" + }, + { + description: "Fast image-to-3D mesh model by StabilityAI", + id: "stabilityai/TripoSR" + }, + { + description: "A scaled up image-to-3D mesh model derived from TripoSR.", + id: "hwjiang/Real3D" + }, + { + description: "Generative 3D gaussian splatting model.", + id: "ashawkey/LGM" + } + ], + spaces: [ + { + description: "Leaderboard to evaluate image-to-3D models.", + id: "dylanebert/3d-arena" + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "TencentARC/InstantMesh" + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "stabilityai/TripoSR" + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "hwjiang/Real3D" + }, + { + description: "Image-to-3D demo with splat outputs.", + id: "dylanebert/LGM-mini" + } + ], + summary: "Image-to-3D models take in image input and produce 3D output.", + widgetModels: [], + youtubeId: "" +}; +var data_default37 = taskData37; + +// src/tasks/text-to-3d/data.ts +var taskData38 = { + datasets: [ + { + description: "A large dataset of over 10 million 3D objects.", + id: "allenai/objaverse-xl" + }, + { + 
description: "Descriptive captions for 3D objects in Objaverse.", + id: "tiange/Cap3D" + } + ], + demo: { + inputs: [ + { + label: "Prompt", + content: "a cat statue", + type: "text" + } + ], + outputs: [ + { + label: "Result", + content: "text-to-3d-3d-output-filename.glb", + type: "text" + } + ] + }, + metrics: [], + models: [ + { + description: "Text-to-3D mesh model by OpenAI", + id: "openai/shap-e" + }, + { + description: "Generative 3D gaussian splatting model.", + id: "ashawkey/LGM" + } + ], + spaces: [ + { + description: "Text-to-3D demo with mesh outputs.", + id: "hysts/Shap-E" + }, + { + description: "Text/image-to-3D demo with splat outputs.", + id: "ashawkey/LGM" + } + ], + summary: "Text-to-3D models take in text input and produce 3D output.", + widgetModels: [], + youtubeId: "" +}; +var data_default38 = taskData38; + +// src/tasks/index.ts +var TASKS_MODEL_LIBRARIES = { + "audio-classification": ["speechbrain", "transformers", "transformers.js"], + "audio-to-audio": ["asteroid", "fairseq", "speechbrain"], + "automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"], + "depth-estimation": ["transformers", "transformers.js"], + "document-question-answering": ["transformers", "transformers.js"], + "feature-extraction": ["sentence-transformers", "transformers", "transformers.js"], + "fill-mask": ["transformers", "transformers.js"], + "graph-ml": ["transformers"], + "image-classification": ["keras", "timm", "transformers", "transformers.js"], + "image-feature-extraction": ["timm", "transformers"], + "image-segmentation": ["transformers", "transformers.js"], + "image-text-to-text": ["transformers"], + "image-to-image": ["diffusers", "transformers", "transformers.js"], + "image-to-text": ["transformers", "transformers.js"], + "image-to-video": ["diffusers"], + "video-classification": ["transformers"], + "mask-generation": ["transformers"], + "multiple-choice": ["transformers"], + "object-detection": ["transformers", 
"transformers.js"], + other: [], + "question-answering": ["adapter-transformers", "allennlp", "transformers", "transformers.js"], + robotics: [], + "reinforcement-learning": ["transformers", "stable-baselines3", "ml-agents", "sample-factory"], + "sentence-similarity": ["sentence-transformers", "spacy", "transformers.js"], + summarization: ["transformers", "transformers.js"], + "table-question-answering": ["transformers"], + "table-to-text": ["transformers"], + "tabular-classification": ["sklearn"], + "tabular-regression": ["sklearn"], + "tabular-to-text": ["transformers"], + "text-classification": ["adapter-transformers", "setfit", "spacy", "transformers", "transformers.js"], + "text-generation": ["transformers", "transformers.js"], + "text-retrieval": [], + "text-to-image": ["diffusers"], + "text-to-speech": ["espnet", "tensorflowtts", "transformers", "transformers.js"], + "text-to-audio": ["transformers", "transformers.js"], + "text-to-video": ["diffusers"], + "text2text-generation": ["transformers", "transformers.js"], + "time-series-forecasting": [], + "token-classification": [ + "adapter-transformers", + "flair", + "spacy", + "span-marker", + "stanza", + "transformers", + "transformers.js" + ], + translation: ["transformers", "transformers.js"], + "unconditional-image-generation": ["diffusers"], + "visual-question-answering": ["transformers", "transformers.js"], + "voice-activity-detection": [], + "zero-shot-classification": ["transformers", "transformers.js"], + "zero-shot-image-classification": ["transformers", "transformers.js"], + "zero-shot-object-detection": ["transformers", "transformers.js"], + "text-to-3d": ["diffusers"], + "image-to-3d": ["diffusers"] +}; +function getData(type, partialTaskData = data_default16) { + return { + ...partialTaskData, + id: type, + label: PIPELINE_DATA[type].name, + libraries: TASKS_MODEL_LIBRARIES[type] + }; +} +var TASKS_DATA = { + "audio-classification": getData("audio-classification", data_default), + 
"audio-to-audio": getData("audio-to-audio", data_default2), + "automatic-speech-recognition": getData("automatic-speech-recognition", data_default3), + "depth-estimation": getData("depth-estimation", data_default15), + "document-question-answering": getData("document-question-answering", data_default4), + "feature-extraction": getData("feature-extraction", data_default5), + "fill-mask": getData("fill-mask", data_default6), + "graph-ml": void 0, + "image-classification": getData("image-classification", data_default7), + "image-feature-extraction": getData("image-feature-extraction", data_default8), + "image-segmentation": getData("image-segmentation", data_default12), + "image-to-image": getData("image-to-image", data_default9), + "image-text-to-text": getData("image-text-to-text", data_default11), + "image-to-text": getData("image-to-text", data_default10), + "image-to-video": void 0, + "mask-generation": getData("mask-generation", data_default13), + "multiple-choice": void 0, + "object-detection": getData("object-detection", data_default14), + "video-classification": getData("video-classification", data_default32), + other: void 0, + "question-answering": getData("question-answering", data_default18), + "reinforcement-learning": getData("reinforcement-learning", data_default17), + robotics: void 0, + "sentence-similarity": getData("sentence-similarity", data_default19), + summarization: getData("summarization", data_default20), + "table-question-answering": getData("table-question-answering", data_default21), + "table-to-text": void 0, + "tabular-classification": getData("tabular-classification", data_default22), + "tabular-regression": getData("tabular-regression", data_default23), + "tabular-to-text": void 0, + "text-classification": getData("text-classification", data_default28), + "text-generation": getData("text-generation", data_default29), + "text-retrieval": void 0, + "text-to-image": getData("text-to-image", data_default24), + "text-to-speech": 
getData("text-to-speech", data_default25), + "text-to-audio": void 0, + "text-to-video": getData("text-to-video", data_default30), + "text2text-generation": void 0, + "time-series-forecasting": void 0, + "token-classification": getData("token-classification", data_default26), + translation: getData("translation", data_default27), + "unconditional-image-generation": getData("unconditional-image-generation", data_default31), + "visual-question-answering": getData("visual-question-answering", data_default33), + "voice-activity-detection": void 0, + "zero-shot-classification": getData("zero-shot-classification", data_default34), + "zero-shot-image-classification": getData("zero-shot-image-classification", data_default35), + "zero-shot-object-detection": getData("zero-shot-object-detection", data_default36), + "text-to-3d": getData("text-to-3d", data_default38), + "image-to-3d": getData("image-to-3d", data_default37) +}; + +// src/model-libraries-snippets.ts +var TAG_CUSTOM_CODE = "custom_code"; +function nameWithoutNamespace(modelId) { + const splitted = modelId.split("/"); + return splitted.length === 1 ? 
splitted[0] : splitted[1]; +} +var adapters = (model) => [ + `from adapters import AutoAdapterModel + +model = AutoAdapterModel.from_pretrained("${model.config?.adapter_transformers?.model_name}") +model.load_adapter("${model.id}", set_active=True)` +]; +var allennlpUnknown = (model) => [ + `import allennlp_models +from allennlp.predictors.predictor import Predictor + +predictor = Predictor.from_path("hf://${model.id}")` +]; +var allennlpQuestionAnswering = (model) => [ + `import allennlp_models +from allennlp.predictors.predictor import Predictor + +predictor = Predictor.from_path("hf://${model.id}") +predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "question": "Where do I live?"} +predictions = predictor.predict_json(predictor_input)` +]; +var allennlp = (model) => { + if (model.tags.includes("question-answering")) { + return allennlpQuestionAnswering(model); + } + return allennlpUnknown(model); +}; +var asteroid = (model) => [ + `from asteroid.models import BaseModel + +model = BaseModel.from_pretrained("${model.id}")` +]; +var audioseal = (model) => { + const watermarkSnippet = `# Watermark Generator +from audioseal import AudioSeal + +model = AudioSeal.load_generator("${model.id}") +# pass a tensor (tensor_wav) of shape (batch, channels, samples) and a sample rate +wav, sr = tensor_wav, 16000 + +watermark = model.get_watermark(wav, sr) +watermarked_audio = wav + watermark`; + const detectorSnippet = `# Watermark Detector +from audioseal import AudioSeal + +detector = AudioSeal.load_detector("${model.id}") + +result, message = detector.detect_watermark(watermarked_audio, sr)`; + return [watermarkSnippet, detectorSnippet]; +}; +function get_base_diffusers_model(model) { + return model.cardData?.base_model?.toString() ?? 
"fill-in-base-model"; +} +var bertopic = (model) => [ + `from bertopic import BERTopic + +model = BERTopic.load("${model.id}")` +]; +var bm25s = (model) => [ + `from bm25s.hf import BM25HF + +retriever = BM25HF.load_from_hub("${model.id}")` +]; +var depth_anything_v2 = (model) => { + let encoder; + let features; + let out_channels; + encoder = ""; + features = ""; + out_channels = ""; + if (model.id === "depth-anything/Depth-Anything-V2-Small") { + encoder = "vits"; + features = "64"; + out_channels = "[48, 96, 192, 384]"; + } else if (model.id === "depth-anything/Depth-Anything-V2-Base") { + encoder = "vitb"; + features = "128"; + out_channels = "[96, 192, 384, 768]"; + } else if (model.id === "depth-anything/Depth-Anything-V2-Large") { + encoder = "vitl"; + features = "256"; + out_channels = "[256, 512, 1024, 1024"; + } + return [ + ` +# Install from https://github.com/DepthAnything/Depth-Anything-V2 + +# Load the model and infer depth from an image +import cv2 +import torch + +from depth_anything_v2.dpt import DepthAnythingV2 + +# instantiate the model +model = DepthAnythingV2(encoder="${encoder}", features=${features}, out_channels=${out_channels}) + +# load the weights +filepath = hf_hub_download(repo_id="${model.id}", filename="depth_anything_v2_${encoder}.pth", repo_type="model") +state_dict = torch.load(filepath, map_location="cpu") +model.load_state_dict(state_dict).eval() + +raw_img = cv2.imread("your/image/path") +depth = model.infer_image(raw_img) # HxW raw depth map in numpy + ` + ]; +}; +var diffusers_default = (model) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${model.id}")` +]; +var diffusers_controlnet = (model) => [ + `from diffusers import ControlNetModel, StableDiffusionControlNetPipeline + +controlnet = ControlNetModel.from_pretrained("${model.id}") +pipeline = StableDiffusionControlNetPipeline.from_pretrained( + "${get_base_diffusers_model(model)}", controlnet=controlnet +)` +]; +var 
diffusers_lora = (model) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}") +pipeline.load_lora_weights("${model.id}")` +]; +var diffusers_textual_inversion = (model) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}") +pipeline.load_textual_inversion("${model.id}")` +]; +var diffusers = (model) => { + if (model.tags.includes("controlnet")) { + return diffusers_controlnet(model); + } else if (model.tags.includes("lora")) { + return diffusers_lora(model); + } else if (model.tags.includes("textual_inversion")) { + return diffusers_textual_inversion(model); + } else { + return diffusers_default(model); + } +}; +var edsnlp = (model) => { + const packageName = nameWithoutNamespace(model.id).replaceAll("-", "_"); + return [ + `# Load it from the Hub directly +import edsnlp +nlp = edsnlp.load("${model.id}") +`, + `# Or install it as a package +!pip install git+https://huggingface.co/${model.id} + +# and import it as a module +import ${packageName} + +nlp = ${packageName}.load() # or edsnlp.load("${packageName}") +` + ]; +}; +var espnetTTS = (model) => [ + `from espnet2.bin.tts_inference import Text2Speech + +model = Text2Speech.from_pretrained("${model.id}") + +speech, *_ = model("text to generate speech from")` +]; +var espnetASR = (model) => [ + `from espnet2.bin.asr_inference import Speech2Text + +model = Speech2Text.from_pretrained( + "${model.id}" +) + +speech, rate = soundfile.read("speech.wav") +text, *_ = model(speech)[0]` +]; +var espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`]; +var espnet = (model) => { + if (model.tags.includes("text-to-speech")) { + return espnetTTS(model); + } else if (model.tags.includes("automatic-speech-recognition")) { + return espnetASR(model); + } + return espnetUnknown(); +}; +var fairseq = (model) => [ + `from 
fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub + +models, cfg, task = load_model_ensemble_and_task_from_hf_hub( + "${model.id}" +)` +]; +var flair = (model) => [ + `from flair.models import SequenceTagger + +tagger = SequenceTagger.load("${model.id}")` +]; +var gliner = (model) => [ + `from gliner import GLiNER + +model = GLiNER.from_pretrained("${model.id}")` +]; +var keras = (model) => [ + `# Available backend options are: "jax", "tensorflow", "torch". +import os +os.environ["KERAS_BACKEND"] = "tensorflow" + +import keras + +model = keras.saving.load_model("hf://${model.id}") +` +]; +var keras_nlp = (model) => [ + `# Available backend options are: "jax", "tensorflow", "torch". +import os +os.environ["KERAS_BACKEND"] = "tensorflow" + +import keras_nlp + +tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}") +backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}") +` +]; +var tf_keras = (model) => [ + `# Note: 'keras<3.x' or 'tf_keras' must be installed (legacy) +# See https://github.com/keras-team/tf-keras for more details. 
+from huggingface_hub import from_pretrained_keras + +model = from_pretrained_keras("${model.id}") +` +]; +var mamba_ssm = (model) => [ + `from mamba_ssm import MambaLMHeadModel + +model = MambaLMHeadModel.from_pretrained("${model.id}")` +]; +var mars5_tts = (model) => [ + `# Install from https://github.com/Camb-ai/MARS5-TTS + +from inference import Mars5TTS +mars5 = Mars5TTS.from_pretrained("${model.id}")` +]; +var mesh_anything = () => [ + `# Install from https://github.com/buaacyw/MeshAnything.git + +from MeshAnything.models.meshanything import MeshAnything + +# refer to https://github.com/buaacyw/MeshAnything/blob/main/main.py#L91 on how to define args +# and https://github.com/buaacyw/MeshAnything/blob/main/app.py regarding usage +model = MeshAnything(args)` +]; +var open_clip = (model) => [ + `import open_clip + +model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:${model.id}') +tokenizer = open_clip.get_tokenizer('hf-hub:${model.id}')` +]; +var paddlenlp = (model) => { + if (model.config?.architectures?.[0]) { + const architecture = model.config.architectures[0]; + return [ + [ + `from paddlenlp.transformers import AutoTokenizer, ${architecture}`, + "", + `tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`, + `model = ${architecture}.from_pretrained("${model.id}", from_hf_hub=True)` + ].join("\n") + ]; + } else { + return [ + [ + `# \u26A0\uFE0F Type of model unknown`, + `from paddlenlp.transformers import AutoTokenizer, AutoModel`, + "", + `tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`, + `model = AutoModel.from_pretrained("${model.id}", from_hf_hub=True)` + ].join("\n") + ]; + } +}; +var pyannote_audio_pipeline = (model) => [ + `from pyannote.audio import Pipeline + +pipeline = Pipeline.from_pretrained("${model.id}") + +# inference on the whole file +pipeline("file.wav") + +# inference on an excerpt +from pyannote.core import Segment +excerpt = Segment(start=2.0, 
end=5.0) + +from pyannote.audio import Audio +waveform, sample_rate = Audio().crop("file.wav", excerpt) +pipeline({"waveform": waveform, "sample_rate": sample_rate})` +]; +var pyannote_audio_model = (model) => [ + `from pyannote.audio import Model, Inference + +model = Model.from_pretrained("${model.id}") +inference = Inference(model) + +# inference on the whole file +inference("file.wav") + +# inference on an excerpt +from pyannote.core import Segment +excerpt = Segment(start=2.0, end=5.0) +inference.crop("file.wav", excerpt)` +]; +var pyannote_audio = (model) => { + if (model.tags.includes("pyannote-audio-pipeline")) { + return pyannote_audio_pipeline(model); + } + return pyannote_audio_model(model); +}; +var tensorflowttsTextToMel = (model) => [ + `from tensorflow_tts.inference import AutoProcessor, TFAutoModel + +processor = AutoProcessor.from_pretrained("${model.id}") +model = TFAutoModel.from_pretrained("${model.id}") +` +]; +var tensorflowttsMelToWav = (model) => [ + `from tensorflow_tts.inference import TFAutoModel + +model = TFAutoModel.from_pretrained("${model.id}") +audios = model.inference(mels) +` +]; +var tensorflowttsUnknown = (model) => [ + `from tensorflow_tts.inference import TFAutoModel + +model = TFAutoModel.from_pretrained("${model.id}") +` +]; +var tensorflowtts = (model) => { + if (model.tags.includes("text-to-mel")) { + return tensorflowttsTextToMel(model); + } else if (model.tags.includes("mel-to-wav")) { + return tensorflowttsMelToWav(model); + } + return tensorflowttsUnknown(model); +}; +var timm = (model) => [ + `import timm + +model = timm.create_model("hf_hub:${model.id}", pretrained=True)` +]; +var skopsPickle = (model, modelFile) => { + return [ + `import joblib +from skops.hub_utils import download +download("${model.id}", "path_to_folder") +model = joblib.load( + "${modelFile}" +) +# only load pickle files from sources you trust +# read more about it here https://skops.readthedocs.io/en/stable/persistence.html` + ]; +}; +var 
skopsFormat = (model, modelFile) => { + return [ + `from skops.hub_utils import download +from skops.io import load +download("${model.id}", "path_to_folder") +# make sure model file is in skops format +# if model is a pickle file, make sure it's from a source you trust +model = load("path_to_folder/${modelFile}")` + ]; +}; +var skopsJobLib = (model) => { + return [ + `from huggingface_hub import hf_hub_download +import joblib +model = joblib.load( + hf_hub_download("${model.id}", "sklearn_model.joblib") +) +# only load pickle files from sources you trust +# read more about it here https://skops.readthedocs.io/en/stable/persistence.html` + ]; +}; +var sklearn = (model) => { + if (model.tags.includes("skops")) { + const skopsmodelFile = model.config?.sklearn?.model?.file; + const skopssaveFormat = model.config?.sklearn?.model_format; + if (!skopsmodelFile) { + return [`# \u26A0\uFE0F Model filename not specified in config.json`]; + } + if (skopssaveFormat === "pickle") { + return skopsPickle(model, skopsmodelFile); + } else { + return skopsFormat(model, skopsmodelFile); + } + } else { + return skopsJobLib(model); + } +}; +var stable_audio_tools = (model) => [ + `import torch +import torchaudio +from einops import rearrange +from stable_audio_tools import get_pretrained_model +from stable_audio_tools.inference.generation import generate_diffusion_cond + +device = "cuda" if torch.cuda.is_available() else "cpu" + +# Download model +model, model_config = get_pretrained_model("${model.id}") +sample_rate = model_config["sample_rate"] +sample_size = model_config["sample_size"] + +model = model.to(device) + +# Set up text and timing conditioning +conditioning = [{ + "prompt": "128 BPM tech house drum loop", +}] + +# Generate stereo audio +output = generate_diffusion_cond( + model, + conditioning=conditioning, + sample_size=sample_size, + device=device +) + +# Rearrange audio batch to a single sequence +output = rearrange(output, "b d n -> d (b n)") + +# Peak normalize, 
clip, convert to int16, and save to file +output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu() +torchaudio.save("output.wav", output, sample_rate)` +]; +var fastai = (model) => [ + `from huggingface_hub import from_pretrained_fastai + +learn = from_pretrained_fastai("${model.id}")` +]; +var sampleFactory = (model) => [ + `python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir` +]; +var sentenceTransformers = (model) => [ + `from sentence_transformers import SentenceTransformer + +model = SentenceTransformer("${model.id}")` +]; +var setfit = (model) => [ + `from setfit import SetFitModel + +model = SetFitModel.from_pretrained("${model.id}")` +]; +var spacy = (model) => [ + `!pip install https://huggingface.co/${model.id}/resolve/main/${nameWithoutNamespace(model.id)}-any-py3-none-any.whl + +# Using spacy.load(). +import spacy +nlp = spacy.load("${nameWithoutNamespace(model.id)}") + +# Importing as module. 
+import ${nameWithoutNamespace(model.id)} +nlp = ${nameWithoutNamespace(model.id)}.load()` +]; +var span_marker = (model) => [ + `from span_marker import SpanMarkerModel + +model = SpanMarkerModel.from_pretrained("${model.id}")` +]; +var stanza = (model) => [ + `import stanza + +stanza.download("${nameWithoutNamespace(model.id).replace("stanza-", "")}") +nlp = stanza.Pipeline("${nameWithoutNamespace(model.id).replace("stanza-", "")}")` +]; +var speechBrainMethod = (speechbrainInterface) => { + switch (speechbrainInterface) { + case "EncoderClassifier": + return "classify_file"; + case "EncoderDecoderASR": + case "EncoderASR": + return "transcribe_file"; + case "SpectralMaskEnhancement": + return "enhance_file"; + case "SepformerSeparation": + return "separate_file"; + default: + return void 0; + } +}; +var speechbrain = (model) => { + const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface; + if (speechbrainInterface === void 0) { + return [`# interface not specified in config.json`]; + } + const speechbrainMethod = speechBrainMethod(speechbrainInterface); + if (speechbrainMethod === void 0) { + return [`# interface in config.json invalid`]; + } + return [ + `from speechbrain.pretrained import ${speechbrainInterface} +model = ${speechbrainInterface}.from_hparams( + "${model.id}" +) +model.${speechbrainMethod}("file.wav")` + ]; +}; +var transformers = (model) => { + const info = model.transformersInfo; + if (!info) { + return [`# \u26A0\uFE0F Type of model unknown`]; + } + const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : ""; + let autoSnippet; + if (info.processor) { + const varName = info.processor === "AutoTokenizer" ? "tokenizer" : info.processor === "AutoFeatureExtractor" ? 
"extractor" : "processor"; + autoSnippet = [ + "# Load model directly", + `from transformers import ${info.processor}, ${info.auto_model}`, + "", + `${varName} = ${info.processor}.from_pretrained("${model.id}"` + remote_code_snippet + ")", + `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")" + ].join("\n"); + } else { + autoSnippet = [ + "# Load model directly", + `from transformers import ${info.auto_model}`, + `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")" + ].join("\n"); + } + if (model.pipeline_tag && LIBRARY_TASK_MAPPING.transformers?.includes(model.pipeline_tag)) { + const pipelineSnippet = ["# Use a pipeline as a high-level helper", "from transformers import pipeline", ""]; + if (model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template) { + pipelineSnippet.push("messages = [", ' {"role": "user", "content": "Who are you?"},', "]"); + } + pipelineSnippet.push(`pipe = pipeline("${model.pipeline_tag}", model="${model.id}"` + remote_code_snippet + ")"); + if (model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template) { + pipelineSnippet.push("pipe(messages)"); + } + return [pipelineSnippet.join("\n"), autoSnippet]; + } + return [autoSnippet]; +}; +var transformersJS = (model) => { + if (!model.pipeline_tag) { + return [`// \u26A0\uFE0F Unknown pipeline tag`]; + } + const libName = "@xenova/transformers"; + return [ + `// npm i ${libName} +import { pipeline } from '${libName}'; + +// Allocate pipeline +const pipe = await pipeline('${model.pipeline_tag}', '${model.id}');` + ]; +}; +var peftTask = (peftTaskType) => { + switch (peftTaskType) { + case "CAUSAL_LM": + return "CausalLM"; + case "SEQ_2_SEQ_LM": + return "Seq2SeqLM"; + case "TOKEN_CLS": + return "TokenClassification"; + case "SEQ_CLS": + return "SequenceClassification"; + default: + return void 0; + } +}; +var peft = (model) => { + const { base_model_name_or_path: 
peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {}; + const pefttask = peftTask(peftTaskType); + if (!pefttask) { + return [`Task type is invalid.`]; + } + if (!peftBaseModel) { + return [`Base model is not found.`]; + } + return [ + `from peft import PeftModel, PeftConfig +from transformers import AutoModelFor${pefttask} + +config = PeftConfig.from_pretrained("${model.id}") +base_model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}") +model = PeftModel.from_pretrained(base_model, "${model.id}")` + ]; +}; +var fasttext = (model) => [ + `from huggingface_hub import hf_hub_download +import fasttext + +model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))` +]; +var stableBaselines3 = (model) => [ + `from huggingface_sb3 import load_from_hub +checkpoint = load_from_hub( + repo_id="${model.id}", + filename="{MODEL FILENAME}.zip", +)` +]; +var nemoDomainResolver = (domain, model) => { + switch (domain) { + case "ASR": + return [ + `import nemo.collections.asr as nemo_asr +asr_model = nemo_asr.models.ASRModel.from_pretrained("${model.id}") + +transcriptions = asr_model.transcribe(["file.wav"])` + ]; + default: + return void 0; + } +}; +var mlAgents = (model) => [ + `mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./download: string[]s"` +]; +var sentis = () => [ + `string modelName = "[Your model name here].sentis"; +Model model = ModelLoader.Load(Application.streamingAssetsPath + "/" + modelName); +IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model); +// Please see provided C# file for more details +` +]; +var voicecraft = (model) => [ + `from voicecraft import VoiceCraft + +model = VoiceCraft.from_pretrained("${model.id}")` +]; +var chattts = () => [ + `import ChatTTS +import torchaudio + +chat = ChatTTS.Chat() +chat.load_models(compile=False) # Set to True for better performance + +texts = ["PUT YOUR TEXT HERE",] + +wavs = chat.infer(texts, ) + +torchaudio.save("output1.wav", 
torch.from_numpy(wavs[0]), 24000)` +]; +var mlx = (model) => [ + `pip install huggingface_hub hf_transfer + +export HF_HUB_ENABLE_HF_TRANS: string[]FER=1 +huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}` +]; +var mlxim = (model) => [ + `from mlxim.model import create_model + +model = create_model(${model.id})` +]; +var nemo = (model) => { + let command = void 0; + if (model.tags.includes("automatic-speech-recognition")) { + command = nemoDomainResolver("ASR", model); + } + return command ?? [`# tag did not correspond to a valid NeMo domain.`]; +}; +var pythae = (model) => [ + `from pythae.models import AutoModel + +model = AutoModel.load_from_hf_hub("${model.id}")` +]; +var musicgen = (model) => [ + `from audiocraft.models import MusicGen + +model = MusicGen.get_pretrained("${model.id}") + +descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] +wav = model.generate(descriptions) # generates 3 samples.` +]; +var magnet = (model) => [ + `from audiocraft.models import MAGNeT + +model = MAGNeT.get_pretrained("${model.id}") + +descriptions = ['disco beat', 'energetic EDM', 'funky groove'] +wav = model.generate(descriptions) # generates 3 samples.` +]; +var audiogen = (model) => [ + `from audiocraft.models import AudioGen + +model = AudioGen.get_pretrained("${model.id}") +model.set_generation_params(duration=5) # generate 5 seconds. 
+descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a corridor'] +wav = model.generate(descriptions) # generates 3 samples.` +]; +var audiocraft = (model) => { + if (model.tags.includes("musicgen")) { + return musicgen(model); + } else if (model.tags.includes("audiogen")) { + return audiogen(model); + } else if (model.tags.includes("magnet")) { + return magnet(model); + } else { + return [`# Type of model unknown.`]; + } +}; +var whisperkit = () => [ + `# Install CLI with Homebrew on macOS device +brew install whisperkit-cli + +# View all available inference options +whisperkit-cli transcribe --help + +# Download and run inference using whisper base model +whisperkit-cli transcribe --audio-path /path/to/audio.mp3 + +# Or use your preferred model variant +whisperkit-cli transcribe --model "large-v3" --model-prefix "distil" --audio-path /path/to/audio.mp3 --verbose` +]; + +// src/model-libraries.ts +var MODEL_LIBRARIES_UI_ELEMENTS = { + "adapter-transformers": { + prettyLabel: "Adapters", + repoName: "adapters", + repoUrl: "https://github.com/Adapter-Hub/adapters", + docsUrl: "https://huggingface.co/docs/hub/adapters", + snippets: adapters, + filter: true, + countDownloads: `path:"adapter_config.json"` + }, + allennlp: { + prettyLabel: "AllenNLP", + repoName: "AllenNLP", + repoUrl: "https://github.com/allenai/allennlp", + docsUrl: "https://huggingface.co/docs/hub/allennlp", + snippets: allennlp, + filter: true + }, + asteroid: { + prettyLabel: "Asteroid", + repoName: "Asteroid", + repoUrl: "https://github.com/asteroid-team/asteroid", + docsUrl: "https://huggingface.co/docs/hub/asteroid", + snippets: asteroid, + filter: true, + countDownloads: `path:"pytorch_model.bin"` + }, + audiocraft: { + prettyLabel: "Audiocraft", + repoName: "audiocraft", + repoUrl: "https://github.com/facebookresearch/audiocraft", + snippets: audiocraft, + filter: false, + countDownloads: `path:"state_dict.bin"` + }, + audioseal: { + prettyLabel: "AudioSeal", + 
repoName: "audioseal", + repoUrl: "https://github.com/facebookresearch/audioseal", + filter: false, + countDownloads: `path_extension:"pth"`, + snippets: audioseal + }, + bertopic: { + prettyLabel: "BERTopic", + repoName: "BERTopic", + repoUrl: "https://github.com/MaartenGr/BERTopic", + snippets: bertopic, + filter: true + }, + big_vision: { + prettyLabel: "Big Vision", + repoName: "big_vision", + repoUrl: "https://github.com/google-research/big_vision", + filter: false, + countDownloads: `path_extension:"npz"` + }, + bm25s: { + prettyLabel: "BM25S", + repoName: "bm25s", + repoUrl: "https://github.com/xhluca/bm25s", + snippets: bm25s, + filter: false, + countDownloads: `path:"params.index.json"` + }, + champ: { + prettyLabel: "Champ", + repoName: "Champ", + repoUrl: "https://github.com/fudan-generative-vision/champ", + countDownloads: `path:"champ/motion_module.pth"` + }, + chat_tts: { + prettyLabel: "ChatTTS", + repoName: "ChatTTS", + repoUrl: "https://github.com/2noise/ChatTTS.git", + snippets: chattts, + filter: false, + countDownloads: `path:"asset/GPT.pt"` + }, + colpali: { + prettyLabel: "ColPali", + repoName: "ColPali", + repoUrl: "https://github.com/ManuelFay/colpali", + filter: false, + countDownloads: `path:"adapter_config.json"` + }, + "depth-anything-v2": { + prettyLabel: "DepthAnythingV2", + repoName: "Depth Anything V2", + repoUrl: "https://github.com/DepthAnything/Depth-Anything-V2", + snippets: depth_anything_v2, + filter: false, + countDownloads: `path_extension:"pth"` + }, + diffusers: { + prettyLabel: "Diffusers", + repoName: "\u{1F917}/diffusers", + repoUrl: "https://github.com/huggingface/diffusers", + docsUrl: "https://huggingface.co/docs/hub/diffusers", + snippets: diffusers, + filter: true + /// diffusers has its own more complex "countDownloads" query + }, + doctr: { + prettyLabel: "docTR", + repoName: "doctr", + repoUrl: "https://github.com/mindee/doctr" + }, + edsnlp: { + prettyLabel: "EDS-NLP", + repoName: "edsnlp", + repoUrl: 
"https://github.com/aphp/edsnlp", + docsUrl: "https://aphp.github.io/edsnlp/latest/", + filter: false, + snippets: edsnlp, + countDownloads: `path_filename:"config" AND path_extension:"cfg"` + }, + elm: { + prettyLabel: "ELM", + repoName: "elm", + repoUrl: "https://github.com/slicex-ai/elm", + filter: false, + countDownloads: `path_filename:"slicex_elm_config" AND path_extension:"json"` + }, + espnet: { + prettyLabel: "ESPnet", + repoName: "ESPnet", + repoUrl: "https://github.com/espnet/espnet", + docsUrl: "https://huggingface.co/docs/hub/espnet", + snippets: espnet, + filter: true + }, + fairseq: { + prettyLabel: "Fairseq", + repoName: "fairseq", + repoUrl: "https://github.com/pytorch/fairseq", + snippets: fairseq, + filter: true + }, + fastai: { + prettyLabel: "fastai", + repoName: "fastai", + repoUrl: "https://github.com/fastai/fastai", + docsUrl: "https://huggingface.co/docs/hub/fastai", + snippets: fastai, + filter: true + }, + fasttext: { + prettyLabel: "fastText", + repoName: "fastText", + repoUrl: "https://fasttext.cc/", + snippets: fasttext, + filter: true, + countDownloads: `path_extension:"bin"` + }, + flair: { + prettyLabel: "Flair", + repoName: "Flair", + repoUrl: "https://github.com/flairNLP/flair", + docsUrl: "https://huggingface.co/docs/hub/flair", + snippets: flair, + filter: true, + countDownloads: `path:"pytorch_model.bin"` + }, + "gemma.cpp": { + prettyLabel: "gemma.cpp", + repoName: "gemma.cpp", + repoUrl: "https://github.com/google/gemma.cpp", + filter: false, + countDownloads: `path_extension:"sbs"` + }, + gliner: { + prettyLabel: "GLiNER", + repoName: "GLiNER", + repoUrl: "https://github.com/urchade/GLiNER", + snippets: gliner, + filter: false, + countDownloads: `path:"gliner_config.json"` + }, + "glyph-byt5": { + prettyLabel: "Glyph-ByT5", + repoName: "Glyph-ByT5", + repoUrl: "https://github.com/AIGText/Glyph-ByT5", + filter: false, + countDownloads: `path:"checkpoints/byt5_model.pt"` + }, + grok: { + prettyLabel: "Grok", + repoName: 
"Grok", + repoUrl: "https://github.com/xai-org/grok-1", + filter: false, + countDownloads: `path:"ckpt/tensor00000_000" OR path:"ckpt-0/tensor00000_000"` + }, + hallo: { + prettyLabel: "Hallo", + repoName: "Hallo", + repoUrl: "https://github.com/fudan-generative-vision/hallo", + countDownloads: `path:"hallo/net.pth"` + }, + "hunyuan-dit": { + prettyLabel: "HunyuanDiT", + repoName: "HunyuanDiT", + repoUrl: "https://github.com/Tencent/HunyuanDiT", + countDownloads: `path:"pytorch_model_ema.pt" OR path:"pytorch_model_distill.pt"` + }, + keras: { + prettyLabel: "Keras", + repoName: "Keras", + repoUrl: "https://github.com/keras-team/keras", + docsUrl: "https://huggingface.co/docs/hub/keras", + snippets: keras, + filter: true, + countDownloads: `path:"config.json" OR path_extension:"keras"` + }, + "tf-keras": { + // Legacy "Keras 2" library (tensorflow-only) + prettyLabel: "TF-Keras", + repoName: "TF-Keras", + repoUrl: "https://github.com/keras-team/tf-keras", + docsUrl: "https://huggingface.co/docs/hub/tf-keras", + snippets: tf_keras, + filter: true, + countDownloads: `path:"saved_model.pb"` + }, + "keras-nlp": { + prettyLabel: "KerasNLP", + repoName: "KerasNLP", + repoUrl: "https://keras.io/keras_nlp/", + docsUrl: "https://github.com/keras-team/keras-nlp", + snippets: keras_nlp + }, + k2: { + prettyLabel: "K2", + repoName: "k2", + repoUrl: "https://github.com/k2-fsa/k2" + }, + liveportrait: { + prettyLabel: "LivePortrait", + repoName: "LivePortrait", + repoUrl: "https://github.com/KwaiVGI/LivePortrait", + filter: false, + countDownloads: `path:"liveportrait/landmark.onnx"` + }, + mindspore: { + prettyLabel: "MindSpore", + repoName: "mindspore", + repoUrl: "https://github.com/mindspore-ai/mindspore" + }, + "mamba-ssm": { + prettyLabel: "MambaSSM", + repoName: "MambaSSM", + repoUrl: "https://github.com/state-spaces/mamba", + filter: false, + snippets: mamba_ssm + }, + "mars5-tts": { + prettyLabel: "MARS5-TTS", + repoName: "MARS5-TTS", + repoUrl: 
"https://github.com/Camb-ai/MARS5-TTS", + filter: false, + countDownloads: `path:"mars5_ar.safetensors"`, + snippets: mars5_tts + }, + "mesh-anything": { + prettyLabel: "MeshAnything", + repoName: "MeshAnything", + repoUrl: "https://github.com/buaacyw/MeshAnything", + filter: false, + countDownloads: `path:"MeshAnything_350m.pth"`, + snippets: mesh_anything + }, + "ml-agents": { + prettyLabel: "ml-agents", + repoName: "ml-agents", + repoUrl: "https://github.com/Unity-Technologies/ml-agents", + docsUrl: "https://huggingface.co/docs/hub/ml-agents", + snippets: mlAgents, + filter: true, + countDownloads: `path_extension:"onnx"` + }, + mlx: { + prettyLabel: "MLX", + repoName: "MLX", + repoUrl: "https://github.com/ml-explore/mlx-examples/tree/main", + snippets: mlx, + filter: true + }, + "mlx-image": { + prettyLabel: "mlx-image", + repoName: "mlx-image", + repoUrl: "https://github.com/riccardomusmeci/mlx-image", + docsUrl: "https://huggingface.co/docs/hub/mlx-image", + snippets: mlxim, + filter: false, + countDownloads: `path:"model.safetensors"` + }, + "mlc-llm": { + prettyLabel: "MLC-LLM", + repoName: "MLC-LLM", + repoUrl: "https://github.com/mlc-ai/mlc-llm", + docsUrl: "https://llm.mlc.ai/docs/", + filter: false, + countDownloads: `path:"mlc-chat-config.json"` + }, + nemo: { + prettyLabel: "NeMo", + repoName: "NeMo", + repoUrl: "https://github.com/NVIDIA/NeMo", + snippets: nemo, + filter: true, + countDownloads: `path_extension:"nemo" OR path:"model_config.yaml"` + }, + open_clip: { + prettyLabel: "OpenCLIP", + repoName: "OpenCLIP", + repoUrl: "https://github.com/mlfoundations/open_clip", + snippets: open_clip, + filter: true, + countDownloads: `path_extension:"bin" AND path_filename:*pytorch_model` + }, + paddlenlp: { + prettyLabel: "paddlenlp", + repoName: "PaddleNLP", + repoUrl: "https://github.com/PaddlePaddle/PaddleNLP", + docsUrl: "https://huggingface.co/docs/hub/paddlenlp", + snippets: paddlenlp, + filter: true, + countDownloads: `path:"model_config.json"` + 
}, + peft: { + prettyLabel: "PEFT", + repoName: "PEFT", + repoUrl: "https://github.com/huggingface/peft", + snippets: peft, + filter: true, + countDownloads: `path:"adapter_config.json"` + }, + "pyannote-audio": { + prettyLabel: "pyannote.audio", + repoName: "pyannote-audio", + repoUrl: "https://github.com/pyannote/pyannote-audio", + snippets: pyannote_audio, + filter: true + }, + pythae: { + prettyLabel: "pythae", + repoName: "pythae", + repoUrl: "https://github.com/clementchadebec/benchmark_VAE", + snippets: pythae, + filter: true + }, + recurrentgemma: { + prettyLabel: "RecurrentGemma", + repoName: "recurrentgemma", + repoUrl: "https://github.com/google-deepmind/recurrentgemma", + filter: false, + countDownloads: `path:"tokenizer.model"` + }, + "sample-factory": { + prettyLabel: "sample-factory", + repoName: "sample-factory", + repoUrl: "https://github.com/alex-petrenko/sample-factory", + docsUrl: "https://huggingface.co/docs/hub/sample-factory", + snippets: sampleFactory, + filter: true, + countDownloads: `path:"cfg.json"` + }, + "sentence-transformers": { + prettyLabel: "sentence-transformers", + repoName: "sentence-transformers", + repoUrl: "https://github.com/UKPLab/sentence-transformers", + docsUrl: "https://huggingface.co/docs/hub/sentence-transformers", + snippets: sentenceTransformers, + filter: true + }, + setfit: { + prettyLabel: "setfit", + repoName: "setfit", + repoUrl: "https://github.com/huggingface/setfit", + docsUrl: "https://huggingface.co/docs/hub/setfit", + snippets: setfit, + filter: true + }, + sklearn: { + prettyLabel: "Scikit-learn", + repoName: "Scikit-learn", + repoUrl: "https://github.com/scikit-learn/scikit-learn", + snippets: sklearn, + filter: true, + countDownloads: `path:"sklearn_model.joblib"` + }, + spacy: { + prettyLabel: "spaCy", + repoName: "spaCy", + repoUrl: "https://github.com/explosion/spaCy", + docsUrl: "https://huggingface.co/docs/hub/spacy", + snippets: spacy, + filter: true, + countDownloads: `path_extension:"whl"` + 
}, + "span-marker": { + prettyLabel: "SpanMarker", + repoName: "SpanMarkerNER", + repoUrl: "https://github.com/tomaarsen/SpanMarkerNER", + docsUrl: "https://huggingface.co/docs/hub/span_marker", + snippets: span_marker, + filter: true + }, + speechbrain: { + prettyLabel: "speechbrain", + repoName: "speechbrain", + repoUrl: "https://github.com/speechbrain/speechbrain", + docsUrl: "https://huggingface.co/docs/hub/speechbrain", + snippets: speechbrain, + filter: true, + countDownloads: `path:"hyperparams.yaml"` + }, + "stable-audio-tools": { + prettyLabel: "Stable Audio Tools", + repoName: "stable-audio-tools", + repoUrl: "https://github.com/Stability-AI/stable-audio-tools.git", + filter: false, + countDownloads: `path:"model.safetensors"`, + snippets: stable_audio_tools + }, + "diffusion-single-file": { + prettyLabel: "Diffusion Single File", + repoName: "diffusion-single-file", + repoUrl: "https://github.com/comfyanonymous/ComfyUI", + filter: false, + countDownloads: `path_extension:"safetensors"` + }, + "stable-baselines3": { + prettyLabel: "stable-baselines3", + repoName: "stable-baselines3", + repoUrl: "https://github.com/huggingface/huggingface_sb3", + docsUrl: "https://huggingface.co/docs/hub/stable-baselines3", + snippets: stableBaselines3, + filter: true, + countDownloads: `path_extension:"zip"` + }, + stanza: { + prettyLabel: "Stanza", + repoName: "stanza", + repoUrl: "https://github.com/stanfordnlp/stanza", + docsUrl: "https://huggingface.co/docs/hub/stanza", + snippets: stanza, + filter: true, + countDownloads: `path:"models/default.zip"` + }, + tensorflowtts: { + prettyLabel: "TensorFlowTTS", + repoName: "TensorFlowTTS", + repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS", + snippets: tensorflowtts + }, + "tic-clip": { + prettyLabel: "TiC-CLIP", + repoName: "TiC-CLIP", + repoUrl: "https://github.com/apple/ml-tic-clip", + filter: false, + countDownloads: `path_extension:"pt" AND path_prefix:"checkpoints/"` + }, + timesfm: { + prettyLabel: "TimesFM", 
+ repoName: "timesfm", + repoUrl: "https://github.com/google-research/timesfm", + filter: false, + countDownloads: `path:"checkpoints/checkpoint_1100000/state/checkpoint"` + }, + timm: { + prettyLabel: "timm", + repoName: "pytorch-image-models", + repoUrl: "https://github.com/rwightman/pytorch-image-models", + docsUrl: "https://huggingface.co/docs/hub/timm", + snippets: timm, + filter: true, + countDownloads: `path:"pytorch_model.bin" OR path:"model.safetensors"` + }, + transformers: { + prettyLabel: "Transformers", + repoName: "\u{1F917}/transformers", + repoUrl: "https://github.com/huggingface/transformers", + docsUrl: "https://huggingface.co/docs/hub/transformers", + snippets: transformers, + filter: true + }, + "transformers.js": { + prettyLabel: "Transformers.js", + repoName: "transformers.js", + repoUrl: "https://github.com/xenova/transformers.js", + docsUrl: "https://huggingface.co/docs/hub/transformers-js", + snippets: transformersJS, + filter: true + }, + "unity-sentis": { + prettyLabel: "unity-sentis", + repoName: "unity-sentis", + repoUrl: "https://github.com/Unity-Technologies/sentis-samples", + snippets: sentis, + filter: true, + countDownloads: `path_extension:"sentis"` + }, + voicecraft: { + prettyLabel: "VoiceCraft", + repoName: "VoiceCraft", + repoUrl: "https://github.com/jasonppy/VoiceCraft", + docsUrl: "https://github.com/jasonppy/VoiceCraft", + snippets: voicecraft + }, + whisperkit: { + prettyLabel: "WhisperKit", + repoName: "WhisperKit", + repoUrl: "https://github.com/argmaxinc/WhisperKit", + docsUrl: "https://github.com/argmaxinc/WhisperKit?tab=readme-ov-file#homebrew", + snippets: whisperkit, + countDownloads: `path_filename:"model" AND path_extension:"mil" AND _exists_:"path_prefix"` + } +}; +var ALL_MODEL_LIBRARY_KEYS = Object.keys(MODEL_LIBRARIES_UI_ELEMENTS); +var ALL_DISPLAY_MODEL_LIBRARY_KEYS = Object.entries(MODEL_LIBRARIES_UI_ELEMENTS).filter(([_, v]) => v.filter).map(([k]) => k); + +// src/tokenizer-data.ts +var 
SPECIAL_TOKENS_ATTRIBUTES = [ + "bos_token", + "eos_token", + "unk_token", + "sep_token", + "pad_token", + "cls_token", + "mask_token" + // additional_special_tokens (TODO) +]; + +// src/snippets/index.ts +var snippets_exports = {}; +__export(snippets_exports, { + curl: () => curl_exports, + inputs: () => inputs_exports, + js: () => js_exports, + python: () => python_exports +}); + +// src/snippets/inputs.ts +var inputs_exports = {}; +__export(inputs_exports, { + getModelInputSnippet: () => getModelInputSnippet +}); +var inputsZeroShotClassification = () => `"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"`; +var inputsTranslation = () => `"\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435"`; +var inputsSummarization = () => `"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). 
Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`; +var inputsTableQuestionAnswering = () => `{ + "query": "How many stars does the transformers repository have?", + "table": { + "Repository": ["Transformers", "Datasets", "Tokenizers"], + "Stars": ["36542", "4512", "3934"], + "Contributors": ["651", "77", "34"], + "Programming language": [ + "Python", + "Python", + "Rust, Python and NodeJS" + ] + } +}`; +var inputsVisualQuestionAnswering = () => `{ + "image": "cat.png", + "question": "What is in this image?" +}`; +var inputsQuestionAnswering = () => `{ + "question": "What is my name?", + "context": "My name is Clara and I live in Berkeley." +}`; +var inputsTextClassification = () => `"I like you. I love you"`; +var inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`; +var inputsTextGeneration = () => `"Can you please let us know more details about your "`; +var inputsText2TextGeneration = () => `"The answer to the universe is"`; +var inputsFillMask = (model) => `"The answer to the universe is ${model.mask_token}."`; +var inputsSentenceSimilarity = () => `{ + "source_sentence": "That is a happy person", + "sentences": [ + "That is a happy dog", + "That is a very happy person", + "Today is a sunny day" + ] +}`; +var inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`; +var inputsImageClassification = () => `"cats.jpg"`; +var inputsImageToText = () => `"cats.jpg"`; +var inputsImageSegmentation = () => `"cats.jpg"`; +var inputsObjectDetection = () => `"cats.jpg"`; +var inputsAudioToAudio = () => `"sample1.flac"`; +var inputsAudioClassification = () => `"sample1.flac"`; +var inputsTextToImage = () => `"Astronaut riding a horse"`; +var inputsTextToSpeech = () => `"The answer to the universe is 42"`; +var inputsTextToAudio = () => `"liquid drum and bass, atmospheric synths, airy sounds"`; +var 
inputsAutomaticSpeechRecognition = () => `"sample1.flac"`; +var inputsTabularPrediction = () => `'{"Height":[11.52,12.48],"Length1":[23.2,24.0],"Length2":[25.4,26.3],"Species": ["Bream","Bream"]}'`; +var inputsZeroShotImageClassification = () => `"cats.jpg"`; +var modelInputSnippets = { + "audio-to-audio": inputsAudioToAudio, + "audio-classification": inputsAudioClassification, + "automatic-speech-recognition": inputsAutomaticSpeechRecognition, + "document-question-answering": inputsVisualQuestionAnswering, + "feature-extraction": inputsFeatureExtraction, + "fill-mask": inputsFillMask, + "image-classification": inputsImageClassification, + "image-to-text": inputsImageToText, + "image-segmentation": inputsImageSegmentation, + "object-detection": inputsObjectDetection, + "question-answering": inputsQuestionAnswering, + "sentence-similarity": inputsSentenceSimilarity, + summarization: inputsSummarization, + "table-question-answering": inputsTableQuestionAnswering, + "tabular-regression": inputsTabularPrediction, + "tabular-classification": inputsTabularPrediction, + "text-classification": inputsTextClassification, + "text-generation": inputsTextGeneration, + "text-to-image": inputsTextToImage, + "text-to-speech": inputsTextToSpeech, + "text-to-audio": inputsTextToAudio, + "text2text-generation": inputsText2TextGeneration, + "token-classification": inputsTokenClassification, + translation: inputsTranslation, + "zero-shot-classification": inputsZeroShotClassification, + "zero-shot-image-classification": inputsZeroShotImageClassification +}; +function getModelInputSnippet(model, noWrap = false, noQuotes = false) { + if (model.pipeline_tag) { + const inputs = modelInputSnippets[model.pipeline_tag]; + if (inputs) { + let result = inputs(model); + if (noWrap) { + result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " "); + } + if (noQuotes) { + const REGEX_QUOTES = /^"(.+)"$/s; + const match = result.match(REGEX_QUOTES); + result = match ? 
match[1] : result; + } + return result; + } + } + return "No input example has been defined for this model task."; +} + +// src/snippets/curl.ts +var curl_exports = {}; +__export(curl_exports, { + curlSnippets: () => curlSnippets, + getCurlInferenceSnippet: () => getCurlInferenceSnippet, + hasCurlInferenceSnippet: () => hasCurlInferenceSnippet, + snippetBasic: () => snippetBasic, + snippetFile: () => snippetFile, + snippetTextGeneration: () => snippetTextGeneration, + snippetZeroShotClassification: () => snippetZeroShotClassification +}); +var snippetBasic = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\ + -H 'Content-Type: application/json' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; +var snippetTextGeneration = (model, accessToken) => { + if (model.config?.tokenizer_config?.chat_template) { + return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\ +-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\ +-H 'Content-Type: application/json' \\ +-d '{ + "model": "${model.id}", + "messages": [{"role": "user", "content": "What is the capital of France?"}], + "max_tokens": 500, + "stream": false +}' +`; + } else { + return snippetBasic(model, accessToken); + } +}; +var snippetZeroShotClassification = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\ + -H 'Content-Type: application/json' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; +var snippetFile = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + --data-binary '@${getModelInputSnippet(model, true, true)}' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; +var curlSnippets 
= { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic, + "token-classification": snippetBasic, + "table-question-answering": snippetBasic, + "question-answering": snippetBasic, + "zero-shot-classification": snippetZeroShotClassification, + translation: snippetBasic, + summarization: snippetBasic, + "feature-extraction": snippetBasic, + "text-generation": snippetTextGeneration, + "text2text-generation": snippetBasic, + "fill-mask": snippetBasic, + "sentence-similarity": snippetBasic, + "automatic-speech-recognition": snippetFile, + "text-to-image": snippetBasic, + "text-to-speech": snippetBasic, + "text-to-audio": snippetBasic, + "audio-to-audio": snippetFile, + "audio-classification": snippetFile, + "image-classification": snippetFile, + "image-to-text": snippetFile, + "object-detection": snippetFile, + "image-segmentation": snippetFile +}; +function getCurlInferenceSnippet(model, accessToken) { + return model.pipeline_tag && model.pipeline_tag in curlSnippets ? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? 
"" : ""; +} +function hasCurlInferenceSnippet(model) { + return !!model.pipeline_tag && model.pipeline_tag in curlSnippets; +} + +// src/snippets/python.ts +var python_exports = {}; +__export(python_exports, { + getPythonInferenceSnippet: () => getPythonInferenceSnippet, + hasPythonInferenceSnippet: () => hasPythonInferenceSnippet, + pythonSnippets: () => pythonSnippets, + snippetBasic: () => snippetBasic2, + snippetConversational: () => snippetConversational, + snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering, + snippetFile: () => snippetFile2, + snippetTabular: () => snippetTabular, + snippetTextToAudio: () => snippetTextToAudio, + snippetTextToImage: () => snippetTextToImage, + snippetZeroShotClassification: () => snippetZeroShotClassification2, + snippetZeroShotImageClassification: () => snippetZeroShotImageClassification +}); +var snippetConversational = (model, accessToken) => `from huggingface_hub import InferenceClient + +client = InferenceClient( + "${model.id}", + token="${accessToken || "{API_TOKEN}"}", +) + +for message in client.chat_completion( + messages=[{"role": "user", "content": "What is the capital of France?"}], + max_tokens=500, + stream=True, +): + print(message.choices[0].delta.content, end="") +`; +var snippetZeroShotClassification2 = (model) => `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, + "parameters": {"candidate_labels": ["refund", "legal", "faq"]}, +})`; +var snippetZeroShotImageClassification = (model) => `def query(data): + with open(data["image_path"], "rb") as f: + img = f.read() + payload={ + "parameters": data["parameters"], + "inputs": base64.b64encode(img).decode("utf-8") + } + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "image_path": ${getModelInputSnippet(model)}, + "parameters": {"candidate_labels": 
["cat", "dog", "llama"]}, +})`; +var snippetBasic2 = (model) => `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, +})`; +var snippetFile2 = (model) => `def query(filename): + with open(filename, "rb") as f: + data = f.read() + response = requests.post(API_URL, headers=headers, data=data) + return response.json() + +output = query(${getModelInputSnippet(model)})`; +var snippetTextToImage = (model) => `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content +image_bytes = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the image with PIL.Image for example +import io +from PIL import Image +image = Image.open(io.BytesIO(image_bytes))`; +var snippetTabular = (model) => `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content +response = query({ + "inputs": {"data": ${getModelInputSnippet(model)}}, +})`; +var snippetTextToAudio = (model) => { + if (model.library_name === "transformers") { + return `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content + +audio_bytes = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the audio with IPython.display for example +from IPython.display import Audio +Audio(audio_bytes)`; + } else { + return `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +audio, sampling_rate = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the audio with IPython.display for example +from IPython.display import Audio +Audio(audio, rate=sampling_rate)`; + } +}; +var snippetDocumentQuestionAnswering = (model) => `def query(payload): + with open(payload["image"], "rb") as f: + img = f.read() + payload["image"] = 
base64.b64encode(img).decode("utf-8") + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, +})`; +var pythonSnippets = { + // Same order as in tasks/src/pipelines.ts + "text-classification": snippetBasic2, + "token-classification": snippetBasic2, + "table-question-answering": snippetBasic2, + "question-answering": snippetBasic2, + "zero-shot-classification": snippetZeroShotClassification2, + translation: snippetBasic2, + summarization: snippetBasic2, + "feature-extraction": snippetBasic2, + "text-generation": snippetBasic2, + "text2text-generation": snippetBasic2, + "fill-mask": snippetBasic2, + "sentence-similarity": snippetBasic2, + "automatic-speech-recognition": snippetFile2, + "text-to-image": snippetTextToImage, + "text-to-speech": snippetTextToAudio, + "text-to-audio": snippetTextToAudio, + "audio-to-audio": snippetFile2, + "audio-classification": snippetFile2, + "image-classification": snippetFile2, + "tabular-regression": snippetTabular, + "tabular-classification": snippetTabular, + "object-detection": snippetFile2, + "image-segmentation": snippetFile2, + "document-question-answering": snippetDocumentQuestionAnswering, + "image-to-text": snippetFile2, + "zero-shot-image-classification": snippetZeroShotImageClassification +}; +function getPythonInferenceSnippet(model, accessToken) { + if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) { + return snippetConversational(model, accessToken); + } else { + const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? "" : ""; + return `import requests + +API_URL = "https://api-inference.huggingface.co/models/${model.id}" +headers = {"Authorization": ${accessToken ? 
`"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}} + +${body}`; + } +} +function hasPythonInferenceSnippet(model) { + return !!model.pipeline_tag && model.pipeline_tag in pythonSnippets; +} + +// src/snippets/js.ts +var js_exports = {}; +__export(js_exports, { + getJsInferenceSnippet: () => getJsInferenceSnippet, + hasJsInferenceSnippet: () => hasJsInferenceSnippet, + jsSnippets: () => jsSnippets, + snippetBasic: () => snippetBasic3, + snippetFile: () => snippetFile3, + snippetTextGeneration: () => snippetTextGeneration2, + snippetTextToAudio: () => snippetTextToAudio2, + snippetTextToImage: () => snippetTextToImage2, + snippetZeroShotClassification: () => snippetZeroShotClassification3 +}); +var snippetBasic3 = (model, accessToken) => `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.json(); + return result; +} + +query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + console.log(JSON.stringify(response)); +});`; +var snippetTextGeneration2 = (model, accessToken) => { + if (model.config?.tokenizer_config?.chat_template) { + return `import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference("${accessToken || `{API_TOKEN}`}"); + +for await (const chunk of inference.chatCompletionStream({ + model: "${model.id}", + messages: [{ role: "user", content: "What is the capital of France?" 
}], + max_tokens: 500, +})) { + process.stdout.write(chunk.choices[0]?.delta?.content || ""); +} +`; + } else { + return snippetBasic3(model, accessToken); + } +}; +var snippetZeroShotClassification3 = (model, accessToken) => `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.json(); + return result; +} + +query({"inputs": ${getModelInputSnippet( + model +)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => { + console.log(JSON.stringify(response)); +});`; +var snippetTextToImage2 = (model, accessToken) => `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.blob(); + return result; +} +query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + // Use image +});`; +var snippetTextToAudio2 = (model, accessToken) => { + const commonSnippet = `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + );`; + if (model.library_name === "transformers") { + return commonSnippet + ` + const result = await response.blob(); + return result; + } + query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + // Returns a byte object of the Audio wavform. Use it directly! 
+ });`; + } else { + return commonSnippet + ` + const result = await response.json(); + return result; + } + + query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + console.log(JSON.stringify(response)); + });`; + } +}; +var snippetFile3 = (model, accessToken) => `async function query(filename) { + const data = fs.readFileSync(filename); + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: data, + } + ); + const result = await response.json(); + return result; +} + +query(${getModelInputSnippet(model)}).then((response) => { + console.log(JSON.stringify(response)); +});`; +var jsSnippets = { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic3, + "token-classification": snippetBasic3, + "table-question-answering": snippetBasic3, + "question-answering": snippetBasic3, + "zero-shot-classification": snippetZeroShotClassification3, + translation: snippetBasic3, + summarization: snippetBasic3, + "feature-extraction": snippetBasic3, + "text-generation": snippetTextGeneration2, + "text2text-generation": snippetBasic3, + "fill-mask": snippetBasic3, + "sentence-similarity": snippetBasic3, + "automatic-speech-recognition": snippetFile3, + "text-to-image": snippetTextToImage2, + "text-to-speech": snippetTextToAudio2, + "text-to-audio": snippetTextToAudio2, + "audio-to-audio": snippetFile3, + "audio-classification": snippetFile3, + "image-classification": snippetFile3, + "image-to-text": snippetFile3, + "object-detection": snippetFile3, + "image-segmentation": snippetFile3 +}; +function getJsInferenceSnippet(model, accessToken) { + return model.pipeline_tag && model.pipeline_tag in jsSnippets ? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? 
"" : ""; +} +function hasJsInferenceSnippet(model) { + return !!model.pipeline_tag && model.pipeline_tag in jsSnippets; +} + +// src/hardware.ts +var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL = 10 ** 14; +var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL_BIOLOGY = 10 ** 11; +var TFLOPS_THRESHOLD_WHITE_HOUSE_CLUSTER = 10 ** 8; +var TFLOPS_THRESHOLD_EU_AI_ACT_MODEL_TRAINING_TOTAL = 10 ** 13; +var DEFAULT_MEMORY_OPTIONS = [8, 16, 24, 32, 40, 48, 64, 80, 96, 128, 256, 512]; +var SKUS = { + GPU: { + NVIDIA: { + H100: { + tflops: 267.6, + memory: [80] + }, + L40: { + tflops: 90.52, + memory: [48] + }, + "RTX 6000 Ada": { + tflops: 91.1, + memory: [48] + }, + "RTX 5880 Ada": { + tflops: 69.3, + memory: [48] + }, + "RTX 5000 Ada": { + tflops: 65.3, + memory: [32] + }, + "RTX 4500 Ada": { + tflops: 39.6, + memory: [24] + }, + "RTX 4000 Ada": { + tflops: 26.7, + memory: [20] + }, + "RTX 4000 SFF Ada": { + tflops: 19.2, + memory: [20] + }, + "RTX 2000 Ada": { + tflops: 12, + memory: [16] + }, + A100: { + tflops: 77.97, + memory: [80, 40] + }, + A40: { + tflops: 37.42, + memory: [48] + }, + A10: { + tflops: 31.24, + memory: [24] + }, + "RTX 4090": { + tflops: 82.58, + memory: [24] + }, + "RTX 4090D": { + tflops: 79.49, + memory: [24] + }, + "RTX 4080 SUPER": { + tflops: 52.2, + memory: [16] + }, + "RTX 4080": { + tflops: 48.7, + memory: [16] + }, + "RTX 4070": { + tflops: 29.15, + memory: [12] + }, + "RTX 4070 Ti": { + tflops: 40.09, + memory: [12] + }, + "RTX 4070 Super": { + tflops: 35.48, + memory: [12] + }, + "RTX 4070 Ti Super": { + tflops: 44.1, + memory: [16] + }, + "RTX 4060": { + tflops: 15.11, + memory: [8] + }, + "RTX 4060 Ti": { + tflops: 22.06, + memory: [8, 16] + }, + "RTX 3090": { + tflops: 35.58, + memory: [24] + }, + "RTX 3090 Ti": { + tflops: 40, + memory: [24] + }, + "RTX 3080": { + tflops: 30.6, + memory: [12, 10] + }, + "RTX 3080 Ti": { + tflops: 34.1, + memory: [12] + }, + "RTX 3070": { + tflops: 20.31, + memory: [8] + }, + "RTX 3070 Ti": { + 
tflops: 21.75, + memory: [8] + }, + "RTX 3070 Ti Laptop": { + tflops: 16.6, + memory: [8] + }, + "RTX 3060 Ti": { + tflops: 16.2, + memory: [8] + }, + "RTX 3060": { + tflops: 12.74, + memory: [12, 8] + }, + "RTX 2070": { + tflops: 14.93, + memory: [8] + }, + "RTX 3050 Mobile": { + tflops: 7.639, + memory: [6] + }, + "RTX 2060 Mobile": { + tflops: 9.22, + memory: [6] + }, + "GTX 1080 Ti": { + tflops: 11.34, + // float32 (GPU does not support native float16) + memory: [11] + }, + "GTX 1070 Ti": { + tflops: 8.2, + // float32 (GPU does not support native float16) + memory: [8] + }, + "RTX Titan": { + tflops: 32.62, + memory: [24] + }, + "GTX 1660": { + tflops: 10.05, + memory: [6] + }, + "GTX 1650 Mobile": { + tflops: 6.39, + memory: [4] + }, + T4: { + tflops: 65.13, + memory: [16] + }, + V100: { + tflops: 28.26, + memory: [32, 16] + }, + "Quadro P6000": { + tflops: 12.63, + // float32 (GPU does not support native float16) + memory: [24] + }, + P40: { + tflops: 11.76, + // float32 (GPU does not support native float16) + memory: [24] + } + }, + AMD: { + MI300: { + tflops: 383, + memory: [192] + }, + MI250: { + tflops: 362.1, + memory: [128] + }, + MI210: { + tflops: 181, + memory: [64] + }, + MI100: { + tflops: 184.6, + memory: [32] + }, + "RX 7900 XTX": { + tflops: 122.8, + memory: [24] + }, + "RX 7900 XT": { + tflops: 103, + memory: [20] + }, + "RX 7900 GRE": { + tflops: 91.96, + memory: [16] + }, + "RX 7800 XT": { + tflops: 74.65, + memory: [16] + }, + "RX 7700 XT": { + tflops: 70.34, + memory: [12] + }, + "RX 7600 XT": { + tflops: 45.14, + memory: [16, 8] + }, + "RX 6950 XT": { + tflops: 47.31, + memory: [16] + }, + "RX 6800": { + tflops: 32.33, + memory: [16] + }, + "Radeon Pro VII": { + tflops: 26.11, + memory: [16] + } + } + }, + CPU: { + Intel: { + "Xeon 4th Generation (Sapphire Rapids)": { + tflops: 1.3 + }, + "Xeon 3th Generation (Ice Lake)": { + tflops: 0.8 + }, + "Xeon 2th Generation (Cascade Lake)": { + tflops: 0.55 + }, + "Intel Core 13th Generation (i9)": 
{ + tflops: 0.85 + }, + "Intel Core 13th Generation (i7)": { + tflops: 0.82 + }, + "Intel Core 13th Generation (i5)": { + tflops: 0.68 + }, + "Intel Core 13th Generation (i3)": { + tflops: 0.57 + }, + "Intel Core 12th Generation (i9)": { + tflops: 0.79 + }, + "Intel Core 12th Generation (i7)": { + tflops: 0.77 + }, + "Intel Core 12th Generation (i5)": { + tflops: 0.65 + }, + "Intel Core 12th Generation (i3)": { + tflops: 0.53 + }, + "Intel Core 11th Generation (i9)": { + tflops: 0.7 + }, + "Intel Core 11th Generation (i7)": { + tflops: 0.6 + }, + "Intel Core 11th Generation (i5)": { + tflops: 0.5 + }, + "Intel Core 11th Generation (i3)": { + tflops: 0.35 + }, + "Intel Core 10th Generation (i9)": { + tflops: 0.46 + }, + "Intel Core 10th Generation (i7)": { + tflops: 0.46 + }, + "Intel Core 10th Generation (i5)": { + tflops: 0.46 + }, + "Intel Core 10th Generation (i3)": { + tflops: 0.44 + } + }, + AMD: { + "EPYC 4th Generation (Genoa)": { + tflops: 5 + }, + "EPYC 3th Generation (Milan)": { + tflops: 2.4 + }, + "EPYC 2th Generation (Rome)": { + tflops: 0.6 + }, + "EPYC 1st Generation (Naples)": { + tflops: 0.6 + }, + "Ryzen Zen4 7000 (Ryzen 9)": { + tflops: 0.56 + }, + "Ryzen Zen4 7000 (Ryzen 7)": { + tflops: 0.56 + }, + "Ryzen Zen4 7000 (Ryzen 5)": { + tflops: 0.56 + }, + "Ryzen Zen3 5000 (Ryzen 9)": { + tflops: 1.33 + }, + "Ryzen Zen3 5000 (Ryzen 7)": { + tflops: 1.33 + }, + "Ryzen Zen3 5000 (Ryzen 5)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Threadripper)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Ryzen 9)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Ryzen 7)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Ryzen 5)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Ryzen 3)": { + tflops: 0.72 + } + } + }, + "Apple Silicon": { + "-": { + "Apple M1": { + tflops: 2.6, + memory: [8, 16] + }, + "Apple M1 Pro": { + tflops: 5.2, + memory: [16, 24, 32] + }, + "Apple M1 Max": { + tflops: 10.4, + memory: [16, 24, 32, 64] + }, + "Apple M1 Ultra": { + tflops: 21, + memory: [16, 
24, 32, 64, 96, 128] + }, + "Apple M2": { + tflops: 3.6, + memory: [8, 16, 24] + }, + "Apple M2 Pro": { + tflops: 13.6, + memory: [16, 24, 32] + }, + "Apple M2 Max": { + tflops: 13.49, + memory: [32, 64, 96] + }, + "Apple M2 Ultra": { + tflops: 27.2, + memory: [64, 96, 128, 192] + }, + "Apple M3": { + tflops: 2.84, + memory: [8, 16, 24] + }, + "Apple M3 Pro": { + tflops: 14, + memory: [18, 36] + }, + "Apple M3 Max": { + tflops: 14.2, + memory: [36, 48, 64, 96, 128] + } + } + } +}; + +// src/local-apps.ts +function isGgufModel(model) { + return model.tags.includes("gguf"); +} +var snippetLlamacpp = (model, filepath) => { + return [ + `# Option 1: use llama.cpp with brew +brew install llama.cpp + +# Load and run the model +llama \\ + --hf-repo "${model.id}" \\ + --hf-file ${filepath ?? "{{GGUF_FILE}}"} \\ + -p "I believe the meaning of life is" \\ + -n 128`, + `# Option 2: build llama.cpp from source with curl support +git clone https://github.com/ggerganov/llama.cpp.git +cd llama.cpp +LLAMA_CURL=1 make + +# Load and run the model +./main \\ + --hf-repo "${model.id}" \\ + -m ${filepath ?? "{{GGUF_FILE}}"} \\ + -p "I believe the meaning of life is" \\ + -n 128` + ]; +}; +var LOCAL_APPS = { + "llama.cpp": { + prettyLabel: "llama.cpp", + docsUrl: "https://github.com/ggerganov/llama.cpp", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + snippet: snippetLlamacpp + }, + lmstudio: { + prettyLabel: "LM Studio", + docsUrl: "https://lmstudio.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model, filepath) => new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? 
`&file=${filepath}` : ""}`) + }, + jan: { + prettyLabel: "Jan", + docsUrl: "https://jan.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`jan://models/huggingface/${model.id}`) + }, + backyard: { + prettyLabel: "Backyard AI", + docsUrl: "https://backyard.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`https://backyard.ai/hf/model/${model.id}`) + }, + sanctum: { + prettyLabel: "Sanctum", + docsUrl: "https://sanctum.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`sanctum://open_from_hf?model=${model.id}`) + }, + jellybox: { + prettyLabel: "Jellybox", + docsUrl: "https://jellybox.com", + mainTask: "text-generation", + displayOnModelPage: (model) => isGgufModel(model) || model.library_name === "diffusers" && model.tags.includes("safetensors") && (model.pipeline_tag === "text-to-image" || model.tags.includes("lora")), + deeplink: (model) => { + if (isGgufModel(model)) { + return new URL(`jellybox://llm/models/huggingface/LLM/${model.id}`); + } else if (model.tags.includes("lora")) { + return new URL(`jellybox://image/models/huggingface/ImageLora/${model.id}`); + } else { + return new URL(`jellybox://image/models/huggingface/Image/${model.id}`); + } + } + }, + msty: { + prettyLabel: "Msty", + docsUrl: "https://msty.app", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`msty://models/search/hf/${model.id}`) + }, + recursechat: { + prettyLabel: "RecurseChat", + docsUrl: "https://recurse.chat", + mainTask: "text-generation", + macOSOnly: true, + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`recursechat://new-hf-gguf-model?hf-model-id=${model.id}`) + }, + drawthings: { + prettyLabel: "Draw Things", + docsUrl: "https://drawthings.ai", + mainTask: "text-to-image", + macOSOnly: true, + displayOnModelPage: (model) => model.library_name === "diffusers" 
&& (model.pipeline_tag === "text-to-image" || model.tags.includes("lora")), + deeplink: (model) => { + if (model.tags.includes("lora")) { + return new URL(`https://drawthings.ai/import/diffusers/pipeline.load_lora_weights?repo_id=${model.id}`); + } else { + return new URL(`https://drawthings.ai/import/diffusers/pipeline.from_pretrained?repo_id=${model.id}`); + } + } + }, + diffusionbee: { + prettyLabel: "DiffusionBee", + docsUrl: "https://diffusionbee.com", + mainTask: "text-to-image", + macOSOnly: true, + comingSoon: true, + displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image", + deeplink: (model) => new URL(`diffusionbee://open_from_hf?model=${model.id}`) + } +}; + +// src/dataset-libraries.ts +var DATASET_LIBRARIES_UI_ELEMENTS = { + mlcroissant: { + prettyLabel: "Croissant", + repoName: "croissant", + repoUrl: "https://github.com/mlcommons/croissant/tree/main/python/mlcroissant", + docsUrl: "https://github.com/mlcommons/croissant/blob/main/python/mlcroissant/README.md" + }, + webdataset: { + prettyLabel: "WebDataset", + repoName: "webdataset", + repoUrl: "https://github.com/webdataset/webdataset", + docsUrl: "https://huggingface.co/docs/hub/datasets-webdataset" + }, + datasets: { + prettyLabel: "Datasets", + repoName: "datasets", + repoUrl: "https://github.com/huggingface/datasets", + docsUrl: "https://huggingface.co/docs/hub/datasets-usage" + }, + pandas: { + prettyLabel: "pandas", + repoName: "pandas", + repoUrl: "https://github.com/pandas-dev/pandas", + docsUrl: "https://huggingface.co/docs/hub/datasets-pandas" + }, + dask: { + prettyLabel: "Dask", + repoName: "dask", + repoUrl: "https://github.com/dask/dask", + docsUrl: "https://huggingface.co/docs/hub/datasets-dask" + }, + distilabel: { + prettyLabel: "Distilabel", + repoName: "distilabel", + repoUrl: "https://github.com/argilla-io/distilabel", + docsUrl: "https://distilabel.argilla.io" + }, + fiftyone: { + prettyLabel: "FiftyOne", + repoName: 
"fiftyone", + repoUrl: "https://github.com/voxel51/fiftyone", + docsUrl: "https://docs.voxel51.com" + }, + argilla: { + prettyLabel: "Argilla", + repoName: "argilla", + repoUrl: "https://github.com/argilla-io/argilla", + docsUrl: "https://argilla-io.github.io/argilla" + }, + polars: { + prettyLabel: "Polars", + repoName: "polars", + repoUrl: "https://github.com/pola-rs/polars", + docsUrl: "https://docs.pola.rs/" + } +}; +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + ALL_DISPLAY_MODEL_LIBRARY_KEYS, + ALL_MODEL_LIBRARY_KEYS, + DATASET_LIBRARIES_UI_ELEMENTS, + DEFAULT_MEMORY_OPTIONS, + LIBRARY_TASK_MAPPING, + LOCAL_APPS, + MAPPING_DEFAULT_WIDGET, + MODALITIES, + MODALITY_LABELS, + MODEL_LIBRARIES_UI_ELEMENTS, + PIPELINE_DATA, + PIPELINE_TYPES, + PIPELINE_TYPES_SET, + SKUS, + SPECIAL_TOKENS_ATTRIBUTES, + SUBTASK_TYPES, + TASKS_DATA, + TASKS_MODEL_LIBRARIES, + snippets +}); diff --git a/data/node_modules/@huggingface/tasks/dist/index.js b/data/node_modules/@huggingface/tasks/dist/index.js new file mode 100644 index 0000000000000000000000000000000000000000..c9eed005c05e2b5a43518130d0cb62b4d4d39676 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/index.js @@ -0,0 +1,6630 @@ +var __defProp = Object.defineProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; + +// src/library-to-tasks.ts +var LIBRARY_TASK_MAPPING = { + "adapter-transformers": ["question-answering", "text-classification", "token-classification"], + allennlp: ["question-answering"], + asteroid: [ + // "audio-source-separation", + "audio-to-audio" + ], + bertopic: ["text-classification"], + diffusers: ["image-to-image", "text-to-image"], + doctr: ["object-detection"], + espnet: ["text-to-speech", "automatic-speech-recognition"], + fairseq: ["text-to-speech", "audio-to-audio"], + fastai: ["image-classification"], + fasttext: ["feature-extraction", "text-classification"], + 
flair: ["token-classification"], + k2: ["automatic-speech-recognition"], + keras: ["image-classification"], + nemo: ["automatic-speech-recognition"], + open_clip: ["zero-shot-classification", "zero-shot-image-classification"], + paddlenlp: ["fill-mask", "summarization", "zero-shot-classification"], + peft: ["text-generation"], + "pyannote-audio": ["automatic-speech-recognition"], + "sentence-transformers": ["feature-extraction", "sentence-similarity"], + setfit: ["text-classification"], + sklearn: ["tabular-classification", "tabular-regression", "text-classification"], + spacy: ["token-classification", "text-classification", "sentence-similarity"], + "span-marker": ["token-classification"], + speechbrain: [ + "audio-classification", + "audio-to-audio", + "automatic-speech-recognition", + "text-to-speech", + "text2text-generation" + ], + stanza: ["token-classification"], + timm: ["image-classification"], + transformers: [ + "audio-classification", + "automatic-speech-recognition", + "depth-estimation", + "document-question-answering", + "feature-extraction", + "fill-mask", + "image-classification", + "image-segmentation", + "image-to-image", + "image-to-text", + "object-detection", + "question-answering", + "summarization", + "table-question-answering", + "text2text-generation", + "text-classification", + "text-generation", + "text-to-audio", + "text-to-speech", + "token-classification", + "translation", + "video-classification", + "visual-question-answering", + "zero-shot-classification", + "zero-shot-image-classification", + "zero-shot-object-detection" + ], + mindspore: ["image-classification"] +}; + +// src/default-widget-inputs.ts +var MAPPING_EN = /* @__PURE__ */ new Map([ + ["text-classification", [`I like you. 
I love you`]], + [ + "token-classification", + [ + `My name is Wolfgang and I live in Berlin`, + `My name is Sarah and I live in London`, + `My name is Clara and I live in Berkeley, California.` + ] + ], + [ + "table-question-answering", + [ + { + text: `How many stars does the transformers repository have?`, + table: { + Repository: ["Transformers", "Datasets", "Tokenizers"], + Stars: [36542, 4512, 3934], + Contributors: [651, 77, 34], + "Programming language": ["Python", "Python", "Rust, Python and NodeJS"] + } + } + ] + ], + [ + "question-answering", + [ + { + text: `Where do I live?`, + context: `My name is Wolfgang and I live in Berlin` + }, + { + text: `Where do I live?`, + context: `My name is Sarah and I live in London` + }, + { + text: `What's my name?`, + context: `My name is Clara and I live in Berkeley.` + }, + { + text: `Which name is also used to describe the Amazon rainforest in English?`, + context: `The Amazon rainforest (Portuguese: Floresta Amaz\xF4nica or Amaz\xF4nia; Spanish: Selva Amaz\xF3nica, Amazon\xEDa or usually Amazonia; French: For\xEAt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. 
The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.` + } + ] + ], + [ + "zero-shot-classification", + [ + { + text: "I have a problem with my iphone that needs to be resolved asap!!", + candidate_labels: "urgent, not urgent, phone, tablet, computer", + multi_class: true + }, + { + text: "Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.", + candidate_labels: "mobile, website, billing, account access", + multi_class: false + }, + { + text: "A new model offers an explanation for how the Galilean satellites formed around the solar system\u2019s largest world. Konstantin Batygin did not set out to solve one of the solar system\u2019s most puzzling mysteries when he went for a run up a hill in Nice, France. Dr. Batygin, a Caltech researcher, best known for his contributions to the search for the solar system\u2019s missing \u201CPlanet Nine,\u201D spotted a beer bottle. At a steep, 20 degree grade, he wondered why it wasn\u2019t rolling down the hill. He realized there was a breeze at his back holding the bottle in place. Then he had a thought that would only pop into the mind of a theoretical astrophysicist: \u201COh! This is how Europa formed.\u201D Europa is one of Jupiter\u2019s four large Galilean moons. And in a paper published Monday in the Astrophysical Journal, Dr. 
Batygin and a co-author, Alessandro Morbidelli, a planetary scientist at the C\xF4te d\u2019Azur Observatory in France, present a theory explaining how some moons form around gas giants like Jupiter and Saturn, suggesting that millimeter-sized grains of hail produced during the solar system\u2019s formation became trapped around these massive worlds, taking shape one at a time into the potentially habitable moons we know today.", + candidate_labels: "space & cosmos, scientific discovery, microbiology, robots, archeology", + multi_class: true + } + ] + ], + ["translation", [`My name is Wolfgang and I live in Berlin`, `My name is Sarah and I live in London`]], + [ + "summarization", + [ + `The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.` + ] + ], + [ + "conversational", + [ + `Hey my name is Julien! How are you?`, + `Hey my name is Thomas! How are you?`, + `Hey my name is Mariama! How are you?`, + `Hey my name is Clara! How are you?`, + `Hey my name is Julien! 
How are you?`, + `Hi.` + ] + ], + [ + "text-generation", + [ + `My name is Julien and I like to`, + `My name is Thomas and my main`, + `My name is Mariama, my favorite`, + `My name is Clara and I am`, + `My name is Lewis and I like to`, + `My name is Merve and my favorite`, + `My name is Teven and I am`, + `Once upon a time,` + ] + ], + ["fill-mask", [`Paris is the of France.`, `The goal of life is .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "That is a happy person", + sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"] + } + ] + ] +]); +var MAPPING_ZH = /* @__PURE__ */ new Map([ + ["text-classification", [`\u6211\u559C\u6B22\u4F60\u3002 \u6211\u7231\u4F60`]], + ["token-classification", [`\u6211\u53EB\u6C83\u5C14\u592B\u5188\uFF0C\u6211\u4F4F\u5728\u67CF\u6797\u3002`, `\u6211\u53EB\u8428\u62C9\uFF0C\u6211\u4F4F\u5728\u4F26\u6566\u3002`, `\u6211\u53EB\u514B\u62C9\u62C9\uFF0C\u6211\u4F4F\u5728\u52A0\u5DDE\u4F2F\u514B\u5229\u3002`]], + [ + "question-answering", + [ + { + text: `\u6211\u4F4F\u5728\u54EA\u91CC\uFF1F`, + context: `\u6211\u53EB\u6C83\u5C14\u592B\u5188\uFF0C\u6211\u4F4F\u5728\u67CF\u6797\u3002` + }, + { + text: `\u6211\u4F4F\u5728\u54EA\u91CC\uFF1F`, + context: `\u6211\u53EB\u8428\u62C9\uFF0C\u6211\u4F4F\u5728\u4F26\u6566\u3002` + }, + { + text: `\u6211\u7684\u540D\u5B57\u662F\u4EC0\u4E48\uFF1F`, + context: `\u6211\u53EB\u514B\u62C9\u62C9\uFF0C\u6211\u4F4F\u5728\u4F2F\u514B\u5229\u3002` + } + ] + ], + ["translation", [`\u6211\u53EB\u6C83\u5C14\u592B\u5188\uFF0C\u6211\u4F4F\u5728\u67CF\u6797\u3002`, `\u6211\u53EB\u8428\u62C9\uFF0C\u6211\u4F4F\u5728\u4F26\u6566\u3002`]], + [ + "zero-shot-classification", + [ + { + text: "\u623F\u95F4\u5E72\u51C0\u660E\u4EAE\uFF0C\u975E\u5E38\u4E0D\u9519", + candidate_labels: "\u8FD9\u662F\u4E00\u6761\u5DEE\u8BC4, \u8FD9\u662F\u4E00\u6761\u597D\u8BC4" + } + ] + ], + [ + "summarization", + [ + 
`\u8BE5\u5854\u9AD8324\u7C73\uFF081063\u82F1\u5C3A\uFF09\uFF0C\u4E0E\u4E00\u5E6281\u5C42\u7684\u5EFA\u7B51\u7269\u4E00\u6837\u9AD8\uFF0C\u662F\u5DF4\u9ECE\u6700\u9AD8\u7684\u5EFA\u7B51\u7269\u3002 \u5B83\u7684\u5E95\u5EA7\u662F\u65B9\u5F62\u7684\uFF0C\u6BCF\u8FB9\u957F125\u7C73\uFF08410\u82F1\u5C3A\uFF09\u3002 \u5728\u5EFA\u9020\u8FC7\u7A0B\u4E2D\uFF0C\u827E\u83F2\u5C14\u94C1\u5854\u8D85\u8FC7\u4E86\u534E\u76DB\u987F\u7EAA\u5FF5\u7891\uFF0C\u6210\u4E3A\u4E16\u754C\u4E0A\u6700\u9AD8\u7684\u4EBA\u9020\u7ED3\u6784\uFF0C\u5B83\u4FDD\u6301\u4E8641\u5E74\u7684\u5934\u8854\uFF0C\u76F4\u52301930\u5E74\u7EBD\u7EA6\u5E02\u7684\u514B\u83B1\u65AF\u52D2\u5927\u697C\u7AE3\u5DE5\u3002\u8FD9\u662F\u7B2C\u4E00\u4E2A\u5230\u8FBE300\u7C73\u9AD8\u5EA6\u7684\u7ED3\u6784\u3002 \u7531\u4E8E1957\u5E74\u5728\u5854\u9876\u589E\u52A0\u4E86\u5E7F\u64AD\u5929\u7EBF\uFF0C\u56E0\u6B64\u5B83\u73B0\u5728\u6BD4\u514B\u83B1\u65AF\u52D2\u5927\u53A6\u9AD85.2\u7C73\uFF0817\u82F1\u5C3A\uFF09\u3002 \u9664\u53D1\u5C04\u5668\u5916\uFF0C\u827E\u83F2\u5C14\u94C1\u5854\u662F\u6CD5\u56FD\u7B2C\u4E8C\u9AD8\u7684\u72EC\u7ACB\u5F0F\u5EFA\u7B51\uFF0C\u4EC5\u6B21\u4E8E\u7C73\u52B3\u9AD8\u67B6\u6865\u3002` + ] + ], + [ + "text-generation", + [`\u6211\u53EB\u6731\u5229\u5B89\uFF0C\u6211\u559C\u6B22`, `\u6211\u53EB\u6258\u9A6C\u65AF\uFF0C\u6211\u7684\u4E3B\u8981`, `\u6211\u53EB\u739B\u4E3D\u4E9A\uFF0C\u6211\u6700\u559C\u6B22\u7684`, `\u6211\u53EB\u514B\u62C9\u62C9\uFF0C\u6211\u662F`, `\u4ECE\u524D\uFF0C`] + ], + ["fill-mask", [`\u5DF4\u9ECE\u662F\u56FD\u7684\u9996\u90FD\u3002`, `\u751F\u6D3B\u7684\u771F\u8C1B\u662F\u3002`]], + [ + "sentence-similarity", + [ + { + source_sentence: "\u90A3\u662F \u500B\u5FEB\u6A02\u7684\u4EBA", + sentences: ["\u90A3\u662F \u689D\u5FEB\u6A02\u7684\u72D7", "\u90A3\u662F \u500B\u975E\u5E38\u5E78\u798F\u7684\u4EBA", "\u4ECA\u5929\u662F\u6674\u5929"] + } + ] + ] +]); +var MAPPING_FR = /* @__PURE__ */ new Map([ + ["text-classification", [`Je t'appr\xE9cie beaucoup. 
Je t'aime.`]], + ["token-classification", [`Mon nom est Wolfgang et je vis \xE0 Berlin`]], + [ + "question-answering", + [ + { + text: `O\xF9 est-ce que je vis?`, + context: `Mon nom est Wolfgang et je vis \xE0 Berlin` + } + ] + ], + ["translation", [`Mon nom est Wolfgang et je vis \xE0 Berlin`]], + [ + "summarization", + [ + `La tour fait 324 m\xE8tres (1,063 pieds) de haut, environ la m\xEAme hauteur qu'un immeuble de 81 \xE9tages, et est la plus haute structure de Paris. Sa base est carr\xE9e, mesurant 125 m\xE8tres (410 pieds) sur chaque c\xF4t\xE9. Durant sa construction, la tour Eiffel surpassa le Washington Monument pour devenir la plus haute structure construite par l'homme dans le monde, un titre qu'elle conserva pendant 41 ans jusqu'\xE0 l'ach\xE8vement du Chrysler Building \xE0 New-York City en 1930. Ce fut la premi\xE8re structure \xE0 atteindre une hauteur de 300 m\xE8tres. Avec l'ajout d'une antenne de radiodiffusion au sommet de la tour Eiffel en 1957, celle-ci redevint plus haute que le Chrysler Building de 5,2 m\xE8tres (17 pieds). En excluant les transmetteurs, elle est la seconde plus haute stucture autoportante de France apr\xE8s le viaduc de Millau.` + ] + ], + ["text-generation", [`Mon nom est Julien et j'aime`, `Mon nom est Thomas et mon principal`, `Il \xE9tait une fois`]], + ["fill-mask", [`Paris est la de la France.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "C'est une personne heureuse", + sentences: [ + "C'est un chien heureux", + "C'est une personne tr\xE8s heureuse", + "Aujourd'hui est une journ\xE9e ensoleill\xE9e" + ] + } + ] + ] +]); +var MAPPING_ES = /* @__PURE__ */ new Map([ + ["text-classification", [`Te quiero. 
Te amo.`]], + ["token-classification", [`Me llamo Wolfgang y vivo en Berlin`]], + [ + "question-answering", + [ + { + text: `\xBFD\xF3nde vivo?`, + context: `Me llamo Wolfgang y vivo en Berlin` + }, + { + text: `\xBFQui\xE9n invent\xF3 el submarino?`, + context: `Isaac Peral fue un murciano que invent\xF3 el submarino` + }, + { + text: `\xBFCu\xE1ntas personas hablan espa\xF1ol?`, + context: `El espa\xF1ol es el segundo idioma m\xE1s hablado del mundo con m\xE1s de 442 millones de hablantes` + } + ] + ], + [ + "translation", + [ + `Me llamo Wolfgang y vivo en Berlin`, + `Los ingredientes de una tortilla de patatas son: huevos, patatas y cebolla` + ] + ], + [ + "summarization", + [ + `La torre tiene 324 metros (1.063 pies) de altura, aproximadamente la misma altura que un edificio de 81 pisos y la estructura m\xE1s alta de Par\xEDs. Su base es cuadrada, mide 125 metros (410 pies) a cada lado. Durante su construcci\xF3n, la Torre Eiffel super\xF3 al Washington Monument para convertirse en la estructura artificial m\xE1s alta del mundo, un t\xEDtulo que mantuvo durante 41 a\xF1os hasta que el Chrysler Building en la ciudad de Nueva York se termin\xF3 en 1930. Fue la primera estructura en llegar Una altura de 300 metros. Debido a la adici\xF3n de una antena de transmisi\xF3n en la parte superior de la torre en 1957, ahora es m\xE1s alta que el Chrysler Building en 5,2 metros (17 pies). 
Excluyendo los transmisores, la Torre Eiffel es la segunda estructura independiente m\xE1s alta de Francia despu\xE9s del Viaducto de Millau.` + ] + ], + [ + "text-generation", + [ + `Me llamo Julien y me gusta`, + `Me llamo Thomas y mi principal`, + `Me llamo Manuel y trabajo en`, + `\xC9rase una vez,`, + `Si t\xFA me dices ven, ` + ] + ], + ["fill-mask", [`Mi nombre es y vivo en Nueva York.`, `El espa\xF1ol es un idioma muy en el mundo.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Esa es una persona feliz", + sentences: ["Ese es un perro feliz", "Esa es una persona muy feliz", "Hoy es un d\xEDa soleado"] + } + ] + ] +]); +var MAPPING_RU = /* @__PURE__ */ new Map([ + ["text-classification", [`\u0422\u044B \u043C\u043D\u0435 \u043D\u0440\u0430\u0432\u0438\u0448\u044C\u0441\u044F. \u042F \u0442\u0435\u0431\u044F \u043B\u044E\u0431\u043B\u044E`]], + ["token-classification", [`\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435`]], + [ + "question-answering", + [ + { + text: `\u0413\u0434\u0435 \u0436\u0438\u0432\u0443?`, + context: `\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435` + } + ] + ], + ["translation", [`\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435`]], + [ + "summarization", + [ + `\u0412\u044B\u0441\u043E\u0442\u0430 \u0431\u0430\u0448\u043D\u0438 \u0441\u043E\u0441\u0442\u0430\u0432\u043B\u044F\u0435\u0442 324 \u043C\u0435\u0442\u0440\u0430 (1063 \u0444\u0443\u0442\u0430), \u043F\u0440\u0438\u043C\u0435\u0440\u043D\u043E \u0442\u0430\u043A\u0430\u044F \u0436\u0435 
\u0432\u044B\u0441\u043E\u0442\u0430, \u043A\u0430\u043A \u0443 81-\u044D\u0442\u0430\u0436\u043D\u043E\u0433\u043E \u0437\u0434\u0430\u043D\u0438\u044F, \u0438 \u0441\u0430\u043C\u043E\u0435 \u0432\u044B\u0441\u043E\u043A\u043E\u0435 \u0441\u043E\u043E\u0440\u0443\u0436\u0435\u043D\u0438\u0435 \u0432 \u041F\u0430\u0440\u0438\u0436\u0435. \u0415\u0433\u043E \u043E\u0441\u043D\u043E\u0432\u0430\u043D\u0438\u0435 \u043A\u0432\u0430\u0434\u0440\u0430\u0442\u043D\u043E, \u0440\u0430\u0437\u043C\u0435\u0440\u043E\u043C 125 \u043C\u0435\u0442\u0440\u043E\u0432 (410 \u0444\u0443\u0442\u043E\u0432) \u0441 \u043B\u044E\u0431\u043E\u0439 \u0441\u0442\u043E\u0440\u043E\u043D\u044B. \u0412\u043E \u0432\u0440\u0435\u043C\u044F \u0441\u0442\u0440\u043E\u0438\u0442\u0435\u043B\u044C\u0441\u0442\u0432\u0430 \u042D\u0439\u0444\u0435\u043B\u0435\u0432\u0430 \u0431\u0430\u0448\u043D\u044F \u043F\u0440\u0435\u0432\u0437\u043E\u0448\u043B\u0430 \u043C\u043E\u043D\u0443\u043C\u0435\u043D\u0442 \u0412\u0430\u0448\u0438\u043D\u0433\u0442\u043E\u043D\u0430, \u0441\u0442\u0430\u0432 \u0441\u0430\u043C\u044B\u043C \u0432\u044B\u0441\u043E\u043A\u0438\u043C \u0438\u0441\u043A\u0443\u0441\u0441\u0442\u0432\u0435\u043D\u043D\u044B\u043C \u0441\u043E\u043E\u0440\u0443\u0436\u0435\u043D\u0438\u0435\u043C \u0432 \u043C\u0438\u0440\u0435, \u0438 \u044D\u0442\u043E\u0442 \u0442\u0438\u0442\u0443\u043B \u043E\u043D\u0430 \u0443\u0434\u0435\u0440\u0436\u0438\u0432\u0430\u043B\u0430 \u0432 \u0442\u0435\u0447\u0435\u043D\u0438\u0435 41 \u0433\u043E\u0434\u0430 \u0434\u043E \u0437\u0430\u0432\u0435\u0440\u0448\u0435\u043D\u0438\u044F \u0441\u0442\u0440\u043E\u0438\u0442\u0435\u043B\u044C\u0441\u0442\u0432\u043E \u0437\u0434\u0430\u043D\u0438\u044F \u041A\u0440\u0430\u0439\u0441\u043B\u0435\u0440 \u0432 \u041D\u044C\u044E-\u0419\u043E\u0440\u043A\u0435 \u0432 1930 \u0433\u043E\u0434\u0443. 
\u042D\u0442\u043E \u043F\u0435\u0440\u0432\u043E\u0435 \u0441\u043E\u043E\u0440\u0443\u0436\u0435\u043D\u0438\u0435 \u043A\u043E\u0442\u043E\u0440\u043E\u0435 \u0434\u043E\u0441\u0442\u0438\u0433\u043B\u043E \u0432\u044B\u0441\u043E\u0442\u044B 300 \u043C\u0435\u0442\u0440\u043E\u0432. \u0418\u0437-\u0437\u0430 \u0434\u043E\u0431\u0430\u0432\u043B\u0435\u043D\u0438\u044F \u0432\u0435\u0449\u0430\u0442\u0435\u043B\u044C\u043D\u043E\u0439 \u0430\u043D\u0442\u0435\u043D\u043D\u044B \u043D\u0430 \u0432\u0435\u0440\u0448\u0438\u043D\u0435 \u0431\u0430\u0448\u043D\u0438 \u0432 1957 \u0433\u043E\u0434\u0443 \u043E\u043D\u0430 \u0441\u0435\u0439\u0447\u0430\u0441 \u0432\u044B\u0448\u0435 \u0437\u0434\u0430\u043D\u0438\u044F \u041A\u0440\u0430\u0439\u0441\u043B\u0435\u0440 \u043D\u0430 5,2 \u043C\u0435\u0442\u0440\u0430 (17 \u0444\u0443\u0442\u043E\u0432). \u0417\u0430 \u0438\u0441\u043A\u043B\u044E\u0447\u0435\u043D\u0438\u0435\u043C \u043F\u0435\u0440\u0435\u0434\u0430\u0442\u0447\u0438\u043A\u043E\u0432, \u042D\u0439\u0444\u0435\u043B\u0435\u0432\u0430 \u0431\u0430\u0448\u043D\u044F \u044F\u0432\u043B\u044F\u0435\u0442\u0441\u044F \u0432\u0442\u043E\u0440\u043E\u0439 \u0441\u0430\u043C\u043E\u0439 \u0432\u044B\u0441\u043E\u043A\u043E\u0439 \u043E\u0442\u0434\u0435\u043B\u044C\u043D\u043E \u0441\u0442\u043E\u044F\u0449\u0435\u0439 \u0441\u0442\u0440\u0443\u043A\u0442\u0443\u0440\u043E\u0439 \u0432\u043E \u0424\u0440\u0430\u043D\u0446\u0438\u0438 \u043F\u043E\u0441\u043B\u0435 \u0432\u0438\u0430\u0434\u0443\u043A\u0430 \u041C\u0438\u0439\u043E.` + ] + ], + ["text-generation", [`\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0416\u044E\u043B\u044C\u0435\u043D \u0438`, `\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0422\u043E\u043C\u0430\u0441 \u0438 \u043C\u043E\u0439 \u043E\u0441\u043D\u043E\u0432\u043D\u043E\u0439`, `\u041E\u0434\u043D\u0430\u0436\u0434\u044B`]], + ["fill-mask", [`\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0438 
\u044F \u0438\u043D\u0436\u0435\u043D\u0435\u0440 \u0436\u0438\u0432\u0443\u0449\u0438\u0439 \u0432 \u041D\u044C\u044E-\u0419\u043E\u0440\u043A\u0435.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "\u042D\u0442\u043E \u0441\u0447\u0430\u0441\u0442\u043B\u0438\u0432\u044B\u0439 \u0447\u0435\u043B\u043E\u0432\u0435\u043A", + sentences: ["\u042D\u0442\u043E \u0441\u0447\u0430\u0441\u0442\u043B\u0438\u0432\u0430\u044F \u0441\u043E\u0431\u0430\u043A\u0430", "\u042D\u0442\u043E \u043E\u0447\u0435\u043D\u044C \u0441\u0447\u0430\u0441\u0442\u043B\u0438\u0432\u044B\u0439 \u0447\u0435\u043B\u043E\u0432\u0435\u043A", "\u0421\u0435\u0433\u043E\u0434\u043D\u044F \u0441\u043E\u043B\u043D\u0435\u0447\u043D\u044B\u0439 \u0434\u0435\u043D\u044C"] + } + ] + ] +]); +var MAPPING_UK = /* @__PURE__ */ new Map([ + ["translation", [`\u041C\u0435\u043D\u0435 \u0437\u0432\u0430\u0442\u0438 \u0412\u043E\u043B\u044C\u0444\u0491\u0430\u043D\u0491 \u0456 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0456\u043D\u0456.`]], + ["fill-mask", [`\u041C\u0435\u043D\u0435 \u0437\u0432\u0430\u0442\u0438 .`]] +]); +var MAPPING_IT = /* @__PURE__ */ new Map([ + ["text-classification", [`Mi piaci. 
Ti amo`]], + [ + "token-classification", + [ + `Mi chiamo Wolfgang e vivo a Berlino`, + `Mi chiamo Sarah e vivo a Londra`, + `Mi chiamo Clara e vivo a Berkeley in California.` + ] + ], + [ + "question-answering", + [ + { + text: `Dove vivo?`, + context: `Mi chiamo Wolfgang e vivo a Berlino` + }, + { + text: `Dove vivo?`, + context: `Mi chiamo Sarah e vivo a Londra` + }, + { + text: `Come mio chiamo?`, + context: `Mi chiamo Clara e vivo a Berkeley.` + } + ] + ], + ["translation", [`Mi chiamo Wolfgang e vivo a Berlino`, `Mi chiamo Sarah e vivo a Londra`]], + [ + "summarization", + [ + `La torre degli Asinelli \xE8 una delle cosiddette due torri di Bologna, simbolo della citt\xE0, situate in piazza di porta Ravegnana, all'incrocio tra le antiche strade San Donato (ora via Zamboni), San Vitale, Maggiore e Castiglione. Eretta, secondo la tradizione, fra il 1109 e il 1119 dal nobile Gherardo Asinelli, la torre \xE8 alta 97,20 metri, pende verso ovest per 2,23 metri e presenta all'interno una scalinata composta da 498 gradini. Ancora non si pu\xF2 dire con certezza quando e da chi fu costruita la torre degli Asinelli. 
Si presume che la torre debba il proprio nome a Gherardo Asinelli, il nobile cavaliere di fazione ghibellina al quale se ne attribuisce la costruzione, iniziata secondo una consolidata tradizione l'11 ottobre 1109 e terminata dieci anni dopo, nel 1119.` + ] + ], + [ + "text-generation", + [ + `Mi chiamo Loreto e mi piace`, + `Mi chiamo Thomas e il mio principale`, + `Mi chiamo Marianna, la mia cosa preferita`, + `Mi chiamo Clara e sono`, + `C'era una volta` + ] + ], + ["fill-mask", [`Roma \xE8 la d'Italia.`, `Lo scopo della vita \xE8 .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Questa \xE8 una persona felice", + sentences: ["Questo \xE8 un cane felice", "Questa \xE8 una persona molto felice", "Oggi \xE8 una giornata di sole"] + } + ] + ] +]); +var MAPPING_FA = /* @__PURE__ */ new Map([ + [ + "text-classification", + [`\u067E\u0631\u0648\u0698\u0647 \u0628\u0647 \u0645\u0648\u0642\u0639 \u062A\u062D\u0648\u06CC\u0644 \u0634\u062F \u0648 \u0647\u0645\u0647 \u0686\u06CC\u0632 \u062E\u0648\u0628 \u0628\u0648\u062F.`, `\u0633\u06CC\u0628\u200C\u0632\u0645\u06CC\u0646\u06CC \u0628\u06CC\u200C\u06A9\u06CC\u0641\u06CC\u062A \u0628\u0648\u062F.`, `\u0642\u06CC\u0645\u062A \u0648 \u06A9\u06CC\u0641\u06CC\u062A \u0639\u0627\u0644\u06CC`, `\u062E\u0648\u0628 \u0646\u0628\u0648\u062F \u0627\u0635\u0644\u0627`] + ], + [ + "token-classification", + [ + `\u0627\u06CC\u0646 \u0633\u0631\u06CC\u0627\u0644 \u0628\u0647 \u0635\u0648\u0631\u062A \u0631\u0633\u0645\u06CC \u062F\u0631 \u062A\u0627\u0631\u06CC\u062E \u062F\u0647\u0645 \u0645\u06CC \u06F2\u06F0\u06F1\u06F1 \u062A\u0648\u0633\u0637 \u0634\u0628\u06A9\u0647 \u0641\u0627\u06A9\u0633 \u0628\u0631\u0627\u06CC \u067E\u062E\u0634 \u0631\u0632\u0631\u0648 \u0634\u062F.`, + `\u062F\u0641\u062A\u0631 \u0645\u0631\u06A9\u0632\u06CC \u0634\u0631\u06A9\u062A \u067E\u0627\u0631\u0633\u200C\u0645\u06CC\u0646\u0648 \u062F\u0631 \u0634\u0647\u0631 \u0627\u0631\u0627\u06A9 \u062F\u0631 \u0627\u0633\u062A\u0627\u0646 
\u0645\u0631\u06A9\u0632\u06CC \u0642\u0631\u0627\u0631 \u062F\u0627\u0631\u062F.`, + `\u0648\u06CC \u062F\u0631 \u0633\u0627\u0644 \u06F2\u06F0\u06F1\u06F3 \u062F\u0631\u06AF\u0630\u0634\u062A \u0648 \u0645\u0633\u0626\u0648\u0644 \u062E\u0627\u06A9\u0633\u067E\u0627\u0631\u06CC \u0648 \u0627\u0642\u0648\u0627\u0645\u0634 \u0628\u0631\u0627\u06CC \u0627\u0648 \u0645\u0631\u0627\u0633\u0645 \u06CC\u0627\u062F\u0628\u0648\u062F \u06AF\u0631\u0641\u062A\u0646\u062F.` + ] + ], + [ + "question-answering", + [ + { + text: `\u0645\u0646 \u06A9\u062C\u0627 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u06A9\u0646\u0645\u061F`, + context: `\u0646\u0627\u0645 \u0645\u0646 \u067E\u0698\u0645\u0627\u0646 \u0627\u0633\u062A \u0648 \u062F\u0631 \u06AF\u0631\u06AF\u0627\u0646 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u06A9\u0646\u0645.` + }, + { + text: `\u0646\u0627\u0645\u0645 \u0686\u06CC\u0633\u062A \u0648 \u06A9\u062C\u0627 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u200C\u06A9\u0646\u0645\u061F`, + context: `\u0627\u0633\u0645\u0645 \u0633\u0627\u0631\u0627 \u0627\u0633\u062A \u0648 \u062F\u0631 \u0622\u0641\u0631\u06CC\u0642\u0627\u06CC \u062C\u0646\u0648\u0628\u06CC \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u06A9\u0646\u0645.` + }, + { + text: `\u0646\u0627\u0645 \u0645\u0646 \u0686\u06CC\u0633\u062A\u061F`, + context: `\u0645\u0646 \u0645\u0631\u06CC\u0645 \u0647\u0633\u062A\u0645 \u0648 \u062F\u0631 \u062A\u0628\u0631\u06CC\u0632 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u200C\u06A9\u0646\u0645.` + }, + { + text: `\u0628\u06CC\u0634\u062A\u0631\u06CC\u0646 \u0645\u0633\u0627\u062D\u062A \u062C\u0646\u06AF\u0644 \u0622\u0645\u0627\u0632\u0648\u0646 \u062F\u0631 \u06A9\u062F\u0627\u0645 \u06A9\u0634\u0648\u0631 \u0627\u0633\u062A\u061F`, + context: [ + "\u0622\u0645\u0627\u0632\u0648\u0646 \u0646\u0627\u0645 \u0628\u0632\u0631\u06AF\u200C\u062A\u0631\u06CC\u0646 \u062C\u0646\u06AF\u0644 \u0628\u0627\u0631\u0627\u0646\u06CC \u062C\u0647\u0627\u0646 \u0627\u0633\u062A 
\u06A9\u0647 \u062F\u0631 \u0634\u0645\u0627\u0644 \u0622\u0645\u0631\u06CC\u06A9\u0627\u06CC \u062C\u0646\u0648\u0628\u06CC \u0642\u0631\u0627\u0631 \u06AF\u0631\u0641\u062A\u0647 \u0648 \u0628\u06CC\u0634\u062A\u0631 \u0622\u0646 \u062F\u0631 \u062E\u0627\u06A9 \u0628\u0631\u0632\u06CC\u0644 \u0648 \u067E\u0631\u0648", + "\u062C\u0627\u06CC \u062F\u0627\u0631\u062F. \u0628\u06CC\u0634 \u0627\u0632 \u0646\u06CC\u0645\u06CC \u0627\u0632 \u0647\u0645\u0647 \u062C\u0646\u06AF\u0644\u200C\u0647\u0627\u06CC \u0628\u0627\u0631\u0627\u0646\u06CC \u0628\u0627\u0642\u06CC\u200C\u0645\u0627\u0646\u062F\u0647 \u062F\u0631 \u062C\u0647\u0627\u0646 \u062F\u0631 \u0622\u0645\u0627\u0632\u0648\u0646 \u0642\u0631\u0627\u0631 \u062F\u0627\u0631\u062F.", + "\u0645\u0633\u0627\u062D\u062A \u062C\u0646\u06AF\u0644\u200C\u0647\u0627\u06CC \u0622\u0645\u0627\u0632\u0648\u0646 \u06F5\u066B\u06F5 \u0645\u06CC\u0644\u06CC\u0648\u0646 \u06A9\u06CC\u0644\u0648\u0645\u062A\u0631 \u0645\u0631\u0628\u0639 \u0627\u0633\u062A \u06A9\u0647 \u0628\u06CC\u0646 \u06F9 \u06A9\u0634\u0648\u0631 \u062A\u0642\u0633\u06CC\u0645 \u0634\u062F\u0647\u200C\u0627\u0633\u062A." 
+ ].join("\n") + } + ] + ], + [ + "translation", + [ + "\u0628\u06CC\u0634\u062A\u0631 \u0645\u0633\u0627\u062D\u062A \u062C\u0646\u06AF\u0644\u200C\u0647\u0627\u06CC \u0622\u0645\u0627\u0632\u0648\u0646 \u062F\u0631 \u062D\u0648\u0636\u0647 \u0622\u0628\u0631\u06CC\u0632 \u0631\u0648\u062F \u0622\u0645\u0627\u0632\u0648\u0646 \u0648 \u06F1\u06F1\u06F0\u06F0 \u0634\u0627\u062E\u0647 \u0622\u0646 \u0648\u0627\u0642\u0639 \u0634\u062F\u0647\u200C\u0627\u0633\u062A.", + "\u0645\u0631\u062F\u0645\u0627\u0646 \u0646\u064E\u0628\u064E\u0637\u06CC \u0627\u0632 \u0647\u0632\u0627\u0631\u0647\u200C\u0647\u0627\u06CC \u06CC\u06A9\u0645 \u0648 \u062F\u0648\u0645 \u067E\u06CC\u0634 \u0627\u0632 \u0645\u06CC\u0644\u0627\u062F \u062F\u0631 \u0627\u06CC\u0646 \u0645\u0646\u0637\u0642\u0647 \u0632\u0646\u062F\u06AF\u06CC \u0645\u06CC\u200C\u06A9\u0631\u062F\u0646\u062F." + ] + ], + [ + "summarization", + [ + [ + "\u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0627\u062B\u0631 \u062D\u06A9\u06CC\u0645 \u0627\u0628\u0648\u0627\u0644\u0642\u0627\u0633\u0645 \u0641\u0631\u062F\u0648\u0633\u06CC \u062A\u0648\u0633\u06CC\u060C \u062D\u0645\u0627\u0633\u0647\u200C\u0627\u06CC \u0645\u0646\u0638\u0648\u0645\u060C \u0628\u0631 \u062D\u0633\u0628 \u062F\u0633\u062A \u0646\u0648\u0634\u062A\u0647\u200C\u0647\u0627\u06CC ", + "\u0645\u0648\u062C\u0648\u062F \u062F\u0631\u0628\u0631\u06AF\u06CC\u0631\u0646\u062F\u0647 \u0646\u0632\u062F\u06CC\u06A9 \u0628\u0647 \u06F5\u06F0\u066C\u06F0\u06F0\u06F0 \u0628\u06CC\u062A \u062A\u0627 \u0646\u0632\u062F\u06CC\u06A9 \u0628\u0647 \u06F6\u06F1\u066C\u06F0\u06F0\u06F0 \u0628\u06CC\u062A \u0648 \u06CC\u06A9\u06CC \u0627\u0632 ", + "\u0628\u0632\u0631\u06AF\u200C\u062A\u0631\u06CC\u0646 \u0648 \u0628\u0631\u062C\u0633\u062A\u0647\u200C\u062A\u0631\u06CC\u0646 \u0633\u0631\u0648\u062F\u0647\u200C\u0647\u0627\u06CC \u062D\u0645\u0627\u0633\u06CC \u062C\u0647\u0627\u0646 \u0627\u0633\u062A \u06A9\u0647 \u0633\u0631\u0627\u06CC\u0634 \u0622\u0646 
\u062F\u0633\u062A\u200C\u0622\u0648\u0631\u062F\u0650 ", + "\u062F\u0633\u062A\u200C\u06A9\u0645 \u0633\u06CC \u0633\u0627\u0644 \u06A9\u0627\u0631\u0650 \u067E\u06CC\u0648\u0633\u062A\u0647\u0654 \u0627\u06CC\u0646 \u0633\u062E\u0646\u200C\u0633\u0631\u0627\u06CC \u0646\u0627\u0645\u062F\u0627\u0631 \u0627\u06CC\u0631\u0627\u0646\u06CC \u0627\u0633\u062A. \u0645\u0648\u0636\u0648\u0639 \u0627\u06CC\u0646 \u0634\u0627\u0647\u06A9\u0627\u0631 \u0627\u062F\u0628\u06CC\u060C", + " \u0627\u0641\u0633\u0627\u0646\u0647\u200C\u0647\u0627 \u0648 \u062A\u0627\u0631\u06CC\u062E \u0627\u06CC\u0631\u0627\u0646 \u0627\u0632 \u0622\u063A\u0627\u0632 \u062A\u0627 \u062D\u0645\u0644\u0647\u0654 \u0639\u0631\u0628\u200C\u0647\u0627 \u0628\u0647 \u0627\u06CC\u0631\u0627\u0646 \u062F\u0631 \u0633\u062F\u0647\u0654 \u0647\u0641\u062A\u0645 \u0645\u06CC\u0644\u0627\u062F\u06CC \u0627\u0633\u062A", + " (\u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0627\u0632 \u0633\u0647 \u0628\u062E\u0634 \u0627\u0633\u0637\u0648\u0631\u0647\u060C \u067E\u0647\u0644\u0648\u0627\u0646\u06CC \u0648 \u062A\u0627\u0631\u06CC\u062E\u06CC \u062A\u0634\u06A9\u06CC\u0644 \u0634\u062F\u0647\u200C\u0627\u0633\u062A) \u06A9\u0647 \u062F\u0631 \u0686\u0647\u0627\u0631", + " \u062F\u0648\u062F\u0645\u0627\u0646 \u067E\u0627\u062F\u0634\u0627\u0647\u06CC\u0650 \u067E\u06CC\u0634\u062F\u0627\u062F\u06CC\u0627\u0646\u060C \u06A9\u06CC\u0627\u0646\u06CC\u0627\u0646\u060C \u0627\u0634\u06A9\u0627\u0646\u06CC\u0627\u0646 \u0648 \u0633\u0627\u0633\u0627\u0646\u06CC\u0627\u0646 \u06AF\u0646\u062C\u0627\u0646\u062F\u0647 \u0645\u06CC\u200C\u0634\u0648\u062F.", + " \u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0628\u0631 \u0648\u0632\u0646 \xAB\u0641\u064E\u0639\u0648\u0644\u064F\u0646 \u0641\u0639\u0648\u0644\u0646 \u0641\u0639\u0648\u0644\u0646 \u0641\u064E\u0639\u064E\u0644\u0652\xBB\u060C \u062F\u0631 \u0628\u062D\u0631\u0650 \u0645\u064F\u062A\u064E\u0642\u0627\u0631\u0650\u0628\u0650 
\u0645\u062B\u0645\u064E\u0651\u0646\u0650 \u0645\u062D\u0630\u0648\u0641 \u0646\u06AF\u0627\u0634\u062A\u0647 \u0634\u062F\u0647\u200C\u0627\u0633\u062A.", + "\u0647\u0646\u06AF\u0627\u0645\u06CC \u06A9\u0647 \u0632\u0628\u0627\u0646 \u062F\u0627\u0646\u0634 \u0648 \u0627\u062F\u0628\u06CC\u0627\u062A \u062F\u0631 \u0627\u06CC\u0631\u0627\u0646 \u0632\u0628\u0627\u0646 \u0639\u0631\u0628\u06CC \u0628\u0648\u062F\u060C \u0641\u0631\u062F\u0648\u0633\u06CC\u060C \u0628\u0627 \u0633\u0631\u0648\u062F\u0646 \u0634\u0627\u0647\u0646\u0627\u0645\u0647", + " \u0628\u0627 \u0648\u06CC\u0698\u06AF\u06CC\u200C\u0647\u0627\u06CC \u0647\u062F\u0641\u200C\u0645\u0646\u062F\u06CC \u06A9\u0647 \u062F\u0627\u0634\u062A\u060C \u0632\u0628\u0627\u0646 \u067E\u0627\u0631\u0633\u06CC \u0631\u0627 \u0632\u0646\u062F\u0647 \u0648 \u067E\u0627\u06CC\u062F\u0627\u0631 \u06A9\u0631\u062F. \u06CC\u06A9\u06CC \u0627\u0632 ", + " \u0628\u0646\u200C\u0645\u0627\u06CC\u0647\u200C\u0647\u0627\u06CC \u0645\u0647\u0645\u06CC \u06A9\u0647 \u0641\u0631\u062F\u0648\u0633\u06CC \u0628\u0631\u0627\u06CC \u0633\u0631\u0648\u062F\u0646 \u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0627\u0632 \u0622\u0646 \u0627\u0633\u062A\u0641\u0627\u062F\u0647 \u06A9\u0631\u062F\u060C", + " \u0634\u0627\u0647\u0646\u0627\u0645\u0647\u0654 \u0627\u0628\u0648\u0645\u0646\u0635\u0648\u0631\u06CC \u0628\u0648\u062F. 
\u0634\u0627\u0647\u0646\u0627\u0645\u0647 \u0646\u0641\u0648\u0630 \u0628\u0633\u06CC\u0627\u0631\u06CC \u062F\u0631 \u062C\u0647\u062A\u200C\u06AF\u06CC\u0631\u06CC ", + " \u0641\u0631\u0647\u0646\u06AF \u0641\u0627\u0631\u0633\u06CC \u0648 \u0646\u06CC\u0632 \u0628\u0627\u0632\u062A\u0627\u0628\u200C\u0647\u0627\u06CC \u0634\u06A9\u0648\u0647\u200C\u0645\u0646\u062F\u06CC \u062F\u0631 \u0627\u062F\u0628\u06CC\u0627\u062A \u062C\u0647\u0627\u0646 \u062F\u0627\u0634\u062A\u0647\u200C\u0627\u0633\u062A \u0648 \u0634\u0627\u0639\u0631\u0627\u0646 ", + " \u0628\u0632\u0631\u06AF\u06CC \u0645\u0627\u0646\u0646\u062F \u06AF\u0648\u062A\u0647 \u0648 \u0648\u06CC\u06A9\u062A\u0648\u0631 \u0647\u0648\u06AF\u0648 \u0627\u0632 \u0622\u0646 \u0628\u0647 \u0646\u06CC\u06A9\u06CC \u06CC\u0627\u062F \u06A9\u0631\u062F\u0647\u200C\u0627\u0646\u062F." + ].join("\n") + ] + ], + ["text-generation", ["\u0627\u0633\u0645 \u0645\u0646 \u0646\u0627\u0632\u0646\u06CC\u0646 \u0627\u0633\u062A \u0648 \u0645\u0646", "\u0631\u0648\u0632\u06CC \u0631\u0648\u0632\u06AF\u0627\u0631\u06CC"]], + [ + "fill-mask", + [ + `\u0632\u0646\u062F\u06AF\u06CC \u06CC\u06A9 \u0633\u0648\u0627\u0644 \u0627\u0633\u062A \u0648 \u0627\u06CC\u0646 \u06A9\u0647 \u0686\u06AF\u0648\u0646\u0647 \u06A9\u0646\u06CC\u0645 \u067E\u0627\u0633\u062E \u0627\u06CC\u0646 \u0633\u0648\u0627\u0644!`, + `\u0632\u0646\u062F\u06AF\u06CC \u0627\u0632 \u0645\u0631\u06AF \u067E\u0631\u0633\u06CC\u062F: \u0686\u0631\u0627 \u0647\u0645\u0647 \u0645\u0646 \u0631\u0627 \u062F\u0627\u0631\u0646\u062F \u0627\u0645\u0627 \u0627\u0632 \u062A\u0648 \u0645\u062A\u0646\u0641\u0631\u0646\u062F\u061F` + ] + ] +]); +var MAPPING_AR = /* @__PURE__ */ new Map([ + ["text-classification", [`\u0623\u062D\u0628\u0643. 
\u0623\u0647\u0648\u0627\u0643`]], + [ + "token-classification", + [`\u0625\u0633\u0645\u064A \u0645\u062D\u0645\u062F \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0628\u0631\u0644\u064A\u0646`, `\u0625\u0633\u0645\u064A \u0633\u0627\u0631\u0647 \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0644\u0646\u062F\u0646`, `\u0625\u0633\u0645\u064A \u0633\u0627\u0645\u064A \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0627\u0644\u0642\u062F\u0633 \u0641\u064A \u0641\u0644\u0633\u0637\u064A\u0646.`] + ], + [ + "question-answering", + [ + { + text: `\u0623\u064A\u0646 \u0623\u0633\u0643\u0646\u061F`, + context: `\u0625\u0633\u0645\u064A \u0645\u062D\u0645\u062F \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0628\u064A\u0631\u0648\u062A` + }, + { + text: `\u0623\u064A\u0646 \u0623\u0633\u0643\u0646\u061F`, + context: `\u0625\u0633\u0645\u064A \u0633\u0627\u0631\u0647 \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0644\u0646\u062F\u0646` + }, + { + text: `\u0645\u0627 \u0627\u0633\u0645\u064A\u061F`, + context: `\u0627\u0633\u0645\u064A \u0633\u0639\u064A\u062F \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u062D\u064A\u0641\u0627.` + }, + { + text: `\u0645\u0627 \u0644\u0642\u0628 \u062E\u0627\u0644\u062F \u0628\u0646 \u0627\u0644\u0648\u0644\u064A\u062F \u0628\u0627\u0644\u0639\u0631\u0628\u064A\u0629\u061F`, + context: `\u062E\u0627\u0644\u062F \u0628\u0646 \u0627\u0644\u0648\u0644\u064A\u062F \u0645\u0646 \u0623\u0628\u0637\u0627\u0644 \u0648\u0642\u0627\u062F\u0629 \u0627\u0644\u0641\u062A\u062D \u0627\u0644\u0625\u0633\u0644\u0627\u0645\u064A \u0648\u0642\u062F \u062A\u062D\u062F\u062B\u062A \u0639\u0646\u0647 \u0627\u0644\u0644\u063A\u0627\u062A \u0627\u0644\u0625\u0646\u062C\u0644\u064A\u0632\u064A\u0629 \u0648\u0627\u0644\u0641\u0631\u0646\u0633\u064A\u0629 \u0648\u0627\u0644\u0625\u0633\u0628\u0627\u0646\u064A\u0629 \u0648\u0644\u0642\u0628 \u0628\u0633\u064A\u0641 \u0627\u0644\u0644\u0647 \u0627\u0644\u0645\u0633\u0644\u0648\u0644.` + } + ] + ], + ["translation", 
[`\u0625\u0633\u0645\u064A \u0645\u062D\u0645\u062F \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0628\u0631\u0644\u064A\u0646`, `\u0625\u0633\u0645\u064A \u0633\u0627\u0631\u0647 \u0648\u0623\u0633\u0643\u0646 \u0641\u064A \u0644\u0646\u062F\u0646`]], + [ + "summarization", + [ + `\u062A\u0642\u0639 \u0627\u0644\u0623\u0647\u0631\u0627\u0645\u0627\u062A \u0641\u064A \u0627\u0644\u062C\u064A\u0632\u0629 \u0642\u0631\u0628 \u0627\u0644\u0642\u0627\u0647\u0631\u0629 \u0641\u064A \u0645\u0635\u0631 \u0648\u0642\u062F \u0628\u0646\u064A\u062A \u0645\u0646\u0630 \u0639\u062F\u0629 \u0642\u0631\u0648\u0646\u060C \u0648\u0642\u064A\u0644 \u0625\u0646\u0647\u0627 \u0643\u0627\u0646\u062A \u0642\u0628\u0648\u0631\u0627 \u0644\u0644\u0641\u0631\u0627\u0639\u0646\u0629 \u0648\u062A\u0645 \u0628\u0646\u0627\u0624\u0647\u0627 \u0628\u0639\u0645\u0644\u064A\u0629 \u0647\u0646\u062F\u0633\u064A\u0629 \u0631\u0627\u0626\u0639\u0629 \u0648\u0627\u0633\u062A\u0642\u062F\u0645\u062A \u062D\u062C\u0627\u0631\u062A\u0647\u0627 \u0645\u0646 \u062C\u0628\u0644 \u0627\u0644\u0645\u0642\u0637\u0645 \u0648\u062A\u0645 \u0646\u0642\u0644\u0647\u0627 \u0628\u0627\u0644\u0633\u0641\u0646 \u0623\u0648 \u0639\u0644\u0649 \u0627\u0644\u0631\u0645\u0644\u060C \u0648\u0645\u0627 \u062A\u0632\u0627\u0644 \u0634\u0627\u0645\u062E\u0629 \u0648\u064A\u0642\u0635\u062F\u0647\u0627 \u0627\u0644\u0633\u064A\u0627\u062D \u0645\u0646 \u0643\u0627\u0641\u0629 \u0623\u0631\u062C\u0627\u0621 \u0627\u0644\u0645\u0639\u0645\u0648\u0631\u0629.` + ] + ], + [ + "text-generation", + [ + `\u0625\u0633\u0645\u064A \u0645\u062D\u0645\u062F \u0648\u0623\u062D\u0628 \u0623\u0646`, + `\u062F\u0639 \u0627\u0644\u0645\u0643\u0627\u0631\u0645 \u0644\u0627 \u062A\u0631\u062D\u0644 \u0644\u0628\u063A\u064A\u062A\u0647\u0627 - \u0648\u0627\u0642\u0639\u062F \u0641\u0625\u0646\u0643 \u0623\u0646\u062A \u0627\u0644\u0637\u0627\u0639\u0645 \u0627\u0644\u0643\u0627\u0633\u064A.`, + `\u0644\u0645\u0627\u0630\u0627 \u0646\u062D\u0646 
\u0647\u0646\u0627\u061F`, + `\u0627\u0644\u0642\u062F\u0633 \u0645\u062F\u064A\u0646\u0629 \u062A\u0627\u0631\u064A\u062E\u064A\u0629\u060C \u0628\u0646\u0627\u0647\u0627 \u0627\u0644\u0643\u0646\u0639\u0627\u0646\u064A\u0648\u0646 \u0641\u064A`, + `\u0643\u0627\u0646 \u064A\u0627 \u0645\u0627 \u0643\u0627\u0646 \u0641\u064A \u0642\u062F\u064A\u0645 \u0627\u0644\u0632\u0645\u0627\u0646` + ] + ], + ["fill-mask", [`\u0628\u0627\u0631\u064A\u0633 \u0641\u0631\u0646\u0633\u0627.`, `\u0641\u0644\u0633\u0641\u0629 \u0627\u0644\u062D\u064A\u0627\u0629 \u0647\u064A .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "\u0647\u0630\u0627 \u0634\u062E\u0635 \u0633\u0639\u064A\u062F", + sentences: ["\u0647\u0630\u0627 \u0643\u0644\u0628 \u0633\u0639\u064A\u062F", "\u0647\u0630\u0627 \u0634\u062E\u0635 \u0633\u0639\u064A\u062F \u062C\u062F\u0627", "\u0627\u0644\u064A\u0648\u0645 \u0647\u0648 \u064A\u0648\u0645 \u0645\u0634\u0645\u0633"] + } + ] + ] +]); +var MAPPING_BN = /* @__PURE__ */ new Map([ + ["text-classification", [`\u09AC\u09BE\u0999\u09BE\u09B2\u09BF\u09B0 \u0998\u09B0\u09C7 \u0998\u09B0\u09C7 \u0986\u099C \u09A8\u09AC\u09BE\u09A8\u09CD\u09A8 \u0989\u09CE\u09B8\u09AC\u0964`]], + [ + "token-classification", + [`\u0986\u09AE\u09BE\u09B0 \u09A8\u09BE\u09AE \u099C\u09BE\u09B9\u09BF\u09A6 \u098F\u09AC\u0982 \u0986\u09AE\u09BF \u09A2\u09BE\u0995\u09BE\u09DF \u09AC\u09BE\u09B8 \u0995\u09B0\u09BF\u0964`, `\u09A4\u09BF\u09A8\u09BF \u0997\u09C1\u0997\u09B2\u09C7 \u099A\u09BE\u0995\u09B0\u09C0 \u0995\u09B0\u09C7\u09A8\u0964`, `\u0986\u09AE\u09BE\u09B0 \u09A8\u09BE\u09AE \u09B8\u09C1\u09B8\u09CD\u09AE\u09BF\u09A4\u09BE \u098F\u09AC\u0982 \u0986\u09AE\u09BF \u0995\u09B2\u0995\u09BE\u09A4\u09BE\u09DF \u09AC\u09BE\u09B8 \u0995\u09B0\u09BF\u0964`] + ], + ["translation", [`\u0986\u09AE\u09BE\u09B0 \u09A8\u09BE\u09AE \u099C\u09BE\u09B9\u09BF\u09A6, \u0986\u09AE\u09BF \u09B0\u0982\u09AA\u09C1\u09B0\u09C7 \u09AC\u09BE\u09B8 \u0995\u09B0\u09BF\u0964`, `\u0986\u09AA\u09A8\u09BF 
\u0995\u09C0 \u0986\u099C\u0995\u09C7 \u09AC\u09BE\u09B8\u09BE\u09DF \u0986\u09B8\u09AC\u09C7\u09A8?`]], + [ + "summarization", + [ + `\u2018\u0987\u0995\u09CB\u09A8\u09AE\u09BF\u09B8\u09CD\u099F\u2019 \u09B2\u09BF\u0996\u09C7\u099B\u09C7, \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF\u09B0 \u099A\u09BE\u09B0 \u09AE\u09BE\u09B8 \u09B8\u09CD\u09A5\u09BE\u09DF\u09C0 \u09B9\u0993\u09DF\u09BE\u09B0 \u0996\u09AC\u09B0\u099F\u09BF \u09A6\u09C1\u0987 \u0995\u09BE\u09B0\u09A3\u09C7 \u0986\u09A8\u09A8\u09CD\u09A6\u09C7\u09B0\u0964 \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF \u09AF\u09A4 \u09A6\u09BF\u09A8 \u09AA\u09B0\u09CD\u09AF\u09A8\u09CD\u09A4 \u09B6\u09B0\u09C0\u09B0\u09C7 \u099F\u09BF\u0995\u09AC\u09C7, \u09A4\u09A4 \u09A6\u09BF\u09A8 \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09A3 \u09A5\u09C7\u0995\u09C7 \u09B8\u09C1\u09B0\u0995\u09CD\u09B7\u09BF\u09A4 \u09A5\u09BE\u0995\u09BE \u09B8\u09AE\u09CD\u09AD\u09AC\u0964 \u0985\u09B0\u09CD\u09A5\u09BE\u09CE, \u098F\u09AE\u09A8 \u098F\u0995 \u099F\u09BF\u0995\u09BE\u09B0 \u09AA\u09CD\u09B0\u09DF\u09CB\u099C\u09A8 \u09B9\u09AC\u09C7, \u09AF\u09BE \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF\u09B0 \u0989\u09A4\u09CD\u09AA\u09BE\u09A6\u09A8\u0995\u09C7 \u09AA\u09CD\u09B0\u09B0\u09CB\u099A\u09BF\u09A4 \u0995\u09B0\u09A4\u09C7 \u09AA\u09BE\u09B0\u09C7 \u098F\u09AC\u0982 \u09A6\u09C0\u09B0\u09CD\u0998\u09B8\u09CD\u09A5\u09BE\u09DF\u09C0 \u09B8\u09C1\u09B0\u0995\u09CD\u09B7\u09BE \u09A6\u09BF\u09A4\u09C7 \u09AA\u09BE\u09B0\u09C7\u0964 \u098F\u0997\u09C1\u09B2\u09CB \u0996\u09C1\u0981\u099C\u09C7 \u09AC\u09C7\u09B0 \u0995\u09B0\u09BE\u0993 \u09B8\u09B9\u099C\u0964 \u098F\u099F\u09BF \u0986\u09AD\u09BE\u09B8 \u09A6\u09C7\u09DF, \u09AC\u09CD\u09AF\u09BE\u09AA\u0995 \u09B9\u09BE\u09B0\u09C7 \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF \u09B6\u09A8\u09BE\u0995\u09CD\u09A4\u0995\u09B0\u09A3 \u09AB\u09B2\u09BE\u09AB\u09B2 
\u09AE\u09CB\u099F\u09BE\u09AE\u09C1\u099F\u09BF \u09A8\u09BF\u09B0\u09CD\u09AD\u09C1\u09B2 \u09B9\u0993\u09DF\u09BE \u0989\u099A\u09BF\u09A4\u0964 \u09A6\u09CD\u09AC\u09BF\u09A4\u09C0\u09DF \u0986\u09B0\u09C7\u0995\u099F\u09BF \u0997\u09AC\u09C7\u09B7\u09A3\u09BE\u09B0 \u09A8\u09C7\u09A4\u09C3\u09A4\u09CD\u09AC \u09A6\u09BF\u09DF\u09C7\u099B\u09C7\u09A8 \u09AF\u09C1\u0995\u09CD\u09A4\u09B0\u09BE\u099C\u09CD\u09AF\u09C7\u09B0 \u09AE\u09C7\u09A1\u09BF\u0995\u09C7\u09B2 \u09B0\u09BF\u09B8\u09BE\u09B0\u09CD\u099A \u0995\u09BE\u0989\u09A8\u09CD\u09B8\u09BF\u09B2\u09C7\u09B0 (\u098F\u09AE\u0986\u09B0\u09B8\u09BF) \u0987\u09AE\u09BF\u0989\u09A8\u09CB\u09B2\u099C\u09BF\u09B8\u09CD\u099F \u09A4\u09BE\u0993 \u09A6\u0982\u0964 \u09A4\u09BF\u09A8\u09BF \u099F\u09BF-\u09B8\u09C7\u09B2 \u09B6\u09A8\u09BE\u0995\u09CD\u09A4\u0995\u09B0\u09A3\u09C7 \u0995\u09BE\u099C \u0995\u09B0\u09C7\u099B\u09C7\u09A8\u0964 \u099F\u09BF-\u09B8\u09C7\u09B2 \u09B6\u09A8\u09BE\u0995\u09CD\u09A4\u0995\u09B0\u09A3\u09C7\u09B0 \u09AA\u09CD\u09B0\u0995\u09CD\u09B0\u09BF\u09DF\u09BE \u0985\u09AC\u09B6\u09CD\u09AF \u0985\u09CD\u09AF\u09BE\u09A8\u09CD\u099F\u09BF\u09AC\u09A1\u09BF\u09B0 \u09AE\u09A4\u09CB \u098F\u09A4 \u0986\u09B2\u09CB\u099A\u09BF\u09A4 \u09A8\u09DF\u0964 \u09A4\u09AC\u09C7 \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09A3\u09C7\u09B0 \u09AC\u09BF\u09B0\u09C1\u09A6\u09CD\u09A7\u09C7 \u09B2\u09DC\u09BE\u0987 \u098F\u09AC\u0982 \u09A6\u09C0\u09B0\u09CD\u0998\u09AE\u09C7\u09DF\u09BE\u09A6\u09BF \u09B8\u09C1\u09B0\u0995\u09CD\u09B7\u09BE\u09DF \u09B8\u09AE\u09BE\u09A8 \u0997\u09C1\u09B0\u09C1\u09A4\u09CD\u09AC\u09AA\u09C2\u09B0\u09CD\u09A3 \u09AD\u09C2\u09AE\u09BF\u0995\u09BE \u09AA\u09BE\u09B2\u09A8 \u0995\u09B0\u09C7\u0964 \u0997\u09AC\u09C7\u09B7\u09A3\u09BE\u09B8\u0982\u0995\u09CD\u09B0\u09BE\u09A8\u09CD\u09A4 \u09A8\u09BF\u09AC\u09A8\u09CD\u09A7 \u09AA\u09CD\u09B0\u0995\u09BE\u09B6\u09BF\u09A4 \u09B9\u09DF\u09C7\u099B\u09C7 \u2018\u09A8\u09C7\u099A\u09BE\u09B0 
\u0987\u09AE\u09BF\u0989\u09A8\u09CB\u09B2\u099C\u09BF\u2019 \u09B8\u09BE\u09AE\u09DF\u09BF\u0995\u09C0\u09A4\u09C7\u0964 \u09A4\u09BE\u0981\u09B0\u09BE \u09AC\u09B2\u099B\u09C7\u09A8, \u0997\u09AC\u09C7\u09B7\u09A3\u09BE\u09B0 \u0995\u09CD\u09B7\u09C7\u09A4\u09CD\u09B0\u09C7 \u0995\u09CB\u09AD\u09BF\u09A1-\u09E7\u09EF \u09AE\u09C3\u09A6\u09C1 \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09A3\u09C7\u09B0 \u09B6\u09BF\u0995\u09BE\u09B0 \u09E8\u09EE \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF\u09B0 \u09B0\u0995\u09CD\u09A4\u09C7\u09B0 \u09A8\u09AE\u09C1\u09A8\u09BE, \u09E7\u09EA \u099C\u09A8 \u0997\u09C1\u09B0\u09C1\u09A4\u09B0 \u0985\u09B8\u09C1\u09B8\u09CD\u09A5 \u0993 \u09E7\u09EC \u099C\u09A8 \u09B8\u09C1\u09B8\u09CD\u09A5 \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF\u09B0 \u09B0\u0995\u09CD\u09A4\u09C7\u09B0 \u09A8\u09AE\u09C1\u09A8\u09BE \u09AA\u09B0\u09C0\u0995\u09CD\u09B7\u09BE \u0995\u09B0\u09C7\u099B\u09C7\u09A8\u0964 \u0997\u09AC\u09C7\u09B7\u09A3\u09BE \u09A8\u09BF\u09AC\u09A8\u09CD\u09A7\u09C7 \u09AC\u09B2\u09BE \u09B9\u09DF, \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09BF\u09A4 \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF\u09A6\u09C7\u09B0 \u0995\u09CD\u09B7\u09C7\u09A4\u09CD\u09B0\u09C7 \u099F\u09BF-\u09B8\u09C7\u09B2\u09C7\u09B0 \u09A4\u09C0\u09AC\u09CD\u09B0 \u09AA\u09CD\u09B0\u09A4\u09BF\u0995\u09CD\u09B0\u09BF\u09DF\u09BE \u09A4\u09BE\u0981\u09B0\u09BE \u09A6\u09C7\u0996\u09C7\u099B\u09C7\u09A8\u0964 \u098F \u0995\u09CD\u09B7\u09C7\u09A4\u09CD\u09B0\u09C7 \u09AE\u09C3\u09A6\u09C1 \u0993 \u0997\u09C1\u09B0\u09C1\u09A4\u09B0 \u0985\u09B8\u09C1\u09B8\u09CD\u09A5 \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF\u09A6\u09C7\u09B0 \u0995\u09CD\u09B7\u09C7\u09A4\u09CD\u09B0\u09C7 \u09AA\u09CD\u09B0\u09A4\u09BF\u0995\u09CD\u09B0\u09BF\u09DF\u09BE\u09B0 \u09AD\u09BF\u09A8\u09CD\u09A8\u09A4\u09BE \u09AA\u09BE\u0993\u09DF\u09BE \u0997\u09C7\u099B\u09C7\u0964` + ] + ], + ["text-generation", [`\u0986\u09AE\u09BF \u09B0\u09A4\u09A8 \u098F\u09AC\u0982 \u0986\u09AE\u09BF`, 
`\u09A4\u09C1\u09AE\u09BF \u09AF\u09A6\u09BF \u099A\u09BE\u0993 \u09A4\u09AC\u09C7`, `\u09AE\u09BF\u09A5\u09BF\u09B2\u09BE \u0986\u099C\u0995\u09C7 \u09AC\u09A1\u09CD\u09A1`]], + ["fill-mask", [`\u0986\u09AE\u09BF \u09AC\u09BE\u0982\u09B2\u09BE\u09DF \u0997\u09BE\u0987\u0964`, `\u0986\u09AE\u09BF \u0996\u09C1\u09AC \u09AD\u09BE\u09B2\u09CB\u09AC\u09BE\u09B8\u09BF\u0964 `]], + [ + "question-answering", + [ + { + text: `\u09AA\u09CD\u09B0\u09A5\u09AE \u098F\u09B6\u09BF\u09AF\u09BC\u09BE \u0995\u09BE\u09AA \u0995\u09CD\u09B0\u09BF\u0995\u09C7\u099F \u099F\u09C1\u09B0\u09CD\u09A8\u09BE\u09AE\u09C7\u09A8\u09CD\u099F \u0995\u09CB\u09A5\u09BE\u09DF \u0985\u09A8\u09C1\u09B7\u09CD\u09A0\u09BF\u09A4 \u09B9\u09DF ?`, + context: `\u09AA\u09CD\u09B0\u09A5\u09AE \u099F\u09C1\u09B0\u09CD\u09A8\u09BE\u09AE\u09C7\u09A8\u09CD\u099F \u0985\u09A8\u09C1\u09B7\u09CD\u09A0\u09BF\u09A4 \u09B9\u09AF\u09BC \u09E7\u09EF\u09EE\u09EA \u09B8\u09BE\u09B2\u09C7 \u09B8\u0982\u09AF\u09C1\u0995\u09CD\u09A4 \u0986\u09B0\u09AC \u0986\u09AE\u09BF\u09B0\u09BE\u09A4 \u098F\u09B0 \u09B6\u09BE\u09B0\u099C\u09BE\u09B9 \u09A4\u09C7 \u09AF\u09C7\u0996\u09BE\u09A8\u09C7 \u0995\u09BE\u0989\u09A8\u09CD\u09B8\u09BF\u09B2\u09C7\u09B0 \u09AE\u09C2\u09B2 \u0985\u09AB\u09BF\u09B8 \u099B\u09BF\u09B2 (\u09E7\u09EF\u09EF\u09EB \u09AA\u09B0\u09CD\u09AF\u09A8\u09CD\u09A4)\u0964 \u09AD\u09BE\u09B0\u09A4 \u09B6\u09CD\u09B0\u09C0\u09B2\u0999\u09CD\u0995\u09BE\u09B0 \u09B8\u09BE\u09A5\u09C7 \u0986\u09A8\u09CD\u09A4\u09B0\u09BF\u0995\u09A4\u09BE\u09B9\u09C0\u09A8 \u0995\u09CD\u09B0\u09BF\u0995\u09C7\u099F \u09B8\u09AE\u09CD\u09AA\u09B0\u09CD\u0995\u09C7\u09B0 \u0995\u09BE\u09B0\u09A3\u09C7 \u09E7\u09EF\u09EE\u09EC \u09B8\u09BE\u09B2\u09C7\u09B0 \u099F\u09C1\u09B0\u09CD\u09A8\u09BE\u09AE\u09C7\u09A8\u09CD\u099F \u09AC\u09B0\u09CD\u099C\u09A8 \u0995\u09B0\u09C7\u0964 \u09E7\u09EF\u09EF\u09E9 \u09B8\u09BE\u09B2\u09C7 \u09AD\u09BE\u09B0\u09A4 \u0993 \u09AA\u09BE\u0995\u09BF\u09B8\u09CD\u09A4\u09BE\u09A8 \u098F\u09B0 
\u09AE\u09A7\u09CD\u09AF\u09C7 \u09B0\u09BE\u099C\u09A8\u09C8\u09A4\u09BF\u0995 \u0985\u09B8\u09CD\u09A5\u09BF\u09B0\u09A4\u09BE\u09B0 \u0995\u09BE\u09B0\u09A3\u09C7 \u098F\u099F\u09BF \u09AC\u09BE\u09A4\u09BF\u09B2 \u09B9\u09AF\u09BC\u09C7 \u09AF\u09BE\u09AF\u09BC\u0964 \u09B6\u09CD\u09B0\u09C0\u09B2\u0999\u09CD\u0995\u09BE \u098F\u09B6\u09BF\u09AF\u09BC\u09BE \u0995\u09BE\u09AA \u09B6\u09C1\u09B0\u09C1 \u09A5\u09C7\u0995\u09C7 \u0985\u0982\u09B6 \u0997\u09CD\u09B0\u09B9\u09A3 \u0995\u09B0\u09C7 \u0986\u09B8\u099B\u09C7\u0964 \u0986\u09A8\u09CD\u09A4\u09B0\u09CD\u099C\u09BE\u09A4\u09BF\u0995 \u0995\u09CD\u09B0\u09BF\u0995\u09C7\u099F \u0995\u09BE\u0989\u09A8\u09CD\u09B8\u09BF\u09B2 \u09A8\u09BF\u09AF\u09BC\u09AE \u0995\u09B0\u09C7 \u09A6\u09BF\u09AF\u09BC\u09C7\u099B\u09C7 \u09AF\u09C7 \u098F\u09B6\u09BF\u09AF\u09BC\u09BE \u0995\u09BE\u09AA\u09C7\u09B0 \u09B8\u0995\u09B2 \u0996\u09C7\u09B2\u09BE \u0985\u09A8\u09C1\u09B7\u09CD\u09A0\u09BF\u09A4 \u09B9\u09AC\u09C7 \u0985\u09AB\u09BF\u09B8\u09BF\u09AF\u09BC\u09BE\u09B2 \u098F\u0995\u09A6\u09BF\u09A8\u09C7\u09B0 \u0986\u09A8\u09CD\u09A4\u09B0\u09CD\u099C\u09BE\u09A4\u09BF\u0995 \u0995\u09CD\u09B0\u09BF\u0995\u09C7\u099F \u09B9\u09BF\u09B8\u09C7\u09AC\u09C7\u0964 \u098F\u09B8\u09BF\u09B8\u09BF \u0998\u09CB\u09B7\u09A8\u09BE \u0985\u09A8\u09C1\u09AF\u09BE\u09AF\u09BC\u09C0 \u09AA\u09CD\u09B0\u09A4\u09BF \u09A6\u09C1\u0987 \u09AC\u099B\u09B0 \u09AA\u09B0 \u09AA\u09B0 \u099F\u09C1\u09B0\u09CD\u09A8\u09BE\u09AE\u09C7\u09A8\u09CD\u099F \u0985\u09A8\u09C1\u09B7\u09CD\u09A0\u09BF\u09A4 \u09B9\u09AF\u09BC \u09E8\u09E6\u09E6\u09EE \u09B8\u09BE\u09B2 \u09A5\u09C7\u0995\u09C7\u0964` + }, + { + text: `\u09AD\u09BE\u09B0\u09A4\u09C0\u09AF\u09BC \u09AC\u09BE\u0999\u09BE\u09B2\u09BF \u0995\u09A5\u09BE\u09B8\u09BE\u09B9\u09BF\u09A4\u09CD\u09AF\u09BF\u0995 \u09AE\u09B9\u09BE\u09B6\u09CD\u09AC\u09C7\u09A4\u09BE \u09A6\u09C7\u09AC\u09C0\u09B0 \u09AE\u09C3\u09A4\u09CD\u09AF\u09C1 \u0995\u09AC\u09C7 \u09B9\u09DF ?`, + context: 
`\u09E8\u09E6\u09E7\u09EC \u09B8\u09BE\u09B2\u09C7\u09B0 \u09E8\u09E9 \u099C\u09C1\u09B2\u09BE\u0987 \u09B9\u09C3\u09A6\u09B0\u09CB\u0997\u09C7 \u0986\u0995\u09CD\u09B0\u09BE\u09A8\u09CD\u09A4 \u09B9\u09AF\u09BC\u09C7 \u09AE\u09B9\u09BE\u09B6\u09CD\u09AC\u09C7\u09A4\u09BE \u09A6\u09C7\u09AC\u09C0 \u0995\u09B2\u0995\u09BE\u09A4\u09BE\u09B0 \u09AC\u09C7\u09B2 \u09AD\u09BF\u0989 \u0995\u09CD\u09B2\u09BF\u09A8\u09BF\u0995\u09C7 \u09AD\u09B0\u09CD\u09A4\u09BF \u09B9\u09A8\u0964 \u09B8\u09C7\u0987 \u09AC\u099B\u09B0\u0987 \u09E8\u09EE \u099C\u09C1\u09B2\u09BE\u0987 \u098F\u0995\u09BE\u09A7\u09BF\u0995 \u0985\u0999\u09CD\u0997 \u09AC\u09BF\u0995\u09B2 \u09B9\u09AF\u09BC\u09C7 \u09A4\u09BE\u0981\u09B0 \u09AE\u09C3\u09A4\u09CD\u09AF\u09C1 \u0998\u099F\u09C7\u0964 \u09A4\u09BF\u09A8\u09BF \u09AE\u09A7\u09C1\u09AE\u09C7\u09B9, \u09B8\u09C7\u09AA\u09CD\u099F\u09BF\u09B8\u09C7\u09AE\u09BF\u09AF\u09BC\u09BE \u0993 \u09AE\u09C2\u09A4\u09CD\u09B0 \u09B8\u0982\u0995\u09CD\u09B0\u09AE\u09A3 \u09B0\u09CB\u0997\u09C7\u0993 \u09AD\u09C1\u0997\u099B\u09BF\u09B2\u09C7\u09A8\u0964` + }, + { + text: `\u09AE\u09BE\u09B8\u09CD\u099F\u09BE\u09B0\u09A6\u09BE \u09B8\u09C2\u09B0\u09CD\u09AF\u0995\u09C1\u09AE\u09BE\u09B0 \u09B8\u09C7\u09A8\u09C7\u09B0 \u09AC\u09BE\u09AC\u09BE\u09B0 \u09A8\u09BE\u09AE \u0995\u09C0 \u099B\u09BF\u09B2 ?`, + context: `\u09B8\u09C2\u09B0\u09CD\u09AF \u09B8\u09C7\u09A8 \u09E7\u09EE\u09EF\u09EA \u09B8\u09BE\u09B2\u09C7\u09B0 \u09E8\u09E8 \u09AE\u09BE\u09B0\u09CD\u099A \u099A\u099F\u09CD\u099F\u0997\u09CD\u09B0\u09BE\u09AE\u09C7\u09B0 \u09B0\u09BE\u0989\u099C\u09BE\u09A8 \u09A5\u09BE\u09A8\u09BE\u09B0 \u09A8\u09CB\u09AF\u09BC\u09BE\u09AA\u09BE\u09A1\u09BC\u09BE\u09AF\u09BC \u0985\u09B0\u09CD\u09A5\u09A8\u09C8\u09A4\u09BF\u0995 \u09AD\u09BE\u09AC\u09C7 \u0985\u09B8\u09CD\u09AC\u099A\u09CD\u099B\u09B2 \u09AA\u09B0\u09BF\u09AC\u09BE\u09B0\u09C7 \u099C\u09A8\u09CD\u09AE\u0997\u09CD\u09B0\u09B9\u09A3 \u0995\u09B0\u09C7\u09A8\u0964 \u09A4\u09BE\u0981\u09B0 
\u09AA\u09BF\u09A4\u09BE\u09B0 \u09A8\u09BE\u09AE \u09B0\u09BE\u099C\u09AE\u09A8\u09BF \u09B8\u09C7\u09A8 \u098F\u09AC\u0982 \u09AE\u09BE\u09A4\u09BE\u09B0 \u09A8\u09BE\u09AE \u09B6\u09B6\u09C0 \u09AC\u09BE\u09B2\u09BE \u09B8\u09C7\u09A8\u0964 \u09B0\u09BE\u099C\u09AE\u09A8\u09BF \u09B8\u09C7\u09A8\u09C7\u09B0 \u09A6\u09C1\u0987 \u099B\u09C7\u09B2\u09C7 \u0986\u09B0 \u099A\u09BE\u09B0 \u09AE\u09C7\u09AF\u09BC\u09C7\u0964 \u09B8\u09C2\u09B0\u09CD\u09AF \u09B8\u09C7\u09A8 \u09A4\u09BE\u0981\u09A6\u09C7\u09B0 \u09AA\u09B0\u09BF\u09AC\u09BE\u09B0\u09C7\u09B0 \u099A\u09A4\u09C1\u09B0\u09CD\u09A5 \u09B8\u09A8\u09CD\u09A4\u09BE\u09A8\u0964 \u09A6\u09C1\u0987 \u099B\u09C7\u09B2\u09C7\u09B0 \u09A8\u09BE\u09AE \u09B8\u09C2\u09B0\u09CD\u09AF \u0993 \u0995\u09AE\u09B2\u0964 \u099A\u09BE\u09B0 \u09AE\u09C7\u09AF\u09BC\u09C7\u09B0 \u09A8\u09BE\u09AE \u09AC\u09B0\u09A6\u09BE\u09B8\u09C1\u09A8\u09CD\u09A6\u09B0\u09C0, \u09B8\u09BE\u09AC\u09BF\u09A4\u09CD\u09B0\u09C0, \u09AD\u09BE\u09A8\u09C1\u09AE\u09A4\u09C0 \u0993 \u09AA\u09CD\u09B0\u09AE\u09BF\u09B2\u09BE\u0964 \u09B6\u09C8\u09B6\u09AC\u09C7 \u09AA\u09BF\u09A4\u09BE \u09AE\u09BE\u09A4\u09BE\u0995\u09C7 \u09B9\u09BE\u09B0\u09BE\u09A8\u09CB \u09B8\u09C2\u09B0\u09CD\u09AF \u09B8\u09C7\u09A8 \u0995\u09BE\u0995\u09BE \u0997\u09CC\u09B0\u09AE\u09A8\u09BF \u09B8\u09C7\u09A8\u09C7\u09B0 \u0995\u09BE\u099B\u09C7 \u09AE\u09BE\u09A8\u09C1\u09B7 \u09B9\u09AF\u09BC\u09C7\u099B\u09C7\u09A8\u0964 \u09B8\u09C2\u09B0\u09CD\u09AF \u09B8\u09C7\u09A8 \u099B\u09C7\u09B2\u09C7\u09AC\u09C7\u09B2\u09BE \u09A5\u09C7\u0995\u09C7\u0987 \u0996\u09C1\u09AC \u09AE\u09A8\u09CB\u09AF\u09CB\u0997\u09C0 \u09AD\u09BE\u09B2 \u099B\u09BE\u09A4\u09CD\u09B0 \u099B\u09BF\u09B2\u09C7\u09A8 \u098F\u09AC\u0982 \u09A7\u09B0\u09CD\u09AE\u09AD\u09BE\u09AC\u09BE\u09AA\u09A8\u09CD\u09A8 \u0997\u09AE\u09CD\u09AD\u09C0\u09B0 \u09AA\u09CD\u09B0\u0995\u09C3\u09A4\u09BF\u09B0 \u099B\u09BF\u09B2\u09C7\u09A8\u0964` + } + ] + ], + [ + "sentence-similarity", + [ + { + 
source_sentence: "\u09B8\u09C7 \u098F\u0995\u099C\u09A8 \u09B8\u09C1\u0996\u09C0 \u09AC\u09CD\u09AF\u0995\u09CD\u09A4\u09BF", + sentences: ["\u09B8\u09C7 \u09B9\u09CD\u09AF\u09BE\u09AA\u09BF \u0995\u09C1\u0995\u09C1\u09B0", "\u09B8\u09C7 \u0996\u09C1\u09AC \u09B8\u09C1\u0996\u09C0 \u09AE\u09BE\u09A8\u09C1\u09B7", "\u0986\u099C \u098F\u0995\u099F\u09BF \u09B0\u09CC\u09A6\u09CD\u09B0\u09CB\u099C\u09CD\u099C\u09CD\u09AC\u09B2 \u09A6\u09BF\u09A8"] + } + ] + ] +]); +var MAPPING_MN = /* @__PURE__ */ new Map([ + ["text-classification", [`\u0411\u0438 \u0447\u0430\u043C\u0434 \u0445\u0430\u0439\u0440\u0442\u0430\u0439`]], + [ + "token-classification", + [ + `\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440\u0442 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433.`, + `\u041D\u0430\u043C\u0430\u0439\u0433 \u0413\u0430\u043D\u0431\u0430\u0442 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u0432\u0441 \u0430\u0439\u043C\u0430\u0433\u0442 \u0442\u04E9\u0440\u0441\u04E9\u043D.`, + `\u041C\u0430\u043D\u0430\u0439 \u0443\u043B\u0441 \u0442\u0430\u0432\u0430\u043D \u0445\u043E\u0448\u0443\u0443 \u043C\u0430\u043B\u0442\u0430\u0439.` + ] + ], + [ + "question-answering", + [ + { + text: `\u0422\u0430 \u0445\u0430\u0430\u043D\u0430 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433 \u0432\u044D?`, + context: `\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440\u0442 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433.` + }, + { + text: `\u0422\u0430\u043D\u044B\u0433 \u0445\u044D\u043D \u0433\u044D\u0434\u044D\u0433 \u0432\u044D?`, + context: `\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. 
\u0411\u0438 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440\u0442 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433.` + }, + { + text: `\u041C\u0438\u043D\u0438\u0439 \u043D\u044D\u0440\u0438\u0439\u0433 \u0445\u044D\u043D \u0433\u044D\u0434\u044D\u0433 \u0432\u044D?`, + context: `\u041D\u0430\u043C\u0430\u0439\u0433 \u0413\u0430\u043D\u0431\u0430\u0442 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u0432\u0441 \u0430\u0439\u043C\u0430\u0433\u0442 \u0442\u04E9\u0440\u0441\u04E9\u043D.` + } + ] + ], + ["translation", [`\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440\u0442 \u0430\u043C\u044C\u0434\u0430\u0440\u0434\u0430\u0433.`, `\u041D\u0430\u043C\u0430\u0439\u0433 \u0413\u0430\u043D\u0431\u0430\u0442 \u0433\u044D\u0434\u044D\u0433. \u0411\u0438 \u0423\u0432\u0441 \u0430\u0439\u043C\u0430\u0433\u0442 \u0442\u04E9\u0440\u0441\u04E9\u043D.`]], + [ + "summarization", + [ + `\u041C\u043E\u043D\u0433\u043E\u043B \u0423\u043B\u0441 (1992 \u043E\u043D\u043E\u043E\u0441 \u0445\u043E\u0439\u0448) \u2014 \u0434\u043E\u0440\u043D\u043E \u0431\u043E\u043B\u043E\u043D \u0442\u04E9\u0432 \u0410\u0437\u0438\u0434 \u043E\u0440\u0448\u0434\u043E\u0433 \u0431\u04AF\u0440\u044D\u043D \u044D\u0440\u0445\u0442 \u0443\u043B\u0441. \u0425\u043E\u0439\u0434 \u0442\u0430\u043B\u0430\u0430\u0440\u0430\u0430 \u041E\u0440\u043E\u0441, \u0431\u0443\u0441\u0430\u0434 \u0442\u0430\u043B\u0430\u0430\u0440\u0430\u0430 \u0425\u044F\u0442\u0430\u0434 \u0443\u043B\u0441\u0442\u0430\u0439 \u0445\u0438\u043B\u043B\u044D\u0434\u044D\u0433 \u0434\u0430\u043B\u0430\u0439\u0434 \u0433\u0430\u0440\u0446\u0433\u04AF\u0439 \u043E\u0440\u043E\u043D. \u041D\u0438\u0439\u0441\u043B\u044D\u043B \u2014 \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440 \u0445\u043E\u0442. 
\u0410\u043B\u0442\u0430\u0439\u043D \u043D\u0443\u0440\u0443\u0443\u043D\u0430\u0430\u0441 \u0425\u044F\u043D\u0433\u0430\u043D, \u0421\u043E\u0451\u043D\u043E\u043E\u0441 \u0413\u043E\u0432\u044C \u0445\u04AF\u0440\u0441\u044D\u043D 1 \u0441\u0430\u044F 566 \u043C\u044F\u043D\u0433\u0430\u043D \u043A\u043C2 \u0443\u0443\u0434\u0430\u043C \u043D\u0443\u0442\u0430\u0433\u0442\u0430\u0439, \u0434\u044D\u043B\u0445\u0438\u0439\u0434 \u043D\u0443\u0442\u0430\u0433 \u0434\u044D\u0432\u0441\u0433\u044D\u0440\u0438\u0439\u043D \u0445\u044D\u043C\u0436\u044D\u044D\u0433\u044D\u044D\u0440 19-\u0440\u0442 \u0436\u0430\u0433\u0441\u0434\u0430\u0433. 2015 \u043E\u043D\u044B \u044D\u0445\u044D\u043D\u0434 \u041C\u043E\u043D\u0433\u043E\u043B \u0423\u043B\u0441\u044B\u043D \u0445\u04AF\u043D \u0430\u043C 3 \u0441\u0430\u044F \u0445\u04AF\u0440\u0441\u044D\u043D (135-\u0440 \u043E\u043B\u043E\u043D). \u04AE\u043D\u0434\u0441\u044D\u043D\u0434\u044D\u044D \u043C\u043E\u043D\u0433\u043E\u043B \u04AF\u043D\u0434\u044D\u0441\u0442\u044D\u043D (95 \u0445\u0443\u0432\u044C), \u043C\u04E9\u043D \u0445\u0430\u0441\u0430\u0433, \u0442\u0443\u0432\u0430 \u0445\u04AF\u043D \u0431\u0430\u0439\u043D\u0430. 16-\u0440 \u0437\u0443\u0443\u043D\u0430\u0430\u0441 \u0445\u043E\u0439\u0448 \u0431\u0443\u0434\u0434\u044B\u043D \u0448\u0430\u0448\u0438\u043D, 20-\u0440 \u0437\u0443\u0443\u043D\u0430\u0430\u0441 \u0448\u0430\u0448\u0438\u043D\u0433\u04AF\u0439 \u0431\u0430\u0439\u0434\u0430\u043B \u0434\u044D\u043B\u0433\u044D\u0440\u0441\u044D\u043D \u0431\u0430 \u0430\u043B\u0431\u0430\u043D \u0445\u044D\u0440\u044D\u0433\u0442 \u043C\u043E\u043D\u0433\u043E\u043B \u0445\u044D\u043B\u044D\u044D\u0440 \u0445\u0430\u0440\u0438\u043B\u0446\u0430\u043D\u0430.` + ] + ], + [ + "text-generation", + [`\u041D\u0430\u043C\u0430\u0439\u0433 \u0414\u043E\u0440\u0436 \u0433\u044D\u0434\u044D\u0433. 
\u0411\u0438`, `\u0425\u0430\u043C\u0433\u0438\u0439\u043D \u0441\u0430\u0439\u043D \u0434\u0443\u0443\u0447\u0438\u043D \u0431\u043E\u043B`, `\u041C\u0438\u043D\u0438\u0439 \u0434\u0443\u0440\u0442\u0430\u0439 \u0445\u0430\u043C\u0442\u043B\u0430\u0433 \u0431\u043E\u043B`, `\u042D\u0440\u0442 \u0443\u0440\u044C\u0434\u044B\u043D \u0446\u0430\u0433\u0442`] + ], + ["fill-mask", [`\u041C\u043E\u043D\u0433\u043E\u043B \u0443\u043B\u0441\u044B\u043D \u0423\u043B\u0430\u0430\u043D\u0431\u0430\u0430\u0442\u0430\u0440 \u0445\u043E\u0442\u043E\u043E\u0441 \u044F\u0440\u044C\u0436 \u0431\u0430\u0439\u043D\u0430.`, `\u041C\u0438\u043D\u0438\u0439 \u0430\u043C\u044C\u0434\u0440\u0430\u043B\u044B\u043D \u0437\u043E\u0440\u0438\u043B\u0433\u043E \u0431\u043E\u043B .`]], + [ + "automatic-speech-recognition", + [ + { + label: `Common Voice Train Example`, + src: `https://cdn-media.huggingface.co/common_voice/train/common_voice_mn_18577472.wav` + }, + { + label: `Common Voice Test Example`, + src: `https://cdn-media.huggingface.co/common_voice/test/common_voice_mn_18577346.wav` + } + ] + ], + [ + "text-to-speech", + [ + `\u0411\u0438 \u041C\u043E\u043D\u0433\u043E\u043B \u0443\u043B\u0441\u044B\u043D \u0438\u0440\u0433\u044D\u043D.`, + `\u042D\u043D\u044D\u0445\u04AF\u04AF \u0436\u0438\u0448\u044D\u044D \u043D\u044C \u0446\u0430\u0430\u043D\u0430\u0430 \u044F\u043C\u0430\u0440 \u0447 \u0443\u0442\u0433\u0430 \u0430\u0433\u0443\u0443\u043B\u0430\u0430\u0433\u04AF\u0439 \u0431\u043E\u043B\u043D\u043E`, + `\u0421\u0430\u0440 \u0448\u0438\u043D\u044D\u0434\u044D\u044D \u0441\u0430\u0439\u0445\u0430\u043D \u0448\u0438\u043D\u044D\u043B\u044D\u0436 \u0431\u0430\u0439\u043D\u0430 \u0443\u0443?` + ] + ], + [ + "sentence-similarity", + [ + { + source_sentence: "\u042D\u043D\u044D \u0431\u043E\u043B \u0430\u0437 \u0436\u0430\u0440\u0433\u0430\u043B\u0442\u0430\u0439 \u0445\u04AF\u043D \u044E\u043C", + sentences: ["\u042D\u043D\u044D \u0431\u043E\u043B \u0430\u0437 
\u0436\u0430\u0440\u0433\u0430\u043B\u0442\u0430\u0439 \u043D\u043E\u0445\u043E\u0439 \u044E\u043C", "\u042D\u043D\u044D \u0431\u043E\u043B \u043C\u0430\u0448 \u0438\u0445 \u0430\u0437 \u0436\u0430\u0440\u0433\u0430\u043B\u0442\u0430\u0439 \u0445\u04AF\u043D \u044E\u043C", "\u04E8\u043D\u04E9\u04E9\u0434\u04E9\u0440 \u043D\u0430\u0440\u043B\u0430\u0433 \u04E9\u0434\u04E9\u0440 \u0431\u0430\u0439\u043D\u0430"] + } + ] + ] +]); +var MAPPING_SI = /* @__PURE__ */ new Map([ + ["translation", [`\u0DC3\u0DD2\u0D82\u0DC4\u0DBD \u0D89\u0DAD\u0DCF \u0D85\u0DBD\u0D82\u0D9A\u0DCF\u0DBB \u0DB7\u0DCF\u0DC2\u0DCF\u0DC0\u0D9A\u0DD2.`, `\u0DB8\u0DD9\u0DB8 \u0DAD\u0DCF\u0D9A\u0DCA\u0DC2\u0DAB\u0DBA \u0DB7\u0DCF\u0DC0\u0DD2\u0DAD\u0DCF \u0D9A\u0DBB\u0DB1 \u0D94\u0DB6\u0DA7 \u0DC3\u0DCA\u0DAD\u0DD6\u0DAD\u0DD2\u0DBA\u0DD2.`]], + ["fill-mask", [`\u0DB8\u0DB8 \u0D9C\u0DD9\u0DAF\u0DBB .`, ` \u0D89\u0D9C\u0DD9\u0DB1\u0DD3\u0DB8\u0DA7 \u0D9C\u0DD2\u0DBA\u0DCF\u0DBA.`]] +]); +var MAPPING_DE = /* @__PURE__ */ new Map([ + [ + "question-answering", + [ + { + text: `Wo wohne ich?`, + context: `Mein Name ist Wolfgang und ich lebe in Berlin` + }, + { + text: `Welcher Name wird auch verwendet, um den Amazonas-Regenwald auf Englisch zu beschreiben?`, + context: `Der Amazonas-Regenwald, auf Englisch auch als Amazonien oder Amazonas-Dschungel bekannt, ist ein feuchter Laubwald, der den gr\xF6\xDFten Teil des Amazonas-Beckens S\xFCdamerikas bedeckt. Dieses Becken umfasst 7.000.000 Quadratkilometer (2.700.000 Quadratmeilen), von denen 5.500.000 Quadratkilometer (2.100.000 Quadratmeilen) vom Regenwald bedeckt sind. Diese Region umfasst Gebiete von neun Nationen. Der gr\xF6\xDFte Teil des Waldes befindet sich in Brasilien mit 60% des Regenwaldes, gefolgt von Peru mit 13%, Kolumbien mit 10% und geringen Mengen in Venezuela, Ecuador, Bolivien, Guyana, Suriname und Franz\xF6sisch-Guayana. Staaten oder Abteilungen in vier Nationen enthalten "Amazonas" in ihren Namen. 
Der Amazonas repr\xE4sentiert mehr als die H\xE4lfte der verbleibenden Regenw\xE4lder des Planeten und umfasst den gr\xF6\xDFten und artenreichsten tropischen Regenwald der Welt mit gesch\xE4tzten 390 Milliarden Einzelb\xE4umen, die in 16.000 Arten unterteilt sind.` + } + ] + ], + [ + "sentence-similarity", + [ + { + source_sentence: "Das ist eine gl\xFCckliche Person", + sentences: [ + "Das ist ein gl\xFCcklicher Hund", + "Das ist eine sehr gl\xFCckliche Person", + "Heute ist ein sonniger Tag" + ] + } + ] + ] +]); +var MAPPING_DV = /* @__PURE__ */ new Map([ + ["text-classification", [`\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078E\u07A6\u0794\u07A7\u0788\u07AD. \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078D\u07AF\u0784\u07A8\u0788\u07AD`]], + [ + "token-classification", + [ + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0780\u07AA\u0789\u07A6\u078B\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0789\u07A7\u078D\u07AD\u078E\u07A6`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0790\u07A7\u0783\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0787\u07AA\u078C\u07A9\u0789\u07AA\u078E\u07A6`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0787\u07A8\u079D\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u078A\u07AD\u078B\u07AB\u060C \u0787\u07A6\u0787\u07B0\u0791\u07AB\u078E\u07A6` + ] + ], + [ + "question-answering", + [ + { + text: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 
\u0786\u07AE\u0782\u07B0\u078C\u07A7\u0786\u07AA\u061F`, + context: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0780\u07AA\u0789\u07A6\u078B\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0789\u07A7\u078D\u07AD\u078E\u07A6` + }, + { + text: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0786\u07AE\u0782\u07B0\u078C\u07A7\u0786\u07AA\u061F`, + context: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0790\u07A7\u0783\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0787\u07AA\u078C\u07A9\u0789\u07AA\u078E\u07A6` + }, + { + text: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0786\u07AE\u0784\u07A7\u061F`, + context: `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0787\u07A8\u079D\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u078A\u07AD\u078B\u07AB\u078E\u07A6` + }, + { + text: `\u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0 \u0790\u07A8\u078A\u07A6\u0786\u07AE\u0781\u07B0\u078B\u07A8\u0782\u07AA\u0789\u07A6\u0781\u07B0 \u0787\u07A8\u0782\u078E\u07A8\u0783\u07AD\u0790\u07A8 \u0784\u07A6\u0780\u07AA\u0782\u07B0 \u0784\u07AD\u0782\u07AA\u0782\u07B0\u0786\u07AA\u0783\u07A7\u0782\u07A9 \u0786\u07AE\u0782\u07B0\u0782\u07A6\u0789\u07AC\u0787\u07B0\u061F`, + context: `\u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0 
(\u0795\u07AF\u0797\u07AA\u0796\u07A9\u0792\u07B0: \u078A\u07B0\u078D\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07A7 \u0787\u07AC\u0789\u07A6\u0790\u07AE\u0782\u07A8\u0786\u07A7 \u0782\u07AA\u0788\u07A6\u078C\u07A6 \u0787\u07AC\u0789\u07A6\u0790\u07AE\u0782\u07A8\u0787\u07A7\u061B \u0790\u07B0\u0795\u07AC\u0782\u07A8\u079D\u07B0: \u0790\u07AC\u078D\u07B0\u0788\u07A7 \u0787\u07AC\u0789\u07A6\u0790\u07AE\u0782\u07A8\u0786\u07A7, \u0787\u07AC\u0789\u07A6\u0790\u07AE\u0782\u07A8\u0787\u07A7 \u0782\u07AB\u0782\u07A9 \u0787\u07A7\u0782\u07B0\u0789\u07AA\u0786\u07AE\u0781\u07B0 \u0787\u07AC\u0789\u07A6\u0792\u07AF\u0782\u07A8\u0787\u07A7\u061B \u078A\u07B0\u0783\u07AC\u0782\u07B0\u0797\u07B0: \u078A\u07AE\u0783\u07AD \u0787\u07AC\u0789\u07AC\u0792\u07AE\u0782\u07A8\u0787\u07AC\u0782\u07B0\u061B \u0791\u07A6\u0797\u07B0: \u0787\u07AC\u0789\u07AC\u0792\u07AF\u0782\u07B0\u0783\u07AD\u078E\u07AC\u0788\u07A6\u0787\u07AA\u0791\u07B0)\u060C \u0787\u07A8\u078E\u07A8\u0783\u07AD\u0790\u07A8 \u0784\u07A6\u0780\u07AA\u0782\u07B0 \u0784\u07AA\u0782\u07A7 \u0787\u07AC\u0789\u07AC\u0792\u07AF\u0782\u07A8\u0787\u07A7 \u0782\u07AA\u0788\u07A6\u078C\u07A6 \u078B\u07A6 \u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0796\u07A6\u0782\u07B0\u078E\u07A6\u078D\u07B0 \u0787\u07A6\u0786\u07A9, \u0790\u07A6\u0787\u07AA\u078C\u07AA \u0787\u07AC\u0789\u07AC\u0783\u07A8\u0786\u07A7\u078E\u07AC \u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0784\u07AD\u0790\u07A8\u0782\u07B0 \u0790\u07A6\u0783\u07A6\u0780\u07A6\u0787\u07B0\u078B\u07AA\u078E\u07AC \u0784\u07AE\u0791\u07AA\u0784\u07A6\u0787\u07AC\u0787\u07B0\u078E\u07A6\u0787\u07A8 \u0780\u07A8\u0789\u07AC\u0782\u07AD \u0789\u07AE\u0787\u07A8\u0790\u07B0\u0793\u07B0 \u0784\u07AE\u0783\u07AF\u0791\u07B0\u078D\u07A9\u078A\u07B0 \u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07AC\u0787\u07AC\u0786\u07AC\u0788\u07AC. 
\u0787\u07AC\u0789\u07AD\u0792\u07A6\u0782\u07B0 \u0784\u07AD\u0790\u07A8\u0782\u07B0 \u0790\u07A6\u0783\u07A6\u0780\u07A6\u0787\u07B0\u078B\u07AA\u078E\u07AC \u0784\u07AE\u0791\u07AA \u0789\u07A8\u0782\u07A6\u0786\u07A9 7 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0787\u07A6\u0786\u07A6 \u0786\u07A8\u078D\u07AF\u0789\u07A9\u0793\u07A6\u0783 (2.7 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0787\u07A6\u0786\u07A6 \u0789\u07A6\u0787\u07A8\u078D\u07B0(. \u0789\u07A9\u078E\u07AC \u078C\u07AC\u0783\u07AC\u0787\u07A8\u0782\u07B0 5.5 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0787\u07A6\u0786\u07A6 \u0786\u07A8\u078D\u07AF\u0789\u07A9\u0793\u07A6\u0783 (2.1 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0787\u07A6\u0786\u07A6 \u0789\u07A6\u0787\u07A8\u078D\u07B0) \u0787\u07A6\u0786\u07A9 \u0789\u07A8 \u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07AC\u0788\u07AC. \u0789\u07A8 \u0790\u07A6\u0783\u07A6\u0780\u07A6\u0787\u07B0\u078B\u07AA\u078E\u07A6\u0787\u07A8 9 \u078E\u07A6\u0787\u07AA\u0789\u07A6\u0786\u07A6\u0781\u07B0 \u0782\u07A8\u0790\u07B0\u0784\u07A6\u078C\u07B0\u0788\u07A7 \u0793\u07AC\u0783\u07A8\u0793\u07A6\u0783\u07A9 \u0780\u07A8\u0789\u07AC\u0782\u07AC\u0787\u07AC\u0788\u07AC. 60% \u0787\u07A7\u0787\u07A8\u0787\u07AC\u0786\u07AC \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0784\u07AE\u0791\u07AA \u0784\u07A6\u0787\u07AC\u0787\u07B0 \u0782\u07A8\u0790\u07B0\u0784\u07A6\u078C\u07B0\u0788\u07A6\u0782\u07A9 \u0784\u07B0\u0783\u07AC\u0792\u07A8\u078D\u07B0\u0787\u07A6\u0781\u07AC\u0788\u07AC. 
\u0787\u07AD\u078E\u07AC \u078A\u07A6\u0780\u07AA\u078C\u07AA\u0782\u07B0 13% \u0787\u07A7\u0787\u07AC\u0786\u07AA \u0795\u07AC\u0783\u07AB \u0787\u07A7\u0787\u07A8 10% \u0787\u07A7\u0787\u07AC\u0786\u07AA \u0786\u07AE\u078D\u07A6\u0789\u07B0\u0784\u07A8\u0787\u07A7 \u0787\u07A6\u078B\u07A8 \u0786\u07AA\u0791\u07A6 \u0784\u07A6\u0787\u07AC\u0787\u07B0 \u0780\u07A8\u0789\u07AC\u0782\u07AD \u078E\u07AE\u078C\u07AA\u0782\u07B0 \u0788\u07AC\u0782\u07AC\u0792\u07AA\u0787\u07AC\u078D\u07A7, \u0787\u07AC\u0786\u07B0\u0787\u07A6\u0791\u07AF, \u0784\u07AE\u078D\u07A8\u0788\u07A8\u0787\u07A7, \u078E\u07AA\u0794\u07A7\u0782\u07A7, \u0790\u07AA\u0783\u07A8\u0782\u07A7\u0789\u07B0 \u0787\u07A6\u078B\u07A8 \u078A\u07B0\u0783\u07AC\u0782\u07B0\u0797\u07B0 \u078E\u07B0\u0787\u07A7\u0782\u07A7 \u0787\u07A6\u0781\u07B0 \u0788\u07AC\u0790\u07B0 \u0782\u07A8\u0790\u07B0\u0784\u07A6\u078C\u07B0\u0788\u07AC\u0787\u07AC\u0788\u07AC. \u0789\u07A9\u078E\u07AC \u078C\u07AC\u0783\u07AC\u0787\u07A8\u0782\u07B0 4 \u078E\u07A6\u0787\u07AA\u0789\u07AC\u0787\u07B0\u078E\u07A6\u0787\u07A8 "\u0787\u07AC\u0789\u07AC\u0792\u07AE\u0782\u07A7\u0790\u07B0" \u0780\u07A8\u0789\u07A6\u0782\u07A6\u0787\u07A8\u078E\u07AC\u0782\u07B0 \u0790\u07B0\u0793\u07AD\u0793\u07B0 \u0782\u07AA\u0788\u07A6\u078C\u07A6 \u0791\u07A8\u0795\u07A7\u0793\u07B0\u0789\u07A6\u0782\u07B0\u0793\u07B0 \u0787\u07A6\u0786\u07A6\u0781\u07B0 \u0782\u07A6\u0782\u07B0\u078B\u07A9\u078A\u07A6\u0787\u07A8\u0788\u07AC\u0787\u07AC\u0788\u07AC. 
\u0789\u07AA\u0785\u07A8 \u078B\u07AA\u0782\u07A8\u0794\u07AD\u078E\u07A6\u0787\u07A8 \u0784\u07A7\u0786\u07A9 \u0780\u07AA\u0783\u07A8 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0\u078E\u07AC \u078C\u07AC\u0783\u07AC\u0787\u07A8\u0782\u07B0 \u078B\u07AC\u0784\u07A6\u0787\u07A8\u0786\u07AA\u0785\u07A6 \u0787\u07AC\u0787\u07B0\u0784\u07A6\u0794\u07A6\u0781\u07B0\u0788\u07AA\u0783\u07AC\u0784\u07AE\u0791\u07AA\u0788\u07A6\u0783\u07AC\u0787\u07B0 \u0787\u07AC\u0789\u07AD\u0792\u07AE\u0782\u07B0 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0 \u0780\u07A8\u0787\u07B0\u0790\u07A7\u0786\u07AA\u0783\u07AC\u0787\u07AC\u0788\u07AC. \u0789\u07A8\u0787\u07A9 \u0789\u07AA\u0785\u07A8 \u078B\u07AA\u0782\u07A8\u0794\u07AC\u0787\u07A8\u0782\u07B0 \u0787\u07AC\u0782\u07B0\u0789\u07AE \u0784\u07AE\u0791\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0784\u07A6\u0787\u07AE\u0791\u07A6\u0787\u07A8\u0788\u07A6\u0783\u0790\u07B0 \u0783\u07AC\u0787\u07A8\u0782\u07B0\u078A\u07AE\u0783\u07AC\u0790\u07B0\u0793\u07B0 \u0793\u07B0\u0783\u07AC\u0786\u07B0\u0793\u07AC\u0788\u07AC. 
\u078D\u07A6\u078A\u07A7\u0786\u07AA\u0783\u07AC\u0788\u07AD \u078E\u07AE\u078C\u07AA\u0782\u07B0 16 \u0780\u07A7\u0790\u07B0 \u0790\u07B0\u0795\u07A9\u079D\u07A9\u0790\u07B0\u0787\u07A6\u0781\u07B0 \u0784\u07AC\u0780\u07A8\u078E\u07AC\u0782\u07B0\u0788\u07A7 390 \u0789\u07A8\u078D\u07A8\u0787\u07A6\u0782\u07B0 \u0788\u07A6\u0787\u07B0\u078C\u07A6\u0783\u07AA\u078E\u07AC \u078E\u07A6\u0790\u07B0 \u0789\u07A8\u078C\u07A7\u078E\u07A6\u0787\u07A8 \u0780\u07A8\u0789\u07AC\u0782\u07AC\u0787\u07AC\u0788\u07AC` + } + ] + ], + [ + "translation", + [ + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0787\u07A6\u0780\u07AA\u0789\u07A6\u078B\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0789\u07A7\u078D\u07AD\u078E\u07A6`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0790\u07A7\u0783\u07A7 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u078B\u07A8\u0783\u07A8\u0787\u07AA\u0785\u07AC\u0782\u07A9 \u0787\u07AA\u078C\u07A9\u0789\u07AA\u078E\u07A6` + ] + ], + [ + "summarization", + [ + `\u0793\u07A6\u0788\u07A6\u0783\u07AA\u078E\u07AC \u0787\u07AA\u0790\u07B0\u0789\u07A8\u0782\u07A6\u0786\u07A9 324 \u0789\u07A9\u0793\u07A6\u0783\u07AA\u060C \u0787\u07AC\u0787\u07A9 \u078E\u07A7\u078C\u07B0\u078E\u07A6\u0782\u0791\u07A6\u0786\u07A6\u0781\u07B0 81 \u0784\u07AA\u0783\u07A9\u078E\u07AC \u0787\u07A8\u0789\u07A7\u0783\u07A7\u078C\u07A6\u0786\u07A7\u0787\u07A8 \u0787\u07AC\u0787\u07B0\u0788\u07A6\u0783\u07AC\u0788\u07AC. \u0787\u07AC\u0787\u07A9 \u0795\u07AC\u0783\u07A8\u0790\u07B0\u078E\u07A6\u0787\u07A8 \u0780\u07AA\u0783\u07A8 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0787\u07AA\u0790\u07B0 \u0787\u07A8\u0789\u07A7\u0783\u07A7\u078C\u07AC\u0788\u07AC. 
\u0787\u07AD\u078E\u07AC \u0780\u07A6\u078C\u07A6\u0783\u07AC\u0790\u07B0\u0786\u07A6\u0782\u07A6\u0781\u07B0 \u0780\u07AA\u0783\u07A8 \u0784\u07AA\u0791\u07AA\u078E\u07AC \u078B\u07A8\u078E\u07AA\u0789\u07A8\u0782\u07A6\u0786\u07A9 \u0786\u07AE\u0782\u07B0\u0789\u07AC \u078A\u07A6\u0783\u07A7\u078C\u07A6\u0786\u07AA\u0782\u07B0 125 \u0789\u07A9\u0793\u07A6\u0783\u07AC\u0788\u07AC. (410 \u078A\u07AB\u0793\u07AA) \u0787\u07A6\u0787\u07A8\u078A\u07A8\u078D\u07B0 \u0793\u07A6\u0788\u07A6\u0783\u07AA \u0784\u07A8\u0782\u07A7\u0786\u07AA\u0783\u07A8 \u0787\u07A8\u0783\u07AA\u060C \u0788\u07AE\u079D\u07A8\u0782\u07B0\u078E\u07B0\u0793\u07A6\u0782\u07B0 \u0789\u07AE\u0782\u07A8\u0787\u07AA\u0789\u07AC\u0782\u07B0\u0793\u07B0\u078E\u07AC \u0787\u07AA\u0790\u07B0\u0789\u07A8\u0782\u07B0 \u078A\u07A6\u0780\u07A6\u0782\u07A6\u0787\u07A6\u0785\u07A7 \u078E\u07AE\u0790\u07B0\u060C \u078B\u07AA\u0782\u07A8\u0794\u07AD\u078E\u07A6\u0787\u07A8 \u0789\u07A9\u0780\u07AA\u0782\u07B0 \u0787\u07AA\u078A\u07AC\u0787\u07B0\u078B\u07A8 \u078C\u07A6\u0782\u07B0\u078C\u07A6\u0782\u07AA\u078E\u07AC \u078C\u07AC\u0783\u07AC\u0787\u07A8\u0782\u07B0 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0787\u07AA\u0790\u07B0 \u078C\u07A6\u0782\u07AA\u078E\u07AC \u078D\u07A6\u078E\u07A6\u0784\u07AA \u078D\u07A8\u0784\u07AA\u0782\u07AC\u0788\u07AC. \u0787\u07A6\u078B\u07A8 1930 \u078E\u07A6\u0787\u07A8 \u0782\u07A8\u0787\u07AA \u0794\u07AF\u0786\u07B0\u078E\u07AC \u0786\u07B0\u0783\u07A6\u0787\u07A8\u0790\u07B0\u078D\u07A6\u0783 \u0784\u07A8\u078D\u07B0\u0791\u07A8\u0782\u07B0\u078E\u07B0 \u0784\u07A8\u0782\u07A7\u0786\u07AA\u0783\u07AA\u0789\u07A7\u0787\u07A8 \u0780\u07A6\u0789\u07A6\u0787\u07A6\u0781\u07B0 41 \u0787\u07A6\u0780\u07A6\u0783\u07AA \u0788\u07A6\u0782\u07B0\u078B\u07AC\u0782\u07B0 \u0789\u07A8\u078D\u07A6\u078E\u07A6\u0784\u07AA \u0780\u07A8\u078A\u07AC\u0780\u07AC\u0787\u07B0\u0793\u07A8\u0787\u07AC\u0788\u07AC. 
\u0789\u07A8\u0787\u07A9 300 \u0789\u07A9\u0793\u07A6\u0783\u07A6\u0781\u07B0 \u0788\u07AA\u0783\u07AC \u0787\u07AA\u0790\u07B0\u0786\u07AE\u0781\u07B0 \u0787\u07A8\u0789\u07A7\u0783\u07A7\u078C\u07B0\u0786\u07AA\u0783\u07AC\u0788\u07AA\u0782\u07AA \u078A\u07AA\u0783\u07A6\u078C\u07A6\u0789\u07A6 \u078C\u07A6\u0782\u07AC\u0788\u07AC. 1957 \u078E\u07A6\u0787\u07A8 \u0793\u07A6\u0788\u07A6\u0783\u07AA\u078E\u07AC \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0789\u07A6\u078C\u07A9\u078E\u07A6\u0787\u07A8 \u0780\u07A6\u0783\u07AA\u0786\u07AA\u0783\u07AC\u0788\u07AA\u0782\u07AA \u0784\u07B0\u0783\u07AF\u0791\u07B0\u0786\u07A7\u0790\u07B0\u0793\u07A8\u0782\u07B0\u078E \u0787\u07AD\u0783\u07A8\u0787\u07A6\u078D\u07B0\u078E\u07AC \u0790\u07A6\u0784\u07A6\u0784\u07AA\u0782\u07B0 \u0789\u07A8\u0780\u07A7\u0783\u07AA \u0789\u07A8 \u0793\u07A6\u0788\u07A6\u0783\u07AA \u0786\u07B0\u0783\u07A6\u0787\u07A8\u0790\u07B0\u078D\u07A6\u0783 \u0784\u07A8\u078D\u07B0\u0791\u07A8\u0782\u07B0\u078E\u0787\u07A6\u0781\u07B0 \u0788\u07AA\u0783\u07AC 5.2 \u0789\u07A9\u0793\u07A6\u0783 (17 \u078A\u07AB\u0793\u07AA) \u0787\u07AA\u0780\u07AC\u0788\u07AC. 
\u0789\u07A8 \u0793\u07B0\u0783\u07A7\u0782\u07B0\u0790\u07B0\u0789\u07A8\u0793\u07A6\u0783\u07AA \u0782\u07AA\u078D\u07A7\u060C \u0787\u07A6\u0787\u07A8\u078A\u07A8\u078D\u07B0 \u0793\u07A6\u0788\u07A6\u0783\u07A6\u0786\u07A9\u060C \u0789\u07A8\u078D\u07A7\u0787\u07AA \u0788\u07A8\u0787\u07A7\u0791\u07A6\u0786\u07B0\u0793\u07A6\u0781\u07B0 \u078A\u07A6\u0780\u07AA \u078A\u07B0\u0783\u07A7\u0782\u07B0\u0790\u07B0\u078E\u07A6\u0787\u07A8 \u0780\u07AA\u0783\u07A8 2 \u0788\u07A6\u0782\u07A6\u0787\u07A6\u0781\u07B0 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u0787\u07AA\u0790\u07B0 \u078A\u07B0\u0783\u07A9\u0790\u07B0\u0793\u07AD\u0782\u07B0\u0791\u07A8\u0782\u07B0\u078E \u0787\u07A8\u0789\u07A7\u0783\u07A7\u078C\u07AC\u0788\u07AC` + ] + ], + [ + "text-generation", + [ + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0794\u07AB\u0790\u07AA\u078A\u07B0 \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0789\u07A6\u0787\u07A8\u078E\u07A6\u0782\u0791\u07AA`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u0789\u07A6\u0783\u07A8\u0787\u07A6\u0789\u07B0\u060C \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0 \u0787\u07AC\u0782\u07B0\u0789\u07AC \u078E\u07A6\u0794\u07A7\u0788\u07A7`, + `\u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0\u078E\u07AC \u0782\u07A6\u0789\u07A6\u0786\u07A9 \u078A\u07A7\u078C\u07AA\u0789\u07A6\u078C\u07AA \u0787\u07A6\u078B\u07A8 \u0787\u07A6\u0780\u07A6\u0783\u07AC\u0782\u07B0`, + `\u060C\u0787\u07AC\u0787\u07B0 \u0792\u07A6\u0789\u07A7\u0782\u07AC\u0787\u07B0\u078E\u07A6\u0787\u07A8` + ] + ], + ["fill-mask", [`. 
\u0789\u07A7\u078D\u07AC \u0787\u07A6\u0786\u07A9 \u078B\u07A8\u0788\u07AC\u0780\u07A8\u0783\u07A7\u0787\u07B0\u0796\u07AD\u078E\u07AC`, `\u078E\u07A6\u0783\u07AA\u078B\u07A8\u0794\u07A6\u0787\u07A6\u0786\u07A9 \u078B\u07A8\u0788\u07AC\u0780\u07A8\u0782\u07B0\u078E\u07AC \u0789\u07AC\u078B\u07AA\u078E\u07A6\u0787\u07A8 \u0786\u07AC\u0787\u07AA\u0789\u07AC\u0787\u07B0.`]] +]); +var MAPPING_DEFAULT_WIDGET = /* @__PURE__ */ new Map([ + ["en", MAPPING_EN], + ["zh", MAPPING_ZH], + ["fr", MAPPING_FR], + ["es", MAPPING_ES], + ["ru", MAPPING_RU], + ["uk", MAPPING_UK], + ["it", MAPPING_IT], + ["fa", MAPPING_FA], + ["ar", MAPPING_AR], + ["bn", MAPPING_BN], + ["mn", MAPPING_MN], + ["si", MAPPING_SI], + ["de", MAPPING_DE], + ["dv", MAPPING_DV] +]); + +// src/pipelines.ts +var MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"]; +var MODALITY_LABELS = { + multimodal: "Multimodal", + nlp: "Natural Language Processing", + audio: "Audio", + cv: "Computer Vision", + rl: "Reinforcement Learning", + tabular: "Tabular", + other: "Other" +}; +var PIPELINE_DATA = { + "text-classification": { + name: "Text Classification", + subtasks: [ + { + type: "acceptability-classification", + name: "Acceptability Classification" + }, + { + type: "entity-linking-classification", + name: "Entity Linking Classification" + }, + { + type: "fact-checking", + name: "Fact Checking" + }, + { + type: "intent-classification", + name: "Intent Classification" + }, + { + type: "language-identification", + name: "Language Identification" + }, + { + type: "multi-class-classification", + name: "Multi Class Classification" + }, + { + type: "multi-label-classification", + name: "Multi Label Classification" + }, + { + type: "multi-input-text-classification", + name: "Multi-input Text Classification" + }, + { + type: "natural-language-inference", + name: "Natural Language Inference" + }, + { + type: "semantic-similarity-classification", + name: "Semantic Similarity Classification" + }, + { + 
type: "sentiment-classification", + name: "Sentiment Classification" + }, + { + type: "topic-classification", + name: "Topic Classification" + }, + { + type: "semantic-similarity-scoring", + name: "Semantic Similarity Scoring" + }, + { + type: "sentiment-scoring", + name: "Sentiment Scoring" + }, + { + type: "sentiment-analysis", + name: "Sentiment Analysis" + }, + { + type: "hate-speech-detection", + name: "Hate Speech Detection" + }, + { + type: "text-scoring", + name: "Text Scoring" + } + ], + modality: "nlp", + color: "orange" + }, + "token-classification": { + name: "Token Classification", + subtasks: [ + { + type: "named-entity-recognition", + name: "Named Entity Recognition" + }, + { + type: "part-of-speech", + name: "Part of Speech" + }, + { + type: "parsing", + name: "Parsing" + }, + { + type: "lemmatization", + name: "Lemmatization" + }, + { + type: "word-sense-disambiguation", + name: "Word Sense Disambiguation" + }, + { + type: "coreference-resolution", + name: "Coreference-resolution" + } + ], + modality: "nlp", + color: "blue" + }, + "table-question-answering": { + name: "Table Question Answering", + modality: "nlp", + color: "green" + }, + "question-answering": { + name: "Question Answering", + subtasks: [ + { + type: "extractive-qa", + name: "Extractive QA" + }, + { + type: "open-domain-qa", + name: "Open Domain QA" + }, + { + type: "closed-domain-qa", + name: "Closed Domain QA" + } + ], + modality: "nlp", + color: "blue" + }, + "zero-shot-classification": { + name: "Zero-Shot Classification", + modality: "nlp", + color: "yellow" + }, + translation: { + name: "Translation", + modality: "nlp", + color: "green" + }, + summarization: { + name: "Summarization", + subtasks: [ + { + type: "news-articles-summarization", + name: "News Articles Summarization" + }, + { + type: "news-articles-headline-generation", + name: "News Articles Headline Generation" + } + ], + modality: "nlp", + color: "indigo" + }, + "feature-extraction": { + name: "Feature 
Extraction", + modality: "nlp", + color: "red" + }, + "text-generation": { + name: "Text Generation", + subtasks: [ + { + type: "dialogue-modeling", + name: "Dialogue Modeling" + }, + { + type: "dialogue-generation", + name: "Dialogue Generation" + }, + { + type: "conversational", + name: "Conversational" + }, + { + type: "language-modeling", + name: "Language Modeling" + } + ], + modality: "nlp", + color: "indigo" + }, + "text2text-generation": { + name: "Text2Text Generation", + subtasks: [ + { + type: "text-simplification", + name: "Text simplification" + }, + { + type: "explanation-generation", + name: "Explanation Generation" + }, + { + type: "abstractive-qa", + name: "Abstractive QA" + }, + { + type: "open-domain-abstractive-qa", + name: "Open Domain Abstractive QA" + }, + { + type: "closed-domain-qa", + name: "Closed Domain QA" + }, + { + type: "open-book-qa", + name: "Open Book QA" + }, + { + type: "closed-book-qa", + name: "Closed Book QA" + } + ], + modality: "nlp", + color: "indigo" + }, + "fill-mask": { + name: "Fill-Mask", + subtasks: [ + { + type: "slot-filling", + name: "Slot Filling" + }, + { + type: "masked-language-modeling", + name: "Masked Language Modeling" + } + ], + modality: "nlp", + color: "red" + }, + "sentence-similarity": { + name: "Sentence Similarity", + modality: "nlp", + color: "yellow" + }, + "text-to-speech": { + name: "Text-to-Speech", + modality: "audio", + color: "yellow" + }, + "text-to-audio": { + name: "Text-to-Audio", + modality: "audio", + color: "yellow" + }, + "automatic-speech-recognition": { + name: "Automatic Speech Recognition", + modality: "audio", + color: "yellow" + }, + "audio-to-audio": { + name: "Audio-to-Audio", + modality: "audio", + color: "blue" + }, + "audio-classification": { + name: "Audio Classification", + subtasks: [ + { + type: "keyword-spotting", + name: "Keyword Spotting" + }, + { + type: "speaker-identification", + name: "Speaker Identification" + }, + { + type: "audio-intent-classification", + 
name: "Audio Intent Classification" + }, + { + type: "audio-emotion-recognition", + name: "Audio Emotion Recognition" + }, + { + type: "audio-language-identification", + name: "Audio Language Identification" + } + ], + modality: "audio", + color: "green" + }, + "voice-activity-detection": { + name: "Voice Activity Detection", + modality: "audio", + color: "red" + }, + "depth-estimation": { + name: "Depth Estimation", + modality: "cv", + color: "yellow" + }, + "image-classification": { + name: "Image Classification", + subtasks: [ + { + type: "multi-label-image-classification", + name: "Multi Label Image Classification" + }, + { + type: "multi-class-image-classification", + name: "Multi Class Image Classification" + } + ], + modality: "cv", + color: "blue" + }, + "object-detection": { + name: "Object Detection", + subtasks: [ + { + type: "face-detection", + name: "Face Detection" + }, + { + type: "vehicle-detection", + name: "Vehicle Detection" + } + ], + modality: "cv", + color: "yellow" + }, + "image-segmentation": { + name: "Image Segmentation", + subtasks: [ + { + type: "instance-segmentation", + name: "Instance Segmentation" + }, + { + type: "semantic-segmentation", + name: "Semantic Segmentation" + }, + { + type: "panoptic-segmentation", + name: "Panoptic Segmentation" + } + ], + modality: "cv", + color: "green" + }, + "text-to-image": { + name: "Text-to-Image", + modality: "cv", + color: "yellow" + }, + "image-to-text": { + name: "Image-to-Text", + subtasks: [ + { + type: "image-captioning", + name: "Image Captioning" + } + ], + modality: "cv", + color: "red" + }, + "image-to-image": { + name: "Image-to-Image", + subtasks: [ + { + type: "image-inpainting", + name: "Image Inpainting" + }, + { + type: "image-colorization", + name: "Image Colorization" + }, + { + type: "super-resolution", + name: "Super Resolution" + } + ], + modality: "cv", + color: "indigo" + }, + "image-to-video": { + name: "Image-to-Video", + modality: "cv", + color: "indigo" + }, + 
"unconditional-image-generation": { + name: "Unconditional Image Generation", + modality: "cv", + color: "green" + }, + "video-classification": { + name: "Video Classification", + modality: "cv", + color: "blue" + }, + "reinforcement-learning": { + name: "Reinforcement Learning", + modality: "rl", + color: "red" + }, + robotics: { + name: "Robotics", + modality: "rl", + subtasks: [ + { + type: "grasping", + name: "Grasping" + }, + { + type: "task-planning", + name: "Task Planning" + } + ], + color: "blue" + }, + "tabular-classification": { + name: "Tabular Classification", + modality: "tabular", + subtasks: [ + { + type: "tabular-multi-class-classification", + name: "Tabular Multi Class Classification" + }, + { + type: "tabular-multi-label-classification", + name: "Tabular Multi Label Classification" + } + ], + color: "blue" + }, + "tabular-regression": { + name: "Tabular Regression", + modality: "tabular", + subtasks: [ + { + type: "tabular-single-column-regression", + name: "Tabular Single Column Regression" + } + ], + color: "blue" + }, + "tabular-to-text": { + name: "Tabular to Text", + modality: "tabular", + subtasks: [ + { + type: "rdf-to-text", + name: "RDF to text" + } + ], + color: "blue", + hideInModels: true + }, + "table-to-text": { + name: "Table to Text", + modality: "nlp", + color: "blue", + hideInModels: true + }, + "multiple-choice": { + name: "Multiple Choice", + subtasks: [ + { + type: "multiple-choice-qa", + name: "Multiple Choice QA" + }, + { + type: "multiple-choice-coreference-resolution", + name: "Multiple Choice Coreference Resolution" + } + ], + modality: "nlp", + color: "blue", + hideInModels: true + }, + "text-retrieval": { + name: "Text Retrieval", + subtasks: [ + { + type: "document-retrieval", + name: "Document Retrieval" + }, + { + type: "utterance-retrieval", + name: "Utterance Retrieval" + }, + { + type: "entity-linking-retrieval", + name: "Entity Linking Retrieval" + }, + { + type: "fact-checking-retrieval", + name: "Fact Checking 
Retrieval" + } + ], + modality: "nlp", + color: "indigo", + hideInModels: true + }, + "time-series-forecasting": { + name: "Time Series Forecasting", + modality: "tabular", + subtasks: [ + { + type: "univariate-time-series-forecasting", + name: "Univariate Time Series Forecasting" + }, + { + type: "multivariate-time-series-forecasting", + name: "Multivariate Time Series Forecasting" + } + ], + color: "blue" + }, + "text-to-video": { + name: "Text-to-Video", + modality: "cv", + color: "green" + }, + "image-text-to-text": { + name: "Image-Text-to-Text", + modality: "multimodal", + color: "red", + hideInDatasets: true + }, + "visual-question-answering": { + name: "Visual Question Answering", + subtasks: [ + { + type: "visual-question-answering", + name: "Visual Question Answering" + } + ], + modality: "multimodal", + color: "red" + }, + "document-question-answering": { + name: "Document Question Answering", + subtasks: [ + { + type: "document-question-answering", + name: "Document Question Answering" + } + ], + modality: "multimodal", + color: "blue", + hideInDatasets: true + }, + "zero-shot-image-classification": { + name: "Zero-Shot Image Classification", + modality: "cv", + color: "yellow" + }, + "graph-ml": { + name: "Graph Machine Learning", + modality: "other", + color: "green" + }, + "mask-generation": { + name: "Mask Generation", + modality: "cv", + color: "indigo" + }, + "zero-shot-object-detection": { + name: "Zero-Shot Object Detection", + modality: "cv", + color: "yellow" + }, + "text-to-3d": { + name: "Text-to-3D", + modality: "cv", + color: "yellow" + }, + "image-to-3d": { + name: "Image-to-3D", + modality: "cv", + color: "green" + }, + "image-feature-extraction": { + name: "Image Feature Extraction", + modality: "cv", + color: "indigo" + }, + other: { + name: "Other", + modality: "other", + color: "blue", + hideInModels: true, + hideInDatasets: true + } +}; +var PIPELINE_TYPES = Object.keys(PIPELINE_DATA); +var SUBTASK_TYPES = 
Object.values(PIPELINE_DATA).flatMap((data) => "subtasks" in data ? data.subtasks : []).map((s) => s.type); +var PIPELINE_TYPES_SET = new Set(PIPELINE_TYPES); + +// src/tasks/audio-classification/data.ts +var taskData = { + datasets: [ + { + description: "A benchmark of 10 different audio tasks.", + id: "superb" + } + ], + demo: { + inputs: [ + { + filename: "audio.wav", + type: "audio" + } + ], + outputs: [ + { + data: [ + { + label: "Up", + score: 0.2 + }, + { + label: "Down", + score: 0.8 + } + ], + type: "chart" + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + description: "An easy-to-use model for Command Recognition.", + id: "speechbrain/google_speech_command_xvector" + }, + { + description: "An Emotion Recognition model.", + id: "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" + }, + { + description: "A language identification model.", + id: "facebook/mms-lid-126" + } + ], + spaces: [ + { + description: "An application that can classify music into different genre.", + id: "kurianbenoy/audioclassification" + } + ], + summary: "Audio classification is the task of assigning a label or class to a given audio. 
It can be used for recognizing which command a user is giving or the emotion of a statement, as well as identifying a speaker.", + widgetModels: ["facebook/mms-lid-126"], + youtubeId: "KWwzcmG98Ds" +}; +var data_default = taskData; + +// src/tasks/audio-to-audio/data.ts +var taskData2 = { + datasets: [ + { + description: "512-element X-vector embeddings of speakers from CMU ARCTIC dataset.", + id: "Matthijs/cmu-arctic-xvectors" + } + ], + demo: { + inputs: [ + { + filename: "input.wav", + type: "audio" + } + ], + outputs: [ + { + filename: "label-0.wav", + type: "audio" + }, + { + filename: "label-1.wav", + type: "audio" + } + ] + }, + metrics: [ + { + description: "The Signal-to-Noise ratio is the relationship between the target signal level and the background noise level. It is calculated as the logarithm of the target signal divided by the background noise, in decibels.", + id: "snri" + }, + { + description: "The Signal-to-Distortion ratio is the relationship between the target signal and the sum of noise, interference, and artifact errors", + id: "sdri" + } + ], + models: [ + { + description: "A solid model of audio source separation.", + id: "speechbrain/sepformer-wham" + }, + { + description: "A speech enhancement model.", + id: "speechbrain/metricgan-plus-voicebank" + } + ], + spaces: [ + { + description: "An application for speech separation.", + id: "younver/speechbrain-speech-separation" + }, + { + description: "An application for audio style transfer.", + id: "nakas/audio-diffusion_style_transfer" + } + ], + summary: "Audio-to-Audio is a family of tasks in which the input is an audio and the output is one or multiple generated audios. 
Some example tasks are speech enhancement and source separation.", + widgetModels: ["speechbrain/sepformer-wham"], + youtubeId: "iohj7nCCYoM" +}; +var data_default2 = taskData2; + +// src/tasks/automatic-speech-recognition/data.ts +var taskData3 = { + datasets: [ + { + description: "31,175 hours of multilingual audio-text dataset in 108 languages.", + id: "mozilla-foundation/common_voice_17_0" + }, + { + description: "An English dataset with 1,000 hours of data.", + id: "librispeech_asr" + }, + { + description: "A multi-lingual audio dataset with 370K hours of audio.", + id: "espnet/yodas" + } + ], + demo: { + inputs: [ + { + filename: "input.flac", + type: "audio" + } + ], + outputs: [ + { + /// GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES I + label: "Transcript", + content: "Going along slushy country roads and speaking to damp audiences in...", + type: "text" + } + ] + }, + metrics: [ + { + description: "", + id: "wer" + }, + { + description: "", + id: "cer" + } + ], + models: [ + { + description: "A powerful ASR model by OpenAI.", + id: "openai/whisper-large-v3" + }, + { + description: "A good generic speech model by MetaAI for fine-tuning.", + id: "facebook/w2v-bert-2.0" + }, + { + description: "An end-to-end model that performs ASR and Speech Translation by MetaAI.", + id: "facebook/seamless-m4t-v2-large" + } + ], + spaces: [ + { + description: "A powerful general-purpose speech recognition application.", + id: "hf-audio/whisper-large-v3" + }, + { + description: "Fastest speech recognition application.", + id: "sanchit-gandhi/whisper-jax" + }, + { + description: "A high quality speech and text translation model by Meta.", + id: "facebook/seamless_m4t" + } + ], + summary: "Automatic Speech Recognition (ASR), also known as Speech to Text (STT), is the task of transcribing a given audio to text. 
It has many applications, such as voice user interfaces.", + widgetModels: ["openai/whisper-large-v3"], + youtubeId: "TksaY_FDgnk" +}; +var data_default3 = taskData3; + +// src/tasks/document-question-answering/data.ts +var taskData4 = { + datasets: [ + { + description: "Largest document understanding dataset.", + id: "HuggingFaceM4/Docmatix" + }, + { + description: "Dataset from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry Documents Library.", + id: "eliolio/docvqa" + } + ], + demo: { + inputs: [ + { + label: "Question", + content: "What is the idea behind the consumer relations efficiency team?", + type: "text" + }, + { + filename: "document-question-answering-input.png", + type: "img" + } + ], + outputs: [ + { + label: "Answer", + content: "Balance cost efficiency with quality customer service", + type: "text" + } + ] + }, + metrics: [ + { + description: "The evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein Similarity (ANLS). This metric is flexible to character regognition errors and compares the predicted answer with the ground truth answer.", + id: "anls" + }, + { + description: "Exact Match is a metric based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. 
Even if only one character is different, Exact Match will be 0", + id: "exact-match" + } + ], + models: [ + { + description: "A LayoutLM model for the document QA task, fine-tuned on DocVQA and SQuAD2.0.", + id: "impira/layoutlm-document-qa" + }, + { + description: "A special model for OCR-free Document QA task.", + id: "microsoft/udop-large" + }, + { + description: "A powerful model for document question answering.", + id: "google/pix2struct-docvqa-large" + } + ], + spaces: [ + { + description: "A robust document question answering application.", + id: "impira/docquery" + }, + { + description: "An application that can answer questions from invoices.", + id: "impira/invoices" + }, + { + description: "An application to compare different document question answering models.", + id: "merve/compare_docvqa_models" + } + ], + summary: "Document Question Answering (also known as Document Visual Question Answering) is the task of answering questions on document images. Document question answering models take a (document, question) pair as input and return an answer in natural language. Models usually rely on multi-modal features, combining text, position of words (bounding-boxes) and image.", + widgetModels: ["impira/layoutlm-document-qa"], + youtubeId: "" +}; +var data_default4 = taskData4; + +// src/tasks/feature-extraction/data.ts +var taskData5 = { + datasets: [ + { + description: "Wikipedia dataset containing cleaned articles of all languages. 
Can be used to train `feature-extraction` models.", + id: "wikipedia" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "India, officially the Republic of India, is a country in South Asia.", + type: "text" + } + ], + outputs: [ + { + table: [ + ["Dimension 1", "Dimension 2", "Dimension 3"], + ["2.583383083343506", "2.757075071334839", "0.9023529887199402"], + ["8.29393482208252", "1.1071064472198486", "2.03399395942688"], + ["-0.7754912972450256", "-1.647324562072754", "-0.6113331913948059"], + ["0.07087723910808563", "1.5942802429199219", "1.4610432386398315"] + ], + type: "tabular" + } + ] + }, + metrics: [], + models: [ + { + description: "A powerful feature extraction model for natural language processing tasks.", + id: "thenlper/gte-large" + }, + { + description: "A strong feature extraction model for retrieval.", + id: "Alibaba-NLP/gte-Qwen1.5-7B-instruct" + } + ], + spaces: [ + { + description: "A leaderboard to rank best feature extraction models..", + id: "mteb/leaderboard" + } + ], + summary: "Feature extraction is the task of extracting features learnt in a model.", + widgetModels: ["facebook/bart-base"] +}; +var data_default5 = taskData5; + +// src/tasks/fill-mask/data.ts +var taskData6 = { + datasets: [ + { + description: "A common dataset that is used to train models for many languages.", + id: "wikipedia" + }, + { + description: "A large English dataset with text crawled from the web.", + id: "c4" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "The barked at me", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "wolf", + score: 0.487 + }, + { + label: "dog", + score: 0.061 + }, + { + label: "cat", + score: 0.058 + }, + { + label: "fox", + score: 0.047 + }, + { + label: "squirrel", + score: 0.025 + } + ] + } + ] + }, + metrics: [ + { + description: "Cross Entropy is a metric that calculates the difference between two probability distributions. 
Each probability distribution is the distribution of predicted words", + id: "cross_entropy" + }, + { + description: "Perplexity is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. Lower perplexity indicates better performance", + id: "perplexity" + } + ], + models: [ + { + description: "A faster and smaller model than the famous BERT model.", + id: "distilbert-base-uncased" + }, + { + description: "A multilingual model trained on 100 languages.", + id: "xlm-roberta-base" + } + ], + spaces: [], + summary: "Masked language modeling is the task of masking some of the words in a sentence and predicting which words should replace those masks. These models are useful when we want to get a statistical understanding of the language in which the model is trained in.", + widgetModels: ["distilroberta-base"], + youtubeId: "mqElG5QJWUg" +}; +var data_default6 = taskData6; + +// src/tasks/image-classification/data.ts +var taskData7 = { + datasets: [ + { + // TODO write proper description + description: "Benchmark dataset used for image classification with images that belong to 100 classes.", + id: "cifar100" + }, + { + // TODO write proper description + description: "Dataset consisting of images of garments.", + id: "fashion_mnist" + } + ], + demo: { + inputs: [ + { + filename: "image-classification-input.jpeg", + type: "img" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Egyptian cat", + score: 0.514 + }, + { + label: "Tabby cat", + score: 0.193 + }, + { + label: "Tiger cat", + score: 0.068 + } + ] + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + description: "A strong image classification model.", + id: "google/vit-base-patch16-224" + }, + { + description: "A robust image classification model.", + id: 
"facebook/deit-base-distilled-patch16-224" + }, + { + description: "A strong image classification model.", + id: "facebook/convnext-large-224" + } + ], + spaces: [ + { + // TO DO: write description + description: "An application that classifies what a given image is about.", + id: "nielsr/perceiver-image-classification" + } + ], + summary: "Image classification is the task of assigning a label or class to an entire image. Images are expected to have only one class for each image. Image classification models take an image as input and return a prediction about which class the image belongs to.", + widgetModels: ["google/vit-base-patch16-224"], + youtubeId: "tjAIM7BOYhw" +}; +var data_default7 = taskData7; + +// src/tasks/image-feature-extraction/data.ts +var taskData8 = { + datasets: [ + { + description: "ImageNet-1K is a image classification dataset in which images are used to train image-feature-extraction models.", + id: "imagenet-1k" + } + ], + demo: { + inputs: [ + { + filename: "mask-generation-input.png", + type: "img" + } + ], + outputs: [ + { + table: [ + ["Dimension 1", "Dimension 2", "Dimension 3"], + ["0.21236686408519745", "1.0919708013534546", "0.8512550592422485"], + ["0.809657871723175", "-0.18544459342956543", "-0.7851548194885254"], + ["1.3103108406066895", "-0.2479034662246704", "-0.9107287526130676"], + ["1.8536205291748047", "-0.36419737339019775", "0.09717650711536407"] + ], + type: "tabular" + } + ] + }, + metrics: [], + models: [ + { + description: "A powerful image feature extraction model.", + id: "timm/vit_large_patch14_dinov2.lvd142m" + }, + { + description: "A strong image feature extraction model.", + id: "google/vit-base-patch16-224-in21k" + }, + { + description: "A robust image feature extraction models.", + id: "facebook/dino-vitb16" + }, + { + description: "Strong image-text-to-text model made for information retrieval from documents.", + id: "vidore/colpali" + } + ], + spaces: [], + summary: "Image feature extraction is the task of 
extracting features learnt in a computer vision model.", + widgetModels: [] +}; +var data_default8 = taskData8; + +// src/tasks/image-to-image/data.ts +var taskData9 = { + datasets: [ + { + description: "Synthetic dataset, for image relighting", + id: "VIDIT" + }, + { + description: "Multiple images of celebrities, used for facial expression translation", + id: "huggan/CelebA-faces" + } + ], + demo: { + inputs: [ + { + filename: "image-to-image-input.jpeg", + type: "img" + } + ], + outputs: [ + { + filename: "image-to-image-output.png", + type: "img" + } + ] + }, + isPlaceholder: false, + metrics: [ + { + description: "Peak Signal to Noise Ratio (PSNR) is an approximation of the human perception, considering the ratio of the absolute intensity with respect to the variations. Measured in dB, a high value indicates a high fidelity.", + id: "PSNR" + }, + { + description: "Structural Similarity Index (SSIM) is a perceptual metric which compares the luminance, contrast and structure of two images. 
The values of SSIM range between -1 and 1, and higher values indicate closer resemblance to the original image.", + id: "SSIM" + }, + { + description: "Inception Score (IS) is an analysis of the labels predicted by an image classification model when presented with a sample of the generated images.", + id: "IS" + } + ], + models: [ + { + description: "A model that enhances images captured in low light conditions.", + id: "keras-io/low-light-image-enhancement" + }, + { + description: "A model that increases the resolution of an image.", + id: "keras-io/super-resolution" + }, + { + description: "A model that creates a set of variations of the input image in the style of DALL-E using Stable Diffusion.", + id: "lambdalabs/sd-image-variations-diffusers" + }, + { + description: "A model that generates images based on segments in the input image and the text prompt.", + id: "mfidabel/controlnet-segment-anything" + }, + { + description: "A model that takes an image and an instruction to edit the image.", + id: "timbrooks/instruct-pix2pix" + } + ], + spaces: [ + { + description: "Image enhancer application for low light.", + id: "keras-io/low-light-image-enhancement" + }, + { + description: "Style transfer application.", + id: "keras-io/neural-style-transfer" + }, + { + description: "An application that generates images based on segment control.", + id: "mfidabel/controlnet-segment-anything" + }, + { + description: "Image generation application that takes image control and text prompt.", + id: "hysts/ControlNet" + }, + { + description: "Colorize any image using this app.", + id: "ioclab/brightness-controlnet" + }, + { + description: "Edit images with instructions.", + id: "timbrooks/instruct-pix2pix" + } + ], + summary: "Image-to-image is the task of transforming a source image to match the characteristics of a target image or a target image domain. 
Any image manipulation and enhancement is possible with image to image models.", + widgetModels: ["lllyasviel/sd-controlnet-canny"], + youtubeId: "" +}; +var data_default9 = taskData9; + +// src/tasks/image-to-text/data.ts +var taskData10 = { + datasets: [ + { + // TODO write proper description + description: "Dataset from 12M image-text of Reddit", + id: "red_caps" + }, + { + // TODO write proper description + description: "Dataset from 3.3M images of Google", + id: "datasets/conceptual_captions" + } + ], + demo: { + inputs: [ + { + filename: "savanna.jpg", + type: "img" + } + ], + outputs: [ + { + label: "Detailed description", + content: "a herd of giraffes and zebras grazing in a field", + type: "text" + } + ] + }, + metrics: [], + models: [ + { + description: "A robust image captioning model.", + id: "Salesforce/blip2-opt-2.7b" + }, + { + description: "A powerful and accurate image-to-text model that can also localize concepts in images.", + id: "microsoft/kosmos-2-patch14-224" + }, + { + description: "A strong optical character recognition model.", + id: "facebook/nougat-base" + }, + { + description: "A powerful model that lets you have a conversation with the image.", + id: "llava-hf/llava-1.5-7b-hf" + } + ], + spaces: [ + { + description: "An application that compares various image captioning models.", + id: "nielsr/comparing-captioning-models" + }, + { + description: "A robust image captioning application.", + id: "flax-community/image-captioning" + }, + { + description: "An application that transcribes handwritings into text.", + id: "nielsr/TrOCR-handwritten" + }, + { + description: "An application that can caption images and answer questions about a given image.", + id: "Salesforce/BLIP" + }, + { + description: "An application that can caption images and answer questions with a conversational agent.", + id: "Salesforce/BLIP2" + }, + { + description: "An image captioning application that demonstrates the effect of noise on captions.", + id: 
"johko/capdec-image-captioning" + } + ], + summary: "Image to text models output a text from a given image. Image captioning or optical character recognition can be considered as the most common applications of image to text.", + widgetModels: ["Salesforce/blip-image-captioning-base"], + youtubeId: "" +}; +var data_default10 = taskData10; + +// src/tasks/image-text-to-text/data.ts +var taskData11 = { + datasets: [ + { + description: "Instructions composed of image and text.", + id: "liuhaotian/LLaVA-Instruct-150K" + }, + { + description: "Conversation turns where questions involve image and text.", + id: "liuhaotian/LLaVA-Pretrain" + }, + { + description: "A collection of datasets made for model fine-tuning.", + id: "HuggingFaceM4/the_cauldron" + }, + { + description: "Screenshots of websites with their HTML/CSS codes.", + id: "HuggingFaceM4/WebSight" + } + ], + demo: { + inputs: [ + { + filename: "image-text-to-text-input.png", + type: "img" + }, + { + label: "Text Prompt", + content: "Describe the position of the bee in detail.", + type: "text" + } + ], + outputs: [ + { + label: "Answer", + content: "The bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned in the center of the flower, with its head and front legs sticking out.", + type: "text" + } + ] + }, + metrics: [], + models: [ + { + description: "Cutting-edge vision language model that can take multiple image inputs.", + id: "facebook/chameleon-7b" + }, + { + description: "Cutting-edge conversational vision language model that can take multiple image inputs.", + id: "HuggingFaceM4/idefics2-8b-chatty" + }, + { + description: "Small yet powerful model.", + id: "vikhyatk/moondream2" + }, + { + description: "Strong image-text-to-text model made to understand documents.", + id: "mPLUG/DocOwl1.5" + }, + { + description: "Strong image-text-to-text model.", + id: "llava-hf/llava-v1.6-mistral-7b-hf" + } + ], + spaces: [ + { + description: "Leaderboard to evaluate vision language models.", + id: "opencompass/open_vlm_leaderboard" + }, + { + description: "Vision language models arena, where models are ranked by votes of users.", + id: "WildVision/vision-arena" + }, + { + description: "Powerful vision-language model assistant.", + id: "liuhaotian/LLaVA-1.6" + }, + { + description: "An application to compare outputs of different vision language models.", + id: "merve/compare_VLMs" + }, + { + description: "An application for document vision language tasks.", + id: "mPLUG/DocOwl" + } + ], + summary: "Image-text-to-text models take in an image and text prompt and output text. These models are also called vision-language models, or VLMs. 
The difference from image-to-text models is that these models take an additional text input, not restricting the model to certain use cases like image captioning, and may also be trained to accept a conversation as input.", + widgetModels: ["microsoft/kosmos-2-patch14-224"], + youtubeId: "" +}; +var data_default11 = taskData11; + +// src/tasks/image-segmentation/data.ts +var taskData12 = { + datasets: [ + { + description: "Scene segmentation dataset.", + id: "scene_parse_150" + } + ], + demo: { + inputs: [ + { + filename: "image-segmentation-input.jpeg", + type: "img" + } + ], + outputs: [ + { + filename: "image-segmentation-output.png", + type: "img" + } + ] + }, + metrics: [ + { + description: "Average Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for each semantic class separately", + id: "Average Precision" + }, + { + description: "Mean Average Precision (mAP) is the overall average of the AP values", + id: "Mean Average Precision" + }, + { + description: "Intersection over Union (IoU) is the overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic classes", + id: "Mean Intersection over Union" + }, + { + description: "AP\u03B1 is the Average Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75", + id: "AP\u03B1" + } + ], + models: [ + { + // TO DO: write description + description: "Solid panoptic segmentation model trained on the COCO 2017 benchmark dataset.", + id: "facebook/detr-resnet-50-panoptic" + }, + { + description: "Semantic segmentation model trained on ADE20k benchmark dataset.", + id: "microsoft/beit-large-finetuned-ade-640-640" + }, + { + description: "Semantic segmentation model trained on ADE20k benchmark dataset with 512x512 resolution.", + id: "nvidia/segformer-b0-finetuned-ade-512-512" + }, + { + description: "Semantic segmentation model trained Cityscapes dataset.", + id: "facebook/mask2former-swin-large-cityscapes-semantic" + }, + { + description: "Panoptic segmentation model trained COCO (common objects) dataset.", + id: "facebook/mask2former-swin-large-coco-panoptic" + } + ], + spaces: [ + { + description: "A semantic segmentation application that can predict unseen instances out of the box.", + id: "facebook/ov-seg" + }, + { + description: "One of the strongest segmentation applications.", + id: "jbrinkma/segment-anything" + }, + { + description: "A semantic segmentation application that predicts human silhouettes.", + id: "keras-io/Human-Part-Segmentation" + }, + { + description: "An instance segmentation application to predict neuronal cell types from microscopy images.", + id: "rashmi/sartorius-cell-instance-segmentation" + }, + { + description: "An application that segments videos.", + id: "ArtGAN/Segment-Anything-Video" + }, + { + description: "An panoptic segmentation application built for outdoor environments.", + id: "segments/panoptic-segment-anything" + } + ], + summary: "Image Segmentation divides an image into segments where each pixel in the image is mapped to an object. 
This task has multiple variants such as instance segmentation, panoptic segmentation and semantic segmentation.", + widgetModels: ["facebook/detr-resnet-50-panoptic"], + youtubeId: "dKE8SIt9C-w" +}; +var data_default12 = taskData12; + +// src/tasks/mask-generation/data.ts +var taskData13 = { + datasets: [], + demo: { + inputs: [ + { + filename: "mask-generation-input.png", + type: "img" + } + ], + outputs: [ + { + filename: "mask-generation-output.png", + type: "img" + } + ] + }, + metrics: [], + models: [ + { + description: "Small yet powerful mask generation model.", + id: "Zigeng/SlimSAM-uniform-50" + }, + { + description: "Very strong mask generation model.", + id: "facebook/sam-vit-huge" + } + ], + spaces: [ + { + description: "An application that combines a mask generation model with an image embedding model for open-vocabulary image segmentation.", + id: "SkalskiP/SAM_and_MetaCLIP" + }, + { + description: "An application that compares the performance of a large and a small mask generation model.", + id: "merve/slimsam" + }, + { + description: "An application based on an improved mask generation model.", + id: "linfanluntan/Grounded-SAM" + }, + { + description: "An application to remove objects from videos using mask generation models.", + id: "SkalskiP/SAM_and_ProPainter" + } + ], + summary: "Mask generation is the task of generating masks that identify a specific object or region of interest in a given image. 
Masks are often used in segmentation tasks, where they provide a precise way to isolate the object of interest for further processing or analysis.", + widgetModels: [], + youtubeId: "" +}; +var data_default13 = taskData13; + +// src/tasks/object-detection/data.ts +var taskData14 = { + datasets: [ + { + description: "Widely used benchmark dataset for multiple vision tasks.", + id: "merve/coco2017" + }, + { + description: "Multi-task computer vision benchmark.", + id: "merve/pascal-voc" + } + ], + demo: { + inputs: [ + { + filename: "object-detection-input.jpg", + type: "img" + } + ], + outputs: [ + { + filename: "object-detection-output.jpg", + type: "img" + } + ] + }, + metrics: [ + { + description: "The Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It is calculated for each class separately", + id: "Average Precision" + }, + { + description: "The Mean Average Precision (mAP) metric is the overall average of the AP values", + id: "Mean Average Precision" + }, + { + description: "The AP\u03B1 metric is the Average Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75", + id: "AP\u03B1" + } + ], + models: [ + { + description: "Solid object detection model trained on the benchmark dataset COCO 2017.", + id: "facebook/detr-resnet-50" + }, + { + description: "Strong object detection model trained on ImageNet-21k dataset.", + id: "microsoft/beit-base-patch16-224-pt22k-ft22k" + }, + { + description: "Fast and accurate object detection model trained on COCO dataset.", + id: "PekingU/rtdetr_r18vd_coco_o365" + } + ], + spaces: [ + { + description: "Leaderboard to compare various object detection models across several metrics.", + id: "hf-vision/object_detection_leaderboard" + }, + { + description: "An application that contains various object detection models to try from.", + id: "Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS" + }, + { + description: "An application that shows multiple cutting edge techniques for object 
detection and tracking.", + id: "kadirnar/torchyolo" + }, + { + description: "An object tracking, segmentation and inpainting application.", + id: "VIPLab/Track-Anything" + }, + { + description: "Very fast object tracking application based on object detection.", + id: "merve/RT-DETR-tracking-coco" + } + ], + summary: "Object Detection models allow users to identify objects of certain defined classes. Object detection models receive an image as input and output the images with bounding boxes and labels on detected objects.", + widgetModels: ["facebook/detr-resnet-50"], + youtubeId: "WdAeKSOpxhw" +}; +var data_default14 = taskData14; + +// src/tasks/depth-estimation/data.ts +var taskData15 = { + datasets: [ + { + description: "NYU Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.", + id: "sayakpaul/nyu_depth_v2" + }, + { + description: "Monocular depth estimation benchmark based without noise and errors.", + id: "depth-anything/DA-2K" + } + ], + demo: { + inputs: [ + { + filename: "depth-estimation-input.jpg", + type: "img" + } + ], + outputs: [ + { + filename: "depth-estimation-output.png", + type: "img" + } + ] + }, + metrics: [], + models: [ + { + description: "Cutting-edge depth estimation model.", + id: "depth-anything/Depth-Anything-V2-Large" + }, + { + description: "A strong monocular depth estimation model.", + id: "Bingxin/Marigold" + }, + { + description: "A metric depth estimation model trained on NYU dataset.", + id: "Intel/zoedepth-nyu" + } + ], + spaces: [ + { + description: "An application that predicts the depth of an image and then reconstruct the 3D model as voxels.", + id: "radames/dpt-depth-estimation-3d-voxels" + }, + { + description: "An application on cutting-edge depth estimation.", + id: "depth-anything/Depth-Anything-V2" + }, + { + description: "An application to try state-of-the-art depth estimation.", + id: "merve/compare_depth_models" + } + ], + summary: "Depth estimation is the task of predicting depth of the 
objects present in an image.", + widgetModels: [""], + youtubeId: "" +}; +var data_default15 = taskData15; + +// src/tasks/placeholder/data.ts +var taskData16 = { + datasets: [], + demo: { + inputs: [], + outputs: [] + }, + isPlaceholder: true, + metrics: [], + models: [], + spaces: [], + summary: "", + widgetModels: [], + youtubeId: void 0, + /// If this is a subtask, link to the most general task ID + /// (eg, text2text-generation is the canonical ID of translation) + canonicalId: void 0 +}; +var data_default16 = taskData16; + +// src/tasks/reinforcement-learning/data.ts +var taskData17 = { + datasets: [ + { + description: "A curation of widely used datasets for Data Driven Deep Reinforcement Learning (D4RL)", + id: "edbeeching/decision_transformer_gym_replay" + } + ], + demo: { + inputs: [ + { + label: "State", + content: "Red traffic light, pedestrians are about to pass.", + type: "text" + } + ], + outputs: [ + { + label: "Action", + content: "Stop the car.", + type: "text" + }, + { + label: "Next State", + content: "Yellow light, pedestrians have crossed.", + type: "text" + } + ] + }, + metrics: [ + { + description: "Accumulated reward across all time steps discounted by a factor that ranges between 0 and 1 and determines how much the agent optimizes for future relative to immediate rewards. Measures how good is the policy ultimately found by a given algorithm considering uncertainty over the future.", + id: "Discounted Total Reward" + }, + { + description: "Average return obtained after running the policy for a certain number of evaluation episodes. As opposed to total reward, mean reward considers how much reward a given algorithm receives while learning.", + id: "Mean Reward" + }, + { + description: "Measures how good a given algorithm is after a predefined time. Some algorithms may be guaranteed to converge to optimal behavior across many time steps. 
However, an agent that reaches an acceptable level of optimality after a given time horizon may be preferable to one that ultimately reaches optimality but takes a long time.", + id: "Level of Performance After Some Time" + } + ], + models: [ + { + description: "A Reinforcement Learning model trained on expert data from the Gym Hopper environment", + id: "edbeeching/decision-transformer-gym-hopper-expert" + }, + { + description: "A PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and the RL Zoo.", + id: "HumanCompatibleAI/ppo-seals-CartPole-v0" + } + ], + spaces: [ + { + description: "An application for a cute puppy agent learning to catch a stick.", + id: "ThomasSimonini/Huggy" + }, + { + description: "An application to play Snowball Fight with a reinforcement learning agent.", + id: "ThomasSimonini/SnowballFight" + } + ], + summary: "Reinforcement learning is the computational approach of learning from action by interacting with an environment through trial and error and receiving rewards (negative or positive) as feedback", + widgetModels: [], + youtubeId: "q0BiUn5LiBc" +}; +var data_default17 = taskData17; + +// src/tasks/question-answering/data.ts +var taskData18 = { + datasets: [ + { + // TODO write proper description + description: "A famous question answering dataset based on English articles from Wikipedia.", + id: "squad_v2" + }, + { + // TODO write proper description + description: "A dataset of aggregated anonymized actual queries issued to the Google search engine.", + id: "natural_questions" + } + ], + demo: { + inputs: [ + { + label: "Question", + content: "Which name is also used to describe the Amazon rainforest in English?", + type: "text" + }, + { + label: "Context", + content: "The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle", + type: "text" + } + ], + outputs: [ + { + label: "Answer", + content: "Amazonia", + type: "text" + } + ] + }, + metrics: [ + { + description: "Exact Match is a metric 
based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. Even if only one character is different, Exact Match will be 0", + id: "exact-match" + }, + { + description: " The F1-Score metric is useful if we value both false positives and false negatives equally. The F1-Score is calculated on each word in the predicted sequence against the correct answer", + id: "f1" + } + ], + models: [ + { + description: "A robust baseline model for most question answering domains.", + id: "deepset/roberta-base-squad2" + }, + { + description: "A special model that can answer questions from tables!", + id: "google/tapas-base-finetuned-wtq" + } + ], + spaces: [ + { + description: "An application that can answer a long question from Wikipedia.", + id: "deepset/wikipedia-assistant" + } + ], + summary: "Question Answering models can retrieve the answer to a question from a given text, which is useful for searching for an answer in a document. 
Some question answering models can generate answers without context!", + widgetModels: ["deepset/roberta-base-squad2"], + youtubeId: "ajPx5LwJD-I" +}; +var data_default18 = taskData18; + +// src/tasks/sentence-similarity/data.ts +var taskData19 = { + datasets: [ + { + description: "Bing queries with relevant passages from various web sources.", + id: "ms_marco" + } + ], + demo: { + inputs: [ + { + label: "Source sentence", + content: "Machine learning is so easy.", + type: "text" + }, + { + label: "Sentences to compare to", + content: "Deep learning is so straightforward.", + type: "text" + }, + { + label: "", + content: "This is so difficult, like rocket science.", + type: "text" + }, + { + label: "", + content: "I can't believe how much I struggled with this.", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Deep learning is so straightforward.", + score: 0.623 + }, + { + label: "This is so difficult, like rocket science.", + score: 0.413 + }, + { + label: "I can't believe how much I struggled with this.", + score: 0.256 + } + ] + } + ] + }, + metrics: [ + { + description: "Reciprocal Rank is a measure used to rank the relevancy of documents given a set of documents. Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal Rank is 1", + id: "Mean Reciprocal Rank" + }, + { + description: "The similarity of the embeddings is evaluated mainly on cosine similarity. It is calculated as the cosine of the angle between two vectors. 
It is particularly useful when your texts are not the same length", + id: "Cosine Similarity" + } + ], + models: [ + { + description: "This model works well for sentences and paragraphs and can be used for clustering/grouping and semantic searches.", + id: "sentence-transformers/all-mpnet-base-v2" + }, + { + description: "A multilingual model trained for FAQ retrieval.", + id: "clips/mfaq" + } + ], + spaces: [ + { + description: "An application that leverages sentence similarity to answer questions from YouTube videos.", + id: "Gradio-Blocks/Ask_Questions_To_YouTube_Videos" + }, + { + description: "An application that retrieves relevant PubMed abstracts for a given online article which can be used as further references.", + id: "Gradio-Blocks/pubmed-abstract-retriever" + }, + { + description: "An application that leverages sentence similarity to summarize text.", + id: "nickmuchi/article-text-summarizer" + }, + { + description: "A guide that explains how Sentence Transformers can be used for semantic search.", + id: "sentence-transformers/Sentence_Transformers_for_semantic_search" + } + ], + summary: "Sentence Similarity is the task of determining how similar two texts are. Sentence similarity models convert input texts into vectors (embeddings) that capture semantic information and calculate how close (similar) they are between them. This task is particularly useful for information retrieval and clustering/grouping.", + widgetModels: ["sentence-transformers/all-MiniLM-L6-v2"], + youtubeId: "VCZq5AkbNEU" +}; +var data_default19 = taskData19; + +// src/tasks/summarization/data.ts +var taskData20 = { + canonicalId: "text2text-generation", + datasets: [ + { + description: "News articles in five different languages along with their summaries. Widely used for benchmarking multilingual summarization models.", + id: "mlsum" + }, + { + description: "English conversations and their summaries. 
Useful for benchmarking conversational agents.", + id: "samsum" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. It was the first structure to reach a height of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.", + type: "text" + } + ], + outputs: [ + { + label: "Output", + content: "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building. It was the first structure to reach a height of 300 metres.", + type: "text" + } + ] + }, + metrics: [ + { + description: "The generated sequence is compared against its summary, and the overlap of tokens are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.", + id: "rouge" + } + ], + models: [ + { + description: "A strong summarization model trained on English news articles. 
Excels at generating factual summaries.", + id: "facebook/bart-large-cnn" + }, + { + description: "A summarization model trained on medical articles.", + id: "google/bigbird-pegasus-large-pubmed" + } + ], + spaces: [ + { + description: "An application that can summarize long paragraphs.", + id: "pszemraj/summarize-long-text" + }, + { + description: "A much needed summarization application for terms and conditions.", + id: "ml6team/distilbart-tos-summarizer-tosdr" + }, + { + description: "An application that summarizes long documents.", + id: "pszemraj/document-summarization" + }, + { + description: "An application that can detect errors in abstractive summarization.", + id: "ml6team/post-processing-summarization" + } + ], + summary: "Summarization is the task of producing a shorter version of a document while preserving its important information. Some models can extract text from the original input, while other models can generate entirely new text.", + widgetModels: ["sshleifer/distilbart-cnn-12-6"], + youtubeId: "yHnr5Dk2zCI" +}; +var data_default20 = taskData20; + +// src/tasks/table-question-answering/data.ts +var taskData21 = { + datasets: [ + { + description: "The WikiTableQuestions dataset is a large-scale dataset for the task of question answering on semi-structured tables.", + id: "wikitablequestions" + }, + { + description: "WikiSQL is a dataset of 80654 hand-annotated examples of questions and SQL queries distributed across 24241 tables from Wikipedia.", + id: "wikisql" + } + ], + demo: { + inputs: [ + { + table: [ + ["Rank", "Name", "No.of reigns", "Combined days"], + ["1", "lou Thesz", "3", "3749"], + ["2", "Ric Flair", "8", "3103"], + ["3", "Harley Race", "7", "1799"] + ], + type: "tabular" + }, + { label: "Question", content: "What is the number of reigns for Harley Race?", type: "text" } + ], + outputs: [{ label: "Result", content: "7", type: "text" }] + }, + metrics: [ + { + description: "Checks whether the predicted answer(s) is the same as the 
ground-truth answer(s).", + id: "Denotation Accuracy" + } + ], + models: [ + { + description: "A table question answering model that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL query on a given table.", + id: "microsoft/tapex-base" + }, + { + description: "A robust table question answering model.", + id: "google/tapas-base-finetuned-wtq" + } + ], + spaces: [ + { + description: "An application that answers questions based on table CSV files.", + id: "katanaml/table-query" + } + ], + summary: "Table Question Answering (Table QA) is the answering a question about an information on a given table.", + widgetModels: ["google/tapas-base-finetuned-wtq"] +}; +var data_default21 = taskData21; + +// src/tasks/tabular-classification/data.ts +var taskData22 = { + datasets: [ + { + description: "A comprehensive curation of datasets covering all benchmarks.", + id: "inria-soda/tabular-benchmark" + } + ], + demo: { + inputs: [ + { + table: [ + ["Glucose", "Blood Pressure ", "Skin Thickness", "Insulin", "BMI"], + ["148", "72", "35", "0", "33.6"], + ["150", "50", "30", "0", "35.1"], + ["141", "60", "29", "1", "39.2"] + ], + type: "tabular" + } + ], + outputs: [ + { + table: [["Diabetes"], ["1"], ["1"], ["0"]], + type: "tabular" + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + description: "Breast cancer prediction model based on decision trees.", + id: "scikit-learn/cancer-prediction-trees" + } + ], + spaces: [ + { + description: "An application that can predict defective products on a production line.", + id: "scikit-learn/tabular-playground" + }, + { + description: "An application that compares various tabular classification techniques on different datasets.", + id: "scikit-learn/classification" + } + ], + summary: "Tabular classification is the task of classifying a target category 
(a group) based on set of attributes.", + widgetModels: ["scikit-learn/tabular-playground"], + youtubeId: "" +}; +var data_default22 = taskData22; + +// src/tasks/tabular-regression/data.ts +var taskData23 = { + datasets: [ + { + description: "A comprehensive curation of datasets covering all benchmarks.", + id: "inria-soda/tabular-benchmark" + } + ], + demo: { + inputs: [ + { + table: [ + ["Car Name", "Horsepower", "Weight"], + ["ford torino", "140", "3,449"], + ["amc hornet", "97", "2,774"], + ["toyota corolla", "65", "1,773"] + ], + type: "tabular" + } + ], + outputs: [ + { + table: [["MPG (miles per gallon)"], ["17"], ["18"], ["31"]], + type: "tabular" + } + ] + }, + metrics: [ + { + description: "", + id: "mse" + }, + { + description: "Coefficient of determination (or R-squared) is a measure of how well the model fits the data. Higher R-squared is considered a better fit.", + id: "r-squared" + } + ], + models: [ + { + description: "Fish weight prediction based on length measurements and species.", + id: "scikit-learn/Fish-Weight" + } + ], + spaces: [ + { + description: "An application that can predict weight of a fish based on set of attributes.", + id: "scikit-learn/fish-weight-prediction" + } + ], + summary: "Tabular regression is the task of predicting a numerical value given a set of attributes.", + widgetModels: ["scikit-learn/Fish-Weight"], + youtubeId: "" +}; +var data_default23 = taskData23; + +// src/tasks/text-to-image/data.ts +var taskData24 = { + datasets: [ + { + description: "RedCaps is a large-scale dataset of 12M image-text pairs collected from Reddit.", + id: "red_caps" + }, + { + description: "Conceptual Captions is a dataset consisting of ~3.3M images annotated with captions.", + id: "conceptual_captions" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "A city above clouds, pastel colors, Victorian style", + type: "text" + } + ], + outputs: [ + { + filename: "image.jpeg", + type: "img" + } + ] + }, + metrics: [ + { + 
description: "The Inception Score (IS) measure assesses diversity and meaningfulness. It uses a generated image sample to predict its label. A higher score signifies more diverse and meaningful images.", + id: "IS" + }, + { + description: "The Fr\xE9chet Inception Distance (FID) calculates the distance between distributions between synthetic and real samples. A lower FID score indicates better similarity between the distributions of real and generated images.", + id: "FID" + }, + { + description: "R-precision assesses how the generated image aligns with the provided text description. It uses the generated images as queries to retrieve relevant text descriptions. The top 'r' relevant descriptions are selected and used to calculate R-precision as r/R, where 'R' is the number of ground truth descriptions associated with the generated images. A higher R-precision value indicates a better model.", + id: "R-Precision" + } + ], + models: [ + { + description: "One of the most powerful image generation models that can generate realistic outputs.", + id: "stabilityai/stable-diffusion-xl-base-1.0" + }, + { + description: "A powerful yet fast image generation model.", + id: "latent-consistency/lcm-lora-sdxl" + }, + { + description: "A very fast text-to-image model.", + id: "ByteDance/SDXL-Lightning" + }, + { + description: "A powerful text-to-image model.", + id: "stabilityai/stable-diffusion-3-medium-diffusers" + } + ], + spaces: [ + { + description: "A powerful text-to-image application.", + id: "stabilityai/stable-diffusion-3-medium" + }, + { + description: "A text-to-image application to generate comics.", + id: "jbilcke-hf/ai-comic-factory" + }, + { + description: "A text-to-image application that can generate coherent text inside the image.", + id: "DeepFloyd/IF" + }, + { + description: "A powerful yet very fast image generation application.", + id: "latent-consistency/lcm-lora-for-sdxl" + }, + { + description: "A gallery to explore various text-to-image models.", + id: 
"multimodalart/LoraTheExplorer" + }, + { + description: "An application for `text-to-image`, `image-to-image` and image inpainting.", + id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI" + }, + { + description: "An application to generate realistic images given photos of a person and a prompt.", + id: "InstantX/InstantID" + } + ], + summary: "Generates images from input text. These models can be used to generate and modify images based on text prompts.", + widgetModels: ["CompVis/stable-diffusion-v1-4"], + youtubeId: "" +}; +var data_default24 = taskData24; + +// src/tasks/text-to-speech/data.ts +var taskData25 = { + canonicalId: "text-to-audio", + datasets: [ + { + description: "10K hours of multi-speaker English dataset.", + id: "parler-tts/mls_eng_10k" + }, + { + description: "Multi-speaker English dataset.", + id: "LibriTTS" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "I love audio models on the Hub!", + type: "text" + } + ], + outputs: [ + { + filename: "audio.wav", + type: "audio" + } + ] + }, + metrics: [ + { + description: "The Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated speech.", + id: "mel cepstral distortion" + } + ], + models: [ + { + description: "A powerful TTS model.", + id: "suno/bark" + }, + { + description: "A massively multi-lingual TTS model.", + id: "facebook/mms-tts" + }, + { + description: "A prompt based, powerful TTS model.", + id: "parler-tts/parler_tts_mini_v0.1" + } + ], + spaces: [ + { + description: "An application for generate highly realistic, multilingual speech.", + id: "suno/bark" + }, + { + description: "XTTS is a Voice generation model that lets you clone voices into different languages.", + id: "coqui/xtts" + }, + { + description: "An application that synthesizes speech for diverse speaker prompts.", + id: "parler-tts/parler_tts_mini" + } + ], + summary: "Text-to-Speech (TTS) is the task of generating natural sounding speech given text input. 
TTS models can be extended to have a single model that generates speech for multiple speakers and multiple languages.", + widgetModels: ["suno/bark"], + youtubeId: "NW62DpzJ274" +}; +var data_default25 = taskData25; + +// src/tasks/token-classification/data.ts +var taskData26 = { + datasets: [ + { + description: "A widely used dataset useful to benchmark named entity recognition models.", + id: "conll2003" + }, + { + description: "A multilingual dataset of Wikipedia articles annotated for named entity recognition in over 150 different languages.", + id: "wikiann" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "My name is Omar and I live in Z\xFCrich.", + type: "text" + } + ], + outputs: [ + { + text: "My name is Omar and I live in Z\xFCrich.", + tokens: [ + { + type: "PERSON", + start: 11, + end: 15 + }, + { + type: "GPE", + start: 30, + end: 36 + } + ], + type: "text-with-tokens" + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + description: "A robust performance model to identify people, locations, organizations and names of miscellaneous entities.", + id: "dslim/bert-base-NER" + }, + { + description: "Flair models are typically the state of the art in named entity recognition tasks.", + id: "flair/ner-english" + } + ], + spaces: [ + { + description: "An application that can recognizes entities, extracts noun chunks and recognizes various linguistic features of each token.", + id: "spacy/gradio_pipeline_visualizer" + } + ], + summary: "Token classification is a natural language understanding task in which a label is assigned to some tokens in a text. Some popular token classification subtasks are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. 
NER models could be trained to identify specific entities in a text, such as dates, individuals and places; and PoS tagging would identify, for example, which words in a text are verbs, nouns, and punctuation marks.", + widgetModels: ["dslim/bert-base-NER"], + youtubeId: "wVHdVlPScxA" +}; +var data_default26 = taskData26; + +// src/tasks/translation/data.ts +var taskData27 = { + canonicalId: "text2text-generation", + datasets: [ + { + description: "A dataset of copyright-free books translated into 16 different languages.", + id: "opus_books" + }, + { + description: "An example of translation between programming languages. This dataset consists of functions in Java and C#.", + id: "code_x_glue_cc_code_to_code_trans" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "My name is Omar and I live in Z\xFCrich.", + type: "text" + } + ], + outputs: [ + { + label: "Output", + content: "Mein Name ist Omar und ich wohne in Z\xFCrich.", + type: "text" + } + ] + }, + metrics: [ + { + description: "BLEU score is calculated by counting the number of shared single or subsequent tokens between the generated sequence and the reference. Subsequent n tokens are called \u201Cn-grams\u201D. Unigram refers to a single token while bi-gram refers to token pairs and n-grams refer to n subsequent tokens. 
The score ranges from 0 to 1, where 1 means the translation perfectly matched and 0 did not match at all", + id: "bleu" + }, + { + description: "", + id: "sacrebleu" + } + ], + models: [ + { + description: "A model that translates from English to French.", + id: "Helsinki-NLP/opus-mt-en-fr" + }, + { + description: "A general-purpose Transformer that can be used to translate from English to German, French, or Romanian.", + id: "t5-base" + } + ], + spaces: [ + { + description: "An application that can translate between 100 languages.", + id: "Iker/Translate-100-languages" + }, + { + description: "An application that can translate between English, Spanish and Hindi.", + id: "EuroPython2022/Translate-with-Bloom" + } + ], + summary: "Translation is the task of converting text from one language to another.", + widgetModels: ["t5-small"], + youtubeId: "1JvfrvZgi6c" +}; +var data_default27 = taskData27; + +// src/tasks/text-classification/data.ts +var taskData28 = { + datasets: [ + { + description: "A widely used dataset used to benchmark multiple variants of text classification.", + id: "glue" + }, + { + description: "A text classification dataset used to benchmark natural language inference models", + id: "snli" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "I love Hugging Face!", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "POSITIVE", + score: 0.9 + }, + { + label: "NEUTRAL", + score: 0.1 + }, + { + label: "NEGATIVE", + score: 0 + } + ] + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "The F1 metric is the harmonic mean of the precision and recall. 
It can be calculated as: F1 = 2 * (precision * recall) / (precision + recall)", + id: "f1" + } + ], + models: [ + { + description: "A robust model trained for sentiment analysis.", + id: "distilbert-base-uncased-finetuned-sst-2-english" + }, + { + description: "Multi-genre natural language inference model.", + id: "roberta-large-mnli" + } + ], + spaces: [ + { + description: "An application that can classify financial sentiment.", + id: "IoannisTr/Tech_Stocks_Trading_Assistant" + }, + { + description: "A dashboard that contains various text classification tasks.", + id: "miesnerjacob/Multi-task-NLP" + }, + { + description: "An application that analyzes user reviews in healthcare.", + id: "spacy/healthsea-demo" + } + ], + summary: "Text Classification is the task of assigning a label or class to a given text. Some use cases are sentiment analysis, natural language inference, and assessing grammatical correctness.", + widgetModels: ["distilbert-base-uncased-finetuned-sst-2-english"], + youtubeId: "leNG9fN9FQU" +}; +var data_default28 = taskData28; + +// src/tasks/text-generation/data.ts +var taskData29 = { + datasets: [ + { + description: "A large multilingual dataset of text crawled from the web.", + id: "mc4" + }, + { + description: "Diverse open-source data consisting of 22 smaller high-quality datasets. It was used to train GPT-Neo.", + id: "the_pile" + }, + { + description: "Truly open-source, curated and cleaned dialogue dataset.", + id: "HuggingFaceH4/ultrachat_200k" + }, + { + description: "An instruction dataset with preference ratings on responses.", + id: "openbmb/UltraFeedback" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "Once upon a time,", + type: "text" + } + ], + outputs: [ + { + label: "Output", + content: "Once upon a time, we knew that our ancestors were on the verge of extinction. The great explorers and poets of the Old World, from Alexander the Great to Chaucer, are dead and gone. 
A good many of our ancient explorers and poets have", + type: "text" + } + ] + }, + metrics: [ + { + description: "Cross Entropy is a metric that calculates the difference between two probability distributions. Each probability distribution is the distribution of predicted words", + id: "Cross Entropy" + }, + { + description: "The Perplexity metric is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. Lower perplexity indicates better performance", + id: "Perplexity" + } + ], + models: [ + { + description: "A large language model trained for text generation.", + id: "bigscience/bloom-560m" + }, + { + description: "A large code generation model that can generate code in 80+ languages.", + id: "bigcode/starcoder" + }, + { + description: "A very powerful text generation model.", + id: "mistralai/Mixtral-8x7B-Instruct-v0.1" + }, + { + description: "Small yet powerful text generation model.", + id: "microsoft/phi-2" + }, + { + description: "A very powerful model that can chat, do mathematical reasoning and write code.", + id: "openchat/openchat-3.5-0106" + }, + { + description: "Very strong yet small assistant model.", + id: "HuggingFaceH4/zephyr-7b-beta" + }, + { + description: "Very strong open-source large language model.", + id: "meta-llama/Llama-2-70b-hf" + } + ], + spaces: [ + { + description: "A leaderboard to compare different open-source text generation models based on various benchmarks.", + id: "open-llm-leaderboard/open_llm_leaderboard" + }, + { + description: "An text generation based application based on a very powerful LLaMA2 model.", + id: "ysharma/Explore_llamav2_with_TGI" + }, + { + description: "An text generation based application to converse with Zephyr model.", + id: "HuggingFaceH4/zephyr-chat" + }, + { + description: "An text generation application that combines OpenAI and Hugging Face models.", + id: "microsoft/HuggingGPT" + }, + { + description: "An chatbot to converse with a very 
powerful text generation model.", + id: "mlabonne/phixtral-chat" + } + ], + summary: "Generating text is the task of generating new text given another text. These models can, for example, fill in incomplete text or paraphrase.", + widgetModels: ["HuggingFaceH4/zephyr-7b-beta"], + youtubeId: "Vpjb1lu0MDk" +}; +var data_default29 = taskData29; + +// src/tasks/text-to-video/data.ts +var taskData30 = { + datasets: [ + { + description: "Microsoft Research Video to Text is a large-scale dataset for open domain video captioning", + id: "iejMac/CLIP-MSR-VTT" + }, + { + description: "UCF101 Human Actions dataset consists of 13,320 video clips from YouTube, with 101 classes.", + id: "quchenyuan/UCF101-ZIP" + }, + { + description: "A high-quality dataset for human action recognition in YouTube videos.", + id: "nateraw/kinetics" + }, + { + description: "A dataset of video clips of humans performing pre-defined basic actions with everyday objects.", + id: "HuggingFaceM4/something_something_v2" + }, + { + description: "This dataset consists of text-video pairs and contains noisy samples with irrelevant video descriptions", + id: "HuggingFaceM4/webvid" + }, + { + description: "A dataset of short Flickr videos for the temporal localization of events with descriptions.", + id: "iejMac/CLIP-DiDeMo" + } + ], + demo: { + inputs: [ + { + label: "Input", + content: "Darth Vader is surfing on the waves.", + type: "text" + } + ], + outputs: [ + { + filename: "text-to-video-output.gif", + type: "img" + } + ] + }, + metrics: [ + { + description: "Inception Score uses an image classification model that predicts class labels and evaluates how distinct and diverse the images are. A higher score indicates better video generation.", + id: "is" + }, + { + description: "Frechet Inception Distance uses an image classification model to obtain image embeddings. The metric compares mean and standard deviation of the embeddings of real and generated images. 
A smaller score indicates better video generation.", + id: "fid" + }, + { + description: "Frechet Video Distance uses a model that captures coherence for changes in frames and the quality of each frame. A smaller score indicates better video generation.", + id: "fvd" + }, + { + description: "CLIPSIM measures similarity between video frames and text using an image-text similarity model. A higher score indicates better video generation.", + id: "clipsim" + } + ], + models: [ + { + description: "A strong model for video generation.", + id: "Vchitect/LaVie" + }, + { + description: "A robust model for text-to-video generation.", + id: "damo-vilab/text-to-video-ms-1.7b" + }, + { + description: "A text-to-video generation model with high quality and smooth outputs.", + id: "hotshotco/Hotshot-XL" + } + ], + spaces: [ + { + description: "An application that generates video from text.", + id: "fffiloni/zeroscope" + }, + { + description: "An application that generates video from image and text.", + id: "Vchitect/LaVie" + }, + { + description: "An application that generates videos from text and provides multi-model support.", + id: "ArtGAN/Video-Diffusion-WebUI" + } + ], + summary: "Text-to-video models can be used in any application that requires generating consistent sequence of images from text. 
", + widgetModels: [], + youtubeId: void 0 +}; +var data_default30 = taskData30; + +// src/tasks/unconditional-image-generation/data.ts +var taskData31 = { + datasets: [ + { + description: "The CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with 600 images per class.", + id: "cifar100" + }, + { + description: "Multiple images of celebrities, used for facial expression translation.", + id: "CelebA" + } + ], + demo: { + inputs: [ + { + label: "Seed", + content: "42", + type: "text" + }, + { + label: "Number of images to generate:", + content: "4", + type: "text" + } + ], + outputs: [ + { + filename: "unconditional-image-generation-output.jpeg", + type: "img" + } + ] + }, + metrics: [ + { + description: "The inception score (IS) evaluates the quality of generated images. It measures the diversity of the generated images (the model predictions are evenly distributed across all possible labels) and their 'distinction' or 'sharpness' (the model confidently predicts a single label for each image).", + id: "Inception score (IS)" + }, + { + description: "The Fr\xE9chet Inception Distance (FID) evaluates the quality of images created by a generative model by calculating the distance between feature vectors for real and generated images.", + id: "Fre\u0107het Inception Distance (FID)" + } + ], + models: [ + { + description: "High-quality image generation model trained on the CIFAR-10 dataset. It synthesizes images of the ten classes presented in the dataset using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.", + id: "google/ddpm-cifar10-32" + }, + { + description: "High-quality image generation model trained on the 256x256 CelebA-HQ dataset. 
It synthesizes images of faces using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.", + id: "google/ddpm-celebahq-256" + } + ], + spaces: [ + { + description: "An application that can generate realistic faces.", + id: "CompVis/celeba-latent-diffusion" + } + ], + summary: "Unconditional image generation is the task of generating images with no condition in any context (like a prompt text or another image). Once trained, the model will create images that resemble its training data distribution.", + widgetModels: [""], + // TODO: Add related video + youtubeId: "" +}; +var data_default31 = taskData31; + +// src/tasks/video-classification/data.ts +var taskData32 = { + datasets: [ + { + // TODO write proper description + description: "Benchmark dataset used for video classification with videos that belong to 400 classes.", + id: "kinetics400" + } + ], + demo: { + inputs: [ + { + filename: "video-classification-input.gif", + type: "img" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Playing Guitar", + score: 0.514 + }, + { + label: "Playing Tennis", + score: 0.193 + }, + { + label: "Cooking", + score: 0.068 + } + ] + } + ] + }, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "", + id: "recall" + }, + { + description: "", + id: "precision" + }, + { + description: "", + id: "f1" + } + ], + models: [ + { + // TO DO: write description + description: "Strong Video Classification model trained on the Kinects 400 dataset.", + id: "MCG-NJU/videomae-base-finetuned-kinetics" + }, + { + // TO DO: write description + description: "Strong Video Classification model trained on the Kinects 400 dataset.", + id: "microsoft/xclip-base-patch32" + } + ], + spaces: [ + { + description: "An application that classifies video at different timestamps.", + id: "nateraw/lavila" + }, + { + description: "An application that classifies video.", + id: 
"fcakyon/video-classification" + } + ], + summary: "Video classification is the task of assigning a label or class to an entire video. Videos are expected to have only one class for each video. Video classification models take a video as input and return a prediction about which class the video belongs to.", + widgetModels: [], + youtubeId: "" +}; +var data_default32 = taskData32; + +// src/tasks/visual-question-answering/data.ts +var taskData33 = { + datasets: [ + { + description: "A widely used dataset containing questions (with answers) about images.", + id: "Graphcore/vqa" + }, + { + description: "A dataset to benchmark visual reasoning based on text in images.", + id: "textvqa" + } + ], + demo: { + inputs: [ + { + filename: "elephant.jpeg", + type: "img" + }, + { + label: "Question", + content: "What is in this image?", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "elephant", + score: 0.97 + }, + { + label: "elephants", + score: 0.06 + }, + { + label: "animal", + score: 3e-3 + } + ] + } + ] + }, + isPlaceholder: false, + metrics: [ + { + description: "", + id: "accuracy" + }, + { + description: "Measures how much a predicted answer differs from the ground truth based on the difference in their semantic meaning.", + id: "wu-palmer similarity" + } + ], + models: [ + { + description: "A visual question answering model trained to convert charts and plots to text.", + id: "google/deplot" + }, + { + description: "A visual question answering model trained for mathematical reasoning and chart derendering from images.", + id: "google/matcha-base " + }, + { + description: "A strong visual question answering that answers questions from book covers.", + id: "google/pix2struct-ocrvqa-large" + } + ], + spaces: [ + { + description: "An application that compares visual question answering models across different tasks.", + id: "merve/pix2struct" + }, + { + description: "An application that can answer questions based on images.", + id: 
"nielsr/vilt-vqa" + }, + { + description: "An application that can caption images and answer questions about a given image. ", + id: "Salesforce/BLIP" + }, + { + description: "An application that can caption images and answer questions about a given image. ", + id: "vumichien/Img2Prompt" + } + ], + summary: "Visual Question Answering is the task of answering open-ended questions based on an image. They output natural language responses to natural language questions.", + widgetModels: ["dandelin/vilt-b32-finetuned-vqa"], + youtubeId: "" +}; +var data_default33 = taskData33; + +// src/tasks/zero-shot-classification/data.ts +var taskData34 = { + datasets: [ + { + description: "A widely used dataset used to benchmark multiple variants of text classification.", + id: "glue" + }, + { + description: "The Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced collection of 433k sentence pairs annotated with textual entailment information.", + id: "MultiNLI" + }, + { + description: "FEVER is a publicly available dataset for fact extraction and verification against textual sources.", + id: "FEVER" + } + ], + demo: { + inputs: [ + { + label: "Text Input", + content: "Dune is the best movie ever.", + type: "text" + }, + { + label: "Candidate Labels", + content: "CINEMA, ART, MUSIC", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "CINEMA", + score: 0.9 + }, + { + label: "ART", + score: 0.1 + }, + { + label: "MUSIC", + score: 0 + } + ] + } + ] + }, + metrics: [], + models: [ + { + description: "Powerful zero-shot text classification model", + id: "facebook/bart-large-mnli" + } + ], + spaces: [], + summary: "Zero-shot text classification is a task in natural language processing where a model is trained on a set of labeled examples but is then able to classify new examples from previously unseen classes.", + widgetModels: ["facebook/bart-large-mnli"] +}; +var data_default34 = taskData34; + +// 
src/tasks/zero-shot-image-classification/data.ts +var taskData35 = { + datasets: [ + { + // TODO write proper description + description: "", + id: "" + } + ], + demo: { + inputs: [ + { + filename: "image-classification-input.jpeg", + type: "img" + }, + { + label: "Classes", + content: "cat, dog, bird", + type: "text" + } + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Cat", + score: 0.664 + }, + { + label: "Dog", + score: 0.329 + }, + { + label: "Bird", + score: 8e-3 + } + ] + } + ] + }, + metrics: [ + { + description: "Computes the number of times the correct label appears in top K labels predicted", + id: "top-K accuracy" + } + ], + models: [ + { + description: "Robust image classification model trained on publicly available image-caption data.", + id: "openai/clip-vit-base-patch16" + }, + { + description: "Strong zero-shot image classification model.", + id: "google/siglip-base-patch16-224" + }, + { + description: "Small yet powerful zero-shot image classification model that can run on edge devices.", + id: "apple/MobileCLIP-S1-OpenCLIP" + }, + { + description: "Strong image classification model for biomedical domain.", + id: "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224" + } + ], + spaces: [ + { + description: "An application that leverages zero-shot image classification to find best captions to generate an image. ", + id: "pharma/CLIP-Interrogator" + }, + { + description: "An application to compare different zero-shot image classification models. 
", + id: "merve/compare_clip_siglip" + } + ], + summary: "Zero-shot image classification is the task of classifying previously unseen classes during training of a model.", + widgetModels: ["openai/clip-vit-large-patch14-336"], + youtubeId: "" +}; +var data_default35 = taskData35; + +// src/tasks/zero-shot-object-detection/data.ts +var taskData36 = { + datasets: [], + demo: { + inputs: [ + { + filename: "zero-shot-object-detection-input.jpg", + type: "img" + }, + { + label: "Classes", + content: "cat, dog, bird", + type: "text" + } + ], + outputs: [ + { + filename: "zero-shot-object-detection-output.jpg", + type: "img" + } + ] + }, + metrics: [ + { + description: "The Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It is calculated for each class separately", + id: "Average Precision" + }, + { + description: "The Mean Average Precision (mAP) metric is the overall average of the AP values", + id: "Mean Average Precision" + }, + { + description: "The AP\u03B1 metric is the Average Precision at the IoU threshold of a \u03B1 value, for example, AP50 and AP75", + id: "AP\u03B1" + } + ], + models: [ + { + description: "Solid zero-shot object detection model.", + id: "IDEA-Research/grounding-dino-base" + }, + { + description: "Cutting-edge zero-shot object detection model.", + id: "google/owlv2-base-patch16-ensemble" + } + ], + spaces: [ + { + description: "A demo to try the state-of-the-art zero-shot object detection model, OWLv2.", + id: "merve/owlv2" + }, + { + description: "A demo that combines a zero-shot object detection and mask generation model for zero-shot segmentation.", + id: "merve/OWLSAM" + } + ], + summary: "Zero-shot object detection is a computer vision task to detect objects and their classes in images, without any prior training or knowledge of the classes. 
Zero-shot object detection models receive an image as input, as well as a list of candidate classes, and output the bounding boxes and labels where the objects have been detected.", + widgetModels: [], + youtubeId: "" +}; +var data_default36 = taskData36; + +// src/tasks/image-to-3d/data.ts +var taskData37 = { + datasets: [ + { + description: "A large dataset of over 10 million 3D objects.", + id: "allenai/objaverse-xl" + }, + { + description: "A dataset of isolated object images for evaluating image-to-3D models.", + id: "dylanebert/iso3d" + } + ], + demo: { + inputs: [ + { + filename: "image-to-3d-image-input.png", + type: "img" + } + ], + outputs: [ + { + label: "Result", + content: "image-to-3d-3d-output-filename.glb", + type: "text" + } + ] + }, + metrics: [], + models: [ + { + description: "Fast image-to-3D mesh model by Tencent.", + id: "TencentARC/InstantMesh" + }, + { + description: "Fast image-to-3D mesh model by StabilityAI", + id: "stabilityai/TripoSR" + }, + { + description: "A scaled up image-to-3D mesh model derived from TripoSR.", + id: "hwjiang/Real3D" + }, + { + description: "Generative 3D gaussian splatting model.", + id: "ashawkey/LGM" + } + ], + spaces: [ + { + description: "Leaderboard to evaluate image-to-3D models.", + id: "dylanebert/3d-arena" + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "TencentARC/InstantMesh" + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "stabilityai/TripoSR" + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "hwjiang/Real3D" + }, + { + description: "Image-to-3D demo with splat outputs.", + id: "dylanebert/LGM-mini" + } + ], + summary: "Image-to-3D models take in image input and produce 3D output.", + widgetModels: [], + youtubeId: "" +}; +var data_default37 = taskData37; + +// src/tasks/text-to-3d/data.ts +var taskData38 = { + datasets: [ + { + description: "A large dataset of over 10 million 3D objects.", + id: "allenai/objaverse-xl" + }, + { + 
description: "Descriptive captions for 3D objects in Objaverse.", + id: "tiange/Cap3D" + } + ], + demo: { + inputs: [ + { + label: "Prompt", + content: "a cat statue", + type: "text" + } + ], + outputs: [ + { + label: "Result", + content: "text-to-3d-3d-output-filename.glb", + type: "text" + } + ] + }, + metrics: [], + models: [ + { + description: "Text-to-3D mesh model by OpenAI", + id: "openai/shap-e" + }, + { + description: "Generative 3D gaussian splatting model.", + id: "ashawkey/LGM" + } + ], + spaces: [ + { + description: "Text-to-3D demo with mesh outputs.", + id: "hysts/Shap-E" + }, + { + description: "Text/image-to-3D demo with splat outputs.", + id: "ashawkey/LGM" + } + ], + summary: "Text-to-3D models take in text input and produce 3D output.", + widgetModels: [], + youtubeId: "" +}; +var data_default38 = taskData38; + +// src/tasks/index.ts +var TASKS_MODEL_LIBRARIES = { + "audio-classification": ["speechbrain", "transformers", "transformers.js"], + "audio-to-audio": ["asteroid", "fairseq", "speechbrain"], + "automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"], + "depth-estimation": ["transformers", "transformers.js"], + "document-question-answering": ["transformers", "transformers.js"], + "feature-extraction": ["sentence-transformers", "transformers", "transformers.js"], + "fill-mask": ["transformers", "transformers.js"], + "graph-ml": ["transformers"], + "image-classification": ["keras", "timm", "transformers", "transformers.js"], + "image-feature-extraction": ["timm", "transformers"], + "image-segmentation": ["transformers", "transformers.js"], + "image-text-to-text": ["transformers"], + "image-to-image": ["diffusers", "transformers", "transformers.js"], + "image-to-text": ["transformers", "transformers.js"], + "image-to-video": ["diffusers"], + "video-classification": ["transformers"], + "mask-generation": ["transformers"], + "multiple-choice": ["transformers"], + "object-detection": ["transformers", 
"transformers.js"], + other: [], + "question-answering": ["adapter-transformers", "allennlp", "transformers", "transformers.js"], + robotics: [], + "reinforcement-learning": ["transformers", "stable-baselines3", "ml-agents", "sample-factory"], + "sentence-similarity": ["sentence-transformers", "spacy", "transformers.js"], + summarization: ["transformers", "transformers.js"], + "table-question-answering": ["transformers"], + "table-to-text": ["transformers"], + "tabular-classification": ["sklearn"], + "tabular-regression": ["sklearn"], + "tabular-to-text": ["transformers"], + "text-classification": ["adapter-transformers", "setfit", "spacy", "transformers", "transformers.js"], + "text-generation": ["transformers", "transformers.js"], + "text-retrieval": [], + "text-to-image": ["diffusers"], + "text-to-speech": ["espnet", "tensorflowtts", "transformers", "transformers.js"], + "text-to-audio": ["transformers", "transformers.js"], + "text-to-video": ["diffusers"], + "text2text-generation": ["transformers", "transformers.js"], + "time-series-forecasting": [], + "token-classification": [ + "adapter-transformers", + "flair", + "spacy", + "span-marker", + "stanza", + "transformers", + "transformers.js" + ], + translation: ["transformers", "transformers.js"], + "unconditional-image-generation": ["diffusers"], + "visual-question-answering": ["transformers", "transformers.js"], + "voice-activity-detection": [], + "zero-shot-classification": ["transformers", "transformers.js"], + "zero-shot-image-classification": ["transformers", "transformers.js"], + "zero-shot-object-detection": ["transformers", "transformers.js"], + "text-to-3d": ["diffusers"], + "image-to-3d": ["diffusers"] +}; +function getData(type, partialTaskData = data_default16) { + return { + ...partialTaskData, + id: type, + label: PIPELINE_DATA[type].name, + libraries: TASKS_MODEL_LIBRARIES[type] + }; +} +var TASKS_DATA = { + "audio-classification": getData("audio-classification", data_default), + 
"audio-to-audio": getData("audio-to-audio", data_default2), + "automatic-speech-recognition": getData("automatic-speech-recognition", data_default3), + "depth-estimation": getData("depth-estimation", data_default15), + "document-question-answering": getData("document-question-answering", data_default4), + "feature-extraction": getData("feature-extraction", data_default5), + "fill-mask": getData("fill-mask", data_default6), + "graph-ml": void 0, + "image-classification": getData("image-classification", data_default7), + "image-feature-extraction": getData("image-feature-extraction", data_default8), + "image-segmentation": getData("image-segmentation", data_default12), + "image-to-image": getData("image-to-image", data_default9), + "image-text-to-text": getData("image-text-to-text", data_default11), + "image-to-text": getData("image-to-text", data_default10), + "image-to-video": void 0, + "mask-generation": getData("mask-generation", data_default13), + "multiple-choice": void 0, + "object-detection": getData("object-detection", data_default14), + "video-classification": getData("video-classification", data_default32), + other: void 0, + "question-answering": getData("question-answering", data_default18), + "reinforcement-learning": getData("reinforcement-learning", data_default17), + robotics: void 0, + "sentence-similarity": getData("sentence-similarity", data_default19), + summarization: getData("summarization", data_default20), + "table-question-answering": getData("table-question-answering", data_default21), + "table-to-text": void 0, + "tabular-classification": getData("tabular-classification", data_default22), + "tabular-regression": getData("tabular-regression", data_default23), + "tabular-to-text": void 0, + "text-classification": getData("text-classification", data_default28), + "text-generation": getData("text-generation", data_default29), + "text-retrieval": void 0, + "text-to-image": getData("text-to-image", data_default24), + "text-to-speech": 
getData("text-to-speech", data_default25), + "text-to-audio": void 0, + "text-to-video": getData("text-to-video", data_default30), + "text2text-generation": void 0, + "time-series-forecasting": void 0, + "token-classification": getData("token-classification", data_default26), + translation: getData("translation", data_default27), + "unconditional-image-generation": getData("unconditional-image-generation", data_default31), + "visual-question-answering": getData("visual-question-answering", data_default33), + "voice-activity-detection": void 0, + "zero-shot-classification": getData("zero-shot-classification", data_default34), + "zero-shot-image-classification": getData("zero-shot-image-classification", data_default35), + "zero-shot-object-detection": getData("zero-shot-object-detection", data_default36), + "text-to-3d": getData("text-to-3d", data_default38), + "image-to-3d": getData("image-to-3d", data_default37) +}; + +// src/model-libraries-snippets.ts +var TAG_CUSTOM_CODE = "custom_code"; +function nameWithoutNamespace(modelId) { + const splitted = modelId.split("/"); + return splitted.length === 1 ? 
splitted[0] : splitted[1]; +} +var adapters = (model) => [ + `from adapters import AutoAdapterModel + +model = AutoAdapterModel.from_pretrained("${model.config?.adapter_transformers?.model_name}") +model.load_adapter("${model.id}", set_active=True)` +]; +var allennlpUnknown = (model) => [ + `import allennlp_models +from allennlp.predictors.predictor import Predictor + +predictor = Predictor.from_path("hf://${model.id}")` +]; +var allennlpQuestionAnswering = (model) => [ + `import allennlp_models +from allennlp.predictors.predictor import Predictor + +predictor = Predictor.from_path("hf://${model.id}") +predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "question": "Where do I live?"} +predictions = predictor.predict_json(predictor_input)` +]; +var allennlp = (model) => { + if (model.tags.includes("question-answering")) { + return allennlpQuestionAnswering(model); + } + return allennlpUnknown(model); +}; +var asteroid = (model) => [ + `from asteroid.models import BaseModel + +model = BaseModel.from_pretrained("${model.id}")` +]; +var audioseal = (model) => { + const watermarkSnippet = `# Watermark Generator +from audioseal import AudioSeal + +model = AudioSeal.load_generator("${model.id}") +# pass a tensor (tensor_wav) of shape (batch, channels, samples) and a sample rate +wav, sr = tensor_wav, 16000 + +watermark = model.get_watermark(wav, sr) +watermarked_audio = wav + watermark`; + const detectorSnippet = `# Watermark Detector +from audioseal import AudioSeal + +detector = AudioSeal.load_detector("${model.id}") + +result, message = detector.detect_watermark(watermarked_audio, sr)`; + return [watermarkSnippet, detectorSnippet]; +}; +function get_base_diffusers_model(model) { + return model.cardData?.base_model?.toString() ?? 
"fill-in-base-model"; +} +var bertopic = (model) => [ + `from bertopic import BERTopic + +model = BERTopic.load("${model.id}")` +]; +var bm25s = (model) => [ + `from bm25s.hf import BM25HF + +retriever = BM25HF.load_from_hub("${model.id}")` +]; +var depth_anything_v2 = (model) => { + let encoder; + let features; + let out_channels; + encoder = ""; + features = ""; + out_channels = ""; + if (model.id === "depth-anything/Depth-Anything-V2-Small") { + encoder = "vits"; + features = "64"; + out_channels = "[48, 96, 192, 384]"; + } else if (model.id === "depth-anything/Depth-Anything-V2-Base") { + encoder = "vitb"; + features = "128"; + out_channels = "[96, 192, 384, 768]"; + } else if (model.id === "depth-anything/Depth-Anything-V2-Large") { + encoder = "vitl"; + features = "256"; + out_channels = "[256, 512, 1024, 1024"; + } + return [ + ` +# Install from https://github.com/DepthAnything/Depth-Anything-V2 + +# Load the model and infer depth from an image +import cv2 +import torch + +from depth_anything_v2.dpt import DepthAnythingV2 + +# instantiate the model +model = DepthAnythingV2(encoder="${encoder}", features=${features}, out_channels=${out_channels}) + +# load the weights +filepath = hf_hub_download(repo_id="${model.id}", filename="depth_anything_v2_${encoder}.pth", repo_type="model") +state_dict = torch.load(filepath, map_location="cpu") +model.load_state_dict(state_dict).eval() + +raw_img = cv2.imread("your/image/path") +depth = model.infer_image(raw_img) # HxW raw depth map in numpy + ` + ]; +}; +var diffusers_default = (model) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${model.id}")` +]; +var diffusers_controlnet = (model) => [ + `from diffusers import ControlNetModel, StableDiffusionControlNetPipeline + +controlnet = ControlNetModel.from_pretrained("${model.id}") +pipeline = StableDiffusionControlNetPipeline.from_pretrained( + "${get_base_diffusers_model(model)}", controlnet=controlnet +)` +]; +var 
diffusers_lora = (model) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}") +pipeline.load_lora_weights("${model.id}")` +]; +var diffusers_textual_inversion = (model) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}") +pipeline.load_textual_inversion("${model.id}")` +]; +var diffusers = (model) => { + if (model.tags.includes("controlnet")) { + return diffusers_controlnet(model); + } else if (model.tags.includes("lora")) { + return diffusers_lora(model); + } else if (model.tags.includes("textual_inversion")) { + return diffusers_textual_inversion(model); + } else { + return diffusers_default(model); + } +}; +var edsnlp = (model) => { + const packageName = nameWithoutNamespace(model.id).replaceAll("-", "_"); + return [ + `# Load it from the Hub directly +import edsnlp +nlp = edsnlp.load("${model.id}") +`, + `# Or install it as a package +!pip install git+https://huggingface.co/${model.id} + +# and import it as a module +import ${packageName} + +nlp = ${packageName}.load() # or edsnlp.load("${packageName}") +` + ]; +}; +var espnetTTS = (model) => [ + `from espnet2.bin.tts_inference import Text2Speech + +model = Text2Speech.from_pretrained("${model.id}") + +speech, *_ = model("text to generate speech from")` +]; +var espnetASR = (model) => [ + `from espnet2.bin.asr_inference import Speech2Text + +model = Speech2Text.from_pretrained( + "${model.id}" +) + +speech, rate = soundfile.read("speech.wav") +text, *_ = model(speech)[0]` +]; +var espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`]; +var espnet = (model) => { + if (model.tags.includes("text-to-speech")) { + return espnetTTS(model); + } else if (model.tags.includes("automatic-speech-recognition")) { + return espnetASR(model); + } + return espnetUnknown(); +}; +var fairseq = (model) => [ + `from 
fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub + +models, cfg, task = load_model_ensemble_and_task_from_hf_hub( + "${model.id}" +)` +]; +var flair = (model) => [ + `from flair.models import SequenceTagger + +tagger = SequenceTagger.load("${model.id}")` +]; +var gliner = (model) => [ + `from gliner import GLiNER + +model = GLiNER.from_pretrained("${model.id}")` +]; +var keras = (model) => [ + `# Available backend options are: "jax", "tensorflow", "torch". +import os +os.environ["KERAS_BACKEND"] = "tensorflow" + +import keras + +model = keras.saving.load_model("hf://${model.id}") +` +]; +var keras_nlp = (model) => [ + `# Available backend options are: "jax", "tensorflow", "torch". +import os +os.environ["KERAS_BACKEND"] = "tensorflow" + +import keras_nlp + +tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}") +backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}") +` +]; +var tf_keras = (model) => [ + `# Note: 'keras<3.x' or 'tf_keras' must be installed (legacy) +# See https://github.com/keras-team/tf-keras for more details. 
+from huggingface_hub import from_pretrained_keras + +model = from_pretrained_keras("${model.id}") +` +]; +var mamba_ssm = (model) => [ + `from mamba_ssm import MambaLMHeadModel + +model = MambaLMHeadModel.from_pretrained("${model.id}")` +]; +var mars5_tts = (model) => [ + `# Install from https://github.com/Camb-ai/MARS5-TTS + +from inference import Mars5TTS +mars5 = Mars5TTS.from_pretrained("${model.id}")` +]; +var mesh_anything = () => [ + `# Install from https://github.com/buaacyw/MeshAnything.git + +from MeshAnything.models.meshanything import MeshAnything + +# refer to https://github.com/buaacyw/MeshAnything/blob/main/main.py#L91 on how to define args +# and https://github.com/buaacyw/MeshAnything/blob/main/app.py regarding usage +model = MeshAnything(args)` +]; +var open_clip = (model) => [ + `import open_clip + +model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:${model.id}') +tokenizer = open_clip.get_tokenizer('hf-hub:${model.id}')` +]; +var paddlenlp = (model) => { + if (model.config?.architectures?.[0]) { + const architecture = model.config.architectures[0]; + return [ + [ + `from paddlenlp.transformers import AutoTokenizer, ${architecture}`, + "", + `tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`, + `model = ${architecture}.from_pretrained("${model.id}", from_hf_hub=True)` + ].join("\n") + ]; + } else { + return [ + [ + `# \u26A0\uFE0F Type of model unknown`, + `from paddlenlp.transformers import AutoTokenizer, AutoModel`, + "", + `tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`, + `model = AutoModel.from_pretrained("${model.id}", from_hf_hub=True)` + ].join("\n") + ]; + } +}; +var pyannote_audio_pipeline = (model) => [ + `from pyannote.audio import Pipeline + +pipeline = Pipeline.from_pretrained("${model.id}") + +# inference on the whole file +pipeline("file.wav") + +# inference on an excerpt +from pyannote.core import Segment +excerpt = Segment(start=2.0, 
end=5.0) + +from pyannote.audio import Audio +waveform, sample_rate = Audio().crop("file.wav", excerpt) +pipeline({"waveform": waveform, "sample_rate": sample_rate})` +]; +var pyannote_audio_model = (model) => [ + `from pyannote.audio import Model, Inference + +model = Model.from_pretrained("${model.id}") +inference = Inference(model) + +# inference on the whole file +inference("file.wav") + +# inference on an excerpt +from pyannote.core import Segment +excerpt = Segment(start=2.0, end=5.0) +inference.crop("file.wav", excerpt)` +]; +var pyannote_audio = (model) => { + if (model.tags.includes("pyannote-audio-pipeline")) { + return pyannote_audio_pipeline(model); + } + return pyannote_audio_model(model); +}; +var tensorflowttsTextToMel = (model) => [ + `from tensorflow_tts.inference import AutoProcessor, TFAutoModel + +processor = AutoProcessor.from_pretrained("${model.id}") +model = TFAutoModel.from_pretrained("${model.id}") +` +]; +var tensorflowttsMelToWav = (model) => [ + `from tensorflow_tts.inference import TFAutoModel + +model = TFAutoModel.from_pretrained("${model.id}") +audios = model.inference(mels) +` +]; +var tensorflowttsUnknown = (model) => [ + `from tensorflow_tts.inference import TFAutoModel + +model = TFAutoModel.from_pretrained("${model.id}") +` +]; +var tensorflowtts = (model) => { + if (model.tags.includes("text-to-mel")) { + return tensorflowttsTextToMel(model); + } else if (model.tags.includes("mel-to-wav")) { + return tensorflowttsMelToWav(model); + } + return tensorflowttsUnknown(model); +}; +var timm = (model) => [ + `import timm + +model = timm.create_model("hf_hub:${model.id}", pretrained=True)` +]; +var skopsPickle = (model, modelFile) => { + return [ + `import joblib +from skops.hub_utils import download +download("${model.id}", "path_to_folder") +model = joblib.load( + "${modelFile}" +) +# only load pickle files from sources you trust +# read more about it here https://skops.readthedocs.io/en/stable/persistence.html` + ]; +}; +var 
skopsFormat = (model, modelFile) => { + return [ + `from skops.hub_utils import download +from skops.io import load +download("${model.id}", "path_to_folder") +# make sure model file is in skops format +# if model is a pickle file, make sure it's from a source you trust +model = load("path_to_folder/${modelFile}")` + ]; +}; +var skopsJobLib = (model) => { + return [ + `from huggingface_hub import hf_hub_download +import joblib +model = joblib.load( + hf_hub_download("${model.id}", "sklearn_model.joblib") +) +# only load pickle files from sources you trust +# read more about it here https://skops.readthedocs.io/en/stable/persistence.html` + ]; +}; +var sklearn = (model) => { + if (model.tags.includes("skops")) { + const skopsmodelFile = model.config?.sklearn?.model?.file; + const skopssaveFormat = model.config?.sklearn?.model_format; + if (!skopsmodelFile) { + return [`# \u26A0\uFE0F Model filename not specified in config.json`]; + } + if (skopssaveFormat === "pickle") { + return skopsPickle(model, skopsmodelFile); + } else { + return skopsFormat(model, skopsmodelFile); + } + } else { + return skopsJobLib(model); + } +}; +var stable_audio_tools = (model) => [ + `import torch +import torchaudio +from einops import rearrange +from stable_audio_tools import get_pretrained_model +from stable_audio_tools.inference.generation import generate_diffusion_cond + +device = "cuda" if torch.cuda.is_available() else "cpu" + +# Download model +model, model_config = get_pretrained_model("${model.id}") +sample_rate = model_config["sample_rate"] +sample_size = model_config["sample_size"] + +model = model.to(device) + +# Set up text and timing conditioning +conditioning = [{ + "prompt": "128 BPM tech house drum loop", +}] + +# Generate stereo audio +output = generate_diffusion_cond( + model, + conditioning=conditioning, + sample_size=sample_size, + device=device +) + +# Rearrange audio batch to a single sequence +output = rearrange(output, "b d n -> d (b n)") + +# Peak normalize, 
clip, convert to int16, and save to file +output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu() +torchaudio.save("output.wav", output, sample_rate)` +]; +var fastai = (model) => [ + `from huggingface_hub import from_pretrained_fastai + +learn = from_pretrained_fastai("${model.id}")` +]; +var sampleFactory = (model) => [ + `python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir` +]; +var sentenceTransformers = (model) => [ + `from sentence_transformers import SentenceTransformer + +model = SentenceTransformer("${model.id}")` +]; +var setfit = (model) => [ + `from setfit import SetFitModel + +model = SetFitModel.from_pretrained("${model.id}")` +]; +var spacy = (model) => [ + `!pip install https://huggingface.co/${model.id}/resolve/main/${nameWithoutNamespace(model.id)}-any-py3-none-any.whl + +# Using spacy.load(). +import spacy +nlp = spacy.load("${nameWithoutNamespace(model.id)}") + +# Importing as module. 
+import ${nameWithoutNamespace(model.id)} +nlp = ${nameWithoutNamespace(model.id)}.load()` +]; +var span_marker = (model) => [ + `from span_marker import SpanMarkerModel + +model = SpanMarkerModel.from_pretrained("${model.id}")` +]; +var stanza = (model) => [ + `import stanza + +stanza.download("${nameWithoutNamespace(model.id).replace("stanza-", "")}") +nlp = stanza.Pipeline("${nameWithoutNamespace(model.id).replace("stanza-", "")}")` +]; +var speechBrainMethod = (speechbrainInterface) => { + switch (speechbrainInterface) { + case "EncoderClassifier": + return "classify_file"; + case "EncoderDecoderASR": + case "EncoderASR": + return "transcribe_file"; + case "SpectralMaskEnhancement": + return "enhance_file"; + case "SepformerSeparation": + return "separate_file"; + default: + return void 0; + } +}; +var speechbrain = (model) => { + const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface; + if (speechbrainInterface === void 0) { + return [`# interface not specified in config.json`]; + } + const speechbrainMethod = speechBrainMethod(speechbrainInterface); + if (speechbrainMethod === void 0) { + return [`# interface in config.json invalid`]; + } + return [ + `from speechbrain.pretrained import ${speechbrainInterface} +model = ${speechbrainInterface}.from_hparams( + "${model.id}" +) +model.${speechbrainMethod}("file.wav")` + ]; +}; +var transformers = (model) => { + const info = model.transformersInfo; + if (!info) { + return [`# \u26A0\uFE0F Type of model unknown`]; + } + const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : ""; + let autoSnippet; + if (info.processor) { + const varName = info.processor === "AutoTokenizer" ? "tokenizer" : info.processor === "AutoFeatureExtractor" ? 
"extractor" : "processor"; + autoSnippet = [ + "# Load model directly", + `from transformers import ${info.processor}, ${info.auto_model}`, + "", + `${varName} = ${info.processor}.from_pretrained("${model.id}"` + remote_code_snippet + ")", + `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")" + ].join("\n"); + } else { + autoSnippet = [ + "# Load model directly", + `from transformers import ${info.auto_model}`, + `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")" + ].join("\n"); + } + if (model.pipeline_tag && LIBRARY_TASK_MAPPING.transformers?.includes(model.pipeline_tag)) { + const pipelineSnippet = ["# Use a pipeline as a high-level helper", "from transformers import pipeline", ""]; + if (model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template) { + pipelineSnippet.push("messages = [", ' {"role": "user", "content": "Who are you?"},', "]"); + } + pipelineSnippet.push(`pipe = pipeline("${model.pipeline_tag}", model="${model.id}"` + remote_code_snippet + ")"); + if (model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template) { + pipelineSnippet.push("pipe(messages)"); + } + return [pipelineSnippet.join("\n"), autoSnippet]; + } + return [autoSnippet]; +}; +var transformersJS = (model) => { + if (!model.pipeline_tag) { + return [`// \u26A0\uFE0F Unknown pipeline tag`]; + } + const libName = "@xenova/transformers"; + return [ + `// npm i ${libName} +import { pipeline } from '${libName}'; + +// Allocate pipeline +const pipe = await pipeline('${model.pipeline_tag}', '${model.id}');` + ]; +}; +var peftTask = (peftTaskType) => { + switch (peftTaskType) { + case "CAUSAL_LM": + return "CausalLM"; + case "SEQ_2_SEQ_LM": + return "Seq2SeqLM"; + case "TOKEN_CLS": + return "TokenClassification"; + case "SEQ_CLS": + return "SequenceClassification"; + default: + return void 0; + } +}; +var peft = (model) => { + const { base_model_name_or_path: 
peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {}; + const pefttask = peftTask(peftTaskType); + if (!pefttask) { + return [`Task type is invalid.`]; + } + if (!peftBaseModel) { + return [`Base model is not found.`]; + } + return [ + `from peft import PeftModel, PeftConfig +from transformers import AutoModelFor${pefttask} + +config = PeftConfig.from_pretrained("${model.id}") +base_model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}") +model = PeftModel.from_pretrained(base_model, "${model.id}")` + ]; +}; +var fasttext = (model) => [ + `from huggingface_hub import hf_hub_download +import fasttext + +model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))` +]; +var stableBaselines3 = (model) => [ + `from huggingface_sb3 import load_from_hub +checkpoint = load_from_hub( + repo_id="${model.id}", + filename="{MODEL FILENAME}.zip", +)` +]; +var nemoDomainResolver = (domain, model) => { + switch (domain) { + case "ASR": + return [ + `import nemo.collections.asr as nemo_asr +asr_model = nemo_asr.models.ASRModel.from_pretrained("${model.id}") + +transcriptions = asr_model.transcribe(["file.wav"])` + ]; + default: + return void 0; + } +}; +var mlAgents = (model) => [ + `mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./download: string[]s"` +]; +var sentis = () => [ + `string modelName = "[Your model name here].sentis"; +Model model = ModelLoader.Load(Application.streamingAssetsPath + "/" + modelName); +IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model); +// Please see provided C# file for more details +` +]; +var voicecraft = (model) => [ + `from voicecraft import VoiceCraft + +model = VoiceCraft.from_pretrained("${model.id}")` +]; +var chattts = () => [ + `import ChatTTS +import torchaudio + +chat = ChatTTS.Chat() +chat.load_models(compile=False) # Set to True for better performance + +texts = ["PUT YOUR TEXT HERE",] + +wavs = chat.infer(texts, ) + +torchaudio.save("output1.wav", 
torch.from_numpy(wavs[0]), 24000)` +]; +var mlx = (model) => [ + `pip install huggingface_hub hf_transfer + +export HF_HUB_ENABLE_HF_TRANS: string[]FER=1 +huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}` +]; +var mlxim = (model) => [ + `from mlxim.model import create_model + +model = create_model(${model.id})` +]; +var nemo = (model) => { + let command = void 0; + if (model.tags.includes("automatic-speech-recognition")) { + command = nemoDomainResolver("ASR", model); + } + return command ?? [`# tag did not correspond to a valid NeMo domain.`]; +}; +var pythae = (model) => [ + `from pythae.models import AutoModel + +model = AutoModel.load_from_hf_hub("${model.id}")` +]; +var musicgen = (model) => [ + `from audiocraft.models import MusicGen + +model = MusicGen.get_pretrained("${model.id}") + +descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] +wav = model.generate(descriptions) # generates 3 samples.` +]; +var magnet = (model) => [ + `from audiocraft.models import MAGNeT + +model = MAGNeT.get_pretrained("${model.id}") + +descriptions = ['disco beat', 'energetic EDM', 'funky groove'] +wav = model.generate(descriptions) # generates 3 samples.` +]; +var audiogen = (model) => [ + `from audiocraft.models import AudioGen + +model = AudioGen.get_pretrained("${model.id}") +model.set_generation_params(duration=5) # generate 5 seconds. 
+descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a corridor'] +wav = model.generate(descriptions) # generates 3 samples.` +]; +var audiocraft = (model) => { + if (model.tags.includes("musicgen")) { + return musicgen(model); + } else if (model.tags.includes("audiogen")) { + return audiogen(model); + } else if (model.tags.includes("magnet")) { + return magnet(model); + } else { + return [`# Type of model unknown.`]; + } +}; +var whisperkit = () => [ + `# Install CLI with Homebrew on macOS device +brew install whisperkit-cli + +# View all available inference options +whisperkit-cli transcribe --help + +# Download and run inference using whisper base model +whisperkit-cli transcribe --audio-path /path/to/audio.mp3 + +# Or use your preferred model variant +whisperkit-cli transcribe --model "large-v3" --model-prefix "distil" --audio-path /path/to/audio.mp3 --verbose` +]; + +// src/model-libraries.ts +var MODEL_LIBRARIES_UI_ELEMENTS = { + "adapter-transformers": { + prettyLabel: "Adapters", + repoName: "adapters", + repoUrl: "https://github.com/Adapter-Hub/adapters", + docsUrl: "https://huggingface.co/docs/hub/adapters", + snippets: adapters, + filter: true, + countDownloads: `path:"adapter_config.json"` + }, + allennlp: { + prettyLabel: "AllenNLP", + repoName: "AllenNLP", + repoUrl: "https://github.com/allenai/allennlp", + docsUrl: "https://huggingface.co/docs/hub/allennlp", + snippets: allennlp, + filter: true + }, + asteroid: { + prettyLabel: "Asteroid", + repoName: "Asteroid", + repoUrl: "https://github.com/asteroid-team/asteroid", + docsUrl: "https://huggingface.co/docs/hub/asteroid", + snippets: asteroid, + filter: true, + countDownloads: `path:"pytorch_model.bin"` + }, + audiocraft: { + prettyLabel: "Audiocraft", + repoName: "audiocraft", + repoUrl: "https://github.com/facebookresearch/audiocraft", + snippets: audiocraft, + filter: false, + countDownloads: `path:"state_dict.bin"` + }, + audioseal: { + prettyLabel: "AudioSeal", + 
repoName: "audioseal", + repoUrl: "https://github.com/facebookresearch/audioseal", + filter: false, + countDownloads: `path_extension:"pth"`, + snippets: audioseal + }, + bertopic: { + prettyLabel: "BERTopic", + repoName: "BERTopic", + repoUrl: "https://github.com/MaartenGr/BERTopic", + snippets: bertopic, + filter: true + }, + big_vision: { + prettyLabel: "Big Vision", + repoName: "big_vision", + repoUrl: "https://github.com/google-research/big_vision", + filter: false, + countDownloads: `path_extension:"npz"` + }, + bm25s: { + prettyLabel: "BM25S", + repoName: "bm25s", + repoUrl: "https://github.com/xhluca/bm25s", + snippets: bm25s, + filter: false, + countDownloads: `path:"params.index.json"` + }, + champ: { + prettyLabel: "Champ", + repoName: "Champ", + repoUrl: "https://github.com/fudan-generative-vision/champ", + countDownloads: `path:"champ/motion_module.pth"` + }, + chat_tts: { + prettyLabel: "ChatTTS", + repoName: "ChatTTS", + repoUrl: "https://github.com/2noise/ChatTTS.git", + snippets: chattts, + filter: false, + countDownloads: `path:"asset/GPT.pt"` + }, + colpali: { + prettyLabel: "ColPali", + repoName: "ColPali", + repoUrl: "https://github.com/ManuelFay/colpali", + filter: false, + countDownloads: `path:"adapter_config.json"` + }, + "depth-anything-v2": { + prettyLabel: "DepthAnythingV2", + repoName: "Depth Anything V2", + repoUrl: "https://github.com/DepthAnything/Depth-Anything-V2", + snippets: depth_anything_v2, + filter: false, + countDownloads: `path_extension:"pth"` + }, + diffusers: { + prettyLabel: "Diffusers", + repoName: "\u{1F917}/diffusers", + repoUrl: "https://github.com/huggingface/diffusers", + docsUrl: "https://huggingface.co/docs/hub/diffusers", + snippets: diffusers, + filter: true + /// diffusers has its own more complex "countDownloads" query + }, + doctr: { + prettyLabel: "docTR", + repoName: "doctr", + repoUrl: "https://github.com/mindee/doctr" + }, + edsnlp: { + prettyLabel: "EDS-NLP", + repoName: "edsnlp", + repoUrl: 
"https://github.com/aphp/edsnlp", + docsUrl: "https://aphp.github.io/edsnlp/latest/", + filter: false, + snippets: edsnlp, + countDownloads: `path_filename:"config" AND path_extension:"cfg"` + }, + elm: { + prettyLabel: "ELM", + repoName: "elm", + repoUrl: "https://github.com/slicex-ai/elm", + filter: false, + countDownloads: `path_filename:"slicex_elm_config" AND path_extension:"json"` + }, + espnet: { + prettyLabel: "ESPnet", + repoName: "ESPnet", + repoUrl: "https://github.com/espnet/espnet", + docsUrl: "https://huggingface.co/docs/hub/espnet", + snippets: espnet, + filter: true + }, + fairseq: { + prettyLabel: "Fairseq", + repoName: "fairseq", + repoUrl: "https://github.com/pytorch/fairseq", + snippets: fairseq, + filter: true + }, + fastai: { + prettyLabel: "fastai", + repoName: "fastai", + repoUrl: "https://github.com/fastai/fastai", + docsUrl: "https://huggingface.co/docs/hub/fastai", + snippets: fastai, + filter: true + }, + fasttext: { + prettyLabel: "fastText", + repoName: "fastText", + repoUrl: "https://fasttext.cc/", + snippets: fasttext, + filter: true, + countDownloads: `path_extension:"bin"` + }, + flair: { + prettyLabel: "Flair", + repoName: "Flair", + repoUrl: "https://github.com/flairNLP/flair", + docsUrl: "https://huggingface.co/docs/hub/flair", + snippets: flair, + filter: true, + countDownloads: `path:"pytorch_model.bin"` + }, + "gemma.cpp": { + prettyLabel: "gemma.cpp", + repoName: "gemma.cpp", + repoUrl: "https://github.com/google/gemma.cpp", + filter: false, + countDownloads: `path_extension:"sbs"` + }, + gliner: { + prettyLabel: "GLiNER", + repoName: "GLiNER", + repoUrl: "https://github.com/urchade/GLiNER", + snippets: gliner, + filter: false, + countDownloads: `path:"gliner_config.json"` + }, + "glyph-byt5": { + prettyLabel: "Glyph-ByT5", + repoName: "Glyph-ByT5", + repoUrl: "https://github.com/AIGText/Glyph-ByT5", + filter: false, + countDownloads: `path:"checkpoints/byt5_model.pt"` + }, + grok: { + prettyLabel: "Grok", + repoName: 
"Grok", + repoUrl: "https://github.com/xai-org/grok-1", + filter: false, + countDownloads: `path:"ckpt/tensor00000_000" OR path:"ckpt-0/tensor00000_000"` + }, + hallo: { + prettyLabel: "Hallo", + repoName: "Hallo", + repoUrl: "https://github.com/fudan-generative-vision/hallo", + countDownloads: `path:"hallo/net.pth"` + }, + "hunyuan-dit": { + prettyLabel: "HunyuanDiT", + repoName: "HunyuanDiT", + repoUrl: "https://github.com/Tencent/HunyuanDiT", + countDownloads: `path:"pytorch_model_ema.pt" OR path:"pytorch_model_distill.pt"` + }, + keras: { + prettyLabel: "Keras", + repoName: "Keras", + repoUrl: "https://github.com/keras-team/keras", + docsUrl: "https://huggingface.co/docs/hub/keras", + snippets: keras, + filter: true, + countDownloads: `path:"config.json" OR path_extension:"keras"` + }, + "tf-keras": { + // Legacy "Keras 2" library (tensorflow-only) + prettyLabel: "TF-Keras", + repoName: "TF-Keras", + repoUrl: "https://github.com/keras-team/tf-keras", + docsUrl: "https://huggingface.co/docs/hub/tf-keras", + snippets: tf_keras, + filter: true, + countDownloads: `path:"saved_model.pb"` + }, + "keras-nlp": { + prettyLabel: "KerasNLP", + repoName: "KerasNLP", + repoUrl: "https://keras.io/keras_nlp/", + docsUrl: "https://github.com/keras-team/keras-nlp", + snippets: keras_nlp + }, + k2: { + prettyLabel: "K2", + repoName: "k2", + repoUrl: "https://github.com/k2-fsa/k2" + }, + liveportrait: { + prettyLabel: "LivePortrait", + repoName: "LivePortrait", + repoUrl: "https://github.com/KwaiVGI/LivePortrait", + filter: false, + countDownloads: `path:"liveportrait/landmark.onnx"` + }, + mindspore: { + prettyLabel: "MindSpore", + repoName: "mindspore", + repoUrl: "https://github.com/mindspore-ai/mindspore" + }, + "mamba-ssm": { + prettyLabel: "MambaSSM", + repoName: "MambaSSM", + repoUrl: "https://github.com/state-spaces/mamba", + filter: false, + snippets: mamba_ssm + }, + "mars5-tts": { + prettyLabel: "MARS5-TTS", + repoName: "MARS5-TTS", + repoUrl: 
"https://github.com/Camb-ai/MARS5-TTS", + filter: false, + countDownloads: `path:"mars5_ar.safetensors"`, + snippets: mars5_tts + }, + "mesh-anything": { + prettyLabel: "MeshAnything", + repoName: "MeshAnything", + repoUrl: "https://github.com/buaacyw/MeshAnything", + filter: false, + countDownloads: `path:"MeshAnything_350m.pth"`, + snippets: mesh_anything + }, + "ml-agents": { + prettyLabel: "ml-agents", + repoName: "ml-agents", + repoUrl: "https://github.com/Unity-Technologies/ml-agents", + docsUrl: "https://huggingface.co/docs/hub/ml-agents", + snippets: mlAgents, + filter: true, + countDownloads: `path_extension:"onnx"` + }, + mlx: { + prettyLabel: "MLX", + repoName: "MLX", + repoUrl: "https://github.com/ml-explore/mlx-examples/tree/main", + snippets: mlx, + filter: true + }, + "mlx-image": { + prettyLabel: "mlx-image", + repoName: "mlx-image", + repoUrl: "https://github.com/riccardomusmeci/mlx-image", + docsUrl: "https://huggingface.co/docs/hub/mlx-image", + snippets: mlxim, + filter: false, + countDownloads: `path:"model.safetensors"` + }, + "mlc-llm": { + prettyLabel: "MLC-LLM", + repoName: "MLC-LLM", + repoUrl: "https://github.com/mlc-ai/mlc-llm", + docsUrl: "https://llm.mlc.ai/docs/", + filter: false, + countDownloads: `path:"mlc-chat-config.json"` + }, + nemo: { + prettyLabel: "NeMo", + repoName: "NeMo", + repoUrl: "https://github.com/NVIDIA/NeMo", + snippets: nemo, + filter: true, + countDownloads: `path_extension:"nemo" OR path:"model_config.yaml"` + }, + open_clip: { + prettyLabel: "OpenCLIP", + repoName: "OpenCLIP", + repoUrl: "https://github.com/mlfoundations/open_clip", + snippets: open_clip, + filter: true, + countDownloads: `path_extension:"bin" AND path_filename:*pytorch_model` + }, + paddlenlp: { + prettyLabel: "paddlenlp", + repoName: "PaddleNLP", + repoUrl: "https://github.com/PaddlePaddle/PaddleNLP", + docsUrl: "https://huggingface.co/docs/hub/paddlenlp", + snippets: paddlenlp, + filter: true, + countDownloads: `path:"model_config.json"` + 
}, + peft: { + prettyLabel: "PEFT", + repoName: "PEFT", + repoUrl: "https://github.com/huggingface/peft", + snippets: peft, + filter: true, + countDownloads: `path:"adapter_config.json"` + }, + "pyannote-audio": { + prettyLabel: "pyannote.audio", + repoName: "pyannote-audio", + repoUrl: "https://github.com/pyannote/pyannote-audio", + snippets: pyannote_audio, + filter: true + }, + pythae: { + prettyLabel: "pythae", + repoName: "pythae", + repoUrl: "https://github.com/clementchadebec/benchmark_VAE", + snippets: pythae, + filter: true + }, + recurrentgemma: { + prettyLabel: "RecurrentGemma", + repoName: "recurrentgemma", + repoUrl: "https://github.com/google-deepmind/recurrentgemma", + filter: false, + countDownloads: `path:"tokenizer.model"` + }, + "sample-factory": { + prettyLabel: "sample-factory", + repoName: "sample-factory", + repoUrl: "https://github.com/alex-petrenko/sample-factory", + docsUrl: "https://huggingface.co/docs/hub/sample-factory", + snippets: sampleFactory, + filter: true, + countDownloads: `path:"cfg.json"` + }, + "sentence-transformers": { + prettyLabel: "sentence-transformers", + repoName: "sentence-transformers", + repoUrl: "https://github.com/UKPLab/sentence-transformers", + docsUrl: "https://huggingface.co/docs/hub/sentence-transformers", + snippets: sentenceTransformers, + filter: true + }, + setfit: { + prettyLabel: "setfit", + repoName: "setfit", + repoUrl: "https://github.com/huggingface/setfit", + docsUrl: "https://huggingface.co/docs/hub/setfit", + snippets: setfit, + filter: true + }, + sklearn: { + prettyLabel: "Scikit-learn", + repoName: "Scikit-learn", + repoUrl: "https://github.com/scikit-learn/scikit-learn", + snippets: sklearn, + filter: true, + countDownloads: `path:"sklearn_model.joblib"` + }, + spacy: { + prettyLabel: "spaCy", + repoName: "spaCy", + repoUrl: "https://github.com/explosion/spaCy", + docsUrl: "https://huggingface.co/docs/hub/spacy", + snippets: spacy, + filter: true, + countDownloads: `path_extension:"whl"` + 
}, + "span-marker": { + prettyLabel: "SpanMarker", + repoName: "SpanMarkerNER", + repoUrl: "https://github.com/tomaarsen/SpanMarkerNER", + docsUrl: "https://huggingface.co/docs/hub/span_marker", + snippets: span_marker, + filter: true + }, + speechbrain: { + prettyLabel: "speechbrain", + repoName: "speechbrain", + repoUrl: "https://github.com/speechbrain/speechbrain", + docsUrl: "https://huggingface.co/docs/hub/speechbrain", + snippets: speechbrain, + filter: true, + countDownloads: `path:"hyperparams.yaml"` + }, + "stable-audio-tools": { + prettyLabel: "Stable Audio Tools", + repoName: "stable-audio-tools", + repoUrl: "https://github.com/Stability-AI/stable-audio-tools.git", + filter: false, + countDownloads: `path:"model.safetensors"`, + snippets: stable_audio_tools + }, + "diffusion-single-file": { + prettyLabel: "Diffusion Single File", + repoName: "diffusion-single-file", + repoUrl: "https://github.com/comfyanonymous/ComfyUI", + filter: false, + countDownloads: `path_extension:"safetensors"` + }, + "stable-baselines3": { + prettyLabel: "stable-baselines3", + repoName: "stable-baselines3", + repoUrl: "https://github.com/huggingface/huggingface_sb3", + docsUrl: "https://huggingface.co/docs/hub/stable-baselines3", + snippets: stableBaselines3, + filter: true, + countDownloads: `path_extension:"zip"` + }, + stanza: { + prettyLabel: "Stanza", + repoName: "stanza", + repoUrl: "https://github.com/stanfordnlp/stanza", + docsUrl: "https://huggingface.co/docs/hub/stanza", + snippets: stanza, + filter: true, + countDownloads: `path:"models/default.zip"` + }, + tensorflowtts: { + prettyLabel: "TensorFlowTTS", + repoName: "TensorFlowTTS", + repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS", + snippets: tensorflowtts + }, + "tic-clip": { + prettyLabel: "TiC-CLIP", + repoName: "TiC-CLIP", + repoUrl: "https://github.com/apple/ml-tic-clip", + filter: false, + countDownloads: `path_extension:"pt" AND path_prefix:"checkpoints/"` + }, + timesfm: { + prettyLabel: "TimesFM", 
+ repoName: "timesfm", + repoUrl: "https://github.com/google-research/timesfm", + filter: false, + countDownloads: `path:"checkpoints/checkpoint_1100000/state/checkpoint"` + }, + timm: { + prettyLabel: "timm", + repoName: "pytorch-image-models", + repoUrl: "https://github.com/rwightman/pytorch-image-models", + docsUrl: "https://huggingface.co/docs/hub/timm", + snippets: timm, + filter: true, + countDownloads: `path:"pytorch_model.bin" OR path:"model.safetensors"` + }, + transformers: { + prettyLabel: "Transformers", + repoName: "\u{1F917}/transformers", + repoUrl: "https://github.com/huggingface/transformers", + docsUrl: "https://huggingface.co/docs/hub/transformers", + snippets: transformers, + filter: true + }, + "transformers.js": { + prettyLabel: "Transformers.js", + repoName: "transformers.js", + repoUrl: "https://github.com/xenova/transformers.js", + docsUrl: "https://huggingface.co/docs/hub/transformers-js", + snippets: transformersJS, + filter: true + }, + "unity-sentis": { + prettyLabel: "unity-sentis", + repoName: "unity-sentis", + repoUrl: "https://github.com/Unity-Technologies/sentis-samples", + snippets: sentis, + filter: true, + countDownloads: `path_extension:"sentis"` + }, + voicecraft: { + prettyLabel: "VoiceCraft", + repoName: "VoiceCraft", + repoUrl: "https://github.com/jasonppy/VoiceCraft", + docsUrl: "https://github.com/jasonppy/VoiceCraft", + snippets: voicecraft + }, + whisperkit: { + prettyLabel: "WhisperKit", + repoName: "WhisperKit", + repoUrl: "https://github.com/argmaxinc/WhisperKit", + docsUrl: "https://github.com/argmaxinc/WhisperKit?tab=readme-ov-file#homebrew", + snippets: whisperkit, + countDownloads: `path_filename:"model" AND path_extension:"mil" AND _exists_:"path_prefix"` + } +}; +var ALL_MODEL_LIBRARY_KEYS = Object.keys(MODEL_LIBRARIES_UI_ELEMENTS); +var ALL_DISPLAY_MODEL_LIBRARY_KEYS = Object.entries(MODEL_LIBRARIES_UI_ELEMENTS).filter(([_, v]) => v.filter).map(([k]) => k); + +// src/tokenizer-data.ts +var 
SPECIAL_TOKENS_ATTRIBUTES = [ + "bos_token", + "eos_token", + "unk_token", + "sep_token", + "pad_token", + "cls_token", + "mask_token" + // additional_special_tokens (TODO) +]; + +// src/snippets/index.ts +var snippets_exports = {}; +__export(snippets_exports, { + curl: () => curl_exports, + inputs: () => inputs_exports, + js: () => js_exports, + python: () => python_exports +}); + +// src/snippets/inputs.ts +var inputs_exports = {}; +__export(inputs_exports, { + getModelInputSnippet: () => getModelInputSnippet +}); +var inputsZeroShotClassification = () => `"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"`; +var inputsTranslation = () => `"\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435"`; +var inputsSummarization = () => `"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). 
Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`; +var inputsTableQuestionAnswering = () => `{ + "query": "How many stars does the transformers repository have?", + "table": { + "Repository": ["Transformers", "Datasets", "Tokenizers"], + "Stars": ["36542", "4512", "3934"], + "Contributors": ["651", "77", "34"], + "Programming language": [ + "Python", + "Python", + "Rust, Python and NodeJS" + ] + } +}`; +var inputsVisualQuestionAnswering = () => `{ + "image": "cat.png", + "question": "What is in this image?" +}`; +var inputsQuestionAnswering = () => `{ + "question": "What is my name?", + "context": "My name is Clara and I live in Berkeley." +}`; +var inputsTextClassification = () => `"I like you. I love you"`; +var inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`; +var inputsTextGeneration = () => `"Can you please let us know more details about your "`; +var inputsText2TextGeneration = () => `"The answer to the universe is"`; +var inputsFillMask = (model) => `"The answer to the universe is ${model.mask_token}."`; +var inputsSentenceSimilarity = () => `{ + "source_sentence": "That is a happy person", + "sentences": [ + "That is a happy dog", + "That is a very happy person", + "Today is a sunny day" + ] +}`; +var inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`; +var inputsImageClassification = () => `"cats.jpg"`; +var inputsImageToText = () => `"cats.jpg"`; +var inputsImageSegmentation = () => `"cats.jpg"`; +var inputsObjectDetection = () => `"cats.jpg"`; +var inputsAudioToAudio = () => `"sample1.flac"`; +var inputsAudioClassification = () => `"sample1.flac"`; +var inputsTextToImage = () => `"Astronaut riding a horse"`; +var inputsTextToSpeech = () => `"The answer to the universe is 42"`; +var inputsTextToAudio = () => `"liquid drum and bass, atmospheric synths, airy sounds"`; +var 
inputsAutomaticSpeechRecognition = () => `"sample1.flac"`; +var inputsTabularPrediction = () => `'{"Height":[11.52,12.48],"Length1":[23.2,24.0],"Length2":[25.4,26.3],"Species": ["Bream","Bream"]}'`; +var inputsZeroShotImageClassification = () => `"cats.jpg"`; +var modelInputSnippets = { + "audio-to-audio": inputsAudioToAudio, + "audio-classification": inputsAudioClassification, + "automatic-speech-recognition": inputsAutomaticSpeechRecognition, + "document-question-answering": inputsVisualQuestionAnswering, + "feature-extraction": inputsFeatureExtraction, + "fill-mask": inputsFillMask, + "image-classification": inputsImageClassification, + "image-to-text": inputsImageToText, + "image-segmentation": inputsImageSegmentation, + "object-detection": inputsObjectDetection, + "question-answering": inputsQuestionAnswering, + "sentence-similarity": inputsSentenceSimilarity, + summarization: inputsSummarization, + "table-question-answering": inputsTableQuestionAnswering, + "tabular-regression": inputsTabularPrediction, + "tabular-classification": inputsTabularPrediction, + "text-classification": inputsTextClassification, + "text-generation": inputsTextGeneration, + "text-to-image": inputsTextToImage, + "text-to-speech": inputsTextToSpeech, + "text-to-audio": inputsTextToAudio, + "text2text-generation": inputsText2TextGeneration, + "token-classification": inputsTokenClassification, + translation: inputsTranslation, + "zero-shot-classification": inputsZeroShotClassification, + "zero-shot-image-classification": inputsZeroShotImageClassification +}; +function getModelInputSnippet(model, noWrap = false, noQuotes = false) { + if (model.pipeline_tag) { + const inputs = modelInputSnippets[model.pipeline_tag]; + if (inputs) { + let result = inputs(model); + if (noWrap) { + result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " "); + } + if (noQuotes) { + const REGEX_QUOTES = /^"(.+)"$/s; + const match = result.match(REGEX_QUOTES); + result = match ? 
match[1] : result; + } + return result; + } + } + return "No input example has been defined for this model task."; +} + +// src/snippets/curl.ts +var curl_exports = {}; +__export(curl_exports, { + curlSnippets: () => curlSnippets, + getCurlInferenceSnippet: () => getCurlInferenceSnippet, + hasCurlInferenceSnippet: () => hasCurlInferenceSnippet, + snippetBasic: () => snippetBasic, + snippetFile: () => snippetFile, + snippetTextGeneration: () => snippetTextGeneration, + snippetZeroShotClassification: () => snippetZeroShotClassification +}); +var snippetBasic = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\ + -H 'Content-Type: application/json' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; +var snippetTextGeneration = (model, accessToken) => { + if (model.config?.tokenizer_config?.chat_template) { + return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\ +-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\ +-H 'Content-Type: application/json' \\ +-d '{ + "model": "${model.id}", + "messages": [{"role": "user", "content": "What is the capital of France?"}], + "max_tokens": 500, + "stream": false +}' +`; + } else { + return snippetBasic(model, accessToken); + } +}; +var snippetZeroShotClassification = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\ + -H 'Content-Type: application/json' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; +var snippetFile = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + --data-binary '@${getModelInputSnippet(model, true, true)}' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; +var curlSnippets 
= { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic, + "token-classification": snippetBasic, + "table-question-answering": snippetBasic, + "question-answering": snippetBasic, + "zero-shot-classification": snippetZeroShotClassification, + translation: snippetBasic, + summarization: snippetBasic, + "feature-extraction": snippetBasic, + "text-generation": snippetTextGeneration, + "text2text-generation": snippetBasic, + "fill-mask": snippetBasic, + "sentence-similarity": snippetBasic, + "automatic-speech-recognition": snippetFile, + "text-to-image": snippetBasic, + "text-to-speech": snippetBasic, + "text-to-audio": snippetBasic, + "audio-to-audio": snippetFile, + "audio-classification": snippetFile, + "image-classification": snippetFile, + "image-to-text": snippetFile, + "object-detection": snippetFile, + "image-segmentation": snippetFile +}; +function getCurlInferenceSnippet(model, accessToken) { + return model.pipeline_tag && model.pipeline_tag in curlSnippets ? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? 
"" : ""; +} +function hasCurlInferenceSnippet(model) { + return !!model.pipeline_tag && model.pipeline_tag in curlSnippets; +} + +// src/snippets/python.ts +var python_exports = {}; +__export(python_exports, { + getPythonInferenceSnippet: () => getPythonInferenceSnippet, + hasPythonInferenceSnippet: () => hasPythonInferenceSnippet, + pythonSnippets: () => pythonSnippets, + snippetBasic: () => snippetBasic2, + snippetConversational: () => snippetConversational, + snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering, + snippetFile: () => snippetFile2, + snippetTabular: () => snippetTabular, + snippetTextToAudio: () => snippetTextToAudio, + snippetTextToImage: () => snippetTextToImage, + snippetZeroShotClassification: () => snippetZeroShotClassification2, + snippetZeroShotImageClassification: () => snippetZeroShotImageClassification +}); +var snippetConversational = (model, accessToken) => `from huggingface_hub import InferenceClient + +client = InferenceClient( + "${model.id}", + token="${accessToken || "{API_TOKEN}"}", +) + +for message in client.chat_completion( + messages=[{"role": "user", "content": "What is the capital of France?"}], + max_tokens=500, + stream=True, +): + print(message.choices[0].delta.content, end="") +`; +var snippetZeroShotClassification2 = (model) => `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, + "parameters": {"candidate_labels": ["refund", "legal", "faq"]}, +})`; +var snippetZeroShotImageClassification = (model) => `def query(data): + with open(data["image_path"], "rb") as f: + img = f.read() + payload={ + "parameters": data["parameters"], + "inputs": base64.b64encode(img).decode("utf-8") + } + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "image_path": ${getModelInputSnippet(model)}, + "parameters": {"candidate_labels": 
["cat", "dog", "llama"]}, +})`; +var snippetBasic2 = (model) => `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, +})`; +var snippetFile2 = (model) => `def query(filename): + with open(filename, "rb") as f: + data = f.read() + response = requests.post(API_URL, headers=headers, data=data) + return response.json() + +output = query(${getModelInputSnippet(model)})`; +var snippetTextToImage = (model) => `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content +image_bytes = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the image with PIL.Image for example +import io +from PIL import Image +image = Image.open(io.BytesIO(image_bytes))`; +var snippetTabular = (model) => `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content +response = query({ + "inputs": {"data": ${getModelInputSnippet(model)}}, +})`; +var snippetTextToAudio = (model) => { + if (model.library_name === "transformers") { + return `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content + +audio_bytes = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the audio with IPython.display for example +from IPython.display import Audio +Audio(audio_bytes)`; + } else { + return `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +audio, sampling_rate = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the audio with IPython.display for example +from IPython.display import Audio +Audio(audio, rate=sampling_rate)`; + } +}; +var snippetDocumentQuestionAnswering = (model) => `def query(payload): + with open(payload["image"], "rb") as f: + img = f.read() + payload["image"] = 
base64.b64encode(img).decode("utf-8") + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, +})`; +var pythonSnippets = { + // Same order as in tasks/src/pipelines.ts + "text-classification": snippetBasic2, + "token-classification": snippetBasic2, + "table-question-answering": snippetBasic2, + "question-answering": snippetBasic2, + "zero-shot-classification": snippetZeroShotClassification2, + translation: snippetBasic2, + summarization: snippetBasic2, + "feature-extraction": snippetBasic2, + "text-generation": snippetBasic2, + "text2text-generation": snippetBasic2, + "fill-mask": snippetBasic2, + "sentence-similarity": snippetBasic2, + "automatic-speech-recognition": snippetFile2, + "text-to-image": snippetTextToImage, + "text-to-speech": snippetTextToAudio, + "text-to-audio": snippetTextToAudio, + "audio-to-audio": snippetFile2, + "audio-classification": snippetFile2, + "image-classification": snippetFile2, + "tabular-regression": snippetTabular, + "tabular-classification": snippetTabular, + "object-detection": snippetFile2, + "image-segmentation": snippetFile2, + "document-question-answering": snippetDocumentQuestionAnswering, + "image-to-text": snippetFile2, + "zero-shot-image-classification": snippetZeroShotImageClassification +}; +function getPythonInferenceSnippet(model, accessToken) { + if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) { + return snippetConversational(model, accessToken); + } else { + const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? "" : ""; + return `import requests + +API_URL = "https://api-inference.huggingface.co/models/${model.id}" +headers = {"Authorization": ${accessToken ? 
`"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}} + +${body}`; + } +} +function hasPythonInferenceSnippet(model) { + return !!model.pipeline_tag && model.pipeline_tag in pythonSnippets; +} + +// src/snippets/js.ts +var js_exports = {}; +__export(js_exports, { + getJsInferenceSnippet: () => getJsInferenceSnippet, + hasJsInferenceSnippet: () => hasJsInferenceSnippet, + jsSnippets: () => jsSnippets, + snippetBasic: () => snippetBasic3, + snippetFile: () => snippetFile3, + snippetTextGeneration: () => snippetTextGeneration2, + snippetTextToAudio: () => snippetTextToAudio2, + snippetTextToImage: () => snippetTextToImage2, + snippetZeroShotClassification: () => snippetZeroShotClassification3 +}); +var snippetBasic3 = (model, accessToken) => `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.json(); + return result; +} + +query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + console.log(JSON.stringify(response)); +});`; +var snippetTextGeneration2 = (model, accessToken) => { + if (model.config?.tokenizer_config?.chat_template) { + return `import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference("${accessToken || `{API_TOKEN}`}"); + +for await (const chunk of inference.chatCompletionStream({ + model: "${model.id}", + messages: [{ role: "user", content: "What is the capital of France?" 
}], + max_tokens: 500, +})) { + process.stdout.write(chunk.choices[0]?.delta?.content || ""); +} +`; + } else { + return snippetBasic3(model, accessToken); + } +}; +var snippetZeroShotClassification3 = (model, accessToken) => `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.json(); + return result; +} + +query({"inputs": ${getModelInputSnippet( + model +)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => { + console.log(JSON.stringify(response)); +});`; +var snippetTextToImage2 = (model, accessToken) => `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.blob(); + return result; +} +query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + // Use image +});`; +var snippetTextToAudio2 = (model, accessToken) => { + const commonSnippet = `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + );`; + if (model.library_name === "transformers") { + return commonSnippet + ` + const result = await response.blob(); + return result; + } + query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + // Returns a byte object of the Audio wavform. Use it directly! 
+ });`; + } else { + return commonSnippet + ` + const result = await response.json(); + return result; + } + + query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + console.log(JSON.stringify(response)); + });`; + } +}; +var snippetFile3 = (model, accessToken) => `async function query(filename) { + const data = fs.readFileSync(filename); + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: data, + } + ); + const result = await response.json(); + return result; +} + +query(${getModelInputSnippet(model)}).then((response) => { + console.log(JSON.stringify(response)); +});`; +var jsSnippets = { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic3, + "token-classification": snippetBasic3, + "table-question-answering": snippetBasic3, + "question-answering": snippetBasic3, + "zero-shot-classification": snippetZeroShotClassification3, + translation: snippetBasic3, + summarization: snippetBasic3, + "feature-extraction": snippetBasic3, + "text-generation": snippetTextGeneration2, + "text2text-generation": snippetBasic3, + "fill-mask": snippetBasic3, + "sentence-similarity": snippetBasic3, + "automatic-speech-recognition": snippetFile3, + "text-to-image": snippetTextToImage2, + "text-to-speech": snippetTextToAudio2, + "text-to-audio": snippetTextToAudio2, + "audio-to-audio": snippetFile3, + "audio-classification": snippetFile3, + "image-classification": snippetFile3, + "image-to-text": snippetFile3, + "object-detection": snippetFile3, + "image-segmentation": snippetFile3 +}; +function getJsInferenceSnippet(model, accessToken) { + return model.pipeline_tag && model.pipeline_tag in jsSnippets ? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? 
"" : ""; +} +function hasJsInferenceSnippet(model) { + return !!model.pipeline_tag && model.pipeline_tag in jsSnippets; +} + +// src/hardware.ts +var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL = 10 ** 14; +var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL_BIOLOGY = 10 ** 11; +var TFLOPS_THRESHOLD_WHITE_HOUSE_CLUSTER = 10 ** 8; +var TFLOPS_THRESHOLD_EU_AI_ACT_MODEL_TRAINING_TOTAL = 10 ** 13; +var DEFAULT_MEMORY_OPTIONS = [8, 16, 24, 32, 40, 48, 64, 80, 96, 128, 256, 512]; +var SKUS = { + GPU: { + NVIDIA: { + H100: { + tflops: 267.6, + memory: [80] + }, + L40: { + tflops: 90.52, + memory: [48] + }, + "RTX 6000 Ada": { + tflops: 91.1, + memory: [48] + }, + "RTX 5880 Ada": { + tflops: 69.3, + memory: [48] + }, + "RTX 5000 Ada": { + tflops: 65.3, + memory: [32] + }, + "RTX 4500 Ada": { + tflops: 39.6, + memory: [24] + }, + "RTX 4000 Ada": { + tflops: 26.7, + memory: [20] + }, + "RTX 4000 SFF Ada": { + tflops: 19.2, + memory: [20] + }, + "RTX 2000 Ada": { + tflops: 12, + memory: [16] + }, + A100: { + tflops: 77.97, + memory: [80, 40] + }, + A40: { + tflops: 37.42, + memory: [48] + }, + A10: { + tflops: 31.24, + memory: [24] + }, + "RTX 4090": { + tflops: 82.58, + memory: [24] + }, + "RTX 4090D": { + tflops: 79.49, + memory: [24] + }, + "RTX 4080 SUPER": { + tflops: 52.2, + memory: [16] + }, + "RTX 4080": { + tflops: 48.7, + memory: [16] + }, + "RTX 4070": { + tflops: 29.15, + memory: [12] + }, + "RTX 4070 Ti": { + tflops: 40.09, + memory: [12] + }, + "RTX 4070 Super": { + tflops: 35.48, + memory: [12] + }, + "RTX 4070 Ti Super": { + tflops: 44.1, + memory: [16] + }, + "RTX 4060": { + tflops: 15.11, + memory: [8] + }, + "RTX 4060 Ti": { + tflops: 22.06, + memory: [8, 16] + }, + "RTX 3090": { + tflops: 35.58, + memory: [24] + }, + "RTX 3090 Ti": { + tflops: 40, + memory: [24] + }, + "RTX 3080": { + tflops: 30.6, + memory: [12, 10] + }, + "RTX 3080 Ti": { + tflops: 34.1, + memory: [12] + }, + "RTX 3070": { + tflops: 20.31, + memory: [8] + }, + "RTX 3070 Ti": { + 
tflops: 21.75, + memory: [8] + }, + "RTX 3070 Ti Laptop": { + tflops: 16.6, + memory: [8] + }, + "RTX 3060 Ti": { + tflops: 16.2, + memory: [8] + }, + "RTX 3060": { + tflops: 12.74, + memory: [12, 8] + }, + "RTX 2070": { + tflops: 14.93, + memory: [8] + }, + "RTX 3050 Mobile": { + tflops: 7.639, + memory: [6] + }, + "RTX 2060 Mobile": { + tflops: 9.22, + memory: [6] + }, + "GTX 1080 Ti": { + tflops: 11.34, + // float32 (GPU does not support native float16) + memory: [11] + }, + "GTX 1070 Ti": { + tflops: 8.2, + // float32 (GPU does not support native float16) + memory: [8] + }, + "RTX Titan": { + tflops: 32.62, + memory: [24] + }, + "GTX 1660": { + tflops: 10.05, + memory: [6] + }, + "GTX 1650 Mobile": { + tflops: 6.39, + memory: [4] + }, + T4: { + tflops: 65.13, + memory: [16] + }, + V100: { + tflops: 28.26, + memory: [32, 16] + }, + "Quadro P6000": { + tflops: 12.63, + // float32 (GPU does not support native float16) + memory: [24] + }, + P40: { + tflops: 11.76, + // float32 (GPU does not support native float16) + memory: [24] + } + }, + AMD: { + MI300: { + tflops: 383, + memory: [192] + }, + MI250: { + tflops: 362.1, + memory: [128] + }, + MI210: { + tflops: 181, + memory: [64] + }, + MI100: { + tflops: 184.6, + memory: [32] + }, + "RX 7900 XTX": { + tflops: 122.8, + memory: [24] + }, + "RX 7900 XT": { + tflops: 103, + memory: [20] + }, + "RX 7900 GRE": { + tflops: 91.96, + memory: [16] + }, + "RX 7800 XT": { + tflops: 74.65, + memory: [16] + }, + "RX 7700 XT": { + tflops: 70.34, + memory: [12] + }, + "RX 7600 XT": { + tflops: 45.14, + memory: [16, 8] + }, + "RX 6950 XT": { + tflops: 47.31, + memory: [16] + }, + "RX 6800": { + tflops: 32.33, + memory: [16] + }, + "Radeon Pro VII": { + tflops: 26.11, + memory: [16] + } + } + }, + CPU: { + Intel: { + "Xeon 4th Generation (Sapphire Rapids)": { + tflops: 1.3 + }, + "Xeon 3th Generation (Ice Lake)": { + tflops: 0.8 + }, + "Xeon 2th Generation (Cascade Lake)": { + tflops: 0.55 + }, + "Intel Core 13th Generation (i9)": 
{ + tflops: 0.85 + }, + "Intel Core 13th Generation (i7)": { + tflops: 0.82 + }, + "Intel Core 13th Generation (i5)": { + tflops: 0.68 + }, + "Intel Core 13th Generation (i3)": { + tflops: 0.57 + }, + "Intel Core 12th Generation (i9)": { + tflops: 0.79 + }, + "Intel Core 12th Generation (i7)": { + tflops: 0.77 + }, + "Intel Core 12th Generation (i5)": { + tflops: 0.65 + }, + "Intel Core 12th Generation (i3)": { + tflops: 0.53 + }, + "Intel Core 11th Generation (i9)": { + tflops: 0.7 + }, + "Intel Core 11th Generation (i7)": { + tflops: 0.6 + }, + "Intel Core 11th Generation (i5)": { + tflops: 0.5 + }, + "Intel Core 11th Generation (i3)": { + tflops: 0.35 + }, + "Intel Core 10th Generation (i9)": { + tflops: 0.46 + }, + "Intel Core 10th Generation (i7)": { + tflops: 0.46 + }, + "Intel Core 10th Generation (i5)": { + tflops: 0.46 + }, + "Intel Core 10th Generation (i3)": { + tflops: 0.44 + } + }, + AMD: { + "EPYC 4th Generation (Genoa)": { + tflops: 5 + }, + "EPYC 3th Generation (Milan)": { + tflops: 2.4 + }, + "EPYC 2th Generation (Rome)": { + tflops: 0.6 + }, + "EPYC 1st Generation (Naples)": { + tflops: 0.6 + }, + "Ryzen Zen4 7000 (Ryzen 9)": { + tflops: 0.56 + }, + "Ryzen Zen4 7000 (Ryzen 7)": { + tflops: 0.56 + }, + "Ryzen Zen4 7000 (Ryzen 5)": { + tflops: 0.56 + }, + "Ryzen Zen3 5000 (Ryzen 9)": { + tflops: 1.33 + }, + "Ryzen Zen3 5000 (Ryzen 7)": { + tflops: 1.33 + }, + "Ryzen Zen3 5000 (Ryzen 5)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Threadripper)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Ryzen 9)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Ryzen 7)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Ryzen 5)": { + tflops: 0.72 + }, + "Ryzen Zen 2 3000 (Ryzen 3)": { + tflops: 0.72 + } + } + }, + "Apple Silicon": { + "-": { + "Apple M1": { + tflops: 2.6, + memory: [8, 16] + }, + "Apple M1 Pro": { + tflops: 5.2, + memory: [16, 24, 32] + }, + "Apple M1 Max": { + tflops: 10.4, + memory: [16, 24, 32, 64] + }, + "Apple M1 Ultra": { + tflops: 21, + memory: [16, 
24, 32, 64, 96, 128] + }, + "Apple M2": { + tflops: 3.6, + memory: [8, 16, 24] + }, + "Apple M2 Pro": { + tflops: 13.6, + memory: [16, 24, 32] + }, + "Apple M2 Max": { + tflops: 13.49, + memory: [32, 64, 96] + }, + "Apple M2 Ultra": { + tflops: 27.2, + memory: [64, 96, 128, 192] + }, + "Apple M3": { + tflops: 2.84, + memory: [8, 16, 24] + }, + "Apple M3 Pro": { + tflops: 14, + memory: [18, 36] + }, + "Apple M3 Max": { + tflops: 14.2, + memory: [36, 48, 64, 96, 128] + } + } + } +}; + +// src/local-apps.ts +function isGgufModel(model) { + return model.tags.includes("gguf"); +} +var snippetLlamacpp = (model, filepath) => { + return [ + `# Option 1: use llama.cpp with brew +brew install llama.cpp + +# Load and run the model +llama \\ + --hf-repo "${model.id}" \\ + --hf-file ${filepath ?? "{{GGUF_FILE}}"} \\ + -p "I believe the meaning of life is" \\ + -n 128`, + `# Option 2: build llama.cpp from source with curl support +git clone https://github.com/ggerganov/llama.cpp.git +cd llama.cpp +LLAMA_CURL=1 make + +# Load and run the model +./main \\ + --hf-repo "${model.id}" \\ + -m ${filepath ?? "{{GGUF_FILE}}"} \\ + -p "I believe the meaning of life is" \\ + -n 128` + ]; +}; +var LOCAL_APPS = { + "llama.cpp": { + prettyLabel: "llama.cpp", + docsUrl: "https://github.com/ggerganov/llama.cpp", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + snippet: snippetLlamacpp + }, + lmstudio: { + prettyLabel: "LM Studio", + docsUrl: "https://lmstudio.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model, filepath) => new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? 
`&file=${filepath}` : ""}`) + }, + jan: { + prettyLabel: "Jan", + docsUrl: "https://jan.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`jan://models/huggingface/${model.id}`) + }, + backyard: { + prettyLabel: "Backyard AI", + docsUrl: "https://backyard.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`https://backyard.ai/hf/model/${model.id}`) + }, + sanctum: { + prettyLabel: "Sanctum", + docsUrl: "https://sanctum.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`sanctum://open_from_hf?model=${model.id}`) + }, + jellybox: { + prettyLabel: "Jellybox", + docsUrl: "https://jellybox.com", + mainTask: "text-generation", + displayOnModelPage: (model) => isGgufModel(model) || model.library_name === "diffusers" && model.tags.includes("safetensors") && (model.pipeline_tag === "text-to-image" || model.tags.includes("lora")), + deeplink: (model) => { + if (isGgufModel(model)) { + return new URL(`jellybox://llm/models/huggingface/LLM/${model.id}`); + } else if (model.tags.includes("lora")) { + return new URL(`jellybox://image/models/huggingface/ImageLora/${model.id}`); + } else { + return new URL(`jellybox://image/models/huggingface/Image/${model.id}`); + } + } + }, + msty: { + prettyLabel: "Msty", + docsUrl: "https://msty.app", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`msty://models/search/hf/${model.id}`) + }, + recursechat: { + prettyLabel: "RecurseChat", + docsUrl: "https://recurse.chat", + mainTask: "text-generation", + macOSOnly: true, + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`recursechat://new-hf-gguf-model?hf-model-id=${model.id}`) + }, + drawthings: { + prettyLabel: "Draw Things", + docsUrl: "https://drawthings.ai", + mainTask: "text-to-image", + macOSOnly: true, + displayOnModelPage: (model) => model.library_name === "diffusers" 
&& (model.pipeline_tag === "text-to-image" || model.tags.includes("lora")), + deeplink: (model) => { + if (model.tags.includes("lora")) { + return new URL(`https://drawthings.ai/import/diffusers/pipeline.load_lora_weights?repo_id=${model.id}`); + } else { + return new URL(`https://drawthings.ai/import/diffusers/pipeline.from_pretrained?repo_id=${model.id}`); + } + } + }, + diffusionbee: { + prettyLabel: "DiffusionBee", + docsUrl: "https://diffusionbee.com", + mainTask: "text-to-image", + macOSOnly: true, + comingSoon: true, + displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image", + deeplink: (model) => new URL(`diffusionbee://open_from_hf?model=${model.id}`) + } +}; + +// src/dataset-libraries.ts +var DATASET_LIBRARIES_UI_ELEMENTS = { + mlcroissant: { + prettyLabel: "Croissant", + repoName: "croissant", + repoUrl: "https://github.com/mlcommons/croissant/tree/main/python/mlcroissant", + docsUrl: "https://github.com/mlcommons/croissant/blob/main/python/mlcroissant/README.md" + }, + webdataset: { + prettyLabel: "WebDataset", + repoName: "webdataset", + repoUrl: "https://github.com/webdataset/webdataset", + docsUrl: "https://huggingface.co/docs/hub/datasets-webdataset" + }, + datasets: { + prettyLabel: "Datasets", + repoName: "datasets", + repoUrl: "https://github.com/huggingface/datasets", + docsUrl: "https://huggingface.co/docs/hub/datasets-usage" + }, + pandas: { + prettyLabel: "pandas", + repoName: "pandas", + repoUrl: "https://github.com/pandas-dev/pandas", + docsUrl: "https://huggingface.co/docs/hub/datasets-pandas" + }, + dask: { + prettyLabel: "Dask", + repoName: "dask", + repoUrl: "https://github.com/dask/dask", + docsUrl: "https://huggingface.co/docs/hub/datasets-dask" + }, + distilabel: { + prettyLabel: "Distilabel", + repoName: "distilabel", + repoUrl: "https://github.com/argilla-io/distilabel", + docsUrl: "https://distilabel.argilla.io" + }, + fiftyone: { + prettyLabel: "FiftyOne", + repoName: 
"fiftyone", + repoUrl: "https://github.com/voxel51/fiftyone", + docsUrl: "https://docs.voxel51.com" + }, + argilla: { + prettyLabel: "Argilla", + repoName: "argilla", + repoUrl: "https://github.com/argilla-io/argilla", + docsUrl: "https://argilla-io.github.io/argilla" + }, + polars: { + prettyLabel: "Polars", + repoName: "polars", + repoUrl: "https://github.com/pola-rs/polars", + docsUrl: "https://docs.pola.rs/" + } +}; +export { + ALL_DISPLAY_MODEL_LIBRARY_KEYS, + ALL_MODEL_LIBRARY_KEYS, + DATASET_LIBRARIES_UI_ELEMENTS, + DEFAULT_MEMORY_OPTIONS, + LIBRARY_TASK_MAPPING, + LOCAL_APPS, + MAPPING_DEFAULT_WIDGET, + MODALITIES, + MODALITY_LABELS, + MODEL_LIBRARIES_UI_ELEMENTS, + PIPELINE_DATA, + PIPELINE_TYPES, + PIPELINE_TYPES_SET, + SKUS, + SPECIAL_TOKENS_ATTRIBUTES, + SUBTASK_TYPES, + TASKS_DATA, + TASKS_MODEL_LIBRARIES, + snippets_exports as snippets +}; diff --git a/data/node_modules/@huggingface/tasks/dist/scripts/inference-codegen.d.ts b/data/node_modules/@huggingface/tasks/dist/scripts/inference-codegen.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..5dedcdd544391f54e59d6448b67f254cfbed7f3f --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/scripts/inference-codegen.d.ts @@ -0,0 +1,2 @@ +export {}; +//# sourceMappingURL=inference-codegen.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/scripts/inference-codegen.d.ts.map b/data/node_modules/@huggingface/tasks/dist/scripts/inference-codegen.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..045b9347783800434e04064bee16f55cdbc5bba9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/scripts/inference-codegen.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference-codegen.d.ts","sourceRoot":"","sources":["../../scripts/inference-codegen.ts"],"names":[],"mappings":""} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/scripts/inference-tei-import.d.ts 
b/data/node_modules/@huggingface/tasks/dist/scripts/inference-tei-import.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..7f47605c7ca0585a719bb71c33a2402da96b1656 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/scripts/inference-tei-import.d.ts @@ -0,0 +1,2 @@ +export {}; +//# sourceMappingURL=inference-tei-import.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/scripts/inference-tei-import.d.ts.map b/data/node_modules/@huggingface/tasks/dist/scripts/inference-tei-import.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..6ed4f69dd08866ae32eafd60537885ecace89e65 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/scripts/inference-tei-import.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference-tei-import.d.ts","sourceRoot":"","sources":["../../scripts/inference-tei-import.ts"],"names":[],"mappings":""} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/scripts/inference-tgi-import.d.ts b/data/node_modules/@huggingface/tasks/dist/scripts/inference-tgi-import.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..8632f2043c2cd9c9f144d39a5db4d34ce92cd349 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/scripts/inference-tgi-import.d.ts @@ -0,0 +1,2 @@ +export {}; +//# sourceMappingURL=inference-tgi-import.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/scripts/inference-tgi-import.d.ts.map b/data/node_modules/@huggingface/tasks/dist/scripts/inference-tgi-import.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..64696bba40da64705baeab554c025edd34dd1148 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/scripts/inference-tgi-import.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference-tgi-import.d.ts","sourceRoot":"","sources":["../../scripts/inference-tgi-import.ts"],"names":[],"mappings":""} \ No newline 
at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/dataset-libraries.d.ts b/data/node_modules/@huggingface/tasks/dist/src/dataset-libraries.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..5f3b4ded56ba518f7398466f43c5d8269e47fe73 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/dataset-libraries.d.ts @@ -0,0 +1,81 @@ +/** + * Elements configurable by a dataset library. + */ +export interface DatasetLibraryUiElement { + /** + * Pretty name of the library. + * displayed (in tags?, and) on the main + * call-to-action button on the dataset page. + */ + prettyLabel: string; + /** + * Repo name of the library's (usually on GitHub) code repo + */ + repoName: string; + /** + * URL to library's (usually on GitHub) code repo + */ + repoUrl: string; + /** + * URL to library's docs + */ + docsUrl?: string; +} +export declare const DATASET_LIBRARIES_UI_ELEMENTS: { + mlcroissant: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; + webdataset: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; + datasets: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; + pandas: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; + dask: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; + distilabel: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; + fiftyone: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; + argilla: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; + polars: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + }; +}; +export type DatasetLibraryKey = keyof typeof DATASET_LIBRARIES_UI_ELEMENTS; +//# sourceMappingURL=dataset-libraries.d.ts.map \ No newline at end of file diff --git 
a/data/node_modules/@huggingface/tasks/dist/src/dataset-libraries.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/dataset-libraries.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..f4a3f8973e6e21e728398b12fee13ea9b111124d --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/dataset-libraries.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"dataset-libraries.d.ts","sourceRoot":"","sources":["../../src/dataset-libraries.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,uBAAuB;IACvC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;CACjB;AAED,eAAO,MAAM,6BAA6B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAuDS,CAAC;AAGpD,MAAM,MAAM,iBAAiB,GAAG,MAAM,OAAO,6BAA6B,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/default-widget-inputs.d.ts b/data/node_modules/@huggingface/tasks/dist/src/default-widget-inputs.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..06d06c2cc432839e2b0a1e9b75d40a3b39625556 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/default-widget-inputs.d.ts @@ -0,0 +1,6 @@ +import type { WidgetExample } from "./widget-example"; +import type { WidgetType } from "./pipelines"; +type PerLanguageMapping = Map; +export declare const MAPPING_DEFAULT_WIDGET: Map; +export {}; +//# sourceMappingURL=default-widget-inputs.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/default-widget-inputs.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/default-widget-inputs.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..039844266dd2a708c74ad1f7eedb98ad662a5855 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/default-widget-inputs.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"default-widget-inputs.d.ts","sourceRoot":"","sources":["../../src/default-widget-inputs.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACtD,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,aAAa,CAAC;AAI9C,KAAK,kBAAkB,GAAG,GAAG,CAAC,UAAU,EAAE,MAAM,EAAE,GAAG,aAAa,EAAE,CAAC,CAAC;AAyrBtE,eAAO,MAAM,sBAAsB,iCAejC,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/hardware.d.ts b/data/node_modules/@huggingface/tasks/dist/src/hardware.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..004b2eaa97366e910bf42b8bb8c1be0e3f402c3b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/hardware.d.ts @@ -0,0 +1,421 @@ +/** + * Biden AI Executive Order + * https://www.whitehouse.gov/briefing-room/presidential-actions/2023/10/30/executive-order-on-the-safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence/ + */ +export declare const TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL: number; +export declare const TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL_BIOLOGY: number; +export declare const TFLOPS_THRESHOLD_WHITE_HOUSE_CLUSTER: number; +/** + * EU AI Act + * https://ec.europa.eu/commission/presscorner/detail/en/qanda_21_1683 + */ +export declare const TFLOPS_THRESHOLD_EU_AI_ACT_MODEL_TRAINING_TOTAL: number; +export interface HardwareSpec { + /** + * Approximate value, in FP16 whenever possible. + * This is only approximate/theoretical and shouldn't be taken too seriously. + * Currently the CPU values are from cpu-monkey.com + * while the GPU values are from techpowerup.com + * + * Note to reviewers: I got fed up with data entry, + * and HuggingChat running Llama3 with Web search was failing a bit, + * so some of those values might be slightly inaccurate. Forgive me and please feel free to improve. + */ + tflops: number; + /** + * If an array is specified, options of memory size (can be VRAM, unified RAM) + * e.g. an A100 exists in 40 or 80 GB. 
+ */ + memory?: number[]; +} +export declare const DEFAULT_MEMORY_OPTIONS: number[]; +export declare const SKUS: { + GPU: { + NVIDIA: { + H100: { + tflops: number; + memory: number[]; + }; + L40: { + tflops: number; + memory: number[]; + }; + "RTX 6000 Ada": { + tflops: number; + memory: number[]; + }; + "RTX 5880 Ada": { + tflops: number; + memory: number[]; + }; + "RTX 5000 Ada": { + tflops: number; + memory: number[]; + }; + "RTX 4500 Ada": { + tflops: number; + memory: number[]; + }; + "RTX 4000 Ada": { + tflops: number; + memory: number[]; + }; + "RTX 4000 SFF Ada": { + tflops: number; + memory: number[]; + }; + "RTX 2000 Ada": { + tflops: number; + memory: number[]; + }; + A100: { + tflops: number; + memory: number[]; + }; + A40: { + tflops: number; + memory: number[]; + }; + A10: { + tflops: number; + memory: number[]; + }; + "RTX 4090": { + tflops: number; + memory: number[]; + }; + "RTX 4090D": { + tflops: number; + memory: number[]; + }; + "RTX 4080 SUPER": { + tflops: number; + memory: number[]; + }; + "RTX 4080": { + tflops: number; + memory: number[]; + }; + "RTX 4070": { + tflops: number; + memory: number[]; + }; + "RTX 4070 Ti": { + tflops: number; + memory: number[]; + }; + "RTX 4070 Super": { + tflops: number; + memory: number[]; + }; + "RTX 4070 Ti Super": { + tflops: number; + memory: number[]; + }; + "RTX 4060": { + tflops: number; + memory: number[]; + }; + "RTX 4060 Ti": { + tflops: number; + memory: number[]; + }; + "RTX 3090": { + tflops: number; + memory: number[]; + }; + "RTX 3090 Ti": { + tflops: number; + memory: number[]; + }; + "RTX 3080": { + tflops: number; + memory: number[]; + }; + "RTX 3080 Ti": { + tflops: number; + memory: number[]; + }; + "RTX 3070": { + tflops: number; + memory: number[]; + }; + "RTX 3070 Ti": { + tflops: number; + memory: number[]; + }; + "RTX 3070 Ti Laptop": { + tflops: number; + memory: number[]; + }; + "RTX 3060 Ti": { + tflops: number; + memory: number[]; + }; + "RTX 3060": { + tflops: number; + memory: 
number[]; + }; + "RTX 2070": { + tflops: number; + memory: number[]; + }; + "RTX 3050 Mobile": { + tflops: number; + memory: number[]; + }; + "RTX 2060 Mobile": { + tflops: number; + memory: number[]; + }; + "GTX 1080 Ti": { + tflops: number; + memory: number[]; + }; + "GTX 1070 Ti": { + tflops: number; + memory: number[]; + }; + "RTX Titan": { + tflops: number; + memory: number[]; + }; + "GTX 1660": { + tflops: number; + memory: number[]; + }; + "GTX 1650 Mobile": { + tflops: number; + memory: number[]; + }; + T4: { + tflops: number; + memory: number[]; + }; + V100: { + tflops: number; + memory: number[]; + }; + "Quadro P6000": { + tflops: number; + memory: number[]; + }; + P40: { + tflops: number; + memory: number[]; + }; + }; + AMD: { + MI300: { + tflops: number; + memory: number[]; + }; + MI250: { + tflops: number; + memory: number[]; + }; + MI210: { + tflops: number; + memory: number[]; + }; + MI100: { + tflops: number; + memory: number[]; + }; + "RX 7900 XTX": { + tflops: number; + memory: number[]; + }; + "RX 7900 XT": { + tflops: number; + memory: number[]; + }; + "RX 7900 GRE": { + tflops: number; + memory: number[]; + }; + "RX 7800 XT": { + tflops: number; + memory: number[]; + }; + "RX 7700 XT": { + tflops: number; + memory: number[]; + }; + "RX 7600 XT": { + tflops: number; + memory: number[]; + }; + "RX 6950 XT": { + tflops: number; + memory: number[]; + }; + "RX 6800": { + tflops: number; + memory: number[]; + }; + "Radeon Pro VII": { + tflops: number; + memory: number[]; + }; + }; + }; + CPU: { + Intel: { + "Xeon 4th Generation (Sapphire Rapids)": { + tflops: number; + }; + "Xeon 3th Generation (Ice Lake)": { + tflops: number; + }; + "Xeon 2th Generation (Cascade Lake)": { + tflops: number; + }; + "Intel Core 13th Generation (i9)": { + tflops: number; + }; + "Intel Core 13th Generation (i7)": { + tflops: number; + }; + "Intel Core 13th Generation (i5)": { + tflops: number; + }; + "Intel Core 13th Generation (i3)": { + tflops: number; + }; + "Intel 
Core 12th Generation (i9)": { + tflops: number; + }; + "Intel Core 12th Generation (i7)": { + tflops: number; + }; + "Intel Core 12th Generation (i5)": { + tflops: number; + }; + "Intel Core 12th Generation (i3)": { + tflops: number; + }; + "Intel Core 11th Generation (i9)": { + tflops: number; + }; + "Intel Core 11th Generation (i7)": { + tflops: number; + }; + "Intel Core 11th Generation (i5)": { + tflops: number; + }; + "Intel Core 11th Generation (i3)": { + tflops: number; + }; + "Intel Core 10th Generation (i9)": { + tflops: number; + }; + "Intel Core 10th Generation (i7)": { + tflops: number; + }; + "Intel Core 10th Generation (i5)": { + tflops: number; + }; + "Intel Core 10th Generation (i3)": { + tflops: number; + }; + }; + AMD: { + "EPYC 4th Generation (Genoa)": { + tflops: number; + }; + "EPYC 3th Generation (Milan)": { + tflops: number; + }; + "EPYC 2th Generation (Rome)": { + tflops: number; + }; + "EPYC 1st Generation (Naples)": { + tflops: number; + }; + "Ryzen Zen4 7000 (Ryzen 9)": { + tflops: number; + }; + "Ryzen Zen4 7000 (Ryzen 7)": { + tflops: number; + }; + "Ryzen Zen4 7000 (Ryzen 5)": { + tflops: number; + }; + "Ryzen Zen3 5000 (Ryzen 9)": { + tflops: number; + }; + "Ryzen Zen3 5000 (Ryzen 7)": { + tflops: number; + }; + "Ryzen Zen3 5000 (Ryzen 5)": { + tflops: number; + }; + "Ryzen Zen 2 3000 (Threadripper)": { + tflops: number; + }; + "Ryzen Zen 2 3000 (Ryzen 9)": { + tflops: number; + }; + "Ryzen Zen 2 3000 (Ryzen 7)": { + tflops: number; + }; + "Ryzen Zen 2 3000 (Ryzen 5)": { + tflops: number; + }; + "Ryzen Zen 2 3000 (Ryzen 3)": { + tflops: number; + }; + }; + }; + "Apple Silicon": { + "-": { + "Apple M1": { + tflops: number; + memory: number[]; + }; + "Apple M1 Pro": { + tflops: number; + memory: number[]; + }; + "Apple M1 Max": { + tflops: number; + memory: number[]; + }; + "Apple M1 Ultra": { + tflops: number; + memory: number[]; + }; + "Apple M2": { + tflops: number; + memory: number[]; + }; + "Apple M2 Pro": { + tflops: number; + 
memory: number[]; + }; + "Apple M2 Max": { + tflops: number; + memory: number[]; + }; + "Apple M2 Ultra": { + tflops: number; + memory: number[]; + }; + "Apple M3": { + tflops: number; + memory: number[]; + }; + "Apple M3 Pro": { + tflops: number; + memory: number[]; + }; + "Apple M3 Max": { + tflops: number; + memory: number[]; + }; + }; + }; +}; +export type SkuType = keyof typeof SKUS; +//# sourceMappingURL=hardware.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/hardware.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/hardware.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..6c598e94887e57b334f59b6665b0e4d6a1135695 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/hardware.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"hardware.d.ts","sourceRoot":"","sources":["../../src/hardware.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,eAAO,MAAM,iDAAiD,QAAW,CAAC;AAC1E,eAAO,MAAM,yDAAyD,QAAW,CAAC;AAClF,eAAO,MAAM,oCAAoC,QAAU,CAAC;AAE5D;;;GAGG;AACH,eAAO,MAAM,+CAA+C,QAAW,CAAC;AAExE,MAAM,WAAW,YAAY;IAC5B;;;;;;;;;OASG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;CAClB;AAED,eAAO,MAAM,sBAAsB,UAAqD,CAAC;AAEzF,eAAO,MAAM,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAmYuD,CAAC;AAEzE,MAAM,MAAM,OAAO,GAAG,MAAM,OAAO,IAAI,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/index.d.ts b/data/node_modules/@huggingface/tasks/dist/src/index.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..d26ef1cd94962720b1f3829b8e9c93686aadecf7 --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/dist/src/index.d.ts @@ -0,0 +1,20 @@ +export { LIBRARY_TASK_MAPPING } from "./library-to-tasks"; +export { MAPPING_DEFAULT_WIDGET } from "./default-widget-inputs"; +export type { TaskData, TaskDemo, TaskDemoEntry, ExampleRepo } from "./tasks"; +export * from "./tasks"; +export { PIPELINE_DATA, PIPELINE_TYPES, type WidgetType, type PipelineType, type PipelineData, type Modality, MODALITIES, MODALITY_LABELS, SUBTASK_TYPES, PIPELINE_TYPES_SET, } from "./pipelines"; +export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, MODEL_LIBRARIES_UI_ELEMENTS } from "./model-libraries"; +export type { LibraryUiElement, ModelLibraryKey } from "./model-libraries"; +export type { ModelData, TransformersInfo } from "./model-data"; +export type { AddedToken, SpecialTokensMap, TokenizerConfig } from "./tokenizer-data"; +export type { WidgetExample, WidgetExampleAttribute, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleChatInput, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, WidgetExampleOutput, WidgetExampleOutputUrl, WidgetExampleOutputLabels, WidgetExampleOutputAnswerScore, WidgetExampleOutputText, } from "./widget-example"; +export { SPECIAL_TOKENS_ATTRIBUTES } from "./tokenizer-data"; +import * as snippets from "./snippets"; +export { snippets }; +export { SKUS, DEFAULT_MEMORY_OPTIONS } from "./hardware"; +export type { HardwareSpec, SkuType } from "./hardware"; +export { LOCAL_APPS } from "./local-apps"; +export type { LocalApp, LocalAppKey } from "./local-apps"; +export { DATASET_LIBRARIES_UI_ELEMENTS } from "./dataset-libraries"; +export type { DatasetLibraryUiElement, DatasetLibraryKey } from "./dataset-libraries"; +//# sourceMappingURL=index.d.ts.map \ No newline 
at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/index.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/index.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..0ee0c3ac0cb4f9451605a88b4265bf24f560f64f --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/index.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAC1D,OAAO,EAAE,sBAAsB,EAAE,MAAM,yBAAyB,CAAC;AACjE,YAAY,EAAE,QAAQ,EAAE,QAAQ,EAAE,aAAa,EAAE,WAAW,EAAE,MAAM,SAAS,CAAC;AAC9E,cAAc,SAAS,CAAC;AACxB,OAAO,EACN,aAAa,EACb,cAAc,EACd,KAAK,UAAU,EACf,KAAK,YAAY,EACjB,KAAK,YAAY,EACjB,KAAK,QAAQ,EACb,UAAU,EACV,eAAe,EACf,aAAa,EACb,kBAAkB,GAClB,MAAM,aAAa,CAAC;AACrB,OAAO,EAAE,8BAA8B,EAAE,sBAAsB,EAAE,2BAA2B,EAAE,MAAM,mBAAmB,CAAC;AACxH,YAAY,EAAE,gBAAgB,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AAC3E,YAAY,EAAE,SAAS,EAAE,gBAAgB,EAAE,MAAM,cAAc,CAAC;AAChE,YAAY,EAAE,UAAU,EAAE,gBAAgB,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AACtF,YAAY,EACX,aAAa,EACb,sBAAsB,EACtB,gCAAgC,EAChC,8BAA8B,EAC9B,kCAAkC,EAClC,uBAAuB,EACvB,sBAAsB,EACtB,oCAAoC,EACpC,gCAAgC,EAChC,2BAA2B,EAC3B,gCAAgC,EAChC,8BAA8B,EAC9B,sBAAsB,EACtB,8BAA8B,EAC9B,mBAAmB,EACnB,sBAAsB,EACtB,yBAAyB,EACzB,8BAA8B,EAC9B,uBAAuB,GACvB,MAAM,kBAAkB,CAAC;AAC1B,OAAO,EAAE,yBAAyB,EAAE,MAAM,kBAAkB,CAAC;AAE7D,OAAO,KAAK,QAAQ,MAAM,YAAY,CAAC;AACvC,OAAO,EAAE,QAAQ,EAAE,CAAC;AAEpB,OAAO,EAAE,IAAI,EAAE,sBAAsB,EAAE,MAAM,YAAY,CAAC;AAC1D,YAAY,EAAE,YAAY,EAAE,OAAO,EAAE,MAAM,YAAY,CAAC;AACxD,OAAO,EAAE,UAAU,EAAE,MAAM,cAAc,CAAC;AAC1C,YAAY,EAAE,QAAQ,EAAE,WAAW,EAAE,MAAM,cAAc,CAAC;AAE1D,OAAO,EAAE,6BAA6B,EAAE,MAAM,qBAAqB,CAAC;AACpE,YAAY,EAAE,uBAAuB,EAAE,iBAAiB,EAAE,MAAM,qBAAqB,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/library-to-tasks.d.ts b/data/node_modules/@huggingface/tasks/dist/src/library-to-tasks.d.ts new file mode 100644 index 
0000000000000000000000000000000000000000..a711d7e6762883b57f5da01d08f270e78e682a9b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/library-to-tasks.d.ts @@ -0,0 +1,11 @@ +import type { ModelLibraryKey } from "./model-libraries"; +import type { PipelineType } from "./pipelines"; +/** + * Mapping from library name to its supported tasks. + * Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping. + * This mapping is partially generated automatically by "python-api-export-tasks" action in + * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually + * based on api-inference (hf_types.rs). + */ +export declare const LIBRARY_TASK_MAPPING: Partial>; +//# sourceMappingURL=library-to-tasks.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/library-to-tasks.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/library-to-tasks.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..c0d49dd3664a3b23e2733a77c994dbe21e3e0cf6 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/library-to-tasks.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"library-to-tasks.d.ts","sourceRoot":"","sources":["../../src/library-to-tasks.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AACzD,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD;;;;;;GAMG;AACH,eAAO,MAAM,oBAAoB,EAAE,OAAO,CAAC,MAAM,CAAC,eAAe,EAAE,YAAY,EAAE,CAAC,CAiEjF,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/local-apps.d.ts b/data/node_modules/@huggingface/tasks/dist/src/local-apps.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..d4bb960225a56d983123451159c0094d7bc676fe --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/local-apps.d.ts @@ -0,0 +1,130 @@ +import type { ModelData } from "./model-data"; +import type { PipelineType } from 
"./pipelines"; +/** + * Elements configurable by a local app. + */ +export type LocalApp = { + /** + * Name that appears in buttons + */ + prettyLabel: string; + /** + * Link to get more info about a local app (website etc) + */ + docsUrl: string; + /** + * main category of app + */ + mainTask: PipelineType; + /** + * Whether to display a pill "macOS-only" + */ + macOSOnly?: boolean; + comingSoon?: boolean; + /** + * IMPORTANT: function to figure out whether to display the button on a model page's main "Use this model" dropdown. + */ + displayOnModelPage: (model: ModelData) => boolean; +} & ({ + /** + * If the app supports deeplink, URL to open. + */ + deeplink: (model: ModelData, filepath?: string) => URL; +} | { + /** + * And if not (mostly llama.cpp), snippet to copy/paste in your terminal + * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files. + */ + snippet: (model: ModelData, filepath?: string) => string | string[]; +}); +declare function isGgufModel(model: ModelData): boolean; +/** + * Add your new local app here. + * + * This is open to new suggestions and awesome upcoming apps. + * + * /!\ IMPORTANT + * + * If possible, you need to support deeplinks and be as cross-platform as possible. + * + * Ping the HF team if we can help with anything! 
+ */ +export declare const LOCAL_APPS: { + "llama.cpp": { + prettyLabel: string; + docsUrl: string; + mainTask: "text-generation"; + displayOnModelPage: typeof isGgufModel; + snippet: (model: ModelData, filepath?: string) => string[]; + }; + lmstudio: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-generation"; + displayOnModelPage: typeof isGgufModel; + deeplink: (model: ModelData, filepath: string | undefined) => URL; + }; + jan: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-generation"; + displayOnModelPage: typeof isGgufModel; + deeplink: (model: ModelData) => URL; + }; + backyard: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-generation"; + displayOnModelPage: typeof isGgufModel; + deeplink: (model: ModelData) => URL; + }; + sanctum: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-generation"; + displayOnModelPage: typeof isGgufModel; + deeplink: (model: ModelData) => URL; + }; + jellybox: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-generation"; + displayOnModelPage: (model: ModelData) => boolean; + deeplink: (model: ModelData) => URL; + }; + msty: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-generation"; + displayOnModelPage: typeof isGgufModel; + deeplink: (model: ModelData) => URL; + }; + recursechat: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-generation"; + macOSOnly: true; + displayOnModelPage: typeof isGgufModel; + deeplink: (model: ModelData) => URL; + }; + drawthings: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-to-image"; + macOSOnly: true; + displayOnModelPage: (model: ModelData) => boolean; + deeplink: (model: ModelData) => URL; + }; + diffusionbee: { + prettyLabel: string; + docsUrl: string; + mainTask: "text-to-image"; + macOSOnly: true; + comingSoon: true; + displayOnModelPage: (model: ModelData) => boolean; + deeplink: (model: ModelData) => URL; + }; +}; +export type LocalAppKey = keyof typeof LOCAL_APPS; +export {}; 
+//# sourceMappingURL=local-apps.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/local-apps.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/local-apps.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..869faa054afd4ed5081c4cae73b55328f0dcde1b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/local-apps.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,CAAC;CACnE,CACH,CAAC;AAEF,iBAAS,WAAW,CAAC,KAAK,EAAE,SAAS,WAEpC;AA2BD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBApCS,SAAS,aAAa,MAAM,KAAG,MAAM,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAmInC,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/model-data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/model-data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..4ae125c48b53b8f59dc15c211627514dd9d0300c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/model-data.d.ts @@ -0,0 +1,131 @@ +import type { PipelineType } from "./pipelines"; +import type { WidgetExample } from "./widget-example"; +import type { TokenizerConfig } from "./tokenizer-data"; +/** + * Public interface for model metadata + */ 
+export interface ModelData { + /** + * id of model (e.g. 'user/repo_name') + */ + id: string; + /** + * Whether or not to enable inference widget for this model + * TODO(type it) + */ + inference: string; + /** + * is this model private? + */ + private?: boolean; + /** + * this dictionary has useful information about the model configuration + */ + config?: { + architectures?: string[]; + /** + * Dict of AutoModel or Auto… class name to local import path in the repo + */ + auto_map?: { + /** + * String Property + */ + [x: string]: string; + }; + model_type?: string; + quantization_config?: { + bits?: number; + load_in_4bit?: boolean; + load_in_8bit?: boolean; + }; + tokenizer_config?: TokenizerConfig; + adapter_transformers?: { + model_name?: string; + model_class?: string; + }; + diffusers?: { + _class_name?: string; + }; + sklearn?: { + model?: { + file?: string; + }; + model_format?: string; + }; + speechbrain?: { + speechbrain_interface?: string; + vocoder_interface?: string; + vocoder_model_id?: string; + }; + peft?: { + base_model_name_or_path?: string; + task_type?: string; + }; + }; + /** + * all the model tags + */ + tags: string[]; + /** + * transformers-specific info to display in the code sample. + */ + transformersInfo?: TransformersInfo; + /** + * Pipeline type + */ + pipeline_tag?: PipelineType | undefined; + /** + * for relevant models, get mask token + */ + mask_token?: string | undefined; + /** + * Example data that will be fed into the widget. 
+ * + * can be set in the model card metadata (under `widget`), + * or by default in `DefaultWidget.ts` + */ + widgetData?: WidgetExample[] | undefined; + /** + * Parameters that will be used by the widget when calling Inference API (serverless) + * https://huggingface.co/docs/api-inference/detailed_parameters + * + * can be set in the model card metadata (under `inference/parameters`) + * Example: + * inference: + * parameters: + * key: val + */ + cardData?: { + inference?: boolean | { + parameters?: Record; + }; + base_model?: string | string[]; + }; + /** + * Library name + * Example: transformers, SpeechBrain, Stanza, etc. + */ + library_name?: string; +} +/** + * transformers-specific info to display in the code sample. + */ +export interface TransformersInfo { + /** + * e.g. AutoModelForSequenceClassification + */ + auto_model: string; + /** + * if set in config.json's auto_map + */ + custom_class?: string; + /** + * e.g. text-classification + */ + pipeline_tag?: PipelineType; + /** + * e.g. 
"AutoTokenizer" | "AutoFeatureExtractor" | "AutoProcessor" + */ + processor?: string; +} +//# sourceMappingURL=model-data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/model-data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/model-data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..a47abd21d0785ccb6c02ee7e8a1423798796a50b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/model-data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"model-data.d.ts","sourceRoot":"","sources":["../../src/model-data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAChD,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACtD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAExD;;GAEG;AACH,MAAM,WAAW,SAAS;IACzB;;OAEG;IACH,EAAE,EAAE,MAAM,CAAC;IACX;;;OAGG;IACH,SAAS,EAAE,MAAM,CAAC;IAClB;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAClB;;OAEG;IACH,MAAM,CAAC,EAAE;QACR,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;QACzB;;WAEG;QACH,QAAQ,CAAC,EAAE;YACV;;eAEG;YACH,CAAC,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;SACpB,CAAC;QACF,UAAU,CAAC,EAAE,MAAM,CAAC;QACpB,mBAAmB,CAAC,EAAE;YACrB,IAAI,CAAC,EAAE,MAAM,CAAC;YACd,YAAY,CAAC,EAAE,OAAO,CAAC;YACvB,YAAY,CAAC,EAAE,OAAO,CAAC;SACvB,CAAC;QACF,gBAAgB,CAAC,EAAE,eAAe,CAAC;QACnC,oBAAoB,CAAC,EAAE;YACtB,UAAU,CAAC,EAAE,MAAM,CAAC;YACpB,WAAW,CAAC,EAAE,MAAM,CAAC;SACrB,CAAC;QACF,SAAS,CAAC,EAAE;YACX,WAAW,CAAC,EAAE,MAAM,CAAC;SACrB,CAAC;QACF,OAAO,CAAC,EAAE;YACT,KAAK,CAAC,EAAE;gBACP,IAAI,CAAC,EAAE,MAAM,CAAC;aACd,CAAC;YACF,YAAY,CAAC,EAAE,MAAM,CAAC;SACtB,CAAC;QACF,WAAW,CAAC,EAAE;YACb,qBAAqB,CAAC,EAAE,MAAM,CAAC;YAC/B,iBAAiB,CAAC,EAAE,MAAM,CAAC;YAC3B,gBAAgB,CAAC,EAAE,MAAM,CAAC;SAC1B,CAAC;QACF,IAAI,CAAC,EAAE;YACN,uBAAuB,CAAC,EAAE,MAAM,CAAC;YACjC,SAAS,CAAC,EAAE,MAAM,CAAC;SACnB,CAAC;KACF,CAAC;IACF;;OAEG;IACH,IAAI,EAAE,MAAM,EAAE,CAAC;IACf;;OAEG;IACH,gBAAgB,CAAC,EAAE,gBAAgB,CAAC;IACpC;;OAEG;IACH,YAAY,CAAC,EAAE,YAAY,GAAG,SAAS,CAAC;IACxC;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,GAAG,SAAS,CAAC;IAChC;;;;;OAKG;IACH,UAAU,CAA
C,EAAE,aAAa,EAAE,GAAG,SAAS,CAAC;IACzC;;;;;;;;;OASG;IACH,QAAQ,CAAC,EAAE;QACV,SAAS,CAAC,EACP,OAAO,GACP;YACA,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;SACpC,CAAC;QACL,UAAU,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;KAC/B,CAAC;IACF;;;OAGG;IACH,YAAY,CAAC,EAAE,MAAM,CAAC;CACtB;AAED;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,UAAU,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB;;OAEG;IACH,YAAY,CAAC,EAAE,YAAY,CAAC;IAC5B;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;CACnB"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/model-libraries-downloads.d.ts b/data/node_modules/@huggingface/tasks/dist/src/model-libraries-downloads.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..2068624044fa42a89cd7889633465fa802c9ac71 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/model-libraries-downloads.d.ts @@ -0,0 +1,18 @@ +/** + * This file contains the (simplified) types used + * to represent queries that are made to Elastic + * in order to count number of model downloads + * + * Read this doc about download stats on the Hub: + * + * https://huggingface.co/docs/hub/models-download-stats + * Available fields: + * - path: the complete file path (relative) (e.g: "prefix/file.extension") + * - path_prefix: the prefix of the file path (e.g: "prefix/", empty if no prefix) + * - path_extension: the extension of the file path (e.g: "extension") + * - path_filename: the name of the file path (e.g: "file") + * see also: + * https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html + */ +export type ElasticSearchQuery = string; +//# sourceMappingURL=model-libraries-downloads.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/model-libraries-downloads.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/model-libraries-downloads.d.ts.map new file mode 100644 index 
0000000000000000000000000000000000000000..f0a63c2eda1c1650a18fb8060e9cf6fb53c7c272 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/model-libraries-downloads.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"model-libraries-downloads.d.ts","sourceRoot":"","sources":["../../src/model-libraries-downloads.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AAEH,MAAM,MAAM,kBAAkB,GAAG,MAAM,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/model-libraries-snippets.d.ts b/data/node_modules/@huggingface/tasks/dist/src/model-libraries-snippets.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..9c93e62708b2d91e247db1b84574cce9c1f483a8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/model-libraries-snippets.d.ts @@ -0,0 +1,54 @@ +import type { ModelData } from "./model-data"; +export declare const adapters: (model: ModelData) => string[]; +export declare const allennlp: (model: ModelData) => string[]; +export declare const asteroid: (model: ModelData) => string[]; +export declare const audioseal: (model: ModelData) => string[]; +export declare const bertopic: (model: ModelData) => string[]; +export declare const bm25s: (model: ModelData) => string[]; +export declare const depth_anything_v2: (model: ModelData) => string[]; +export declare const diffusers: (model: ModelData) => string[]; +export declare const edsnlp: (model: ModelData) => string[]; +export declare const espnetTTS: (model: ModelData) => string[]; +export declare const espnetASR: (model: ModelData) => string[]; +export declare const espnet: (model: ModelData) => string[]; +export declare const fairseq: (model: ModelData) => string[]; +export declare const flair: (model: ModelData) => string[]; +export declare const gliner: (model: ModelData) => string[]; +export declare const keras: (model: ModelData) => string[]; +export declare const keras_nlp: (model: ModelData) => string[]; +export declare const tf_keras: (model: 
ModelData) => string[]; +export declare const mamba_ssm: (model: ModelData) => string[]; +export declare const mars5_tts: (model: ModelData) => string[]; +export declare const mesh_anything: () => string[]; +export declare const open_clip: (model: ModelData) => string[]; +export declare const paddlenlp: (model: ModelData) => string[]; +export declare const pyannote_audio_pipeline: (model: ModelData) => string[]; +export declare const pyannote_audio: (model: ModelData) => string[]; +export declare const tensorflowtts: (model: ModelData) => string[]; +export declare const timm: (model: ModelData) => string[]; +export declare const sklearn: (model: ModelData) => string[]; +export declare const stable_audio_tools: (model: ModelData) => string[]; +export declare const fastai: (model: ModelData) => string[]; +export declare const sampleFactory: (model: ModelData) => string[]; +export declare const sentenceTransformers: (model: ModelData) => string[]; +export declare const setfit: (model: ModelData) => string[]; +export declare const spacy: (model: ModelData) => string[]; +export declare const span_marker: (model: ModelData) => string[]; +export declare const stanza: (model: ModelData) => string[]; +export declare const speechbrain: (model: ModelData) => string[]; +export declare const transformers: (model: ModelData) => string[]; +export declare const transformersJS: (model: ModelData) => string[]; +export declare const peft: (model: ModelData) => string[]; +export declare const fasttext: (model: ModelData) => string[]; +export declare const stableBaselines3: (model: ModelData) => string[]; +export declare const mlAgents: (model: ModelData) => string[]; +export declare const sentis: () => string[]; +export declare const voicecraft: (model: ModelData) => string[]; +export declare const chattts: () => string[]; +export declare const mlx: (model: ModelData) => string[]; +export declare const mlxim: (model: ModelData) => string[]; +export declare const nemo: (model: 
ModelData) => string[]; +export declare const pythae: (model: ModelData) => string[]; +export declare const audiocraft: (model: ModelData) => string[]; +export declare const whisperkit: () => string[]; +//# sourceMappingURL=model-libraries-snippets.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/model-libraries-snippets.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/model-libraries-snippets.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..bf2a28daab9939ab3a16abadb25d0483d208e47a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/model-libraries-snippets.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"model-libraries-snippets.d.ts","sourceRoot":"","sources":["../../src/model-libraries-snippets.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAY9C,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAkBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAkBlD,CAAC;AAMF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,iBAAiB,UAAW,SAAS,KAAG,MAAM,EA6C1D,CAAC;AA+BF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAgB/C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAMlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EASlD,CAAC;AAIF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAO/C,CAAC;AAEF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAMhD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAS9C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAOjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAIlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,aAAa,QAAO,MAAM,EAQtC,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,e
AAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAsBlD,CAAC;AAEF,eAAO,MAAM,uBAAuB,UAAW,SAAS,KAAG,MAAM,EAehE,CAAC;AAiBF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAKvD,CAAC;AAyBF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAOtD,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAI7C,CAAC;AAsCF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAehD,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,SAAS,KAAG,MAAM,EAmC3D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAEtD,CAAC;AAEF,eAAO,MAAM,oBAAoB,UAAW,SAAS,KAAG,MAAM,EAI7D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAU9C,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAIpD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAK/C,CAAC;AAkBF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAkBpD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EA4CrD,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAcvD,CAAC;AAiBF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAkB7C,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAMzD,CAAC;AAgBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAEjD,CAAC;AAEF,eAAO,MAAM,MAAM,QAA6B,MAAM,EAMrD,CAAC;AAEF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAInD,CAAC;AAEF,eAAO,MAAM,OAAO,QAAO,MAAM,EAYhC,CAAC;AAEF,eAAO,MAAM,GAAG,UAAW,SAAS,KAAG,MAAM,EAK5C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAQ7C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AA6BF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAUnD,CAAC;AAEF,eAAO,MAAM,UAAU,QAAO,MAAM,EAYnC,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/model-libraries.d.ts b/data/node_modules/@huggingface/tasks/dist/src/model-libraries.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..a3dd090d3e7887d151a7f8a61d7afb1c855786f9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/model-libraries.d.ts @@ -0,0 +1,576 @@ +import type { ModelData } from "./model-data"; +import type { ElasticSearchQuery } from 
"./model-libraries-downloads"; +/** + * Elements configurable by a model library. + */ +export interface LibraryUiElement { + /** + * Pretty name of the library. + * displayed in tags, and on the main + * call-to-action button on the model page. + */ + prettyLabel: string; + /** + * Repo name of the library's (usually on GitHub) code repo + */ + repoName: string; + /** + * URL to library's (usually on GitHub) code repo + */ + repoUrl: string; + /** + * URL to library's docs + */ + docsUrl?: string; + /** + * Code snippet(s) displayed on model page + */ + snippets?: (model: ModelData) => string[]; + /** + * Elastic query used to count this library's model downloads + * + * By default, those files are counted: + * "config.json", "config.yaml", "hyperparams.yaml", "meta.yaml" + */ + countDownloads?: ElasticSearchQuery; + /** + * should we display this library in hf.co/models filter + * (only for popular libraries with > 100 models) + */ + filter?: boolean; +} +/** + * Add your new library here. + * + * This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc). + * (unlike libraries, file formats live in an enum inside the internal codebase.) + * + * Doc on how to add a library to the Hub: + * + * https://huggingface.co/docs/hub/models-adding-libraries + * + * /!\ IMPORTANT + * + * The key you choose is the tag your models have in their library_name on the Hub. 
+ */ +export declare const MODEL_LIBRARIES_UI_ELEMENTS: { + "adapter-transformers": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + allennlp: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + asteroid: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + audiocraft: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: false; + countDownloads: string; + }; + audioseal: { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + snippets: (model: ModelData) => string[]; + }; + bertopic: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + big_vision: { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + bm25s: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: false; + countDownloads: string; + }; + champ: { + prettyLabel: string; + repoName: string; + repoUrl: string; + countDownloads: string; + }; + chat_tts: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: () => string[]; + filter: false; + countDownloads: string; + }; + colpali: { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + "depth-anything-v2": { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: false; + countDownloads: string; + }; + diffusers: { + prettyLabel: string; + repoName: string; + 
repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + doctr: { + prettyLabel: string; + repoName: string; + repoUrl: string; + }; + edsnlp: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + filter: false; + snippets: (model: ModelData) => string[]; + countDownloads: string; + }; + elm: { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + espnet: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + fairseq: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + fastai: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + fasttext: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + flair: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + "gemma.cpp": { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + gliner: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: false; + countDownloads: string; + }; + "glyph-byt5": { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + grok: { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + hallo: { + prettyLabel: string; + repoName: string; + repoUrl: string; + countDownloads: string; + }; + "hunyuan-dit": { + prettyLabel: string; + repoName: string; + 
repoUrl: string; + countDownloads: string; + }; + keras: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + "tf-keras": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + "keras-nlp": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + }; + k2: { + prettyLabel: string; + repoName: string; + repoUrl: string; + }; + liveportrait: { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + mindspore: { + prettyLabel: string; + repoName: string; + repoUrl: string; + }; + "mamba-ssm": { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + snippets: (model: ModelData) => string[]; + }; + "mars5-tts": { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + snippets: (model: ModelData) => string[]; + }; + "mesh-anything": { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + snippets: () => string[]; + }; + "ml-agents": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + mlx: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + "mlx-image": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: false; + countDownloads: string; + }; + "mlc-llm": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + filter: false; + countDownloads: string; + }; + nemo: { + 
prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + open_clip: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + paddlenlp: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + peft: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + "pyannote-audio": { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + pythae: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + recurrentgemma: { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + "sample-factory": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + "sentence-transformers": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + setfit: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + sklearn: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + spacy: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + "span-marker": { + prettyLabel: 
string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + speechbrain: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + "stable-audio-tools": { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + snippets: (model: ModelData) => string[]; + }; + "diffusion-single-file": { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + "stable-baselines3": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + stanza: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + tensorflowtts: { + prettyLabel: string; + repoName: string; + repoUrl: string; + snippets: (model: ModelData) => string[]; + }; + "tic-clip": { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + timesfm: { + prettyLabel: string; + repoName: string; + repoUrl: string; + filter: false; + countDownloads: string; + }; + timm: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + countDownloads: string; + }; + transformers: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + "transformers.js": { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + filter: true; + }; + "unity-sentis": { + prettyLabel: string; + repoName: string; + repoUrl: string; + 
snippets: () => string[]; + filter: true; + countDownloads: string; + }; + voicecraft: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: (model: ModelData) => string[]; + }; + whisperkit: { + prettyLabel: string; + repoName: string; + repoUrl: string; + docsUrl: string; + snippets: () => string[]; + countDownloads: string; + }; +}; +export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS; +export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "bm25s" | "champ" | "chat_tts" | "colpali" | "depth-anything-v2" | "diffusers" | "doctr" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hunyuan-dit" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "recurrentgemma" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-audio-tools" | "diffusion-single-file" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[]; +export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "bm25s" | "champ" | "chat_tts" | "colpali" | "depth-anything-v2" | "diffusers" | "doctr" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hunyuan-dit" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | 
"mlx-image" | "mlc-llm" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "recurrentgemma" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-audio-tools" | "diffusion-single-file" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[]; +//# sourceMappingURL=model-libraries.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/model-libraries.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/model-libraries.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..707194cf81fa74b4397c5851a4c377f2715c9c80 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/model-libraries.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAsgBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,i7BAAgE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,i7BAQ1B,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/pipelines.d.ts b/data/node_modules/@huggingface/tasks/dist/src/pipelines.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..f4926b934924998b3b32c9e9463f4ecb3ce44ae4 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/pipelines.d.ts @@ -0,0 +1,403 @@ +export declare const MODALITIES: readonly ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"]; +export type Modality = (typeof MODALITIES)[number]; +export declare const MODALITY_LABELS: { + multimodal: string; + nlp: string; + audio: string; + cv: string; + rl: string; + tabular: string; + other: string; +}; +/** + * Public interface for a sub task. + * + * This can be used in a model card's `model-index` metadata. + * and is more granular classification that can grow significantly + * over time as new tasks are added. + */ +export interface SubTask { + /** + * type of the task (e.g. audio-source-separation) + */ + type: string; + /** + * displayed name of the task (e.g. Audio Source Separation) + */ + name: string; +} +/** + * Public interface for a PipelineData. + * + * This information corresponds to a pipeline type (aka task) + * in the Hub. + */ +export interface PipelineData { + /** + * displayed name of the task (e.g. Text Classification) + */ + name: string; + subtasks?: SubTask[]; + modality: Modality; + /** + * color for the tag icon. 
+ */ + color: "blue" | "green" | "indigo" | "orange" | "red" | "yellow"; + /** + * whether to hide in /models filters + */ + hideInModels?: boolean; + /** + * whether to hide in /datasets filters + */ + hideInDatasets?: boolean; +} +export declare const PIPELINE_DATA: { + "text-classification": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "orange"; + }; + "token-classification": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "blue"; + }; + "table-question-answering": { + name: string; + modality: "nlp"; + color: "green"; + }; + "question-answering": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "blue"; + }; + "zero-shot-classification": { + name: string; + modality: "nlp"; + color: "yellow"; + }; + translation: { + name: string; + modality: "nlp"; + color: "green"; + }; + summarization: { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "indigo"; + }; + "feature-extraction": { + name: string; + modality: "nlp"; + color: "red"; + }; + "text-generation": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "indigo"; + }; + "text2text-generation": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "indigo"; + }; + "fill-mask": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "red"; + }; + "sentence-similarity": { + name: string; + modality: "nlp"; + color: "yellow"; + }; + "text-to-speech": { + name: string; + modality: "audio"; + color: "yellow"; + }; + "text-to-audio": { + name: string; + modality: "audio"; + color: "yellow"; + }; + "automatic-speech-recognition": { + name: string; + modality: "audio"; + color: "yellow"; + }; + "audio-to-audio": { + name: string; + modality: "audio"; + color: "blue"; + }; + 
"audio-classification": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "audio"; + color: "green"; + }; + "voice-activity-detection": { + name: string; + modality: "audio"; + color: "red"; + }; + "depth-estimation": { + name: string; + modality: "cv"; + color: "yellow"; + }; + "image-classification": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "cv"; + color: "blue"; + }; + "object-detection": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "cv"; + color: "yellow"; + }; + "image-segmentation": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "cv"; + color: "green"; + }; + "text-to-image": { + name: string; + modality: "cv"; + color: "yellow"; + }; + "image-to-text": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "cv"; + color: "red"; + }; + "image-to-image": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "cv"; + color: "indigo"; + }; + "image-to-video": { + name: string; + modality: "cv"; + color: "indigo"; + }; + "unconditional-image-generation": { + name: string; + modality: "cv"; + color: "green"; + }; + "video-classification": { + name: string; + modality: "cv"; + color: "blue"; + }; + "reinforcement-learning": { + name: string; + modality: "rl"; + color: "red"; + }; + robotics: { + name: string; + modality: "rl"; + subtasks: { + type: string; + name: string; + }[]; + color: "blue"; + }; + "tabular-classification": { + name: string; + modality: "tabular"; + subtasks: { + type: string; + name: string; + }[]; + color: "blue"; + }; + "tabular-regression": { + name: string; + modality: "tabular"; + subtasks: { + type: string; + name: string; + }[]; + color: "blue"; + }; + "tabular-to-text": { + name: string; + modality: "tabular"; + subtasks: { + type: string; + name: string; + }[]; + color: "blue"; + hideInModels: true; + }; + "table-to-text": { + name: 
string; + modality: "nlp"; + color: "blue"; + hideInModels: true; + }; + "multiple-choice": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "blue"; + hideInModels: true; + }; + "text-retrieval": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "nlp"; + color: "indigo"; + hideInModels: true; + }; + "time-series-forecasting": { + name: string; + modality: "tabular"; + subtasks: { + type: string; + name: string; + }[]; + color: "blue"; + }; + "text-to-video": { + name: string; + modality: "cv"; + color: "green"; + }; + "image-text-to-text": { + name: string; + modality: "multimodal"; + color: "red"; + hideInDatasets: true; + }; + "visual-question-answering": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "multimodal"; + color: "red"; + }; + "document-question-answering": { + name: string; + subtasks: { + type: string; + name: string; + }[]; + modality: "multimodal"; + color: "blue"; + hideInDatasets: true; + }; + "zero-shot-image-classification": { + name: string; + modality: "cv"; + color: "yellow"; + }; + "graph-ml": { + name: string; + modality: "other"; + color: "green"; + }; + "mask-generation": { + name: string; + modality: "cv"; + color: "indigo"; + }; + "zero-shot-object-detection": { + name: string; + modality: "cv"; + color: "yellow"; + }; + "text-to-3d": { + name: string; + modality: "cv"; + color: "yellow"; + }; + "image-to-3d": { + name: string; + modality: "cv"; + color: "green"; + }; + "image-feature-extraction": { + name: string; + modality: "cv"; + color: "indigo"; + }; + other: { + name: string; + modality: "other"; + color: "blue"; + hideInModels: true; + hideInDatasets: true; + }; +}; +export type PipelineType = keyof typeof PIPELINE_DATA; +export type WidgetType = PipelineType | "conversational"; +export declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | 
"question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction")[]; +export declare const SUBTASK_TYPES: string[]; +export declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | 
"visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction">; +//# sourceMappingURL=pipelines.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/pipelines.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/pipelines.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..61371a1a3651784124105f1538acdda023645a28 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/pipelines.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"pipelines.d.ts","sourceRoot":"","sources":["../../src/pipelines.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU,yEAA0E,CAAC;AAElG,MAAM,MAAM,QAAQ,GAAG,CAAC,OAAO,UAAU,CAAC,CAAC,MAAM,CAAC,CAAC;AAEnD,eAAO,MAAM,eAAe;;;;;;;;CAQQ,CAAC;AAErC;;;;;;GAMG;AACH,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;CACb;AAED;;;;;GAKG;AACH,MAAM,WAAW,YAAY;IAC5B;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,CAAC,EAAE,OAAO,EAAE,CAAC;IACrB,QAAQ,EAAE,QAAQ,CAAC;IACnB;;OAEG;IACH,KAAK,EAAE,MAAM,GAAG,OAAO,GAAG,QAAQ,GAAG,QAAQ,GAAG,KAAK,GAAG,QAAQ,CAAC;IACjE;;OAEG;IACH,YAAY,CAAC,EAAE,OAAO,CAAC;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB;AAcD,eAAO,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8kBc,CAAC;AAEzC,MAAM,MAAM,YAAY,GAAG,MAAM,OAAO,aAAa,CAAC;AAEtD,MAAM,MAAM,UAAU,GAAG,YAAY,GAAG,gBAAgB,CAAC;AAEzD,eAAO,MAAM,cAAc,mlCAA+C,CAAC;AAE3E,eAAO,MAAM,aAAa,UAEN,CAAC;AAErB,eAAO,MAAM,kBAAkB,olCAA0B,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/curl.d.ts 
b/data/node_modules/@huggingface/tasks/dist/src/snippets/curl.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..86c4b513a479e272d501385fad01e3f0fde24641 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/curl.d.ts @@ -0,0 +1,10 @@ +import type { PipelineType } from "../pipelines.js"; +import type { ModelDataMinimal } from "./types.js"; +export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => string; +export declare const curlSnippets: Partial string>>; +export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): string; +export declare function hasCurlInferenceSnippet(model: Pick): boolean; +//# sourceMappingURL=curl.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/curl.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/snippets/curl.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..77ed7956454cd594ab9b19d0056f802a92db0e8f --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/curl.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAM3E,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAgBpF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAM5F,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAK1E,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwBhH,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI5F;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/index.d.ts b/data/node_modules/@huggingface/tasks/dist/src/snippets/index.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..24bba5e7f197ad24786065c5d808a91221e62eca --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/index.d.ts @@ -0,0 +1,6 @@ +import * as inputs from "./inputs"; +import * as curl from "./curl"; +import * as python from "./python"; +import * as js from "./js"; +export { inputs, curl, python, js }; +//# sourceMappingURL=index.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/index.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/snippets/index.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..3e293c100f24cbb99486236d431441f65b586c97 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/index.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/snippets/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,MAAM,MAAM,UAAU,CAAC;AACnC,OAAO,KAAK,IAAI,MAAM,QAAQ,CAAC;AAC/B,OAAO,KAAK,MAAM,MAAM,UAAU,CAAC;AACnC,OAAO,KAAK,EAAE,MAAM,MAAM,CAAC;AAE3B,OAAO,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,EAAE,EAAE,EAAE,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/inputs.d.ts b/data/node_modules/@huggingface/tasks/dist/src/snippets/inputs.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..0ade5a0f8807e51da6678ea8ace6f75a9cb97913 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/inputs.d.ts @@ -0,0 +1,3 @@ +import type { ModelDataMinimal } from "./types"; +export declare function getModelInputSnippet(model: ModelDataMinimal, noWrap?: boolean, noQuotes?: boolean): string; +//# sourceMappingURL=inputs.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/inputs.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/snippets/inputs.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..6eedc0613b65f34014716bcbe07090295f16b17d --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/inputs.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inputs.d.ts","sourceRoot":"","sources":["../../../src/snippets/inputs.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AAqHhD,wBAAgB,oBAAoB,CAAC,KAAK,EAAE,gBAAgB,EAAE,MAAM,UAAQ,EAAE,QAAQ,UAAQ,GAAG,MAAM,CAiBtG"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/js.d.ts b/data/node_modules/@huggingface/tasks/dist/src/snippets/js.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..56125509681de46e5858c9fcec36cdba402cc247 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/js.d.ts @@ -0,0 +1,12 @@ +import type { PipelineType } from 
"../pipelines.js"; +import type { ModelDataMinimal } from "./types.js"; +export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => string; +export declare const jsSnippets: Partial string>>; +export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string): string; +export declare function hasJsInferenceSnippet(model: ModelDataMinimal): boolean; +//# sourceMappingURL=js.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/js.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/snippets/js.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..e98cc0d79ae2fcbf128547969066b7370534b545 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/js.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAmBxE,CAAC;AAEL,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkBpF,CAAC;AACF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqBzF,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkB9E,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAoBvE,CAAC;AAEL,eAAO,MAAM,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwB9G,CAAC;AAEF,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI1F;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/python.d.ts b/data/node_modules/@huggingface/tasks/dist/src/snippets/python.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..c5514f569160ea2322d380d637584619d26e6a08 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/python.d.ts @@ -0,0 +1,15 @@ +import type { PipelineType } from "../pipelines.js"; +import type { ModelDataMinimal } from "./types.js"; +export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string) => string; +export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => string; +export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => string; +export declare const snippetBasic: (model: ModelDataMinimal) => string; +export declare const snippetFile: (model: ModelDataMinimal) => string; +export declare const snippetTextToImage: (model: ModelDataMinimal) => string; +export declare const snippetTabular: (model: ModelDataMinimal) => string; +export declare const snippetTextToAudio: (model: ModelDataMinimal) => string; 
+export declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => string; +export declare const pythonSnippets: Partial string>>; +export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string; +export declare function hasPythonInferenceSnippet(model: ModelDataMinimal): boolean; +//# sourceMappingURL=python.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/python.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/snippets/python.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..0e2794cef4fb686e34d105aaa2d7e627cfd68e2c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/python.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAcpF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,MAQrE,CAAC;AAEJ,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,MAc1E,CAAC;AAEJ,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,MAOpD,CAAC;AAEJ,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,MAOP,CAAC;AAEhD,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MAUjB,CAAC;AAE7C,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,MAMtD,CAAC;AAEJ,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MA2B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,MAUxE,CAAC;AAEJ,eAAO,MAAM,cAAc,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CA4BlH,CAAC;AAEF,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAiB9F;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/types.d.ts b/data/node_modules/@huggingface/tasks/dist/src/snippets/types.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..27c0ff91861fab69c9eba960ae6d6e27683599dc 
--- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/types.d.ts @@ -0,0 +1,8 @@ +import type { ModelData } from "../model-data"; +/** + * Minimal model data required for snippets. + * + * Add more fields as needed. + */ +export type ModelDataMinimal = Pick; +//# sourceMappingURL=types.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/snippets/types.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/snippets/types.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..0d38023ef18b0822ea76a081fc0babf41c917dd9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/snippets/types.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../../src/snippets/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAE/C;;;;GAIG;AACH,MAAM,MAAM,gBAAgB,GAAG,IAAI,CAAC,SAAS,EAAE,IAAI,GAAG,cAAc,GAAG,YAAY,GAAG,cAAc,GAAG,QAAQ,CAAC,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..3206d1c62d91301f9f59a93ebac7bafb8a8f1f08 --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAwEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..5863d370120ebee096cfdc958dd04438ddede587 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/inference.d.ts @@ -0,0 +1,52 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Audio Classification inference + */ +export interface AudioClassificationInput { + /** + * The input audio data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: AudioClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Audio Classification + */ +export interface AudioClassificationParameters { + function_to_apply?: ClassificationOutputTransform; + /** + * When specified, limits the output to the top K most probable classes. + */ + top_k?: number; + [property: string]: unknown; +} +/** + * The function to apply to the model outputs in order to retrieve the scores. + */ +export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none"; +export type AudioClassificationOutput = AudioClassificationOutputElement[]; +/** + * Outputs for Audio Classification inference + */ +export interface AudioClassificationOutputElement { + /** + * The predicted class label. 
+ */ + label: string; + /** + * The corresponding probability. + */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..b5ec909efdcf290f6477e944ad6e5172edc8bd79 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-classification/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,wBAAwB;IACxC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,6BAA6B,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,6BAA6B;IAC7C,iBAAiB,CAAC,EAAE,6BAA6B,CAAC;IAClD;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,MAAM,6BAA6B,GAAG,SAAS,GAAG,SAAS,GAAG,MAAM,CAAC;AAC3E,MAAM,MAAM,yBAAyB,GAAG,gCAAgC,EAAE,CAAC;AAC3E;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-to-audio/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-to-audio/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-to-audio/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git 
a/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-to-audio/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-to-audio/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..fbac6c5dc4ea500c2a508bcf8f5940046815becf --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/audio-to-audio/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-to-audio/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA6Df,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..af8f5b97102fdf5f36a3091480e1d27bb7192c9d --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/automatic-speech-recognition/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAyEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git 
a/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..fbe5e469ebb050f193eafc3a35fe116e4abf4dfb --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/inference.d.ts @@ -0,0 +1,154 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Automatic Speech Recognition inference + */ +export interface AutomaticSpeechRecognitionInput { + /** + * The input audio data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: AutomaticSpeechRecognitionParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Automatic Speech Recognition + */ +export interface AutomaticSpeechRecognitionParameters { + /** + * Parametrization of the text generation process + */ + generate?: GenerationParameters; + /** + * Whether to output corresponding timestamps with the generated text + */ + return_timestamps?: boolean; + [property: string]: unknown; +} +/** + * Parametrization of the text generation process + * + * Ad-hoc parametrization of the text generation process + */ +export interface GenerationParameters { + /** + * Whether to use sampling instead of greedy decoding when generating new tokens. + */ + do_sample?: boolean; + /** + * Controls the stopping condition for beam-based methods. + */ + early_stopping?: EarlyStoppingUnion; + /** + * If set to float strictly between 0 and 1, only tokens with a conditional probability + * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + * 3e-4 to 9e-4, depending on the size of the model. 
See [Truncation Sampling as Language + * Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + */ + epsilon_cutoff?: number; + /** + * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + * float strictly between 0 and 1, a token is only considered if it is greater than either + * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + * for more details. + */ + eta_cutoff?: number; + /** + * The maximum length (in tokens) of the generated text, including the input. + */ + max_length?: number; + /** + * The maximum number of tokens to generate. Takes precedence over maxLength. + */ + max_new_tokens?: number; + /** + * The minimum length (in tokens) of the generated text, including the input. + */ + min_length?: number; + /** + * The minimum number of tokens to generate. Takes precedence over maxLength. + */ + min_new_tokens?: number; + /** + * Number of groups to divide num_beams into in order to ensure diversity among different + * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + */ + num_beam_groups?: number; + /** + * Number of beams to use for beam search. + */ + num_beams?: number; + /** + * The value balances the model confidence and the degeneration penalty in contrastive + * search decoding. + */ + penalty_alpha?: number; + /** + * The value used to modulate the next token probabilities. + */ + temperature?: number; + /** + * The number of highest probability vocabulary tokens to keep for top-k-filtering. 
+ */ + top_k?: number; + /** + * If set to float < 1, only the smallest set of most probable tokens with probabilities + * that add up to top_p or higher are kept for generation. + */ + top_p?: number; + /** + * Local typicality measures how similar the conditional probability of predicting a target + * token next is to the expected conditional probability of predicting a random token next, + * given the partial text already generated. If set to float < 1, the smallest set of the + * most locally typical tokens with probabilities that add up to typical_p or higher are + * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + */ + typical_p?: number; + /** + * Whether the model should use the past last key/values attentions to speed up decoding + */ + use_cache?: boolean; + [property: string]: unknown; +} +/** + * Controls the stopping condition for beam-based methods. + */ +export type EarlyStoppingUnion = boolean | "never"; +/** + * Outputs of inference for the Automatic Speech Recognition task + */ +export interface AutomaticSpeechRecognitionOutput { + /** + * When returnTimestamps is enabled, chunks contains a list of audio chunks identified by + * the model. + */ + chunks?: AutomaticSpeechRecognitionOutputChunk[]; + /** + * The recognized text. 
+ */ + text: string; + [property: string]: unknown; +} +export interface AutomaticSpeechRecognitionOutputChunk { + /** + * A chunk of text identified by the model + */ + text: string; + /** + * The start and end timestamps corresponding with the text + */ + timestamps: number[]; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..454c2ba6cbc9213daa29f56945fd1baaf8e7ef2b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/automatic-speech-recognition/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/automatic-speech-recognition/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,+BAA+B;IAC/C;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,oCAAoC,CAAC;IAClD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oCAAoC;IACpD;;OAEG;IACH,QAAQ,CAAC,EAAE,oBAAoB,CAAC;IAChC;;OAEG;IACH,iBAAiB,CAAC,EAAE,OAAO,CAAC;IAC5B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAA
S,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;;OAGG;IACH,MAAM,CAAC,EAAE,qCAAqC,EAAE,CAAC;IACjD;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,qCAAqC;IACrD;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,UAAU,EAAE,MAAM,EAAE,CAAC;IACrB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/chat-completion/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/chat-completion/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..533c0236e27bcd9cc22cb149a3e8873af74902dc --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/chat-completion/inference.d.ts @@ -0,0 +1,254 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Chat Completion Input. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. + */ +export interface ChatCompletionInput { + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, + * decreasing the model's likelihood to repeat the same line verbatim. + */ + frequency_penalty?: number; + /** + * UNUSED + * Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON + * object that maps tokens + * (specified by their token ID in the tokenizer) to an associated bias value from -100 to + * 100. Mathematically, + * the bias is added to the logits generated by the model prior to sampling. 
The exact + * effect will vary per model, + * but values between -1 and 1 should decrease or increase likelihood of selection; values + * like -100 or 100 should + * result in a ban or exclusive selection of the relevant token. + */ + logit_bias?: number[]; + /** + * Whether to return log probabilities of the output tokens or not. If true, returns the log + * probabilities of each + * output token returned in the content of message. + */ + logprobs?: boolean; + /** + * The maximum number of tokens that can be generated in the chat completion. + */ + max_tokens?: number; + /** + * A list of messages comprising the conversation so far. + */ + messages: ChatCompletionInputMessage[]; + /** + * [UNUSED] ID of the model to use. See the model endpoint compatibility table for details + * on which models work with the Chat API. + */ + model: string; + /** + * UNUSED + * How many chat completion choices to generate for each input message. Note that you will + * be charged based on the + * number of generated tokens across all of the choices. Keep n as 1 to minimize costs. + */ + n?: number; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they + * appear in the text so far, + * increasing the model's likelihood to talk about new topics + */ + presence_penalty?: number; + seed?: number; + /** + * Up to 4 sequences where the API will stop generating further tokens. + */ + stop?: string[]; + stream?: boolean; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while + * lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number; + tool_choice?: ChatCompletionInputToolType; + /** + * A prompt to be appended before the tools + */ + tool_prompt?: string; + /** + * A list of tools the model may call. Currently, only functions are supported as a tool. 
+ * Use this to provide a list of + * functions the model may generate JSON inputs for. + */ + tools?: ChatCompletionInputTool[]; + /** + * An integer between 0 and 5 specifying the number of most likely tokens to return at each + * token position, each with + * an associated log probability. logprobs must be set to true if this parameter is used. + */ + top_logprobs?: number; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the + * tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% + * probability mass are considered. + */ + top_p?: number; + [property: string]: unknown; +} +export interface ChatCompletionInputMessage { + content?: string; + name?: string; + role: string; + tool_calls?: ChatCompletionInputToolCall[]; + [property: string]: unknown; +} +export interface ChatCompletionInputToolCall { + function: ChatCompletionInputFunctionDefinition; + id: number; + type: string; + [property: string]: unknown; +} +export interface ChatCompletionInputFunctionDefinition { + arguments: unknown; + description?: string; + name: string; + [property: string]: unknown; +} +export type ChatCompletionInputToolType = "OneOf" | ChatCompletionInputToolTypeObject; +export interface ChatCompletionInputToolTypeObject { + FunctionName: string; + [property: string]: unknown; +} +export interface ChatCompletionInputTool { + function: ChatCompletionInputFunctionDefinition; + type: string; + [property: string]: unknown; +} +/** + * Chat Completion Output. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. 
+ */ +export interface ChatCompletionOutput { + choices: ChatCompletionOutputComplete[]; + created: number; + id: string; + model: string; + object: string; + system_fingerprint: string; + usage: ChatCompletionOutputUsage; + [property: string]: unknown; +} +export interface ChatCompletionOutputComplete { + finish_reason: string; + index: number; + logprobs?: ChatCompletionOutputLogprobs; + message: ChatCompletionOutputMessage; + [property: string]: unknown; +} +export interface ChatCompletionOutputLogprobs { + content: ChatCompletionOutputLogprob[]; + [property: string]: unknown; +} +export interface ChatCompletionOutputLogprob { + logprob: number; + token: string; + top_logprobs: ChatCompletionOutputTopLogprob[]; + [property: string]: unknown; +} +export interface ChatCompletionOutputTopLogprob { + logprob: number; + token: string; + [property: string]: unknown; +} +export interface ChatCompletionOutputMessage { + content?: string; + name?: string; + role: string; + tool_calls?: ChatCompletionOutputToolCall[]; + [property: string]: unknown; +} +export interface ChatCompletionOutputToolCall { + function: ChatCompletionOutputFunctionDefinition; + id: number; + type: string; + [property: string]: unknown; +} +export interface ChatCompletionOutputFunctionDefinition { + arguments: unknown; + description?: string; + name: string; + [property: string]: unknown; +} +export interface ChatCompletionOutputUsage { + completion_tokens: number; + prompt_tokens: number; + total_tokens: number; + [property: string]: unknown; +} +/** + * Chat Completion Stream Output. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. 
+ */ +export interface ChatCompletionStreamOutput { + choices: ChatCompletionStreamOutputChoice[]; + created: number; + id: string; + model: string; + object: string; + system_fingerprint: string; + [property: string]: unknown; +} +export interface ChatCompletionStreamOutputChoice { + delta: ChatCompletionStreamOutputDelta; + finish_reason?: string; + index: number; + logprobs?: ChatCompletionStreamOutputLogprobs; + [property: string]: unknown; +} +export interface ChatCompletionStreamOutputDelta { + content?: string; + role: string; + tool_calls?: ChatCompletionStreamOutputDeltaToolCall; + [property: string]: unknown; +} +export interface ChatCompletionStreamOutputDeltaToolCall { + function: ChatCompletionStreamOutputFunction; + id: string; + index: number; + type: string; + [property: string]: unknown; +} +export interface ChatCompletionStreamOutputFunction { + arguments: string; + name?: string; + [property: string]: unknown; +} +export interface ChatCompletionStreamOutputLogprobs { + content: ChatCompletionStreamOutputLogprob[]; + [property: string]: unknown; +} +export interface ChatCompletionStreamOutputLogprob { + logprob: number; + token: string; + top_logprobs: ChatCompletionStreamOutputTopLogprob[]; + [property: string]: unknown; +} +export interface ChatCompletionStreamOutputTopLogprob { + logprob: number; + token: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/chat-completion/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/chat-completion/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..38a36eacb3402a0bde9ecd5f613cf2131d201c9a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/chat-completion/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/chat-completion/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;;;;;GAMG;AACH,MAAM,WAAW,mBAAmB;IACnC;;;;OAIG;IACH,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B;;;;;;;;;;;OAWG;IACH,UAAU,CAAC,EAAE,MAAM,EAAE,CAAC;IACtB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,OAAO,CAAC;IACnB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,0BAA0B,EAAE,CAAC;IACvC;;;OAGG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;;;;OAKG;IACH,CAAC,CAAC,EAAE,MAAM,CAAC;IACX;;;;OAIG;IACH,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAC1B,IAAI,CAAC,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;IAChB,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,WAAW,CAAC,EAAE,2BAA2B,CAAC;IAC1C;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;;;OAIG;IACH,KAAK,CAAC,EAAE,uBAAuB,EAAE,CAAC;IAClC;;;;OAIG;IACH,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB;;;;;OAKG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,0BAA0B;IAC1C,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;IACb,UAAU,CAAC,EAAE,2BAA2B,EAAE,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,2BAA2B;IAC3C,QAAQ,EAAE,qCAAqC,CAAC;IAChD,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,qCAAqC;IACrD,SAAS,EAAE,OAAO,CAAC;IACnB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,MAAM,2BAA2B,GAAG,OAAO,GAAG,iCAAiC,CAAC;AAEtF,MAAM,WAAW,iCAAiC;IACjD,YAAY,EAAE,MAAM,CAAC;IACrB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,uBAAuB;IACvC,QAAQ,EAAE,qCAAqC,CAAC;IAChD,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;;;GAMG;AACH,MAAM,WAAW,oBAAoB;IACpC,OAAO,EAAE,4BAA4B,EAAE,CAAC;IACxC,OAAO,EAAE,MAAM,CAAC;IAChB,EAAE,EAAE,MAAM,CAAC;IACX,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,kBAAkB,EAAE,MAAM,CAAC;IAC3B,KAAK,EAAE,yBAAyB,CAAC;IACjC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AA
ED,MAAM,WAAW,4BAA4B;IAC5C,aAAa,EAAE,MAAM,CAAC;IACtB,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,CAAC,EAAE,4BAA4B,CAAC;IACxC,OAAO,EAAE,2BAA2B,CAAC;IACrC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,4BAA4B;IAC5C,OAAO,EAAE,2BAA2B,EAAE,CAAC;IACvC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,2BAA2B;IAC3C,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,YAAY,EAAE,8BAA8B,EAAE,CAAC;IAC/C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,8BAA8B;IAC9C,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,2BAA2B;IAC3C,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;IACb,UAAU,CAAC,EAAE,4BAA4B,EAAE,CAAC;IAC5C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,4BAA4B;IAC5C,QAAQ,EAAE,sCAAsC,CAAC;IACjD,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,sCAAsC;IACtD,SAAS,EAAE,OAAO,CAAC;IACnB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,yBAAyB;IACzC,iBAAiB,EAAE,MAAM,CAAC;IAC1B,aAAa,EAAE,MAAM,CAAC;IACtB,YAAY,EAAE,MAAM,CAAC;IACrB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;;;GAMG;AACH,MAAM,WAAW,0BAA0B;IAC1C,OAAO,EAAE,gCAAgC,EAAE,CAAC;IAC5C,OAAO,EAAE,MAAM,CAAC;IAChB,EAAE,EAAE,MAAM,CAAC;IACX,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,kBAAkB,EAAE,MAAM,CAAC;IAC3B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,gCAAgC;IAChD,KAAK,EAAE,+BAA+B,CAAC;IACvC,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,CAAC,EAAE,kCAAkC,CAAC;IAC9C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,+BAA+B;IAC/C,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,UAAU,CAAC,EAAE,uCAAuC,CAAC;IACrD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,uCAAuC;IACvD,QAAQ,EAAE,kCAAkC,CAAC;IAC7C,EAAE,EAAE,MAAM,CAAC;IACX,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,kCAAkC;IAClD,SAAS,EAAE,MAAM,C
AAC;IAClB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,kCAAkC;IAClD,OAAO,EAAE,iCAAiC,EAAE,CAAC;IAC7C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,iCAAiC;IACjD,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,YAAY,EAAE,oCAAoC,EAAE,CAAC;IACrD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,oCAAoC;IACpD,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..39e40c9898d05c58afd10dd9f26dd806c8c9be84 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/depth-estimation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAyDf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/inference.d.ts new file mode 100644 index 
0000000000000000000000000000000000000000..2c3e21b73f42b499daf748cf4d156e8414b078b1 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/inference.d.ts @@ -0,0 +1,36 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Depth Estimation inference + */ +export interface DepthEstimationInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: { + [key: string]: unknown; + }; + [property: string]: unknown; +} +/** + * Outputs of inference for the Depth Estimation task + */ +export interface DepthEstimationOutput { + /** + * The predicted depth as an image + */ + depth?: unknown; + /** + * The predicted depth as a tensor + */ + predicted_depth?: unknown; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..14a61011afce27b3b54273888447f7589a22e875 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/depth-estimation/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/depth-estimation/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE;QAAE,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAA;KAAE,CAAC;IACxC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git 
a/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..3555981b8dcd9bac6bbfdb2e7dc159a51c6e2bd7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/document-question-answering/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA4Ef,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..7f2a572bc77567ec7d9034ed987f669317c78134 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/inference.d.ts @@ -0,0 +1,111 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Document Question Answering inference + */ +export interface DocumentQuestionAnsweringInput { + /** + 
* One (document, question) pair to answer + */ + inputs: DocumentQuestionAnsweringInputData; + /** + * Additional inference parameters + */ + parameters?: DocumentQuestionAnsweringParameters; + [property: string]: unknown; +} +/** + * One (document, question) pair to answer + */ +export interface DocumentQuestionAnsweringInputData { + /** + * The image on which the question is asked + */ + image: unknown; + /** + * A question to ask of the document + */ + question: string; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Document Question Answering + */ +export interface DocumentQuestionAnsweringParameters { + /** + * If the words in the document are too long to fit with the question for the model, it will + * be split in several chunks with some overlap. This argument controls the size of that + * overlap. + */ + doc_stride?: number; + /** + * Whether to accept impossible as an answer + */ + handle_impossible_answer?: boolean; + /** + * Language to use while running OCR. Defaults to english. + */ + lang?: string; + /** + * The maximum length of predicted answers (e.g., only answers with a shorter length are + * considered). + */ + max_answer_len?: number; + /** + * The maximum length of the question after tokenization. It will be truncated if needed. + */ + max_question_len?: number; + /** + * The maximum length of the total sentence (context + question) in tokens of each chunk + * passed to the model. The context will be split in several chunks (using doc_stride as + * overlap) if needed. + */ + max_seq_len?: number; + /** + * The number of answers to return (will be chosen by order of likelihood). Can return less + * than top_k answers if there are not enough options available within the context. + */ + top_k?: number; + /** + * A list of words and bounding boxes (normalized 0->1000). If provided, the inference will + * skip the OCR step and use the provided bounding boxes instead. 
+ */ + word_boxes?: WordBox[]; + [property: string]: unknown; +} +export type WordBox = number[] | string; +export type DocumentQuestionAnsweringOutput = DocumentQuestionAnsweringOutputElement[]; +/** + * Outputs of inference for the Document Question Answering task + */ +export interface DocumentQuestionAnsweringOutputElement { + /** + * The answer to the question. + */ + answer: string; + /** + * The end word index of the answer (in the OCR’d version of the input or provided word + * boxes). + */ + end: number; + /** + * The probability associated to the answer. + */ + score: number; + /** + * The start word index of the answer (in the OCR’d version of the input or provided word + * boxes). + */ + start: number; + /** + * The index of each word/box pair that is in the answer + */ + words: number[]; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..84ba917be8796ba4dc86c8e180b4a62b0b6d223b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/document-question-answering/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/document-question-answering/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,MAAM,EAAE,kCAAkC,CAAC;IAC3C;;OAEG;IACH,UAAU,CAAC,EAAE,mCAAmC,CAAC;IACjD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,kCAAkC;IAClD;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,mCAAmC;IACnD;;;;OAIG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,wBAAwB,CAAC,EAAE,OAAO,CAAC;IACnC;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;IACd;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAC1B;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,UAAU,CAAC,EAAE,OAAO,EAAE,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,OAAO,GAAG,MAAM,EAAE,GAAG,MAAM,CAAC;AACxC,MAAM,MAAM,+BAA+B,GAAG,sCAAsC,EAAE,CAAC;AACvF;;GAEG;AACH,MAAM,WAAW,sCAAsC;IACtD;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,GAAG,EAAE,MAAM,CAAC;IACZ;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;;OAGG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/data.d.ts.map 
b/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..1b887bd0408161d07a1a4a1e1d093eebde9bf0c8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/feature-extraction/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAgDf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..154360e6f09349a9e599327d0b3584e37a7db30a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/inference.d.ts @@ -0,0 +1,38 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +export type FeatureExtractionOutput = Array; +/** + * Feature Extraction Input. + * + * Auto-generated from TEI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts. + */ +export interface FeatureExtractionInput { + /** + * The text to embed. + */ + inputs: string; + normalize?: boolean; + /** + * The name of the prompt that should be used by for encoding. If not set, no prompt + * will be applied. + * + * Must be a key in the `Sentence Transformers` configuration `prompts` dictionary. + * + * For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ", + * ...}, + * then the sentence "What is the capital of France?" will be encoded as + * "query: What is the capital of France?" 
because the prompt text will be prepended before + * any text to encode. + */ + prompt_name?: string; + truncate?: boolean; + truncation_direction?: FeatureExtractionInputTruncationDirection; + [property: string]: unknown; +} +export type FeatureExtractionInputTruncationDirection = "Left" | "Right"; +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..4b0f66d277168989ecdc14a146de504ff66a2ef3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/feature-extraction/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/feature-extraction/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,MAAM,MAAM,uBAAuB,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC,CAAC;AAEtD;;;;;;GAMG;AACH,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;;;;;;;;;;OAWG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,QAAQ,CAAC,EAAE,OAAO,CAAC;IACnB,oBAAoB,CAAC,EAAE,yCAAyC,CAAC;IACjE,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,MAAM,yCAAyC,GAAG,MAAM,GAAG,OAAO,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/data.d.ts.map 
b/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..7460764fef7ff1b7292ebc6e976c542c4f1c8d70 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/fill-mask/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA0Ef,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..bccae838a01619b26cbe0c7f8db0c05eee361588 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/inference.d.ts @@ -0,0 +1,63 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Fill Mask inference + */ +export interface FillMaskInput { + /** + * The text with masked tokens + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: FillMaskParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Fill Mask + */ +export interface FillMaskParameters { + /** + * When passed, the model will limit the scores to the passed targets instead of looking up + * in the whole vocabulary. If the provided targets are not in the model vocab, they will be + * tokenized and the first resulting token will be used (with a warning, and that might be + * slower). + */ + targets?: string[]; + /** + * When passed, overrides the number of predictions to return. 
+ */ + top_k?: number; + [property: string]: unknown; +} +export type FillMaskOutput = FillMaskOutputElement[]; +/** + * Outputs of inference for the Fill Mask task + */ +export interface FillMaskOutputElement { + /** + * The corresponding probability + */ + score: number; + /** + * The corresponding input with the mask token prediction. + */ + sequence: string; + /** + * The predicted token id (to replace the masked one). + */ + token: number; + tokenStr: unknown; + /** + * The predicted token (to replace the masked one). + */ + token_str?: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..e50ae9c4ee622b06e20adac641299abcc4b4af1d --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/fill-mask/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/fill-mask/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,aAAa;IAC7B;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,kBAAkB,CAAC;IAChC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,kBAAkB;IAClC;;;;;OAKG;IACH,OAAO,CAAC,EAAE,MAAM,EAAE,CAAC;IACnB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,cAAc,GAAG,qBAAqB,EAAE,CAAC;AACrD;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,OAAO,CAAC;IAClB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/data.d.ts 
b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..51572756f916a5a2e462806f188e4e19704f0f5e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAmFf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..791dfc58f641e1139d4a105c5ff9c1958ce613aa --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/inference.d.ts @@ -0,0 +1,52 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Image Classification inference + */ +export interface ImageClassificationInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ImageClassificationParameters; + [property: string]: 
unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Image Classification + */ +export interface ImageClassificationParameters { + function_to_apply?: ClassificationOutputTransform; + /** + * When specified, limits the output to the top K most probable classes. + */ + top_k?: number; + [property: string]: unknown; +} +/** + * The function to apply to the model outputs in order to retrieve the scores. + */ +export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none"; +export type ImageClassificationOutput = ImageClassificationOutputElement[]; +/** + * Outputs of inference for the Image Classification task + */ +export interface ImageClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. + */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8938dfd11d0e42d04804f34e37933883c10ddcec --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-classification/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,wBAAwB;IACxC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,6BAA6B,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,6BAA6B;IAC7C,iBAAiB,CAAC,EAAE,6BAA6B,CAAC;IAClD;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,MAAM,6BAA6B,GAAG,SAAS,GAAG,SAAS,GAAG,MAAM,CAAC;AAC3E,MAAM,MAAM,yBAAyB,GAAG,gCAAgC,EAAE,CAAC;AAC3E;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-feature-extraction/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-feature-extraction/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-feature-extraction/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-feature-extraction/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-feature-extraction/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..dee098ef87008d1c25d2f383a2aaf75c43363830 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-feature-extraction/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-feature-extraction/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAkDf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..6d569e266e435d54e3862854721f83332d686c16 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-segmentation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA8Ff,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..9220638224c2d06c7b8d7de0e73bf61c5f11d980 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/inference.d.ts @@ -0,0 +1,66 @@ +/** + * Inference code generated from the JSON 
schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Image Segmentation inference + */ +export interface ImageSegmentationInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ImageSegmentationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Image Segmentation + */ +export interface ImageSegmentationParameters { + /** + * Threshold to use when turning the predicted masks into binary values. + */ + mask_threshold?: number; + /** + * Mask overlap threshold to eliminate small, disconnected segments. + */ + overlap_mask_area_threshold?: number; + /** + * Segmentation task to be performed, depending on model capabilities. + */ + subtask?: ImageSegmentationSubtask; + /** + * Probability threshold to filter out predicted masks. + */ + threshold?: number; + [property: string]: unknown; +} +export type ImageSegmentationSubtask = "instance" | "panoptic" | "semantic"; +export type ImageSegmentationOutput = ImageSegmentationOutputElement[]; +/** + * Outputs of inference for the Image Segmentation task + * + * A predicted mask / segment + */ +export interface ImageSegmentationOutputElement { + /** + * The label of the predicted segment + */ + label: string; + /** + * The corresponding mask as a black-and-white image + */ + mask: unknown; + /** + * The score or confidence degreee the model has + */ + score?: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..b9f8a06e59dd717e8539c9af7b60e209fc648fc1 --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-segmentation/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-segmentation/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,2BAA2B,CAAC;IACzC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,2BAA2B;IAC3C;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,2BAA2B,CAAC,EAAE,MAAM,CAAC;IACrC;;OAEG;IACH,OAAO,CAAC,EAAE,wBAAwB,CAAC;IACnC;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,wBAAwB,GAAG,UAAU,GAAG,UAAU,GAAG,UAAU,CAAC;AAC5E,MAAM,MAAM,uBAAuB,GAAG,8BAA8B,EAAE,CAAC;AACvE;;;;GAIG;AACH,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,IAAI,EAAE,OAAO,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-text-to-text/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-text-to-text/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-text-to-text/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-text-to-text/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-text-to-text/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..940a22fb6c65e4fda907dcdb399fc46c7aaa9449 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-text-to-text/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-text-to-text/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAyFf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-3d/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-3d/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-3d/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-3d/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-3d/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..9144641634040356a12476b32a1bc7de76597cfe --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-3d/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-3d/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAsEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# 
sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..a0a6edbdc7edc74b5e5d7aeb28ef368c52d475d8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-image/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAgGf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..85770a57e2b1b9b74d30cad2dac0df0e50f9c1e0 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/inference.d.ts @@ -0,0 +1,64 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Image To Image inference + */ +export interface ImageToImageInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ImageToImageParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Image To Image + */ +export interface ImageToImageParameters { + /** + * For diffusion models. A higher guidance scale value encourages the model to generate + * images closely linked to the text prompt at the expense of lower image quality. + */ + guidance_scale?: number; + /** + * One or several prompt to guide what NOT to include in image generation. 
+ */ + negative_prompt?: string[]; + /** + * For diffusion models. The number of denoising steps. More denoising steps usually lead to + * a higher quality image at the expense of slower inference. + */ + num_inference_steps?: number; + /** + * The size in pixel of the output image + */ + target_size?: TargetSize; + [property: string]: unknown; +} +/** + * The size in pixel of the output image + */ +export interface TargetSize { + height: number; + width: number; + [property: string]: unknown; +} +/** + * Outputs of inference for the Image To Image task + */ +export interface ImageToImageOutput { + /** + * The output image + */ + image?: unknown; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..b7c83dbc993fb0901e94dccc9dd4536ce5815a31 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-image/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-image/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,sBAAsB,CAAC;IACpC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,sBAAsB;IACtC;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,EAAE,CAAC;IAC3B;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B;;OAEG;IACH,WAAW,CAAC,EAAE,UAAU,CAAC;IACzB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,WAAW,UAAU;IAC1B,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,WAAW,kBAAkB;IAClC;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAChB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..b5169de9b969f2b729be5f9577976d0c9d30bf88 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-text/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA6Ef,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..907b5d16cb75bbc11169c061fc42b462bbf44250 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/inference.d.ts @@ -0,0 +1,139 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Image To Text inference + */ +export interface ImageToTextInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ImageToTextParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Image To Text + */ +export interface ImageToTextParameters { + /** + * Parametrization of the text generation process + */ + generate?: GenerationParameters; + /** + * The amount of maximum tokens to generate. + */ + max_new_tokens?: number; + [property: string]: unknown; +} +/** + * Parametrization of the text generation process + * + * Ad-hoc parametrization of the text generation process + */ +export interface GenerationParameters { + /** + * Whether to use sampling instead of greedy decoding when generating new tokens. + */ + do_sample?: boolean; + /** + * Controls the stopping condition for beam-based methods. + */ + early_stopping?: EarlyStoppingUnion; + /** + * If set to float strictly between 0 and 1, only tokens with a conditional probability + * greater than epsilon_cutoff will be sampled. 
In the paper, suggested values range from + * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + * Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + */ + epsilon_cutoff?: number; + /** + * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + * float strictly between 0 and 1, a token is only considered if it is greater than either + * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + * for more details. + */ + eta_cutoff?: number; + /** + * The maximum length (in tokens) of the generated text, including the input. + */ + max_length?: number; + /** + * The maximum number of tokens to generate. Takes precedence over maxLength. + */ + max_new_tokens?: number; + /** + * The minimum length (in tokens) of the generated text, including the input. + */ + min_length?: number; + /** + * The minimum number of tokens to generate. Takes precedence over maxLength. + */ + min_new_tokens?: number; + /** + * Number of groups to divide num_beams into in order to ensure diversity among different + * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + */ + num_beam_groups?: number; + /** + * Number of beams to use for beam search. + */ + num_beams?: number; + /** + * The value balances the model confidence and the degeneration penalty in contrastive + * search decoding. + */ + penalty_alpha?: number; + /** + * The value used to modulate the next token probabilities. + */ + temperature?: number; + /** + * The number of highest probability vocabulary tokens to keep for top-k-filtering. 
+ */ + top_k?: number; + /** + * If set to float < 1, only the smallest set of most probable tokens with probabilities + * that add up to top_p or higher are kept for generation. + */ + top_p?: number; + /** + * Local typicality measures how similar the conditional probability of predicting a target + * token next is to the expected conditional probability of predicting a random token next, + * given the partial text already generated. If set to float < 1, the smallest set of the + * most locally typical tokens with probabilities that add up to typical_p or higher are + * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + */ + typical_p?: number; + /** + * Whether the model should use the past last key/values attentions to speed up decoding + */ + use_cache?: boolean; + [property: string]: unknown; +} +/** + * Controls the stopping condition for beam-based methods. + */ +export type EarlyStoppingUnion = boolean | "never"; +/** + * Outputs of inference for the Image To Text task + */ +export interface ImageToTextOutput { + generatedText: unknown; + /** + * The generated text. 
+ */ + generated_text?: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..f76edbbbbec929d1c72a09935396fe97df0055bb --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/image-to-text/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-text/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,qBAAqB,CAAC;IACnC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,QAAQ,CAAC,EAAE,oBAAoB,CAAC;IAChC;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC,aAAa,EAAE,OAAO,CAAC;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAA
C;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/index.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/index.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..dcc1a18211831ee6c775fc10ecc08a94096bd3c4 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/index.d.ts @@ -0,0 +1,87 @@ +import type { PipelineType } from "../pipelines"; +export type * from "./audio-classification/inference"; +export type * from "./automatic-speech-recognition/inference"; +export type { ChatCompletionInput, ChatCompletionInputMessage, ChatCompletionOutput, ChatCompletionOutputComplete, ChatCompletionOutputMessage, ChatCompletionStreamOutput, ChatCompletionStreamOutputChoice, ChatCompletionStreamOutputDelta, } from "./chat-completion/inference"; +export type * from "./document-question-answering/inference"; +export type * from "./feature-extraction/inference"; +export type * from "./fill-mask/inference"; +export type { ImageClassificationInput, ImageClassificationOutput, ImageClassificationOutputElement, ImageClassificationParameters, } from "./image-classification/inference"; +export type * from "./image-to-image/inference"; +export type { ImageToTextInput, ImageToTextOutput, ImageToTextParameters } from "./image-to-text/inference"; +export type * from "./image-segmentation/inference"; +export type * from "./object-detection/inference"; +export type * from "./depth-estimation/inference"; +export type * from "./question-answering/inference"; +export type * from "./sentence-similarity/inference"; +export type * from "./summarization/inference"; +export type * from "./table-question-answering/inference"; +export type { TextToImageInput, TextToImageOutput, TextToImageParameters } from "./text-to-image/inference"; +export type { TextToAudioParameters, TextToSpeechInput, TextToSpeechOutput } from "./text-to-speech/inference"; +export type * from "./token-classification/inference"; +export 
type { Text2TextGenerationParameters, Text2TextGenerationTruncationStrategy, TranslationInput, TranslationOutput, } from "./translation/inference"; +export type { ClassificationOutputTransform, TextClassificationInput, TextClassificationOutput, TextClassificationOutputElement, TextClassificationParameters, } from "./text-classification/inference"; +export type { TextGenerationOutputFinishReason, TextGenerationOutputPrefillToken, TextGenerationInput, TextGenerationOutput, TextGenerationOutputDetails, TextGenerationInputGenerateParameters, TextGenerationOutputBestOfSequence, TextGenerationOutputToken, TextGenerationStreamOutputStreamDetails, TextGenerationStreamOutput, } from "./text-generation/inference"; +export type * from "./video-classification/inference"; +export type * from "./visual-question-answering/inference"; +export type * from "./zero-shot-classification/inference"; +export type * from "./zero-shot-image-classification/inference"; +export type { BoundingBox, ZeroShotObjectDetectionInput, ZeroShotObjectDetectionInputData, ZeroShotObjectDetectionOutput, ZeroShotObjectDetectionOutputElement, } from "./zero-shot-object-detection/inference"; +import type { ModelLibraryKey } from "../model-libraries"; +/** + * Model libraries compatible with each ML task + */ +export declare const TASKS_MODEL_LIBRARIES: Record; +export declare const TASKS_DATA: Record; +export interface ExampleRepo { + description: string; + id: string; +} +export type TaskDemoEntry = { + filename: string; + type: "audio"; +} | { + data: Array<{ + label: string; + score: number; + }>; + type: "chart"; +} | { + filename: string; + type: "img"; +} | { + table: string[][]; + type: "tabular"; +} | { + content: string; + label: string; + type: "text"; +} | { + text: string; + tokens: Array<{ + end: number; + start: number; + type: string; + }>; + type: "text-with-tokens"; +}; +export interface TaskDemo { + inputs: TaskDemoEntry[]; + outputs: TaskDemoEntry[]; +} +export interface TaskData { + 
datasets: ExampleRepo[]; + demo: TaskDemo; + id: PipelineType; + canonicalId?: PipelineType; + isPlaceholder?: boolean; + label: string; + libraries: ModelLibraryKey[]; + metrics: ExampleRepo[]; + models: ExampleRepo[]; + spaces: ExampleRepo[]; + summary: string; + widgetModels: string[]; + youtubeId?: string; +} +export type TaskDataCustom = Omit; +//# sourceMappingURL=index.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/index.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/index.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..17cc42ac427af3b21c806e5032121d8567021b69 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/index.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/tasks/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,cAAc,CAAC;AA0CjD,mBAAmB,kCAAkC,CAAC;AACtD,mBAAmB,0CAA0C,CAAC;AAC9D,YAAY,EACX,mBAAmB,EACnB,0BAA0B,EAC1B,oBAAoB,EACpB,4BAA4B,EAC5B,2BAA2B,EAC3B,0BAA0B,EAC1B,gCAAgC,EAChC,+BAA+B,GAC/B,MAAM,6BAA6B,CAAC;AACrC,mBAAmB,yCAAyC,CAAC;AAC7D,mBAAmB,gCAAgC,CAAC;AACpD,mBAAmB,uBAAuB,CAAC;AAC3C,YAAY,EACX,wBAAwB,EACxB,yBAAyB,EACzB,gCAAgC,EAChC,6BAA6B,GAC7B,MAAM,kCAAkC,CAAC;AAC1C,mBAAmB,4BAA4B,CAAC;AAChD,YAAY,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,qBAAqB,EAAE,MAAM,2BAA2B,CAAC;AAC5G,mBAAmB,gCAAgC,CAAC;AACpD,mBAAmB,8BAA8B,CAAC;AAClD,mBAAmB,8BAA8B,CAAC;AAClD,mBAAmB,gCAAgC,CAAC;AACpD,mBAAmB,iCAAiC,CAAC;AACrD,mBAAmB,2BAA2B,CAAC;AAC/C,mBAAmB,sCAAsC,CAAC;AAC1D,YAAY,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,qBAAqB,EAAE,MAAM,2BAA2B,CAAC;AAC5G,YAAY,EAAE,qBAAqB,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,MAAM,4BAA4B,CAAC;AAC/G,mBAAmB,kCAAkC,CAAC;AACtD,YAAY,EACX,6BAA6B,EAC7B,qCAAqC,EACrC,gBAAgB,EAChB,iBAAiB,GACjB,MAAM,yBAAyB,CAAC;AACjC,YAAY,EACX,6BAA6B,EAC7B,uBAAuB,EACvB,wBAAwB,EACxB,+BAA+B,EAC/B,4BAA4B,GAC5B,MAAM,iCAAiC,CAAC;AACzC,YAAY,EACX,gCAAgC,EAChC,gCAAgC,EAChC,mBAAmB,EACnB,oBAAoB,EACpB,2BAA2B,EAC3B,qCAAqC,EACrC,kCAAkC,
EAClC,yBAAyB,EACzB,uCAAuC,EACvC,0BAA0B,GAC1B,MAAM,6BAA6B,CAAC;AACrC,mBAAmB,kCAAkC,CAAC;AACtD,mBAAmB,uCAAuC,CAAC;AAC3D,mBAAmB,sCAAsC,CAAC;AAC1D,mBAAmB,4CAA4C,CAAC;AAChE,YAAY,EACX,WAAW,EACX,4BAA4B,EAC5B,gCAAgC,EAChC,6BAA6B,EAC7B,oCAAoC,GACpC,MAAM,wCAAwC,CAAC;AAEhD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,oBAAoB,CAAC;AAE1D;;GAEG;AACH,eAAO,MAAM,qBAAqB,EAAE,MAAM,CAAC,YAAY,EAAE,eAAe,EAAE,CA0DzE,CAAC;AAoBF,eAAO,MAAM,UAAU,EAAE,MAAM,CAAC,YAAY,EAAE,QAAQ,GAAG,SAAS,CAkDxD,CAAC;AAEX,MAAM,WAAW,WAAW;IAC3B,WAAW,EAAE,MAAM,CAAC;IACpB,EAAE,EAAE,MAAM,CAAC;CACX;AAED,MAAM,MAAM,aAAa,GACtB;IACA,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,OAAO,CAAC;CACb,GACD;IACA,IAAI,EAAE,KAAK,CAAC;QACX,KAAK,EAAE,MAAM,CAAC;QACd,KAAK,EAAE,MAAM,CAAC;KACd,CAAC,CAAC;IACH,IAAI,EAAE,OAAO,CAAC;CACb,GACD;IACA,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,KAAK,CAAC;CACX,GACD;IACA,KAAK,EAAE,MAAM,EAAE,EAAE,CAAC;IAClB,IAAI,EAAE,SAAS,CAAC;CACf,GACD;IACA,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;CACZ,GACD;IACA,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,KAAK,CAAC;QACb,GAAG,EAAE,MAAM,CAAC;QACZ,KAAK,EAAE,MAAM,CAAC;QACd,IAAI,EAAE,MAAM,CAAC;KACb,CAAC,CAAC;IACH,IAAI,EAAE,kBAAkB,CAAC;CACxB,CAAC;AAEL,MAAM,WAAW,QAAQ;IACxB,MAAM,EAAE,aAAa,EAAE,CAAC;IACxB,OAAO,EAAE,aAAa,EAAE,CAAC;CACzB;AAED,MAAM,WAAW,QAAQ;IACxB,QAAQ,EAAE,WAAW,EAAE,CAAC;IACxB,IAAI,EAAE,QAAQ,CAAC;IACf,EAAE,EAAE,YAAY,CAAC;IACjB,WAAW,CAAC,EAAE,YAAY,CAAC;IAC3B,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,KAAK,EAAE,MAAM,CAAC;IACd,SAAS,EAAE,eAAe,EAAE,CAAC;IAC7B,OAAO,EAAE,WAAW,EAAE,CAAC;IACvB,MAAM,EAAE,WAAW,EAAE,CAAC;IACtB,MAAM,EAAE,WAAW,EAAE,CAAC;IACtB,OAAO,EAAE,MAAM,CAAC;IAChB,YAAY,EAAE,MAAM,EAAE,CAAC;IACvB,SAAS,CAAC,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,MAAM,cAAc,GAAG,IAAI,CAAC,QAAQ,EAAE,IAAI,GAAG,OAAO,GAAG,WAAW,CAAC,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/mask-generation/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/mask-generation/data.d.ts new file mode 100644 index 
0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/mask-generation/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/mask-generation/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/mask-generation/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..78b2e2170eef9f6c47ac3ff7d6f7e124826d2409 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/mask-generation/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/mask-generation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAkDf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..01be0fc6384e62edfe46c100f1e4f94783db0c26 --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAiFf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..f18ef9178414a5d306377c913d96d26c62393891 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/inference.d.ts @@ -0,0 +1,63 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Object Detection inference + */ +export interface ObjectDetectionInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ObjectDetectionParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Object Detection + */ +export interface ObjectDetectionParameters { + /** + * The probability necessary to make a prediction. + */ + threshold?: number; + [property: string]: unknown; +} +/** + * The predicted bounding box. Coordinates are relative to the top left corner of the input + * image. + */ +export interface BoundingBox { + xmax: number; + xmin: number; + ymax: number; + ymin: number; + [property: string]: unknown; +} +export type ObjectDetectionOutput = ObjectDetectionOutputElement[]; +/** + * Outputs of inference for the Object Detection task + */ +export interface ObjectDetectionOutputElement { + /** + * The predicted bounding box. Coordinates are relative to the top left corner of the input + * image. 
+ */ + box: BoundingBox; + /** + * The predicted label for the bounding box + */ + label: string; + /** + * The associated score / probability + */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..882e723940dba39251a73674ea4dba3a7fefcfd2 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/object-detection/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/object-detection/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,yBAAyB,CAAC;IACvC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,yBAAyB;IACzC;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;GAGG;AACH,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,qBAAqB,GAAG,4BAA4B,EAAE,CAAC;AACnE;;GAEG;AACH,MAAM,WAAW,4BAA4B;IAC5C;;;OAGG;IACH,GAAG,EAAE,WAAW,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/placeholder/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/placeholder/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/placeholder/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from 
".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/placeholder/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/placeholder/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..460b5d1560be028c648af269d3bc2059bf6542c4 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/placeholder/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/placeholder/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAgBf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..f2d3f40dec1040cfdf22849ac8c73f02c63a7070 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/question-answering/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAkEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..b280b5f278d6192a05dc5091511a59ae9d9f6cb4 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/inference.d.ts @@ -0,0 +1,100 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Question Answering inference + */ +export interface QuestionAnsweringInput { + /** + * One (context, question) pair to answer + */ + inputs: QuestionAnsweringInputData; + /** + * Additional inference parameters + */ + parameters?: QuestionAnsweringParameters; + [property: string]: unknown; +} +/** + * One (context, question) pair to answer + */ +export interface QuestionAnsweringInputData { + /** + * The context to be used for answering the question + */ + context: string; + /** + * The question to be answered + */ + question: string; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Question Answering + */ +export interface QuestionAnsweringParameters { + /** + * Attempts to align the answer to real words. Improves quality on space separated + * languages. Might hurt on non-space-separated languages (like Japanese or Chinese) + */ + align_to_words?: boolean; + /** + * If the context is too long to fit with the question for the model, it will be split in + * several chunks with some overlap. This argument controls the size of that overlap. 
+ */ + doc_stride?: number; + /** + * Whether to accept impossible as an answer. + */ + handle_impossible_answer?: boolean; + /** + * The maximum length of predicted answers (e.g., only answers with a shorter length are + * considered). + */ + max_answer_len?: number; + /** + * The maximum length of the question after tokenization. It will be truncated if needed. + */ + max_question_len?: number; + /** + * The maximum length of the total sentence (context + question) in tokens of each chunk + * passed to the model. The context will be split in several chunks (using docStride as + * overlap) if needed. + */ + max_seq_len?: number; + /** + * The number of answers to return (will be chosen by order of likelihood). Note that we + * return less than topk answers if there are not enough options available within the + * context. + */ + top_k?: number; + [property: string]: unknown; +} +export type QuestionAnsweringOutput = QuestionAnsweringOutputElement[]; +/** + * Outputs of inference for the Question Answering task + */ +export interface QuestionAnsweringOutputElement { + /** + * The answer to the question. + */ + answer: string; + /** + * The character position in the input where the answer ends. + */ + end: number; + /** + * The probability associated to the answer. + */ + score: number; + /** + * The character position in the input where the answer begins. 
+ */ + start: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..42521842a8b1c13b56079c458ce5bbfb76424758 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/question-answering/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/question-answering/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,MAAM,EAAE,0BAA0B,CAAC;IACnC;;OAEG;IACH,UAAU,CAAC,EAAE,2BAA2B,CAAC;IACzC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,0BAA0B;IAC1C;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,2BAA2B;IAC3C;;;OAGG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;;OAGG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,wBAAwB,CAAC,EAAE,OAAO,CAAC;IACnC;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAC1B;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;;;OAIG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,uBAAuB,GAAG,8BAA8B,EAAE,CAAC;AACvE;;GAEG;AACH,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,GAAG,EAAE,MAAM,CAAC;IACZ;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/reinforcement-learning/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/reinforcement-learning/data.d.ts new file mode 100644 index 
0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/reinforcement-learning/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/reinforcement-learning/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/reinforcement-learning/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..eddb7470a21876f1f4fa90122b4a2b2511f10f1e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/reinforcement-learning/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/reinforcement-learning/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAsEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..cacb5083b6e628ce2d6e3f1609621bc9c8f14a24 --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/sentence-similarity/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAgGf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..306c78217832eb168c37b676fb7e61bd6a6d4905 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/inference.d.ts @@ -0,0 +1,32 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +export type SentenceSimilarityOutput = number[]; +/** + * Inputs for Sentence similarity inference + */ +export interface SentenceSimilarityInput { + inputs: SentenceSimilarityInputData; + /** + * Additional inference parameters + */ + parameters?: { + [key: string]: unknown; + }; + [property: string]: unknown; +} +export interface SentenceSimilarityInputData { + /** + * A list of strings which will be compared against the source_sentence. + */ + sentences: string[]; + /** + * The string that you wish to compare the other strings with. This can be a phrase, + * sentence, or longer passage, depending on the model being used. 
+ */ + sourceSentence: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..23655e829b8f8eb9277de6d83ca6a9c7fd49770c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/sentence-similarity/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/sentence-similarity/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,MAAM,MAAM,wBAAwB,GAAG,MAAM,EAAE,CAAC;AAEhD;;GAEG;AACH,MAAM,WAAW,uBAAuB;IACvC,MAAM,EAAE,2BAA2B,CAAC;IACpC;;OAEG;IACH,UAAU,CAAC,EAAE;QAAE,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAA;KAAE,CAAC;IACxC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,2BAA2B;IAC3C;;OAEG;IACH,SAAS,EAAE,MAAM,EAAE,CAAC;IACpB;;;OAGG;IACH,cAAc,EAAE,MAAM,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..84b1a435fc44aa177ea51f67778065e027f71414 --- /dev/null 
+++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/summarization/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAuEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..2fb5047c753dbe23a8eb273ceeccb4d7c64852ca --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/inference.d.ts @@ -0,0 +1,55 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Summarization inference + * + * Inputs for Text2text Generation inference + */ +export interface SummarizationInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: Text2TextGenerationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Text2text Generation + */ +export interface Text2TextGenerationParameters { + /** + * Whether to clean up the potential extra spaces in the text output. 
+ */ + clean_up_tokenization_spaces?: boolean; + /** + * Additional parametrization of the text generation algorithm + */ + generate_parameters?: { + [key: string]: unknown; + }; + /** + * The truncation strategy to use + */ + truncation?: Text2TextGenerationTruncationStrategy; + [property: string]: unknown; +} +export type Text2TextGenerationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second"; +/** + * Outputs of inference for the Summarization task + */ +export interface SummarizationOutput { + /** + * The summarized text. + */ + summary_text: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..549a0bbe84c8e3ba7d14ee1246e039685e160b4c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/summarization/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/summarization/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;;;GAIG;AACH,MAAM,WAAW,kBAAkB;IAClC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,6BAA6B,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,6BAA6B;IAC7C;;OAEG;IACH,4BAA4B,CAAC,EAAE,OAAO,CAAC;IACvC;;OAEG;IACH,mBAAmB,CAAC,EAAE;QAAE,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAA;KAAE,CAAC;IACjD;;OAEG;IACH,UAAU,CAAC,EAAE,qCAAqC,CAAC;IACnD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,MAAM,qCAAqC,GAAG,iBAAiB,GAAG,eAAe,GAAG,YAAY,GAAG,aAAa,CAAC;AAEvH;;GAEG;AACH,MAAM,WAAW,mBAAmB;IACnC;;OAEG;IACH,YAAY,EAAE,MAAM,CAAC;IACrB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/data.d.ts 
b/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..25b1677de740f1579e8c5b84a83bb193f6530621 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/table-question-answering/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAsDf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..7294e8b7925d9829503e1d615078c13888b826e5 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/inference.d.ts @@ -0,0 +1,62 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Table Question Answering inference + */ +export interface TableQuestionAnsweringInput { + /** + * One (table, question) pair to answer + */ + inputs: TableQuestionAnsweringInputData; + /** + * Additional inference 
parameters + */ + parameters?: { + [key: string]: unknown; + }; + [property: string]: unknown; +} +/** + * One (table, question) pair to answer + */ +export interface TableQuestionAnsweringInputData { + /** + * The question to be answered about the table + */ + question: string; + /** + * The table to serve as context for the questions + */ + table: { + [key: string]: string[]; + }; + [property: string]: unknown; +} +export type TableQuestionAnsweringOutput = TableQuestionAnsweringOutputElement[]; +/** + * Outputs of inference for the Table Question Answering task + */ +export interface TableQuestionAnsweringOutputElement { + /** + * If the model has an aggregator, this returns the aggregator. + */ + aggregator?: string; + /** + * The answer of the question given the table. If there is an aggregator, the answer will be + * preceded by `AGGREGATOR >`. + */ + answer: string; + /** + * List of strings made up of the answer cell values. + */ + cells: string[]; + /** + * Coordinates of the cells of the answers. 
+ */ + coordinates: Array; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..38583bf0508f3ef866f25b577cc4cc4783121e31 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/table-question-answering/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/table-question-answering/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,2BAA2B;IAC3C;;OAEG;IACH,MAAM,EAAE,+BAA+B,CAAC;IACxC;;OAEG;IACH,UAAU,CAAC,EAAE;QACZ,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;KACvB,CAAC;IACF,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,+BAA+B;IAC/C;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE;QACN,CAAC,GAAG,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;KACxB,CAAC;IACF,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,4BAA4B,GAAG,mCAAmC,EAAE,CAAC;AACjF;;GAEG;AACH,MAAM,WAAW,mCAAmC;IACnD;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;;OAGG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB;;OAEG;IACH,WAAW,EAAE,KAAK,CAAC,MAAM,EAAE,CAAC,CAAC;IAC7B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-classification/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-classification/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-classification/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; 
+//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-classification/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-classification/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..7a167035d0f8e3374e4a6c6ffd23e2c928a5730e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-classification/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/tabular-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA+Df,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-regression/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-regression/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-regression/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-regression/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-regression/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..fc94bf15b368489a804b8048ddad4cf1bd26d447 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/tabular-regression/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/tabular-regression/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAoDf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No 
newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8d86818f03bdacfe9de0541c283f70ed2ad178ae --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAsFf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..bb58192b7e8f85c1e94119af1359c56fe5fbda0b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/inference.d.ts @@ -0,0 +1,52 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Text Classification inference + */ +export interface TextClassificationInput { + /** + * The text to classify + */ + inputs: string; + /** + * 
Additional inference parameters + */ + parameters?: TextClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Text Classification + */ +export interface TextClassificationParameters { + function_to_apply?: ClassificationOutputTransform; + /** + * When specified, limits the output to the top K most probable classes. + */ + top_k?: number; + [property: string]: unknown; +} +/** + * The function to apply to the model outputs in order to retrieve the scores. + */ +export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none"; +export type TextClassificationOutput = TextClassificationOutputElement[]; +/** + * Outputs of inference for the Text Classification task + */ +export interface TextClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. + */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..c9cc1bf306e3adba463e98af13b963fefcd7c23f --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-classification/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,uBAAuB;IACvC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,4BAA4B,CAAC;IAC1C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,4BAA4B;IAC5C,iBAAiB,CAAC,EAAE,6BAA6B,CAAC;IAClD;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,MAAM,6BAA6B,GAAG,SAAS,GAAG,SAAS,GAAG,MAAM,CAAC;AAC3E,MAAM,MAAM,wBAAwB,GAAG,+BAA+B,EAAE,CAAC;AACzE;;GAEG;AACH,MAAM,WAAW,+BAA+B;IAC/C;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..78afde15c4c7a9214319f4878ff208a55d43f2b7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-generation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAyGf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at 
end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..499c7bf6070818e5aaa14fefcb2ad1152d3bbb66 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/inference.d.ts @@ -0,0 +1,126 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Text Generation Input. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. + */ +export interface TextGenerationInput { + inputs: string; + parameters?: TextGenerationInputGenerateParameters; + stream?: boolean; + [property: string]: unknown; +} +export interface TextGenerationInputGenerateParameters { + best_of?: number; + decoder_input_details?: boolean; + details?: boolean; + do_sample?: boolean; + frequency_penalty?: number; + grammar?: TextGenerationInputGrammarType; + max_new_tokens?: number; + repetition_penalty?: number; + return_full_text?: boolean; + seed?: number; + stop?: string[]; + temperature?: number; + top_k?: number; + top_n_tokens?: number; + top_p?: number; + truncate?: number; + typical_p?: number; + watermark?: boolean; + [property: string]: unknown; +} +export interface TextGenerationInputGrammarType { + type: Type; + /** + * A string that represents a [JSON Schema](https://json-schema.org/). + * + * JSON Schema is a declarative language that allows to annotate JSON documents + * with types and descriptions. + */ + value: unknown; + [property: string]: unknown; +} +export type Type = "json" | "regex"; +/** + * Text Generation Output. + * + * Auto-generated from TGI specs. 
+ * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. + */ +export interface TextGenerationOutput { + details?: TextGenerationOutputDetails; + generated_text: string; + [property: string]: unknown; +} +export interface TextGenerationOutputDetails { + best_of_sequences?: TextGenerationOutputBestOfSequence[]; + finish_reason: TextGenerationOutputFinishReason; + generated_tokens: number; + prefill: TextGenerationOutputPrefillToken[]; + seed?: number; + tokens: TextGenerationOutputToken[]; + top_tokens?: Array; + [property: string]: unknown; +} +export interface TextGenerationOutputBestOfSequence { + finish_reason: TextGenerationOutputFinishReason; + generated_text: string; + generated_tokens: number; + prefill: TextGenerationOutputPrefillToken[]; + seed?: number; + tokens: TextGenerationOutputToken[]; + top_tokens?: Array; + [property: string]: unknown; +} +export type TextGenerationOutputFinishReason = "length" | "eos_token" | "stop_sequence"; +export interface TextGenerationOutputPrefillToken { + id: number; + logprob: number; + text: string; + [property: string]: unknown; +} +export interface TextGenerationOutputToken { + id: number; + logprob: number; + special: boolean; + text: string; + [property: string]: unknown; +} +/** + * Text Generation Stream Output. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. 
+ */ +export interface TextGenerationStreamOutput { + details?: TextGenerationStreamOutputStreamDetails; + generated_text?: string; + index: number; + token: TextGenerationStreamOutputToken; + top_tokens?: TextGenerationStreamOutputToken[]; + [property: string]: unknown; +} +export interface TextGenerationStreamOutputStreamDetails { + finish_reason: TextGenerationOutputFinishReason; + generated_tokens: number; + seed?: number; + [property: string]: unknown; +} +export interface TextGenerationStreamOutputToken { + id: number; + logprob: number; + special: boolean; + text: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8b16b8a5547e5da2f91f6132f4eb89416d6bc57f --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-generation/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-generation/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;;;;;GAMG;AACH,MAAM,WAAW,mBAAmB;IACnC,MAAM,EAAE,MAAM,CAAC;IACf,UAAU,CAAC,EAAE,qCAAqC,CAAC;IACnD,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,qCAAqC;IACrD,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,qBAAqB,CAAC,EAAE,OAAO,CAAC;IAChC,OAAO,CAAC,EAAE,OAAO,CAAC;IAClB,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,OAAO,CAAC,EAAE,8BAA8B,CAAC;IACzC,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,gBAAgB,CAAC,EAAE,OAAO,CAAC;IAC3B,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;IAChB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,8BAA8B;IAC9C,IAAI,EAAE,IAAI,CAAC;IACX;;;;;OAKG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,MAAM,IAAI,GAAG,MAAM,GAAG,OAAO,CAAC;AAEpC;;;;;;GAMG;AACH,MAAM,WAAW,oBAAoB;IACpC,OAAO,CAAC,EAAE,2BAA2B,CAAC;IACtC,cAAc,EAAE,MAAM,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,2BAA2B;IAC3C,iBAAiB,CAAC,EAAE,kCAAkC,EAAE,CAAC;IACzD,aAAa,EAAE,gCAAgC,CAAC;IAChD,gBAAgB,EAAE,MAAM,CAAC;IACzB,OAAO,EAAE,gCAAgC,EAAE,CAAC;IAC5C,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,yBAAyB,EAAE,CAAC;IACpC,UAAU,CAAC,EAAE,KAAK,CAAC,yBAAyB,EAAE,CAAC,CAAC;IAChD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,kCAAkC;IAClD,aAAa,EAAE,gCAAgC,CAAC;IAChD,cAAc,EAAE,MAAM,CAAC;IACvB,gBAAgB,EAAE,MAAM,CAAC;IACzB,OAAO,EAAE,gCAAgC,EAAE,CAAC;IAC5C,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,yBAAyB,EAAE,CAAC;IACpC,UAAU,CAAC,EAAE,KAAK,CAAC,yBAAyB,EAAE,CAAC,CAAC;IAChD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,MAAM,gCAAgC,GAAG,QAAQ,GAAG,WAAW,GAAG,eAAe,CAAC;AAExF,MAAM,WAAW,gCAAgC;IAChD,EAAE,EAAE,M
AAM,CAAC;IACX,OAAO,EAAE,MAAM,CAAC;IAChB,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,yBAAyB;IACzC,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,OAAO,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;;;GAMG;AACH,MAAM,WAAW,0BAA0B;IAC1C,OAAO,CAAC,EAAE,uCAAuC,CAAC;IAClD,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,+BAA+B,CAAC;IACvC,UAAU,CAAC,EAAE,+BAA+B,EAAE,CAAC;IAC/C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,uCAAuC;IACvD,aAAa,EAAE,gCAAgC,CAAC;IAChD,gBAAgB,EAAE,MAAM,CAAC;IACzB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,+BAA+B;IAC/C,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,OAAO,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-3d/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-3d/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-3d/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-3d/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-3d/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..582a425506035cba5217f98702638c87388ee52c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-3d/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-3d/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAmDf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-audio/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-audio/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..1ec0490a33a740b7728d1991ef9d7959f79f6918 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-audio/inference.d.ts @@ -0,0 +1,139 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Text To Audio inference + */ +export interface TextToAudioInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: TextToAudioParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Text To Audio + */ +export interface TextToAudioParameters { + /** + * Parametrization of the text generation process + */ + generate?: GenerationParameters; + [property: string]: unknown; +} +/** + * Parametrization of the text generation process + * + * Ad-hoc parametrization of the text generation process + */ +export interface GenerationParameters { + /** + * Whether to use sampling instead of greedy decoding when generating new tokens. + */ + do_sample?: boolean; + /** + * Controls the stopping condition for beam-based methods. + */ + early_stopping?: EarlyStoppingUnion; + /** + * If set to float strictly between 0 and 1, only tokens with a conditional probability + * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + * 3e-4 to 9e-4, depending on the size of the model. 
See [Truncation Sampling as Language + * Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + */ + epsilon_cutoff?: number; + /** + * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + * float strictly between 0 and 1, a token is only considered if it is greater than either + * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + * for more details. + */ + eta_cutoff?: number; + /** + * The maximum length (in tokens) of the generated text, including the input. + */ + max_length?: number; + /** + * The maximum number of tokens to generate. Takes precedence over maxLength. + */ + max_new_tokens?: number; + /** + * The minimum length (in tokens) of the generated text, including the input. + */ + min_length?: number; + /** + * The minimum number of tokens to generate. Takes precedence over maxLength. + */ + min_new_tokens?: number; + /** + * Number of groups to divide num_beams into in order to ensure diversity among different + * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + */ + num_beam_groups?: number; + /** + * Number of beams to use for beam search. + */ + num_beams?: number; + /** + * The value balances the model confidence and the degeneration penalty in contrastive + * search decoding. + */ + penalty_alpha?: number; + /** + * The value used to modulate the next token probabilities. + */ + temperature?: number; + /** + * The number of highest probability vocabulary tokens to keep for top-k-filtering. 
+ */ + top_k?: number; + /** + * If set to float < 1, only the smallest set of most probable tokens with probabilities + * that add up to top_p or higher are kept for generation. + */ + top_p?: number; + /** + * Local typicality measures how similar the conditional probability of predicting a target + * token next is to the expected conditional probability of predicting a random token next, + * given the partial text already generated. If set to float < 1, the smallest set of the + * most locally typical tokens with probabilities that add up to typical_p or higher are + * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + */ + typical_p?: number; + /** + * Whether the model should use the past last key/values attentions to speed up decoding + */ + use_cache?: boolean; + [property: string]: unknown; +} +/** + * Controls the stopping condition for beam-based methods. + */ +export type EarlyStoppingUnion = boolean | "never"; +/** + * Outputs of inference for the Text To Audio task + */ +export interface TextToAudioOutput { + /** + * The generated audio waveform. + */ + audio: unknown; + samplingRate: unknown; + /** + * The sampling rate of the generated audio waveform. 
+ */ + sampling_rate?: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-audio/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-audio/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..729dd9c2ecdab03c10fb9edae518d4e3d9d5b63b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-audio/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-audio/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,qBAAqB,CAAC;IACnC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,QAAQ,CAAC,EAAE,oBAAoB,CAAC;IAChC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,YAAY,EAAE,OAAO,CAAC;IACtB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"
} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..14af64765a2c2a55418a37ed96fdb744e546ac7c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-image/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA+Ff,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..956a6805a3c9750705f0ee68330c5afbed69d458 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/inference.d.ts @@ -0,0 +1,68 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Text To Image inference + */ +export interface TextToImageInput { + /** + * The input text data (sometimes called "prompt" + */ + inputs: string; + /** + * Additional inference parameters + */ + 
parameters?: TextToImageParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Text To Image + */ +export interface TextToImageParameters { + /** + * For diffusion models. A higher guidance scale value encourages the model to generate + * images closely linked to the text prompt at the expense of lower image quality. + */ + guidance_scale?: number; + /** + * One or several prompt to guide what NOT to include in image generation. + */ + negative_prompt?: string[]; + /** + * For diffusion models. The number of denoising steps. More denoising steps usually lead to + * a higher quality image at the expense of slower inference. + */ + num_inference_steps?: number; + /** + * For diffusion models. Override the scheduler with a compatible one + */ + scheduler?: string; + /** + * The size in pixel of the output image + */ + target_size?: TargetSize; + [property: string]: unknown; +} +/** + * The size in pixel of the output image + */ +export interface TargetSize { + height: number; + width: number; + [property: string]: unknown; +} +/** + * Outputs of inference for the Text To Image task + */ +export interface TextToImageOutput { + /** + * The generated image + */ + image: unknown; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..4da428b79261f5a4dc08478fa117b370521264d7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-image/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-image/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,qBAAqB,CAAC;IACnC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,qBAAqB;IACrC;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,EAAE,CAAC;IAC3B;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,WAAW,CAAC,EAAE,UAAU,CAAC;IACzB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,WAAW,UAAU;IAC1B,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..92b77afa008f6ee33ca19fbfd1eb2a3ba6987e08 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-speech/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAiEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..2efbd3ed8497ea2b74f2148be3e6cbd95e4a471e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/inference.d.ts @@ -0,0 +1,143 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Text to Speech inference + * + * Inputs for Text To Audio inference + */ +export interface TextToSpeechInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: TextToAudioParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Text To Audio + */ +export interface TextToAudioParameters { + /** + * Parametrization of the text generation process + */ + generate?: GenerationParameters; + [property: string]: unknown; +} +/** + * Parametrization of the text generation process + * + * Ad-hoc parametrization of the text generation process + */ +export interface GenerationParameters { + /** + * Whether to use sampling instead of greedy decoding when generating new tokens. + */ + do_sample?: boolean; + /** + * Controls the stopping condition for beam-based methods. + */ + early_stopping?: EarlyStoppingUnion; + /** + * If set to float strictly between 0 and 1, only tokens with a conditional probability + * greater than epsilon_cutoff will be sampled. 
In the paper, suggested values range from + * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + * Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + */ + epsilon_cutoff?: number; + /** + * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + * float strictly between 0 and 1, a token is only considered if it is greater than either + * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + * for more details. + */ + eta_cutoff?: number; + /** + * The maximum length (in tokens) of the generated text, including the input. + */ + max_length?: number; + /** + * The maximum number of tokens to generate. Takes precedence over maxLength. + */ + max_new_tokens?: number; + /** + * The minimum length (in tokens) of the generated text, including the input. + */ + min_length?: number; + /** + * The minimum number of tokens to generate. Takes precedence over maxLength. + */ + min_new_tokens?: number; + /** + * Number of groups to divide num_beams into in order to ensure diversity among different + * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + */ + num_beam_groups?: number; + /** + * Number of beams to use for beam search. + */ + num_beams?: number; + /** + * The value balances the model confidence and the degeneration penalty in contrastive + * search decoding. + */ + penalty_alpha?: number; + /** + * The value used to modulate the next token probabilities. + */ + temperature?: number; + /** + * The number of highest probability vocabulary tokens to keep for top-k-filtering. 
+ */ + top_k?: number; + /** + * If set to float < 1, only the smallest set of most probable tokens with probabilities + * that add up to top_p or higher are kept for generation. + */ + top_p?: number; + /** + * Local typicality measures how similar the conditional probability of predicting a target + * token next is to the expected conditional probability of predicting a random token next, + * given the partial text already generated. If set to float < 1, the smallest set of the + * most locally typical tokens with probabilities that add up to typical_p or higher are + * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + */ + typical_p?: number; + /** + * Whether the model should use the past last key/values attentions to speed up decoding + */ + use_cache?: boolean; + [property: string]: unknown; +} +/** + * Controls the stopping condition for beam-based methods. + */ +export type EarlyStoppingUnion = boolean | "never"; +/** + * Outputs for Text to Speech inference + * + * Outputs of inference for the Text To Audio task + */ +export interface TextToSpeechOutput { + /** + * The generated audio waveform. + */ + audio: unknown; + samplingRate: unknown; + /** + * The sampling rate of the generated audio waveform. 
+ */ + sampling_rate?: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..b7c00bf0fea3c4318168dfff4b2025c73a80d3ae --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-speech/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-speech/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;;;GAIG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,qBAAqB,CAAC;IACnC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,QAAQ,CAAC,EAAE,oBAAoB,CAAC;IAChC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;;;GAIG;AACH,MAAM,WAAW,kBAAkB;IAClC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,YAAY,EAAE,OAAO,CAAC;IACtB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAA
C;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-video/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-video/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-video/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-video/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-video/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..1f8def3c57a0e1b0da3bd1b3dc2ad7fccf0c98ce --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text-to-video/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-video/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAiGf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text2text-generation/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/text2text-generation/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..0e83e39796248437f685b1436065f481486f8c30 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text2text-generation/inference.d.ts @@ -0,0 +1,54 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Text2text Generation inference + */ +export interface Text2TextGenerationInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference 
parameters + */ + parameters?: Text2TextGenerationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Text2text Generation + */ +export interface Text2TextGenerationParameters { + /** + * Whether to clean up the potential extra spaces in the text output. + */ + clean_up_tokenization_spaces?: boolean; + /** + * Additional parametrization of the text generation algorithm + */ + generate_parameters?: { + [key: string]: unknown; + }; + /** + * The truncation strategy to use + */ + truncation?: Text2TextGenerationTruncationStrategy; + [property: string]: unknown; +} +export type Text2TextGenerationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second"; +/** + * Outputs of inference for the Text2text Generation task + */ +export interface Text2TextGenerationOutput { + generatedText: unknown; + /** + * The generated text. + */ + generated_text?: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/text2text-generation/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/text2text-generation/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..d9fff5bdba1c3143cc9d5690d40e7e88051ac54c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/text2text-generation/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text2text-generation/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,wBAAwB;IACxC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,6BAA6B,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,6BAA6B;IAC7C;;OAEG;IACH,4BAA4B,CAAC,EAAE,OAAO,CAAC;IACvC;;OAEG;IACH,mBAAmB,CAAC,EAAE;QAAE,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAA;KAAE,CAAC;IACjD;;OAEG;IACH,UAAU,CAAC,EAAE,qCAAqC,CAAC;IACnD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,MAAM,qCAAqC,GAAG,iBAAiB,GAAG,eAAe,GAAG,YAAY,GAAG,aAAa,CAAC;AAEvH;;GAEG;AACH,MAAM,WAAW,yBAAyB;IACzC,aAAa,EAAE,OAAO,CAAC;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..49cafd9d144f795cb89df51f1b8a09e2942d54c8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/token-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA+Ef,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..5e92636bc61a6ed3f19086113b8e96194a712eed --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/inference.d.ts @@ -0,0 +1,83 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Token Classification inference + */ +export interface TokenClassificationInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: TokenClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Token Classification + */ +export interface TokenClassificationParameters { + /** + * The strategy used to fuse tokens based on model predictions + */ + aggregation_strategy?: TokenClassificationAggregationStrategy; + /** + * A list of labels to ignore + */ + ignore_labels?: string[]; + /** + * The number of overlapping tokens between chunks when splitting the input text. + */ + stride?: number; + [property: string]: unknown; +} +/** + * Do not aggregate tokens + * + * Group consecutive tokens with the same label in a single entity. + * + * Similar to "simple", also preserves word integrity (use the label predicted for the first + * token in a word). + * + * Similar to "simple", also preserves word integrity (uses the label with the highest + * score, averaged across the word's tokens). 
+ * + * Similar to "simple", also preserves word integrity (uses the label with the highest score + * across the word's tokens). + */ +export type TokenClassificationAggregationStrategy = "none" | "simple" | "first" | "average" | "max"; +export type TokenClassificationOutput = TokenClassificationOutputElement[]; +/** + * Outputs of inference for the Token Classification task + */ +export interface TokenClassificationOutputElement { + /** + * The character position in the input where this group ends. + */ + end?: number; + /** + * The predicted label for that group of tokens + */ + entity_group?: string; + label: unknown; + /** + * The associated score / probability + */ + score: number; + /** + * The character position in the input where this group begins. + */ + start?: number; + /** + * The corresponding text + */ + word?: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8c3d51f3e996b5d0e54420b92043b2d7eed81806 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/token-classification/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/token-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,wBAAwB;IACxC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,6BAA6B,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,6BAA6B;IAC7C;;OAEG;IACH,oBAAoB,CAAC,EAAE,sCAAsC,CAAC;IAC9D;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;IACzB;;OAEG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;;;;;;;;;;GAaG;AACH,MAAM,MAAM,sCAAsC,GAAG,MAAM,GAAG,QAAQ,GAAG,OAAO,GAAG,SAAS,GAAG,KAAK,CAAC;AACrG,MAAM,MAAM,yBAAyB,GAAG,gCAAgC,EAAE,CAAC;AAC3E;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,GAAG,CAAC,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,KAAK,EAAE,OAAO,CAAC;IACf;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..567bbac1bc040426b9dabe4353e04690fbf8b7e6 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/translation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAgEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..0f34faa1105b2a045dfdcc7d5bbe7c3e6ecd7210 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/inference.d.ts @@ -0,0 +1,55 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Translation inference + * + * Inputs for Text2text Generation inference + */ +export interface TranslationInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: Text2TextGenerationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Text2text Generation + */ +export interface Text2TextGenerationParameters { + /** + * Whether to clean up the potential extra spaces in the text output. + */ + clean_up_tokenization_spaces?: boolean; + /** + * Additional parametrization of the text generation algorithm + */ + generate_parameters?: { + [key: string]: unknown; + }; + /** + * The truncation strategy to use + */ + truncation?: Text2TextGenerationTruncationStrategy; + [property: string]: unknown; +} +export type Text2TextGenerationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second"; +/** + * Outputs of inference for the Translation task + */ +export interface TranslationOutput { + /** + * The translated text. 
+ */ + translation_text: string; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..ebe2847b09a4e9eb928e8c94b10454d8924794ea --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/translation/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/translation/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;;;GAIG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,6BAA6B,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,6BAA6B;IAC7C;;OAEG;IACH,4BAA4B,CAAC,EAAE,OAAO,CAAC;IACvC;;OAEG;IACH,mBAAmB,CAAC,EAAE;QAAE,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAA;KAAE,CAAC;IACjD;;OAEG;IACH,UAAU,CAAC,EAAE,qCAAqC,CAAC;IACnD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,MAAM,qCAAqC,GAAG,iBAAiB,GAAG,eAAe,GAAG,YAAY,GAAG,aAAa,CAAC;AAEvH;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,gBAAgB,EAAE,MAAM,CAAC;IACzB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/unconditional-image-generation/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/unconditional-image-generation/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/unconditional-image-generation/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git 
a/data/node_modules/@huggingface/tasks/dist/src/tasks/unconditional-image-generation/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/unconditional-image-generation/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..09695b5dd10928036e799c192c9ac9d391c3c861 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/unconditional-image-generation/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/unconditional-image-generation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAmEf,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..37411c825baf45b2445eaa11ab8fb5a5b3518113 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/video-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA+Ef,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff 
--git a/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..1dea33c7ff055554347839fe6ca5202e616eaca8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/inference.d.ts @@ -0,0 +1,60 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Video Classification inference + */ +export interface VideoClassificationInput { + /** + * The input video data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: VideoClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Video Classification + */ +export interface VideoClassificationParameters { + /** + * The sampling rate used to select frames from the video. + */ + frame_sampling_rate?: number; + function_to_apply?: ClassificationOutputTransform; + /** + * The number of sampled frames to consider for classification. + */ + num_frames?: number; + /** + * When specified, limits the output to the top K most probable classes. + */ + top_k?: number; + [property: string]: unknown; +} +/** + * The function to apply to the model outputs in order to retrieve the scores. + */ +export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none"; +export type VideoClassificationOutput = VideoClassificationOutputElement[]; +/** + * Outputs of inference for the Video Classification task + */ +export interface VideoClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. 
+ */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..3fa33d60329961862b6c775638c017e8d32e6e73 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/video-classification/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/video-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,wBAAwB;IACxC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,6BAA6B,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,6BAA6B;IAC7C;;OAEG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B,iBAAiB,CAAC,EAAE,6BAA6B,CAAC;IAClD;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,MAAM,6BAA6B,GAAG,SAAS,GAAG,SAAS,GAAG,MAAM,CAAC;AAC3E,MAAM,MAAM,yBAAyB,GAAG,gCAAgC,EAAE,CAAC;AAC3E;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No 
newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..d4870dc8ca9c95e994366ce5f7fa68e3efbd4f18 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/visual-question-answering/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA4Ff,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..b5fb12a9f57a8f6de7075af1f491f4cf0474a300 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/inference.d.ts @@ -0,0 +1,64 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Visual Question Answering inference + */ +export interface VisualQuestionAnsweringInput { + /** + * One (image, question) pair to answer + */ + inputs: VisualQuestionAnsweringInputData; + /** + * Additional inference parameters + */ + parameters?: VisualQuestionAnsweringParameters; + [property: string]: unknown; +} +/** + * One (image, question) pair to answer + */ +export interface VisualQuestionAnsweringInputData { + /** + * The image. + */ + image: unknown; + /** + * The question to answer based on the image. 
+ */ + question: unknown; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Visual Question Answering + */ +export interface VisualQuestionAnsweringParameters { + /** + * The number of answers to return (will be chosen by order of likelihood). Note that we + * return less than topk answers if there are not enough options available within the + * context. + */ + top_k?: number; + [property: string]: unknown; +} +export type VisualQuestionAnsweringOutput = VisualQuestionAnsweringOutputElement[]; +/** + * Outputs of inference for the Visual Question Answering task + */ +export interface VisualQuestionAnsweringOutputElement { + /** + * The answer to the question + */ + answer?: string; + label: unknown; + /** + * The associated score / probability + */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..3fb410c5237da7c09a0cb6fc2d0f15195625b270 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/visual-question-answering/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/visual-question-answering/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,4BAA4B;IAC5C;;OAEG;IACH,MAAM,EAAE,gCAAgC,CAAC;IACzC;;OAEG;IACH,UAAU,CAAC,EAAE,iCAAiC,CAAC;IAC/C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf;;OAEG;IACH,QAAQ,EAAE,OAAO,CAAC;IAClB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,iCAAiC;IACjD;;;;OAIG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,6BAA6B,GAAG,oCAAoC,EAAE,CAAC;AACnF;;GAEG;AACH,MAAM,WAAW,oCAAoC;IACpD;;OAEG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,OAAO,CAAC;IACf;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..2b176f1649b76a2b275eb396258ce69fc587a0c0 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/data.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA6Df,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..637c4d7345fcde05f97a2c732c44119d67928022 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/inference.d.ts @@ -0,0 +1,68 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Zero Shot Classification inference + */ +export interface ZeroShotClassificationInput { + /** + * The input text data, with candidate labels + */ + inputs: ZeroShotClassificationInputData; + /** + * Additional inference parameters + */ + parameters?: ZeroShotClassificationParameters; + [property: string]: unknown; +} +/** + * The input text data, with candidate labels + */ +export interface ZeroShotClassificationInputData { + /** + * The set of possible class labels to classify the text into. + */ + candidateLabels: string[]; + /** + * The text to classify + */ + text: string; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Zero Shot Classification + */ +export interface ZeroShotClassificationParameters { + /** + * The sentence used in conjunction with candidateLabels to attempt the text classification + * by replacing the placeholder with the candidate labels. + */ + hypothesis_template?: string; + /** + * Whether multiple candidate labels can be true. If false, the scores are normalized such + * that the sum of the label likelihoods for each sequence is 1. 
If true, the labels are + * considered independent and probabilities are normalized for each candidate. + */ + multi_label?: boolean; + [property: string]: unknown; +} +export type ZeroShotClassificationOutput = ZeroShotClassificationOutputElement[]; +/** + * Outputs of inference for the Zero Shot Classification task + */ +export interface ZeroShotClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. + */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8e94678dc0f1ee115d348da67c535fc38d7623c0 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-classification/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,2BAA2B;IAC3C;;OAEG;IACH,MAAM,EAAE,+BAA+B,CAAC;IACxC;;OAEG;IACH,UAAU,CAAC,EAAE,gCAAgC,CAAC;IAC9C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,+BAA+B;IAC/C;;OAEG;IACH,eAAe,EAAE,MAAM,EAAE,CAAC;IAC1B;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,gCAAgC;IAChD;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B;;;;OAIG;IACH,WAAW,CAAC,EAAE,OAAO,CAAC;IACtB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,4BAA4B,GAAG,mCAAmC,EAAE,CAAC;AACjF;;GAEG;AACH,MAAM,WAAW,mCAAmC;IACnD;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git 
a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..b9543e2a29d2d42df7b2567c043181ca44e0fe2b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-image-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA+Ef,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..fac19be5f113c093895bd834d127ffc7b36b5584 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/inference.d.ts @@ -0,0 +1,62 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Zero Shot Image Classification inference + */ +export interface 
ZeroShotImageClassificationInput { + /** + * The input image data, with candidate labels + */ + inputs: ZeroShotImageClassificationInputData; + /** + * Additional inference parameters + */ + parameters?: ZeroShotImageClassificationParameters; + [property: string]: unknown; +} +/** + * The input image data, with candidate labels + */ +export interface ZeroShotImageClassificationInputData { + /** + * The candidate labels for this image + */ + candidateLabels: string[]; + /** + * The image data to classify + */ + image: unknown; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Zero Shot Image Classification + */ +export interface ZeroShotImageClassificationParameters { + /** + * The sentence used in conjunction with candidateLabels to attempt the text classification + * by replacing the placeholder with the candidate labels. + */ + hypothesis_template?: string; + [property: string]: unknown; +} +export type ZeroShotImageClassificationOutput = ZeroShotImageClassificationOutputElement[]; +/** + * Outputs of inference for the Zero Shot Image Classification task + */ +export interface ZeroShotImageClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. 
+ */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..dbb81f9d9ab53131cd1811b212458c8735010fe1 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-image-classification/inference.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-image-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,MAAM,EAAE,oCAAoC,CAAC;IAC7C;;OAEG;IACH,UAAU,CAAC,EAAE,qCAAqC,CAAC;IACnD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,oCAAoC;IACpD;;OAEG;IACH,eAAe,EAAE,MAAM,EAAE,CAAC;IAC1B;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,qCAAqC;IACrD;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,iCAAiC,GAAG,wCAAwC,EAAE,CAAC;AAC3F;;GAEG;AACH,MAAM,WAAW,wCAAwC;IACxD;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..3ce37b70340e17e6e57c78ec21403a41afab5eb7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/data.d.ts @@ -0,0 +1,4 @@ +import type { TaskDataCustom } from ".."; +declare const taskData: TaskDataCustom; +export default taskData; +//# 
sourceMappingURL=data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..128b06762823921d9d450fc29c402765391907a3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA8Df,CAAC;AAEF,eAAe,QAAQ,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/inference.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/inference.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..7ff069593b4fe750f76d60227d0097ee190093c1 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/inference.d.ts @@ -0,0 +1,67 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Zero Shot Object Detection inference + */ +export interface ZeroShotObjectDetectionInput { + /** + * The input image data, with candidate labels + */ + inputs: ZeroShotObjectDetectionInputData; + /** + * Additional inference parameters + */ + parameters?: { + [key: string]: unknown; + }; + [property: string]: unknown; +} +/** + * The input image data, with candidate labels + */ +export interface ZeroShotObjectDetectionInputData { + /** + * The candidate labels for this image + */ + candidateLabels: string[]; + /** + * The image data to generate bounding boxes from + */ + image: unknown; + [property: string]: unknown; +} +/** + * The 
predicted bounding box. Coordinates are relative to the top left corner of the input + * image. + */ +export interface BoundingBox { + xmax: number; + xmin: number; + ymax: number; + ymin: number; + [property: string]: unknown; +} +export type ZeroShotObjectDetectionOutput = ZeroShotObjectDetectionOutputElement[]; +/** + * Outputs of inference for the Zero Shot Object Detection task + */ +export interface ZeroShotObjectDetectionOutputElement { + /** + * The predicted bounding box. Coordinates are relative to the top left corner of the input + * image. + */ + box: BoundingBox; + /** + * A candidate label + */ + label: string; + /** + * The associated score / probability + */ + score: number; + [property: string]: unknown; +} +//# sourceMappingURL=inference.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/inference.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/inference.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..b5ef8fbe3ea1dbe8276ffc72acabea05e6eb6709 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tasks/zero-shot-object-detection/inference.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-object-detection/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,4BAA4B;IAC5C;;OAEG;IACH,MAAM,EAAE,gCAAgC,CAAC;IACzC;;OAEG;IACH,UAAU,CAAC,EAAE;QACZ,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;KACvB,CAAC;IACF,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,eAAe,EAAE,MAAM,EAAE,CAAC;IAC1B;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;GAGG;AACH,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,6BAA6B,GAAG,oCAAoC,EAAE,CAAC;AACnF;;GAEG;AACH,MAAM,WAAW,oCAAoC;IACpD;;;OAGG;IACH,GAAG,EAAE,WAAW,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tokenizer-data.d.ts b/data/node_modules/@huggingface/tasks/dist/src/tokenizer-data.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..5863744080832186bd5f7482bd3f55c446872e84 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tokenizer-data.d.ts @@ -0,0 +1,26 @@ +export declare const SPECIAL_TOKENS_ATTRIBUTES: readonly ["bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token"]; +/** + * Public interface for a tokenizer's special tokens mapping + */ +export interface AddedToken { + __type: "AddedToken"; + content?: string; + lstrip?: boolean; + normalized?: boolean; + rstrip?: boolean; + single_word?: boolean; +} +export type SpecialTokensMap = { + [key in (typeof SPECIAL_TOKENS_ATTRIBUTES)[number]]?: string | AddedToken | null; +}; +/** + * Public interface for tokenizer config + */ +export interface TokenizerConfig extends SpecialTokensMap { + use_default_system_prompt?: boolean; + 
chat_template?: string | Array<{ + name: string; + template: string; + }>; +} +//# sourceMappingURL=tokenizer-data.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/tokenizer-data.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/tokenizer-data.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..8f5b0a57b829b2f6c0060ac3f36d00e093d8195c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/tokenizer-data.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"tokenizer-data.d.ts","sourceRoot":"","sources":["../../src/tokenizer-data.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,yBAAyB,uGAS5B,CAAC;AAEX;;GAEG;AACH,MAAM,WAAW,UAAU;IAC1B,MAAM,EAAE,YAAY,CAAC;IACrB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,WAAW,CAAC,EAAE,OAAO,CAAC;CACtB;AACD,MAAM,MAAM,gBAAgB,GAAG;KAC7B,GAAG,IAAI,CAAC,OAAO,yBAAyB,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,MAAM,GAAG,UAAU,GAAG,IAAI;CAChF,CAAC;AACF;;GAEG;AACH,MAAM,WAAW,eAAgB,SAAQ,gBAAgB;IACxD,yBAAyB,CAAC,EAAE,OAAO,CAAC;IACpC,aAAa,CAAC,EAAE,MAAM,GAAG,KAAK,CAAC;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,QAAQ,EAAE,MAAM,CAAA;KAAE,CAAC,CAAC;CACnE"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/widget-example.d.ts b/data/node_modules/@huggingface/tasks/dist/src/widget-example.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..947fbba1235ac347dd716b6cae28fd8bf4c9e75c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/widget-example.d.ts @@ -0,0 +1,83 @@ +/** + * See default-widget-inputs.ts for the default widget inputs, this files only contains the types + */ +import type { ChatCompletionInputMessage } from "./tasks"; +type TableData = Record; +export type WidgetExampleOutputLabels = Array<{ + label: string; + score: number; +}>; +export interface WidgetExampleOutputAnswerScore { + answer: string; + score: number; +} +export interface 
WidgetExampleOutputText { + text: string; +} +export interface WidgetExampleOutputUrl { + url: string; +} +export type WidgetExampleOutput = WidgetExampleOutputLabels | WidgetExampleOutputAnswerScore | WidgetExampleOutputText | WidgetExampleOutputUrl; +export interface WidgetExampleBase { + example_title?: string; + group?: string; + /** + * Potential overrides to API parameters for this specific example + * (takes precedences over the model card metadata's inference.parameters) + */ + parameters?: { + aggregation_strategy?: string; + top_k?: number; + top_p?: number; + temperature?: number; + max_new_tokens?: number; + do_sample?: boolean; + negative_prompt?: string; + guidance_scale?: number; + num_inference_steps?: number; + }; + /** + * Optional output + */ + output?: TOutput; +} +export interface WidgetExampleChatInput extends WidgetExampleBase { + messages: ChatCompletionInputMessage[]; +} +export interface WidgetExampleTextInput extends WidgetExampleBase { + text: string; +} +export interface WidgetExampleTextAndContextInput extends WidgetExampleTextInput { + context: string; +} +export interface WidgetExampleTextAndTableInput extends WidgetExampleTextInput { + table: TableData; +} +export interface WidgetExampleAssetInput extends WidgetExampleBase { + src: string; +} +export interface WidgetExampleAssetAndPromptInput extends WidgetExampleAssetInput { + prompt: string; +} +export type WidgetExampleAssetAndTextInput = WidgetExampleAssetInput & WidgetExampleTextInput; +export type WidgetExampleAssetAndZeroShotInput = WidgetExampleAssetInput & WidgetExampleZeroShotTextInput; +export interface WidgetExampleStructuredDataInput extends WidgetExampleBase { + structured_data: TableData; +} +export interface WidgetExampleTableDataInput extends WidgetExampleBase { + table: TableData; +} +export interface WidgetExampleZeroShotTextInput extends WidgetExampleTextInput { + text: string; + candidate_labels: string; + multi_class: boolean; +} +export interface 
WidgetExampleSentenceSimilarityInput extends WidgetExampleBase { + source_sentence: string; + sentences: string[]; +} +export type WidgetExample = WidgetExampleChatInput | WidgetExampleTextInput | WidgetExampleTextAndContextInput | WidgetExampleTextAndTableInput | WidgetExampleAssetInput | WidgetExampleAssetAndPromptInput | WidgetExampleAssetAndTextInput | WidgetExampleAssetAndZeroShotInput | WidgetExampleStructuredDataInput | WidgetExampleTableDataInput | WidgetExampleZeroShotTextInput | WidgetExampleSentenceSimilarityInput; +type KeysOfUnion = T extends unknown ? keyof T : never; +export type WidgetExampleAttribute = KeysOfUnion; +export {}; +//# sourceMappingURL=widget-example.d.ts.map \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/dist/src/widget-example.d.ts.map b/data/node_modules/@huggingface/tasks/dist/src/widget-example.d.ts.map new file mode 100644 index 0000000000000000000000000000000000000000..d2620d3e4a70c5fa18a19904427570856539a784 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/dist/src/widget-example.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"widget-example.d.ts","sourceRoot":"","sources":["../../src/widget-example.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,KAAK,EAAE,0BAA0B,EAAE,MAAM,SAAS,CAAC;AAE1D,KAAK,SAAS,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,MAAM,GAAG,MAAM,CAAC,EAAE,CAAC,CAAC;AAGrD,MAAM,MAAM,yBAAyB,GAAG,KAAK,CAAC;IAAE,KAAK,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE,CAAC,CAAC;AAChF,MAAM,WAAW,8BAA8B;IAC9C,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;CACd;AACD,MAAM,WAAW,uBAAuB;IACvC,IAAI,EAAE,MAAM,CAAC;CACb;AACD,MAAM,WAAW,sBAAsB;IACtC,GAAG,EAAE,MAAM,CAAC;CACZ;AAED,MAAM,MAAM,mBAAmB,GAC5B,yBAAyB,GACzB,8BAA8B,GAC9B,uBAAuB,GACvB,sBAAsB,CAAC;AAG1B,MAAM,WAAW,iBAAiB,CAAC,OAAO;IACzC,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,UAAU,CAAC,EAAE;QAEZ,oBAAoB,CAAC,EAAE,MAAM,CAAC;QAE9B,KAAK,CAAC,EAAE,MAAM,CAAC;QACf,KAAK,CAAC,EAAE,MAAM,CAAC;QACf,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB,SAAS,CAAC,EAAE,OAAO,CAAC;QAEpB,eAAe,CAAC,EAAE,MAAM,CAAC;QACzB,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB,mBAAmB,CAAC,EAAE,MAAM,CAAC;KAC7B,CAAC;IACF;;OAEG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED,MAAM,WAAW,sBAAsB,CAAC,OAAO,GAAG,mBAAmB,CAAE,SAAQ,iBAAiB,CAAC,OAAO,CAAC;IACxG,QAAQ,EAAE,0BAA0B,EAAE,CAAC;CACvC;AAED,MAAM,WAAW,sBAAsB,CAAC,OAAO,GAAG,mBAAmB,CAAE,SAAQ,iBAAiB,CAAC,OAAO,CAAC;IACxG,IAAI,EAAE,MAAM,CAAC;CACb;AAED,MAAM,WAAW,gCAAgC,CAAC,OAAO,GAAG,mBAAmB,CAC9E,SAAQ,sBAAsB,CAAC,OAAO,CAAC;IACvC,OAAO,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,WAAW,8BAA8B,CAAC,OAAO,GAAG,mBAAmB,CAAE,SAAQ,sBAAsB,CAAC,OAAO,CAAC;IACrH,KAAK,EAAE,SAAS,CAAC;CACjB;AAED,MAAM,WAAW,uBAAuB,CAAC,OAAO,GAAG,mBAAmB,CAAE,SAAQ,iBAAiB,CAAC,OAAO,CAAC;IACzG,GAAG,EAAE,MAAM,CAAC;CACZ;AACD,MAAM,WAAW,gCAAgC,CAAC,OAAO,GAAG,mBAAmB,CAC9E,SAAQ,uBAAuB,CAAC,OAAO,CAAC;IACxC,MAAM,EAAE,MAAM,CAAC;CACf;AAED,MAAM,MAAM,8BAA8B,CAAC,OAAO,GAAG,mBAAmB,IAAI,uBAAuB,CAAC,OAAO,CAAC,GAC3G,sBAAsB,CAAC,OAAO,CAAC,CAAC;AAEjC,MAAM,MAAM,kCAAkC,CAAC,OAAO,GAAG,mBAAmB,IAAI,uBAAuB,CAAC,OAAO,CAAC,GAC/G,8BAA8B,CAAC,OAAO,CAAC,CAAC;AAEzC,MAAM,WAAW,gCAAgC,CAAC,OAAO,GAAG,mBAA
mB,CAAE,SAAQ,iBAAiB,CAAC,OAAO,CAAC;IAClH,eAAe,EAAE,SAAS,CAAC;CAC3B;AAED,MAAM,WAAW,2BAA2B,CAAC,OAAO,GAAG,mBAAmB,CAAE,SAAQ,iBAAiB,CAAC,OAAO,CAAC;IAC7G,KAAK,EAAE,SAAS,CAAC;CACjB;AAED,MAAM,WAAW,8BAA8B,CAAC,OAAO,GAAG,mBAAmB,CAAE,SAAQ,sBAAsB,CAAC,OAAO,CAAC;IACrH,IAAI,EAAE,MAAM,CAAC;IACb,gBAAgB,EAAE,MAAM,CAAC;IACzB,WAAW,EAAE,OAAO,CAAC;CACrB;AAED,MAAM,WAAW,oCAAoC,CAAC,OAAO,GAAG,mBAAmB,CAClF,SAAQ,iBAAiB,CAAC,OAAO,CAAC;IAClC,eAAe,EAAE,MAAM,CAAC;IACxB,SAAS,EAAE,MAAM,EAAE,CAAC;CACpB;AAID,MAAM,MAAM,aAAa,CAAC,OAAO,GAAG,mBAAmB,IACpD,sBAAsB,CAAC,OAAO,CAAC,GAC/B,sBAAsB,CAAC,OAAO,CAAC,GAC/B,gCAAgC,CAAC,OAAO,CAAC,GACzC,8BAA8B,CAAC,OAAO,CAAC,GACvC,uBAAuB,CAAC,OAAO,CAAC,GAChC,gCAAgC,CAAC,OAAO,CAAC,GACzC,8BAA8B,CAAC,OAAO,CAAC,GACvC,kCAAkC,CAAC,OAAO,CAAC,GAC3C,gCAAgC,CAAC,OAAO,CAAC,GACzC,2BAA2B,CAAC,OAAO,CAAC,GACpC,8BAA8B,CAAC,OAAO,CAAC,GACvC,oCAAoC,CAAC,OAAO,CAAC,CAAC;AAEjD,KAAK,WAAW,CAAC,CAAC,IAAI,CAAC,SAAS,OAAO,GAAG,MAAM,CAAC,GAAG,KAAK,CAAC;AAE1D,MAAM,MAAM,sBAAsB,GAAG,WAAW,CAAC,aAAa,CAAC,CAAC"} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/package.json b/data/node_modules/@huggingface/tasks/package.json new file mode 100644 index 0000000000000000000000000000000000000000..c17300f7a7bc933d6d02b3746c2f07c42009cfd1 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/package.json @@ -0,0 +1,50 @@ +{ + "name": "@huggingface/tasks", + "packageManager": "pnpm@8.10.5", + "version": "0.11.4", + "description": "List of ML tasks for huggingface.co/tasks", + "repository": "https://github.com/huggingface/huggingface.js.git", + "publishConfig": { + "access": "public" + }, + "main": "./dist/index.cjs", + "module": "./dist/index.js", + "types": "./dist/src/index.d.ts", + "exports": { + ".": { + "types": "./dist/src/index.d.ts", + "require": "./dist/index.cjs", + "import": "./dist/index.js" + } + }, + "source": "src/index.ts", + "type": "module", + "files": [ + "dist", + "src", + "tsconfig.json" + ], + "keywords": [ + "huggingface", + "hub", + "languages" + ], + 
"author": "Hugging Face", + "license": "MIT", + "devDependencies": { + "@types/node": "^20.11.5", + "quicktype-core": "https://github.com/huggingface/quicktype/raw/pack-18.0.17/packages/quicktype-core/quicktype-core-18.0.17.tgz", + "type-fest": "^3.13.1" + }, + "scripts": { + "lint": "eslint --quiet --fix --ext .cjs,.ts .", + "lint:check": "eslint --ext .cjs,.ts .", + "format": "prettier --write .", + "format:check": "prettier --check .", + "build": "tsup src/index.ts --format cjs,esm --clean && tsc --emitDeclarationOnly --declaration", + "check": "tsc", + "inference-codegen": "tsx scripts/inference-codegen.ts && prettier --write src/tasks/*/inference.ts", + "inference-tgi-import": "tsx scripts/inference-tgi-import.ts && prettier --write src/tasks/text-generation/spec/*.json && prettier --write src/tasks/chat-completion/spec/*.json", + "inference-tei-import": "tsx scripts/inference-tei-import.ts && prettier --write src/tasks/feature-extraction/spec/*.json" + } +} \ No newline at end of file diff --git a/data/node_modules/@huggingface/tasks/src/dataset-libraries.ts b/data/node_modules/@huggingface/tasks/src/dataset-libraries.ts new file mode 100644 index 0000000000000000000000000000000000000000..9818ea6be88ccf662b2fddeb86ecb58109ed0487 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/dataset-libraries.ts @@ -0,0 +1,83 @@ +/** + * Elements configurable by a dataset library. + */ +export interface DatasetLibraryUiElement { + /** + * Pretty name of the library. + * displayed (in tags?, and) on the main + * call-to-action button on the dataset page. 
+ */ + prettyLabel: string; + /** + * Repo name of the library's (usually on GitHub) code repo + */ + repoName: string; + /** + * URL to library's (usually on GitHub) code repo + */ + repoUrl: string; + /** + * URL to library's docs + */ + docsUrl?: string; +} + +export const DATASET_LIBRARIES_UI_ELEMENTS = { + mlcroissant: { + prettyLabel: "Croissant", + repoName: "croissant", + repoUrl: "https://github.com/mlcommons/croissant/tree/main/python/mlcroissant", + docsUrl: "https://github.com/mlcommons/croissant/blob/main/python/mlcroissant/README.md", + }, + webdataset: { + prettyLabel: "WebDataset", + repoName: "webdataset", + repoUrl: "https://github.com/webdataset/webdataset", + docsUrl: "https://huggingface.co/docs/hub/datasets-webdataset", + }, + datasets: { + prettyLabel: "Datasets", + repoName: "datasets", + repoUrl: "https://github.com/huggingface/datasets", + docsUrl: "https://huggingface.co/docs/hub/datasets-usage", + }, + pandas: { + prettyLabel: "pandas", + repoName: "pandas", + repoUrl: "https://github.com/pandas-dev/pandas", + docsUrl: "https://huggingface.co/docs/hub/datasets-pandas", + }, + dask: { + prettyLabel: "Dask", + repoName: "dask", + repoUrl: "https://github.com/dask/dask", + docsUrl: "https://huggingface.co/docs/hub/datasets-dask", + }, + distilabel: { + prettyLabel: "Distilabel", + repoName: "distilabel", + repoUrl: "https://github.com/argilla-io/distilabel", + docsUrl: "https://distilabel.argilla.io", + }, + fiftyone: { + prettyLabel: "FiftyOne", + repoName: "fiftyone", + repoUrl: "https://github.com/voxel51/fiftyone", + docsUrl: "https://docs.voxel51.com", + }, + argilla: { + prettyLabel: "Argilla", + repoName: "argilla", + repoUrl: "https://github.com/argilla-io/argilla", + docsUrl: "https://argilla-io.github.io/argilla", + }, + polars: { + prettyLabel: "Polars", + repoName: "polars", + repoUrl: "https://github.com/pola-rs/polars", + docsUrl: "https://docs.pola.rs/", + }, +} satisfies Record; + +/// List of the dataset libraries supported 
by the Hub +export type DatasetLibraryKey = keyof typeof DATASET_LIBRARIES_UI_ELEMENTS; diff --git a/data/node_modules/@huggingface/tasks/src/default-widget-inputs.ts b/data/node_modules/@huggingface/tasks/src/default-widget-inputs.ts new file mode 100644 index 0000000000000000000000000000000000000000..b0f96ff404f68caa491ebb37b1d2cb5c827eb5d5 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/default-widget-inputs.ts @@ -0,0 +1,718 @@ +import type { WidgetExample } from "./widget-example"; +import type { WidgetType } from "./pipelines"; + +type LanguageCode = string; + +type PerLanguageMapping = Map; + +/// NOTE TO CONTRIBUTORS: +/// +/// When adding sample inputs for a new language, you don't +/// necessarily have to translate the inputs from existing languages. +/// (which were quite random to begin with) +/// +/// i.e. Feel free to be creative and provide better samples. +// + +/// The placeholder will be replaced by the correct mask token +/// in the following examples, depending on the model type +/// +/// see [INTERNAL] github.com/huggingface/moon-landing/blob/c5c3d45fe0ab27347b3ab27bdad646ef20732351/server/lib/App.ts#L254 +// + +const MAPPING_EN: PerLanguageMapping = new Map([ + ["text-classification", [`I like you. 
I love you`]], + [ + "token-classification", + [ + `My name is Wolfgang and I live in Berlin`, + `My name is Sarah and I live in London`, + `My name is Clara and I live in Berkeley, California.`, + ], + ], + [ + "table-question-answering", + [ + { + text: `How many stars does the transformers repository have?`, + table: { + Repository: ["Transformers", "Datasets", "Tokenizers"], + Stars: [36542, 4512, 3934], + Contributors: [651, 77, 34], + "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], + }, + }, + ], + ], + [ + "question-answering", + [ + { + text: `Where do I live?`, + context: `My name is Wolfgang and I live in Berlin`, + }, + { + text: `Where do I live?`, + context: `My name is Sarah and I live in London`, + }, + { + text: `What's my name?`, + context: `My name is Clara and I live in Berkeley.`, + }, + { + text: `Which name is also used to describe the Amazon rainforest in English?`, + context: `The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; Spanish: Selva Amazónica, Amazonía or usually Amazonia; French: Forêt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. 
The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.`, + }, + ], + ], + [ + "zero-shot-classification", + [ + { + text: "I have a problem with my iphone that needs to be resolved asap!!", + candidate_labels: "urgent, not urgent, phone, tablet, computer", + multi_class: true, + }, + { + text: "Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.", + candidate_labels: "mobile, website, billing, account access", + multi_class: false, + }, + { + text: "A new model offers an explanation for how the Galilean satellites formed around the solar system’s largest world. Konstantin Batygin did not set out to solve one of the solar system’s most puzzling mysteries when he went for a run up a hill in Nice, France. Dr. Batygin, a Caltech researcher, best known for his contributions to the search for the solar system’s missing “Planet Nine,” spotted a beer bottle. At a steep, 20 degree grade, he wondered why it wasn’t rolling down the hill. He realized there was a breeze at his back holding the bottle in place. Then he had a thought that would only pop into the mind of a theoretical astrophysicist: “Oh! This is how Europa formed.” Europa is one of Jupiter’s four large Galilean moons. And in a paper published Monday in the Astrophysical Journal, Dr. 
Batygin and a co-author, Alessandro Morbidelli, a planetary scientist at the Côte d’Azur Observatory in France, present a theory explaining how some moons form around gas giants like Jupiter and Saturn, suggesting that millimeter-sized grains of hail produced during the solar system’s formation became trapped around these massive worlds, taking shape one at a time into the potentially habitable moons we know today.", + candidate_labels: "space & cosmos, scientific discovery, microbiology, robots, archeology", + multi_class: true, + }, + ], + ], + ["translation", [`My name is Wolfgang and I live in Berlin`, `My name is Sarah and I live in London`]], + [ + "summarization", + [ + `The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.`, + ], + ], + [ + "conversational", + [ + `Hey my name is Julien! How are you?`, + `Hey my name is Thomas! How are you?`, + `Hey my name is Mariama! How are you?`, + `Hey my name is Clara! How are you?`, + `Hey my name is Julien! 
How are you?`, + `Hi.`, + ], + ], + [ + "text-generation", + [ + `My name is Julien and I like to`, + `My name is Thomas and my main`, + `My name is Mariama, my favorite`, + `My name is Clara and I am`, + `My name is Lewis and I like to`, + `My name is Merve and my favorite`, + `My name is Teven and I am`, + `Once upon a time,`, + ], + ], + ["fill-mask", [`Paris is the of France.`, `The goal of life is .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "That is a happy person", + sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"], + }, + ], + ], +]); + +const MAPPING_ZH: PerLanguageMapping = new Map([ + ["text-classification", [`我喜欢你。 我爱你`]], + ["token-classification", [`我叫沃尔夫冈,我住在柏林。`, `我叫萨拉,我住在伦敦。`, `我叫克拉拉,我住在加州伯克利。`]], + [ + "question-answering", + [ + { + text: `我住在哪里?`, + context: `我叫沃尔夫冈,我住在柏林。`, + }, + { + text: `我住在哪里?`, + context: `我叫萨拉,我住在伦敦。`, + }, + { + text: `我的名字是什么?`, + context: `我叫克拉拉,我住在伯克利。`, + }, + ], + ], + ["translation", [`我叫沃尔夫冈,我住在柏林。`, `我叫萨拉,我住在伦敦。`]], + [ + "zero-shot-classification", + [ + { + text: "房间干净明亮,非常不错", + candidate_labels: "这是一条差评, 这是一条好评", + }, + ], + ], + [ + "summarization", + [ + `该塔高324米(1063英尺),与一幢81层的建筑物一样高,是巴黎最高的建筑物。 它的底座是方形的,每边长125米(410英尺)。 在建造过程中,艾菲尔铁塔超过了华盛顿纪念碑,成为世界上最高的人造结构,它保持了41年的头衔,直到1930年纽约市的克莱斯勒大楼竣工。这是第一个到达300米高度的结构。 由于1957年在塔顶增加了广播天线,因此它现在比克莱斯勒大厦高5.2米(17英尺)。 除发射器外,艾菲尔铁塔是法国第二高的独立式建筑,仅次于米劳高架桥。`, + ], + ], + [ + "text-generation", + [`我叫朱利安,我喜欢`, `我叫托马斯,我的主要`, `我叫玛丽亚,我最喜欢的`, `我叫克拉拉,我是`, `从前,`], + ], + ["fill-mask", [`巴黎是国的首都。`, `生活的真谛是。`]], + [ + "sentence-similarity", + [ + { + source_sentence: "那是 個快樂的人", + sentences: ["那是 條快樂的狗", "那是 個非常幸福的人", "今天是晴天"], + }, + ], + ], +]); + +const MAPPING_FR: PerLanguageMapping = new Map([ + ["text-classification", [`Je t'apprécie beaucoup. 
Je t'aime.`]], + ["token-classification", [`Mon nom est Wolfgang et je vis à Berlin`]], + [ + "question-answering", + [ + { + text: `Où est-ce que je vis?`, + context: `Mon nom est Wolfgang et je vis à Berlin`, + }, + ], + ], + ["translation", [`Mon nom est Wolfgang et je vis à Berlin`]], + [ + "summarization", + [ + `La tour fait 324 mètres (1,063 pieds) de haut, environ la même hauteur qu'un immeuble de 81 étages, et est la plus haute structure de Paris. Sa base est carrée, mesurant 125 mètres (410 pieds) sur chaque côté. Durant sa construction, la tour Eiffel surpassa le Washington Monument pour devenir la plus haute structure construite par l'homme dans le monde, un titre qu'elle conserva pendant 41 ans jusqu'à l'achèvement du Chrysler Building à New-York City en 1930. Ce fut la première structure à atteindre une hauteur de 300 mètres. Avec l'ajout d'une antenne de radiodiffusion au sommet de la tour Eiffel en 1957, celle-ci redevint plus haute que le Chrysler Building de 5,2 mètres (17 pieds). En excluant les transmetteurs, elle est la seconde plus haute stucture autoportante de France après le viaduc de Millau.`, + ], + ], + ["text-generation", [`Mon nom est Julien et j'aime`, `Mon nom est Thomas et mon principal`, `Il était une fois`]], + ["fill-mask", [`Paris est la de la France.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "C'est une personne heureuse", + sentences: [ + "C'est un chien heureux", + "C'est une personne très heureuse", + "Aujourd'hui est une journée ensoleillée", + ], + }, + ], + ], +]); + +const MAPPING_ES: PerLanguageMapping = new Map([ + ["text-classification", [`Te quiero. 
Te amo.`]], + ["token-classification", [`Me llamo Wolfgang y vivo en Berlin`]], + [ + "question-answering", + [ + { + text: `¿Dónde vivo?`, + context: `Me llamo Wolfgang y vivo en Berlin`, + }, + { + text: `¿Quién inventó el submarino?`, + context: `Isaac Peral fue un murciano que inventó el submarino`, + }, + { + text: `¿Cuántas personas hablan español?`, + context: `El español es el segundo idioma más hablado del mundo con más de 442 millones de hablantes`, + }, + ], + ], + [ + "translation", + [ + `Me llamo Wolfgang y vivo en Berlin`, + `Los ingredientes de una tortilla de patatas son: huevos, patatas y cebolla`, + ], + ], + [ + "summarization", + [ + `La torre tiene 324 metros (1.063 pies) de altura, aproximadamente la misma altura que un edificio de 81 pisos y la estructura más alta de París. Su base es cuadrada, mide 125 metros (410 pies) a cada lado. Durante su construcción, la Torre Eiffel superó al Washington Monument para convertirse en la estructura artificial más alta del mundo, un título que mantuvo durante 41 años hasta que el Chrysler Building en la ciudad de Nueva York se terminó en 1930. Fue la primera estructura en llegar Una altura de 300 metros. Debido a la adición de una antena de transmisión en la parte superior de la torre en 1957, ahora es más alta que el Chrysler Building en 5,2 metros (17 pies). 
Excluyendo los transmisores, la Torre Eiffel es la segunda estructura independiente más alta de Francia después del Viaducto de Millau.`, + ], + ], + [ + "text-generation", + [ + `Me llamo Julien y me gusta`, + `Me llamo Thomas y mi principal`, + `Me llamo Manuel y trabajo en`, + `Érase una vez,`, + `Si tú me dices ven, `, + ], + ], + ["fill-mask", [`Mi nombre es y vivo en Nueva York.`, `El español es un idioma muy en el mundo.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Esa es una persona feliz", + sentences: ["Ese es un perro feliz", "Esa es una persona muy feliz", "Hoy es un día soleado"], + }, + ], + ], +]); + +const MAPPING_RU: PerLanguageMapping = new Map([ + ["text-classification", [`Ты мне нравишься. Я тебя люблю`]], + ["token-classification", [`Меня зовут Вольфганг и я живу в Берлине`]], + [ + "question-answering", + [ + { + text: `Где живу?`, + context: `Меня зовут Вольфганг и я живу в Берлине`, + }, + ], + ], + ["translation", [`Меня зовут Вольфганг и я живу в Берлине`]], + [ + "summarization", + [ + `Высота башни составляет 324 метра (1063 фута), примерно такая же высота, как у 81-этажного здания, и самое высокое сооружение в Париже. Его основание квадратно, размером 125 метров (410 футов) с любой стороны. Во время строительства Эйфелева башня превзошла монумент Вашингтона, став самым высоким искусственным сооружением в мире, и этот титул она удерживала в течение 41 года до завершения строительство здания Крайслер в Нью-Йорке в 1930 году. Это первое сооружение которое достигло высоты 300 метров. Из-за добавления вещательной антенны на вершине башни в 1957 году она сейчас выше здания Крайслер на 5,2 метра (17 футов). 
За исключением передатчиков, Эйфелева башня является второй самой высокой отдельно стоящей структурой во Франции после виадука Мийо.`, + ], + ], + ["text-generation", [`Меня зовут Жюльен и`, `Меня зовут Томас и мой основной`, `Однажды`]], + ["fill-mask", [`Меня зовут и я инженер живущий в Нью-Йорке.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Это счастливый человек", + sentences: ["Это счастливая собака", "Это очень счастливый человек", "Сегодня солнечный день"], + }, + ], + ], +]); + +const MAPPING_UK: PerLanguageMapping = new Map([ + ["translation", [`Мене звати Вольфґанґ і я живу в Берліні.`]], + ["fill-mask", [`Мене звати .`]], +]); + +const MAPPING_IT: PerLanguageMapping = new Map([ + ["text-classification", [`Mi piaci. Ti amo`]], + [ + "token-classification", + [ + `Mi chiamo Wolfgang e vivo a Berlino`, + `Mi chiamo Sarah e vivo a Londra`, + `Mi chiamo Clara e vivo a Berkeley in California.`, + ], + ], + [ + "question-answering", + [ + { + text: `Dove vivo?`, + context: `Mi chiamo Wolfgang e vivo a Berlino`, + }, + { + text: `Dove vivo?`, + context: `Mi chiamo Sarah e vivo a Londra`, + }, + { + text: `Come mio chiamo?`, + context: `Mi chiamo Clara e vivo a Berkeley.`, + }, + ], + ], + ["translation", [`Mi chiamo Wolfgang e vivo a Berlino`, `Mi chiamo Sarah e vivo a Londra`]], + [ + "summarization", + [ + `La torre degli Asinelli è una delle cosiddette due torri di Bologna, simbolo della città, situate in piazza di porta Ravegnana, all'incrocio tra le antiche strade San Donato (ora via Zamboni), San Vitale, Maggiore e Castiglione. Eretta, secondo la tradizione, fra il 1109 e il 1119 dal nobile Gherardo Asinelli, la torre è alta 97,20 metri, pende verso ovest per 2,23 metri e presenta all'interno una scalinata composta da 498 gradini. Ancora non si può dire con certezza quando e da chi fu costruita la torre degli Asinelli. 
Si presume che la torre debba il proprio nome a Gherardo Asinelli, il nobile cavaliere di fazione ghibellina al quale se ne attribuisce la costruzione, iniziata secondo una consolidata tradizione l'11 ottobre 1109 e terminata dieci anni dopo, nel 1119.`, + ], + ], + [ + "text-generation", + [ + `Mi chiamo Loreto e mi piace`, + `Mi chiamo Thomas e il mio principale`, + `Mi chiamo Marianna, la mia cosa preferita`, + `Mi chiamo Clara e sono`, + `C'era una volta`, + ], + ], + ["fill-mask", [`Roma è la d'Italia.`, `Lo scopo della vita è .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Questa è una persona felice", + sentences: ["Questo è un cane felice", "Questa è una persona molto felice", "Oggi è una giornata di sole"], + }, + ], + ], +]); + +const MAPPING_FA: PerLanguageMapping = new Map([ + [ + "text-classification", + [`پروژه به موقع تحویل شد و همه چیز خوب بود.`, `سیب‌زمینی بی‌کیفیت بود.`, `قیمت و کیفیت عالی`, `خوب نبود اصلا`], + ], + [ + "token-classification", + [ + `این سریال به صورت رسمی در تاریخ دهم می ۲۰۱۱ توسط شبکه فاکس برای پخش رزرو شد.`, + `دفتر مرکزی شرکت پارس‌مینو در شهر اراک در استان مرکزی قرار دارد.`, + `وی در سال ۲۰۱۳ درگذشت و مسئول خاکسپاری و اقوامش برای او مراسم یادبود گرفتند.`, + ], + ], + [ + "question-answering", + [ + { + text: `من کجا زندگی میکنم؟`, + context: `نام من پژمان است و در گرگان زندگی میکنم.`, + }, + { + text: `نامم چیست و کجا زندگی می‌کنم؟`, + context: `اسمم سارا است و در آفریقای جنوبی زندگی میکنم.`, + }, + { + text: `نام من چیست؟`, + context: `من مریم هستم و در تبریز زندگی می‌کنم.`, + }, + { + text: `بیشترین مساحت جنگل آمازون در کدام کشور است؟`, + context: [ + "آمازون نام بزرگ‌ترین جنگل بارانی جهان است که در شمال آمریکای جنوبی قرار گرفته و بیشتر آن در خاک برزیل و پرو", + "جای دارد. 
بیش از نیمی از همه جنگل‌های بارانی باقی‌مانده در جهان در آمازون قرار دارد.", + "مساحت جنگل‌های آمازون ۵٫۵ میلیون کیلومتر مربع است که بین ۹ کشور تقسیم شده‌است.", + ].join("\n"), + }, + ], + ], + [ + "translation", + [ + "بیشتر مساحت جنگل‌های آمازون در حوضه آبریز رود آمازون و ۱۱۰۰ شاخه آن واقع شده‌است.", + "مردمان نَبَطی از هزاره‌های یکم و دوم پیش از میلاد در این منطقه زندگی می‌کردند.", + ], + ], + [ + "summarization", + [ + [ + "شاهنامه اثر حکیم ابوالقاسم فردوسی توسی، حماسه‌ای منظوم، بر حسب دست نوشته‌های ", + "موجود دربرگیرنده نزدیک به ۵۰٬۰۰۰ بیت تا نزدیک به ۶۱٬۰۰۰ بیت و یکی از ", + "بزرگ‌ترین و برجسته‌ترین سروده‌های حماسی جهان است که سرایش آن دست‌آوردِ ", + "دست‌کم سی سال کارِ پیوستهٔ این سخن‌سرای نامدار ایرانی است. موضوع این شاهکار ادبی،", + " افسانه‌ها و تاریخ ایران از آغاز تا حملهٔ عرب‌ها به ایران در سدهٔ هفتم میلادی است", + " (شاهنامه از سه بخش اسطوره، پهلوانی و تاریخی تشکیل شده‌است) که در چهار", + " دودمان پادشاهیِ پیشدادیان، کیانیان، اشکانیان و ساسانیان گنجانده می‌شود.", + " شاهنامه بر وزن «فَعولُن فعولن فعولن فَعَلْ»، در بحرِ مُتَقارِبِ مثمَّنِ محذوف نگاشته شده‌است.", + "هنگامی که زبان دانش و ادبیات در ایران زبان عربی بود، فردوسی، با سرودن شاهنامه", + " با ویژگی‌های هدف‌مندی که داشت، زبان پارسی را زنده و پایدار کرد. یکی از ", + " بن‌مایه‌های مهمی که فردوسی برای سرودن شاهنامه از آن استفاده کرد،", + " شاهنامهٔ ابومنصوری بود. شاهنامه نفوذ بسیاری در جهت‌گیری ", + " فرهنگ فارسی و نیز بازتاب‌های شکوه‌مندی در ادبیات جهان داشته‌است و شاعران ", + " بزرگی مانند گوته و ویکتور هوگو از آن به نیکی یاد کرده‌اند.", + ].join("\n"), + ], + ], + ["text-generation", ["اسم من نازنین است و من", "روزی روزگاری"]], + [ + "fill-mask", + [ + `زندگی یک سوال است و این که چگونه کنیم پاسخ این سوال!`, + `زندگی از مرگ پرسید: چرا همه من را دارند اما از تو متنفرند؟`, + ], + ], +]); + +const MAPPING_AR: PerLanguageMapping = new Map([ + ["text-classification", [`أحبك. 
أهواك`]], + [ + "token-classification", + [`إسمي محمد وأسكن في برلين`, `إسمي ساره وأسكن في لندن`, `إسمي سامي وأسكن في القدس في فلسطين.`], + ], + [ + "question-answering", + [ + { + text: `أين أسكن؟`, + context: `إسمي محمد وأسكن في بيروت`, + }, + { + text: `أين أسكن؟`, + context: `إسمي ساره وأسكن في لندن`, + }, + { + text: `ما اسمي؟`, + context: `اسمي سعيد وأسكن في حيفا.`, + }, + { + text: `ما لقب خالد بن الوليد بالعربية؟`, + context: `خالد بن الوليد من أبطال وقادة الفتح الإسلامي وقد تحدثت عنه اللغات الإنجليزية والفرنسية والإسبانية ولقب بسيف الله المسلول.`, + }, + ], + ], + ["translation", [`إسمي محمد وأسكن في برلين`, `إسمي ساره وأسكن في لندن`]], + [ + "summarization", + [ + `تقع الأهرامات في الجيزة قرب القاهرة في مصر وقد بنيت منذ عدة قرون، وقيل إنها كانت قبورا للفراعنة وتم بناؤها بعملية هندسية رائعة واستقدمت حجارتها من جبل المقطم وتم نقلها بالسفن أو على الرمل، وما تزال شامخة ويقصدها السياح من كافة أرجاء المعمورة.`, + ], + ], + [ + "text-generation", + [ + `إسمي محمد وأحب أن`, + `دع المكارم لا ترحل لبغيتها - واقعد فإنك أنت الطاعم الكاسي.`, + `لماذا نحن هنا؟`, + `القدس مدينة تاريخية، بناها الكنعانيون في`, + `كان يا ما كان في قديم الزمان`, + ], + ], + ["fill-mask", [`باريس فرنسا.`, `فلسفة الحياة هي .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "هذا شخص سعيد", + sentences: ["هذا كلب سعيد", "هذا شخص سعيد جدا", "اليوم هو يوم مشمس"], + }, + ], + ], +]); + +const MAPPING_BN: PerLanguageMapping = new Map([ + ["text-classification", [`বাঙালির ঘরে ঘরে আজ নবান্ন উৎসব।`]], + [ + "token-classification", + [`আমার নাম জাহিদ এবং আমি ঢাকায় বাস করি।`, `তিনি গুগলে চাকরী করেন।`, `আমার নাম সুস্মিতা এবং আমি কলকাতায় বাস করি।`], + ], + ["translation", [`আমার নাম জাহিদ, আমি রংপুরে বাস করি।`, `আপনি কী আজকে বাসায় আসবেন?`]], + [ + "summarization", + [ + `‘ইকোনমিস্ট’ লিখেছে, অ্যান্টিবডির চার মাস স্থায়ী হওয়ার খবরটি দুই কারণে আনন্দের। অ্যান্টিবডি যত দিন পর্যন্ত শরীরে টিকবে, তত দিন সংক্রমণ থেকে সুরক্ষিত থাকা সম্ভব। অর্থাৎ, এমন এক টিকার প্রয়োজন হবে, যা অ্যান্টিবডির উত্পাদনকে 
প্ররোচিত করতে পারে এবং দীর্ঘস্থায়ী সুরক্ষা দিতে পারে। এগুলো খুঁজে বের করাও সহজ। এটি আভাস দেয়, ব্যাপক হারে অ্যান্টিবডি শনাক্তকরণ ফলাফল মোটামুটি নির্ভুল হওয়া উচিত। দ্বিতীয় আরেকটি গবেষণার নেতৃত্ব দিয়েছেন যুক্তরাজ্যের মেডিকেল রিসার্চ কাউন্সিলের (এমআরসি) ইমিউনোলজিস্ট তাও দং। তিনি টি-সেল শনাক্তকরণে কাজ করেছেন। টি-সেল শনাক্তকরণের প্রক্রিয়া অবশ্য অ্যান্টিবডির মতো এত আলোচিত নয়। তবে সংক্রমণের বিরুদ্ধে লড়াই এবং দীর্ঘমেয়াদি সুরক্ষায় সমান গুরুত্বপূর্ণ ভূমিকা পালন করে। গবেষণাসংক্রান্ত নিবন্ধ প্রকাশিত হয়েছে ‘নেচার ইমিউনোলজি’ সাময়িকীতে। তাঁরা বলছেন, গবেষণার ক্ষেত্রে কোভিড-১৯ মৃদু সংক্রমণের শিকার ২৮ ব্যক্তির রক্তের নমুনা, ১৪ জন গুরুতর অসুস্থ ও ১৬ জন সুস্থ ব্যক্তির রক্তের নমুনা পরীক্ষা করেছেন। গবেষণা নিবন্ধে বলা হয়, সংক্রমিত ব্যক্তিদের ক্ষেত্রে টি-সেলের তীব্র প্রতিক্রিয়া তাঁরা দেখেছেন। এ ক্ষেত্রে মৃদু ও গুরুতর অসুস্থ ব্যক্তিদের ক্ষেত্রে প্রতিক্রিয়ার ভিন্নতা পাওয়া গেছে।`, + ], + ], + ["text-generation", [`আমি রতন এবং আমি`, `তুমি যদি চাও তবে`, `মিথিলা আজকে বড্ড`]], + ["fill-mask", [`আমি বাংলায় গাই।`, `আমি খুব ভালোবাসি। `]], + [ + "question-answering", + [ + { + text: `প্রথম এশিয়া কাপ ক্রিকেট টুর্নামেন্ট কোথায় অনুষ্ঠিত হয় ?`, + context: `প্রথম টুর্নামেন্ট অনুষ্ঠিত হয় ১৯৮৪ সালে সংযুক্ত আরব আমিরাত এর শারজাহ তে যেখানে কাউন্সিলের মূল অফিস ছিল (১৯৯৫ পর্যন্ত)। ভারত শ্রীলঙ্কার সাথে আন্তরিকতাহীন ক্রিকেট সম্পর্কের কারণে ১৯৮৬ সালের টুর্নামেন্ট বর্জন করে। ১৯৯৩ সালে ভারত ও পাকিস্তান এর মধ্যে রাজনৈতিক অস্থিরতার কারণে এটি বাতিল হয়ে যায়। শ্রীলঙ্কা এশিয়া কাপ শুরু থেকে অংশ গ্রহণ করে আসছে। আন্তর্জাতিক ক্রিকেট কাউন্সিল নিয়ম করে দিয়েছে যে এশিয়া কাপের সকল খেলা অনুষ্ঠিত হবে অফিসিয়াল একদিনের আন্তর্জাতিক ক্রিকেট হিসেবে। এসিসি ঘোষনা অনুযায়ী প্রতি দুই বছর পর পর টুর্নামেন্ট অনুষ্ঠিত হয় ২০০৮ সাল থেকে।`, + }, + { + text: `ভারতীয় বাঙালি কথাসাহিত্যিক মহাশ্বেতা দেবীর মৃত্যু কবে হয় ?`, + context: `২০১৬ সালের ২৩ জুলাই হৃদরোগে আক্রান্ত হয়ে মহাশ্বেতা দেবী কলকাতার বেল ভিউ ক্লিনিকে ভর্তি হন। সেই বছরই ২৮ জুলাই একাধিক অঙ্গ বিকল হয়ে তাঁর মৃত্যু ঘটে। তিনি মধুমেহ, সেপ্টিসেমিয়া ও মূত্র সংক্রমণ 
রোগেও ভুগছিলেন।`, + }, + { + text: `মাস্টারদা সূর্যকুমার সেনের বাবার নাম কী ছিল ?`, + context: `সূর্য সেন ১৮৯৪ সালের ২২ মার্চ চট্টগ্রামের রাউজান থানার নোয়াপাড়ায় অর্থনৈতিক ভাবে অস্বচ্ছল পরিবারে জন্মগ্রহণ করেন। তাঁর পিতার নাম রাজমনি সেন এবং মাতার নাম শশী বালা সেন। রাজমনি সেনের দুই ছেলে আর চার মেয়ে। সূর্য সেন তাঁদের পরিবারের চতুর্থ সন্তান। দুই ছেলের নাম সূর্য ও কমল। চার মেয়ের নাম বরদাসুন্দরী, সাবিত্রী, ভানুমতী ও প্রমিলা। শৈশবে পিতা মাতাকে হারানো সূর্য সেন কাকা গৌরমনি সেনের কাছে মানুষ হয়েছেন। সূর্য সেন ছেলেবেলা থেকেই খুব মনোযোগী ভাল ছাত্র ছিলেন এবং ধর্মভাবাপন্ন গম্ভীর প্রকৃতির ছিলেন।`, + }, + ], + ], + [ + "sentence-similarity", + [ + { + source_sentence: "সে একজন সুখী ব্যক্তি", + sentences: ["সে হ্যাপি কুকুর", "সে খুব সুখী মানুষ", "আজ একটি রৌদ্রোজ্জ্বল দিন"], + }, + ], + ], +]); + +const MAPPING_MN: PerLanguageMapping = new Map([ + ["text-classification", [`Би чамд хайртай`]], + [ + "token-classification", + [ + `Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, + `Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`, + `Манай улс таван хошуу малтай.`, + ], + ], + [ + "question-answering", + [ + { + text: `Та хаана амьдардаг вэ?`, + context: `Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, + }, + { + text: `Таныг хэн гэдэг вэ?`, + context: `Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, + }, + { + text: `Миний нэрийг хэн гэдэг вэ?`, + context: `Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`, + }, + ], + ], + ["translation", [`Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, `Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`]], + [ + "summarization", + [ + `Монгол Улс (1992 оноос хойш) — дорно болон төв Азид оршдог бүрэн эрхт улс. Хойд талаараа Орос, бусад талаараа Хятад улстай хиллэдэг далайд гарцгүй орон. Нийслэл — Улаанбаатар хот. Алтайн нуруунаас Хянган, Соёноос Говь хүрсэн 1 сая 566 мянган км2 уудам нутагтай, дэлхийд нутаг дэвсгэрийн хэмжээгээр 19-рт жагсдаг. 2015 оны эхэнд Монгол Улсын хүн ам 3 сая хүрсэн (135-р олон). 
Үндсэндээ монгол үндэстэн (95 хувь), мөн хасаг, тува хүн байна. 16-р зуунаас хойш буддын шашин, 20-р зуунаас шашингүй байдал дэлгэрсэн ба албан хэрэгт монгол хэлээр харилцана.`, + ], + ], + [ + "text-generation", + [`Намайг Дорж гэдэг. Би`, `Хамгийн сайн дуучин бол`, `Миний дуртай хамтлаг бол`, `Эрт урьдын цагт`], + ], + ["fill-mask", [`Монгол улсын Улаанбаатар хотоос ярьж байна.`, `Миний амьдралын зорилго бол .`]], + [ + "automatic-speech-recognition", + [ + { + label: `Common Voice Train Example`, + src: `https://cdn-media.huggingface.co/common_voice/train/common_voice_mn_18577472.wav`, + }, + { + label: `Common Voice Test Example`, + src: `https://cdn-media.huggingface.co/common_voice/test/common_voice_mn_18577346.wav`, + }, + ], + ], + [ + "text-to-speech", + [ + `Би Монгол улсын иргэн.`, + `Энэхүү жишээ нь цаанаа ямар ч утга агуулаагүй болно`, + `Сар шинэдээ сайхан шинэлэж байна уу?`, + ], + ], + [ + "sentence-similarity", + [ + { + source_sentence: "Энэ бол аз жаргалтай хүн юм", + sentences: ["Энэ бол аз жаргалтай нохой юм", "Энэ бол маш их аз жаргалтай хүн юм", "Өнөөдөр нарлаг өдөр байна"], + }, + ], + ], +]); + +const MAPPING_SI: PerLanguageMapping = new Map([ + ["translation", [`සිංහල ඉතා අලංකාර භාෂාවකි.`, `මෙම තාක්ෂණය භාවිතා කරන ඔබට ස්තූතියි.`]], + ["fill-mask", [`මම ගෙදර .`, ` ඉගෙනීමට ගියාය.`]], +]); + +const MAPPING_DE: PerLanguageMapping = new Map([ + [ + "question-answering", + [ + { + text: `Wo wohne ich?`, + context: `Mein Name ist Wolfgang und ich lebe in Berlin`, + }, + { + text: `Welcher Name wird auch verwendet, um den Amazonas-Regenwald auf Englisch zu beschreiben?`, + context: `Der Amazonas-Regenwald, auf Englisch auch als Amazonien oder Amazonas-Dschungel bekannt, ist ein feuchter Laubwald, der den größten Teil des Amazonas-Beckens Südamerikas bedeckt. Dieses Becken umfasst 7.000.000 Quadratkilometer (2.700.000 Quadratmeilen), von denen 5.500.000 Quadratkilometer (2.100.000 Quadratmeilen) vom Regenwald bedeckt sind. 
Diese Region umfasst Gebiete von neun Nationen. Der größte Teil des Waldes befindet sich in Brasilien mit 60% des Regenwaldes, gefolgt von Peru mit 13%, Kolumbien mit 10% und geringen Mengen in Venezuela, Ecuador, Bolivien, Guyana, Suriname und Französisch-Guayana. Staaten oder Abteilungen in vier Nationen enthalten "Amazonas" in ihren Namen. Der Amazonas repräsentiert mehr als die Hälfte der verbleibenden Regenwälder des Planeten und umfasst den größten und artenreichsten tropischen Regenwald der Welt mit geschätzten 390 Milliarden Einzelbäumen, die in 16.000 Arten unterteilt sind.`, + }, + ], + ], + [ + "sentence-similarity", + [ + { + source_sentence: "Das ist eine glückliche Person", + sentences: [ + "Das ist ein glücklicher Hund", + "Das ist eine sehr glückliche Person", + "Heute ist ein sonniger Tag", + ], + }, + ], + ], +]); + +const MAPPING_DV: PerLanguageMapping = new Map([ + ["text-classification", [`އަހަރެން ގަޔާވޭ. އަހަރެން ލޯބިވޭ`]], + [ + "token-classification", + [ + `އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`, + `އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`, + `އަހަރެންގެ ނަމަކީ އައިޝާ އަދި އަހަރެން ދިރިއުޅެނީ ފޭދޫ، އައްޑޫގަ`, + ], + ], + [ + "question-answering", + [ + { + text: `އަހަރެން ދިރިއުޅެނީ ކޮންތާކު؟`, + context: `އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`, + }, + { + text: `އަހަރެން ދިރިއުޅެނީ ކޮންތާކު؟`, + context: `އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`, + }, + { + text: `އަހަރެންގެ ނަމަކީ ކޮބާ؟`, + context: `އަހަރެންގެ ނަމަކީ އައިޝާ އަދި އަހަރެން ދިރިއުޅެނީ ފޭދޫގަ`, + }, + { + text: `އެމޭޒަން ރެއިންފޮރެސްޓް ސިފަކޮށްދިނުމަށް އިނގިރޭސި ބަހުން ބޭނުންކުރާނީ ކޮންނަމެއް؟`, + context: `އެމޭޒަން ރެއިންފޮރެސްޓް (ޕޯޗުޖީޒް: ފްލޮރެސްޓާ އެމަސޮނިކާ ނުވަތަ އެމަސޮނިއާ؛ ސްޕެނިޝް: ސެލްވާ އެމަސޮނިކާ, އެމަސޮނިއާ ނޫނީ އާންމުކޮށް އެމަޒޯނިއާ؛ ފްރެންޗް: ފޮރޭ އެމެޒޮނިއެން؛ ޑަޗް: އެމެޒޯންރޭގެވައުޑް)، އިގިރޭސި ބަހުން ބުނާ އެމެޒޯނިއާ ނުވަތަ ދަ އެމޭޒަން ޖަންގަލް އަކީ, ސައުތު އެމެރިކާގެ އެމޭޒަން 
ބޭސިން ސަރަހައްދުގެ ބޮޑުބައެއްގައި ހިމެނޭ މޮއިސްޓް ބޮރޯޑްލީފް ފޮރެސްޓެއެކެވެ. އެމޭޒަން ބޭސިން ސަރަހައްދުގެ ބޮޑު މިނަކީ 7 މިލިއަން އަކަ ކިލޯމީޓަރ (2.7 މިލިއަން އަކަ މައިލް(. މީގެ ތެރެއިން 5.5 މިލިއަން އަކަ ކިލޯމީޓަރ (2.1 މިލިއަން އަކަ މައިލް) އަކީ މި ފޮރެސްޓެވެ. މި ސަރަހައްދުގައި 9 ގައުމަކަށް ނިސްބަތްވާ ޓެރިޓަރީ ހިމެނެއެވެ. 60% އާއިއެކެ އެންމެ ބޮޑު ބައެއް ނިސްބަތްވަނީ ބްރެޒިލްއަށެވެ. އޭގެ ފަހުތުން 13% އާއެކު ޕެރޫ އާއި 10% އާއެކު ކޮލަމްބިއާ އަދި ކުޑަ ބައެއް ހިމެނޭ ގޮތުން ވެނެޒުއެލާ, އެކްއަޑޯ, ބޮލިވިއާ, ގުޔާނާ, ސުރިނާމް އަދި ފްރެންޗް ގްއާނާ އަށް ވެސް ނިސްބަތްވެއެވެ. މީގެ ތެރެއިން 4 ގައުމެއްގައި "އެމެޒޮނާސް" ހިމަނައިގެން ސްޓޭޓް ނުވަތަ ޑިޕާޓްމަންޓް އަކަށް ނަންދީފައިވެއެވެ. މުޅި ދުނިޔޭގައި ބާކީ ހުރި ރެއިންފޮރެސްޓްގެ ތެރެއިން ދެބައިކުޅަ އެއްބަޔަށްވުރެބޮޑުވަރެއް އެމޭޒޮން ރެއިންފޮރެސްޓް ހިއްސާކުރެއެވެ. މިއީ މުޅި ދުނިޔެއިން އެންމޮ ބޮޑު އަދި އެންމެ ބައޮޑައިވަރސް ރެއިންފޮރެސްޓް ޓްރެކްޓެވެ. ލަފާކުރެވޭ ގޮތުން 16 ހާސް ސްޕީޝީސްއަށް ބެހިގެންވާ 390 މިލިއަން ވައްތަރުގެ ގަސް މިތާގައި ހިމެނެއެވެ`, + }, + ], + ], + [ + "translation", + [ + `އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`, + `އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`, + ], + ], + [ + "summarization", + [ + `ޓަވަރުގެ އުސްމިނަކީ 324 މީޓަރު، އެއީ ގާތްގަނޑަކަށް 81 ބުރީގެ އިމާރާތަކާއި އެއްވަރެވެ. އެއީ ޕެރިސްގައި ހުރި އެންމެ އުސް އިމާރާތެވެ. އޭގެ ހަތަރެސްކަނަށް ހުރި ބުޑުގެ ދިގުމިނަކީ ކޮންމެ ފަރާތަކުން 125 މީޓަރެވެ. (410 ފޫޓު) އައިފިލް ޓަވަރު ބިނާކުރި އިރު، ވޮޝިންގްޓަން މޮނިއުމެންޓްގެ އުސްމިން ފަހަނައަޅާ ގޮސް، ދުނިޔޭގައި މީހުން އުފެއްދި ތަންތަނުގެ ތެރެއިން އެންމެ އުސް ތަނުގެ ލަގަބު ލިބުނެވެ. އަދި 1930 ގައި ނިއު ޔޯކްގެ ކްރައިސްލަރ ބިލްޑިންގް ބިނާކުރުމާއި ހަމައަށް 41 އަހަރު ވަންދެން މިލަގަބު ހިފެހެއްޓިއެވެ. މިއީ 300 މީޓަރަށް ވުރެ އުސްކޮށް އިމާރާތްކުރެވުނު ފުރަތަމަ ތަނެވެ. 1957 ގައި ޓަވަރުގެ އެންމެ މަތީގައި ހަރުކުރެވުނު ބްރޯޑްކާސްޓިންގ އޭރިއަލްގެ ސަބަބުން މިހާރު މި ޓަވަރު ކްރައިސްލަރ ބިލްޑިންގއަށް ވުރެ 5.2 މީޓަރ (17 ފޫޓު) އުހެވެ. 
މި ޓްރާންސްމިޓަރު ނުލާ، އައިފިލް ޓަވަރަކީ، މިލާއު ވިއާޑަކްޓަށް ފަހު ފްރާންސްގައި ހުރި 2 ވަނައަށް އެންމެ އުސް ފްރީސްޓޭންޑިންގ އިމާރާތެވެ`, + ], + ], + [ + "text-generation", + [ + `އަހަރެންގެ ނަމަކީ ޔޫސުފް އަދި އަހަރެންގެ މައިގަނޑު`, + `އަހަރެންގެ ނަމަކީ މަރިއަމް، އަހަރެން އެންމެ ގަޔާވާ`, + `އަހަރެންގެ ނަމަކީ ފާތުމަތު އަދި އަހަރެން`, + `،އެއް ޒަމާނެއްގައި`, + ], + ], + ["fill-mask", [`. މާލެ އަކީ ދިވެހިރާއްޖޭގެ`, `ގަރުދިޔައަކީ ދިވެހިންގެ މެދުގައި ކެއުމެއް.`]], +]); + +export const MAPPING_DEFAULT_WIDGET = new Map([ + ["en", MAPPING_EN], + ["zh", MAPPING_ZH], + ["fr", MAPPING_FR], + ["es", MAPPING_ES], + ["ru", MAPPING_RU], + ["uk", MAPPING_UK], + ["it", MAPPING_IT], + ["fa", MAPPING_FA], + ["ar", MAPPING_AR], + ["bn", MAPPING_BN], + ["mn", MAPPING_MN], + ["si", MAPPING_SI], + ["de", MAPPING_DE], + ["dv", MAPPING_DV], +]); diff --git a/data/node_modules/@huggingface/tasks/src/hardware.ts b/data/node_modules/@huggingface/tasks/src/hardware.ts new file mode 100644 index 0000000000000000000000000000000000000000..d261b08083dcaae3d584cb39ee5eb961feafe077 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/hardware.ts @@ -0,0 +1,425 @@ +/** + * Biden AI Executive Order + * https://www.whitehouse.gov/briefing-room/presidential-actions/2023/10/30/executive-order-on-the-safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence/ + */ +export const TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL = 10 ** 14; +export const TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL_BIOLOGY = 10 ** 11; +export const TFLOPS_THRESHOLD_WHITE_HOUSE_CLUSTER = 10 ** 8; + +/** + * EU AI Act + * https://ec.europa.eu/commission/presscorner/detail/en/qanda_21_1683 + */ +export const TFLOPS_THRESHOLD_EU_AI_ACT_MODEL_TRAINING_TOTAL = 10 ** 13; + +export interface HardwareSpec { + /** + * Approximate value, in FP16 whenever possible. + * This is only approximate/theoretical and shouldn't be taken too seriously. 
+ * Currently the CPU values are from cpu-monkey.com + * while the GPU values are from techpowerup.com + * + * Note to reviewers: I got fed up with data entry, + * and HuggingChat running Llama3 with Web search was failing a bit, + * so some of those values might be slightly inaccurate. Forgive me and please feel free to improve. + */ + tflops: number; + /** + * If an array is specified, options of memory size (can be VRAM, unified RAM) + * e.g. an A100 exists in 40 or 80 GB. + */ + memory?: number[]; +} + +export const DEFAULT_MEMORY_OPTIONS = [8, 16, 24, 32, 40, 48, 64, 80, 96, 128, 256, 512]; + +export const SKUS = { + GPU: { + NVIDIA: { + H100: { + tflops: 267.6, + memory: [80], + }, + L40: { + tflops: 90.52, + memory: [48], + }, + "RTX 6000 Ada": { + tflops: 91.1, + memory: [48], + }, + "RTX 5880 Ada": { + tflops: 69.3, + memory: [48], + }, + "RTX 5000 Ada": { + tflops: 65.3, + memory: [32], + }, + "RTX 4500 Ada": { + tflops: 39.6, + memory: [24], + }, + "RTX 4000 Ada": { + tflops: 26.7, + memory: [20], + }, + "RTX 4000 SFF Ada": { + tflops: 19.2, + memory: [20], + }, + "RTX 2000 Ada": { + tflops: 12.0, + memory: [16], + }, + A100: { + tflops: 77.97, + memory: [80, 40], + }, + A40: { + tflops: 37.42, + memory: [48], + }, + A10: { + tflops: 31.24, + memory: [24], + }, + "RTX 4090": { + tflops: 82.58, + memory: [24], + }, + "RTX 4090D": { + tflops: 79.49, + memory: [24], + }, + "RTX 4080 SUPER": { + tflops: 52.2, + memory: [16], + }, + "RTX 4080": { + tflops: 48.7, + memory: [16], + }, + "RTX 4070": { + tflops: 29.15, + memory: [12], + }, + "RTX 4070 Ti": { + tflops: 40.09, + memory: [12], + }, + "RTX 4070 Super": { + tflops: 35.48, + memory: [12], + }, + "RTX 4070 Ti Super": { + tflops: 44.1, + memory: [16], + }, + "RTX 4060": { + tflops: 15.11, + memory: [8], + }, + "RTX 4060 Ti": { + tflops: 22.06, + memory: [8, 16], + }, + "RTX 3090": { + tflops: 35.58, + memory: [24], + }, + "RTX 3090 Ti": { + tflops: 40, + memory: [24], + }, + "RTX 3080": { + tflops: 30.6, 
+ memory: [12, 10], + }, + "RTX 3080 Ti": { + tflops: 34.1, + memory: [12], + }, + "RTX 3070": { + tflops: 20.31, + memory: [8], + }, + "RTX 3070 Ti": { + tflops: 21.75, + memory: [8], + }, + "RTX 3070 Ti Laptop": { + tflops: 16.6, + memory: [8], + }, + "RTX 3060 Ti": { + tflops: 16.2, + memory: [8], + }, + "RTX 3060": { + tflops: 12.74, + memory: [12, 8], + }, + "RTX 2070": { + tflops: 14.93, + memory: [8], + }, + "RTX 3050 Mobile": { + tflops: 7.639, + memory: [6], + }, + "RTX 2060 Mobile": { + tflops: 9.22, + memory: [6], + }, + "GTX 1080 Ti": { + tflops: 11.34, // float32 (GPU does not support native float16) + memory: [11], + }, + "GTX 1070 Ti": { + tflops: 8.2, // float32 (GPU does not support native float16) + memory: [8], + }, + "RTX Titan": { + tflops: 32.62, + memory: [24], + }, + "GTX 1660": { + tflops: 10.05, + memory: [6], + }, + "GTX 1650 Mobile": { + tflops: 6.39, + memory: [4], + }, + T4: { + tflops: 65.13, + memory: [16], + }, + V100: { + tflops: 28.26, + memory: [32, 16], + }, + "Quadro P6000": { + tflops: 12.63, // float32 (GPU does not support native float16) + memory: [24], + }, + P40: { + tflops: 11.76, // float32 (GPU does not support native float16) + memory: [24], + }, + }, + AMD: { + MI300: { + tflops: 383.0, + memory: [192], + }, + MI250: { + tflops: 362.1, + memory: [128], + }, + MI210: { + tflops: 181.0, + memory: [64], + }, + MI100: { + tflops: 184.6, + memory: [32], + }, + "RX 7900 XTX": { + tflops: 122.8, + memory: [24], + }, + "RX 7900 XT": { + tflops: 103.0, + memory: [20], + }, + "RX 7900 GRE": { + tflops: 91.96, + memory: [16], + }, + "RX 7800 XT": { + tflops: 74.65, + memory: [16], + }, + "RX 7700 XT": { + tflops: 70.34, + memory: [12], + }, + "RX 7600 XT": { + tflops: 45.14, + memory: [16, 8], + }, + "RX 6950 XT": { + tflops: 47.31, + memory: [16], + }, + "RX 6800": { + tflops: 32.33, + memory: [16], + }, + "Radeon Pro VII": { + tflops: 26.11, + memory: [16], + }, + }, + }, + CPU: { + Intel: { + "Xeon 4th Generation (Sapphire 
Rapids)": { + tflops: 1.3, + }, + "Xeon 3th Generation (Ice Lake)": { + tflops: 0.8, + }, + "Xeon 2th Generation (Cascade Lake)": { + tflops: 0.55, + }, + "Intel Core 13th Generation (i9)": { + tflops: 0.85, + }, + "Intel Core 13th Generation (i7)": { + tflops: 0.82, + }, + "Intel Core 13th Generation (i5)": { + tflops: 0.68, + }, + "Intel Core 13th Generation (i3)": { + tflops: 0.57, + }, + "Intel Core 12th Generation (i9)": { + tflops: 0.79, + }, + "Intel Core 12th Generation (i7)": { + tflops: 0.77, + }, + "Intel Core 12th Generation (i5)": { + tflops: 0.65, + }, + "Intel Core 12th Generation (i3)": { + tflops: 0.53, + }, + "Intel Core 11th Generation (i9)": { + tflops: 0.7, + }, + "Intel Core 11th Generation (i7)": { + tflops: 0.6, + }, + "Intel Core 11th Generation (i5)": { + tflops: 0.5, + }, + "Intel Core 11th Generation (i3)": { + tflops: 0.35, + }, + "Intel Core 10th Generation (i9)": { + tflops: 0.46, + }, + "Intel Core 10th Generation (i7)": { + tflops: 0.46, + }, + "Intel Core 10th Generation (i5)": { + tflops: 0.46, + }, + "Intel Core 10th Generation (i3)": { + tflops: 0.44, + }, + }, + AMD: { + "EPYC 4th Generation (Genoa)": { + tflops: 5, + }, + "EPYC 3th Generation (Milan)": { + tflops: 2.4, + }, + "EPYC 2th Generation (Rome)": { + tflops: 0.6, + }, + "EPYC 1st Generation (Naples)": { + tflops: 0.6, + }, + "Ryzen Zen4 7000 (Ryzen 9)": { + tflops: 0.56, + }, + "Ryzen Zen4 7000 (Ryzen 7)": { + tflops: 0.56, + }, + "Ryzen Zen4 7000 (Ryzen 5)": { + tflops: 0.56, + }, + "Ryzen Zen3 5000 (Ryzen 9)": { + tflops: 1.33, + }, + "Ryzen Zen3 5000 (Ryzen 7)": { + tflops: 1.33, + }, + "Ryzen Zen3 5000 (Ryzen 5)": { + tflops: 0.72, + }, + "Ryzen Zen 2 3000 (Threadripper)": { + tflops: 0.72, + }, + "Ryzen Zen 2 3000 (Ryzen 9)": { + tflops: 0.72, + }, + "Ryzen Zen 2 3000 (Ryzen 7)": { + tflops: 0.72, + }, + "Ryzen Zen 2 3000 (Ryzen 5)": { + tflops: 0.72, + }, + "Ryzen Zen 2 3000 (Ryzen 3)": { + tflops: 0.72, + }, + }, + }, + "Apple Silicon": { + "-": { + "Apple M1": 
{ + tflops: 2.6, + memory: [8, 16], + }, + "Apple M1 Pro": { + tflops: 5.2, + memory: [16, 24, 32], + }, + "Apple M1 Max": { + tflops: 10.4, + memory: [16, 24, 32, 64], + }, + "Apple M1 Ultra": { + tflops: 21, + memory: [16, 24, 32, 64, 96, 128], + }, + "Apple M2": { + tflops: 3.6, + memory: [8, 16, 24], + }, + "Apple M2 Pro": { + tflops: 13.6, + memory: [16, 24, 32], + }, + "Apple M2 Max": { + tflops: 13.49, + memory: [32, 64, 96], + }, + "Apple M2 Ultra": { + tflops: 27.2, + memory: [64, 96, 128, 192], + }, + "Apple M3": { + tflops: 2.84, + memory: [8, 16, 24], + }, + "Apple M3 Pro": { + tflops: 14, + memory: [18, 36], + }, + "Apple M3 Max": { + tflops: 14.2, + memory: [36, 48, 64, 96, 128], + }, + }, + }, +} satisfies Record>>; + +export type SkuType = keyof typeof SKUS; diff --git a/data/node_modules/@huggingface/tasks/src/index.ts b/data/node_modules/@huggingface/tasks/src/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..213b324753277357a2eedb8cd620674e5e6b33ef --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/index.ts @@ -0,0 +1,53 @@ +export { LIBRARY_TASK_MAPPING } from "./library-to-tasks"; +export { MAPPING_DEFAULT_WIDGET } from "./default-widget-inputs"; +export type { TaskData, TaskDemo, TaskDemoEntry, ExampleRepo } from "./tasks"; +export * from "./tasks"; +export { + PIPELINE_DATA, + PIPELINE_TYPES, + type WidgetType, + type PipelineType, + type PipelineData, + type Modality, + MODALITIES, + MODALITY_LABELS, + SUBTASK_TYPES, + PIPELINE_TYPES_SET, +} from "./pipelines"; +export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, MODEL_LIBRARIES_UI_ELEMENTS } from "./model-libraries"; +export type { LibraryUiElement, ModelLibraryKey } from "./model-libraries"; +export type { ModelData, TransformersInfo } from "./model-data"; +export type { AddedToken, SpecialTokensMap, TokenizerConfig } from "./tokenizer-data"; +export type { + WidgetExample, + WidgetExampleAttribute, + WidgetExampleAssetAndPromptInput, + 
WidgetExampleAssetAndTextInput, + WidgetExampleAssetAndZeroShotInput, + WidgetExampleAssetInput, + WidgetExampleChatInput, + WidgetExampleSentenceSimilarityInput, + WidgetExampleStructuredDataInput, + WidgetExampleTableDataInput, + WidgetExampleTextAndContextInput, + WidgetExampleTextAndTableInput, + WidgetExampleTextInput, + WidgetExampleZeroShotTextInput, + WidgetExampleOutput, + WidgetExampleOutputUrl, + WidgetExampleOutputLabels, + WidgetExampleOutputAnswerScore, + WidgetExampleOutputText, +} from "./widget-example"; +export { SPECIAL_TOKENS_ATTRIBUTES } from "./tokenizer-data"; + +import * as snippets from "./snippets"; +export { snippets }; + +export { SKUS, DEFAULT_MEMORY_OPTIONS } from "./hardware"; +export type { HardwareSpec, SkuType } from "./hardware"; +export { LOCAL_APPS } from "./local-apps"; +export type { LocalApp, LocalAppKey } from "./local-apps"; + +export { DATASET_LIBRARIES_UI_ELEMENTS } from "./dataset-libraries"; +export type { DatasetLibraryUiElement, DatasetLibraryKey } from "./dataset-libraries"; diff --git a/data/node_modules/@huggingface/tasks/src/library-to-tasks.ts b/data/node_modules/@huggingface/tasks/src/library-to-tasks.ts new file mode 100644 index 0000000000000000000000000000000000000000..c8411fbaab53e78dedb1ab5115f4d3c4b079555c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/library-to-tasks.ts @@ -0,0 +1,76 @@ +import type { ModelLibraryKey } from "./model-libraries"; +import type { PipelineType } from "./pipelines"; + +/** + * Mapping from library name to its supported tasks. + * Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping. + * This mapping is partially generated automatically by "python-api-export-tasks" action in + * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually + * based on api-inference (hf_types.rs). 
+ */ +export const LIBRARY_TASK_MAPPING: Partial> = { + "adapter-transformers": ["question-answering", "text-classification", "token-classification"], + allennlp: ["question-answering"], + asteroid: [ + // "audio-source-separation", + "audio-to-audio", + ], + bertopic: ["text-classification"], + diffusers: ["image-to-image", "text-to-image"], + doctr: ["object-detection"], + espnet: ["text-to-speech", "automatic-speech-recognition"], + fairseq: ["text-to-speech", "audio-to-audio"], + fastai: ["image-classification"], + fasttext: ["feature-extraction", "text-classification"], + flair: ["token-classification"], + k2: ["automatic-speech-recognition"], + keras: ["image-classification"], + nemo: ["automatic-speech-recognition"], + open_clip: ["zero-shot-classification", "zero-shot-image-classification"], + paddlenlp: ["fill-mask", "summarization", "zero-shot-classification"], + peft: ["text-generation"], + "pyannote-audio": ["automatic-speech-recognition"], + "sentence-transformers": ["feature-extraction", "sentence-similarity"], + setfit: ["text-classification"], + sklearn: ["tabular-classification", "tabular-regression", "text-classification"], + spacy: ["token-classification", "text-classification", "sentence-similarity"], + "span-marker": ["token-classification"], + speechbrain: [ + "audio-classification", + "audio-to-audio", + "automatic-speech-recognition", + "text-to-speech", + "text2text-generation", + ], + stanza: ["token-classification"], + timm: ["image-classification"], + transformers: [ + "audio-classification", + "automatic-speech-recognition", + "depth-estimation", + "document-question-answering", + "feature-extraction", + "fill-mask", + "image-classification", + "image-segmentation", + "image-to-image", + "image-to-text", + "object-detection", + "question-answering", + "summarization", + "table-question-answering", + "text2text-generation", + "text-classification", + "text-generation", + "text-to-audio", + "text-to-speech", + "token-classification", + 
"translation", + "video-classification", + "visual-question-answering", + "zero-shot-classification", + "zero-shot-image-classification", + "zero-shot-object-detection", + ], + mindspore: ["image-classification"], +}; diff --git a/data/node_modules/@huggingface/tasks/src/local-apps.ts b/data/node_modules/@huggingface/tasks/src/local-apps.ts new file mode 100644 index 0000000000000000000000000000000000000000..32515966777341cb81994d3ba4348eceb09f07fa --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/local-apps.ts @@ -0,0 +1,183 @@ +import type { ModelData } from "./model-data"; +import type { PipelineType } from "./pipelines"; + +/** + * Elements configurable by a local app. + */ +export type LocalApp = { + /** + * Name that appears in buttons + */ + prettyLabel: string; + /** + * Link to get more info about a local app (website etc) + */ + docsUrl: string; + /** + * main category of app + */ + mainTask: PipelineType; + /** + * Whether to display a pill "macOS-only" + */ + macOSOnly?: boolean; + + comingSoon?: boolean; + /** + * IMPORTANT: function to figure out whether to display the button on a model page's main "Use this model" dropdown. + */ + displayOnModelPage: (model: ModelData) => boolean; +} & ( + | { + /** + * If the app supports deeplink, URL to open. + */ + deeplink: (model: ModelData, filepath?: string) => URL; + } + | { + /** + * And if not (mostly llama.cpp), snippet to copy/paste in your terminal + * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files. + */ + snippet: (model: ModelData, filepath?: string) => string | string[]; + } +); + +function isGgufModel(model: ModelData) { + return model.tags.includes("gguf"); +} + +const snippetLlamacpp = (model: ModelData, filepath?: string): string[] => { + return [ + `# Option 1: use llama.cpp with brew +brew install llama.cpp + +# Load and run the model +llama \\ + --hf-repo "${model.id}" \\ + --hf-file ${filepath ?? 
"{{GGUF_FILE}}"} \\ + -p "I believe the meaning of life is" \\ + -n 128`, + `# Option 2: build llama.cpp from source with curl support +git clone https://github.com/ggerganov/llama.cpp.git +cd llama.cpp +LLAMA_CURL=1 make + +# Load and run the model +./main \\ + --hf-repo "${model.id}" \\ + -m ${filepath ?? "{{GGUF_FILE}}"} \\ + -p "I believe the meaning of life is" \\ + -n 128`, + ]; +}; + +/** + * Add your new local app here. + * + * This is open to new suggestions and awesome upcoming apps. + * + * /!\ IMPORTANT + * + * If possible, you need to support deeplinks and be as cross-platform as possible. + * + * Ping the HF team if we can help with anything! + */ +export const LOCAL_APPS = { + "llama.cpp": { + prettyLabel: "llama.cpp", + docsUrl: "https://github.com/ggerganov/llama.cpp", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + snippet: snippetLlamacpp, + }, + lmstudio: { + prettyLabel: "LM Studio", + docsUrl: "https://lmstudio.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model, filepath) => + new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? 
`&file=${filepath}` : ""}`), + }, + jan: { + prettyLabel: "Jan", + docsUrl: "https://jan.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`jan://models/huggingface/${model.id}`), + }, + backyard: { + prettyLabel: "Backyard AI", + docsUrl: "https://backyard.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`https://backyard.ai/hf/model/${model.id}`), + }, + sanctum: { + prettyLabel: "Sanctum", + docsUrl: "https://sanctum.ai", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`sanctum://open_from_hf?model=${model.id}`), + }, + jellybox: { + prettyLabel: "Jellybox", + docsUrl: "https://jellybox.com", + mainTask: "text-generation", + displayOnModelPage: (model) => + isGgufModel(model) || + (model.library_name === "diffusers" && + model.tags.includes("safetensors") && + (model.pipeline_tag === "text-to-image" || model.tags.includes("lora"))), + deeplink: (model) => { + if (isGgufModel(model)) { + return new URL(`jellybox://llm/models/huggingface/LLM/${model.id}`); + } else if (model.tags.includes("lora")) { + return new URL(`jellybox://image/models/huggingface/ImageLora/${model.id}`); + } else { + return new URL(`jellybox://image/models/huggingface/Image/${model.id}`); + } + }, + }, + msty: { + prettyLabel: "Msty", + docsUrl: "https://msty.app", + mainTask: "text-generation", + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`msty://models/search/hf/${model.id}`), + }, + recursechat: { + prettyLabel: "RecurseChat", + docsUrl: "https://recurse.chat", + mainTask: "text-generation", + macOSOnly: true, + displayOnModelPage: isGgufModel, + deeplink: (model) => new URL(`recursechat://new-hf-gguf-model?hf-model-id=${model.id}`), + }, + drawthings: { + prettyLabel: "Draw Things", + docsUrl: "https://drawthings.ai", + mainTask: "text-to-image", + macOSOnly: true, + displayOnModelPage: (model) => + 
model.library_name === "diffusers" && (model.pipeline_tag === "text-to-image" || model.tags.includes("lora")), + deeplink: (model) => { + if (model.tags.includes("lora")) { + return new URL(`https://drawthings.ai/import/diffusers/pipeline.load_lora_weights?repo_id=${model.id}`); + } else { + return new URL(`https://drawthings.ai/import/diffusers/pipeline.from_pretrained?repo_id=${model.id}`); + } + }, + }, + diffusionbee: { + prettyLabel: "DiffusionBee", + docsUrl: "https://diffusionbee.com", + mainTask: "text-to-image", + macOSOnly: true, + comingSoon: true, + displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image", + deeplink: (model) => new URL(`diffusionbee://open_from_hf?model=${model.id}`), + }, +} satisfies Record; + +export type LocalAppKey = keyof typeof LOCAL_APPS; diff --git a/data/node_modules/@huggingface/tasks/src/model-data.ts b/data/node_modules/@huggingface/tasks/src/model-data.ts new file mode 100644 index 0000000000000000000000000000000000000000..addba268d54e4bf54984b1cb3f5ca322b77cb4de --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/model-data.ts @@ -0,0 +1,134 @@ +import type { PipelineType } from "./pipelines"; +import type { WidgetExample } from "./widget-example"; +import type { TokenizerConfig } from "./tokenizer-data"; + +/** + * Public interface for model metadata + */ +export interface ModelData { + /** + * id of model (e.g. 'user/repo_name') + */ + id: string; + /** + * Whether or not to enable inference widget for this model + * TODO(type it) + */ + inference: string; + /** + * is this model private? 
+ */ + private?: boolean; + /** + * this dictionary has useful information about the model configuration + */ + config?: { + architectures?: string[]; + /** + * Dict of AutoModel or Auto… class name to local import path in the repo + */ + auto_map?: { + /** + * String Property + */ + [x: string]: string; + }; + model_type?: string; + quantization_config?: { + bits?: number; + load_in_4bit?: boolean; + load_in_8bit?: boolean; + }; + tokenizer_config?: TokenizerConfig; + adapter_transformers?: { + model_name?: string; + model_class?: string; + }; + diffusers?: { + _class_name?: string; + }; + sklearn?: { + model?: { + file?: string; + }; + model_format?: string; + }; + speechbrain?: { + speechbrain_interface?: string; + vocoder_interface?: string; + vocoder_model_id?: string; + }; + peft?: { + base_model_name_or_path?: string; + task_type?: string; + }; + }; + /** + * all the model tags + */ + tags: string[]; + /** + * transformers-specific info to display in the code sample. + */ + transformersInfo?: TransformersInfo; + /** + * Pipeline type + */ + pipeline_tag?: PipelineType | undefined; + /** + * for relevant models, get mask token + */ + mask_token?: string | undefined; + /** + * Example data that will be fed into the widget. + * + * can be set in the model card metadata (under `widget`), + * or by default in `DefaultWidget.ts` + */ + widgetData?: WidgetExample[] | undefined; + /** + * Parameters that will be used by the widget when calling Inference API (serverless) + * https://huggingface.co/docs/api-inference/detailed_parameters + * + * can be set in the model card metadata (under `inference/parameters`) + * Example: + * inference: + * parameters: + * key: val + */ + cardData?: { + inference?: + | boolean + | { + parameters?: Record; + }; + base_model?: string | string[]; + }; + /** + * Library name + * Example: transformers, SpeechBrain, Stanza, etc. + */ + library_name?: string; +} + +/** + * transformers-specific info to display in the code sample. 
+ */ +export interface TransformersInfo { + /** + * e.g. AutoModelForSequenceClassification + */ + auto_model: string; + /** + * if set in config.json's auto_map + */ + custom_class?: string; + /** + * e.g. text-classification + */ + pipeline_tag?: PipelineType; + /** + * e.g. "AutoTokenizer" | "AutoFeatureExtractor" | "AutoProcessor" + */ + processor?: string; +} diff --git a/data/node_modules/@huggingface/tasks/src/model-libraries-downloads.ts b/data/node_modules/@huggingface/tasks/src/model-libraries-downloads.ts new file mode 100644 index 0000000000000000000000000000000000000000..70fc3bface26cdf0ad939623921435724deb8ced --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/model-libraries-downloads.ts @@ -0,0 +1,18 @@ +/** + * This file contains the (simplified) types used + * to represent queries that are made to Elastic + * in order to count number of model downloads + * + * Read this doc about download stats on the Hub: + * + * https://huggingface.co/docs/hub/models-download-stats + * Available fields: + * - path: the complete file path (relative) (e.g: "prefix/file.extension") + * - path_prefix: the prefix of the file path (e.g: "prefix/", empty if no prefix) + * - path_extension: the extension of the file path (e.g: "extension") + * - path_filename: the name of the file path (e.g: "file") + * see also: + * https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html + */ + +export type ElasticSearchQuery = string; diff --git a/data/node_modules/@huggingface/tasks/src/model-libraries-snippets.ts b/data/node_modules/@huggingface/tasks/src/model-libraries-snippets.ts new file mode 100644 index 0000000000000000000000000000000000000000..48097e3481c63d68994f6b686f72fa0c210f46bc --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/model-libraries-snippets.ts @@ -0,0 +1,816 @@ +import type { ModelData } from "./model-data"; +import { LIBRARY_TASK_MAPPING } from "./library-to-tasks"; + +const TAG_CUSTOM_CODE = 
"custom_code"; + +function nameWithoutNamespace(modelId: string): string { + const splitted = modelId.split("/"); + return splitted.length === 1 ? splitted[0] : splitted[1]; +} + +//#region snippets + +export const adapters = (model: ModelData): string[] => [ + `from adapters import AutoAdapterModel + +model = AutoAdapterModel.from_pretrained("${model.config?.adapter_transformers?.model_name}") +model.load_adapter("${model.id}", set_active=True)`, +]; + +const allennlpUnknown = (model: ModelData) => [ + `import allennlp_models +from allennlp.predictors.predictor import Predictor + +predictor = Predictor.from_path("hf://${model.id}")`, +]; + +const allennlpQuestionAnswering = (model: ModelData) => [ + `import allennlp_models +from allennlp.predictors.predictor import Predictor + +predictor = Predictor.from_path("hf://${model.id}") +predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "question": "Where do I live?"} +predictions = predictor.predict_json(predictor_input)`, +]; + +export const allennlp = (model: ModelData): string[] => { + if (model.tags.includes("question-answering")) { + return allennlpQuestionAnswering(model); + } + return allennlpUnknown(model); +}; + +export const asteroid = (model: ModelData): string[] => [ + `from asteroid.models import BaseModel + +model = BaseModel.from_pretrained("${model.id}")`, +]; + +export const audioseal = (model: ModelData): string[] => { + const watermarkSnippet = `# Watermark Generator +from audioseal import AudioSeal + +model = AudioSeal.load_generator("${model.id}") +# pass a tensor (tensor_wav) of shape (batch, channels, samples) and a sample rate +wav, sr = tensor_wav, 16000 + +watermark = model.get_watermark(wav, sr) +watermarked_audio = wav + watermark`; + + const detectorSnippet = `# Watermark Detector +from audioseal import AudioSeal + +detector = AudioSeal.load_detector("${model.id}") + +result, message = detector.detect_watermark(watermarked_audio, sr)`; + return [watermarkSnippet, 
detectorSnippet]; +}; + +function get_base_diffusers_model(model: ModelData): string { + return model.cardData?.base_model?.toString() ?? "fill-in-base-model"; +} + +export const bertopic = (model: ModelData): string[] => [ + `from bertopic import BERTopic + +model = BERTopic.load("${model.id}")`, +]; + +export const bm25s = (model: ModelData): string[] => [ + `from bm25s.hf import BM25HF + +retriever = BM25HF.load_from_hub("${model.id}")`, +]; + +export const depth_anything_v2 = (model: ModelData): string[] => { + let encoder: string; + let features: string; + let out_channels: string; + + encoder = ""; + features = ""; + out_channels = ""; + + if (model.id === "depth-anything/Depth-Anything-V2-Small") { + encoder = "vits"; + features = "64"; + out_channels = "[48, 96, 192, 384]"; + } else if (model.id === "depth-anything/Depth-Anything-V2-Base") { + encoder = "vitb"; + features = "128"; + out_channels = "[96, 192, 384, 768]"; + } else if (model.id === "depth-anything/Depth-Anything-V2-Large") { + encoder = "vitl"; + features = "256"; + out_channels = "[256, 512, 1024, 1024"; + } + + return [ + ` +# Install from https://github.com/DepthAnything/Depth-Anything-V2 + +# Load the model and infer depth from an image +import cv2 +import torch + +from depth_anything_v2.dpt import DepthAnythingV2 + +# instantiate the model +model = DepthAnythingV2(encoder="${encoder}", features=${features}, out_channels=${out_channels}) + +# load the weights +filepath = hf_hub_download(repo_id="${model.id}", filename="depth_anything_v2_${encoder}.pth", repo_type="model") +state_dict = torch.load(filepath, map_location="cpu") +model.load_state_dict(state_dict).eval() + +raw_img = cv2.imread("your/image/path") +depth = model.infer_image(raw_img) # HxW raw depth map in numpy + `, + ]; +}; + +const diffusers_default = (model: ModelData) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${model.id}")`, +]; + +const diffusers_controlnet = (model: 
ModelData) => [ + `from diffusers import ControlNetModel, StableDiffusionControlNetPipeline + +controlnet = ControlNetModel.from_pretrained("${model.id}") +pipeline = StableDiffusionControlNetPipeline.from_pretrained( + "${get_base_diffusers_model(model)}", controlnet=controlnet +)`, +]; + +const diffusers_lora = (model: ModelData) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}") +pipeline.load_lora_weights("${model.id}")`, +]; + +const diffusers_textual_inversion = (model: ModelData) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}") +pipeline.load_textual_inversion("${model.id}")`, +]; + +export const diffusers = (model: ModelData): string[] => { + if (model.tags.includes("controlnet")) { + return diffusers_controlnet(model); + } else if (model.tags.includes("lora")) { + return diffusers_lora(model); + } else if (model.tags.includes("textual_inversion")) { + return diffusers_textual_inversion(model); + } else { + return diffusers_default(model); + } +}; + +export const edsnlp = (model: ModelData): string[] => { + const packageName = nameWithoutNamespace(model.id).replaceAll("-", "_"); + return [ + `# Load it from the Hub directly +import edsnlp +nlp = edsnlp.load("${model.id}") +`, + `# Or install it as a package +!pip install git+https://huggingface.co/${model.id} + +# and import it as a module +import ${packageName} + +nlp = ${packageName}.load() # or edsnlp.load("${packageName}") +`, + ]; +}; + +export const espnetTTS = (model: ModelData): string[] => [ + `from espnet2.bin.tts_inference import Text2Speech + +model = Text2Speech.from_pretrained("${model.id}") + +speech, *_ = model("text to generate speech from")`, +]; + +export const espnetASR = (model: ModelData): string[] => [ + `from espnet2.bin.asr_inference import Speech2Text + +model = Speech2Text.from_pretrained( + "${model.id}" +) + 
+speech, rate = soundfile.read("speech.wav") +text, *_ = model(speech)[0]`, +]; + +const espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`]; + +export const espnet = (model: ModelData): string[] => { + if (model.tags.includes("text-to-speech")) { + return espnetTTS(model); + } else if (model.tags.includes("automatic-speech-recognition")) { + return espnetASR(model); + } + return espnetUnknown(); +}; + +export const fairseq = (model: ModelData): string[] => [ + `from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub + +models, cfg, task = load_model_ensemble_and_task_from_hf_hub( + "${model.id}" +)`, +]; + +export const flair = (model: ModelData): string[] => [ + `from flair.models import SequenceTagger + +tagger = SequenceTagger.load("${model.id}")`, +]; + +export const gliner = (model: ModelData): string[] => [ + `from gliner import GLiNER + +model = GLiNER.from_pretrained("${model.id}")`, +]; + +export const keras = (model: ModelData): string[] => [ + `# Available backend options are: "jax", "tensorflow", "torch". +import os +os.environ["KERAS_BACKEND"] = "tensorflow" + +import keras + +model = keras.saving.load_model("hf://${model.id}") +`, +]; + +export const keras_nlp = (model: ModelData): string[] => [ + `# Available backend options are: "jax", "tensorflow", "torch". +import os +os.environ["KERAS_BACKEND"] = "tensorflow" + +import keras_nlp + +tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}") +backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}") +`, +]; + +export const tf_keras = (model: ModelData): string[] => [ + `# Note: 'keras<3.x' or 'tf_keras' must be installed (legacy) +# See https://github.com/keras-team/tf-keras for more details. 
+from huggingface_hub import from_pretrained_keras + +model = from_pretrained_keras("${model.id}") +`, +]; + +export const mamba_ssm = (model: ModelData): string[] => [ + `from mamba_ssm import MambaLMHeadModel + +model = MambaLMHeadModel.from_pretrained("${model.id}")`, +]; + +export const mars5_tts = (model: ModelData): string[] => [ + `# Install from https://github.com/Camb-ai/MARS5-TTS + +from inference import Mars5TTS +mars5 = Mars5TTS.from_pretrained("${model.id}")`, +]; + +export const mesh_anything = (): string[] => [ + `# Install from https://github.com/buaacyw/MeshAnything.git + +from MeshAnything.models.meshanything import MeshAnything + +# refer to https://github.com/buaacyw/MeshAnything/blob/main/main.py#L91 on how to define args +# and https://github.com/buaacyw/MeshAnything/blob/main/app.py regarding usage +model = MeshAnything(args)`, +]; + +export const open_clip = (model: ModelData): string[] => [ + `import open_clip + +model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:${model.id}') +tokenizer = open_clip.get_tokenizer('hf-hub:${model.id}')`, +]; + +export const paddlenlp = (model: ModelData): string[] => { + if (model.config?.architectures?.[0]) { + const architecture = model.config.architectures[0]; + return [ + [ + `from paddlenlp.transformers import AutoTokenizer, ${architecture}`, + "", + `tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`, + `model = ${architecture}.from_pretrained("${model.id}", from_hf_hub=True)`, + ].join("\n"), + ]; + } else { + return [ + [ + `# ⚠️ Type of model unknown`, + `from paddlenlp.transformers import AutoTokenizer, AutoModel`, + "", + `tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`, + `model = AutoModel.from_pretrained("${model.id}", from_hf_hub=True)`, + ].join("\n"), + ]; + } +}; + +export const pyannote_audio_pipeline = (model: ModelData): string[] => [ + `from pyannote.audio import Pipeline + +pipeline = 
Pipeline.from_pretrained("${model.id}") + +# inference on the whole file +pipeline("file.wav") + +# inference on an excerpt +from pyannote.core import Segment +excerpt = Segment(start=2.0, end=5.0) + +from pyannote.audio import Audio +waveform, sample_rate = Audio().crop("file.wav", excerpt) +pipeline({"waveform": waveform, "sample_rate": sample_rate})`, +]; + +const pyannote_audio_model = (model: ModelData): string[] => [ + `from pyannote.audio import Model, Inference + +model = Model.from_pretrained("${model.id}") +inference = Inference(model) + +# inference on the whole file +inference("file.wav") + +# inference on an excerpt +from pyannote.core import Segment +excerpt = Segment(start=2.0, end=5.0) +inference.crop("file.wav", excerpt)`, +]; + +export const pyannote_audio = (model: ModelData): string[] => { + if (model.tags.includes("pyannote-audio-pipeline")) { + return pyannote_audio_pipeline(model); + } + return pyannote_audio_model(model); +}; + +const tensorflowttsTextToMel = (model: ModelData): string[] => [ + `from tensorflow_tts.inference import AutoProcessor, TFAutoModel + +processor = AutoProcessor.from_pretrained("${model.id}") +model = TFAutoModel.from_pretrained("${model.id}") +`, +]; + +const tensorflowttsMelToWav = (model: ModelData): string[] => [ + `from tensorflow_tts.inference import TFAutoModel + +model = TFAutoModel.from_pretrained("${model.id}") +audios = model.inference(mels) +`, +]; + +const tensorflowttsUnknown = (model: ModelData): string[] => [ + `from tensorflow_tts.inference import TFAutoModel + +model = TFAutoModel.from_pretrained("${model.id}") +`, +]; + +export const tensorflowtts = (model: ModelData): string[] => { + if (model.tags.includes("text-to-mel")) { + return tensorflowttsTextToMel(model); + } else if (model.tags.includes("mel-to-wav")) { + return tensorflowttsMelToWav(model); + } + return tensorflowttsUnknown(model); +}; + +export const timm = (model: ModelData): string[] => [ + `import timm + +model = 
timm.create_model("hf_hub:${model.id}", pretrained=True)`, +]; + +const skopsPickle = (model: ModelData, modelFile: string) => { + return [ + `import joblib +from skops.hub_utils import download +download("${model.id}", "path_to_folder") +model = joblib.load( + "${modelFile}" +) +# only load pickle files from sources you trust +# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`, + ]; +}; + +const skopsFormat = (model: ModelData, modelFile: string) => { + return [ + `from skops.hub_utils import download +from skops.io import load +download("${model.id}", "path_to_folder") +# make sure model file is in skops format +# if model is a pickle file, make sure it's from a source you trust +model = load("path_to_folder/${modelFile}")`, + ]; +}; + +const skopsJobLib = (model: ModelData) => { + return [ + `from huggingface_hub import hf_hub_download +import joblib +model = joblib.load( + hf_hub_download("${model.id}", "sklearn_model.joblib") +) +# only load pickle files from sources you trust +# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`, + ]; +}; + +export const sklearn = (model: ModelData): string[] => { + if (model.tags.includes("skops")) { + const skopsmodelFile = model.config?.sklearn?.model?.file; + const skopssaveFormat = model.config?.sklearn?.model_format; + if (!skopsmodelFile) { + return [`# ⚠️ Model filename not specified in config.json`]; + } + if (skopssaveFormat === "pickle") { + return skopsPickle(model, skopsmodelFile); + } else { + return skopsFormat(model, skopsmodelFile); + } + } else { + return skopsJobLib(model); + } +}; + +export const stable_audio_tools = (model: ModelData): string[] => [ + `import torch +import torchaudio +from einops import rearrange +from stable_audio_tools import get_pretrained_model +from stable_audio_tools.inference.generation import generate_diffusion_cond + +device = "cuda" if torch.cuda.is_available() else "cpu" + +# Download model +model, model_config = 
get_pretrained_model("${model.id}") +sample_rate = model_config["sample_rate"] +sample_size = model_config["sample_size"] + +model = model.to(device) + +# Set up text and timing conditioning +conditioning = [{ + "prompt": "128 BPM tech house drum loop", +}] + +# Generate stereo audio +output = generate_diffusion_cond( + model, + conditioning=conditioning, + sample_size=sample_size, + device=device +) + +# Rearrange audio batch to a single sequence +output = rearrange(output, "b d n -> d (b n)") + +# Peak normalize, clip, convert to int16, and save to file +output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu() +torchaudio.save("output.wav", output, sample_rate)`, +]; + +export const fastai = (model: ModelData): string[] => [ + `from huggingface_hub import from_pretrained_fastai + +learn = from_pretrained_fastai("${model.id}")`, +]; + +export const sampleFactory = (model: ModelData): string[] => [ + `python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir`, +]; + +export const sentenceTransformers = (model: ModelData): string[] => [ + `from sentence_transformers import SentenceTransformer + +model = SentenceTransformer("${model.id}")`, +]; + +export const setfit = (model: ModelData): string[] => [ + `from setfit import SetFitModel + +model = SetFitModel.from_pretrained("${model.id}")`, +]; + +export const spacy = (model: ModelData): string[] => [ + `!pip install https://huggingface.co/${model.id}/resolve/main/${nameWithoutNamespace(model.id)}-any-py3-none-any.whl + +# Using spacy.load(). +import spacy +nlp = spacy.load("${nameWithoutNamespace(model.id)}") + +# Importing as module. 
+import ${nameWithoutNamespace(model.id)} +nlp = ${nameWithoutNamespace(model.id)}.load()`, +]; + +export const span_marker = (model: ModelData): string[] => [ + `from span_marker import SpanMarkerModel + +model = SpanMarkerModel.from_pretrained("${model.id}")`, +]; + +export const stanza = (model: ModelData): string[] => [ + `import stanza + +stanza.download("${nameWithoutNamespace(model.id).replace("stanza-", "")}") +nlp = stanza.Pipeline("${nameWithoutNamespace(model.id).replace("stanza-", "")}")`, +]; + +const speechBrainMethod = (speechbrainInterface: string) => { + switch (speechbrainInterface) { + case "EncoderClassifier": + return "classify_file"; + case "EncoderDecoderASR": + case "EncoderASR": + return "transcribe_file"; + case "SpectralMaskEnhancement": + return "enhance_file"; + case "SepformerSeparation": + return "separate_file"; + default: + return undefined; + } +}; + +export const speechbrain = (model: ModelData): string[] => { + const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface; + if (speechbrainInterface === undefined) { + return [`# interface not specified in config.json`]; + } + + const speechbrainMethod = speechBrainMethod(speechbrainInterface); + if (speechbrainMethod === undefined) { + return [`# interface in config.json invalid`]; + } + + return [ + `from speechbrain.pretrained import ${speechbrainInterface} +model = ${speechbrainInterface}.from_hparams( + "${model.id}" +) +model.${speechbrainMethod}("file.wav")`, + ]; +}; + +export const transformers = (model: ModelData): string[] => { + const info = model.transformersInfo; + if (!info) { + return [`# ⚠️ Type of model unknown`]; + } + const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : ""; + + let autoSnippet: string; + if (info.processor) { + const varName = + info.processor === "AutoTokenizer" + ? "tokenizer" + : info.processor === "AutoFeatureExtractor" + ? 
"extractor" + : "processor"; + autoSnippet = [ + "# Load model directly", + `from transformers import ${info.processor}, ${info.auto_model}`, + "", + `${varName} = ${info.processor}.from_pretrained("${model.id}"` + remote_code_snippet + ")", + `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")", + ].join("\n"); + } else { + autoSnippet = [ + "# Load model directly", + `from transformers import ${info.auto_model}`, + `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")", + ].join("\n"); + } + + if (model.pipeline_tag && LIBRARY_TASK_MAPPING.transformers?.includes(model.pipeline_tag)) { + const pipelineSnippet = ["# Use a pipeline as a high-level helper", "from transformers import pipeline", ""]; + + if (model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template) { + pipelineSnippet.push("messages = [", ' {"role": "user", "content": "Who are you?"},', "]"); + } + pipelineSnippet.push(`pipe = pipeline("${model.pipeline_tag}", model="${model.id}"` + remote_code_snippet + ")"); + if (model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template) { + pipelineSnippet.push("pipe(messages)"); + } + + return [pipelineSnippet.join("\n"), autoSnippet]; + } + return [autoSnippet]; +}; + +export const transformersJS = (model: ModelData): string[] => { + if (!model.pipeline_tag) { + return [`// ⚠️ Unknown pipeline tag`]; + } + + const libName = "@xenova/transformers"; + + return [ + `// npm i ${libName} +import { pipeline } from '${libName}'; + +// Allocate pipeline +const pipe = await pipeline('${model.pipeline_tag}', '${model.id}');`, + ]; +}; + +const peftTask = (peftTaskType?: string) => { + switch (peftTaskType) { + case "CAUSAL_LM": + return "CausalLM"; + case "SEQ_2_SEQ_LM": + return "Seq2SeqLM"; + case "TOKEN_CLS": + return "TokenClassification"; + case "SEQ_CLS": + return "SequenceClassification"; + default: + return undefined; + } +}; + +export const 
peft = (model: ModelData): string[] => { + const { base_model_name_or_path: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {}; + const pefttask = peftTask(peftTaskType); + if (!pefttask) { + return [`Task type is invalid.`]; + } + if (!peftBaseModel) { + return [`Base model is not found.`]; + } + + return [ + `from peft import PeftModel, PeftConfig +from transformers import AutoModelFor${pefttask} + +config = PeftConfig.from_pretrained("${model.id}") +base_model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}") +model = PeftModel.from_pretrained(base_model, "${model.id}")`, + ]; +}; + +export const fasttext = (model: ModelData): string[] => [ + `from huggingface_hub import hf_hub_download +import fasttext + +model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))`, +]; + +export const stableBaselines3 = (model: ModelData): string[] => [ + `from huggingface_sb3 import load_from_hub +checkpoint = load_from_hub( + repo_id="${model.id}", + filename="{MODEL FILENAME}.zip", +)`, +]; + +const nemoDomainResolver = (domain: string, model: ModelData): string[] | undefined => { + switch (domain) { + case "ASR": + return [ + `import nemo.collections.asr as nemo_asr +asr_model = nemo_asr.models.ASRModel.from_pretrained("${model.id}") + +transcriptions = asr_model.transcribe(["file.wav"])`, + ]; + default: + return undefined; + } +}; + +export const mlAgents = (model: ModelData): string[] => [ + `mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./download: string[]s"`, +]; + +export const sentis = (/* model: ModelData */): string[] => [ + `string modelName = "[Your model name here].sentis"; +Model model = ModelLoader.Load(Application.streamingAssetsPath + "/" + modelName); +IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model); +// Please see provided C# file for more details +`, +]; + +export const voicecraft = (model: ModelData): string[] => [ + `from voicecraft import VoiceCraft + +model = 
VoiceCraft.from_pretrained("${model.id}")`, +]; + +export const chattts = (): string[] => [ + `import ChatTTS +import torchaudio + +chat = ChatTTS.Chat() +chat.load_models(compile=False) # Set to True for better performance + +texts = ["PUT YOUR TEXT HERE",] + +wavs = chat.infer(texts, ) + +torchaudio.save("output1.wav", torch.from_numpy(wavs[0]), 24000)`, +]; + +export const mlx = (model: ModelData): string[] => [ + `pip install huggingface_hub hf_transfer + +export HF_HUB_ENABLE_HF_TRANS: string[]FER=1 +huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}`, +]; + +export const mlxim = (model: ModelData): string[] => [ + `from mlxim.model import create_model + +model = create_model(${model.id})`, +]; + +export const nemo = (model: ModelData): string[] => { + let command: string[] | undefined = undefined; + // Resolve the tag to a nemo domain/sub-domain + if (model.tags.includes("automatic-speech-recognition")) { + command = nemoDomainResolver("ASR", model); + } + + return command ?? [`# tag did not correspond to a valid NeMo domain.`]; +}; + +export const pythae = (model: ModelData): string[] => [ + `from pythae.models import AutoModel + +model = AutoModel.load_from_hf_hub("${model.id}")`, +]; + +const musicgen = (model: ModelData): string[] => [ + `from audiocraft.models import MusicGen + +model = MusicGen.get_pretrained("${model.id}") + +descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] +wav = model.generate(descriptions) # generates 3 samples.`, +]; + +const magnet = (model: ModelData): string[] => [ + `from audiocraft.models import MAGNeT + +model = MAGNeT.get_pretrained("${model.id}") + +descriptions = ['disco beat', 'energetic EDM', 'funky groove'] +wav = model.generate(descriptions) # generates 3 samples.`, +]; + +const audiogen = (model: ModelData): string[] => [ + `from audiocraft.models import AudioGen + +model = AudioGen.get_pretrained("${model.id}") +model.set_generation_params(duration=5) # generate 5 seconds. 
+descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a corridor'] +wav = model.generate(descriptions) # generates 3 samples.`, +]; + +export const audiocraft = (model: ModelData): string[] => { + if (model.tags.includes("musicgen")) { + return musicgen(model); + } else if (model.tags.includes("audiogen")) { + return audiogen(model); + } else if (model.tags.includes("magnet")) { + return magnet(model); + } else { + return [`# Type of model unknown.`]; + } +}; + +export const whisperkit = (): string[] => [ + `# Install CLI with Homebrew on macOS device +brew install whisperkit-cli + +# View all available inference options +whisperkit-cli transcribe --help + +# Download and run inference using whisper base model +whisperkit-cli transcribe --audio-path /path/to/audio.mp3 + +# Or use your preferred model variant +whisperkit-cli transcribe --model "large-v3" --model-prefix "distil" --audio-path /path/to/audio.mp3 --verbose`, +]; +//#endregion diff --git a/data/node_modules/@huggingface/tasks/src/model-libraries.ts b/data/node_modules/@huggingface/tasks/src/model-libraries.ts new file mode 100644 index 0000000000000000000000000000000000000000..36e758d528a0cf4a72d441162335e3e51a76f867 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/model-libraries.ts @@ -0,0 +1,592 @@ +import * as snippets from "./model-libraries-snippets"; +import type { ModelData } from "./model-data"; +import type { ElasticSearchQuery } from "./model-libraries-downloads"; + +/** + * Elements configurable by a model library. + */ +export interface LibraryUiElement { + /** + * Pretty name of the library. + * displayed in tags, and on the main + * call-to-action button on the model page. 
+ */ + prettyLabel: string; + /** + * Repo name of the library's (usually on GitHub) code repo + */ + repoName: string; + /** + * URL to library's (usually on GitHub) code repo + */ + repoUrl: string; + /** + * URL to library's docs + */ + docsUrl?: string; + /** + * Code snippet(s) displayed on model page + */ + snippets?: (model: ModelData) => string[]; + /** + * Elastic query used to count this library's model downloads + * + * By default, those files are counted: + * "config.json", "config.yaml", "hyperparams.yaml", "meta.yaml" + */ + countDownloads?: ElasticSearchQuery; + /** + * should we display this library in hf.co/models filter + * (only for popular libraries with > 100 models) + */ + filter?: boolean; +} + +/** + * Add your new library here. + * + * This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc). + * (unlike libraries, file formats live in an enum inside the internal codebase.) + * + * Doc on how to add a library to the Hub: + * + * https://huggingface.co/docs/hub/models-adding-libraries + * + * /!\ IMPORTANT + * + * The key you choose is the tag your models have in their library_name on the Hub. 
+ */ + +export const MODEL_LIBRARIES_UI_ELEMENTS = { + "adapter-transformers": { + prettyLabel: "Adapters", + repoName: "adapters", + repoUrl: "https://github.com/Adapter-Hub/adapters", + docsUrl: "https://huggingface.co/docs/hub/adapters", + snippets: snippets.adapters, + filter: true, + countDownloads: `path:"adapter_config.json"`, + }, + allennlp: { + prettyLabel: "AllenNLP", + repoName: "AllenNLP", + repoUrl: "https://github.com/allenai/allennlp", + docsUrl: "https://huggingface.co/docs/hub/allennlp", + snippets: snippets.allennlp, + filter: true, + }, + asteroid: { + prettyLabel: "Asteroid", + repoName: "Asteroid", + repoUrl: "https://github.com/asteroid-team/asteroid", + docsUrl: "https://huggingface.co/docs/hub/asteroid", + snippets: snippets.asteroid, + filter: true, + countDownloads: `path:"pytorch_model.bin"`, + }, + audiocraft: { + prettyLabel: "Audiocraft", + repoName: "audiocraft", + repoUrl: "https://github.com/facebookresearch/audiocraft", + snippets: snippets.audiocraft, + filter: false, + countDownloads: `path:"state_dict.bin"`, + }, + audioseal: { + prettyLabel: "AudioSeal", + repoName: "audioseal", + repoUrl: "https://github.com/facebookresearch/audioseal", + filter: false, + countDownloads: `path_extension:"pth"`, + snippets: snippets.audioseal, + }, + bertopic: { + prettyLabel: "BERTopic", + repoName: "BERTopic", + repoUrl: "https://github.com/MaartenGr/BERTopic", + snippets: snippets.bertopic, + filter: true, + }, + big_vision: { + prettyLabel: "Big Vision", + repoName: "big_vision", + repoUrl: "https://github.com/google-research/big_vision", + filter: false, + countDownloads: `path_extension:"npz"`, + }, + bm25s: { + prettyLabel: "BM25S", + repoName: "bm25s", + repoUrl: "https://github.com/xhluca/bm25s", + snippets: snippets.bm25s, + filter: false, + countDownloads: `path:"params.index.json"`, + }, + champ: { + prettyLabel: "Champ", + repoName: "Champ", + repoUrl: "https://github.com/fudan-generative-vision/champ", + countDownloads: 
`path:"champ/motion_module.pth"`, + }, + chat_tts: { + prettyLabel: "ChatTTS", + repoName: "ChatTTS", + repoUrl: "https://github.com/2noise/ChatTTS.git", + snippets: snippets.chattts, + filter: false, + countDownloads: `path:"asset/GPT.pt"`, + }, + colpali: { + prettyLabel: "ColPali", + repoName: "ColPali", + repoUrl: "https://github.com/ManuelFay/colpali", + filter: false, + countDownloads: `path:"adapter_config.json"`, + }, + "depth-anything-v2": { + prettyLabel: "DepthAnythingV2", + repoName: "Depth Anything V2", + repoUrl: "https://github.com/DepthAnything/Depth-Anything-V2", + snippets: snippets.depth_anything_v2, + filter: false, + countDownloads: `path_extension:"pth"`, + }, + diffusers: { + prettyLabel: "Diffusers", + repoName: "🤗/diffusers", + repoUrl: "https://github.com/huggingface/diffusers", + docsUrl: "https://huggingface.co/docs/hub/diffusers", + snippets: snippets.diffusers, + filter: true, + /// diffusers has its own more complex "countDownloads" query + }, + doctr: { + prettyLabel: "docTR", + repoName: "doctr", + repoUrl: "https://github.com/mindee/doctr", + }, + edsnlp: { + prettyLabel: "EDS-NLP", + repoName: "edsnlp", + repoUrl: "https://github.com/aphp/edsnlp", + docsUrl: "https://aphp.github.io/edsnlp/latest/", + filter: false, + snippets: snippets.edsnlp, + countDownloads: `path_filename:"config" AND path_extension:"cfg"`, + }, + elm: { + prettyLabel: "ELM", + repoName: "elm", + repoUrl: "https://github.com/slicex-ai/elm", + filter: false, + countDownloads: `path_filename:"slicex_elm_config" AND path_extension:"json"`, + }, + espnet: { + prettyLabel: "ESPnet", + repoName: "ESPnet", + repoUrl: "https://github.com/espnet/espnet", + docsUrl: "https://huggingface.co/docs/hub/espnet", + snippets: snippets.espnet, + filter: true, + }, + fairseq: { + prettyLabel: "Fairseq", + repoName: "fairseq", + repoUrl: "https://github.com/pytorch/fairseq", + snippets: snippets.fairseq, + filter: true, + }, + fastai: { + prettyLabel: "fastai", + repoName: 
"fastai", + repoUrl: "https://github.com/fastai/fastai", + docsUrl: "https://huggingface.co/docs/hub/fastai", + snippets: snippets.fastai, + filter: true, + }, + fasttext: { + prettyLabel: "fastText", + repoName: "fastText", + repoUrl: "https://fasttext.cc/", + snippets: snippets.fasttext, + filter: true, + countDownloads: `path_extension:"bin"`, + }, + flair: { + prettyLabel: "Flair", + repoName: "Flair", + repoUrl: "https://github.com/flairNLP/flair", + docsUrl: "https://huggingface.co/docs/hub/flair", + snippets: snippets.flair, + filter: true, + countDownloads: `path:"pytorch_model.bin"`, + }, + "gemma.cpp": { + prettyLabel: "gemma.cpp", + repoName: "gemma.cpp", + repoUrl: "https://github.com/google/gemma.cpp", + filter: false, + countDownloads: `path_extension:"sbs"`, + }, + gliner: { + prettyLabel: "GLiNER", + repoName: "GLiNER", + repoUrl: "https://github.com/urchade/GLiNER", + snippets: snippets.gliner, + filter: false, + countDownloads: `path:"gliner_config.json"`, + }, + "glyph-byt5": { + prettyLabel: "Glyph-ByT5", + repoName: "Glyph-ByT5", + repoUrl: "https://github.com/AIGText/Glyph-ByT5", + filter: false, + countDownloads: `path:"checkpoints/byt5_model.pt"`, + }, + grok: { + prettyLabel: "Grok", + repoName: "Grok", + repoUrl: "https://github.com/xai-org/grok-1", + filter: false, + countDownloads: `path:"ckpt/tensor00000_000" OR path:"ckpt-0/tensor00000_000"`, + }, + hallo: { + prettyLabel: "Hallo", + repoName: "Hallo", + repoUrl: "https://github.com/fudan-generative-vision/hallo", + countDownloads: `path:"hallo/net.pth"`, + }, + "hunyuan-dit": { + prettyLabel: "HunyuanDiT", + repoName: "HunyuanDiT", + repoUrl: "https://github.com/Tencent/HunyuanDiT", + countDownloads: `path:"pytorch_model_ema.pt" OR path:"pytorch_model_distill.pt"`, + }, + keras: { + prettyLabel: "Keras", + repoName: "Keras", + repoUrl: "https://github.com/keras-team/keras", + docsUrl: "https://huggingface.co/docs/hub/keras", + snippets: snippets.keras, + filter: true, + 
countDownloads: `path:"config.json" OR path_extension:"keras"`, + }, + "tf-keras": { + // Legacy "Keras 2" library (tensorflow-only) + prettyLabel: "TF-Keras", + repoName: "TF-Keras", + repoUrl: "https://github.com/keras-team/tf-keras", + docsUrl: "https://huggingface.co/docs/hub/tf-keras", + snippets: snippets.tf_keras, + filter: true, + countDownloads: `path:"saved_model.pb"`, + }, + "keras-nlp": { + prettyLabel: "KerasNLP", + repoName: "KerasNLP", + repoUrl: "https://keras.io/keras_nlp/", + docsUrl: "https://github.com/keras-team/keras-nlp", + snippets: snippets.keras_nlp, + }, + k2: { + prettyLabel: "K2", + repoName: "k2", + repoUrl: "https://github.com/k2-fsa/k2", + }, + liveportrait: { + prettyLabel: "LivePortrait", + repoName: "LivePortrait", + repoUrl: "https://github.com/KwaiVGI/LivePortrait", + filter: false, + countDownloads: `path:"liveportrait/landmark.onnx"`, + }, + mindspore: { + prettyLabel: "MindSpore", + repoName: "mindspore", + repoUrl: "https://github.com/mindspore-ai/mindspore", + }, + "mamba-ssm": { + prettyLabel: "MambaSSM", + repoName: "MambaSSM", + repoUrl: "https://github.com/state-spaces/mamba", + filter: false, + snippets: snippets.mamba_ssm, + }, + "mars5-tts": { + prettyLabel: "MARS5-TTS", + repoName: "MARS5-TTS", + repoUrl: "https://github.com/Camb-ai/MARS5-TTS", + filter: false, + countDownloads: `path:"mars5_ar.safetensors"`, + snippets: snippets.mars5_tts, + }, + "mesh-anything": { + prettyLabel: "MeshAnything", + repoName: "MeshAnything", + repoUrl: "https://github.com/buaacyw/MeshAnything", + filter: false, + countDownloads: `path:"MeshAnything_350m.pth"`, + snippets: snippets.mesh_anything, + }, + "ml-agents": { + prettyLabel: "ml-agents", + repoName: "ml-agents", + repoUrl: "https://github.com/Unity-Technologies/ml-agents", + docsUrl: "https://huggingface.co/docs/hub/ml-agents", + snippets: snippets.mlAgents, + filter: true, + countDownloads: `path_extension:"onnx"`, + }, + mlx: { + prettyLabel: "MLX", + repoName: "MLX", + 
repoUrl: "https://github.com/ml-explore/mlx-examples/tree/main", + snippets: snippets.mlx, + filter: true, + }, + "mlx-image": { + prettyLabel: "mlx-image", + repoName: "mlx-image", + repoUrl: "https://github.com/riccardomusmeci/mlx-image", + docsUrl: "https://huggingface.co/docs/hub/mlx-image", + snippets: snippets.mlxim, + filter: false, + countDownloads: `path:"model.safetensors"`, + }, + "mlc-llm": { + prettyLabel: "MLC-LLM", + repoName: "MLC-LLM", + repoUrl: "https://github.com/mlc-ai/mlc-llm", + docsUrl: "https://llm.mlc.ai/docs/", + filter: false, + countDownloads: `path:"mlc-chat-config.json"`, + }, + nemo: { + prettyLabel: "NeMo", + repoName: "NeMo", + repoUrl: "https://github.com/NVIDIA/NeMo", + snippets: snippets.nemo, + filter: true, + countDownloads: `path_extension:"nemo" OR path:"model_config.yaml"`, + }, + open_clip: { + prettyLabel: "OpenCLIP", + repoName: "OpenCLIP", + repoUrl: "https://github.com/mlfoundations/open_clip", + snippets: snippets.open_clip, + filter: true, + countDownloads: `path_extension:"bin" AND path_filename:*pytorch_model`, + }, + paddlenlp: { + prettyLabel: "paddlenlp", + repoName: "PaddleNLP", + repoUrl: "https://github.com/PaddlePaddle/PaddleNLP", + docsUrl: "https://huggingface.co/docs/hub/paddlenlp", + snippets: snippets.paddlenlp, + filter: true, + countDownloads: `path:"model_config.json"`, + }, + peft: { + prettyLabel: "PEFT", + repoName: "PEFT", + repoUrl: "https://github.com/huggingface/peft", + snippets: snippets.peft, + filter: true, + countDownloads: `path:"adapter_config.json"`, + }, + "pyannote-audio": { + prettyLabel: "pyannote.audio", + repoName: "pyannote-audio", + repoUrl: "https://github.com/pyannote/pyannote-audio", + snippets: snippets.pyannote_audio, + filter: true, + }, + pythae: { + prettyLabel: "pythae", + repoName: "pythae", + repoUrl: "https://github.com/clementchadebec/benchmark_VAE", + snippets: snippets.pythae, + filter: true, + }, + recurrentgemma: { + prettyLabel: "RecurrentGemma", + repoName: 
"recurrentgemma", + repoUrl: "https://github.com/google-deepmind/recurrentgemma", + filter: false, + countDownloads: `path:"tokenizer.model"`, + }, + "sample-factory": { + prettyLabel: "sample-factory", + repoName: "sample-factory", + repoUrl: "https://github.com/alex-petrenko/sample-factory", + docsUrl: "https://huggingface.co/docs/hub/sample-factory", + snippets: snippets.sampleFactory, + filter: true, + countDownloads: `path:"cfg.json"`, + }, + "sentence-transformers": { + prettyLabel: "sentence-transformers", + repoName: "sentence-transformers", + repoUrl: "https://github.com/UKPLab/sentence-transformers", + docsUrl: "https://huggingface.co/docs/hub/sentence-transformers", + snippets: snippets.sentenceTransformers, + filter: true, + }, + setfit: { + prettyLabel: "setfit", + repoName: "setfit", + repoUrl: "https://github.com/huggingface/setfit", + docsUrl: "https://huggingface.co/docs/hub/setfit", + snippets: snippets.setfit, + filter: true, + }, + sklearn: { + prettyLabel: "Scikit-learn", + repoName: "Scikit-learn", + repoUrl: "https://github.com/scikit-learn/scikit-learn", + snippets: snippets.sklearn, + filter: true, + countDownloads: `path:"sklearn_model.joblib"`, + }, + spacy: { + prettyLabel: "spaCy", + repoName: "spaCy", + repoUrl: "https://github.com/explosion/spaCy", + docsUrl: "https://huggingface.co/docs/hub/spacy", + snippets: snippets.spacy, + filter: true, + countDownloads: `path_extension:"whl"`, + }, + "span-marker": { + prettyLabel: "SpanMarker", + repoName: "SpanMarkerNER", + repoUrl: "https://github.com/tomaarsen/SpanMarkerNER", + docsUrl: "https://huggingface.co/docs/hub/span_marker", + snippets: snippets.span_marker, + filter: true, + }, + speechbrain: { + prettyLabel: "speechbrain", + repoName: "speechbrain", + repoUrl: "https://github.com/speechbrain/speechbrain", + docsUrl: "https://huggingface.co/docs/hub/speechbrain", + snippets: snippets.speechbrain, + filter: true, + countDownloads: `path:"hyperparams.yaml"`, + }, + 
"stable-audio-tools": { + prettyLabel: "Stable Audio Tools", + repoName: "stable-audio-tools", + repoUrl: "https://github.com/Stability-AI/stable-audio-tools.git", + filter: false, + countDownloads: `path:"model.safetensors"`, + snippets: snippets.stable_audio_tools, + }, + "diffusion-single-file": { + prettyLabel: "Diffusion Single File", + repoName: "diffusion-single-file", + repoUrl: "https://github.com/comfyanonymous/ComfyUI", + filter: false, + countDownloads: `path_extension:"safetensors"`, + }, + "stable-baselines3": { + prettyLabel: "stable-baselines3", + repoName: "stable-baselines3", + repoUrl: "https://github.com/huggingface/huggingface_sb3", + docsUrl: "https://huggingface.co/docs/hub/stable-baselines3", + snippets: snippets.stableBaselines3, + filter: true, + countDownloads: `path_extension:"zip"`, + }, + stanza: { + prettyLabel: "Stanza", + repoName: "stanza", + repoUrl: "https://github.com/stanfordnlp/stanza", + docsUrl: "https://huggingface.co/docs/hub/stanza", + snippets: snippets.stanza, + filter: true, + countDownloads: `path:"models/default.zip"`, + }, + tensorflowtts: { + prettyLabel: "TensorFlowTTS", + repoName: "TensorFlowTTS", + repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS", + snippets: snippets.tensorflowtts, + }, + "tic-clip": { + prettyLabel: "TiC-CLIP", + repoName: "TiC-CLIP", + repoUrl: "https://github.com/apple/ml-tic-clip", + filter: false, + countDownloads: `path_extension:"pt" AND path_prefix:"checkpoints/"`, + }, + timesfm: { + prettyLabel: "TimesFM", + repoName: "timesfm", + repoUrl: "https://github.com/google-research/timesfm", + filter: false, + countDownloads: `path:"checkpoints/checkpoint_1100000/state/checkpoint"`, + }, + timm: { + prettyLabel: "timm", + repoName: "pytorch-image-models", + repoUrl: "https://github.com/rwightman/pytorch-image-models", + docsUrl: "https://huggingface.co/docs/hub/timm", + snippets: snippets.timm, + filter: true, + countDownloads: `path:"pytorch_model.bin" OR path:"model.safetensors"`, 
+ }, + transformers: { + prettyLabel: "Transformers", + repoName: "🤗/transformers", + repoUrl: "https://github.com/huggingface/transformers", + docsUrl: "https://huggingface.co/docs/hub/transformers", + snippets: snippets.transformers, + filter: true, + }, + "transformers.js": { + prettyLabel: "Transformers.js", + repoName: "transformers.js", + repoUrl: "https://github.com/xenova/transformers.js", + docsUrl: "https://huggingface.co/docs/hub/transformers-js", + snippets: snippets.transformersJS, + filter: true, + }, + "unity-sentis": { + prettyLabel: "unity-sentis", + repoName: "unity-sentis", + repoUrl: "https://github.com/Unity-Technologies/sentis-samples", + snippets: snippets.sentis, + filter: true, + countDownloads: `path_extension:"sentis"`, + }, + voicecraft: { + prettyLabel: "VoiceCraft", + repoName: "VoiceCraft", + repoUrl: "https://github.com/jasonppy/VoiceCraft", + docsUrl: "https://github.com/jasonppy/VoiceCraft", + snippets: snippets.voicecraft, + }, + whisperkit: { + prettyLabel: "WhisperKit", + repoName: "WhisperKit", + repoUrl: "https://github.com/argmaxinc/WhisperKit", + docsUrl: "https://github.com/argmaxinc/WhisperKit?tab=readme-ov-file#homebrew", + snippets: snippets.whisperkit, + countDownloads: `path_filename:"model" AND path_extension:"mil" AND _exists_:"path_prefix"`, + }, +} satisfies Record; + +export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS; + +export const ALL_MODEL_LIBRARY_KEYS = Object.keys(MODEL_LIBRARIES_UI_ELEMENTS) as ModelLibraryKey[]; + +export const ALL_DISPLAY_MODEL_LIBRARY_KEYS = ( + Object.entries(MODEL_LIBRARIES_UI_ELEMENTS as Record) as [ + ModelLibraryKey, + LibraryUiElement, + ][] +) + // eslint-disable-next-line @typescript-eslint/no-unused-vars + .filter(([_, v]) => v.filter) + .map(([k]) => k); diff --git a/data/node_modules/@huggingface/tasks/src/pipelines.ts b/data/node_modules/@huggingface/tasks/src/pipelines.ts new file mode 100644 index 
0000000000000000000000000000000000000000..0bf6e7cd2dc0022a5678091a35f0114159664c48 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/pipelines.ts @@ -0,0 +1,674 @@ +export const MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"] as const; + +export type Modality = (typeof MODALITIES)[number]; + +export const MODALITY_LABELS = { + multimodal: "Multimodal", + nlp: "Natural Language Processing", + audio: "Audio", + cv: "Computer Vision", + rl: "Reinforcement Learning", + tabular: "Tabular", + other: "Other", +} satisfies Record; + +/** + * Public interface for a sub task. + * + * This can be used in a model card's `model-index` metadata. + * and is more granular classification that can grow significantly + * over time as new tasks are added. + */ +export interface SubTask { + /** + * type of the task (e.g. audio-source-separation) + */ + type: string; + /** + * displayed name of the task (e.g. Audio Source Separation) + */ + name: string; +} + +/** + * Public interface for a PipelineData. + * + * This information corresponds to a pipeline type (aka task) + * in the Hub. + */ +export interface PipelineData { + /** + * displayed name of the task (e.g. Text Classification) + */ + name: string; + subtasks?: SubTask[]; + modality: Modality; + /** + * color for the tag icon. + */ + color: "blue" | "green" | "indigo" | "orange" | "red" | "yellow"; + /** + * whether to hide in /models filters + */ + hideInModels?: boolean; + /** + * whether to hide in /datasets filters + */ + hideInDatasets?: boolean; +} + +/// Coarse-grained taxonomy of tasks +/// +/// This type is used in multiple places in the Hugging Face +/// ecosystem: +/// - To determine which widget to show. +/// - To determine which endpoint of Inference Endpoints to use. +/// - As filters at the left of models and datasets page. +/// +/// Note that this is sensitive to order. +/// For each domain, the order should be of decreasing specificity. 
+/// This will impact the default pipeline tag of a model when not +/// specified. +export const PIPELINE_DATA = { + "text-classification": { + name: "Text Classification", + subtasks: [ + { + type: "acceptability-classification", + name: "Acceptability Classification", + }, + { + type: "entity-linking-classification", + name: "Entity Linking Classification", + }, + { + type: "fact-checking", + name: "Fact Checking", + }, + { + type: "intent-classification", + name: "Intent Classification", + }, + { + type: "language-identification", + name: "Language Identification", + }, + { + type: "multi-class-classification", + name: "Multi Class Classification", + }, + { + type: "multi-label-classification", + name: "Multi Label Classification", + }, + { + type: "multi-input-text-classification", + name: "Multi-input Text Classification", + }, + { + type: "natural-language-inference", + name: "Natural Language Inference", + }, + { + type: "semantic-similarity-classification", + name: "Semantic Similarity Classification", + }, + { + type: "sentiment-classification", + name: "Sentiment Classification", + }, + { + type: "topic-classification", + name: "Topic Classification", + }, + { + type: "semantic-similarity-scoring", + name: "Semantic Similarity Scoring", + }, + { + type: "sentiment-scoring", + name: "Sentiment Scoring", + }, + { + type: "sentiment-analysis", + name: "Sentiment Analysis", + }, + { + type: "hate-speech-detection", + name: "Hate Speech Detection", + }, + { + type: "text-scoring", + name: "Text Scoring", + }, + ], + modality: "nlp", + color: "orange", + }, + "token-classification": { + name: "Token Classification", + subtasks: [ + { + type: "named-entity-recognition", + name: "Named Entity Recognition", + }, + { + type: "part-of-speech", + name: "Part of Speech", + }, + { + type: "parsing", + name: "Parsing", + }, + { + type: "lemmatization", + name: "Lemmatization", + }, + { + type: "word-sense-disambiguation", + name: "Word Sense Disambiguation", + }, + { + 
type: "coreference-resolution", + name: "Coreference-resolution", + }, + ], + modality: "nlp", + color: "blue", + }, + "table-question-answering": { + name: "Table Question Answering", + modality: "nlp", + color: "green", + }, + "question-answering": { + name: "Question Answering", + subtasks: [ + { + type: "extractive-qa", + name: "Extractive QA", + }, + { + type: "open-domain-qa", + name: "Open Domain QA", + }, + { + type: "closed-domain-qa", + name: "Closed Domain QA", + }, + ], + modality: "nlp", + color: "blue", + }, + "zero-shot-classification": { + name: "Zero-Shot Classification", + modality: "nlp", + color: "yellow", + }, + translation: { + name: "Translation", + modality: "nlp", + color: "green", + }, + summarization: { + name: "Summarization", + subtasks: [ + { + type: "news-articles-summarization", + name: "News Articles Summarization", + }, + { + type: "news-articles-headline-generation", + name: "News Articles Headline Generation", + }, + ], + modality: "nlp", + color: "indigo", + }, + "feature-extraction": { + name: "Feature Extraction", + modality: "nlp", + color: "red", + }, + "text-generation": { + name: "Text Generation", + subtasks: [ + { + type: "dialogue-modeling", + name: "Dialogue Modeling", + }, + { + type: "dialogue-generation", + name: "Dialogue Generation", + }, + { + type: "conversational", + name: "Conversational", + }, + { + type: "language-modeling", + name: "Language Modeling", + }, + ], + modality: "nlp", + color: "indigo", + }, + "text2text-generation": { + name: "Text2Text Generation", + subtasks: [ + { + type: "text-simplification", + name: "Text simplification", + }, + { + type: "explanation-generation", + name: "Explanation Generation", + }, + { + type: "abstractive-qa", + name: "Abstractive QA", + }, + { + type: "open-domain-abstractive-qa", + name: "Open Domain Abstractive QA", + }, + { + type: "closed-domain-qa", + name: "Closed Domain QA", + }, + { + type: "open-book-qa", + name: "Open Book QA", + }, + { + type: 
"closed-book-qa", + name: "Closed Book QA", + }, + ], + modality: "nlp", + color: "indigo", + }, + "fill-mask": { + name: "Fill-Mask", + subtasks: [ + { + type: "slot-filling", + name: "Slot Filling", + }, + { + type: "masked-language-modeling", + name: "Masked Language Modeling", + }, + ], + modality: "nlp", + color: "red", + }, + "sentence-similarity": { + name: "Sentence Similarity", + modality: "nlp", + color: "yellow", + }, + "text-to-speech": { + name: "Text-to-Speech", + modality: "audio", + color: "yellow", + }, + "text-to-audio": { + name: "Text-to-Audio", + modality: "audio", + color: "yellow", + }, + "automatic-speech-recognition": { + name: "Automatic Speech Recognition", + modality: "audio", + color: "yellow", + }, + "audio-to-audio": { + name: "Audio-to-Audio", + modality: "audio", + color: "blue", + }, + "audio-classification": { + name: "Audio Classification", + subtasks: [ + { + type: "keyword-spotting", + name: "Keyword Spotting", + }, + { + type: "speaker-identification", + name: "Speaker Identification", + }, + { + type: "audio-intent-classification", + name: "Audio Intent Classification", + }, + { + type: "audio-emotion-recognition", + name: "Audio Emotion Recognition", + }, + { + type: "audio-language-identification", + name: "Audio Language Identification", + }, + ], + modality: "audio", + color: "green", + }, + "voice-activity-detection": { + name: "Voice Activity Detection", + modality: "audio", + color: "red", + }, + "depth-estimation": { + name: "Depth Estimation", + modality: "cv", + color: "yellow", + }, + "image-classification": { + name: "Image Classification", + subtasks: [ + { + type: "multi-label-image-classification", + name: "Multi Label Image Classification", + }, + { + type: "multi-class-image-classification", + name: "Multi Class Image Classification", + }, + ], + modality: "cv", + color: "blue", + }, + "object-detection": { + name: "Object Detection", + subtasks: [ + { + type: "face-detection", + name: "Face Detection", + }, 
+ { + type: "vehicle-detection", + name: "Vehicle Detection", + }, + ], + modality: "cv", + color: "yellow", + }, + "image-segmentation": { + name: "Image Segmentation", + subtasks: [ + { + type: "instance-segmentation", + name: "Instance Segmentation", + }, + { + type: "semantic-segmentation", + name: "Semantic Segmentation", + }, + { + type: "panoptic-segmentation", + name: "Panoptic Segmentation", + }, + ], + modality: "cv", + color: "green", + }, + "text-to-image": { + name: "Text-to-Image", + modality: "cv", + color: "yellow", + }, + "image-to-text": { + name: "Image-to-Text", + subtasks: [ + { + type: "image-captioning", + name: "Image Captioning", + }, + ], + modality: "cv", + color: "red", + }, + "image-to-image": { + name: "Image-to-Image", + subtasks: [ + { + type: "image-inpainting", + name: "Image Inpainting", + }, + { + type: "image-colorization", + name: "Image Colorization", + }, + { + type: "super-resolution", + name: "Super Resolution", + }, + ], + modality: "cv", + color: "indigo", + }, + "image-to-video": { + name: "Image-to-Video", + modality: "cv", + color: "indigo", + }, + "unconditional-image-generation": { + name: "Unconditional Image Generation", + modality: "cv", + color: "green", + }, + "video-classification": { + name: "Video Classification", + modality: "cv", + color: "blue", + }, + "reinforcement-learning": { + name: "Reinforcement Learning", + modality: "rl", + color: "red", + }, + robotics: { + name: "Robotics", + modality: "rl", + subtasks: [ + { + type: "grasping", + name: "Grasping", + }, + { + type: "task-planning", + name: "Task Planning", + }, + ], + color: "blue", + }, + "tabular-classification": { + name: "Tabular Classification", + modality: "tabular", + subtasks: [ + { + type: "tabular-multi-class-classification", + name: "Tabular Multi Class Classification", + }, + { + type: "tabular-multi-label-classification", + name: "Tabular Multi Label Classification", + }, + ], + color: "blue", + }, + "tabular-regression": { + name: 
"Tabular Regression", + modality: "tabular", + subtasks: [ + { + type: "tabular-single-column-regression", + name: "Tabular Single Column Regression", + }, + ], + color: "blue", + }, + "tabular-to-text": { + name: "Tabular to Text", + modality: "tabular", + subtasks: [ + { + type: "rdf-to-text", + name: "RDF to text", + }, + ], + color: "blue", + hideInModels: true, + }, + "table-to-text": { + name: "Table to Text", + modality: "nlp", + color: "blue", + hideInModels: true, + }, + "multiple-choice": { + name: "Multiple Choice", + subtasks: [ + { + type: "multiple-choice-qa", + name: "Multiple Choice QA", + }, + { + type: "multiple-choice-coreference-resolution", + name: "Multiple Choice Coreference Resolution", + }, + ], + modality: "nlp", + color: "blue", + hideInModels: true, + }, + "text-retrieval": { + name: "Text Retrieval", + subtasks: [ + { + type: "document-retrieval", + name: "Document Retrieval", + }, + { + type: "utterance-retrieval", + name: "Utterance Retrieval", + }, + { + type: "entity-linking-retrieval", + name: "Entity Linking Retrieval", + }, + { + type: "fact-checking-retrieval", + name: "Fact Checking Retrieval", + }, + ], + modality: "nlp", + color: "indigo", + hideInModels: true, + }, + "time-series-forecasting": { + name: "Time Series Forecasting", + modality: "tabular", + subtasks: [ + { + type: "univariate-time-series-forecasting", + name: "Univariate Time Series Forecasting", + }, + { + type: "multivariate-time-series-forecasting", + name: "Multivariate Time Series Forecasting", + }, + ], + color: "blue", + }, + "text-to-video": { + name: "Text-to-Video", + modality: "cv", + color: "green", + }, + "image-text-to-text": { + name: "Image-Text-to-Text", + modality: "multimodal", + color: "red", + hideInDatasets: true, + }, + "visual-question-answering": { + name: "Visual Question Answering", + subtasks: [ + { + type: "visual-question-answering", + name: "Visual Question Answering", + }, + ], + modality: "multimodal", + color: "red", + }, + 
"document-question-answering": { + name: "Document Question Answering", + subtasks: [ + { + type: "document-question-answering", + name: "Document Question Answering", + }, + ], + modality: "multimodal", + color: "blue", + hideInDatasets: true, + }, + "zero-shot-image-classification": { + name: "Zero-Shot Image Classification", + modality: "cv", + color: "yellow", + }, + "graph-ml": { + name: "Graph Machine Learning", + modality: "other", + color: "green", + }, + "mask-generation": { + name: "Mask Generation", + modality: "cv", + color: "indigo", + }, + "zero-shot-object-detection": { + name: "Zero-Shot Object Detection", + modality: "cv", + color: "yellow", + }, + "text-to-3d": { + name: "Text-to-3D", + modality: "cv", + color: "yellow", + }, + "image-to-3d": { + name: "Image-to-3D", + modality: "cv", + color: "green", + }, + "image-feature-extraction": { + name: "Image Feature Extraction", + modality: "cv", + color: "indigo", + }, + other: { + name: "Other", + modality: "other", + color: "blue", + hideInModels: true, + hideInDatasets: true, + }, +} satisfies Record; + +export type PipelineType = keyof typeof PIPELINE_DATA; + +export type WidgetType = PipelineType | "conversational"; + +export const PIPELINE_TYPES = Object.keys(PIPELINE_DATA) as PipelineType[]; + +export const SUBTASK_TYPES = Object.values(PIPELINE_DATA) + .flatMap((data) => ("subtasks" in data ? 
data.subtasks : [])) + .map((s) => s.type); + +export const PIPELINE_TYPES_SET = new Set(PIPELINE_TYPES); diff --git a/data/node_modules/@huggingface/tasks/src/snippets/curl.ts b/data/node_modules/@huggingface/tasks/src/snippets/curl.ts new file mode 100644 index 0000000000000000000000000000000000000000..d3fbc0bcb570252ffe6b9b93522c50ff9c2f4adf --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/snippets/curl.ts @@ -0,0 +1,80 @@ +import type { PipelineType } from "../pipelines.js"; +import { getModelInputSnippet } from "./inputs.js"; +import type { ModelDataMinimal } from "./types.js"; + +export const snippetBasic = (model: ModelDataMinimal, accessToken: string): string => + `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\ + -H 'Content-Type: application/json' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; + +export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => { + if (model.config?.tokenizer_config?.chat_template) { + // Conversational model detected, so we display a code snippet that features the Messages API + return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\ +-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\ +-H 'Content-Type: application/json' \\ +-d '{ + "model": "${model.id}", + "messages": [{"role": "user", "content": "What is the capital of France?"}], + "max_tokens": 500, + "stream": false +}' +`; + } else { + return snippetBasic(model, accessToken); + } +}; + +export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string => + `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\ + -H 'Content-Type: application/json' \\ + -H "Authorization: Bearer ${accessToken 
|| `{API_TOKEN}`}" +`; + +export const snippetFile = (model: ModelDataMinimal, accessToken: string): string => + `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + --data-binary '@${getModelInputSnippet(model, true, true)}' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; + +export const curlSnippets: Partial string>> = { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic, + "token-classification": snippetBasic, + "table-question-answering": snippetBasic, + "question-answering": snippetBasic, + "zero-shot-classification": snippetZeroShotClassification, + translation: snippetBasic, + summarization: snippetBasic, + "feature-extraction": snippetBasic, + "text-generation": snippetTextGeneration, + "text2text-generation": snippetBasic, + "fill-mask": snippetBasic, + "sentence-similarity": snippetBasic, + "automatic-speech-recognition": snippetFile, + "text-to-image": snippetBasic, + "text-to-speech": snippetBasic, + "text-to-audio": snippetBasic, + "audio-to-audio": snippetFile, + "audio-classification": snippetFile, + "image-classification": snippetFile, + "image-to-text": snippetFile, + "object-detection": snippetFile, + "image-segmentation": snippetFile, +}; + +export function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): string { + return model.pipeline_tag && model.pipeline_tag in curlSnippets + ? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? 
"" + : ""; +} + +export function hasCurlInferenceSnippet(model: Pick): boolean { + return !!model.pipeline_tag && model.pipeline_tag in curlSnippets; +} diff --git a/data/node_modules/@huggingface/tasks/src/snippets/index.ts b/data/node_modules/@huggingface/tasks/src/snippets/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..3cf9b9d236dd2347c76e63300db71a3f964f35dc --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/snippets/index.ts @@ -0,0 +1,6 @@ +import * as inputs from "./inputs"; +import * as curl from "./curl"; +import * as python from "./python"; +import * as js from "./js"; + +export { inputs, curl, python, js }; diff --git a/data/node_modules/@huggingface/tasks/src/snippets/inputs.ts b/data/node_modules/@huggingface/tasks/src/snippets/inputs.ts new file mode 100644 index 0000000000000000000000000000000000000000..f3c76d12c8e8ab9f58c2e3733cc896ec46e31251 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/snippets/inputs.ts @@ -0,0 +1,136 @@ +import type { PipelineType } from "../pipelines"; +import type { ModelDataMinimal } from "./types"; + +const inputsZeroShotClassification = () => + `"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"`; + +const inputsTranslation = () => `"Меня зовут Вольфганг и я живу в Берлине"`; + +const inputsSummarization = () => + `"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. 
Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`; + +const inputsTableQuestionAnswering = () => + `{ + "query": "How many stars does the transformers repository have?", + "table": { + "Repository": ["Transformers", "Datasets", "Tokenizers"], + "Stars": ["36542", "4512", "3934"], + "Contributors": ["651", "77", "34"], + "Programming language": [ + "Python", + "Python", + "Rust, Python and NodeJS" + ] + } +}`; + +const inputsVisualQuestionAnswering = () => + `{ + "image": "cat.png", + "question": "What is in this image?" +}`; + +const inputsQuestionAnswering = () => + `{ + "question": "What is my name?", + "context": "My name is Clara and I live in Berkeley." +}`; + +const inputsTextClassification = () => `"I like you. I love you"`; + +const inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`; + +const inputsTextGeneration = () => `"Can you please let us know more details about your "`; + +const inputsText2TextGeneration = () => `"The answer to the universe is"`; + +const inputsFillMask = (model: ModelDataMinimal) => `"The answer to the universe is ${model.mask_token}."`; + +const inputsSentenceSimilarity = () => + `{ + "source_sentence": "That is a happy person", + "sentences": [ + "That is a happy dog", + "That is a very happy person", + "Today is a sunny day" + ] +}`; + +const inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`; + +const inputsImageClassification = () => `"cats.jpg"`; + +const inputsImageToText = () => `"cats.jpg"`; + +const inputsImageSegmentation = () => `"cats.jpg"`; + +const inputsObjectDetection = () => `"cats.jpg"`; + +const inputsAudioToAudio = () => `"sample1.flac"`; + +const inputsAudioClassification = () => `"sample1.flac"`; + +const 
inputsTextToImage = () => `"Astronaut riding a horse"`; + +const inputsTextToSpeech = () => `"The answer to the universe is 42"`; + +const inputsTextToAudio = () => `"liquid drum and bass, atmospheric synths, airy sounds"`; + +const inputsAutomaticSpeechRecognition = () => `"sample1.flac"`; + +const inputsTabularPrediction = () => + `'{"Height":[11.52,12.48],"Length1":[23.2,24.0],"Length2":[25.4,26.3],"Species": ["Bream","Bream"]}'`; + +const inputsZeroShotImageClassification = () => `"cats.jpg"`; + +const modelInputSnippets: { + [key in PipelineType]?: (model: ModelDataMinimal) => string; +} = { + "audio-to-audio": inputsAudioToAudio, + "audio-classification": inputsAudioClassification, + "automatic-speech-recognition": inputsAutomaticSpeechRecognition, + "document-question-answering": inputsVisualQuestionAnswering, + "feature-extraction": inputsFeatureExtraction, + "fill-mask": inputsFillMask, + "image-classification": inputsImageClassification, + "image-to-text": inputsImageToText, + "image-segmentation": inputsImageSegmentation, + "object-detection": inputsObjectDetection, + "question-answering": inputsQuestionAnswering, + "sentence-similarity": inputsSentenceSimilarity, + summarization: inputsSummarization, + "table-question-answering": inputsTableQuestionAnswering, + "tabular-regression": inputsTabularPrediction, + "tabular-classification": inputsTabularPrediction, + "text-classification": inputsTextClassification, + "text-generation": inputsTextGeneration, + "text-to-image": inputsTextToImage, + "text-to-speech": inputsTextToSpeech, + "text-to-audio": inputsTextToAudio, + "text2text-generation": inputsText2TextGeneration, + "token-classification": inputsTokenClassification, + translation: inputsTranslation, + "zero-shot-classification": inputsZeroShotClassification, + "zero-shot-image-classification": inputsZeroShotImageClassification, +}; + +// Use noWrap to put the whole snippet on a single line (removing new lines and tabulations) +// Use noQuotes to 
strip quotes from start & end (example: "abc" -> abc) +export function getModelInputSnippet(model: ModelDataMinimal, noWrap = false, noQuotes = false): string { + if (model.pipeline_tag) { + const inputs = modelInputSnippets[model.pipeline_tag]; + if (inputs) { + let result = inputs(model); + if (noWrap) { + result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " "); + } + if (noQuotes) { + const REGEX_QUOTES = /^"(.+)"$/s; + const match = result.match(REGEX_QUOTES); + result = match ? match[1] : result; + } + return result; + } + } + return "No input example has been defined for this model task."; +} diff --git a/data/node_modules/@huggingface/tasks/src/snippets/js.ts b/data/node_modules/@huggingface/tasks/src/snippets/js.ts new file mode 100644 index 0000000000000000000000000000000000000000..83fa63a964b6d03595ef40f327f14dbf34e5bc06 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/snippets/js.ts @@ -0,0 +1,183 @@ +import type { PipelineType } from "../pipelines.js"; +import { getModelInputSnippet } from "./inputs.js"; +import type { ModelDataMinimal } from "./types.js"; + +export const snippetBasic = (model: ModelDataMinimal, accessToken: string): string => + `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.json(); + return result; +} + +query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + console.log(JSON.stringify(response)); +});`; + +export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => { + if (model.config?.tokenizer_config?.chat_template) { + // Conversational model detected, so we display a code snippet that features the Messages API + return `import { HfInference } from "@huggingface/inference"; + +const 
inference = new HfInference("${accessToken || `{API_TOKEN}`}"); + +for await (const chunk of inference.chatCompletionStream({ + model: "${model.id}", + messages: [{ role: "user", content: "What is the capital of France?" }], + max_tokens: 500, +})) { + process.stdout.write(chunk.choices[0]?.delta?.content || ""); +} +`; + } else { + return snippetBasic(model, accessToken); + } +}; +export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string => + `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.json(); + return result; +} + +query({"inputs": ${getModelInputSnippet( + model + )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => { + console.log(JSON.stringify(response)); +});`; + +export const snippetTextToImage = (model: ModelDataMinimal, accessToken: string): string => + `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.blob(); + return result; +} +query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + // Use image +});`; + +export const snippetTextToAudio = (model: ModelDataMinimal, accessToken: string): string => { + const commonSnippet = `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: JSON.stringify(data), 
+ } + );`; + if (model.library_name === "transformers") { + return ( + commonSnippet + + ` + const result = await response.blob(); + return result; + } + query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + // Returns a byte object of the Audio wavform. Use it directly! + });` + ); + } else { + return ( + commonSnippet + + ` + const result = await response.json(); + return result; + } + + query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + console.log(JSON.stringify(response)); + });` + ); + } +}; + +export const snippetFile = (model: ModelDataMinimal, accessToken: string): string => + `async function query(filename) { + const data = fs.readFileSync(filename); + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { + Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" + "Content-Type": "application/json", + }, + method: "POST", + body: data, + } + ); + const result = await response.json(); + return result; +} + +query(${getModelInputSnippet(model)}).then((response) => { + console.log(JSON.stringify(response)); +});`; + +export const jsSnippets: Partial string>> = { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic, + "token-classification": snippetBasic, + "table-question-answering": snippetBasic, + "question-answering": snippetBasic, + "zero-shot-classification": snippetZeroShotClassification, + translation: snippetBasic, + summarization: snippetBasic, + "feature-extraction": snippetBasic, + "text-generation": snippetTextGeneration, + "text2text-generation": snippetBasic, + "fill-mask": snippetBasic, + "sentence-similarity": snippetBasic, + "automatic-speech-recognition": snippetFile, + "text-to-image": snippetTextToImage, + "text-to-speech": snippetTextToAudio, + "text-to-audio": snippetTextToAudio, + "audio-to-audio": snippetFile, + "audio-classification": snippetFile, + "image-classification": snippetFile, + 
"image-to-text": snippetFile, + "object-detection": snippetFile, + "image-segmentation": snippetFile, +}; + +export function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string): string { + return model.pipeline_tag && model.pipeline_tag in jsSnippets + ? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? "" + : ""; +} + +export function hasJsInferenceSnippet(model: ModelDataMinimal): boolean { + return !!model.pipeline_tag && model.pipeline_tag in jsSnippets; +} diff --git a/data/node_modules/@huggingface/tasks/src/snippets/python.ts b/data/node_modules/@huggingface/tasks/src/snippets/python.ts new file mode 100644 index 0000000000000000000000000000000000000000..f0fd26397e45b9726423e7898613645e4cbed4ea --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/snippets/python.ts @@ -0,0 +1,177 @@ +import type { PipelineType } from "../pipelines.js"; +import { getModelInputSnippet } from "./inputs.js"; +import type { ModelDataMinimal } from "./types.js"; + +export const snippetConversational = (model: ModelDataMinimal, accessToken: string): string => + `from huggingface_hub import InferenceClient + +client = InferenceClient( + "${model.id}", + token="${accessToken || "{API_TOKEN}"}", +) + +for message in client.chat_completion( + messages=[{"role": "user", "content": "What is the capital of France?"}], + max_tokens=500, + stream=True, +): + print(message.choices[0].delta.content, end="") +`; + +export const snippetZeroShotClassification = (model: ModelDataMinimal): string => + `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, + "parameters": {"candidate_labels": ["refund", "legal", "faq"]}, +})`; + +export const snippetZeroShotImageClassification = (model: ModelDataMinimal): string => + `def query(data): + with open(data["image_path"], "rb") as f: + img = f.read() + payload={ + "parameters": data["parameters"], + 
"inputs": base64.b64encode(img).decode("utf-8") + } + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "image_path": ${getModelInputSnippet(model)}, + "parameters": {"candidate_labels": ["cat", "dog", "llama"]}, +})`; + +export const snippetBasic = (model: ModelDataMinimal): string => + `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, +})`; + +export const snippetFile = (model: ModelDataMinimal): string => + `def query(filename): + with open(filename, "rb") as f: + data = f.read() + response = requests.post(API_URL, headers=headers, data=data) + return response.json() + +output = query(${getModelInputSnippet(model)})`; + +export const snippetTextToImage = (model: ModelDataMinimal): string => + `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content +image_bytes = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the image with PIL.Image for example +import io +from PIL import Image +image = Image.open(io.BytesIO(image_bytes))`; + +export const snippetTabular = (model: ModelDataMinimal): string => + `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content +response = query({ + "inputs": {"data": ${getModelInputSnippet(model)}}, +})`; + +export const snippetTextToAudio = (model: ModelDataMinimal): string => { + // Transformers TTS pipeline and api-inference-community (AIC) pipeline outputs are diverged + // with the latest update to inference-api (IA). + // Transformers IA returns a byte object (wav file), whereas AIC returns wav and sampling_rate. 
+ if (model.library_name === "transformers") { + return `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content + +audio_bytes = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the audio with IPython.display for example +from IPython.display import Audio +Audio(audio_bytes)`; + } else { + return `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +audio, sampling_rate = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the audio with IPython.display for example +from IPython.display import Audio +Audio(audio, rate=sampling_rate)`; + } +}; + +export const snippetDocumentQuestionAnswering = (model: ModelDataMinimal): string => + `def query(payload): + with open(payload["image"], "rb") as f: + img = f.read() + payload["image"] = base64.b64encode(img).decode("utf-8") + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, +})`; + +export const pythonSnippets: Partial string>> = { + // Same order as in tasks/src/pipelines.ts + "text-classification": snippetBasic, + "token-classification": snippetBasic, + "table-question-answering": snippetBasic, + "question-answering": snippetBasic, + "zero-shot-classification": snippetZeroShotClassification, + translation: snippetBasic, + summarization: snippetBasic, + "feature-extraction": snippetBasic, + "text-generation": snippetBasic, + "text2text-generation": snippetBasic, + "fill-mask": snippetBasic, + "sentence-similarity": snippetBasic, + "automatic-speech-recognition": snippetFile, + "text-to-image": snippetTextToImage, + "text-to-speech": snippetTextToAudio, + "text-to-audio": snippetTextToAudio, + "audio-to-audio": snippetFile, + "audio-classification": snippetFile, + "image-classification": snippetFile, + "tabular-regression": snippetTabular, + 
"tabular-classification": snippetTabular, + "object-detection": snippetFile, + "image-segmentation": snippetFile, + "document-question-answering": snippetDocumentQuestionAnswering, + "image-to-text": snippetFile, + "zero-shot-image-classification": snippetZeroShotImageClassification, +}; + +export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string { + if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) { + // Conversational model detected, so we display a code snippet that features the Messages API + return snippetConversational(model, accessToken); + } else { + const body = + model.pipeline_tag && model.pipeline_tag in pythonSnippets + ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? "" + : ""; + + return `import requests + +API_URL = "https://api-inference.huggingface.co/models/${model.id}" +headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}} + +${body}`; + } +} + +export function hasPythonInferenceSnippet(model: ModelDataMinimal): boolean { + return !!model.pipeline_tag && model.pipeline_tag in pythonSnippets; +} diff --git a/data/node_modules/@huggingface/tasks/src/snippets/types.ts b/data/node_modules/@huggingface/tasks/src/snippets/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..658c3ebf8272b5087ae65c298c1e817606418e23 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/snippets/types.ts @@ -0,0 +1,8 @@ +import type { ModelData } from "../model-data"; + +/** + * Minimal model data required for snippets. + * + * Add more fields as needed. 
+ */ +export type ModelDataMinimal = Pick; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/about.md b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..7772acd60f0fa1416a3b9ad4e571f35734957271 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/about.md @@ -0,0 +1,86 @@ +## Use Cases + +### Command Recognition + +Command recognition or keyword spotting classifies utterances into a predefined set of commands. This is often done on-device for fast response time. + +As an example, using the Google Speech Commands dataset, given an input, a model can classify which of the following commands the user is typing: + +``` +'yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go', 'unknown', 'silence' +``` + +Speechbrain models can easily perform this task with just a couple of lines of code! + +```python +from speechbrain.pretrained import EncoderClassifier +model = EncoderClassifier.from_hparams( + "speechbrain/google_speech_command_xvector" +) +model.classify_file("file.wav") +``` + +### Language Identification + +Datasets such as VoxLingua107 allow anyone to train language identification models for up to 107 languages! This can be extremely useful as a preprocessing step for other systems. Here's an example [model](https://huggingface.co/TalTechNLP/voxlingua107-epaca-tdnn)trained on VoxLingua107. + +### Emotion recognition + +Emotion recognition is self explanatory. In addition to trying the widgets, you can use Inference Endpoints to perform audio classification. Here is a simple example that uses a [HuBERT](https://huggingface.co/superb/hubert-large-superb-er) model fine-tuned for this task. 
+ +```python +import json +import requests + +headers = {"Authorization": f"Bearer {API_TOKEN}"} +API_URL = "https://api-inference.huggingface.co/models/superb/hubert-large-superb-er" + +def query(filename): + with open(filename, "rb") as f: + data = f.read() + response = requests.request("POST", API_URL, headers=headers, data=data) + return json.loads(response.content.decode("utf-8")) + +data = query("sample1.flac") +# [{'label': 'neu', 'score': 0.60}, +# {'label': 'hap', 'score': 0.20}, +# {'label': 'ang', 'score': 0.13}, +# {'label': 'sad', 'score': 0.07}] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with audio classification models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.audioClassification({ + data: await (await fetch("sample.flac")).blob(), + model: "facebook/mms-lid-126", +}); +``` + +### Speaker Identification + +Speaker Identification is classifying the audio of the person speaking. Speakers are usually predefined. You can try out this task with [this model](https://huggingface.co/superb/wav2vec2-base-superb-sid). A useful dataset for this task is VoxCeleb1. + +## Solving audio classification for your own data + +We have some great news! You can do fine-tuning (transfer learning) to train a well-performing model without requiring as much data. Pretrained models such as Wav2Vec2 and HuBERT exist. [Facebook's Wav2Vec2 XLS-R model](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2) is a large multilingual model trained on 128 languages and with 436K hours of speech. Similarly, you can also use [OpenAI's Whisper](https://huggingface.co/docs/transformers/model_doc/whisper) trained on up to 4 Million hours of multilingual speech data for this task too! + +## Useful Resources + +Would you like to learn more about the topic? Awesome! 
Here you can find some curated resources that you may find helpful! + +### Notebooks + +- [PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification) + +### Documentation + +- [Hugging Face Audio Course](https://huggingface.co/learn/audio-course/chapter4/introduction) +- [Audio classification task guide](https://huggingface.co/docs/transformers/tasks/audio_classification) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..e58bfb6771486e923476f40900259a974c9690ce --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/data.ts @@ -0,0 +1,77 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A benchmark of 10 different audio tasks.", + id: "superb", + }, + ], + demo: { + inputs: [ + { + filename: "audio.wav", + type: "audio", + }, + ], + outputs: [ + { + data: [ + { + label: "Up", + score: 0.2, + }, + { + label: "Down", + score: 0.8, + }, + ], + type: "chart", + }, + ], + }, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: "", + id: "recall", + }, + { + description: "", + id: "precision", + }, + { + description: "", + id: "f1", + }, + ], + models: [ + { + description: "An easy-to-use model for Command Recognition.", + id: "speechbrain/google_speech_command_xvector", + }, + { + description: "An Emotion Recognition model.", + id: "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition", + }, + { + description: "A language identification model.", + id: "facebook/mms-lid-126", + }, + ], + spaces: [ + { + description: "An application that can classify music into 
different genre.", + id: "kurianbenoy/audioclassification", + }, + ], + summary: + "Audio classification is the task of assigning a label or class to a given audio. It can be used for recognizing which command a user is giving or the emotion of a statement, as well as identifying a speaker.", + widgetModels: ["facebook/mms-lid-126"], + youtubeId: "KWwzcmG98Ds", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..ee61c7052b3aa536450ea1d626639ce2bbe3948f --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/inference.ts @@ -0,0 +1,51 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Audio Classification inference + */ +export interface AudioClassificationInput { + /** + * The input audio data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: AudioClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Audio Classification + */ +export interface AudioClassificationParameters { + function_to_apply?: ClassificationOutputTransform; + /** + * When specified, limits the output to the top K most probable classes. + */ + top_k?: number; + [property: string]: unknown; +} +/** + * The function to apply to the model outputs in order to retrieve the scores. + */ +export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none"; +export type AudioClassificationOutput = AudioClassificationOutputElement[]; +/** + * Outputs for Audio Classification inference + */ +export interface AudioClassificationOutputElement { + /** + * The predicted class label. 
+ */ + label: string; + /** + * The corresponding probability. + */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..d0372bb688881b9a56f3334fe6dad29d2186e2f9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/spec/input.json @@ -0,0 +1,34 @@ +{ + "$id": "/inference/schemas/audio-classification/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Audio Classification inference", + "title": "AudioClassificationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input audio data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/AudioClassificationParameters" + } + }, + "$defs": { + "AudioClassificationParameters": { + "title": "AudioClassificationParameters", + "description": "Additional inference parameters for Audio Classification", + "type": "object", + "properties": { + "function_to_apply": { + "title": "AudioClassificationOutputTransform", + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform" + }, + "top_k": { + "type": "integer", + "description": "When specified, limits the output to the top K most probable classes." 
+ } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..dac7a92256d072571f14aec5ab54ab6b9871cc99 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/audio-classification/spec/output.json @@ -0,0 +1,10 @@ +{ + "$id": "/inference/schemas/audio-classification/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "title": "AudioClassificationOutput", + "description": "Outputs for Audio Classification inference", + "type": "array", + "items": { + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutput" + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/audio-to-audio/about.md b/data/node_modules/@huggingface/tasks/src/tasks/audio-to-audio/about.md new file mode 100644 index 0000000000000000000000000000000000000000..2822a7499dd6ba2bbdd2ebf82c0865ec2bc960d6 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/audio-to-audio/about.md @@ -0,0 +1,56 @@ +## Use Cases + +### Speech Enhancement (Noise removal) + +Speech Enhancement is a bit self explanatory. It improves (or enhances) the quality of an audio by removing noise. There are multiple libraries to solve this task, such as Speechbrain, Asteroid and ESPNet. 
Here is a simple example using Speechbrain + +```python +from speechbrain.pretrained import SpectralMaskEnhancement +model = SpectralMaskEnhancement.from_hparams( + "speechbrain/mtl-mimic-voicebank" +) +model.enhance_file("file.wav") +``` + +Alternatively, you can use [Inference Endpoints](https://huggingface.co/inference-endpoints) to solve this task + +```python +import json +import requests + +headers = {"Authorization": f"Bearer {API_TOKEN}"} +API_URL = "https://api-inference.huggingface.co/models/speechbrain/mtl-mimic-voicebank" + +def query(filename): + with open(filename, "rb") as f: + data = f.read() + response = requests.request("POST", API_URL, headers=headers, data=data) + return json.loads(response.content.decode("utf-8")) + +data = query("sample1.flac") +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with audio-to-audio models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.audioToAudio({ + data: await (await fetch("sample.flac")).blob(), + model: "speechbrain/sepformer-wham", +}); +``` + +### Audio Source Separation + +Audio Source Separation allows you to isolate different sounds from individual sources. For example, if you have an audio file with multiple people speaking, you can get an audio file for each of them. You can then use an Automatic Speech Recognition system to extract the text from each of these sources as an initial step for your system! + +Audio-to-Audio can also be used to remove noise from audio files: you get one audio for the person speaking and another audio for the noise. This can also be useful when you have multi-person audio with some noise: yyou can get one audio for each person and then one audio for the noise. 
+ +## Training a model for your own data + +If you want to learn how to train models for the Audio-to-Audio task, we recommend the following tutorials: + +- [Speech Enhancement](https://speechbrain.github.io/tutorial_enhancement.html) +- [Source Separation](https://speechbrain.github.io/tutorial_separation.html) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/audio-to-audio/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/audio-to-audio/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..373807a09cd75db6d82acc9b1c0a9f28fef8b71c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/audio-to-audio/data.ts @@ -0,0 +1,66 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "512-element X-vector embeddings of speakers from CMU ARCTIC dataset.", + id: "Matthijs/cmu-arctic-xvectors", + }, + ], + demo: { + inputs: [ + { + filename: "input.wav", + type: "audio", + }, + ], + outputs: [ + { + filename: "label-0.wav", + type: "audio", + }, + { + filename: "label-1.wav", + type: "audio", + }, + ], + }, + metrics: [ + { + description: + "The Signal-to-Noise ratio is the relationship between the target signal level and the background noise level. 
It is calculated as the logarithm of the target signal divided by the background noise, in decibels.", + id: "snri", + }, + { + description: + "The Signal-to-Distortion ratio is the relationship between the target signal and the sum of noise, interference, and artifact errors", + id: "sdri", + }, + ], + models: [ + { + description: "A solid model of audio source separation.", + id: "speechbrain/sepformer-wham", + }, + { + description: "A speech enhancement model.", + id: "speechbrain/metricgan-plus-voicebank", + }, + ], + spaces: [ + { + description: "An application for speech separation.", + id: "younver/speechbrain-speech-separation", + }, + { + description: "An application for audio style transfer.", + id: "nakas/audio-diffusion_style_transfer", + }, + ], + summary: + "Audio-to-Audio is a family of tasks in which the input is an audio and the output is one or multiple generated audios. Some example tasks are speech enhancement and source separation.", + widgetModels: ["speechbrain/sepformer-wham"], + youtubeId: "iohj7nCCYoM", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/about.md b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/about.md new file mode 100644 index 0000000000000000000000000000000000000000..8e2a0714bb1d8c27e218f2224005527a439641dc --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/about.md @@ -0,0 +1,90 @@ +## Use Cases + +### Virtual Speech Assistants + +Many edge devices have an embedded virtual assistant to interact with the end users better. These assistances rely on ASR models to recognize different voice commands to perform various tasks. For instance, you can ask your phone for dialing a phone number, ask a general question, or schedule a meeting. 
+ +### Caption Generation + +A caption generation model takes audio as input from sources to generate automatic captions through transcription, for live-streamed or recorded videos. This can help with content accessibility. For example, an audience watching a video that includes a non-native language, can rely on captions to interpret the content. It can also help with information retention at online-classes environments improving knowledge assimilation while reading and taking notes faster. + +## Task Variants + +### Multilingual ASR + +Multilingual ASR models can convert audio inputs with multiple languages into transcripts. Some multilingual ASR models include [language identification](https://huggingface.co/tasks/audio-classification) blocks to improve the performance. + +The use of Multilingual ASR has become popular, the idea of maintaining just a single model for all language can simplify the production pipeline. Take a look at [Whisper](https://huggingface.co/openai/whisper-large-v2) to get an idea on how 100+ languages can be processed by a single model. + +## Inference + +The Hub contains over [17,000 ASR models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=downloads) that you can test right away in your browser using the model page widgets. You can also use any model as a service using the Serverless Inference API. We also support libraries such as [transformers](https://huggingface.co/models?library=transformers&pipeline_tag=automatic-speech-recognition&sort=downloads), [speechbrain](https://huggingface.co/models?library=speechbrain&pipeline_tag=automatic-speech-recognition&sort=downloads), [NeMo](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&library=nemo&sort=downloads) and [espnet](https://huggingface.co/models?library=espnet&pipeline_tag=automatic-speech-recognition&sort=downloads) via the Serverless Inference API. 
Here's a simple code snippet to run inference: + +```python +import json +import requests + +headers = {"Authorization": f"Bearer {API_TOKEN}"} +API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3" + +def query(filename): + with open(filename, "rb") as f: + data = f.read() + response = requests.request("POST", API_URL, headers=headers, data=data) + return json.loads(response.content.decode("utf-8")) + +data = query("sample1.flac") +``` + +You can also use [huggingface.js](https://github.com/huggingface/huggingface.js), the JavaScript client, to transcribe audio with the Serverless Inference API. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.automaticSpeechRecognition({ + data: await (await fetch("sample.flac")).blob(), + model: "openai/whisper-large-v3", +}); +``` + +For transformers-compatible models like Whisper, Wav2Vec2, and HuBERT, you can also run inference with the library as follows: + +```python +# pip install --upgrade transformers + +from transformers import pipeline + +pipe = pipeline("automatic-speech-recognition", "openai/whisper-large-v3") + +pipe("sample.flac") +# {'text': "GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES IN DRAUGHTY SCHOOL ROOMS DAY AFTER DAY FOR A FORTNIGHT HE'LL HAVE TO PUT IN AN APPEARANCE AT SOME PLACE OF WORSHIP ON SUNDAY MORNING AND HE CAN COME TO US IMMEDIATELY AFTERWARDS"} +``` + +## Solving ASR for your own data + +We have some great news! You can fine-tune (transfer learning) a foundational speech model on a specific language without tonnes of data. Pretrained models such as Whisper, Wav2Vec2-MMS and HuBERT exist. [OpenAI's Whisper model](https://huggingface.co/openai/whisper-large-v3) is a large multilingual model trained on 100+ languages and with 4 Million hours of speech. 
+ +The following detailed [blog post](https://huggingface.co/blog/fine-tune-whisper) shows how to fine-tune a pre-trained Whisper checkpoint on labeled data for ASR. With the right data and strategy you can fine-tune a high-performant model on a free Google Colab instance too. We suggest to read the blog post for more info! + +## Hugging Face Whisper Event + +On December 2022, over 450 participants collaborated, fine-tuned and shared 600+ ASR Whisper models in 100+ different languages. You can compare these models on the event's speech recognition [leaderboard](https://huggingface.co/spaces/whisper-event/leaderboard?dataset=mozilla-foundation%2Fcommon_voice_11_0&config=ar&split=test). + +These events help democratize ASR for all languages, including low-resource languages. In addition to the trained models, the [event](https://github.com/huggingface/community-events/tree/main/whisper-fine-tuning-event) helps to build practical collaborative knowledge. + +## Useful Resources + +- [Hugging Face Audio Course](https://huggingface.co/learn/audio-course/chapter5/introduction) +- [Fine-tuning MetaAI's MMS Adapter Models for Multi-Lingual ASR](https://huggingface.co/blog/mms_adapters) +- [Making automatic speech recognition work on large files with Wav2Vec2 in 🤗 Transformers](https://huggingface.co/blog/asr-chunking) +- [Boosting Wav2Vec2 with n-grams in 🤗 Transformers](https://huggingface.co/blog/wav2vec2-with-ngram) +- [ML for Audio Study Group - Intro to Audio and ASR Deep Dive](https://www.youtube.com/watch?v=D-MH6YjuIlE) +- [Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters](https://arxiv.org/pdf/2007.03001.pdf) +- An ASR toolkit made by [NVIDIA: NeMo](https://github.com/NVIDIA/NeMo) with code and pretrained models useful for new ASR models. Watch the [introductory video](https://www.youtube.com/embed/wBgpMf_KQVw) for an overview. 
+- [An introduction to SpeechT5, a multi-purpose speech recognition and synthesis model](https://huggingface.co/blog/speecht5) +- [Fine-tune Whisper For Multilingual ASR with 🤗Transformers](https://huggingface.co/blog/fine-tune-whisper) +- [Automatic speech recognition task guide](https://huggingface.co/docs/transformers/tasks/asr) +- [Speech Synthesis, Recognition, and More With SpeechT5](https://huggingface.co/blog/speecht5) +- [Fine-Tune W2V2-Bert for low-resource ASR with 🤗 Transformers](https://huggingface.co/blog/fine-tune-w2v2-bert) +- [Speculative Decoding for 2x Faster Whisper Inference](https://huggingface.co/blog/whisper-speculative-decoding) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..e116a69e7e867cdad8be8e2b57fac28a010b2d19 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/data.ts @@ -0,0 +1,78 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "31,175 hours of multilingual audio-text dataset in 108 languages.", + id: "mozilla-foundation/common_voice_17_0", + }, + { + description: "An English dataset with 1,000 hours of data.", + id: "librispeech_asr", + }, + { + description: "A multi-lingual audio dataset with 370K hours of audio.", + id: "espnet/yodas", + }, + ], + demo: { + inputs: [ + { + filename: "input.flac", + type: "audio", + }, + ], + outputs: [ + { + /// GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES I + label: "Transcript", + content: "Going along slushy country roads and speaking to damp audiences in...", + type: "text", + }, + ], + }, + metrics: [ + { + description: "", + id: "wer", + }, + { + description: "", + id: "cer", + }, + ], + models: [ + { + description: "A powerful ASR model by OpenAI.", + id: 
"openai/whisper-large-v3", + }, + { + description: "A good generic speech model by MetaAI for fine-tuning.", + id: "facebook/w2v-bert-2.0", + }, + { + description: "An end-to-end model that performs ASR and Speech Translation by MetaAI.", + id: "facebook/seamless-m4t-v2-large", + }, + ], + spaces: [ + { + description: "A powerful general-purpose speech recognition application.", + id: "hf-audio/whisper-large-v3", + }, + { + description: "Fastest speech recognition application.", + id: "sanchit-gandhi/whisper-jax", + }, + { + description: "A high quality speech and text translation model by Meta.", + id: "facebook/seamless_m4t", + }, + ], + summary: + "Automatic Speech Recognition (ASR), also known as Speech to Text (STT), is the task of transcribing a given audio to text. It has many applications, such as voice user interfaces.", + widgetModels: ["openai/whisper-large-v3"], + youtubeId: "TksaY_FDgnk", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..dfc501519d2e6e8699415e2a10d63fae4d05a20a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/inference.ts @@ -0,0 +1,159 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Automatic Speech Recognition inference + */ +export interface AutomaticSpeechRecognitionInput { + /** + * The input audio data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: AutomaticSpeechRecognitionParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Automatic Speech Recognition + */ +export interface AutomaticSpeechRecognitionParameters { + /** + * 
Parametrization of the text generation process + */ + generate?: GenerationParameters; + /** + * Whether to output corresponding timestamps with the generated text + */ + return_timestamps?: boolean; + [property: string]: unknown; +} + +/** + * Parametrization of the text generation process + * + * Ad-hoc parametrization of the text generation process + */ +export interface GenerationParameters { + /** + * Whether to use sampling instead of greedy decoding when generating new tokens. + */ + do_sample?: boolean; + /** + * Controls the stopping condition for beam-based methods. + */ + early_stopping?: EarlyStoppingUnion; + /** + * If set to float strictly between 0 and 1, only tokens with a conditional probability + * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + * Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + */ + epsilon_cutoff?: number; + /** + * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + * float strictly between 0 and 1, a token is only considered if it is greater than either + * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + * for more details. + */ + eta_cutoff?: number; + /** + * The maximum length (in tokens) of the generated text, including the input. + */ + max_length?: number; + /** + * The maximum number of tokens to generate. Takes precedence over maxLength. + */ + max_new_tokens?: number; + /** + * The minimum length (in tokens) of the generated text, including the input. 
+ */ + min_length?: number; + /** + * The minimum number of tokens to generate. Takes precedence over maxLength. + */ + min_new_tokens?: number; + /** + * Number of groups to divide num_beams into in order to ensure diversity among different + * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + */ + num_beam_groups?: number; + /** + * Number of beams to use for beam search. + */ + num_beams?: number; + /** + * The value balances the model confidence and the degeneration penalty in contrastive + * search decoding. + */ + penalty_alpha?: number; + /** + * The value used to modulate the next token probabilities. + */ + temperature?: number; + /** + * The number of highest probability vocabulary tokens to keep for top-k-filtering. + */ + top_k?: number; + /** + * If set to float < 1, only the smallest set of most probable tokens with probabilities + * that add up to top_p or higher are kept for generation. + */ + top_p?: number; + /** + * Local typicality measures how similar the conditional probability of predicting a target + * token next is to the expected conditional probability of predicting a random token next, + * given the partial text already generated. If set to float < 1, the smallest set of the + * most locally typical tokens with probabilities that add up to typical_p or higher are + * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + */ + typical_p?: number; + /** + * Whether the model should use the past last key/values attentions to speed up decoding + */ + use_cache?: boolean; + [property: string]: unknown; +} + +/** + * Controls the stopping condition for beam-based methods. + */ +export type EarlyStoppingUnion = boolean | "never"; + +/** + * Outputs of inference for the Automatic Speech Recognition task + */ +export interface AutomaticSpeechRecognitionOutput { + /** + * When returnTimestamps is enabled, chunks contains a list of audio chunks identified by + * the model. 
+ */ + chunks?: AutomaticSpeechRecognitionOutputChunk[]; + /** + * The recognized text. + */ + text: string; + [property: string]: unknown; +} + +export interface AutomaticSpeechRecognitionOutputChunk { + /** + * A chunk of text identified by the model + */ + text: string; + /** + * The start and end timestamps corresponding with the text + */ + timestamps: number[]; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..691c7f4b72a7254df4b2943dfc76480b2198ecc5 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/spec/input.json @@ -0,0 +1,34 @@ +{ + "$id": "/inference/schemas/automatic-speech-recognition/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Automatic Speech Recognition inference", + "title": "AutomaticSpeechRecognitionInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input audio data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/AutomaticSpeechRecognitionParameters" + } + }, + "$defs": { + "AutomaticSpeechRecognitionParameters": { + "title": "AutomaticSpeechRecognitionParameters", + "description": "Additional inference parameters for Automatic Speech Recognition", + "type": "object", + "properties": { + "return_timestamps": { + "type": "boolean", + "description": "Whether to output corresponding timestamps with the generated text" + }, + "generate": { + "description": "Parametrization of the text generation process", + "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters" + } + } + } + }, + "required": ["inputs"] +} diff --git 
a/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..db8a1cf2419bcd78ae0c98cf7d57eaaed78b90a3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/automatic-speech-recognition/spec/output.json @@ -0,0 +1,38 @@ +{ + "$id": "/inference/schemas/automatic-speech-recognition/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Automatic Speech Recognition task", + "title": "AutomaticSpeechRecognitionOutput", + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "The recognized text." + }, + "chunks": { + "type": "array", + "description": "When returnTimestamps is enabled, chunks contains a list of audio chunks identified by the model.", + "items": { + "type": "object", + "title": "AutomaticSpeechRecognitionOutputChunk", + "properties": { + "text": { + "type": "string", + "description": "A chunk of text identified by the model" + }, + "timestamps": { + "type": "array", + "description": "The start and end timestamps corresponding with the text", + "items": { + "type": "number" + }, + "minLength": 2, + "maxLength": 2 + } + }, + "required": ["text", "timestamps"] + } + } + }, + "required": ["text"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..488a1e87e9e7eaf810b56ce107e10e6565ab0fdd --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/inference.ts @@ -0,0 +1,277 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Chat Completion Input. 
+ * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. + */ +export interface ChatCompletionInput { + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, + * decreasing the model's likelihood to repeat the same line verbatim. + */ + frequency_penalty?: number; + /** + * UNUSED + * Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON + * object that maps tokens + * (specified by their token ID in the tokenizer) to an associated bias value from -100 to + * 100. Mathematically, + * the bias is added to the logits generated by the model prior to sampling. The exact + * effect will vary per model, + * but values between -1 and 1 should decrease or increase likelihood of selection; values + * like -100 or 100 should + * result in a ban or exclusive selection of the relevant token. + */ + logit_bias?: number[]; + /** + * Whether to return log probabilities of the output tokens or not. If true, returns the log + * probabilities of each + * output token returned in the content of message. + */ + logprobs?: boolean; + /** + * The maximum number of tokens that can be generated in the chat completion. + */ + max_tokens?: number; + /** + * A list of messages comprising the conversation so far. + */ + messages: ChatCompletionInputMessage[]; + /** + * [UNUSED] ID of the model to use. See the model endpoint compatibility table for details + * on which models work with the Chat API. + */ + model: string; + /** + * UNUSED + * How many chat completion choices to generate for each input message. Note that you will + * be charged based on the + * number of generated tokens across all of the choices. Keep n as 1 to minimize costs. + */ + n?: number; + /** + * Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they + * appear in the text so far, + * increasing the model's likelihood to talk about new topics + */ + presence_penalty?: number; + seed?: number; + /** + * Up to 4 sequences where the API will stop generating further tokens. + */ + stop?: string[]; + stream?: boolean; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while + * lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number; + tool_choice?: ChatCompletionInputToolType; + /** + * A prompt to be appended before the tools + */ + tool_prompt?: string; + /** + * A list of tools the model may call. Currently, only functions are supported as a tool. + * Use this to provide a list of + * functions the model may generate JSON inputs for. + */ + tools?: ChatCompletionInputTool[]; + /** + * An integer between 0 and 5 specifying the number of most likely tokens to return at each + * token position, each with + * an associated log probability. logprobs must be set to true if this parameter is used. + */ + top_logprobs?: number; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the + * tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% + * probability mass are considered. 
+ */ + top_p?: number; + [property: string]: unknown; +} + +export interface ChatCompletionInputMessage { + content?: string; + name?: string; + role: string; + tool_calls?: ChatCompletionInputToolCall[]; + [property: string]: unknown; +} + +export interface ChatCompletionInputToolCall { + function: ChatCompletionInputFunctionDefinition; + id: number; + type: string; + [property: string]: unknown; +} + +export interface ChatCompletionInputFunctionDefinition { + arguments: unknown; + description?: string; + name: string; + [property: string]: unknown; +} + +export type ChatCompletionInputToolType = "OneOf" | ChatCompletionInputToolTypeObject; + +export interface ChatCompletionInputToolTypeObject { + FunctionName: string; + [property: string]: unknown; +} + +export interface ChatCompletionInputTool { + function: ChatCompletionInputFunctionDefinition; + type: string; + [property: string]: unknown; +} + +/** + * Chat Completion Output. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. 
+ */ +export interface ChatCompletionOutput { + choices: ChatCompletionOutputComplete[]; + created: number; + id: string; + model: string; + object: string; + system_fingerprint: string; + usage: ChatCompletionOutputUsage; + [property: string]: unknown; +} + +export interface ChatCompletionOutputComplete { + finish_reason: string; + index: number; + logprobs?: ChatCompletionOutputLogprobs; + message: ChatCompletionOutputMessage; + [property: string]: unknown; +} + +export interface ChatCompletionOutputLogprobs { + content: ChatCompletionOutputLogprob[]; + [property: string]: unknown; +} + +export interface ChatCompletionOutputLogprob { + logprob: number; + token: string; + top_logprobs: ChatCompletionOutputTopLogprob[]; + [property: string]: unknown; +} + +export interface ChatCompletionOutputTopLogprob { + logprob: number; + token: string; + [property: string]: unknown; +} + +export interface ChatCompletionOutputMessage { + content?: string; + name?: string; + role: string; + tool_calls?: ChatCompletionOutputToolCall[]; + [property: string]: unknown; +} + +export interface ChatCompletionOutputToolCall { + function: ChatCompletionOutputFunctionDefinition; + id: number; + type: string; + [property: string]: unknown; +} + +export interface ChatCompletionOutputFunctionDefinition { + arguments: unknown; + description?: string; + name: string; + [property: string]: unknown; +} + +export interface ChatCompletionOutputUsage { + completion_tokens: number; + prompt_tokens: number; + total_tokens: number; + [property: string]: unknown; +} + +/** + * Chat Completion Stream Output. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. 
+ */ +export interface ChatCompletionStreamOutput { + choices: ChatCompletionStreamOutputChoice[]; + created: number; + id: string; + model: string; + object: string; + system_fingerprint: string; + [property: string]: unknown; +} + +export interface ChatCompletionStreamOutputChoice { + delta: ChatCompletionStreamOutputDelta; + finish_reason?: string; + index: number; + logprobs?: ChatCompletionStreamOutputLogprobs; + [property: string]: unknown; +} + +export interface ChatCompletionStreamOutputDelta { + content?: string; + role: string; + tool_calls?: ChatCompletionStreamOutputDeltaToolCall; + [property: string]: unknown; +} + +export interface ChatCompletionStreamOutputDeltaToolCall { + function: ChatCompletionStreamOutputFunction; + id: string; + index: number; + type: string; + [property: string]: unknown; +} + +export interface ChatCompletionStreamOutputFunction { + arguments: string; + name?: string; + [property: string]: unknown; +} + +export interface ChatCompletionStreamOutputLogprobs { + content: ChatCompletionStreamOutputLogprob[]; + [property: string]: unknown; +} + +export interface ChatCompletionStreamOutputLogprob { + logprob: number; + token: string; + top_logprobs: ChatCompletionStreamOutputTopLogprob[]; + [property: string]: unknown; +} + +export interface ChatCompletionStreamOutputTopLogprob { + logprob: number; + token: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..0b549cd58bb7a6240485f75fceb93497b698db4c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/input.json @@ -0,0 +1,227 @@ +{ + "$id": "/inference/schemas/chat-completion/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Chat Completion Input.\n\nAuto-generated from TGI 
specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.", + "title": "ChatCompletionInput", + "type": "object", + "required": ["model", "messages"], + "properties": { + "frequency_penalty": { + "type": "number", + "format": "float", + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.", + "example": "1.0", + "nullable": true + }, + "logit_bias": { + "type": "array", + "items": { + "type": "number", + "format": "float" + }, + "description": "UNUSED\nModify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens\n(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,\nbut values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should\nresult in a ban or exclusive selection of the relevant token.", + "nullable": true + }, + "logprobs": { + "type": "boolean", + "description": "Whether to return log probabilities of the output tokens or not. 
If true, returns the log probabilities of each\noutput token returned in the content of message.", + "example": "false", + "nullable": true + }, + "max_tokens": { + "type": "integer", + "format": "int32", + "description": "The maximum number of tokens that can be generated in the chat completion.", + "example": "32", + "nullable": true, + "minimum": 0 + }, + "messages": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionInputMessage" + }, + "description": "A list of messages comprising the conversation so far.", + "example": "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]" + }, + "model": { + "type": "string", + "description": "[UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", + "example": "mistralai/Mistral-7B-Instruct-v0.2" + }, + "n": { + "type": "integer", + "format": "int32", + "description": "UNUSED\nHow many chat completion choices to generate for each input message. Note that you will be charged based on the\nnumber of generated tokens across all of the choices. Keep n as 1 to minimize costs.", + "example": "2", + "nullable": true, + "minimum": 0 + }, + "presence_penalty": { + "type": "number", + "format": "float", + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\nincreasing the model's likelihood to talk about new topics", + "example": 0.1, + "nullable": true + }, + "seed": { + "type": "integer", + "format": "int64", + "example": 42, + "nullable": true, + "minimum": 0 + }, + "stop": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Up to 4 sequences where the API will stop generating further tokens.", + "example": "null", + "nullable": true + }, + "stream": { + "type": "boolean" + }, + "temperature": { + "type": "number", + "format": "float", + "description": "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.", + "example": 1, + "nullable": true + }, + "tool_choice": { + "allOf": [ + { + "$ref": "#/$defs/ChatCompletionInputToolType" + } + ], + "nullable": true + }, + "tool_prompt": { + "type": "string", + "description": "A prompt to be appended before the tools", + "example": "\"You will be presented with a JSON schema representing a set of tools.\nIf the user request lacks of sufficient information to make a precise tool selection: Do not invent any tool's properties, instead notify with an error message.\n\nJSON Schema:\n\"", + "nullable": true + }, + "tools": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionInputTool" + }, + "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of\nfunctions the model may generate JSON inputs for.", + "example": "null", + "nullable": true + }, + "top_logprobs": { + "type": "integer", + "format": "int32", + "description": "An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with\nan associated log probability. logprobs must be set to true if this parameter is used.", + "example": "5", + "nullable": true, + "minimum": 0 + }, + "top_p": { + "type": "number", + "format": "float", + "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.", + "example": 0.95, + "nullable": true + } + }, + "$defs": { + "ChatCompletionInputMessage": { + "type": "object", + "required": ["role"], + "properties": { + "content": { + "type": "string", + "example": "My name is David and I", + "nullable": true + }, + "name": { + "type": "string", + "example": "\"David\"", + "nullable": true + }, + "role": { + "type": "string", + "example": "user" + }, + "tool_calls": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionInputToolCall" + }, + "nullable": true + } + }, + "title": "ChatCompletionInputMessage" + }, + "ChatCompletionInputToolCall": { + "type": "object", + "required": ["id", "type", "function"], + "properties": { + "function": { + "$ref": "#/$defs/ChatCompletionInputFunctionDefinition" + }, + "id": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "type": { + "type": "string" + } + }, + "title": "ChatCompletionInputToolCall" + }, + "ChatCompletionInputFunctionDefinition": { + "type": "object", + "required": ["name", "arguments"], + "properties": { + "arguments": {}, + "description": { + "type": "string", + "nullable": true + }, + "name": { + "type": "string" + } + }, + "title": "ChatCompletionInputFunctionDefinition" + }, + "ChatCompletionInputToolType": { + "oneOf": [ + { + "type": "object", + "required": ["FunctionName"], + "properties": { + "FunctionName": { + "type": "string" + } + } + }, + { + "type": "string", + "enum": ["OneOf"] + } + ], + "title": "ChatCompletionInputToolType" + }, + "ChatCompletionInputTool": { + "type": "object", + "required": ["type", "function"], + "properties": { + "function": { + "$ref": "#/$defs/ChatCompletionInputFunctionDefinition" + }, + "type": { + "type": "string", + "example": "function" + } + }, + "title": "ChatCompletionInputTool" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/output.json 
b/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..5b602ccd6d9b2056a9266710a5ec57b76c456eb0 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/output.json @@ -0,0 +1,196 @@ +{ + "$id": "/inference/schemas/chat-completion/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Chat Completion Output.\n\nAuto-generated from TGI specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.", + "title": "ChatCompletionOutput", + "type": "object", + "required": ["id", "object", "created", "model", "system_fingerprint", "choices", "usage"], + "properties": { + "choices": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionOutputComplete" + } + }, + "created": { + "type": "integer", + "format": "int64", + "example": "1706270835", + "minimum": 0 + }, + "id": { + "type": "string" + }, + "model": { + "type": "string", + "example": "mistralai/Mistral-7B-Instruct-v0.2" + }, + "object": { + "type": "string" + }, + "system_fingerprint": { + "type": "string" + }, + "usage": { + "$ref": "#/$defs/ChatCompletionOutputUsage" + } + }, + "$defs": { + "ChatCompletionOutputComplete": { + "type": "object", + "required": ["index", "message", "finish_reason"], + "properties": { + "finish_reason": { + "type": "string" + }, + "index": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "logprobs": { + "allOf": [ + { + "$ref": "#/$defs/ChatCompletionOutputLogprobs" + } + ], + "nullable": true + }, + "message": { + "$ref": "#/$defs/ChatCompletionOutputMessage" + } + }, + "title": "ChatCompletionOutputComplete" + }, + "ChatCompletionOutputLogprobs": { + "type": "object", + "required": ["content"], + "properties": { + "content": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionOutputLogprob" + } + } + }, + 
"title": "ChatCompletionOutputLogprobs" + }, + "ChatCompletionOutputLogprob": { + "type": "object", + "required": ["token", "logprob", "top_logprobs"], + "properties": { + "logprob": { + "type": "number", + "format": "float" + }, + "token": { + "type": "string" + }, + "top_logprobs": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionOutputTopLogprob" + } + } + }, + "title": "ChatCompletionOutputLogprob" + }, + "ChatCompletionOutputTopLogprob": { + "type": "object", + "required": ["token", "logprob"], + "properties": { + "logprob": { + "type": "number", + "format": "float" + }, + "token": { + "type": "string" + } + }, + "title": "ChatCompletionOutputTopLogprob" + }, + "ChatCompletionOutputMessage": { + "type": "object", + "required": ["role"], + "properties": { + "content": { + "type": "string", + "example": "My name is David and I", + "nullable": true + }, + "name": { + "type": "string", + "example": "\"David\"", + "nullable": true + }, + "role": { + "type": "string", + "example": "user" + }, + "tool_calls": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionOutputToolCall" + }, + "nullable": true + } + }, + "title": "ChatCompletionOutputMessage" + }, + "ChatCompletionOutputToolCall": { + "type": "object", + "required": ["id", "type", "function"], + "properties": { + "function": { + "$ref": "#/$defs/ChatCompletionOutputFunctionDefinition" + }, + "id": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "type": { + "type": "string" + } + }, + "title": "ChatCompletionOutputToolCall" + }, + "ChatCompletionOutputFunctionDefinition": { + "type": "object", + "required": ["name", "arguments"], + "properties": { + "arguments": {}, + "description": { + "type": "string", + "nullable": true + }, + "name": { + "type": "string" + } + }, + "title": "ChatCompletionOutputFunctionDefinition" + }, + "ChatCompletionOutputUsage": { + "type": "object", + "required": ["prompt_tokens", "completion_tokens", "total_tokens"], + "properties": { 
+ "completion_tokens": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "prompt_tokens": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "total_tokens": { + "type": "integer", + "format": "int32", + "minimum": 0 + } + }, + "title": "ChatCompletionOutputUsage" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/stream_output.json b/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/stream_output.json new file mode 100644 index 0000000000000000000000000000000000000000..72575d9139ea7269511ed2865c833cc163a32b6a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/chat-completion/spec/stream_output.json @@ -0,0 +1,170 @@ +{ + "$id": "/inference/schemas/chat-completion/stream_output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Chat Completion Stream Output.\n\nAuto-generated from TGI specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.", + "title": "ChatCompletionStreamOutput", + "type": "object", + "required": ["id", "object", "created", "model", "system_fingerprint", "choices"], + "properties": { + "choices": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionStreamOutputChoice" + } + }, + "created": { + "type": "integer", + "format": "int64", + "example": "1706270978", + "minimum": 0 + }, + "id": { + "type": "string" + }, + "model": { + "type": "string", + "example": "mistralai/Mistral-7B-Instruct-v0.2" + }, + "object": { + "type": "string" + }, + "system_fingerprint": { + "type": "string" + } + }, + "$defs": { + "ChatCompletionStreamOutputChoice": { + "type": "object", + "required": ["index", "delta"], + "properties": { + "delta": { + "$ref": "#/$defs/ChatCompletionStreamOutputDelta" + }, + "finish_reason": { + "type": "string", + "nullable": true + }, + "index": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, 
+ "logprobs": { + "allOf": [ + { + "$ref": "#/$defs/ChatCompletionStreamOutputLogprobs" + } + ], + "nullable": true + } + }, + "title": "ChatCompletionStreamOutputChoice" + }, + "ChatCompletionStreamOutputDelta": { + "type": "object", + "required": ["role"], + "properties": { + "content": { + "type": "string", + "example": "What is Deep Learning?", + "nullable": true + }, + "role": { + "type": "string", + "example": "user" + }, + "tool_calls": { + "allOf": [ + { + "$ref": "#/$defs/ChatCompletionStreamOutputDeltaToolCall" + } + ], + "nullable": true + } + }, + "title": "ChatCompletionStreamOutputDelta" + }, + "ChatCompletionStreamOutputDeltaToolCall": { + "type": "object", + "required": ["index", "id", "type", "function"], + "properties": { + "function": { + "$ref": "#/$defs/ChatCompletionStreamOutputFunction" + }, + "id": { + "type": "string" + }, + "index": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "type": { + "type": "string" + } + }, + "title": "ChatCompletionStreamOutputDeltaToolCall" + }, + "ChatCompletionStreamOutputFunction": { + "type": "object", + "required": ["arguments"], + "properties": { + "arguments": { + "type": "string" + }, + "name": { + "type": "string", + "nullable": true + } + }, + "title": "ChatCompletionStreamOutputFunction" + }, + "ChatCompletionStreamOutputLogprobs": { + "type": "object", + "required": ["content"], + "properties": { + "content": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionStreamOutputLogprob" + } + } + }, + "title": "ChatCompletionStreamOutputLogprobs" + }, + "ChatCompletionStreamOutputLogprob": { + "type": "object", + "required": ["token", "logprob", "top_logprobs"], + "properties": { + "logprob": { + "type": "number", + "format": "float" + }, + "token": { + "type": "string" + }, + "top_logprobs": { + "type": "array", + "items": { + "$ref": "#/$defs/ChatCompletionStreamOutputTopLogprob" + } + } + }, + "title": "ChatCompletionStreamOutputLogprob" + }, + 
"ChatCompletionStreamOutputTopLogprob": { + "type": "object", + "required": ["token", "logprob"], + "properties": { + "logprob": { + "type": "number", + "format": "float" + }, + "token": { + "type": "string" + } + }, + "title": "ChatCompletionStreamOutputTopLogprob" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/common-definitions.json b/data/node_modules/@huggingface/tasks/src/tasks/common-definitions.json new file mode 100644 index 0000000000000000000000000000000000000000..f78d3d9e47a78274f5053ae553a360eadaabcaaf --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/common-definitions.json @@ -0,0 +1,117 @@ +{ + "$id": "/inference/schemas/common-definitions.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "(Incomplete!) Common type definitions shared by several tasks", + "definitions": { + "ClassificationOutputTransform": { + "title": "ClassificationOutputTransform", + "type": "string", + "description": "The function to apply to the model outputs in order to retrieve the scores.", + "oneOf": [ + { + "const": "sigmoid" + }, + { + "const": "softmax" + }, + { + "const": "none" + } + ] + }, + "ClassificationOutput": { + "title": "ClassificationOutput", + "type": "object", + "properties": { + "label": { + "type": "string", + "description": "The predicted class label." + }, + "score": { + "type": "number", + "description": "The corresponding probability." + } + }, + "required": ["label", "score"] + }, + "GenerationParameters": { + "title": "GenerationParameters", + "description": "Ad-hoc parametrization of the text generation process", + "type": "object", + "properties": { + "temperature": { + "type": "number", + "description": "The value used to modulate the next token probabilities." + }, + "top_k": { + "type": "integer", + "description": "The number of highest probability vocabulary tokens to keep for top-k-filtering." 
+ }, + "top_p": { + "type": "number", + "description": "If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation." + }, + "typical_p": { + "type": "number", + "description": " Local typicality measures how similar the conditional probability of predicting a target token next is to the expected conditional probability of predicting a random token next, given the partial text already generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that add up to typical_p or higher are kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details." + }, + "epsilon_cutoff": { + "type": "number", + "description": "If set to float strictly between 0 and 1, only tokens with a conditional probability greater than epsilon_cutoff will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) for more details." + }, + "eta_cutoff": { + "type": "number", + "description": "Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between 0 and 1, a token is only considered if it is greater than either eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) for more details." + }, + "max_length": { + "type": "integer", + "description": "The maximum length (in tokens) of the generated text, including the input." + }, + "max_new_tokens": { + "type": "integer", + "description": "The maximum number of tokens to generate. Takes precedence over maxLength." 
+ }, + "min_length": { + "type": "integer", + "description": "The minimum length (in tokens) of the generated text, including the input." + }, + "min_new_tokens": { + "type": "integer", + "description": "The minimum number of tokens to generate. Takes precedence over maxLength." + }, + "do_sample": { + "type": "boolean", + "description": "Whether to use sampling instead of greedy decoding when generating new tokens." + }, + "early_stopping": { + "description": "Controls the stopping condition for beam-based methods.", + "oneOf": [ + { + "type": "boolean" + }, + { + "const": "never", + "type": "string" + } + ] + }, + "num_beams": { + "type": "integer", + "description": "Number of beams to use for beam search." + }, + "num_beam_groups": { + "type": "integer", + "description": "Number of groups to divide num_beams into in order to ensure diversity among different groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details." + }, + "penalty_alpha": { + "type": "number", + "description": "The value balances the model confidence and the degeneration penalty in contrastive search decoding." + }, + "use_cache": { + "type": "boolean", + "description": "Whether the model should use the past last key/values attentions to speed up decoding" + } + } + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/about.md b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/about.md new file mode 100644 index 0000000000000000000000000000000000000000..37c7d85a0991522e3bc5e6d06986f671cdc874d1 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/about.md @@ -0,0 +1,45 @@ +## Use Cases + +Depth estimation models can be used to estimate the depth of different objects present in an image. + +### Estimation of Volumetric Information +Depth estimation models are widely used to study volumetric formation of objects present inside an image. 
This is an important use case in the domain of computer graphics. + +### 3D Representation + +Depth estimation models can also be used to develop a 3D representation from a 2D image. + +## Depth Estimation Subtasks + +There are two depth estimation subtasks. + +- **Absolute depth estimation**: Absolute (or metric) depth estimation aims to provide exact depth measurements from the camera. Absolute depth estimation models output depth maps with real-world distances in meter or feet. + +- **Relative depth estimation**: Relative depth estimation aims to predict the depth order of objects or points in a scene without providing the precise measurements. + +## Inference + +With the `transformers` library, you can use the `depth-estimation` pipeline to infer with image classification models. You can initialize the pipeline with a model id from the Hub. If you do not provide a model id it will initialize with [Intel/dpt-large](https://huggingface.co/Intel/dpt-large) by default. When calling the pipeline you just need to specify a path, http link or an image loaded in PIL. Additionally, you can find a comprehensive list of various depth estimation models at [this link](https://huggingface.co/models?pipeline_tag=depth-estimation). + +```python +from transformers import pipeline + +estimator = pipeline(task="depth-estimation", model="Intel/dpt-large") +result = estimator(images="http://images.cocodataset.org/val2017/000000039769.jpg") +result + +# {'predicted_depth': tensor([[[ 6.3199, 6.3629, 6.4148, ..., 10.4104, 10.5109, 10.3847], +# [ 6.3850, 6.3615, 6.4166, ..., 10.4540, 10.4384, 10.4554], +# [ 6.3519, 6.3176, 6.3575, ..., 10.4247, 10.4618, 10.4257], +# ..., +# [22.3772, 22.4624, 22.4227, ..., 22.5207, 22.5593, 22.5293], +# [22.5073, 22.5148, 22.5114, ..., 22.6604, 22.6344, 22.5871], +# [22.5176, 22.5275, 22.5218, ..., 22.6282, 22.6216, 22.6108]]]), +# 'depth': } + +# You can visualize the result just by calling `result["depth"]`. 
+``` + +## Useful Resources + +- [Monocular depth estimation task guide](https://huggingface.co/docs/transformers/tasks/monocular_depth_estimation) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..2e429c51906ba7fbad3c980f909abd6a2f38ed71 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/data.ts @@ -0,0 +1,62 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "NYU Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.", + id: "sayakpaul/nyu_depth_v2", + }, + { + description: "Monocular depth estimation benchmark based without noise and errors.", + id: "depth-anything/DA-2K", + }, + ], + demo: { + inputs: [ + { + filename: "depth-estimation-input.jpg", + type: "img", + }, + ], + outputs: [ + { + filename: "depth-estimation-output.png", + type: "img", + }, + ], + }, + metrics: [], + models: [ + { + description: "Cutting-edge depth estimation model.", + id: "depth-anything/Depth-Anything-V2-Large", + }, + { + description: "A strong monocular depth estimation model.", + id: "Bingxin/Marigold", + }, + { + description: "A metric depth estimation model trained on NYU dataset.", + id: "Intel/zoedepth-nyu", + }, + ], + spaces: [ + { + description: "An application that predicts the depth of an image and then reconstruct the 3D model as voxels.", + id: "radames/dpt-depth-estimation-3d-voxels", + }, + { + description: "An application on cutting-edge depth estimation.", + id: "depth-anything/Depth-Anything-V2", + }, + { + description: "An application to try state-of-the-art depth estimation.", + id: "merve/compare_depth_models", + }, + ], + summary: "Depth estimation is the task of predicting depth of the objects present in an image.", + widgetModels: [""], + youtubeId: "", +}; + 
+export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..f873f925468078666c6a698b78f6a44a40094d49 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/inference.ts @@ -0,0 +1,35 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Depth Estimation inference + */ +export interface DepthEstimationInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: { [key: string]: unknown }; + [property: string]: unknown; +} + +/** + * Outputs of inference for the Depth Estimation task + */ +export interface DepthEstimationOutput { + /** + * The predicted depth as an image + */ + depth?: unknown; + /** + * The predicted depth as a tensor + */ + predicted_depth?: unknown; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..2a4ecc71cc271b5d348cf82f93f0c833b7f0e9b8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/spec/input.json @@ -0,0 +1,25 @@ +{ + "$id": "/inference/schemas/depth-estimation/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Depth Estimation inference", + "title": "DepthEstimationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input image data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/DepthEstimationParameters" + } + }, + "$defs": { + "DepthEstimationParameters": { + "title": 
"DepthEstimationParameters", + "description": "Additional inference parameters for Depth Estimation", + "type": "object", + "properties": {} + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..85bc6ef103c28dbc13e114df5ddc00d1a32f8406 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/depth-estimation/spec/output.json @@ -0,0 +1,16 @@ +{ + "$id": "/inference/schemas/depth-estimation/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Depth Estimation task", + "title": "DepthEstimationOutput", + + "type": "object", + "properties": { + "predicted_depth": { + "description": "The predicted depth as a tensor" + }, + "depth": { + "description": "The predicted depth as an image" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/about.md b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/about.md new file mode 100644 index 0000000000000000000000000000000000000000..528c29ec917ace00387344b09671e9a90fcc6e06 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/about.md @@ -0,0 +1,53 @@ +## Use Cases + +Document Question Answering models can be used to answer natural language questions about documents. Typically, document QA models consider textual, layout and potentially visual information. This is useful when the question requires some understanding of the visual aspects of the document. +Nevertheless, certain document QA models can work without document images. Hence the task is not limited to visually-rich documents and allows users to ask questions based on spreadsheets, text PDFs, etc! 
+ +### Document Parsing + +One of the most popular use cases of document question answering models is the parsing of structured documents. For example, you can extract the name, address, and other information from a form. You can also use the model to extract information from a table, or even a resume. + +### Invoice Information Extraction + +Another very popular use case is invoice information extraction. For example, you can extract the invoice number, the invoice date, the total amount, the VAT number, and the invoice recipient. + +## Inference + +You can infer with Document QA models with the 🤗 Transformers library using the [`document-question-answering` pipeline](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.DocumentQuestionAnsweringPipeline). If no model checkpoint is given, the pipeline will be initialized with [`impira/layoutlm-document-qa`](https://huggingface.co/impira/layoutlm-document-qa). This pipeline takes question(s) and document(s) as input, and returns the answer. +👉 Note that the question answering task solved here is extractive: the model extracts the answer from a context (the document). + +```python +from transformers import pipeline +from PIL import Image + +pipe = pipeline("document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa") + +question = "What is the purchase amount?" +image = Image.open("your-document.png") + +pipe(image=image, question=question) + +## [{'answer': '20,000$'}] +``` + +## Useful Resources + +Would you like to learn more about Document QA? Awesome! Here are some curated resources that you may find helpful! 
+ +- [Document Visual Question Answering (DocVQA) challenge](https://rrc.cvc.uab.es/?ch=17) +- [DocVQA: A Dataset for Document Visual Question Answering](https://arxiv.org/abs/2007.00398) (Dataset paper) +- [ICDAR 2021 Competition on Document Visual Question Answering](https://lilianweng.github.io/lil-log/2020/10/29/open-domain-question-answering.html) (Conference paper) +- [HuggingFace's Document Question Answering pipeline](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.DocumentQuestionAnsweringPipeline) +- [Github repo: DocQuery - Document Query Engine Powered by Large Language Models](https://github.com/impira/docquery) + +### Notebooks + +- [Fine-tuning Donut on DocVQA dataset](https://github.com/NielsRogge/Transformers-Tutorials/tree/0ea77f29d01217587d7e32a848f3691d9c15d6ab/Donut/DocVQA) +- [Fine-tuning LayoutLMv2 on DocVQA dataset](https://github.com/NielsRogge/Transformers-Tutorials/tree/1b4bad710c41017d07a8f63b46a12523bfd2e835/LayoutLMv2/DocVQA) +- [Accelerating Document AI](https://huggingface.co/blog/document-ai) + +### Documentation + +- [Document question answering task guide](https://huggingface.co/docs/transformers/tasks/document_question_answering) + +The contents of this page are contributed by [Eliott Zemour](https://huggingface.co/eliolio) and reviewed by [Kwadwo Agyapon-Ntra](https://huggingface.co/KayO) and [Ankur Goyal](https://huggingface.co/ankrgyl). 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..e966b3925ce000da033f4e76dfb6a046dbe2b3e2 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/data.ts @@ -0,0 +1,81 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Largest document understanding dataset.", + id: "HuggingFaceM4/Docmatix", + }, + { + description: + "Dataset from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry Documents Library.", + id: "eliolio/docvqa", + }, + ], + demo: { + inputs: [ + { + label: "Question", + content: "What is the idea behind the consumer relations efficiency team?", + type: "text", + }, + { + filename: "document-question-answering-input.png", + type: "img", + }, + ], + outputs: [ + { + label: "Answer", + content: "Balance cost efficiency with quality customer service", + type: "text", + }, + ], + }, + metrics: [ + { + description: + "The evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein Similarity (ANLS). This metric is flexible to character regognition errors and compares the predicted answer with the ground truth answer.", + id: "anls", + }, + { + description: + "Exact Match is a metric based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. 
Even if only one character is different, Exact Match will be 0", + id: "exact-match", + }, + ], + models: [ + { + description: "A LayoutLM model for the document QA task, fine-tuned on DocVQA and SQuAD2.0.", + id: "impira/layoutlm-document-qa", + }, + { + description: "A special model for OCR-free Document QA task.", + id: "microsoft/udop-large", + }, + { + description: "A powerful model for document question answering.", + id: "google/pix2struct-docvqa-large", + }, + ], + spaces: [ + { + description: "A robust document question answering application.", + id: "impira/docquery", + }, + { + description: "An application that can answer questions from invoices.", + id: "impira/invoices", + }, + { + description: "An application to compare different document question answering models.", + id: "merve/compare_docvqa_models", + }, + ], + summary: + "Document Question Answering (also known as Document Visual Question Answering) is the task of answering questions on document images. Document question answering models take a (document, question) pair as input and return an answer in natural language. 
Models usually rely on multi-modal features, combining text, position of words (bounding-boxes) and image.", + widgetModels: ["impira/layoutlm-document-qa"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..1636dce9d9a0d714d368a7dc3428e1473781fed2 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/inference.ts @@ -0,0 +1,110 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Document Question Answering inference + */ +export interface DocumentQuestionAnsweringInput { + /** + * One (document, question) pair to answer + */ + inputs: DocumentQuestionAnsweringInputData; + /** + * Additional inference parameters + */ + parameters?: DocumentQuestionAnsweringParameters; + [property: string]: unknown; +} +/** + * One (document, question) pair to answer + */ +export interface DocumentQuestionAnsweringInputData { + /** + * The image on which the question is asked + */ + image: unknown; + /** + * A question to ask of the document + */ + question: string; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Document Question Answering + */ +export interface DocumentQuestionAnsweringParameters { + /** + * If the words in the document are too long to fit with the question for the model, it will + * be split in several chunks with some overlap. This argument controls the size of that + * overlap. + */ + doc_stride?: number; + /** + * Whether to accept impossible as an answer + */ + handle_impossible_answer?: boolean; + /** + * Language to use while running OCR. Defaults to english. 
+ */ + lang?: string; + /** + * The maximum length of predicted answers (e.g., only answers with a shorter length are + * considered). + */ + max_answer_len?: number; + /** + * The maximum length of the question after tokenization. It will be truncated if needed. + */ + max_question_len?: number; + /** + * The maximum length of the total sentence (context + question) in tokens of each chunk + * passed to the model. The context will be split in several chunks (using doc_stride as + * overlap) if needed. + */ + max_seq_len?: number; + /** + * The number of answers to return (will be chosen by order of likelihood). Can return less + * than top_k answers if there are not enough options available within the context. + */ + top_k?: number; + /** + * A list of words and bounding boxes (normalized 0->1000). If provided, the inference will + * skip the OCR step and use the provided bounding boxes instead. + */ + word_boxes?: WordBox[]; + [property: string]: unknown; +} +export type WordBox = number[] | string; +export type DocumentQuestionAnsweringOutput = DocumentQuestionAnsweringOutputElement[]; +/** + * Outputs of inference for the Document Question Answering task + */ +export interface DocumentQuestionAnsweringOutputElement { + /** + * The answer to the question. + */ + answer: string; + /** + * The end word index of the answer (in the OCR’d version of the input or provided word + * boxes). + */ + end: number; + /** + * The probability associated to the answer. + */ + score: number; + /** + * The start word index of the answer (in the OCR’d version of the input or provided word + * boxes). 
+ */ + start: number; + /** + * The index of each word/box pair that is in the answer + */ + words: number[]; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..b017ce469be82c2f587da76b162a5494423bd468 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/spec/input.json @@ -0,0 +1,85 @@ +{ + "$id": "/inference/schemas/document-question-answering/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Document Question Answering inference", + "title": "DocumentQuestionAnsweringInput", + "type": "object", + "properties": { + "inputs": { + "description": "One (document, question) pair to answer", + "type": "object", + "title": "DocumentQuestionAnsweringInputData", + "properties": { + "image": { + "description": "The image on which the question is asked" + }, + "question": { + "type": "string", + "description": "A question to ask of the document" + } + }, + "required": ["image", "question"] + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/DocumentQuestionAnsweringParameters" + } + }, + "$defs": { + "DocumentQuestionAnsweringParameters": { + "title": "DocumentQuestionAnsweringParameters", + "description": "Additional inference parameters for Document Question Answering", + "type": "object", + "properties": { + "doc_stride": { + "type": "integer", + "description": "If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap." 
+ }, + "handle_impossible_answer": { + "type": "boolean", + "description": "Whether to accept impossible as an answer" + }, + "lang": { + "type": "string", + "description": "Language to use while running OCR. Defaults to english." + }, + "max_answer_len": { + "type": "integer", + "description": "The maximum length of predicted answers (e.g., only answers with a shorter length are considered)." + }, + "max_seq_len": { + "type": "integer", + "description": "The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using doc_stride as overlap) if needed." + }, + "max_question_len": { + "type": "integer", + "description": "The maximum length of the question after tokenization. It will be truncated if needed." + }, + "top_k": { + "type": "integer", + "description": "The number of answers to return (will be chosen by order of likelihood). Can return less than top_k answers if there are not enough options available within the context." + }, + "word_boxes": { + "type": "array", + "description": "A list of words and bounding boxes (normalized 0->1000). 
If provided, the inference will skip the OCR step and use the provided bounding boxes instead.", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "number" + }, + "maxLength": 4, + "minLength": 4 + } + ] + } + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..4fda3771a6c7fee0e09eff8dab47e3df6a6da823 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/document-question-answering/spec/output.json @@ -0,0 +1,36 @@ +{ + "$id": "/inference/schemas/document-question-answering/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Document Question Answering task", + "title": "DocumentQuestionAnsweringOutput", + "type": "array", + "items": { + "type": "object", + "properties": { + "answer": { + "type": "string", + "description": "The answer to the question." + }, + "score": { + "type": "number", + "description": "The probability associated to the answer." + }, + "start": { + "type": "integer", + "description": "The start word index of the answer (in the OCR\u2019d version of the input or provided word boxes)." + }, + "end": { + "type": "integer", + "description": "The end word index of the answer (in the OCR\u2019d version of the input or provided word boxes)." 
+ }, + "words": { + "type": "array", + "items": { + "type": "integer" + }, + "description": "The index of each word/box pair that is in the answer" + } + }, + "required": ["answer", "score", "start", "end", "words"] + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/about.md b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/about.md new file mode 100644 index 0000000000000000000000000000000000000000..1563acc655dd2ea06e42e8d4d3ca7498805c92e3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/about.md @@ -0,0 +1,72 @@ +## Use Cases + +### Transfer Learning + +Models trained on a specific dataset can learn features about the data. For instance, a model trained on an English poetry dataset learns English grammar at a very high level. This information can be transferred to a new model that is going to be trained on tweets. This process of extracting features and transferring to another model is called transfer learning. One can pass their dataset through a feature extraction pipeline and feed the result to a classifier. + +### Retrieval and Reranking + +Retrieval is the process of obtaining relevant documents or information based on a user's search query. In the context of NLP, retrieval systems aim to find relevant text passages or documents from a large corpus of data that match the user's query. The goal is to return a set of results that are likely to be useful to the user. On the other hand, reranking is a technique used to improve the quality of retrieval results by reordering them based on their relevance to the query. + +### Retrieval Augmented Generation + +Retrieval-augmented generation (RAG) is a technique in which user inputs to generative models are first queried through a knowledge base, and the most relevant information from the knowledge base is used to augment the prompt to reduce hallucinations during generation. 
Feature extraction models (primarily retrieval and reranking models) can be used in RAG to reduce model hallucinations and ground the model. + +## Inference + +You can infer feature extraction models using `pipeline` of transformers library. + +```python +from transformers import pipeline +checkpoint = "facebook/bart-base" +feature_extractor = pipeline("feature-extraction", framework="pt", model=checkpoint) +text = "Transformers is an awesome library!" + +#Reducing along the first dimension to get a 768 dimensional array +feature_extractor(text,return_tensors = "pt")[0].numpy().mean(axis=0) + +'''tensor([[[ 2.5834, 2.7571, 0.9024, ..., 1.5036, -0.0435, -0.8603], + [-1.2850, -1.0094, -2.0826, ..., 1.5993, -0.9017, 0.6426], + [ 0.9082, 0.3896, -0.6843, ..., 0.7061, 0.6517, 1.0550], + ..., + [ 0.6919, -1.1946, 0.2438, ..., 1.3646, -1.8661, -0.1642], + [-0.1701, -2.0019, -0.4223, ..., 0.3680, -1.9704, -0.0068], + [ 0.2520, -0.6869, -1.0582, ..., 0.5198, -2.2106, 0.4547]]])''' +``` + +A very popular library for training similarity and search models is called `sentence-transformers`.  To get started, install the library. + +```bash +pip install -U sentence-transformers +``` + +You can infer with `sentence-transformers` models as follows. + +```python +from sentence_transformers import SentenceTransformer + +model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2") +sentences = [ + "The weather is lovely today.", + "It's so sunny outside!", + "He drove to the stadium.", +] + +embeddings = model.encode(sentences) +similarities = model.similarity(embeddings, embeddings) +print(similarities) +# tensor([[1.0000, 0.6660, 0.1046], +# [0.6660, 1.0000, 0.1411], +# [0.1046, 0.1411, 1.0000]]) +``` + +### Text Embedding Inference + +[Text Embeddings Inference (TEI)](https://github.com/huggingface/text-embeddings-inference) is a toolkit to easily serve feature extraction models using few lines of code. 
+ +## Useful resources + +- [Documentation for feature extraction task in 🤗Transformers](https://huggingface.co/docs/transformers/main_classes/feature_extractor) +- [Introduction to MTEB Benchmark](https://huggingface.co/blog/mteb) +- [Cookbook: Simple RAG for GitHub issues using Hugging Face Zephyr and LangChain](https://huggingface.co/learn/cookbook/rag_zephyr_langchain) +- [sentence-transformers organization on Hugging Face Hub](https://huggingface.co/sentence-transformers) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..c95e5364bf1ccf08d4b04722e4d71025550023f9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/data.ts @@ -0,0 +1,53 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: + "Wikipedia dataset containing cleaned articles of all languages. 
Can be used to train `feature-extraction` models.", + id: "wikipedia", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "India, officially the Republic of India, is a country in South Asia.", + type: "text", + }, + ], + outputs: [ + { + table: [ + ["Dimension 1", "Dimension 2", "Dimension 3"], + ["2.583383083343506", "2.757075071334839", "0.9023529887199402"], + ["8.29393482208252", "1.1071064472198486", "2.03399395942688"], + ["-0.7754912972450256", "-1.647324562072754", "-0.6113331913948059"], + ["0.07087723910808563", "1.5942802429199219", "1.4610432386398315"], + ], + type: "tabular", + }, + ], + }, + metrics: [], + models: [ + { + description: "A powerful feature extraction model for natural language processing tasks.", + id: "thenlper/gte-large", + }, + { + description: "A strong feature extraction model for retrieval.", + id: "Alibaba-NLP/gte-Qwen1.5-7B-instruct", + }, + ], + spaces: [ + { + description: "A leaderboard to rank best feature extraction models..", + id: "mteb/leaderboard", + }, + ], + summary: "Feature extraction is the task of extracting features learnt in a model.", + widgetModels: ["facebook/bart-base"], +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..96194d7e9b60176e9d1ed84a349802e81f728984 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/inference.ts @@ -0,0 +1,40 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +export type FeatureExtractionOutput = Array; + +/** + * Feature Extraction Input. + * + * Auto-generated from TEI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts. 
+ */ +export interface FeatureExtractionInput { + /** + * The text to embed. + */ + inputs: string; + normalize?: boolean; + /** + * The name of the prompt that should be used by for encoding. If not set, no prompt + * will be applied. + * + * Must be a key in the `Sentence Transformers` configuration `prompts` dictionary. + * + * For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ", + * ...}, + * then the sentence "What is the capital of France?" will be encoded as + * "query: What is the capital of France?" because the prompt text will be prepended before + * any text to encode. + */ + prompt_name?: string; + truncate?: boolean; + truncation_direction?: FeatureExtractionInputTruncationDirection; + [property: string]: unknown; +} + +export type FeatureExtractionInputTruncationDirection = "Left" | "Right"; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..94e8d7a0b2d44efe6e42226460ee77523d016327 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/spec/input.json @@ -0,0 +1,47 @@ +{ + "$id": "/inference/schemas/feature-extraction/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Feature Extraction Input.\n\nAuto-generated from TEI specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.", + "title": "FeatureExtractionInput", + "type": "object", + "required": ["inputs"], + "properties": { + "inputs": { + "type": "string", + "description": "The text to embed." + }, + "normalize": { + "type": "boolean", + "default": "true", + "example": "true" + }, + "prompt_name": { + "type": "string", + "description": "The name of the prompt that should be used by for encoding. 
If not set, no prompt\nwill be applied.\n\nMust be a key in the `Sentence Transformers` configuration `prompts` dictionary.\n\nFor example if ``prompt_name`` is \"query\" and the ``prompts`` is {\"query\": \"query: \", ...},\nthen the sentence \"What is the capital of France?\" will be encoded as\n\"query: What is the capital of France?\" because the prompt text will be prepended before\nany text to encode.", + "default": "null", + "example": "null", + "nullable": true + }, + "truncate": { + "type": "boolean", + "default": "false", + "example": "false", + "nullable": true + }, + "truncation_direction": { + "allOf": [ + { + "$ref": "#/$defs/FeatureExtractionInputTruncationDirection" + } + ], + "default": "right" + } + }, + "$defs": { + "FeatureExtractionInputTruncationDirection": { + "type": "string", + "enum": ["Left", "Right"], + "title": "FeatureExtractionInputTruncationDirection" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..591fe9c64828712f67d579b5356a1ede5f23fdb9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/feature-extraction/spec/output.json @@ -0,0 +1,15 @@ +{ + "$id": "/inference/schemas/feature-extraction/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Feature Extraction Output.\n\nAuto-generated from TEI specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.", + "title": "FeatureExtractionOutput", + "type": "array", + "$defs": {}, + "items": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/about.md b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/about.md new file mode 100644 index 
0000000000000000000000000000000000000000..4fabd3cf6d06d8ba9e676eb1f637c5f688b456fb --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/about.md @@ -0,0 +1,51 @@ +## Use Cases + +### Domain Adaptation 👩‍⚕️ + +Masked language models do not require labelled data! They are trained by masking a couple of words in sentences and the model is expected to guess the masked word. This makes it very practical! + +For example, masked language modeling is used to train large models for domain-specific problems. If you have to work on a domain-specific task, such as retrieving information from medical research papers, you can train a masked language model using those papers. 📄 + +The resulting model has a statistical understanding of the language used in medical research papers, and can be further trained in a process called fine-tuning to solve different tasks, such as [Text Classification](/tasks/text-classification) or [Question Answering](/tasks/question-answering) to build a medical research papers information extraction system. 👩‍⚕️ Pre-training on domain-specific data tends to yield better results (see [this paper](https://arxiv.org/abs/2007.15779) for an example). + +If you don't have the data to train a masked language model, you can also use an existing [domain-specific masked language model](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) from the Hub and fine-tune it with your smaller task dataset. That's the magic of Open Source and sharing your work! 🎉 + +## Inference with Fill-Mask Pipeline + +You can use the 🤗 Transformers library `fill-mask` pipeline to do inference with masked language models. If a model name is not provided, the pipeline will be initialized with [distilroberta-base](/distilroberta-base). You can provide masked text and it will return a list of possible mask values ​​ranked according to the score. 
+ +```python +from transformers import pipeline + +classifier = pipeline("fill-mask") +classifier("Paris is the of France.") + +# [{'score': 0.7, 'sequence': 'Paris is the capital of France.'}, +# {'score': 0.2, 'sequence': 'Paris is the birthplace of France.'}, +# {'score': 0.1, 'sequence': 'Paris is the heart of France.'}] +``` + +## Useful Resources + +Would you like to learn more about the topic? Awesome! Here you can find some curated resources that can be helpful to you! + +- [Course Chapter on Fine-tuning a Masked Language Model](https://huggingface.co/course/chapter7/3?fw=pt) +- [Workshop on Pretraining Language Models and CodeParrot](https://www.youtube.com/watch?v=ExUR7w6xe94) +- [BERT 101: State Of The Art NLP Model Explained](https://huggingface.co/blog/bert-101) +- [Nyströmformer: Approximating self-attention in linear time and memory via the Nyström method](https://huggingface.co/blog/nystromformer) + +### Notebooks + +- [Pre-training an MLM for JAX/Flax](https://github.com/huggingface/notebooks/blob/master/examples/masked_language_modeling_flax.ipynb) +- [Masked language modeling in TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb) +- [Masked language modeling in PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling) +- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling) + +### Documentation + +- [Masked language modeling task guide](https://huggingface.co/docs/transformers/tasks/masked_language_modeling) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/data.ts new file mode 100644 index 
0000000000000000000000000000000000000000..45d5d53b9e0f37a36796ad0ce42e54b4cfd13a69 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/data.ts @@ -0,0 +1,79 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A common dataset that is used to train models for many languages.", + id: "wikipedia", + }, + { + description: "A large English dataset with text crawled from the web.", + id: "c4", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "The barked at me", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "wolf", + score: 0.487, + }, + { + label: "dog", + score: 0.061, + }, + { + label: "cat", + score: 0.058, + }, + { + label: "fox", + score: 0.047, + }, + { + label: "squirrel", + score: 0.025, + }, + ], + }, + ], + }, + metrics: [ + { + description: + "Cross Entropy is a metric that calculates the difference between two probability distributions. Each probability distribution is the distribution of predicted words", + id: "cross_entropy", + }, + { + description: + "Perplexity is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. Lower perplexity indicates better performance", + id: "perplexity", + }, + ], + models: [ + { + description: "A faster and smaller model than the famous BERT model.", + id: "distilbert-base-uncased", + }, + { + description: "A multilingual model trained on 100 languages.", + id: "xlm-roberta-base", + }, + ], + spaces: [], + summary: + "Masked language modeling is the task of masking some of the words in a sentence and predicting which words should replace those masks. 
These models are useful when we want to get a statistical understanding of the language in which the model is trained in.", + widgetModels: ["distilroberta-base"], + youtubeId: "mqElG5QJWUg", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..4d78ecd814ee6d4a46c29996f1cb5e15f6da119c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/inference.ts @@ -0,0 +1,62 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Fill Mask inference + */ +export interface FillMaskInput { + /** + * The text with masked tokens + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: FillMaskParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Fill Mask + */ +export interface FillMaskParameters { + /** + * When passed, the model will limit the scores to the passed targets instead of looking up + * in the whole vocabulary. If the provided targets are not in the model vocab, they will be + * tokenized and the first resulting token will be used (with a warning, and that might be + * slower). + */ + targets?: string[]; + /** + * When passed, overrides the number of predictions to return. + */ + top_k?: number; + [property: string]: unknown; +} +export type FillMaskOutput = FillMaskOutputElement[]; +/** + * Outputs of inference for the Fill Mask task + */ +export interface FillMaskOutputElement { + /** + * The corresponding probability + */ + score: number; + /** + * The corresponding input with the mask token prediction. + */ + sequence: string; + /** + * The predicted token id (to replace the masked one). 
+ */ + token: number; + tokenStr: unknown; + /** + * The predicted token (to replace the masked one). + */ + token_str?: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..cd3271e4a35d910c12cfcba80f380c2a84a80a8c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/spec/input.json @@ -0,0 +1,38 @@ +{ + "$id": "/inference/schemas/fill-mask/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Fill Mask inference", + "title": "FillMaskInput", + "type": "object", + "properties": { + "inputs": { + "description": "The text with masked tokens", + "type": "string" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/FillMaskParameters" + } + }, + "$defs": { + "FillMaskParameters": { + "title": "FillMaskParameters", + "description": "Additional inference parameters for Fill Mask", + "type": "object", + "properties": { + "top_k": { + "type": "integer", + "description": "When passed, overrides the number of predictions to return." + }, + "targets": { + "description": "When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocabulary. 
If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower).", + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..0b613382e781cf0405c76df5c1f9f5091da6b196 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/fill-mask/spec/output.json @@ -0,0 +1,29 @@ +{ + "$id": "/inference/schemas/fill-mask/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Fill Mask task", + "title": "FillMaskOutput", + "type": "array", + "items": { + "type": "object", + "properties": { + "sequence": { + "type": "string", + "description": "The corresponding input with the mask token prediction." + }, + "score": { + "type": "number", + "description": "The corresponding probability" + }, + "token": { + "type": "integer", + "description": "The predicted token id (to replace the masked one)." + }, + "token_str": { + "type": "string", + "description": "The predicted token (to replace the masked one)." + } + }, + "required": ["sequence", "score", "token", "tokenStr"] + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-classification/about.md b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..04169331f2853f0ae26bc6040466a20c1134473c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/about.md @@ -0,0 +1,50 @@ +## Use Cases + +Image classification models can be used when we are not interested in specific instances of objects with location information or their shape. 
+ +### Keyword Classification + +Image classification models are used widely in stock photography to assign each image a keyword. + +### Image Search + +Models trained in image classification can improve user experience by organizing and categorizing photo galleries on the phone or in the cloud, on multiple keywords or tags. + +## Inference + +With the `transformers` library, you can use the `image-classification` pipeline to infer with image classification models. You can initialize the pipeline with a model id from the Hub. If you do not provide a model id it will initialize with [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) by default. When calling the pipeline you just need to specify a path, http link or an image loaded in PIL. You can also provide a `top_k` parameter which determines how many results it should return. + +```python +from transformers import pipeline +clf = pipeline("image-classification") +clf("path_to_a_cat_image") + +[{'label': 'tabby cat', 'score': 0.731}, +... +] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to classify images using models on Hugging Face Hub. 
+ +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.imageClassification({ + data: await (await fetch("https://picsum.photos/300/300")).blob(), + model: "microsoft/resnet-50", +}); +``` + +## Useful Resources + +- [Let's Play Pictionary with Machine Learning!](https://www.youtube.com/watch?v=LS9Y2wDVI0k) +- [Fine-Tune ViT for Image Classification with 🤗Transformers](https://huggingface.co/blog/fine-tune-vit) +- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8) +- [Computer Vision Study Group: Swin Transformer](https://www.youtube.com/watch?v=Ngikt-K1Ecc) +- [Computer Vision Study Group: Masked Autoencoders Paper Walkthrough](https://www.youtube.com/watch?v=Ngikt-K1Ecc) +- [Image classification task guide](https://huggingface.co/docs/transformers/tasks/image_classification) + +### Creating your own image classifier in just a few minutes + +With [HuggingPics](https://github.com/nateraw/huggingpics), you can fine-tune Vision Transformers for anything using images found on the web. This project downloads images of classes defined by you, trains a model, and pushes it to the Hub. You even get to try out the model directly with a working widget in the browser, ready to be shared with all your friends! 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-classification/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..875b19742c163bb9f47ae51d1d4436df1846564c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/data.ts @@ -0,0 +1,88 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "Benchmark dataset used for image classification with images that belong to 100 classes.", + id: "cifar100", + }, + { + // TODO write proper description + description: "Dataset consisting of images of garments.", + id: "fashion_mnist", + }, + ], + demo: { + inputs: [ + { + filename: "image-classification-input.jpeg", + type: "img", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Egyptian cat", + score: 0.514, + }, + { + label: "Tabby cat", + score: 0.193, + }, + { + label: "Tiger cat", + score: 0.068, + }, + ], + }, + ], + }, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: "", + id: "recall", + }, + { + description: "", + id: "precision", + }, + { + description: "", + id: "f1", + }, + ], + models: [ + { + description: "A strong image classification model.", + id: "google/vit-base-patch16-224", + }, + { + description: "A robust image classification model.", + id: "facebook/deit-base-distilled-patch16-224", + }, + { + description: "A strong image classification model.", + id: "facebook/convnext-large-224", + }, + ], + spaces: [ + { + // TO DO: write description + description: "An application that classifies what a given image is about.", + id: "nielsr/perceiver-image-classification", + }, + ], + summary: + "Image classification is the task of assigning a label or class to an entire image. Images are expected to have only one class for each image. 
Image classification models take an image as input and return a prediction about which class the image belongs to.", + widgetModels: ["google/vit-base-patch16-224"], + youtubeId: "tjAIM7BOYhw", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-classification/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..e0689d887fd9248237845eac5aaa6658dd3f4019 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/inference.ts @@ -0,0 +1,51 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Image Classification inference + */ +export interface ImageClassificationInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ImageClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Image Classification + */ +export interface ImageClassificationParameters { + function_to_apply?: ClassificationOutputTransform; + /** + * When specified, limits the output to the top K most probable classes. + */ + top_k?: number; + [property: string]: unknown; +} +/** + * The function to apply to the model outputs in order to retrieve the scores. + */ +export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none"; +export type ImageClassificationOutput = ImageClassificationOutputElement[]; +/** + * Outputs of inference for the Image Classification task + */ +export interface ImageClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. 
+ */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-classification/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..a8cd4273cc8c311b12857d9104d2814f7cf4179e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/spec/input.json @@ -0,0 +1,34 @@ +{ + "$id": "/inference/schemas/image-classification/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Image Classification inference", + "title": "ImageClassificationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input image data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/ImageClassificationParameters" + } + }, + "$defs": { + "ImageClassificationParameters": { + "title": "ImageClassificationParameters", + "description": "Additional inference parameters for Image Classification", + "type": "object", + "properties": { + "function_to_apply": { + "title": "ImageClassificationOutputTransform", + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform" + }, + "top_k": { + "type": "integer", + "description": "When specified, limits the output to the top K most probable classes." 
+ } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-classification/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..2a3264bce7511f590175cd3e3ecc0af7ffe84d14 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-classification/spec/output.json @@ -0,0 +1,10 @@ +{ + "$id": "/inference/schemas/image-classification/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Image Classification task", + "title": "ImageClassificationOutput", + "type": "array", + "items": { + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutput" + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-feature-extraction/about.md b/data/node_modules/@huggingface/tasks/src/tasks/image-feature-extraction/about.md new file mode 100644 index 0000000000000000000000000000000000000000..9a968b106d527b16556f907c09e6cf67c610f7eb --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-feature-extraction/about.md @@ -0,0 +1,23 @@ +## Use Cases + +### Transfer Learning + +Models trained on a specific dataset can learn features about the data. For instance, a model trained on a car classification dataset learns to recognize edges and curves on a very high level and car-specific features on a low level. This information can be transferred to a new model that is going to be trained on classifying trucks. This process of extracting features and transferring to another model is called transfer learning. + +### Similarity + +Features extracted from models contain semantically meaningful information about the world. These features can be used to detect the similarity between two images. Assume there are two images: a photo of a stray cat in a street setting and a photo of a cat at home. 
These images both contain cats, and the features will contain the information that there's a cat in the image. Thus, comparing the features of a stray cat photo to the features of a domestic cat photo will result in higher similarity compared to any other image that doesn't contain any cats. + +## Inference + +```python +import torch +from transformers import pipeline + +pipe = pipeline(task="image-feature-extraction", model_name="google/vit-base-patch16-384", framework="pt", pool=True) +pipe("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png") + +feature_extractor(text,return_tensors = "pt")[0].numpy().mean(axis=0) + +'[[[0.21236686408519745, 1.0919708013534546, 0.8512550592422485, ...]]]' +``` diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-feature-extraction/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-feature-extraction/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..d76c885128ac726c489084ed16468529d41fa366 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-feature-extraction/data.ts @@ -0,0 +1,55 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: + "ImageNet-1K is a image classification dataset in which images are used to train image-feature-extraction models.", + id: "imagenet-1k", + }, + ], + demo: { + inputs: [ + { + filename: "mask-generation-input.png", + type: "img", + }, + ], + outputs: [ + { + table: [ + ["Dimension 1", "Dimension 2", "Dimension 3"], + ["0.21236686408519745", "1.0919708013534546", "0.8512550592422485"], + ["0.809657871723175", "-0.18544459342956543", "-0.7851548194885254"], + ["1.3103108406066895", "-0.2479034662246704", "-0.9107287526130676"], + ["1.8536205291748047", "-0.36419737339019775", "0.09717650711536407"], + ], + type: "tabular", + }, + ], + }, + metrics: [], + models: [ + { + description: "A powerful image feature extraction model.", + 
id: "timm/vit_large_patch14_dinov2.lvd142m", + }, + { + description: "A strong image feature extraction model.", + id: "google/vit-base-patch16-224-in21k", + }, + { + description: "A robust image feature extraction models.", + id: "facebook/dino-vitb16", + }, + { + description: "Strong image-text-to-text model made for information retrieval from documents.", + id: "vidore/colpali", + }, + ], + spaces: [], + summary: "Image feature extraction is the task of extracting features learnt in a computer vision model.", + widgetModels: [], +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/about.md b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/about.md new file mode 100644 index 0000000000000000000000000000000000000000..4a8a45a195af317e6d7854a62113521c72089b4c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/about.md @@ -0,0 +1,63 @@ +## Use Cases + +### Autonomous Driving + +Segmentation models are used to identify road patterns such as lanes and obstacles for safer driving. + +### Background Removal + +Image Segmentation models are used in cameras to erase the background of certain objects and apply filters to them. + +### Medical Imaging + +Image Segmentation models are used to distinguish organs or tissues, improving medical imaging workflows. Models are used to segment dental instances, analyze X-Ray scans or even segment cells for pathological diagnosis. This [dataset](https://github.com/v7labs/covid-19-xray-dataset) contains images of lungs of healthy patients and patients with COVID-19 segmented with masks. Another [segmentation dataset](https://ivdm3seg.weebly.com/data.html) contains segmented MRI data of the lower spine to analyze the effect of spaceflight simulation. + +## Task Variants + +### Semantic Segmentation + +Semantic Segmentation is the task of segmenting parts of an image that belong to the same class. 
Semantic Segmentation models make predictions for each pixel and return the probabilities of the classes for each pixel. These models are evaluated on Mean Intersection Over Union (Mean IoU). + +### Instance Segmentation + +Instance Segmentation is the variant of Image Segmentation where every distinct object is segmented, instead of one segment per class. + +### Panoptic Segmentation + +Panoptic Segmentation is the Image Segmentation task that segments the image both by instance and by class, assigning each pixel a different instance of the class. + +## Inference + +You can infer with Image Segmentation models using the `image-segmentation` pipeline. You need to install [timm](https://github.com/rwightman/pytorch-image-models) first. + +```python +!pip install timm +model = pipeline("image-segmentation") +model("cat.png") +#[{'label': 'cat', +# 'mask': mask_code, +# 'score': 0.999} +# ...] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image segmentation models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.imageSegmentation({ + data: await (await fetch("https://picsum.photos/300/300")).blob(), + model: "facebook/detr-resnet-50-panoptic", +}); +``` + +## Useful Resources + +Would you like to learn more about image segmentation? Great! Here you can find some curated resources that you may find helpful! 
+ +- [Fine-Tune a Semantic Segmentation Model with a Custom Dataset](https://huggingface.co/blog/fine-tune-segformer) +- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8) +- [A Guide on Universal Image Segmentation with Mask2Former and OneFormer](https://huggingface.co/blog/mask2former) +- [Zero-shot image segmentation with CLIPSeg](https://huggingface.co/blog/clipseg-zero-shot) +- [Semantic segmentation task guide](https://huggingface.co/docs/transformers/tasks/semantic_segmentation) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..93198713b931d6976138640d65d3a475155a5f03 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/data.ts @@ -0,0 +1,99 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Scene segmentation dataset.", + id: "scene_parse_150", + }, + ], + demo: { + inputs: [ + { + filename: "image-segmentation-input.jpeg", + type: "img", + }, + ], + outputs: [ + { + filename: "image-segmentation-output.png", + type: "img", + }, + ], + }, + metrics: [ + { + description: + "Average Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for each semantic class separately", + id: "Average Precision", + }, + { + description: "Mean Average Precision (mAP) is the overall average of the AP values", + id: "Mean Average Precision", + }, + { + description: + "Intersection over Union (IoU) is the overlap of segmentation masks. 
Mean IoU is the average of the IoU of all semantic classes", + id: "Mean Intersection over Union", + }, + { + description: "APα is the Average Precision at the IoU threshold of a α value, for example, AP50 and AP75", + id: "APα", + }, + ], + models: [ + { + // TO DO: write description + description: "Solid panoptic segmentation model trained on the COCO 2017 benchmark dataset.", + id: "facebook/detr-resnet-50-panoptic", + }, + { + description: "Semantic segmentation model trained on ADE20k benchmark dataset.", + id: "microsoft/beit-large-finetuned-ade-640-640", + }, + { + description: "Semantic segmentation model trained on ADE20k benchmark dataset with 512x512 resolution.", + id: "nvidia/segformer-b0-finetuned-ade-512-512", + }, + { + description: "Semantic segmentation model trained Cityscapes dataset.", + id: "facebook/mask2former-swin-large-cityscapes-semantic", + }, + { + description: "Panoptic segmentation model trained COCO (common objects) dataset.", + id: "facebook/mask2former-swin-large-coco-panoptic", + }, + ], + spaces: [ + { + description: "A semantic segmentation application that can predict unseen instances out of the box.", + id: "facebook/ov-seg", + }, + { + description: "One of the strongest segmentation applications.", + id: "jbrinkma/segment-anything", + }, + { + description: "A semantic segmentation application that predicts human silhouettes.", + id: "keras-io/Human-Part-Segmentation", + }, + { + description: "An instance segmentation application to predict neuronal cell types from microscopy images.", + id: "rashmi/sartorius-cell-instance-segmentation", + }, + { + description: "An application that segments videos.", + id: "ArtGAN/Segment-Anything-Video", + }, + { + description: "An panoptic segmentation application built for outdoor environments.", + id: "segments/panoptic-segment-anything", + }, + ], + summary: + "Image Segmentation divides an image into segments where each pixel in the image is mapped to an object. 
This task has multiple variants such as instance segmentation, panoptic segmentation and semantic segmentation.", + widgetModels: ["facebook/detr-resnet-50-panoptic"], + youtubeId: "dKE8SIt9C-w", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..02db5cb90f115e8c6b198eac0ac1aa616539344b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/inference.ts @@ -0,0 +1,65 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Image Segmentation inference + */ +export interface ImageSegmentationInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ImageSegmentationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Image Segmentation + */ +export interface ImageSegmentationParameters { + /** + * Threshold to use when turning the predicted masks into binary values. + */ + mask_threshold?: number; + /** + * Mask overlap threshold to eliminate small, disconnected segments. + */ + overlap_mask_area_threshold?: number; + /** + * Segmentation task to be performed, depending on model capabilities. + */ + subtask?: ImageSegmentationSubtask; + /** + * Probability threshold to filter out predicted masks. 
+ */ + threshold?: number; + [property: string]: unknown; +} +export type ImageSegmentationSubtask = "instance" | "panoptic" | "semantic"; +export type ImageSegmentationOutput = ImageSegmentationOutputElement[]; +/** + * Outputs of inference for the Image Segmentation task + * + * A predicted mask / segment + */ +export interface ImageSegmentationOutputElement { + /** + * The label of the predicted segment + */ + label: string; + /** + * The corresponding mask as a black-and-white image + */ + mask: unknown; + /** + * The score or confidence degreee the model has + */ + score?: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..500793554146810f1aa1e30adf221a5d10506b50 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/spec/input.json @@ -0,0 +1,54 @@ +{ + "$id": "/inference/schemas/image-segmentation/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Image Segmentation inference", + "title": "ImageSegmentationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input image data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/ImageSegmentationParameters" + } + }, + "$defs": { + "ImageSegmentationParameters": { + "title": "ImageSegmentationParameters", + "description": "Additional inference parameters for Image Segmentation", + "type": "object", + "properties": { + "mask_threshold": { + "type": "number", + "description": "Threshold to use when turning the predicted masks into binary values." + }, + "overlap_mask_area_threshold": { + "type": "number", + "description": "Mask overlap threshold to eliminate small, disconnected segments." 
+ }, + "subtask": { + "title": "ImageSegmentationSubtask", + "type": "string", + "description": "Segmentation task to be performed, depending on model capabilities.", + "oneOf": [ + { + "const": "instance" + }, + { + "const": "panoptic" + }, + { + "const": "semantic" + } + ] + }, + "threshold": { + "type": "number", + "description": "Probability threshold to filter out predicted masks." + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..b20aa415e058fd1b7f75f2915765cd0e89483075 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-segmentation/spec/output.json @@ -0,0 +1,25 @@ +{ + "$id": "/inference/schemas/image-segmentation/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Image Segmentation task", + "title": "ImageSegmentationOutput", + "type": "array", + "items": { + "description": "A predicted mask / segment", + "type": "object", + "properties": { + "label": { + "type": "string", + "description": "The label of the predicted segment" + }, + "mask": { + "description": "The corresponding mask as a black-and-white image" + }, + "score": { + "type": "number", + "description": "The score or confidence degreee the model has" + } + }, + "required": ["label", "mask"] + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-text-to-text/about.md b/data/node_modules/@huggingface/tasks/src/tasks/image-text-to-text/about.md new file mode 100644 index 0000000000000000000000000000000000000000..13c0ae0ec8600a0f244f9b0de38fa5b06ae4bfeb --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-text-to-text/about.md @@ -0,0 +1,74 @@ +## Different Types of Vision Language Models + +Vision language models come in three types: 
+ +- **Base:** Pre-trained models that can be fine-tuned. A good example of base models is the [PaliGemma models family](https://huggingface.co/models?sort=trending&search=google%2Fpaligemma-3b-pt) by Google. +- **Instruction:** Base models fine-tuned on instruction datasets. A good example of instruction fine-tuned models is [idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b). +- **Chatty/Conversational:** Base models fine-tuned on conversation datasets. A good example of chatty models is [deepseek-vl-7b-chat](https://huggingface.co/deepseek-ai/deepseek-vl-7b-chat). + +![VLM uses](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/vlm/visual.jpg) + +## Use Cases + +### Multimodal Dialogue + +Vision language models can be used as multimodal assistants, keeping context about the conversation and keeping the image to have multiple-turn dialogues. + +### Zero-shot Object Detection, Image Segmentation and Localization + +Some vision language models can detect or segment a set of objects or describe the positions or relative positions of the objects. For example, one could prompt such a model to ask if one object is behind another. Such a model can also output bounding box coordination or segmentation masks directly in the text output, unlike the traditional models explicitly trained on only object detection or image segmentation. + +### Visual Question Answering + +Vision language models trained on image-text pairs can be used for visual question answering and generating captions for images. + +### Document Question Answering and Retrieval + +Documents often consist of different layouts, charts, tables, images, and more. Vision language models trained on formatted documents can extract information from them. This is an OCR-free approach; the inputs skip OCR, and documents are directly fed to vision language models. + +### Image Recognition with Instructions + +Vision language models can recognize images through descriptions. 
When given detailed descriptions of specific entities, it can classify the entities in an image. + +## Inference + +You can use the Transformers library to interact with vision-language models. You can load the model like below. + +```python +from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration +import torch + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf") +model = LlavaNextForConditionalGeneration.from_pretrained( + "llava-hf/llava-v1.6-mistral-7b-hf", + torch_dtype=torch.float16 +) +model.to(device) +``` + +We can infer by passing image and text dialogues. + +```python +from PIL import Image +import requests + +# image of a radar chart +url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true" +image = Image.open(requests.get(url, stream=True).raw) +prompt = "[INST] \nWhat is shown in this image? [/INST]" + +inputs = processor(prompt, image, return_tensors="pt").to(device) +output = model.generate(**inputs, max_new_tokens=100) + +print(processor.decode(output[0], skip_special_tokens=True)) +# The image appears to be a radar chart, which is a type of multivariate chart that displays values for multiple variables represented on axes +# starting from the same point. This particular radar chart is showing the performance of different models or systems across various metrics. 
+# The axes represent different metrics or benchmarks, such as MM-Vet, MM-Vet, MM-Vet, MM-Vet, MM-Vet, MM-V +``` + +## Useful Resources + +- [Vision Language Models Explained](https://huggingface.co/blog/vlms) +- [Open-source Multimodality and How to Achieve it using Hugging Face](https://www.youtube.com/watch?v=IoGaGfU1CIg&t=601s) +- [Introducing Idefics2: A Powerful 8B Vision-Language Model for the community](https://huggingface.co/blog/idefics2) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-text-to-text/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-text-to-text/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..057655b325d05ce2092659b786faa2a34e39190f --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-text-to-text/data.ts @@ -0,0 +1,94 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Instructions composed of image and text.", + id: "liuhaotian/LLaVA-Instruct-150K", + }, + { + description: "Conversation turns where questions involve image and text.", + id: "liuhaotian/LLaVA-Pretrain", + }, + { + description: "A collection of datasets made for model fine-tuning.", + id: "HuggingFaceM4/the_cauldron", + }, + { + description: "Screenshots of websites with their HTML/CSS codes.", + id: "HuggingFaceM4/WebSight", + }, + ], + demo: { + inputs: [ + { + filename: "image-text-to-text-input.png", + type: "img", + }, + { + label: "Text Prompt", + content: "Describe the position of the bee in detail.", + type: "text", + }, + ], + outputs: [ + { + label: "Answer", + content: + "The bee is sitting on a pink flower, surrounded by other flowers. 
The bee is positioned in the center of the flower, with its head and front legs sticking out.", + type: "text", + }, + ], + }, + metrics: [], + models: [ + { + description: "Cutting-edge vision language model that can take multiple image inputs.", + id: "facebook/chameleon-7b", + }, + { + description: "Cutting-edge conversational vision language model that can take multiple image inputs.", + id: "HuggingFaceM4/idefics2-8b-chatty", + }, + { + description: "Small yet powerful model.", + id: "vikhyatk/moondream2", + }, + { + description: "Strong image-text-to-text model made to understand documents.", + id: "mPLUG/DocOwl1.5", + }, + { + description: "Strong image-text-to-text model.", + id: "llava-hf/llava-v1.6-mistral-7b-hf", + }, + ], + spaces: [ + { + description: "Leaderboard to evaluate vision language models.", + id: "opencompass/open_vlm_leaderboard", + }, + { + description: "Vision language models arena, where models are ranked by votes of users.", + id: "WildVision/vision-arena", + }, + { + description: "Powerful vision-language model assistant.", + id: "liuhaotian/LLaVA-1.6", + }, + { + description: "An application to compare outputs of different vision language models.", + id: "merve/compare_VLMs", + }, + { + description: "An application for document vision language tasks.", + id: "mPLUG/DocOwl", + }, + ], + summary: + "Image-text-to-text models take in an image and text prompt and output text. These models are also called vision-language models, or VLMs. 
The difference from image-to-text models is that these models take an additional text input, not restricting the model to certain use cases like image captioning, and may also be trained to accept a conversation as input.", + widgetModels: ["microsoft/kosmos-2-patch14-224"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-3d/about.md b/data/node_modules/@huggingface/tasks/src/tasks/image-to-3d/about.md new file mode 100644 index 0000000000000000000000000000000000000000..dca8e4708190708275de87ebf0b52aa2dc7f99c3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-3d/about.md @@ -0,0 +1,62 @@ +## Use Cases + +Image-to-3D models can be used in a wide variety of applications that require 3D, such as games, animation, design, architecture, engineering, marketing, and more. + +![Image-to-3D Thumbnail](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/image-to-3d/image-to-3d-thumbnail.png) + +### Generating Meshes + +Meshes are the standard representation of 3D in industry. + +### Generating Gaussian Splats + +[Gaussian Splatting](https://huggingface.co/blog/gaussian-splatting) is a rendering technique that represents scenes as fuzzy points. + +### Inference + +Inference for this task typically leverages the [Diffusers](https://huggingface.co/docs/diffusers/index) library for inference, using [Custom Pipelines](https://huggingface.co/docs/diffusers/v0.6.0/en/using-diffusers/custom_pipelines). + +These are unstandardized and depend on the model. More details can be found in each model repository. 
+ +```python +import torch +import requests +import numpy as np +from io import BytesIO +from diffusers import DiffusionPipeline +from PIL import Image + +pipeline = DiffusionPipeline.from_pretrained( + "dylanebert/LGM-full", + custom_pipeline="dylanebert/LGM-full", + torch_dtype=torch.float16, + trust_remote_code=True, +).to("cuda") + +input_url = "https://huggingface.co/datasets/dylanebert/iso3d/resolve/main/jpg@512/a_cat_statue.jpg" +input_image = Image.open(BytesIO(requests.get(input_url).content)) +input_image = np.array(input_image, dtype=np.float32) / 255.0 +result = pipeline("", input_image) +result_path = "/tmp/output.ply" +pipeline.save_ply(result, result_path) +``` + +In the code above, we: + +1. Import the necessary libraries +2. Load the `LGM-full` model and custom pipeline +3. Load and preprocess the input image +4. Run the pipeline on the input image +5. Save the output to a file + +### Output Formats + +Meshes can be in `.obj`, `.glb`, `.stl`, or `.gltf` format. Other formats are allowed, but won't be rendered in the gradio [Model3D](https://www.gradio.app/docs/gradio/model3d) component. + +Splats can be in `.ply` or `.splat` format. They can be rendered in the gradio [Model3D](https://www.gradio.app/docs/gradio/model3d) component using the [gsplat.js](https://github.com/huggingface/gsplat.js) library. 
+ +## Useful Resources + +- [ML for 3D Course](https://huggingface.co/learn/ml-for-3d-course) +- [3D Arena Leaderboard](https://huggingface.co/spaces/dylanebert/3d-arena) +- [gsplat.js](https://github.com/huggingface/gsplat.js) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-3d/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-to-3d/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..e9dd9365175d3dccd47fe123126187c88e34c4f2 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-3d/data.ts @@ -0,0 +1,75 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A large dataset of over 10 million 3D objects.", + id: "allenai/objaverse-xl", + }, + { + description: "A dataset of isolated object images for evaluating image-to-3D models.", + id: "dylanebert/iso3d", + }, + ], + demo: { + inputs: [ + { + filename: "image-to-3d-image-input.png", + type: "img", + }, + ], + outputs: [ + { + label: "Result", + content: "image-to-3d-3d-output-filename.glb", + type: "text", + }, + ], + }, + metrics: [], + models: [ + { + description: "Fast image-to-3D mesh model by Tencent.", + id: "TencentARC/InstantMesh", + }, + { + description: "Fast image-to-3D mesh model by StabilityAI", + id: "stabilityai/TripoSR", + }, + { + description: "A scaled up image-to-3D mesh model derived from TripoSR.", + id: "hwjiang/Real3D", + }, + { + description: "Generative 3D gaussian splatting model.", + id: "ashawkey/LGM", + }, + ], + spaces: [ + { + description: "Leaderboard to evaluate image-to-3D models.", + id: "dylanebert/3d-arena", + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "TencentARC/InstantMesh", + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "stabilityai/TripoSR", + }, + { + description: "Image-to-3D demo with mesh outputs.", + id: "hwjiang/Real3D", + }, + { + description: "Image-to-3D demo with splat 
outputs.", + id: "dylanebert/LGM-mini", + }, + ], + summary: "Image-to-3D models take in image input and produce 3D output.", + widgetModels: [], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/about.md b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/about.md new file mode 100644 index 0000000000000000000000000000000000000000..63f490f829554e7734e3f30d4b5b1b814dfe6d7b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/about.md @@ -0,0 +1,80 @@ +## Use Cases + +### Style transfer + +One of the most popular use cases of image-to-image is style transfer. Style transfer models can convert a normal photography into a painting in the style of a famous painter. + +## Task Variants + +### Image inpainting + +Image inpainting is widely used during photography editing to remove unwanted objects, such as poles, wires, or sensor +dust. + +### Image colorization + +Old or black and white images can be brought up to life using an image colorization model. + +### Super Resolution + +Super-resolution models increase the resolution of an image, allowing for higher-quality viewing and printing. + +## Inference + +You can use pipelines for image-to-image in 🧨diffusers library to easily use image-to-image models. See an example for `StableDiffusionImg2ImgPipeline` below. 
+ +```python +from PIL import Image +from diffusers import StableDiffusionImg2ImgPipeline + +model_id_or_path = "runwayml/stable-diffusion-v1-5" +pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) +pipe = pipe.to(cuda) + +init_image = Image.open("mountains_image.jpeg").convert("RGB").resize((768, 512)) +prompt = "A fantasy landscape, trending on artstation" + +images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images +images[0].save("fantasy_landscape.png") +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image-to-image models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.imageToImage({ + data: await (await fetch("image")).blob(), + model: "timbrooks/instruct-pix2pix", + parameters: { + prompt: "Deblur this image", + }, +}); +``` + +## ControlNet + +Controlling the outputs of diffusion models only with a text prompt is a challenging problem. ControlNet is a neural network model that provides image-based control to diffusion models. Control images can be edges or other landmarks extracted from a source image. + +Many ControlNet models were trained in our community event, JAX Diffusers sprint. You can see the full list of the ControlNet models available [here](https://huggingface.co/spaces/jax-diffusers-event/leaderboard). + +## Most Used Model for the Task + +Pix2Pix is a popular model used for image-to-image translation tasks. It is based on a conditional-GAN (generative adversarial network) where instead of a noise vector a 2D image is given as input. More information about Pix2Pix can be retrieved from this [link](https://phillipi.github.io/pix2pix/) where the associated paper and the GitHub repository can be found. + +The images below show some examples extracted from the Pix2Pix paper. 
This model can be applied to various use cases. It is capable of relatively simpler things, e.g., converting a grayscale image to its colored version. But more importantly, it can generate realistic pictures from rough sketches (can be seen in the purse example) or from painting-like images (can be seen in the street and facade examples below). + +![Examples](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/image-to-image/pix2pix_examples.jpg) + +## Useful Resources + +- [Image-to-image guide with diffusers](https://huggingface.co/docs/diffusers/using-diffusers/img2img) +- [Train your ControlNet with diffusers 🧨](https://huggingface.co/blog/train-your-controlnet) +- [Ultra fast ControlNet with 🧨 Diffusers](https://huggingface.co/blog/controlnet) + +## References + +[1] P. Isola, J. -Y. Zhu, T. Zhou and A. A. Efros, "Image-to-Image Translation with Conditional Adversarial Networks," 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017, pp. 5967-5976, doi: 10.1109/CVPR.2017.632. + +This page was made possible thanks to the efforts of [Paul Gafton](https://github.com/Paul92) and [Osman Alenbey](https://huggingface.co/osman93). 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..5ba7d2f93343b6f28adaf6cefc4f75ace6ec5097 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/data.ts @@ -0,0 +1,101 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Synthetic dataset, for image relighting", + id: "VIDIT", + }, + { + description: "Multiple images of celebrities, used for facial expression translation", + id: "huggan/CelebA-faces", + }, + ], + demo: { + inputs: [ + { + filename: "image-to-image-input.jpeg", + type: "img", + }, + ], + outputs: [ + { + filename: "image-to-image-output.png", + type: "img", + }, + ], + }, + isPlaceholder: false, + metrics: [ + { + description: + "Peak Signal to Noise Ratio (PSNR) is an approximation of the human perception, considering the ratio of the absolute intensity with respect to the variations. Measured in dB, a high value indicates a high fidelity.", + id: "PSNR", + }, + { + description: + "Structural Similarity Index (SSIM) is a perceptual metric which compares the luminance, contrast and structure of two images. 
The values of SSIM range between -1 and 1, and higher values indicate closer resemblance to the original image.", + id: "SSIM", + }, + { + description: + "Inception Score (IS) is an analysis of the labels predicted by an image classification model when presented with a sample of the generated images.", + id: "IS", + }, + ], + models: [ + { + description: "A model that enhances images captured in low light conditions.", + id: "keras-io/low-light-image-enhancement", + }, + { + description: "A model that increases the resolution of an image.", + id: "keras-io/super-resolution", + }, + { + description: + "A model that creates a set of variations of the input image in the style of DALL-E using Stable Diffusion.", + id: "lambdalabs/sd-image-variations-diffusers", + }, + { + description: "A model that generates images based on segments in the input image and the text prompt.", + id: "mfidabel/controlnet-segment-anything", + }, + { + description: "A model that takes an image and an instruction to edit the image.", + id: "timbrooks/instruct-pix2pix", + }, + ], + spaces: [ + { + description: "Image enhancer application for low light.", + id: "keras-io/low-light-image-enhancement", + }, + { + description: "Style transfer application.", + id: "keras-io/neural-style-transfer", + }, + { + description: "An application that generates images based on segment control.", + id: "mfidabel/controlnet-segment-anything", + }, + { + description: "Image generation application that takes image control and text prompt.", + id: "hysts/ControlNet", + }, + { + description: "Colorize any image using this app.", + id: "ioclab/brightness-controlnet", + }, + { + description: "Edit images with instructions.", + id: "timbrooks/instruct-pix2pix", + }, + ], + summary: + "Image-to-image is the task of transforming a source image to match the characteristics of a target image or a target image domain. 
Any image manipulation and enhancement is possible with image to image models.", + widgetModels: ["lllyasviel/sd-controlnet-canny"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..bf732e07018c5ab5d1e9bd0eb3f1212e7943dd36 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/inference.ts @@ -0,0 +1,67 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Image To Image inference + */ +export interface ImageToImageInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ImageToImageParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Image To Image + */ +export interface ImageToImageParameters { + /** + * For diffusion models. A higher guidance scale value encourages the model to generate + * images closely linked to the text prompt at the expense of lower image quality. + */ + guidance_scale?: number; + /** + * One or several prompt to guide what NOT to include in image generation. + */ + negative_prompt?: string[]; + /** + * For diffusion models. The number of denoising steps. More denoising steps usually lead to + * a higher quality image at the expense of slower inference. 
+ */ + num_inference_steps?: number; + /** + * The size in pixel of the output image + */ + target_size?: TargetSize; + [property: string]: unknown; +} + +/** + * The size in pixel of the output image + */ +export interface TargetSize { + height: number; + width: number; + [property: string]: unknown; +} + +/** + * Outputs of inference for the Image To Image task + */ +export interface ImageToImageOutput { + /** + * The output image + */ + image?: unknown; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..873e1f20d956f5cb40802589be3d2a8972bd2abc --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/spec/input.json @@ -0,0 +1,54 @@ +{ + "$id": "/inference/schemas/image-to-image/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Image To Image inference", + "title": "ImageToImageInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input image data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/ImageToImageParameters" + } + }, + "$defs": { + "ImageToImageParameters": { + "title": "ImageToImageParameters", + "description": "Additional inference parameters for Image To Image", + "type": "object", + "properties": { + "guidance_scale": { + "type": "number", + "description": "For diffusion models. A higher guidance scale value encourages the model to generate images closely linked to the text prompt at the expense of lower image quality." + }, + "negative_prompt": { + "type": "array", + "items": { + "type": "string" + }, + "description": "One or several prompt to guide what NOT to include in image generation." + }, + "num_inference_steps": { + "type": "integer", + "description": "For diffusion models. 
The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference." + }, + "target_size": { + "type": "object", + "description": "The size in pixel of the output image", + "properties": { + "width": { + "type": "integer" + }, + "height": { + "type": "integer" + } + }, + "required": ["width", "height"] + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..af4eff804604607f75ac96d1dc6ed6bcc1eb953c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-image/spec/output.json @@ -0,0 +1,12 @@ +{ + "$id": "/inference/schemas/image-to-image/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Image To Image task", + "title": "ImageToImageOutput", + "type": "object", + "properties": { + "image": { + "description": "The output image" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/about.md b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/about.md new file mode 100644 index 0000000000000000000000000000000000000000..e479783b3fcdfdabb51a28c8714fa0e90642c197 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/about.md @@ -0,0 +1,61 @@ +## Use Cases + +### Image Captioning + +Image Captioning is the process of generating textual description of an image. +This can help the visually impaired people to understand what's happening in their surroundings. + +### Optical Character Recognition (OCR) + +OCR models convert the text present in an image, e.g. a scanned document, to text. + +## Inference + +### Image Captioning + +You can use the 🤗 Transformers library's `image-to-text` pipeline to generate caption for the Image input. 
+ +```python +from transformers import pipeline + +captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") +captioner("https://huggingface.co/datasets/Narsil/image_dummy/resolve/main/parrots.png") +## [{'generated_text': 'two birds are standing next to each other '}] +``` + +### OCR + +This code snippet uses Microsoft’s TrOCR, an encoder-decoder model consisting of an image Transformer encoder and a text Transformer decoder for state-of-the-art optical character recognition (OCR) on single-text line images. + +```python +from transformers import TrOCRProcessor, VisionEncoderDecoderModel + +processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten') +model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten') +pixel_values = processor(images="image.jpeg", return_tensors="pt").pixel_values + +generated_ids = model.generate(pixel_values) +generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image-to-text models on Hugging Face Hub. 
+ +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.imageToText({ + data: await (await fetch("https://picsum.photos/300/300")).blob(), + model: "Salesforce/blip-image-captioning-base", +}); +``` + +## Useful Resources + +- [Image Captioning](https://huggingface.co/docs/transformers/main/en/tasks/image_captioning) +- [Image Captioning Use Case](https://blog.google/outreach-initiatives/accessibility/get-image-descriptions/) +- [Train Image Captioning model on your dataset](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/GIT/Fine_tune_GIT_on_an_image_captioning_dataset.ipynb) +- [Train OCR model on your dataset ](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/TrOCR) + +This page was made possible thanks to efforts of [Sukesh Perla](https://huggingface.co/hitchhiker3010) and [Johannes Kolbe](https://huggingface.co/johko). diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..690149537ddcff6e59923417e9f5cb3416782f70 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/data.ts @@ -0,0 +1,82 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "Dataset from 12M image-text of Reddit", + id: "red_caps", + }, + { + // TODO write proper description + description: "Dataset from 3.3M images of Google", + id: "datasets/conceptual_captions", + }, + ], + demo: { + inputs: [ + { + filename: "savanna.jpg", + type: "img", + }, + ], + outputs: [ + { + label: "Detailed description", + content: "a herd of giraffes and zebras grazing in a field", + type: "text", + }, + ], + }, + metrics: [], + models: [ + { + description: "A robust image captioning model.", + id: 
"Salesforce/blip2-opt-2.7b", + }, + { + description: "A powerful and accurate image-to-text model that can also localize concepts in images.", + id: "microsoft/kosmos-2-patch14-224", + }, + { + description: "A strong optical character recognition model.", + id: "facebook/nougat-base", + }, + { + description: "A powerful model that lets you have a conversation with the image.", + id: "llava-hf/llava-1.5-7b-hf", + }, + ], + spaces: [ + { + description: "An application that compares various image captioning models.", + id: "nielsr/comparing-captioning-models", + }, + { + description: "A robust image captioning application.", + id: "flax-community/image-captioning", + }, + { + description: "An application that transcribes handwritings into text.", + id: "nielsr/TrOCR-handwritten", + }, + { + description: "An application that can caption images and answer questions about a given image.", + id: "Salesforce/BLIP", + }, + { + description: "An application that can caption images and answer questions with a conversational agent.", + id: "Salesforce/BLIP2", + }, + { + description: "An image captioning application that demonstrates the effect of noise on captions.", + id: "johko/capdec-image-captioning", + }, + ], + summary: + "Image to text models output a text from a given image. 
Image captioning or optical character recognition can be considered as the most common applications of image to text.", + widgetModels: ["Salesforce/blip-image-captioning-base"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..7cace215832b47d1727b0db82d85e7322f5cbf03 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/inference.ts @@ -0,0 +1,143 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Image To Text inference + */ +export interface ImageToTextInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ImageToTextParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Image To Text + */ +export interface ImageToTextParameters { + /** + * Parametrization of the text generation process + */ + generate?: GenerationParameters; + /** + * The amount of maximum tokens to generate. + */ + max_new_tokens?: number; + [property: string]: unknown; +} + +/** + * Parametrization of the text generation process + * + * Ad-hoc parametrization of the text generation process + */ +export interface GenerationParameters { + /** + * Whether to use sampling instead of greedy decoding when generating new tokens. + */ + do_sample?: boolean; + /** + * Controls the stopping condition for beam-based methods. + */ + early_stopping?: EarlyStoppingUnion; + /** + * If set to float strictly between 0 and 1, only tokens with a conditional probability + * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + * 3e-4 to 9e-4, depending on the size of the model. 
See [Truncation Sampling as Language + * Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + */ + epsilon_cutoff?: number; + /** + * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + * float strictly between 0 and 1, a token is only considered if it is greater than either + * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + * for more details. + */ + eta_cutoff?: number; + /** + * The maximum length (in tokens) of the generated text, including the input. + */ + max_length?: number; + /** + * The maximum number of tokens to generate. Takes precedence over maxLength. + */ + max_new_tokens?: number; + /** + * The minimum length (in tokens) of the generated text, including the input. + */ + min_length?: number; + /** + * The minimum number of tokens to generate. Takes precedence over maxLength. + */ + min_new_tokens?: number; + /** + * Number of groups to divide num_beams into in order to ensure diversity among different + * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + */ + num_beam_groups?: number; + /** + * Number of beams to use for beam search. + */ + num_beams?: number; + /** + * The value balances the model confidence and the degeneration penalty in contrastive + * search decoding. + */ + penalty_alpha?: number; + /** + * The value used to modulate the next token probabilities. + */ + temperature?: number; + /** + * The number of highest probability vocabulary tokens to keep for top-k-filtering. 
+ */ + top_k?: number; + /** + * If set to float < 1, only the smallest set of most probable tokens with probabilities + * that add up to top_p or higher are kept for generation. + */ + top_p?: number; + /** + * Local typicality measures how similar the conditional probability of predicting a target + * token next is to the expected conditional probability of predicting a random token next, + * given the partial text already generated. If set to float < 1, the smallest set of the + * most locally typical tokens with probabilities that add up to typical_p or higher are + * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + */ + typical_p?: number; + /** + * Whether the model should use the past last key/values attentions to speed up decoding + */ + use_cache?: boolean; + [property: string]: unknown; +} + +/** + * Controls the stopping condition for beam-based methods. + */ +export type EarlyStoppingUnion = boolean | "never"; + +/** + * Outputs of inference for the Image To Text task + */ +export interface ImageToTextOutput { + generatedText: unknown; + /** + * The generated text. 
+ */ + generated_text?: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..dec832a48f604d66b3af6541a37f130101078bf9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/spec/input.json @@ -0,0 +1,34 @@ +{ + "$id": "/inference/schemas/image-to-text/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Image To Text inference", + "title": "ImageToTextInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input image data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/ImageToTextParameters" + } + }, + "$defs": { + "ImageToTextParameters": { + "title": "ImageToTextParameters", + "description": "Additional inference parameters for Image To Text", + "type": "object", + "properties": { + "max_new_tokens": { + "type": "integer", + "description": "The amount of maximum tokens to generate." 
+ }, + "generate": { + "description": "Parametrization of the text generation process", + "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters" + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..388c3456f4e7f50b0c7b133725a2d951f152cb01 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/image-to-text/spec/output.json @@ -0,0 +1,14 @@ +{ + "$id": "/inference/schemas/image-to-text/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Image To Text task", + "title": "ImageToTextOutput", + "type": "object", + "properties": { + "generated_text": { + "type": "string", + "description": "The generated text." + } + }, + "required": ["generatedText"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/index.ts b/data/node_modules/@huggingface/tasks/src/tasks/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..68b2bf6b0ddc0c05157fc4b012037a0bb26569d5 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/index.ts @@ -0,0 +1,309 @@ +import type { PipelineType } from "../pipelines"; +import { PIPELINE_DATA } from "../pipelines"; + +import audioClassification from "./audio-classification/data"; +import audioToAudio from "./audio-to-audio/data"; +import automaticSpeechRecognition from "./automatic-speech-recognition/data"; +import documentQuestionAnswering from "./document-question-answering/data"; +import featureExtraction from "./feature-extraction/data"; +import fillMask from "./fill-mask/data"; +import imageClassification from "./image-classification/data"; +import imageFeatureExtraction from "./image-feature-extraction/data"; +import imageToImage from "./image-to-image/data"; +import 
imageToText from "./image-to-text/data"; +import imageTextToText from "./image-text-to-text/data"; +import imageSegmentation from "./image-segmentation/data"; +import maskGeneration from "./mask-generation/data"; +import objectDetection from "./object-detection/data"; +import depthEstimation from "./depth-estimation/data"; +import placeholder from "./placeholder/data"; +import reinforcementLearning from "./reinforcement-learning/data"; +import questionAnswering from "./question-answering/data"; +import sentenceSimilarity from "./sentence-similarity/data"; +import summarization from "./summarization/data"; +import tableQuestionAnswering from "./table-question-answering/data"; +import tabularClassification from "./tabular-classification/data"; +import tabularRegression from "./tabular-regression/data"; +import textToImage from "./text-to-image/data"; +import textToSpeech from "./text-to-speech/data"; +import tokenClassification from "./token-classification/data"; +import translation from "./translation/data"; +import textClassification from "./text-classification/data"; +import textGeneration from "./text-generation/data"; +import textToVideo from "./text-to-video/data"; +import unconditionalImageGeneration from "./unconditional-image-generation/data"; +import videoClassification from "./video-classification/data"; +import visualQuestionAnswering from "./visual-question-answering/data"; +import zeroShotClassification from "./zero-shot-classification/data"; +import zeroShotImageClassification from "./zero-shot-image-classification/data"; +import zeroShotObjectDetection from "./zero-shot-object-detection/data"; +import imageTo3D from "./image-to-3d/data"; +import textTo3D from "./text-to-3d/data"; + +export type * from "./audio-classification/inference"; +export type * from "./automatic-speech-recognition/inference"; +export type { + ChatCompletionInput, + ChatCompletionInputMessage, + ChatCompletionOutput, + ChatCompletionOutputComplete, + ChatCompletionOutputMessage, 
+ ChatCompletionStreamOutput, + ChatCompletionStreamOutputChoice, + ChatCompletionStreamOutputDelta, +} from "./chat-completion/inference"; +export type * from "./document-question-answering/inference"; +export type * from "./feature-extraction/inference"; +export type * from "./fill-mask/inference"; +export type { + ImageClassificationInput, + ImageClassificationOutput, + ImageClassificationOutputElement, + ImageClassificationParameters, +} from "./image-classification/inference"; +export type * from "./image-to-image/inference"; +export type { ImageToTextInput, ImageToTextOutput, ImageToTextParameters } from "./image-to-text/inference"; +export type * from "./image-segmentation/inference"; +export type * from "./object-detection/inference"; +export type * from "./depth-estimation/inference"; +export type * from "./question-answering/inference"; +export type * from "./sentence-similarity/inference"; +export type * from "./summarization/inference"; +export type * from "./table-question-answering/inference"; +export type { TextToImageInput, TextToImageOutput, TextToImageParameters } from "./text-to-image/inference"; +export type { TextToAudioParameters, TextToSpeechInput, TextToSpeechOutput } from "./text-to-speech/inference"; +export type * from "./token-classification/inference"; +export type { + Text2TextGenerationParameters, + Text2TextGenerationTruncationStrategy, + TranslationInput, + TranslationOutput, +} from "./translation/inference"; +export type { + ClassificationOutputTransform, + TextClassificationInput, + TextClassificationOutput, + TextClassificationOutputElement, + TextClassificationParameters, +} from "./text-classification/inference"; +export type { + TextGenerationOutputFinishReason, + TextGenerationOutputPrefillToken, + TextGenerationInput, + TextGenerationOutput, + TextGenerationOutputDetails, + TextGenerationInputGenerateParameters, + TextGenerationOutputBestOfSequence, + TextGenerationOutputToken, + TextGenerationStreamOutputStreamDetails, + 
TextGenerationStreamOutput, +} from "./text-generation/inference"; +export type * from "./video-classification/inference"; +export type * from "./visual-question-answering/inference"; +export type * from "./zero-shot-classification/inference"; +export type * from "./zero-shot-image-classification/inference"; +export type { + BoundingBox, + ZeroShotObjectDetectionInput, + ZeroShotObjectDetectionInputData, + ZeroShotObjectDetectionOutput, + ZeroShotObjectDetectionOutputElement, +} from "./zero-shot-object-detection/inference"; + +import type { ModelLibraryKey } from "../model-libraries"; + +/** + * Model libraries compatible with each ML task + */ +export const TASKS_MODEL_LIBRARIES: Record = { + "audio-classification": ["speechbrain", "transformers", "transformers.js"], + "audio-to-audio": ["asteroid", "fairseq", "speechbrain"], + "automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"], + "depth-estimation": ["transformers", "transformers.js"], + "document-question-answering": ["transformers", "transformers.js"], + "feature-extraction": ["sentence-transformers", "transformers", "transformers.js"], + "fill-mask": ["transformers", "transformers.js"], + "graph-ml": ["transformers"], + "image-classification": ["keras", "timm", "transformers", "transformers.js"], + "image-feature-extraction": ["timm", "transformers"], + "image-segmentation": ["transformers", "transformers.js"], + "image-text-to-text": ["transformers"], + "image-to-image": ["diffusers", "transformers", "transformers.js"], + "image-to-text": ["transformers", "transformers.js"], + "image-to-video": ["diffusers"], + "video-classification": ["transformers"], + "mask-generation": ["transformers"], + "multiple-choice": ["transformers"], + "object-detection": ["transformers", "transformers.js"], + other: [], + "question-answering": ["adapter-transformers", "allennlp", "transformers", "transformers.js"], + robotics: [], + "reinforcement-learning": ["transformers", 
"stable-baselines3", "ml-agents", "sample-factory"], + "sentence-similarity": ["sentence-transformers", "spacy", "transformers.js"], + summarization: ["transformers", "transformers.js"], + "table-question-answering": ["transformers"], + "table-to-text": ["transformers"], + "tabular-classification": ["sklearn"], + "tabular-regression": ["sklearn"], + "tabular-to-text": ["transformers"], + "text-classification": ["adapter-transformers", "setfit", "spacy", "transformers", "transformers.js"], + "text-generation": ["transformers", "transformers.js"], + "text-retrieval": [], + "text-to-image": ["diffusers"], + "text-to-speech": ["espnet", "tensorflowtts", "transformers", "transformers.js"], + "text-to-audio": ["transformers", "transformers.js"], + "text-to-video": ["diffusers"], + "text2text-generation": ["transformers", "transformers.js"], + "time-series-forecasting": [], + "token-classification": [ + "adapter-transformers", + "flair", + "spacy", + "span-marker", + "stanza", + "transformers", + "transformers.js", + ], + translation: ["transformers", "transformers.js"], + "unconditional-image-generation": ["diffusers"], + "visual-question-answering": ["transformers", "transformers.js"], + "voice-activity-detection": [], + "zero-shot-classification": ["transformers", "transformers.js"], + "zero-shot-image-classification": ["transformers", "transformers.js"], + "zero-shot-object-detection": ["transformers", "transformers.js"], + "text-to-3d": ["diffusers"], + "image-to-3d": ["diffusers"], +}; + +/** + * Return the whole TaskData object for a certain task. + * If the partialTaskData argument is left undefined, + * the default placholder data will be used. 
+ */ +function getData(type: PipelineType, partialTaskData: TaskDataCustom = placeholder): TaskData { + return { + ...partialTaskData, + id: type, + label: PIPELINE_DATA[type].name, + libraries: TASKS_MODEL_LIBRARIES[type], + }; +} + +// To make comparisons easier, task order is the same as in const.ts +// Tasks set to undefined won't have an associated task page. +// Tasks that call getData() without the second argument will +// have a "placeholder" page. +export const TASKS_DATA: Record = { + "audio-classification": getData("audio-classification", audioClassification), + "audio-to-audio": getData("audio-to-audio", audioToAudio), + "automatic-speech-recognition": getData("automatic-speech-recognition", automaticSpeechRecognition), + "depth-estimation": getData("depth-estimation", depthEstimation), + "document-question-answering": getData("document-question-answering", documentQuestionAnswering), + "feature-extraction": getData("feature-extraction", featureExtraction), + "fill-mask": getData("fill-mask", fillMask), + "graph-ml": undefined, + "image-classification": getData("image-classification", imageClassification), + "image-feature-extraction": getData("image-feature-extraction", imageFeatureExtraction), + "image-segmentation": getData("image-segmentation", imageSegmentation), + "image-to-image": getData("image-to-image", imageToImage), + "image-text-to-text": getData("image-text-to-text", imageTextToText), + "image-to-text": getData("image-to-text", imageToText), + "image-to-video": undefined, + "mask-generation": getData("mask-generation", maskGeneration), + "multiple-choice": undefined, + "object-detection": getData("object-detection", objectDetection), + "video-classification": getData("video-classification", videoClassification), + other: undefined, + "question-answering": getData("question-answering", questionAnswering), + "reinforcement-learning": getData("reinforcement-learning", reinforcementLearning), + robotics: undefined, + "sentence-similarity": 
getData("sentence-similarity", sentenceSimilarity), + summarization: getData("summarization", summarization), + "table-question-answering": getData("table-question-answering", tableQuestionAnswering), + "table-to-text": undefined, + "tabular-classification": getData("tabular-classification", tabularClassification), + "tabular-regression": getData("tabular-regression", tabularRegression), + "tabular-to-text": undefined, + "text-classification": getData("text-classification", textClassification), + "text-generation": getData("text-generation", textGeneration), + "text-retrieval": undefined, + "text-to-image": getData("text-to-image", textToImage), + "text-to-speech": getData("text-to-speech", textToSpeech), + "text-to-audio": undefined, + "text-to-video": getData("text-to-video", textToVideo), + "text2text-generation": undefined, + "time-series-forecasting": undefined, + "token-classification": getData("token-classification", tokenClassification), + translation: getData("translation", translation), + "unconditional-image-generation": getData("unconditional-image-generation", unconditionalImageGeneration), + "visual-question-answering": getData("visual-question-answering", visualQuestionAnswering), + "voice-activity-detection": undefined, + "zero-shot-classification": getData("zero-shot-classification", zeroShotClassification), + "zero-shot-image-classification": getData("zero-shot-image-classification", zeroShotImageClassification), + "zero-shot-object-detection": getData("zero-shot-object-detection", zeroShotObjectDetection), + "text-to-3d": getData("text-to-3d", textTo3D), + "image-to-3d": getData("image-to-3d", imageTo3D), +} as const; + +export interface ExampleRepo { + description: string; + id: string; +} + +export type TaskDemoEntry = + | { + filename: string; + type: "audio"; + } + | { + data: Array<{ + label: string; + score: number; + }>; + type: "chart"; + } + | { + filename: string; + type: "img"; + } + | { + table: string[][]; + type: "tabular"; + } + | 
{ + content: string; + label: string; + type: "text"; + } + | { + text: string; + tokens: Array<{ + end: number; + start: number; + type: string; + }>; + type: "text-with-tokens"; + }; + +export interface TaskDemo { + inputs: TaskDemoEntry[]; + outputs: TaskDemoEntry[]; +} + +export interface TaskData { + datasets: ExampleRepo[]; + demo: TaskDemo; + id: PipelineType; + canonicalId?: PipelineType; + isPlaceholder?: boolean; + label: string; + libraries: ModelLibraryKey[]; + metrics: ExampleRepo[]; + models: ExampleRepo[]; + spaces: ExampleRepo[]; + summary: string; + widgetModels: string[]; + youtubeId?: string; +} + +export type TaskDataCustom = Omit; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/mask-generation/about.md b/data/node_modules/@huggingface/tasks/src/tasks/mask-generation/about.md new file mode 100644 index 0000000000000000000000000000000000000000..18d6d38db4cd88e36cd92dbe2cc841495719efbe --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/mask-generation/about.md @@ -0,0 +1,65 @@ +## Use Cases + +### Filtering an Image + +When filtering for an image, the generated masks might serve as an initial filter to eliminate irrelevant information. For instance, when monitoring vegetation in satellite imaging, mask generation models identify green spots, highlighting the relevant region of the image. + +### Masked Image Modelling + +Generating masks can facilitate learning, especially in semi or unsupervised learning. For example, the [BEiT model](https://huggingface.co/docs/transformers/model_doc/beit) uses image-mask patches in the pre-training. + +### Human-in-the-loop Computer Vision Applications + +For applications where humans are in the loop, masks highlight certain regions of images for humans to validate. + +## Task Variants + +### Segmentation + +Image Segmentation divides an image into segments where each pixel is mapped to an object. 
This task has multiple variants, such as instance segmentation, panoptic segmentation, and semantic segmentation. You can learn more about segmentation on its [task page](https://huggingface.co/tasks/image-segmentation). + +## Inference + +Mask generation models often work in two modes: segment everything or prompt mode. +The example below works in segment-everything-mode, where many masks will be returned. + +```python +from transformers import pipeline + +generator = pipeline("mask-generation", model="Zigeng/SlimSAM-uniform-50", points_per_batch=64, device="cuda") +image_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" +outputs = generator(image_url) +outputs["masks"] +# array of multiple binary masks returned for each generated mask +``` + +Prompt mode takes in three types of prompts: + +- **Point prompt:** The user can select a point on the image, and a meaningful segment around the point will be returned. +- **Box prompt:** The user can draw a box on the image, and a meaningful segment within the box will be returned. +- **Text prompt:** The user can input a text, and the objects of that type will be segmented. Note that this capability has not yet been released and has only been explored in research. + +Below you can see how to use an input-point prompt. It also demonstrates direct model inference without the `pipeline` abstraction. The input prompt here is a nested list where the outermost list is the batch size (`1`), then the number of points (also `1` in this example), and the innermost list contains the actual coordinates of the point (`[450, 600]`). 
+ +```python +from transformers import SamModel, SamProcessor +from PIL import Image +import requests + +model = SamModel.from_pretrained("Zigeng/SlimSAM-uniform-50").to("cuda") +processor = SamProcessor.from_pretrained("Zigeng/SlimSAM-uniform-50") + +raw_image = Image.open(requests.get(image_url, stream=True).raw).convert("RGB") +# pointing to the car window +input_points = [[[450, 600]]] +inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to("cuda") +outputs = model(**inputs) +masks = processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) +scores = outputs.iou_scores +``` + +## Useful Resources + +Would you like to learn more about mask generation? Great! Here you can find some curated resources that you may find helpful! + +- [Segment anything model](https://huggingface.co/docs/transformers/main/model_doc/sam) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/mask-generation/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/mask-generation/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..05f542dfbb02943eacc8a73251f9947b9b48ace2 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/mask-generation/data.ts @@ -0,0 +1,55 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [], + demo: { + inputs: [ + { + filename: "mask-generation-input.png", + type: "img", + }, + ], + outputs: [ + { + filename: "mask-generation-output.png", + type: "img", + }, + ], + }, + metrics: [], + models: [ + { + description: "Small yet powerful mask generation model.", + id: "Zigeng/SlimSAM-uniform-50", + }, + { + description: "Very strong mask generation model.", + id: "facebook/sam-vit-huge", + }, + ], + spaces: [ + { + description: + "An application that combines a mask generation model with an image embedding model for open-vocabulary image segmentation.", + id: "SkalskiP/SAM_and_MetaCLIP", + 
}, + { + description: "An application that compares the performance of a large and a small mask generation model.", + id: "merve/slimsam", + }, + { + description: "An application based on an improved mask generation model.", + id: "linfanluntan/Grounded-SAM", + }, + { + description: "An application to remove objects from videos using mask generation models.", + id: "SkalskiP/SAM_and_ProPainter", + }, + ], + summary: + "Mask generation is the task of generating masks that identify a specific object or region of interest in a given image. Masks are often used in segmentation tasks, where they provide a precise way to isolate the object of interest for further processing or analysis.", + widgetModels: [], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/object-detection/about.md b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/about.md new file mode 100644 index 0000000000000000000000000000000000000000..4dda21224f937a27a5f56d40ee877ff03eaf1d09 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/about.md @@ -0,0 +1,37 @@ +## Use Cases + +### Autonomous Driving + +Object Detection is widely used in computer vision for autonomous driving. Self-driving cars use Object Detection models to detect pedestrians, bicycles, traffic lights and road signs to decide which step to take. + +### Object Tracking in Matches + +Object Detection models are widely used in sports where the ball or a player is tracked for monitoring and refereeing during matches. + +### Image Search + +Object Detection models are widely used in image search. Smartphones use Object Detection models to detect entities (such as specific places or objects) and allow the user to search for the entity on the Internet. 
+ +### Object Counting + +Object Detection models are used to count instances of objects in a given image, this can include counting the objects in warehouses or stores, or counting the number of visitors in a store. They are also used to manage crowds at events to prevent disasters. + +## Inference + +You can infer with Object Detection models through the `object-detection` pipeline. When calling the pipeline you just need to specify a path or http link to an image. + +```python +model = pipeline("object-detection") + +model("path_to_cat_image") + +# [{'label': 'blanket', +# 'mask': mask_string, +# 'score': 0.917}, +#...] +``` + +# Useful Resources + +- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8) +- [Object detection task guide](https://huggingface.co/docs/transformers/tasks/object_detection) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/object-detection/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..10a25cda706e2995db06f18b541c5bb718982ff9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/data.ts @@ -0,0 +1,86 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Widely used benchmark dataset for multiple vision tasks.", + id: "merve/coco2017", + }, + { + description: "Multi-task computer vision benchmark.", + id: "merve/pascal-voc", + }, + ], + demo: { + inputs: [ + { + filename: "object-detection-input.jpg", + type: "img", + }, + ], + outputs: [ + { + filename: "object-detection-output.jpg", + type: "img", + }, + ], + }, + metrics: [ + { + description: + "The Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). 
It is calculated for each class separately", + id: "Average Precision", + }, + { + description: "The Mean Average Precision (mAP) metric is the overall average of the AP values", + id: "Mean Average Precision", + }, + { + description: + "The APα metric is the Average Precision at the IoU threshold of a α value, for example, AP50 and AP75", + id: "APα", + }, + ], + models: [ + { + description: "Solid object detection model trained on the benchmark dataset COCO 2017.", + id: "facebook/detr-resnet-50", + }, + { + description: "Strong object detection model trained on ImageNet-21k dataset.", + id: "microsoft/beit-base-patch16-224-pt22k-ft22k", + }, + { + description: "Fast and accurate object detection model trained on COCO dataset.", + id: "PekingU/rtdetr_r18vd_coco_o365", + }, + ], + spaces: [ + { + description: "Leaderboard to compare various object detection models across several metrics.", + id: "hf-vision/object_detection_leaderboard", + }, + { + description: "An application that contains various object detection models to try from.", + id: "Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS", + }, + { + description: "An application that shows multiple cutting edge techniques for object detection and tracking.", + id: "kadirnar/torchyolo", + }, + { + description: "An object tracking, segmentation and inpainting application.", + id: "VIPLab/Track-Anything", + }, + { + description: "Very fast object tracking application based on object detection.", + id: "merve/RT-DETR-tracking-coco", + }, + ], + summary: + "Object Detection models allow users to identify objects of certain defined classes. 
Object detection models receive an image as input and output the images with bounding boxes and labels on detected objects.", + widgetModels: ["facebook/detr-resnet-50"], + youtubeId: "WdAeKSOpxhw", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/object-detection/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..d1765ad0054536c3da0b0889ff79d2884d2d3182 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/inference.ts @@ -0,0 +1,62 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Object Detection inference + */ +export interface ObjectDetectionInput { + /** + * The input image data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: ObjectDetectionParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Object Detection + */ +export interface ObjectDetectionParameters { + /** + * The probability necessary to make a prediction. + */ + threshold?: number; + [property: string]: unknown; +} +/** + * The predicted bounding box. Coordinates are relative to the top left corner of the input + * image. + */ +export interface BoundingBox { + xmax: number; + xmin: number; + ymax: number; + ymin: number; + [property: string]: unknown; +} +export type ObjectDetectionOutput = ObjectDetectionOutputElement[]; +/** + * Outputs of inference for the Object Detection task + */ +export interface ObjectDetectionOutputElement { + /** + * The predicted bounding box. Coordinates are relative to the top left corner of the input + * image. 
+ */ + box: BoundingBox; + /** + * The predicted label for the bounding box + */ + label: string; + /** + * The associated score / probability + */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/object-detection/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..b694f2fa52962600d7627d1c9acf3602e4438891 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/spec/input.json @@ -0,0 +1,30 @@ +{ + "$id": "/inference/schemas/object-detection/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Object Detection inference", + "title": "ObjectDetectionInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input image data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/ObjectDetectionParameters" + } + }, + "$defs": { + "ObjectDetectionParameters": { + "title": "ObjectDetectionParameters", + "description": "Additional inference parameters for Object Detection", + "type": "object", + "properties": { + "threshold": { + "type": "number", + "description": "The probability necessary to make a prediction." 
+ } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/object-detection/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..20c92d5d30b219f4ac1117874ea0020d59e4a822 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/object-detection/spec/output.json @@ -0,0 +1,46 @@ +{ + "$id": "/inference/schemas/object-detection/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Object Detection task", + "title": "ObjectDetectionOutput", + "type": "array", + "items": { + "type": "object", + "properties": { + "label": { + "type": "string", + "description": "The predicted label for the bounding box" + }, + "score": { + "type": "number", + "description": "The associated score / probability" + }, + "box": { + "$ref": "#/$defs/BoundingBox", + "description": "The predicted bounding box. Coordinates are relative to the top left corner of the input image." + } + }, + "required": ["box", "label", "score"] + }, + "$defs": { + "BoundingBox": { + "type": "object", + "title": "BoundingBox", + "properties": { + "xmin": { + "type": "integer" + }, + "xmax": { + "type": "integer" + }, + "ymin": { + "type": "integer" + }, + "ymax": { + "type": "integer" + } + }, + "required": ["xmin", "xmax", "ymin", "ymax"] + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/placeholder/about.md b/data/node_modules/@huggingface/tasks/src/tasks/placeholder/about.md new file mode 100644 index 0000000000000000000000000000000000000000..fdb45584410dcd07e530607d469140038ede6b25 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/placeholder/about.md @@ -0,0 +1,15 @@ +## Use Cases + +You can contribute this area with common use cases of the task! + +## Task Variants + +This place can be filled with variants of this task if there's any. 
+ +## Inference + +This section should have useful information about how to pull a model from Hugging Face Hub that is a part of a library specialized in a task and use it. + +## Useful Resources + +In this area, you can insert useful resources about how to train or use a model for this task. diff --git a/data/node_modules/@huggingface/tasks/src/tasks/placeholder/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/placeholder/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..110b43703e5e9865db1551985ad588fa8cb7ce04 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/placeholder/data.ts @@ -0,0 +1,21 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [], + demo: { + inputs: [], + outputs: [], + }, + isPlaceholder: true, + metrics: [], + models: [], + spaces: [], + summary: "", + widgetModels: [], + youtubeId: undefined, + /// If this is a subtask, link to the most general task ID + /// (eg, text2text-generation is the canonical ID of translation) + canonicalId: undefined, +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/placeholder/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/placeholder/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..d31f4aac619900220d154523a6c5abc4b37f10c1 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/placeholder/spec/input.json @@ -0,0 +1,35 @@ +{ + "$id": "/inference/schemas//input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for inference", + "title": "PlaceholderInput", + "type": "object", + "properties": { + "inputs": { + "description": "TODO: describe the input here. 
This must be model & framework agnostic.", + "type": "string" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/Parameters" + } + }, + "$defs": { + "Parameters": { + "title": "Parameters", + "description": "TODO: describe additional parameters here.", + "type": "object", + "properties": { + "dummy_parameter_name": { + "type": "boolean", + "description": "TODO: describe the parameter here" + }, + "dummy_parameter_name2": { + "type": "integer", + "description": "TODO: describe the parameter here" + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/placeholder/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/placeholder/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..697c6e2672a45f10abc4ba5554e38e7352bb807d --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/placeholder/spec/output.json @@ -0,0 +1,17 @@ +{ + "$id": "/inference/schemas//output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs for inference", + "title": "PlaceholderOutput", + "type": "array", + "items": { + "type": "object", + "properties": { + "meaningful_output_name": { + "type": "string", + "description": "TODO: Describe what is output by the inference here" + } + }, + "required": ["meaningful_output_name"] + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/question-answering/about.md b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/about.md new file mode 100644 index 0000000000000000000000000000000000000000..d5934ee80c7ca32c53726ef5b9e715af54a0b5d6 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/about.md @@ -0,0 +1,56 @@ +## Use Cases + +### Frequently Asked Questions + +You can use Question Answering (QA) models to automate the response to frequently asked questions by using a knowledge base (documents) as context.
Answers to customer questions can be drawn from those documents. + +⚡⚡ If you’d like to save inference time, you can first use [passage ranking models](/tasks/sentence-similarity) to see which document might contain the answer to the question and iterate over that document with the QA model instead. + +## Task Variants +There are different QA variants based on the inputs and outputs: + +- **Extractive QA:** The model **extracts** the answer from a context. The context here could be a provided text, a table or even HTML! This is usually solved with BERT-like models. +- **Open Generative QA:** The model **generates** free text directly based on the context. You can learn more about the Text Generation task in [its page](/tasks/text-generation). +- **Closed Generative QA:** In this case, no context is provided. The answer is completely generated by a model. + +The schema above illustrates extractive, open book QA. The model takes a context and the question and extracts the answer from the given context. + +You can also differentiate QA models depending on whether they are open-domain or closed-domain. Open-domain models are not restricted to a specific domain, while closed-domain models are restricted to a specific domain (e.g. legal, medical documents). + +## Inference + +You can infer with QA models with the 🤗 Transformers library using the `question-answering` pipeline. If no model checkpoint is given, the pipeline will be initialized with `distilbert-base-cased-distilled-squad`. This pipeline takes a question and a context from which the answer will be extracted and returned. + +```python +from transformers import pipeline + +qa_model = pipeline("question-answering") +question = "Where do I live?" +context = "My name is Merve and I live in İstanbul." +qa_model(question = question, context = context) +## {'answer': 'İstanbul', 'end': 39, 'score': 0.953, 'start': 31} +``` + +## Useful Resources + +Would you like to learn more about QA? Awesome! 
Here are some curated resources that you may find helpful! + +- [Course Chapter on Question Answering](https://huggingface.co/course/chapter7/7?fw=pt) +- [Question Answering Workshop](https://www.youtube.com/watch?v=Ihgk8kGLpIE&ab_channel=HuggingFace) +- [How to Build an Open-Domain Question Answering System?](https://lilianweng.github.io/lil-log/2020/10/29/open-domain-question-answering.html) +- [Blog Post: ELI5 A Model for Open Domain Long Form Question Answering](https://yjernite.github.io/lfqa.html) + +### Notebooks + +- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/question_answering.ipynb) +- [TensorFlow](https://github.com/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) +- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering) + +### Documentation + +- [Question answering task guide](https://huggingface.co/docs/transformers/tasks/question_answering) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/question-answering/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..f80e138f6bbc362313717bbe727ca0955ad57a32 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/data.ts @@ -0,0 +1,71 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "A famous question answering dataset based on English articles from Wikipedia.", + id: "squad_v2", + }, + { + // TODO write proper description + description: "A dataset of aggregated anonymized actual queries issued to the Google search engine.", + id: 
"natural_questions", + }, + ], + demo: { + inputs: [ + { + label: "Question", + content: "Which name is also used to describe the Amazon rainforest in English?", + type: "text", + }, + { + label: "Context", + content: "The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle", + type: "text", + }, + ], + outputs: [ + { + label: "Answer", + content: "Amazonia", + type: "text", + }, + ], + }, + metrics: [ + { + description: + "Exact Match is a metric based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. Even if only one character is different, Exact Match will be 0", + id: "exact-match", + }, + { + description: + " The F1-Score metric is useful if we value both false positives and false negatives equally. The F1-Score is calculated on each word in the predicted sequence against the correct answer", + id: "f1", + }, + ], + models: [ + { + description: "A robust baseline model for most question answering domains.", + id: "deepset/roberta-base-squad2", + }, + { + description: "A special model that can answer questions from tables!", + id: "google/tapas-base-finetuned-wtq", + }, + ], + spaces: [ + { + description: "An application that can answer a long question from Wikipedia.", + id: "deepset/wikipedia-assistant", + }, + ], + summary: + "Question Answering models can retrieve the answer to a question from a given text, which is useful for searching for an answer in a document. 
Some question answering models can generate answers without context!", + widgetModels: ["deepset/roberta-base-squad2"], + youtubeId: "ajPx5LwJD-I", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/question-answering/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..eaef8dfe3170ec4f790bdcdaaa0a07dd8aae7d76 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/inference.ts @@ -0,0 +1,99 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Question Answering inference + */ +export interface QuestionAnsweringInput { + /** + * One (context, question) pair to answer + */ + inputs: QuestionAnsweringInputData; + /** + * Additional inference parameters + */ + parameters?: QuestionAnsweringParameters; + [property: string]: unknown; +} +/** + * One (context, question) pair to answer + */ +export interface QuestionAnsweringInputData { + /** + * The context to be used for answering the question + */ + context: string; + /** + * The question to be answered + */ + question: string; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Question Answering + */ +export interface QuestionAnsweringParameters { + /** + * Attempts to align the answer to real words. Improves quality on space separated + * languages. Might hurt on non-space-separated languages (like Japanese or Chinese) + */ + align_to_words?: boolean; + /** + * If the context is too long to fit with the question for the model, it will be split in + * several chunks with some overlap. This argument controls the size of that overlap. + */ + doc_stride?: number; + /** + * Whether to accept impossible as an answer. 
+ */ + handle_impossible_answer?: boolean; + /** + * The maximum length of predicted answers (e.g., only answers with a shorter length are + * considered). + */ + max_answer_len?: number; + /** + * The maximum length of the question after tokenization. It will be truncated if needed. + */ + max_question_len?: number; + /** + * The maximum length of the total sentence (context + question) in tokens of each chunk + * passed to the model. The context will be split in several chunks (using docStride as + * overlap) if needed. + */ + max_seq_len?: number; + /** + * The number of answers to return (will be chosen by order of likelihood). Note that we + * return less than topk answers if there are not enough options available within the + * context. + */ + top_k?: number; + [property: string]: unknown; +} +export type QuestionAnsweringOutput = QuestionAnsweringOutputElement[]; +/** + * Outputs of inference for the Question Answering task + */ +export interface QuestionAnsweringOutputElement { + /** + * The answer to the question. + */ + answer: string; + /** + * The character position in the input where the answer ends. + */ + end: number; + /** + * The probability associated to the answer. + */ + score: number; + /** + * The character position in the input where the answer begins. 
+ */ + start: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/question-answering/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..70d5607cffcb93e728a987ca0da384c2c813dc21 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/spec/input.json @@ -0,0 +1,67 @@ +{ + "$id": "/inference/schemas/question-answering/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Question Answering inference", + "title": "QuestionAnsweringInput", + "type": "object", + "properties": { + "inputs": { + "title": "QuestionAnsweringInputData", + "description": "One (context, question) pair to answer", + "type": "object", + "properties": { + "context": { + "type": "string", + "description": "The context to be used for answering the question" + }, + "question": { + "type": "string", + "description": "The question to be answered" + } + }, + "required": ["question", "context"] + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/QuestionAnsweringParameters" + } + }, + "$defs": { + "QuestionAnsweringParameters": { + "title": "QuestionAnsweringParameters", + "description": "Additional inference parameters for Question Answering", + "type": "object", + "properties": { + "top_k": { + "type": "integer", + "description": "The number of answers to return (will be chosen by order of likelihood). Note that we return less than topk answers if there are not enough options available within the context." + }, + "doc_stride": { + "type": "integer", + "description": "If the context is too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap." 
+ }, + "max_answer_len": { + "type": "integer", + "description": "The maximum length of predicted answers (e.g., only answers with a shorter length are considered)." + }, + "max_seq_len": { + "type": "integer", + "description": "The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using docStride as overlap) if needed." + }, + "max_question_len": { + "type": "integer", + "description": "The maximum length of the question after tokenization. It will be truncated if needed." + }, + "handle_impossible_answer": { + "type": "boolean", + "description": "Whether to accept impossible as an answer." + }, + "align_to_words": { + "type": "boolean", + "description": "Attempts to align the answer to real words. Improves quality on space separated languages. Might hurt on non-space-separated languages (like Japanese or Chinese)" + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/question-answering/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..9da8f988ad21668db65d18e22cb105a0da96a63d --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/question-answering/spec/output.json @@ -0,0 +1,29 @@ +{ + "$id": "/inference/schemas/question-answering/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "title": "QuestionAnsweringOutput", + "description": "Outputs of inference for the Question Answering task", + "type": "array", + "items": { + "type": "object", + "properties": { + "answer": { + "type": "string", + "description": "The answer to the question." + }, + "score": { + "type": "number", + "description": "The probability associated to the answer." + }, + "start": { + "type": "integer", + "description": "The character position in the input where the answer begins." 
+ }, + "end": { + "type": "integer", + "description": "The character position in the input where the answer ends." + } + }, + "required": ["answer", "score", "start", "end"] + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/reinforcement-learning/about.md b/data/node_modules/@huggingface/tasks/src/tasks/reinforcement-learning/about.md new file mode 100644 index 0000000000000000000000000000000000000000..13f79cfff65cea30b22a4c667b8f83964f7c00f5 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/reinforcement-learning/about.md @@ -0,0 +1,167 @@ +## Use Cases + +### Gaming + +Reinforcement learning is known for its application to video games. Since the games provide a safe environment for the agent to be trained in the sense that it is perfectly defined and controllable, this makes them perfect candidates for experimentation and will help a lot to learn about the capabilities and limitations of various RL algorithms. + +There are many videos on the Internet where a game-playing reinforcement learning agent starts with a terrible gaming strategy due to random initialization of its settings, but over iterations, the agent gets better and better with each episode of the training. This [paper](https://arxiv.org/abs/1912.10944) mainly investigates the performance of RL in popular games such as Minecraft or Dota2. The agent's performance can exceed a human player's, although there are still some challenges mainly related to efficiency in constructing the gaming policy of the reinforcement learning agent. + +### Trading and Finance + +Reinforcement learning is the science to train computers to make decisions and thus has a novel use in trading and finance. All time-series models are helpful in predicting prices, volume and future sales of a product or a stock. Reinforcement based automated agents can decide to sell, buy or hold a stock. It shifts the impact of AI in this field to real time decision making rather than just prediction of prices. 
The glossary given below will clarify some parameters as to how we can train a model to take these decisions. + +## Task Variants + +### Model Based RL + +Model based reinforcement learning techniques intend to create a model of the environment, learn the state transition probabilities and the reward function, to find the optimal action. Some typical examples of model based reinforcement learning algorithms are dynamic programming, value iteration and policy iteration. + +### Model Free RL + +In model free reinforcement learning, the agent decides on optimal actions based on its experience in the environment and the reward it collects from it. This is one of the most commonly used algorithms beneficial in complex environments, where modeling of state transition probabilities and reward functions is difficult. Some of the examples of model free reinforcement learning are SARSA, Q-Learning, actor-critic and proximal policy optimization (PPO) algorithms. + +## Glossary + + + +**Agent:** The learner and the decision maker. + +**Environment:** The part of the world the agent interacts with, comprising everything outside the agent. + +Observations and states are the information our agent gets from the environment. In the case of a video game, it can be a frame (a screenshot). In the case of the trading agent, it can be the value of a certain stock. + +**State:** Complete description of the state of the environment with no hidden information. + +**Observation:** Partial description of the state, in a partially observed environment. + +**Action:** The decision taken by the agent. + +**Reward:** The numerical feedback signal that the agent receives from the environment based on the chosen action. + +**Return:** Cumulative Reward. In the simplest case, the return is the sum of the rewards. + +**Episode:** For some applications there is a natural notion of final time step. In this case, there is a starting point and an ending point (a terminal state).
This creates an episode: a list of States, Actions, Rewards, and new States. For instance, think about Chess: an episode begins at the initial board position and ends when the game is over. + +**Policy:** The Policy is the brain of the Agent, it’s the function that tells what action to take given the state. So it defines the agent’s behavior at a given time. Reinforcement learning methods specify how the agent’s policy is changed as a result of its experience. + +## Inference + +Inference in reinforcement learning differs from other modalities, in which there's a model and test data. In reinforcement learning, once you have trained an agent in an environment, you try to run the trained agent for additional steps to get the average reward. + +A typical training cycle consists of gathering experience from the environment, training the agent, and running the agent on a test environment to obtain average reward. Below there's a snippet on how you can interact with the environment using the `gymnasium` library, train an agent using `stable-baselines3`, evaluate the agent on a test environment and infer actions from the trained agent.
+ +```python +# Here we are running 20 episodes of CartPole-v1 environment, taking random actions +import gymnasium as gym + +env = gym.make("CartPole-v1") +observation, info = env.reset() + +for _ in range(20): + action = env.action_space.sample() # samples random action from action sample space + + # the agent takes the action + observation, reward, terminated, truncated, info = env.step(action) + + +# if the agent reaches terminal state, we reset the environment +if terminated or truncated: + + print("Environment is reset") + observation = env.reset() + +env.close() +``` + +Below snippet shows how to train a PPO model on LunarLander-v2 environment using `stable-baselines3` library and saving the model + +```python +from stable_baselines3 import PPO + +# initialize the environment + +env = gym.make("LunarLander-v2") + +# initialize the model + +model = PPO(policy = "MlpPolicy", + env = env, + n_steps = 1024, + batch_size = 64, + n_epochs = 4, + verbose = 1) + +# train the model for 1000 time steps +model.learn(total_timesteps = 1000) + +# Saving the model in desired directory +model_name = "PPO-LunarLander-v2" +model.save(model_name) +``` + +Below code shows how to evaluate an agent trained using `stable-baselines3` + +```python +# Loading a saved model and evaluating the model for 10 episodes +from stable_baselines3.common.evaluation import evaluate_policy +from stable_baselines3 import PPO + + +env = gym.make("LunarLander-v2") +# Loading the saved model +model = PPO.load("PPO-LunarLander-v2",env=env) + +# Initializating the evaluation environment +eval_env = gym.make("LunarLander-v2") + +# Running the trained agent on eval_env for 10 time steps and getting the mean reward +mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes = 10, + deterministic=True) + +print(f"mean_reward={mean_reward:.2f} +/- {std_reward}") +``` + +Below code snippet shows how to infer actions from an agent trained using `stable-baselines3` + +```python +from 
stable_baselines3.common.evaluation import evaluate_policy +from stable_baselines3 import PPO + +# Loading the saved model +model = PPO.load("PPO-LunarLander-v2",env=env) + +# Getting the environment from the trained agent +env = model.get_env() + +obs = env.reset() +for i in range(1000): + # getting action predictions from the trained agent + action, _states = model.predict(obs, deterministic=True) + + # taking the predicted action in the environment to observe next state and rewards + obs, rewards, dones, info = env.step(action) +``` + +For more information, you can check out the documentations of the respective libraries. + +[Gymnasium Documentation](https://gymnasium.farama.org/) +[Stable Baselines Documentation](https://stable-baselines3.readthedocs.io/en/master/) + +## Useful Resources + +Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful! + +- [HuggingFace Deep Reinforcement Learning Class](https://github.com/huggingface/deep-rl-class) +- [Introduction to Deep Reinforcement Learning](https://huggingface.co/blog/deep-rl-intro) +- [Stable Baselines Integration with HuggingFace](https://huggingface.co/blog/sb3) +- Learn how reinforcement learning is used in conversational agents in this blog: [Illustrating Reinforcement Learning from Human Feedback (RLHF)](https://huggingface.co/blog/rlhf) +- [Reinforcement Learning from Human Feedback From Zero to ChatGPT](https://www.youtube.com/watch?v=EAd4oQtEJOM) +- [Guide on Multi-Agent Competition Systems](https://huggingface.co/blog/aivsai) + +### Notebooks + +- [Train a Deep Reinforcement Learning lander agent to land correctly on the Moon 🌕 using Stable-Baselines3](https://github.com/huggingface/deep-rl-class/blob/main/notebooks/unit1/unit1.ipynb) +- [Introduction to Unity MLAgents](https://github.com/huggingface/deep-rl-class/blob/main/notebooks/unit5/unit5.ipynb) +- [Training Decision Transformers with 🤗 
transformers](https://github.com/huggingface/blog/blob/main/notebooks/101_train-decision-transformers.ipynb) + +This page was made possible thanks to the efforts of [Ram Ananth](https://huggingface.co/RamAnanth1), [Emilio Lehoucq](https://huggingface.co/emiliol), [Sagar Mathpal](https://huggingface.co/sagarmathpal) and [Osman Alenbey](https://huggingface.co/osman93). diff --git a/data/node_modules/@huggingface/tasks/src/tasks/reinforcement-learning/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/reinforcement-learning/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..71290d67776c2373b8a20c9f2c434ba75f7b08f5 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/reinforcement-learning/data.ts @@ -0,0 +1,75 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A curation of widely used datasets for Data Driven Deep Reinforcement Learning (D4RL)", + id: "edbeeching/decision_transformer_gym_replay", + }, + ], + demo: { + inputs: [ + { + label: "State", + content: "Red traffic light, pedestrians are about to pass.", + type: "text", + }, + ], + outputs: [ + { + label: "Action", + content: "Stop the car.", + type: "text", + }, + { + label: "Next State", + content: "Yellow light, pedestrians have crossed.", + type: "text", + }, + ], + }, + metrics: [ + { + description: + "Accumulated reward across all time steps discounted by a factor that ranges between 0 and 1 and determines how much the agent optimizes for future relative to immediate rewards. Measures how good is the policy ultimately found by a given algorithm considering uncertainty over the future.", + id: "Discounted Total Reward", + }, + { + description: + "Average return obtained after running the policy for a certain number of evaluation episodes. 
As opposed to total reward, mean reward considers how much reward a given algorithm receives while learning.", + id: "Mean Reward", + }, + { + description: + "Measures how good a given algorithm is after a predefined time. Some algorithms may be guaranteed to converge to optimal behavior across many time steps. However, an agent that reaches an acceptable level of optimality after a given time horizon may be preferable to one that ultimately reaches optimality but takes a long time.", + id: "Level of Performance After Some Time", + }, + ], + models: [ + { + description: "A Reinforcement Learning model trained on expert data from the Gym Hopper environment", + + id: "edbeeching/decision-transformer-gym-hopper-expert", + }, + { + description: "A PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and the RL Zoo.", + id: "HumanCompatibleAI/ppo-seals-CartPole-v0", + }, + ], + spaces: [ + { + description: "An application for a cute puppy agent learning to catch a stick.", + id: "ThomasSimonini/Huggy", + }, + { + description: "An application to play Snowball Fight with a reinforcement learning agent.", + id: "ThomasSimonini/SnowballFight", + }, + ], + summary: + "Reinforcement learning is the computational approach of learning from action by interacting with an environment through trial and error and receiving rewards (negative or positive) as feedback", + widgetModels: [], + youtubeId: "q0BiUn5LiBc", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/about.md b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/about.md new file mode 100644 index 0000000000000000000000000000000000000000..c8cd912135858ab7626ccc1885895d2527e227d8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/about.md @@ -0,0 +1,97 @@ +## Use Cases 🔍 + +### Information Retrieval + +You can extract information from documents using Sentence Similarity models. 
The first step is to rank documents using Passage Ranking models. You can then get to the top ranked document and search it with Sentence Similarity models by selecting the sentence that has the most similarity to the input query. + +## The Sentence Transformers library + +The [Sentence Transformers](https://www.sbert.net/) library is very powerful for calculating embeddings of sentences, paragraphs, and entire documents. An embedding is just a vector representation of a text and is useful for finding how similar two texts are. + +You can find and use [hundreds of Sentence Transformers](https://huggingface.co/models?library=sentence-transformers&sort=downloads) models from the Hub by directly using the library, playing with the widgets in the browser or using Inference Endpoints. + +## Task Variants + +### Passage Ranking + +Passage Ranking is the task of ranking documents based on their relevance to a given query. The task is evaluated on Mean Reciprocal Rank. These models take one query and multiple documents and return ranked documents according to the relevancy to the query. 📄 + +You can infer with Passage Ranking models using [Inference Endpoints](https://huggingface.co/inference-endpoints). The Passage Ranking model inputs are a query for which we look for relevancy in the documents and the documents we want to search. The model will return scores according to the relevancy of these documents for the query. 
+ +```python +import json +import requests + +API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/msmarco-distilbert-base-tas-b" +headers = {"Authorization": f"Bearer {api_token}"} + +def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +data = query( + { + "inputs": { + "source_sentence": "That is a happy person", + "sentences": [ + "That is a happy dog", + "That is a very happy person", + "Today is a sunny day" + ] + } + } +## [0.853, 0.981, 0.655] +``` + +### Semantic Textual Similarity + +Semantic Textual Similarity is the task of evaluating how similar two texts are in terms of meaning. These models take a source sentence and a list of sentences in which we will look for similarities and will return a list of similarity scores. The benchmark dataset is the [Semantic Textual Similarity Benchmark](http://ixa2.si.ehu.eus/stswiki/index.php/STSbenchmark). The task is evaluated on Pearson’s Rank Correlation. + +```python +import json +import requests + +API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/all-MiniLM-L6-v2" +headers = {"Authorization": f"Bearer {api_token}"} + +def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +data = query( + { + "inputs": { + "source_sentence": "I'm very happy", + "sentences":["I'm filled with happiness", "I'm happy"] + } + }) + +## [0.605, 0.894] +``` + +You can also infer with the models in the Hub using Sentence Transformer models. 
+ +```python +pip install -U sentence-transformers + +from sentence_transformers import SentenceTransformer, util +sentences = ["I'm happy", "I'm full of happiness"] + +model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2') + +#Compute embedding for both lists +embedding_1= model.encode(sentences[0], convert_to_tensor=True) +embedding_2 = model.encode(sentences[1], convert_to_tensor=True) + +util.pytorch_cos_sim(embedding_1, embedding_2) +## tensor([[0.6003]]) +``` + +## Useful Resources + +Would you like to learn more about Sentence Transformers and Sentence Similarity? Awesome! Here you can find some curated resources that you may find helpful! + +- [Sentence Transformers Documentation](https://www.sbert.net/) +- [Sentence Transformers in the Hub](https://huggingface.co/blog/sentence-transformers-in-the-hub) +- [Building a Playlist Generator with Sentence Transformers](https://huggingface.co/blog/playlist-generator) +- [Getting Started With Embeddings](https://huggingface.co/blog/getting-started-with-embeddings) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..6feba5779864dbc99cb71a05233b69c0bc53c8a3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/data.ts @@ -0,0 +1,101 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Bing queries with relevant passages from various web sources.", + id: "ms_marco", + }, + ], + demo: { + inputs: [ + { + label: "Source sentence", + content: "Machine learning is so easy.", + type: "text", + }, + { + label: "Sentences to compare to", + content: "Deep learning is so straightforward.", + type: "text", + }, + { + label: "", + content: "This is so difficult, like rocket science.", + type: "text", + }, + { + label: "", + content: "I 
can't believe how much I struggled with this.", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Deep learning is so straightforward.", + score: 0.623, + }, + { + label: "This is so difficult, like rocket science.", + score: 0.413, + }, + { + label: "I can't believe how much I struggled with this.", + score: 0.256, + }, + ], + }, + ], + }, + metrics: [ + { + description: + "Reciprocal Rank is a measure used to rank the relevancy of documents given a set of documents. Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal Rank is 1", + id: "Mean Reciprocal Rank", + }, + { + description: + "The similarity of the embeddings is evaluated mainly on cosine similarity. It is calculated as the cosine of the angle between two vectors. It is particularly useful when your texts are not the same length", + id: "Cosine Similarity", + }, + ], + models: [ + { + description: + "This model works well for sentences and paragraphs and can be used for clustering/grouping and semantic searches.", + id: "sentence-transformers/all-mpnet-base-v2", + }, + { + description: "A multilingual model trained for FAQ retrieval.", + id: "clips/mfaq", + }, + ], + spaces: [ + { + description: "An application that leverages sentence similarity to answer questions from YouTube videos.", + id: "Gradio-Blocks/Ask_Questions_To_YouTube_Videos", + }, + { + description: + "An application that retrieves relevant PubMed abstracts for a given online article which can be used as further references.", + id: "Gradio-Blocks/pubmed-abstract-retriever", + }, + { + description: "An application that leverages sentence similarity to summarize text.", + id: "nickmuchi/article-text-summarizer", + }, + { + description: "A guide that explains how Sentence Transformers can be used for semantic search.", + id: "sentence-transformers/Sentence_Transformers_for_semantic_search", + }, + ], 
+ summary: + "Sentence Similarity is the task of determining how similar two texts are. Sentence similarity models convert input texts into vectors (embeddings) that capture semantic information and calculate how close (similar) they are between them. This task is particularly useful for information retrieval and clustering/grouping.", + widgetModels: ["sentence-transformers/all-MiniLM-L6-v2"], + youtubeId: "VCZq5AkbNEU", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..646e18b486af8639792a99014715abc3c61afa52 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/inference.ts @@ -0,0 +1,32 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +export type SentenceSimilarityOutput = number[]; + +/** + * Inputs for Sentence similarity inference + */ +export interface SentenceSimilarityInput { + inputs: SentenceSimilarityInputData; + /** + * Additional inference parameters + */ + parameters?: { [key: string]: unknown }; + [property: string]: unknown; +} + +export interface SentenceSimilarityInputData { + /** + * A list of strings which will be compared against the source_sentence. + */ + sentences: string[]; + /** + * The string that you wish to compare the other strings with. This can be a phrase, + * sentence, or longer passage, depending on the model being used. 
+ */ + sourceSentence: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..ecff3479d77eef3fa2b77393d7c0255f1ce494ea --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/spec/input.json @@ -0,0 +1,40 @@ +{ + "$id": "/inference/schemas/sentence-similarity/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Sentence similarity inference", + "title": "SentenceSimilarityInput", + "type": "object", + "properties": { + "inputs": { + "title": "SentenceSimilarityInputData", + "type": "object", + "properties": { + "sourceSentence": { + "description": "The string that you wish to compare the other strings with. This can be a phrase, sentence, or longer passage, depending on the model being used.", + "type": "string" + }, + "sentences": { + "type": "array", + "description": "A list of strings which will be compared against the source_sentence.", + "items": { + "type": "string" + } + } + }, + "required": ["sourceSentence", "sentences"] + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/SentenceSimilarityParameters" + } + }, + "$defs": { + "SentenceSimilarityParameters": { + "title": "SentenceSimilarityParameters", + "description": "Additional inference parameters for Sentence Similarity", + "type": "object", + "properties": {} + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..ca13d98bd5f55bd581e99c8cc4d970b9b7735512 --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/src/tasks/sentence-similarity/spec/output.json @@ -0,0 +1,12 @@ +{ + "$id": "/inference/schemas/sentence-similarity/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "title": "SentenceSimilarityOutput", + "description": "Outputs of inference for the Sentence Similarity task", + "type": "array", + "items": { + "description": "The associated similarity score for each of the given sentences", + "type": "number", + "title": "SentenceSimilarityScore" + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/summarization/about.md b/data/node_modules/@huggingface/tasks/src/tasks/summarization/about.md new file mode 100644 index 0000000000000000000000000000000000000000..79c585217d8e550d2b4fb24cad98aeec3fd4e15b --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/summarization/about.md @@ -0,0 +1,58 @@ +## Use Cases + +### Research Paper Summarization 🧐 + +Research papers can be summarized to allow researchers to spend less time selecting which articles to read. There are several approaches you can take for a task like this: + +1. Use an existing extractive summarization model on the Hub to do inference. +2. Pick an existing language model trained for academic papers. This model can then be trained in a process called fine-tuning so it can solve the summarization task. +3. Use a sequence-to-sequence model like [T5](https://huggingface.co/docs/transformers/model_doc/t5) for abstractive text summarization. + +## Inference + +You can use the 🤗 Transformers library `summarization` pipeline to infer with existing Summarization models. If no model name is provided the pipeline will be initialized with [sshleifer/distilbart-cnn-12-6](https://huggingface.co/sshleifer/distilbart-cnn-12-6). 
+ +```python +from transformers import pipeline + +classifier = pipeline("summarization") +classifier("Paris is the capital and most populous city of France, with an estimated population of 2,175,601 residents as of 2018, in an area of more than 105 square kilometres (41 square miles). The City of Paris is the centre and seat of government of the region and province of Île-de-France, or Paris Region, which has an estimated population of 12,174,880, or about 18 percent of the population of France as of 2017.") +## [{ "summary_text": " Paris is the capital and most populous city of France..." }] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer summarization models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +const inputs = + "Paris is the capital and most populous city of France, with an estimated population of 2,175,601 residents as of 2018, in an area of more than 105 square kilometres (41 square miles). The City of Paris is the centre and seat of government of the region and province of Île-de-France, or Paris Region, which has an estimated population of 12,174,880, or about 18 percent of the population of France as of 2017."; + +await inference.summarization({ + model: "sshleifer/distilbart-cnn-12-6", + inputs, +}); +``` + +## Useful Resources + +Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful! 
+ +- [Course Chapter on Summarization](https://huggingface.co/course/chapter7/5?fw=pt) +- [Distributed Training: Train BART/T5 for Summarization using 🤗 Transformers and Amazon SageMaker](https://huggingface.co/blog/sagemaker-distributed-training-seq2seq) + +### Notebooks + +- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/summarization.ipynb) +- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization) +- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/summarization) + +### Documentation + +- [Summarization task guide](https://huggingface.co/docs/transformers/tasks/summarization) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/summarization/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/summarization/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..bd04453da3f36af45f203e7c0039056677b59432 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/summarization/data.ts @@ -0,0 +1,76 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + canonicalId: "text2text-generation", + datasets: [ + { + description: + "News articles in five different languages along with their summaries. Widely used for benchmarking multilingual summarization models.", + id: "mlsum", + }, + { + description: "English conversations and their summaries. Useful for benchmarking conversational agents.", + id: "samsum", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: + "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. 
It was the first structure to reach a height of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.", + type: "text", + }, + ], + outputs: [ + { + label: "Output", + content: + "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building. It was the first structure to reach a height of 300 metres.", + type: "text", + }, + ], + }, + metrics: [ + { + description: + "The generated sequence is compared against its summary, and the overlap of tokens are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.", + id: "rouge", + }, + ], + models: [ + { + description: + "A strong summarization model trained on English news articles. Excels at generating factual summaries.", + id: "facebook/bart-large-cnn", + }, + { + description: "A summarization model trained on medical articles.", + id: "google/bigbird-pegasus-large-pubmed", + }, + ], + spaces: [ + { + description: "An application that can summarize long paragraphs.", + id: "pszemraj/summarize-long-text", + }, + { + description: "A much needed summarization application for terms and conditions.", + id: "ml6team/distilbart-tos-summarizer-tosdr", + }, + { + description: "An application that summarizes long documents.", + id: "pszemraj/document-summarization", + }, + { + description: "An application that can detect errors in abstractive summarization.", + id: "ml6team/post-processing-summarization", + }, + ], + summary: + "Summarization is the task of producing a shorter version of a document while preserving its important information. 
Some models can extract text from the original input, while other models can generate entirely new text.", + widgetModels: ["sshleifer/distilbart-cnn-12-6"], + youtubeId: "yHnr5Dk2zCI", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/summarization/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/summarization/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..2b674184fcfe6b9d537d91e36d29b27a9fb079a3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/summarization/inference.ts @@ -0,0 +1,56 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Summarization inference + * + * Inputs for Text2text Generation inference + */ +export interface SummarizationInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: Text2TextGenerationParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Text2text Generation + */ +export interface Text2TextGenerationParameters { + /** + * Whether to clean up the potential extra spaces in the text output. + */ + clean_up_tokenization_spaces?: boolean; + /** + * Additional parametrization of the text generation algorithm + */ + generate_parameters?: { [key: string]: unknown }; + /** + * The truncation strategy to use + */ + truncation?: Text2TextGenerationTruncationStrategy; + [property: string]: unknown; +} + +export type Text2TextGenerationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second"; + +/** + * Outputs of inference for the Summarization task + */ +export interface SummarizationOutput { + /** + * The summarized text. 
+ */ + summary_text: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/summarization/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/summarization/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..629da31ea67216b20f2314eb454b1f710367d9a2 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/summarization/spec/input.json @@ -0,0 +1,7 @@ +{ + "$ref": "/inference/schemas/text2text-generation/input.json", + "$id": "/inference/schemas/summarization/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "title": "SummarizationInput", + "description": "Inputs for Summarization inference" +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/summarization/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/summarization/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..dfa307b642213451b0aa7e8b88d1bfdaf6f77598 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/summarization/spec/output.json @@ -0,0 +1,14 @@ +{ + "$id": "/inference/schemas/summarization/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Summarization task", + "title": "SummarizationOutput", + "type": "object", + "properties": { + "summary_text": { + "type": "string", + "description": "The summarized text." 
+ } + }, + "required": ["summary_text"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/about.md b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/about.md new file mode 100644 index 0000000000000000000000000000000000000000..62cdb44ad25d94249ea6c4953a054b60c4021057 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/about.md @@ -0,0 +1,43 @@ +## Use Cases + +### SQL execution + +You can use the Table Question Answering models to simulate SQL execution by inputting a table. + +### Table Question Answering + +Table Question Answering models are capable of answering questions based on a table. + +## Task Variants + +This place can be filled with variants of this task if there's any. + +## Inference + +You can infer with TableQA models using the 🤗 Transformers library. + +```python +from transformers import pipeline +import pandas as pd + +# prepare table + question +data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]} +table = pd.DataFrame.from_dict(data) +question = "how many movies does Leonardo Di Caprio have?" + +# pipeline model +# Note: you must to install torch-scatter first. +tqa = pipeline(task="table-question-answering", model="google/tapas-large-finetuned-wtq") + +# result + +print(tqa(table=table, query=question)['cells'][0]) +#53 + +``` + +## Useful Resources + +In this area, you can insert useful resources about how to train or use a model for this task. + +This task page is complete thanks to the efforts of [Hao Kim Tieu](https://huggingface.co/haotieu). 
🦸 diff --git a/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..7a4691146623f0f106e062fe96a98d701e104325 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/data.ts @@ -0,0 +1,59 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: + "The WikiTableQuestions dataset is a large-scale dataset for the task of question answering on semi-structured tables.", + id: "wikitablequestions", + }, + { + description: + "WikiSQL is a dataset of 80654 hand-annotated examples of questions and SQL queries distributed across 24241 tables from Wikipedia.", + id: "wikisql", + }, + ], + demo: { + inputs: [ + { + table: [ + ["Rank", "Name", "No.of reigns", "Combined days"], + ["1", "lou Thesz", "3", "3749"], + ["2", "Ric Flair", "8", "3103"], + ["3", "Harley Race", "7", "1799"], + ], + type: "tabular", + }, + + { label: "Question", content: "What is the number of reigns for Harley Race?", type: "text" }, + ], + outputs: [{ label: "Result", content: "7", type: "text" }], + }, + metrics: [ + { + description: "Checks whether the predicted answer(s) is the same as the ground-truth answer(s).", + id: "Denotation Accuracy", + }, + ], + models: [ + { + description: + "A table question answering model that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL query on a given table.", + id: "microsoft/tapex-base", + }, + { + description: "A robust table question answering model.", + id: "google/tapas-base-finetuned-wtq", + }, + ], + spaces: [ + { + description: "An application that answers questions based on table CSV files.", + id: "katanaml/table-query", + }, + ], + summary: "Table Question Answering (Table QA) is the answering a question about an information on a given table.", + widgetModels: 
["google/tapas-base-finetuned-wtq"], +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..7e79fa2c8b083443901da8473aad6819b374aeb1 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/inference.ts @@ -0,0 +1,61 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Table Question Answering inference + */ +export interface TableQuestionAnsweringInput { + /** + * One (table, question) pair to answer + */ + inputs: TableQuestionAnsweringInputData; + /** + * Additional inference parameters + */ + parameters?: { + [key: string]: unknown; + }; + [property: string]: unknown; +} +/** + * One (table, question) pair to answer + */ +export interface TableQuestionAnsweringInputData { + /** + * The question to be answered about the table + */ + question: string; + /** + * The table to serve as context for the questions + */ + table: { + [key: string]: string[]; + }; + [property: string]: unknown; +} +export type TableQuestionAnsweringOutput = TableQuestionAnsweringOutputElement[]; +/** + * Outputs of inference for the Table Question Answering task + */ +export interface TableQuestionAnsweringOutputElement { + /** + * If the model has an aggregator, this returns the aggregator. + */ + aggregator?: string; + /** + * The answer of the question given the table. If there is an aggregator, the answer will be + * preceded by `AGGREGATOR >`. + */ + answer: string; + /** + * List of strings made up of the answer cell values. + */ + cells: string[]; + /** + * Coordinates of the cells of the answers. 
+ */ + coordinates: Array; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..3dfdd02a73bc26dd3fc9fd02c248b164dd22a489 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/spec/input.json @@ -0,0 +1,44 @@ +{ + "$id": "/inference/schemas/table-question-answering/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Table Question Answering inference", + "title": "TableQuestionAnsweringInput", + "type": "object", + "properties": { + "inputs": { + "description": "One (table, question) pair to answer", + "title": "TableQuestionAnsweringInputData", + "type": "object", + "properties": { + "table": { + "description": "The table to serve as context for the questions", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "question": { + "description": "The question to be answered about the table", + "type": "string" + } + }, + "required": ["table", "question"] + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/TableQuestionAnsweringParameters" + } + }, + "$defs": { + "TableQuestionAnsweringParameters": { + "title": "TableQuestionAnsweringParameters", + "description": "Additional inference parameters for Table Question Answering", + "type": "object", + "properties": {} + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..9b43026ea12299dc83110c99d3983841a8d30c6e --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/src/tasks/table-question-answering/spec/output.json @@ -0,0 +1,40 @@ +{ + "$id": "/inference/schemas/table-question-answering/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Table Question Answering task", + "title": "TableQuestionAnsweringOutput", + "type": "array", + "items": { + "type": "object", + "properties": { + "answer": { + "type": "string", + "description": "The answer of the question given the table. If there is an aggregator, the answer will be preceded by `AGGREGATOR >`." + }, + "coordinates": { + "type": "array", + "description": "Coordinates of the cells of the answers.", + "items": { + "type": "array", + "items": { + "type": "integer" + }, + "minLength": 2, + "maxLength": 2 + } + }, + "cells": { + "type": "array", + "description": "List of strings made up of the answer cell values.", + "items": { + "type": "string" + } + }, + "aggregator": { + "type": "string", + "description": "If the model has an aggregator, this returns the aggregator." + } + }, + "required": ["answer", "cells", "coordinates"] + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/tabular-classification/about.md b/data/node_modules/@huggingface/tasks/src/tasks/tabular-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..d46a48976c894b8a5d99dda14a893261c49e6d41 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/tabular-classification/about.md @@ -0,0 +1,65 @@ +## About the Task + +Tabular classification is the task of assigning a label or class given a limited number of attributes. For example, the input can be data related to a customer (balance of the customer, the time being a customer, or more) and the output can be whether the customer will churn from the service or not. 
+There are three types of categorical variables: + +- Binary variables: Variables that can take two values, like yes or no, open or closed. The task of predicting binary variables is called binary classification. +- Ordinal variables: Variables with a ranking relationship, e.g., good, insignificant, and bad product reviews. The task of predicting ordinal variables is called ordinal classification. +- Nominal variables: Variables with no ranking relationship among them, e.g., predicting an animal from their weight and height, where categories are cat, dog, or bird. The task of predicting nominal variables is called multinomial classification. + +## Use Cases + +### Fraud Detection +Tabular classification models can be used in detecting fraudulent credit card transactions, where the features could be the amount of the transaction and the account balance, and the target to predict could be whether the transaction is fraudulent or not. This is an example of binary classification. + +### Churn Prediction +Tabular classification models can be used in predicting customer churn in telecommunication. An example dataset for the task is hosted [here](https://huggingface.co/datasets/scikit-learn/churn-prediction). + +# Model Hosting and Inference + +You can use [skops](https://skops.readthedocs.io/) for model hosting and inference on the Hugging Face Hub. This library is built to improve production workflows of various libraries that are used to train tabular models, including [sklearn](https://scikit-learn.org/stable/) and [xgboost](https://xgboost.readthedocs.io/en/stable/). Using `skops` you can: + +- Easily use Inference Endpoints +- Build neat UIs with one line of code, +- Programmatically create model cards, +- Securely serialize your scikit-learn model. (See limitations of using pickle [here](https://huggingface.co/docs/hub/security-pickle).) 
+ +You can push your model as follows: + +```python +from skops import hub_utils +# initialize a repository with a trained model +local_repo = "/path_to_new_repo" +hub_utils.init(model, dst=local_repo) +# push to Hub! +hub_utils.push("username/my-awesome-model", source=local_repo) +``` + +Once the model is pushed, you can infer easily. + +```python +import skops.hub_utils as hub_utils +import pandas as pd +data = pd.DataFrame(your_data) +# Load the model from the Hub +res = hub_utils.get_model_output("username/my-awesome-model", data) +``` + +You can launch a UI for your model with only one line of code! + +```python +import gradio as gr +gr.Interface.load("huggingface/username/my-awesome-model").launch() +``` + +## Useful Resources + +- Check out the [scikit-learn organization](https://huggingface.co/scikit-learn) to learn more about different algorithms used for this task. +- [Skops documentation](https://skops.readthedocs.io/en/latest/) +- [Skops announcement blog](https://huggingface.co/blog/skops) +- [Notebook: Persisting your scikit-learn model using skops](https://www.kaggle.com/code/unofficialmerve/persisting-your-scikit-learn-model-using-skops) +- Check out [interactive sklearn examples](https://huggingface.co/sklearn-docs) built with ❤️ using Gradio. + +### Training your own model in just a few seconds + +We have built a [baseline trainer](https://huggingface.co/spaces/scikit-learn/baseline-trainer) application to which you can drag and drop your dataset. It will train a baseline and push it to your Hugging Face Hub profile with a model card containing information about the model. 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/tabular-classification/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/tabular-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..c7284cc50e2843ee3515436516687124894ff2d7 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/tabular-classification/data.ts @@ -0,0 +1,68 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A comprehensive curation of datasets covering all benchmarks.", + id: "inria-soda/tabular-benchmark", + }, + ], + demo: { + inputs: [ + { + table: [ + ["Glucose", "Blood Pressure ", "Skin Thickness", "Insulin", "BMI"], + ["148", "72", "35", "0", "33.6"], + ["150", "50", "30", "0", "35.1"], + ["141", "60", "29", "1", "39.2"], + ], + type: "tabular", + }, + ], + outputs: [ + { + table: [["Diabetes"], ["1"], ["1"], ["0"]], + type: "tabular", + }, + ], + }, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: "", + id: "recall", + }, + { + description: "", + id: "precision", + }, + { + description: "", + id: "f1", + }, + ], + models: [ + { + description: "Breast cancer prediction model based on decision trees.", + id: "scikit-learn/cancer-prediction-trees", + }, + ], + spaces: [ + { + description: "An application that can predict defective products on a production line.", + id: "scikit-learn/tabular-playground", + }, + { + description: "An application that compares various tabular classification techniques on different datasets.", + id: "scikit-learn/classification", + }, + ], + summary: "Tabular classification is the task of classifying a target category (a group) based on set of attributes.", + widgetModels: ["scikit-learn/tabular-playground"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/tabular-regression/about.md 
b/data/node_modules/@huggingface/tasks/src/tasks/tabular-regression/about.md new file mode 100644 index 0000000000000000000000000000000000000000..53c7b6599e91be5a91b142a9a4283d69d8260d16 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/tabular-regression/about.md @@ -0,0 +1,87 @@ +## About the Task + +Tabular regression is the task of predicting a numerical value given a set of attributes/features. _Tabular_ meaning that data is stored in a table (like an excel sheet), and each sample is contained in its own row. The features used to predict our target can be both numerical and categorical. However, including categorical features often requires additional preprocessing/feature engineering (a few models do accept categorical features directly, like [CatBoost](https://catboost.ai/)). An example of tabular regression would be predicting the weight of a fish given its' species and length. + +## Use Cases + +### Sales Prediction: a Use Case for Predicting a Continuous Target Variable + +Here the objective is to predict a continuous variable based on a set of input variable(s). For example, predicting `sales` of an ice cream shop based on `temperature` of weather and `duration of hours` shop was open. Here we can build a regression model with `temperature` and `duration of hours` as input variable and `sales` as target variable. + +### Missing Value Imputation for Other Tabular Tasks +In real-world applications, due to human error or other reasons, some of the input values can be missing or there might not be any recorded data. Considering the example above, say the shopkeeper's watch was broken and they forgot to calculate the `hours` for which the shop was open. This will lead to a missing value in their dataset. In this case, missing values could be replaced it with zero, or average hours for which the shop is kept open. Another approach we can try is to use `temperature` and `sales` variables to predict the `hours` variable here. 
+ +## Model Training + +A simple regression model can be created using `sklearn` as follows: + +```python +#set the input features +X = data[["Feature 1", "Feature 2", "Feature 3"]] +#set the target variable +y = data["Target Variable"] +#initialize the model +model = LinearRegression() +#Fit the model +model.fit(X, y) +``` + +# Model Hosting and Inference + +You can use [skops](https://skops.readthedocs.io/) for model hosting and inference on the Hugging Face Hub. This library is built to improve production workflows of various libraries that are used to train tabular models, including [sklearn](https://scikit-learn.org/stable/) and [xgboost](https://xgboost.readthedocs.io/en/stable/). Using `skops` you can: + +- Easily use Inference Endpoints, +- Build neat UIs with one line of code, +- Programmatically create model cards, +- Securely serialize your models. (See limitations of using pickle [here](https://huggingface.co/docs/hub/security-pickle).) + +You can push your model as follows: + +```python +from skops import hub_utils +# initialize a repository with a trained model +local_repo = "/path_to_new_repo" +hub_utils.init(model, dst=local_repo) +# push to Hub! +hub_utils.push("username/my-awesome-model", source=local_repo) +``` + +Once the model is pushed, you can infer easily. + +```python +import skops.hub_utils as hub_utils +import pandas as pd +data = pd.DataFrame(your_data) +# Load the model from the Hub +res = hub_utils.get_model_output("username/my-awesome-model", data) +``` + +You can launch a UI for your model with only one line of code! + +```python +import gradio as gr +gr.Interface.load("huggingface/username/my-awesome-model").launch() +``` + +## Useful Resources + +- [Skops documentation](https://skops.readthedocs.io/en/stable/index.html) + +- Check out [interactive sklearn examples](https://huggingface.co/sklearn-docs) built with ❤️ using Gradio. 
+- [Notebook: Persisting your scikit-learn model using skops](https://www.kaggle.com/code/unofficialmerve/persisting-your-scikit-learn-model-using-skops) + +- For starting with tabular regression: + + - Doing [Exploratory Data Analysis](https://neptune.ai/blog/exploratory-data-analysis-for-tabular-data) for tabular data. + - The data considered here consists of details of Olympic athletes and medal results from Athens 1896 to Rio 2016. + - Here you can learn more about how to explore and analyse the data and visualize them in order to get a better understanding of dataset. + - Building your [first ML model](https://www.kaggle.com/code/dansbecker/your-first-machine-learning-model). + +- Intermediate level tutorials on tabular regression: + - [A Short Chronology of Deep Learning for Tabular Data](https://sebastianraschka.com/blog/2022/deep-learning-for-tabular-data.html) by Sebastian Raschka. + +### Training your own model in just a few seconds + +We have built a [baseline trainer](https://huggingface.co/spaces/scikit-learn/baseline-trainer) application to which you can drag and drop your dataset. It will train a baseline and push it to your Hugging Face Hub profile with a model card containing information about the model. + +This page was made possible thanks to efforts of [Brenden Connors](https://huggingface.co/brendenc) and [Ayush Bihani](https://huggingface.co/hsuyab). 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/tabular-regression/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/tabular-regression/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..d4f085d24736391e990081104b2aaeb0f713eb98 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/tabular-regression/data.ts @@ -0,0 +1,57 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A comprehensive curation of datasets covering all benchmarks.", + id: "inria-soda/tabular-benchmark", + }, + ], + demo: { + inputs: [ + { + table: [ + ["Car Name", "Horsepower", "Weight"], + ["ford torino", "140", "3,449"], + ["amc hornet", "97", "2,774"], + ["toyota corolla", "65", "1,773"], + ], + type: "tabular", + }, + ], + outputs: [ + { + table: [["MPG (miles per gallon)"], ["17"], ["18"], ["31"]], + type: "tabular", + }, + ], + }, + metrics: [ + { + description: "", + id: "mse", + }, + { + description: + "Coefficient of determination (or R-squared) is a measure of how well the model fits the data. 
Higher R-squared is considered a better fit.", + id: "r-squared", + }, + ], + models: [ + { + description: "Fish weight prediction based on length measurements and species.", + id: "scikit-learn/Fish-Weight", + }, + ], + spaces: [ + { + description: "An application that can predict weight of a fish based on set of attributes.", + id: "scikit-learn/fish-weight-prediction", + }, + ], + summary: "Tabular regression is the task of predicting a numerical value given a set of attributes.", + widgetModels: ["scikit-learn/Fish-Weight"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-classification/about.md b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..faba693d0343046626b2c2661d3ce17143193e7e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/about.md @@ -0,0 +1,173 @@ +## Use Cases + +### Sentiment Analysis on Customer Reviews + +You can track the sentiments of your customers from the product reviews using sentiment analysis models. This can help understand churn and retention by grouping reviews by sentiment, to later analyze the text and make strategic decisions based on this knowledge. + +## Task Variants + +### Natural Language Inference (NLI) + +In NLI the model determines the relationship between two given texts. Concretely, the model takes a premise and a hypothesis and returns a class that can either be: + +- **entailment**, which means the hypothesis is true. +- **contraction**, which means the hypothesis is false. +- **neutral**, which means there's no relation between the hypothesis and the premise. + +The benchmark dataset for this task is GLUE (General Language Understanding Evaluation). NLI models have different variants, such as Multi-Genre NLI, Question NLI and Winograd NLI. + +### Multi-Genre NLI (MNLI) + +MNLI is used for general NLI. 
Here are som examples: + +``` +Example 1: + Premise: A man inspects the uniform of a figure in some East Asian country. + Hypothesis: The man is sleeping. + Label: Contradiction + +Example 2: + Premise: Soccer game with multiple males playing. + Hypothesis: Some men are playing a sport. + Label: Entailment +``` + +#### Inference + +You can use the 🤗 Transformers library `text-classification` pipeline to infer with NLI models. + +```python +from transformers import pipeline + +classifier = pipeline("text-classification", model = "roberta-large-mnli") +classifier("A soccer game with multiple males playing. Some men are playing a sport.") +## [{'label': 'ENTAILMENT', 'score': 0.98}] +``` + +### Question Natural Language Inference (QNLI) + +QNLI is the task of determining if the answer to a certain question can be found in a given document. If the answer can be found the label is “entailment”. If the answer cannot be found the label is “not entailment". + +``` +Question: What percentage of marine life died during the extinction? +Sentence: It is also known as the “Great Dying” because it is considered the largest mass extinction in the Earth’s history. +Label: not entailment + +Question: Who was the London Weekend Television’s Managing Director? +Sentence: The managing director of London Weekend Television (LWT), Greg Dyke, met with the representatives of the "big five" football clubs in England in 1990. +Label: entailment +``` + +#### Inference + +You can use the 🤗 Transformers library `text-classification` pipeline to infer with QNLI models. The model returns the label and the confidence. 
+ +```python +from transformers import pipeline + +classifier = pipeline("text-classification", model = "cross-encoder/qnli-electra-base") +classifier("Where is the capital of France?, Paris is the capital of France.") +## [{'label': 'entailment', 'score': 0.997}] +``` + +### Sentiment Analysis + +In Sentiment Analysis, the classes can be polarities like positive, negative, neutral, or sentiments such as happiness or anger. + +#### Inference + +You can use the 🤗 Transformers library with the `sentiment-analysis` pipeline to infer with Sentiment Analysis models. The model returns the label with the score. + +```python +from transformers import pipeline + +classifier = pipeline("sentiment-analysis") +classifier("I loved Star Wars so much!") +## [{'label': 'POSITIVE', 'score': 0.99} +``` + +### Quora Question Pairs + +Quora Question Pairs models assess whether two provided questions are paraphrases of each other. The model takes two questions and returns a binary value, with 0 being mapped to “not paraphrase” and 1 to “paraphrase". The benchmark dataset is [Quora Question Pairs](https://huggingface.co/datasets/glue/viewer/qqp/test) inside the [GLUE benchmark](https://huggingface.co/datasets/glue). The dataset consists of question pairs and their labels. + +``` +Question1: “How can I increase the speed of my internet connection while using a VPN?” +Question2: How can Internet speed be increased by hacking through DNS? +Label: Not paraphrase + +Question1: “What can make Physics easy to learn?” +Question2: “How can you make physics easy to learn?” +Label: Paraphrase +``` + +#### Inference + +You can use the 🤗 Transformers library `text-classification` pipeline to infer with QQPI models. 
+ +```python +from transformers import pipeline + +classifier = pipeline("text-classification", model = "textattack/bert-base-uncased-QQP") +classifier("Which city is the capital of France?, Where is the capital of France?") +## [{'label': 'paraphrase', 'score': 0.998}] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text classification models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.conversational({ + model: "distilbert-base-uncased-finetuned-sst-2-english", + inputs: "I love this movie!", +}); +``` + +### Grammatical Correctness + +Linguistic Acceptability is the task of assessing the grammatical acceptability of a sentence. The classes in this task are “acceptable” and “unacceptable”. The benchmark dataset used for this task is [Corpus of Linguistic Acceptability (CoLA)](https://huggingface.co/datasets/glue/viewer/cola/test). The dataset consists of texts and their labels. + +``` +Example: Books were sent to each other by the students. +Label: Unacceptable + +Example: She voted for herself. +Label: Acceptable. +``` + +#### Inference + +```python +from transformers import pipeline + +classifier = pipeline("text-classification", model = "textattack/distilbert-base-uncased-CoLA") +classifier("I will walk to home when I went through the bus.") +## [{'label': 'unacceptable', 'score': 0.95}] +``` + +## Useful Resources + +Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful! 
+ +- [SetFitABSA: Few-Shot Aspect Based Sentiment Analysis using SetFit](https://huggingface.co/blog/setfit-absa) +- [Course Chapter on Fine-tuning a Text Classification Model](https://huggingface.co/course/chapter3/1?fw=pt) +- [Getting Started with Sentiment Analysis using Python](https://huggingface.co/blog/sentiment-analysis-python) +- [Sentiment Analysis on Encrypted Data with Homomorphic Encryption](https://huggingface.co/blog/sentiment-analysis-fhe) +- [Leveraging Hugging Face for complex text classification use cases](https://huggingface.co/blog/classification-use-cases) + +### Notebooks + +- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/text_classification.ipynb) +- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb) +- [Flax](https://github.com/huggingface/notebooks/blob/master/examples/text_classification_flax.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) +- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) + +### Documentation + +- [Text classification task guide](https://huggingface.co/docs/transformers/tasks/sequence_classification) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-classification/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..068785e18d32b01fa145894100103f5a2834c5c5 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/data.ts @@ -0,0 +1,91 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A widely used dataset used to benchmark multiple variants of text classification.", + id: "glue", + }, 
+ { + description: "A text classification dataset used to benchmark natural language inference models", + id: "snli", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "I love Hugging Face!", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "POSITIVE", + score: 0.9, + }, + { + label: "NEUTRAL", + score: 0.1, + }, + { + label: "NEGATIVE", + score: 0.0, + }, + ], + }, + ], + }, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: "", + id: "recall", + }, + { + description: "", + id: "precision", + }, + { + description: + "The F1 metric is the harmonic mean of the precision and recall. It can be calculated as: F1 = 2 * (precision * recall) / (precision + recall)", + id: "f1", + }, + ], + models: [ + { + description: "A robust model trained for sentiment analysis.", + id: "distilbert-base-uncased-finetuned-sst-2-english", + }, + { + description: "Multi-genre natural language inference model.", + id: "roberta-large-mnli", + }, + ], + spaces: [ + { + description: "An application that can classify financial sentiment.", + id: "IoannisTr/Tech_Stocks_Trading_Assistant", + }, + { + description: "A dashboard that contains various text classification tasks.", + id: "miesnerjacob/Multi-task-NLP", + }, + { + description: "An application that analyzes user reviews in healthcare.", + id: "spacy/healthsea-demo", + }, + ], + summary: + "Text Classification is the task of assigning a label or class to a given text. 
Some use cases are sentiment analysis, natural language inference, and assessing grammatical correctness.", + widgetModels: ["distilbert-base-uncased-finetuned-sst-2-english"], + youtubeId: "leNG9fN9FQU", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-classification/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..dc913690203f4f3b64d1606f4d11aaa254b8013d --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/inference.ts @@ -0,0 +1,51 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Text Classification inference + */ +export interface TextClassificationInput { + /** + * The text to classify + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: TextClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Text Classification + */ +export interface TextClassificationParameters { + function_to_apply?: ClassificationOutputTransform; + /** + * When specified, limits the output to the top K most probable classes. + */ + top_k?: number; + [property: string]: unknown; +} +/** + * The function to apply to the model outputs in order to retrieve the scores. + */ +export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none"; +export type TextClassificationOutput = TextClassificationOutputElement[]; +/** + * Outputs of inference for the Text Classification task + */ +export interface TextClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. 
+ */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-classification/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..3bfdeaf6b905d957e5241674b7ac3d3eb1c6438a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/spec/input.json @@ -0,0 +1,35 @@ +{ + "$id": "/inference/schemas/text-classification/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Text Classification inference", + "title": "TextClassificationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The text to classify", + "type": "string" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/TextClassificationParameters" + } + }, + "$defs": { + "TextClassificationParameters": { + "title": "TextClassificationParameters", + "description": "Additional inference parameters for Text Classification", + "type": "object", + "properties": { + "function_to_apply": { + "title": "TextClassificationOutputTransform", + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform" + }, + "top_k": { + "type": "integer", + "description": "When specified, limits the output to the top K most probable classes." 
+ } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-classification/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..704b82225b78d6cf17f3ffc00d7f47fa8befd1a8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-classification/spec/output.json @@ -0,0 +1,10 @@ +{ + "$id": "/inference/schemas/text-classification/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Text Classification task", + "title": "TextClassificationOutput", + "type": "array", + "items": { + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutput" + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-generation/about.md b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/about.md new file mode 100644 index 0000000000000000000000000000000000000000..f0e02153c5347ffaf240a13f854fe27ffe670d41 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/about.md @@ -0,0 +1,154 @@ +This task covers guides on both [text-generation](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads) and [text-to-text generation](https://huggingface.co/models?pipeline_tag=text2text-generation&sort=downloads) models. Popular large language models that are used for chats or following instructions are also covered in this task. You can find the list of selected open-source large language models [here](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard), ranked by their performance scores. + +## Use Cases + +### Instruction Models + +A model trained for text generation can be later adapted to follow instructions. 
You can try some of the most powerful instruction-tuned open-access models like Mixtral 8x7B, Cohere Command R+, and Meta Llama3 70B [at Hugging Chat](https://huggingface.co/chat). + +### Code Generation + +A Text Generation model, also known as a causal language model, can be trained on code from scratch to help the programmers in their repetitive coding tasks. One of the most popular open-source models for code generation is StarCoder, which can generate code in 80+ languages. You can try it [here](https://huggingface.co/spaces/bigcode/bigcode-playground). + +### Stories Generation + +A story generation model can receive an input like "Once upon a time" and proceed to create a story-like text based on those first words. You can try [this application](https://huggingface.co/spaces/mosaicml/mpt-7b-storywriter) which contains a model trained on story generation, by MosaicML. + +If your generative model training data is different than your use case, you can train a causal language model from scratch. Learn how to do it in the free transformers [course](https://huggingface.co/course/chapter7/6?fw=pt)! + +## Task Variants + +### Completion Generation Models + +A popular variant of Text Generation models predicts the next word given a bunch of words. Word by word a longer text is formed that results in for example: + +- Given an incomplete sentence, complete it. +- Continue a story given the first sentences. +- Provided a code description, generate the code. + +The most popular models for this task are GPT-based models, [Mistral](mistralai/Mistral-7B-v0.1) or [Llama series](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). These models are trained on data that has no labels, so you just need plain text to train your own model. You can train text generation models to generate a wide variety of documents, from code to stories. + +### Text-to-Text Generation Models + +These models are trained to learn the mapping between a pair of texts (e.g. 
translation from one language to another). The most popular variants of these models are [NLLB](facebook/nllb-200-distilled-600M), [FLAN-T5](https://huggingface.co/google/flan-t5-xxl), and [BART](https://huggingface.co/docs/transformers/model_doc/bart). Text-to-Text models are trained with multi-tasking capabilities, they can accomplish a wide range of tasks, including summarization, translation, and text classification. + +## Language Model Variants + +When it comes to text generation, the underlying language model can come in several types: + +- **Base models:** refers to plain language models like [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.3) and [Meta Llama-3-70b](https://huggingface.co/meta-llama/Meta-Llama-3-70B). These models are good for fine-tuning and few-shot prompting. + +- **Instruction-trained models:** these models are trained in a multi-task manner to follow a broad range of instructions like "Write me a recipe for chocolate cake". Models like [Qwen 2 7B](https://huggingface.co/Qwen/Qwen2-7B-Instruct), [Yi 1.5 34B Chat](https://huggingface.co/01-ai/Yi-1.5-34B-Chat), and [Meta Llama 70B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) are examples of instruction-trained models. In general, instruction-trained models will produce better responses to instructions than base models. + +- **Human feedback models:** these models extend base and instruction-trained models by incorporating human feedback that rates the quality of the generated text according to criteria like [helpfulness, honesty, and harmlessness](https://arxiv.org/abs/2112.00861). The human feedback is then combined with an optimization technique like reinforcement learning to align the original model to be closer with human preferences. The overall methodology is often called [Reinforcement Learning from Human Feedback](https://huggingface.co/blog/rlhf), or RLHF for short. 
[Zephyr ORPO 141B A35B](https://huggingface.co/HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1) is an open-source model aligned through human feedback. + +## Text Generation from Image and Text + +There are language models that can input both text and image and output text, called vision language models. [IDEFICS 2](https://huggingface.co/HuggingFaceM4/idefics2-8b) and [MiniCPM Llama3 V](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5) are good examples. They accept the same generation parameters as other language models. However, since they also take images as input, you have to use them with the `image-to-text` pipeline. You can find more information about this in the [image-to-text task page](https://huggingface.co/tasks/image-to-text). + +## Inference + +You can use the 🤗 Transformers library `text-generation` pipeline to do inference with Text Generation models. It takes an incomplete text and returns multiple outputs with which the text can be completed. + +```python +from transformers import pipeline +generator = pipeline('text-generation', model = 'HuggingFaceH4/zephyr-7b-beta') +generator("Hello, I'm a language model", max_length = 30, num_return_sequences=3) +## [{'generated_text': "Hello, I'm a language modeler. So while writing this, when I went out to meet my wife or come home she told me that my"}, +## {'generated_text': "Hello, I'm a language modeler. I write and maintain software in Python. I love to code, and that includes coding things that require writing"}, ... +``` + +[Text-to-Text generation models](https://huggingface.co/models?pipeline_tag=text2text-generation&sort=downloads) have a separate pipeline called `text2text-generation`. This pipeline takes an input containing the sentence including the task and returns the output of the accomplished task. + +```python +from transformers import pipeline + +text2text_generator = pipeline("text2text-generation") +text2text_generator("question: What is 42 ? 
context: 42 is the answer to life, the universe and everything") +[{'generated_text': 'the answer to life, the universe and everything'}] + +text2text_generator("translate from English to French: I'm very happy") +[{'generated_text': 'Je suis très heureux'}] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text classification models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.conversational({ + model: "distilbert-base-uncased-finetuned-sst-2-english", + inputs: "I love this movie!", +}); +``` + +## Text Generation Inference + +[Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is an open-source toolkit for serving LLMs tackling challenges such as response time. TGI powers inference solutions like [Inference Endpoints](https://huggingface.co/inference-endpoints) and [Hugging Chat](https://huggingface.co/chat/), as well as multiple community projects. You can use it to deploy any supported open-source large language model of your choice. + +## ChatUI Spaces + +Hugging Face Spaces includes templates to easily deploy your own instance of a specific application. [ChatUI](https://github.com/huggingface/chat-ui) is an open-source interface that enables serving conversational interface for large language models and can be deployed with few clicks at Spaces. TGI powers these Spaces under the hood for faster inference. Thanks to the template, you can deploy your own instance based on a large language model with only a few clicks and customize it. Learn more about it [here](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui) and create your large language model instance [here](https://huggingface.co/new-space?template=huggingchat/chat-ui-template). 
+ +![ChatUI](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/os_llms/docker_chat.png) + +## Useful Resources + +Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful! + +### Tools within Hugging Face Ecosystem + +- You can use [PEFT](https://github.com/huggingface/peft) to adapt large language models in efficient way. +- [ChatUI](https://github.com/huggingface/chat-ui) is the open-source interface to conversate with Large Language Models. +- [text-generation-inferface](https://github.com/huggingface/text-generation-inference) +- [HuggingChat](https://huggingface.co/chat/) is a chat interface powered by Hugging Face to chat with powerful models like Meta Llama 3 70B, Mixtral 8x7B, etc. + +### Documentation + +- [PEFT documentation](https://huggingface.co/docs/peft/index) +- [ChatUI Docker Spaces](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui) +- [Causal language modeling task guide](https://huggingface.co/docs/transformers/tasks/language_modeling) +- [Text generation strategies](https://huggingface.co/docs/transformers/generation_strategies) +- [Course chapter on training a causal language model from scratch](https://huggingface.co/course/chapter7/6?fw=pt) + +### Model Inference & Deployment + +- [Optimizing your LLM in production](https://huggingface.co/blog/optimize-llm) +- [Open-Source Text Generation & LLM Ecosystem at Hugging Face](https://huggingface.co/blog/os-llms) +- [Introducing RWKV - An RNN with the advantages of a transformer](https://huggingface.co/blog/rwkv) +- [Llama 2 is at Hugging Face](https://huggingface.co/blog/llama2) +- [Guiding Text Generation with Constrained Beam Search in 🤗 Transformers](https://huggingface.co/blog/constrained-beam-search) +- [Code generation with Hugging Face](https://huggingface.co/spaces/codeparrot/code-generation-models) +- [Assisted Generation: a new direction toward low-latency text 
generation](https://huggingface.co/blog/assisted-generation) +- [How to generate text: using different decoding methods for language generation with Transformers](https://huggingface.co/blog/how-to-generate) +- [Faster Text Generation with TensorFlow and XLA](https://huggingface.co/blog/tf-xla-generate) + +### Model Fine-tuning/Training + +- [Non-engineers guide: Train a LLaMA 2 chatbot](https://huggingface.co/blog/Llama2-for-non-engineers) +- [Training CodeParrot 🦜 from Scratch](https://huggingface.co/blog/codeparrot) +- [Creating a Coding Assistant with StarCoder](https://huggingface.co/blog/starchat-alpha) + +### Advanced Concepts Explained Simply + +- [Mixture of Experts Explained](https://huggingface.co/blog/moe) + +### Advanced Fine-tuning/Training Recipes + +- [Fine-tuning Llama 2 70B using PyTorch FSDP](https://huggingface.co/blog/ram-efficient-pytorch-fsdp) +- [The N Implementation Details of RLHF with PPO](https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo) +- [Preference Tuning LLMs with Direct Preference Optimization Methods](https://huggingface.co/blog/pref-tuning) +- [Fine-tune Llama 2 with DPO](https://huggingface.co/blog/dpo-trl) + +### Notebooks + +- [Training a CLM in Flax](https://github.com/huggingface/notebooks/blob/master/examples/causal_language_modeling_flax.ipynb) +- [Training a CLM in TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb) +- [Training a CLM in PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb) + +### Scripts for training + +- [Training a CLM in PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling) +- [Training a CLM in TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling) +- [Text Generation in PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation) 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-generation/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..60350f2a95d468047a23bbfd61f2fe1a5eb2e003 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/data.ts @@ -0,0 +1,110 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A large multilingual dataset of text crawled from the web.", + id: "mc4", + }, + { + description: + "Diverse open-source data consisting of 22 smaller high-quality datasets. It was used to train GPT-Neo.", + id: "the_pile", + }, + { + description: "Truly open-source, curated and cleaned dialogue dataset.", + id: "HuggingFaceH4/ultrachat_200k", + }, + { + description: "An instruction dataset with preference ratings on responses.", + id: "openbmb/UltraFeedback", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "Once upon a time,", + type: "text", + }, + ], + outputs: [ + { + label: "Output", + content: + "Once upon a time, we knew that our ancestors were on the verge of extinction. The great explorers and poets of the Old World, from Alexander the Great to Chaucer, are dead and gone. A good many of our ancient explorers and poets have", + type: "text", + }, + ], + }, + metrics: [ + { + description: + "Cross Entropy is a metric that calculates the difference between two probability distributions. Each probability distribution is the distribution of predicted words", + id: "Cross Entropy", + }, + { + description: + "The Perplexity metric is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. 
Lower perplexity indicates better performance", + id: "Perplexity", + }, + ], + models: [ + { + description: "A large language model trained for text generation.", + id: "bigscience/bloom-560m", + }, + { + description: "A large code generation model that can generate code in 80+ languages.", + id: "bigcode/starcoder", + }, + { + description: "A very powerful text generation model.", + id: "mistralai/Mixtral-8x7B-Instruct-v0.1", + }, + { + description: "Small yet powerful text generation model.", + id: "microsoft/phi-2", + }, + { + description: "A very powerful model that can chat, do mathematical reasoning and write code.", + id: "openchat/openchat-3.5-0106", + }, + { + description: "Very strong yet small assistant model.", + id: "HuggingFaceH4/zephyr-7b-beta", + }, + { + description: "Very strong open-source large language model.", + id: "meta-llama/Llama-2-70b-hf", + }, + ], + spaces: [ + { + description: "A leaderboard to compare different open-source text generation models based on various benchmarks.", + id: "open-llm-leaderboard/open_llm_leaderboard", + }, + { + description: "An text generation based application based on a very powerful LLaMA2 model.", + id: "ysharma/Explore_llamav2_with_TGI", + }, + { + description: "An text generation based application to converse with Zephyr model.", + id: "HuggingFaceH4/zephyr-chat", + }, + { + description: "An text generation application that combines OpenAI and Hugging Face models.", + id: "microsoft/HuggingGPT", + }, + { + description: "An chatbot to converse with a very powerful text generation model.", + id: "mlabonne/phixtral-chat", + }, + ], + summary: + "Generating text is the task of generating new text given another text. 
These models can, for example, fill in incomplete text or paraphrase.", + widgetModels: ["HuggingFaceH4/zephyr-7b-beta"], + youtubeId: "Vpjb1lu0MDk", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-generation/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..37395c580b87be5c0eb0b673672c9247b7e8470a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/inference.ts @@ -0,0 +1,138 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Text Generation Input. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. + */ +export interface TextGenerationInput { + inputs: string; + parameters?: TextGenerationInputGenerateParameters; + stream?: boolean; + [property: string]: unknown; +} + +export interface TextGenerationInputGenerateParameters { + best_of?: number; + decoder_input_details?: boolean; + details?: boolean; + do_sample?: boolean; + frequency_penalty?: number; + grammar?: TextGenerationInputGrammarType; + max_new_tokens?: number; + repetition_penalty?: number; + return_full_text?: boolean; + seed?: number; + stop?: string[]; + temperature?: number; + top_k?: number; + top_n_tokens?: number; + top_p?: number; + truncate?: number; + typical_p?: number; + watermark?: boolean; + [property: string]: unknown; +} + +export interface TextGenerationInputGrammarType { + type: Type; + /** + * A string that represents a [JSON Schema](https://json-schema.org/). + * + * JSON Schema is a declarative language that allows to annotate JSON documents + * with types and descriptions. 
+ */ + value: unknown; + [property: string]: unknown; +} + +export type Type = "json" | "regex"; + +/** + * Text Generation Output. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. + */ +export interface TextGenerationOutput { + details?: TextGenerationOutputDetails; + generated_text: string; + [property: string]: unknown; +} + +export interface TextGenerationOutputDetails { + best_of_sequences?: TextGenerationOutputBestOfSequence[]; + finish_reason: TextGenerationOutputFinishReason; + generated_tokens: number; + prefill: TextGenerationOutputPrefillToken[]; + seed?: number; + tokens: TextGenerationOutputToken[]; + top_tokens?: Array; + [property: string]: unknown; +} + +export interface TextGenerationOutputBestOfSequence { + finish_reason: TextGenerationOutputFinishReason; + generated_text: string; + generated_tokens: number; + prefill: TextGenerationOutputPrefillToken[]; + seed?: number; + tokens: TextGenerationOutputToken[]; + top_tokens?: Array; + [property: string]: unknown; +} + +export type TextGenerationOutputFinishReason = "length" | "eos_token" | "stop_sequence"; + +export interface TextGenerationOutputPrefillToken { + id: number; + logprob: number; + text: string; + [property: string]: unknown; +} + +export interface TextGenerationOutputToken { + id: number; + logprob: number; + special: boolean; + text: string; + [property: string]: unknown; +} + +/** + * Text Generation Stream Output. + * + * Auto-generated from TGI specs. + * For more details, check out + * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. 
+ */ +export interface TextGenerationStreamOutput { + details?: TextGenerationStreamOutputStreamDetails; + generated_text?: string; + index: number; + token: TextGenerationStreamOutputToken; + top_tokens?: TextGenerationStreamOutputToken[]; + [property: string]: unknown; +} + +export interface TextGenerationStreamOutputStreamDetails { + finish_reason: TextGenerationOutputFinishReason; + generated_tokens: number; + seed?: number; + [property: string]: unknown; +} + +export interface TextGenerationStreamOutputToken { + id: number; + logprob: number; + special: boolean; + text: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..0742cefe053c3fb3b3a5b38423424c5949080ca9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/input.json @@ -0,0 +1,195 @@ +{ + "$id": "/inference/schemas/text-generation/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Text Generation Input.\n\nAuto-generated from TGI specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.", + "title": "TextGenerationInput", + "type": "object", + "required": ["inputs"], + "properties": { + "inputs": { + "type": "string", + "example": "My name is Olivier and I" + }, + "parameters": { + "$ref": "#/$defs/TextGenerationInputGenerateParameters" + }, + "stream": { + "type": "boolean", + "default": "false" + } + }, + "$defs": { + "TextGenerationInputGenerateParameters": { + "type": "object", + "properties": { + "best_of": { + "type": "integer", + "default": "null", + "example": 1, + "nullable": true, + "minimum": 0, + "exclusiveMinimum": 0 + }, + "decoder_input_details": { + "type": "boolean", + "default": "false" + }, + "details": { 
+ "type": "boolean", + "default": "true" + }, + "do_sample": { + "type": "boolean", + "default": "false", + "example": true + }, + "frequency_penalty": { + "type": "number", + "format": "float", + "default": "null", + "example": 0.1, + "nullable": true, + "exclusiveMinimum": -2 + }, + "grammar": { + "allOf": [ + { + "$ref": "#/$defs/TextGenerationInputGrammarType" + } + ], + "default": "null", + "nullable": true + }, + "max_new_tokens": { + "type": "integer", + "format": "int32", + "default": "100", + "example": "20", + "nullable": true, + "minimum": 0 + }, + "repetition_penalty": { + "type": "number", + "format": "float", + "default": "null", + "example": 1.03, + "nullable": true, + "exclusiveMinimum": 0 + }, + "return_full_text": { + "type": "boolean", + "default": "null", + "example": false, + "nullable": true + }, + "seed": { + "type": "integer", + "format": "int64", + "default": "null", + "example": "null", + "nullable": true, + "minimum": 0, + "exclusiveMinimum": 0 + }, + "stop": { + "type": "array", + "items": { + "type": "string" + }, + "example": ["photographer"], + "maxItems": 4 + }, + "temperature": { + "type": "number", + "format": "float", + "default": "null", + "example": 0.5, + "nullable": true, + "exclusiveMinimum": 0 + }, + "top_k": { + "type": "integer", + "format": "int32", + "default": "null", + "example": 10, + "nullable": true, + "exclusiveMinimum": 0 + }, + "top_n_tokens": { + "type": "integer", + "format": "int32", + "default": "null", + "example": 5, + "nullable": true, + "minimum": 0, + "exclusiveMinimum": 0 + }, + "top_p": { + "type": "number", + "format": "float", + "default": "null", + "example": 0.95, + "nullable": true, + "maximum": 1, + "exclusiveMinimum": 0 + }, + "truncate": { + "type": "integer", + "default": "null", + "example": "null", + "nullable": true, + "minimum": 0 + }, + "typical_p": { + "type": "number", + "format": "float", + "default": "null", + "example": 0.95, + "nullable": true, + "maximum": 1, + "exclusiveMinimum": 
0 + }, + "watermark": { + "type": "boolean", + "default": "false", + "example": true + } + }, + "title": "TextGenerationInputGenerateParameters" + }, + "TextGenerationInputGrammarType": { + "oneOf": [ + { + "type": "object", + "required": ["type", "value"], + "properties": { + "type": { + "type": "string", + "enum": ["json"] + }, + "value": { + "description": "A string that represents a [JSON Schema](https://json-schema.org/).\n\nJSON Schema is a declarative language that allows to annotate JSON documents\nwith types and descriptions." + } + } + }, + { + "type": "object", + "required": ["type", "value"], + "properties": { + "type": { + "type": "string", + "enum": ["regex"] + }, + "value": { + "type": "string" + } + } + } + ], + "discriminator": { + "propertyName": "type" + }, + "title": "TextGenerationInputGrammarType" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..cb6ef3f99694ae4ed5eb6a05b8f3ea18e1da8a60 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/output.json @@ -0,0 +1,179 @@ +{ + "$id": "/inference/schemas/text-generation/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Text Generation Output.\n\nAuto-generated from TGI specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.", + "title": "TextGenerationOutput", + "type": "object", + "required": ["generated_text"], + "properties": { + "details": { + "allOf": [ + { + "$ref": "#/$defs/TextGenerationOutputDetails" + } + ], + "nullable": true + }, + "generated_text": { + "type": "string", + "example": "test" + } + }, + "$defs": { + "TextGenerationOutputDetails": { + "type": "object", + "required": ["finish_reason", "generated_tokens", "prefill", 
"tokens"], + "properties": { + "best_of_sequences": { + "type": "array", + "items": { + "$ref": "#/$defs/TextGenerationOutputBestOfSequence" + }, + "nullable": true + }, + "finish_reason": { + "$ref": "#/$defs/TextGenerationOutputFinishReason" + }, + "generated_tokens": { + "type": "integer", + "format": "int32", + "example": 1, + "minimum": 0 + }, + "prefill": { + "type": "array", + "items": { + "$ref": "#/$defs/TextGenerationOutputPrefillToken" + } + }, + "seed": { + "type": "integer", + "format": "int64", + "example": 42, + "nullable": true, + "minimum": 0 + }, + "tokens": { + "type": "array", + "items": { + "$ref": "#/$defs/TextGenerationOutputToken" + } + }, + "top_tokens": { + "type": "array", + "items": { + "type": "array", + "items": { + "$ref": "#/$defs/TextGenerationOutputToken" + } + } + } + }, + "title": "TextGenerationOutputDetails" + }, + "TextGenerationOutputBestOfSequence": { + "type": "object", + "required": ["generated_text", "finish_reason", "generated_tokens", "prefill", "tokens"], + "properties": { + "finish_reason": { + "$ref": "#/$defs/TextGenerationOutputFinishReason" + }, + "generated_text": { + "type": "string", + "example": "test" + }, + "generated_tokens": { + "type": "integer", + "format": "int32", + "example": 1, + "minimum": 0 + }, + "prefill": { + "type": "array", + "items": { + "$ref": "#/$defs/TextGenerationOutputPrefillToken" + } + }, + "seed": { + "type": "integer", + "format": "int64", + "example": 42, + "nullable": true, + "minimum": 0 + }, + "tokens": { + "type": "array", + "items": { + "$ref": "#/$defs/TextGenerationOutputToken" + } + }, + "top_tokens": { + "type": "array", + "items": { + "type": "array", + "items": { + "$ref": "#/$defs/TextGenerationOutputToken" + } + } + } + }, + "title": "TextGenerationOutputBestOfSequence" + }, + "TextGenerationOutputFinishReason": { + "type": "string", + "enum": ["length", "eos_token", "stop_sequence"], + "example": "Length", + "title": "TextGenerationOutputFinishReason" + }, + 
"TextGenerationOutputPrefillToken": { + "type": "object", + "required": ["id", "text", "logprob"], + "properties": { + "id": { + "type": "integer", + "format": "int32", + "example": 0, + "minimum": 0 + }, + "logprob": { + "type": "number", + "format": "float", + "example": -0.34, + "nullable": true + }, + "text": { + "type": "string", + "example": "test" + } + }, + "title": "TextGenerationOutputPrefillToken" + }, + "TextGenerationOutputToken": { + "type": "object", + "required": ["id", "text", "logprob", "special"], + "properties": { + "id": { + "type": "integer", + "format": "int32", + "example": 0, + "minimum": 0 + }, + "logprob": { + "type": "number", + "format": "float", + "example": -0.34, + "nullable": true + }, + "special": { + "type": "boolean", + "example": "false" + }, + "text": { + "type": "string", + "example": "test" + } + }, + "title": "TextGenerationOutputToken" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/stream_output.json b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/stream_output.json new file mode 100644 index 0000000000000000000000000000000000000000..e1ef8a0dce59fdeb57b8809c7dcced5d45fa7322 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-generation/spec/stream_output.json @@ -0,0 +1,97 @@ +{ + "$id": "/inference/schemas/text-generation/stream_output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Text Generation Stream Output.\n\nAuto-generated from TGI specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.", + "title": "TextGenerationStreamOutput", + "type": "object", + "required": ["index", "token"], + "properties": { + "details": { + "allOf": [ + { + "$ref": "#/$defs/TextGenerationStreamOutputStreamDetails" + } + ], + "default": "null", + "nullable": true + }, + "generated_text": { + "type": "string", + "default": "null", + 
"example": "test", + "nullable": true + }, + "index": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "token": { + "$ref": "#/$defs/TextGenerationStreamOutputToken" + }, + "top_tokens": { + "type": "array", + "items": { + "$ref": "#/$defs/TextGenerationStreamOutputToken" + } + } + }, + "$defs": { + "TextGenerationStreamOutputStreamDetails": { + "type": "object", + "required": ["finish_reason", "generated_tokens"], + "properties": { + "finish_reason": { + "$ref": "#/$defs/TextGenerationStreamOutputFinishReason" + }, + "generated_tokens": { + "type": "integer", + "format": "int32", + "example": 1, + "minimum": 0 + }, + "seed": { + "type": "integer", + "format": "int64", + "example": 42, + "nullable": true, + "minimum": 0 + } + }, + "title": "TextGenerationStreamOutputStreamDetails" + }, + "TextGenerationStreamOutputFinishReason": { + "type": "string", + "enum": ["length", "eos_token", "stop_sequence"], + "example": "Length", + "title": "TextGenerationStreamOutputFinishReason" + }, + "TextGenerationStreamOutputToken": { + "type": "object", + "required": ["id", "text", "logprob", "special"], + "properties": { + "id": { + "type": "integer", + "format": "int32", + "example": 0, + "minimum": 0 + }, + "logprob": { + "type": "number", + "format": "float", + "example": -0.34, + "nullable": true + }, + "special": { + "type": "boolean", + "example": "false" + }, + "text": { + "type": "string", + "example": "test" + } + }, + "title": "TextGenerationStreamOutputToken" + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-3d/about.md b/data/node_modules/@huggingface/tasks/src/tasks/text-to-3d/about.md new file mode 100644 index 0000000000000000000000000000000000000000..9f76ba3e4ddee504215bf676dc9bc28926c98fef --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-3d/about.md @@ -0,0 +1,62 @@ +## Use Cases + +Text-to-3D models can be used in a wide variety of applications that require 3D, such as games, animation, 
design, architecture, engineering, marketing, and more. + +![Text-to-3D Thumbnail](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/text-to-3d/text-to-3d-thumbnail.png) + +This task is similar to the [image-to-3d](https://huggingface.co/tasks/image-to-3d) task, but takes text input instead of image input. In practice, this is often equivalent to a combination of [text-to-image](https://huggingface.co/tasks/text-to-image) and [image-to-3d](https://huggingface.co/tasks/image-to-3d). That is, the text is first converted to an image, then the image is converted to 3D. + +### Generating Meshes + +Meshes are the standard representation of 3D in industry. + +### Generating Gaussian Splats + +[Gaussian Splatting](https://huggingface.co/blog/gaussian-splatting) is a rendering technique that represents scenes as fuzzy points. + +### Inference + +Inference for this task typically leverages the [Diffusers](https://huggingface.co/docs/diffusers/index) library for inference, using [Custom Pipelines](https://huggingface.co/docs/diffusers/v0.6.0/en/using-diffusers/custom_pipelines). + +These are unstandardized and depend on the model. More details can be found in each model repository. + +```python +import torch +import requests +import numpy as np +from io import BytesIO +from diffusers import DiffusionPipeline +from PIL import Image + +pipeline = DiffusionPipeline.from_pretrained( + "dylanebert/LGM-full", + custom_pipeline="dylanebert/LGM-full", + torch_dtype=torch.float16, + trust_remote_code=True, +).to("cuda") + +input_prompt = "a cat statue" +result = pipeline(input_prompt, None) +result_path = "/tmp/output.ply" +pipeline.save_ply(result, result_path) +``` + +In the code above, we: + +1. Import the necessary libraries +2. Load the `LGM-full` model and custom pipeline +3. Define the input prompt +4. Run the pipeline on the input prompt +5. Save the output to a file + +### Output Formats + +Meshes can be in `.obj`, `.glb`, `.stl`, or `.gltf` format. 
Other formats are allowed, but won't be rendered in the gradio [Model3D](https://www.gradio.app/docs/gradio/model3d) component. + +Splats can be in `.ply` or `.splat` format. They can be rendered in the gradio [Model3D](https://www.gradio.app/docs/gradio/model3d) component using the [gsplat.js](https://github.com/huggingface/gsplat.js) library. + +## Useful Resources + +- [ML for 3D Course](https://huggingface.co/learn/ml-for-3d-course) +- [3D Arena Leaderboard](https://huggingface.co/spaces/dylanebert/3d-arena) +- [gsplat.js](https://github.com/huggingface/gsplat.js) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-3d/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-to-3d/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..e66a2127599f1aeb5fafb9f30705fdcb7bfde442 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-3d/data.ts @@ -0,0 +1,56 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A large dataset of over 10 million 3D objects.", + id: "allenai/objaverse-xl", + }, + { + description: "Descriptive captions for 3D objects in Objaverse.", + id: "tiange/Cap3D", + }, + ], + demo: { + inputs: [ + { + label: "Prompt", + content: "a cat statue", + type: "text", + }, + ], + outputs: [ + { + label: "Result", + content: "text-to-3d-3d-output-filename.glb", + type: "text", + }, + ], + }, + metrics: [], + models: [ + { + description: "Text-to-3D mesh model by OpenAI", + id: "openai/shap-e", + }, + { + description: "Generative 3D gaussian splatting model.", + id: "ashawkey/LGM", + }, + ], + spaces: [ + { + description: "Text-to-3D demo with mesh outputs.", + id: "hysts/Shap-E", + }, + { + description: "Text/image-to-3D demo with splat outputs.", + id: "ashawkey/LGM", + }, + ], + summary: "Text-to-3D models take in text input and produce 3D output.", + widgetModels: [], + youtubeId: "", +}; + +export default taskData; diff --git 
a/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..276ecce652394bdc98b8708c6ac19fba46a8da48 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/inference.ts @@ -0,0 +1,143 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Text To Audio inference + */ +export interface TextToAudioInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: TextToAudioParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Text To Audio + */ +export interface TextToAudioParameters { + /** + * Parametrization of the text generation process + */ + generate?: GenerationParameters; + [property: string]: unknown; +} + +/** + * Parametrization of the text generation process + * + * Ad-hoc parametrization of the text generation process + */ +export interface GenerationParameters { + /** + * Whether to use sampling instead of greedy decoding when generating new tokens. + */ + do_sample?: boolean; + /** + * Controls the stopping condition for beam-based methods. + */ + early_stopping?: EarlyStoppingUnion; + /** + * If set to float strictly between 0 and 1, only tokens with a conditional probability + * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + * Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + */ + epsilon_cutoff?: number; + /** + * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. 
If set to + * float strictly between 0 and 1, a token is only considered if it is greater than either + * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + * for more details. + */ + eta_cutoff?: number; + /** + * The maximum length (in tokens) of the generated text, including the input. + */ + max_length?: number; + /** + * The maximum number of tokens to generate. Takes precedence over maxLength. + */ + max_new_tokens?: number; + /** + * The minimum length (in tokens) of the generated text, including the input. + */ + min_length?: number; + /** + * The minimum number of tokens to generate. Takes precedence over maxLength. + */ + min_new_tokens?: number; + /** + * Number of groups to divide num_beams into in order to ensure diversity among different + * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + */ + num_beam_groups?: number; + /** + * Number of beams to use for beam search. + */ + num_beams?: number; + /** + * The value balances the model confidence and the degeneration penalty in contrastive + * search decoding. + */ + penalty_alpha?: number; + /** + * The value used to modulate the next token probabilities. + */ + temperature?: number; + /** + * The number of highest probability vocabulary tokens to keep for top-k-filtering. + */ + top_k?: number; + /** + * If set to float < 1, only the smallest set of most probable tokens with probabilities + * that add up to top_p or higher are kept for generation. 
+ */ + top_p?: number; + /** + * Local typicality measures how similar the conditional probability of predicting a target + * token next is to the expected conditional probability of predicting a random token next, + * given the partial text already generated. If set to float < 1, the smallest set of the + * most locally typical tokens with probabilities that add up to typical_p or higher are + * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + */ + typical_p?: number; + /** + * Whether the model should use the past last key/values attentions to speed up decoding + */ + use_cache?: boolean; + [property: string]: unknown; +} + +/** + * Controls the stopping condition for beam-based methods. + */ +export type EarlyStoppingUnion = boolean | "never"; + +/** + * Outputs of inference for the Text To Audio task + */ +export interface TextToAudioOutput { + /** + * The generated audio waveform. + */ + audio: unknown; + samplingRate: unknown; + /** + * The sampling rate of the generated audio waveform. 
+ */ + sampling_rate?: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..08267d681a23d7e281295b3e9b84e6a0a390f5fb --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/spec/input.json @@ -0,0 +1,31 @@ +{ + "$id": "/inference/schemas/text-to-audio/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Text To Audio inference", + "title": "TextToAudioInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input text data", + "type": "string" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/TextToAudioParameters" + } + }, + "$defs": { + "TextToAudioParameters": { + "title": "TextToAudioParameters", + "description": "Additional inference parameters for Text To Audio", + "type": "object", + "properties": { + "generate": { + "description": "Parametrization of the text generation process", + "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters" + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..c171d62bffbed21b423f91a807ed525d285f3445 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-audio/spec/output.json @@ -0,0 +1,17 @@ +{ + "$id": "/inference/schemas/text-to-audio/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Text To Audio task", + "title": "TextToAudioOutput", + "type": "object", + "properties": { + "audio": { + "description": "The generated audio 
waveform." + }, + "sampling_rate": { + "type": "number", + "description": "The sampling rate of the generated audio waveform." + } + }, + "required": ["audio", "samplingRate"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/about.md b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/about.md new file mode 100644 index 0000000000000000000000000000000000000000..79b00c005dbfd58bd2de8ad30ed0ac95e47120c4 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/about.md @@ -0,0 +1,75 @@ +## Use Cases + +### Data Generation + +Businesses can generate data for their their use cases by inputting text and getting image outputs. + +### Immersive Conversational Chatbots + +Chatbots can be made more immersive if they provide contextual images based on the input provided by the user. + +### Creative Ideas for Fashion Industry + +Different patterns can be generated to obtain unique pieces of fashion. Text-to-image models make creations easier for designers to conceptualize their design before actually implementing it. + +### Architecture Industry + +Architects can utilise the models to construct an environment based out on the requirements of the floor plan. This can also include the furniture that has to be placed in that environment. + +## Task Variants + +You can contribute variants of this task [here](https://github.com/huggingface/hub-docs/blob/main/tasks/src/text-to-image/about.md). + +## Inference + +You can use diffusers pipelines to infer with `text-to-image` models. 
+ +```python +from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler + +model_id = "stabilityai/stable-diffusion-2" +scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler") +pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16) +pipe = pipe.to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text-to-image models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.textToImage({ + model: "stabilityai/stable-diffusion-2", + inputs: "award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]", + parameters: { + negative_prompt: "blurry", + }, +}); +``` + +## Useful Resources + +### Model Inference + +- [Hugging Face Diffusion Models Course](https://github.com/huggingface/diffusion-models-class) +- [Getting Started with Diffusers](https://huggingface.co/docs/diffusers/index) +- [Text-to-Image Generation](https://huggingface.co/docs/diffusers/using-diffusers/conditional_image_generation) +- [Using Stable Diffusion with Core ML on Apple Silicon](https://huggingface.co/blog/diffusers-coreml) +- [A guide on Vector Quantized Diffusion](https://huggingface.co/blog/vq-diffusion) +- [🧨 Stable Diffusion in JAX/Flax](https://huggingface.co/blog/stable_diffusion_jax) +- [Running IF with 🧨 diffusers on a Free Tier Google Colab](https://huggingface.co/blog/if) +- [Introducing Würstchen: Fast Diffusion for Image Generation](https://huggingface.co/blog/wuerstchen) +- [Efficient Controllable Generation for SDXL with T2I-Adapters](https://huggingface.co/blog/t2i-sdxl-adapters) +- [Welcome aMUSEd: Efficient Text-to-Image Generation](https://huggingface.co/blog/amused) + +### Model Fine-tuning + +- [Finetune 
Stable Diffusion Models with DDPO via TRL](https://huggingface.co/blog/pref-tuning) +- [LoRA training scripts of the world, unite!](https://huggingface.co/blog/sdxl_lora_advanced_script) +- [Using LoRA for Efficient Stable Diffusion Fine-Tuning](https://huggingface.co/blog/lora) + +This page was made possible thanks to the efforts of [Ishan Dutta](https://huggingface.co/ishandutta), [Enrique Elias Ubaldo](https://huggingface.co/herrius) and [Oğuz Akif](https://huggingface.co/oguzakif). diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..5c314be0ccfa52f1d5e0a00f81ca1702b8c3aebd --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/data.ts @@ -0,0 +1,100 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "RedCaps is a large-scale dataset of 12M image-text pairs collected from Reddit.", + id: "red_caps", + }, + { + description: "Conceptual Captions is a dataset consisting of ~3.3M images annotated with captions.", + id: "conceptual_captions", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "A city above clouds, pastel colors, Victorian style", + type: "text", + }, + ], + outputs: [ + { + filename: "image.jpeg", + type: "img", + }, + ], + }, + metrics: [ + { + description: + "The Inception Score (IS) measure assesses diversity and meaningfulness. It uses a generated image sample to predict its label. A higher score signifies more diverse and meaningful images.", + id: "IS", + }, + { + description: + "The Fréchet Inception Distance (FID) calculates the distance between distributions between synthetic and real samples. 
A lower FID score indicates better similarity between the distributions of real and generated images.", + id: "FID", + }, + { + description: + "R-precision assesses how the generated image aligns with the provided text description. It uses the generated images as queries to retrieve relevant text descriptions. The top 'r' relevant descriptions are selected and used to calculate R-precision as r/R, where 'R' is the number of ground truth descriptions associated with the generated images. A higher R-precision value indicates a better model.", + id: "R-Precision", + }, + ], + models: [ + { + description: "One of the most powerful image generation models that can generate realistic outputs.", + id: "stabilityai/stable-diffusion-xl-base-1.0", + }, + { + description: "A powerful yet fast image generation model.", + id: "latent-consistency/lcm-lora-sdxl", + }, + { + description: "A very fast text-to-image model.", + id: "ByteDance/SDXL-Lightning", + }, + { + description: "A powerful text-to-image model.", + id: "stabilityai/stable-diffusion-3-medium-diffusers", + }, + ], + spaces: [ + { + description: "A powerful text-to-image application.", + id: "stabilityai/stable-diffusion-3-medium", + }, + { + description: "A text-to-image application to generate comics.", + id: "jbilcke-hf/ai-comic-factory", + }, + { + description: "A text-to-image application that can generate coherent text inside the image.", + id: "DeepFloyd/IF", + }, + { + description: "A powerful yet very fast image generation application.", + id: "latent-consistency/lcm-lora-for-sdxl", + }, + { + description: "A gallery to explore various text-to-image models.", + id: "multimodalart/LoraTheExplorer", + }, + { + description: "An application for `text-to-image`, `image-to-image` and image inpainting.", + id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI", + }, + { + description: "An application to generate realistic images given photos of a person and a prompt.", + id: "InstantX/InstantID", + }, + ], + summary: + 
"Generates images from input text. These models can be used to generate and modify images based on text prompts.", + widgetModels: ["CompVis/stable-diffusion-v1-4"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..4997165b8c1351c37356ecc6ec613555b6d871b3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/inference.ts @@ -0,0 +1,71 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Text To Image inference + */ +export interface TextToImageInput { + /** + * The input text data (sometimes called "prompt" + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: TextToImageParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Text To Image + */ +export interface TextToImageParameters { + /** + * For diffusion models. A higher guidance scale value encourages the model to generate + * images closely linked to the text prompt at the expense of lower image quality. + */ + guidance_scale?: number; + /** + * One or several prompt to guide what NOT to include in image generation. + */ + negative_prompt?: string[]; + /** + * For diffusion models. The number of denoising steps. More denoising steps usually lead to + * a higher quality image at the expense of slower inference. + */ + num_inference_steps?: number; + /** + * For diffusion models. 
Override the scheduler with a compatible one + */ + scheduler?: string; + /** + * The size in pixel of the output image + */ + target_size?: TargetSize; + [property: string]: unknown; +} + +/** + * The size in pixel of the output image + */ +export interface TargetSize { + height: number; + width: number; + [property: string]: unknown; +} + +/** + * Outputs of inference for the Text To Image task + */ +export interface TextToImageOutput { + /** + * The generated image + */ + image: unknown; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..49acc7ed3af74cc3293f6f8b250d715586a9085c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/spec/input.json @@ -0,0 +1,59 @@ +{ + "$id": "/inference/schemas/text-to-image/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Text To Image inference", + "title": "TextToImageInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input text data (sometimes called \"prompt\"", + "type": "string" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/TextToImageParameters" + } + }, + "$defs": { + "TextToImageParameters": { + "title": "TextToImageParameters", + "description": "Additional inference parameters for Text To Image", + "type": "object", + "properties": { + "guidance_scale": { + "type": "number", + "description": "For diffusion models. A higher guidance scale value encourages the model to generate images closely linked to the text prompt at the expense of lower image quality." + }, + "negative_prompt": { + "type": "array", + "items": { + "type": "string" + }, + "description": "One or several prompt to guide what NOT to include in image generation." 
+ }, + "num_inference_steps": { + "type": "integer", + "description": "For diffusion models. The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference." + }, + "target_size": { + "type": "object", + "description": "The size in pixel of the output image", + "properties": { + "width": { + "type": "integer" + }, + "height": { + "type": "integer" + } + }, + "required": ["width", "height"] + }, + "scheduler": { + "type": "string", + "description": "For diffusion models. Override the scheduler with a compatible one" + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..ff952a3a36dd7cdc4e1c6209ec9bce3aaf594999 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-image/spec/output.json @@ -0,0 +1,13 @@ +{ + "$id": "/inference/schemas/text-to-image/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Text To Image task", + "title": "TextToImageOutput", + "type": "object", + "properties": { + "image": { + "description": "The generated image" + } + }, + "required": ["image"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/about.md b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/about.md new file mode 100644 index 0000000000000000000000000000000000000000..c56467a1baf91a65880050aedd82ab8017bc75ee --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/about.md @@ -0,0 +1,63 @@ +## Use Cases + +Text-to-Speech (TTS) models can be used in any speech-enabled application that requires converting text to speech imitating human voice. + +### Voice Assistants + +TTS models are used to create voice assistants on smart devices. 
These models are a better alternative compared to concatenative methods where the assistant is built by recording sounds and mapping them, since the outputs in TTS models contain elements in natural speech such as emphasis. + +### Announcement Systems + +TTS models are widely used in airport and public transportation announcement systems to convert the announcement of a given text into speech. + +## Inference Endpoints + +The Hub contains over [1500 TTS models](https://huggingface.co/models?pipeline_tag=text-to-speech&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using Inference Endpoints. Here is a simple code snippet to get you started: + +```python +import json +import requests + +headers = {"Authorization": f"Bearer {API_TOKEN}"} +API_URL = "https://api-inference.huggingface.co/models/microsoft/speecht5_tts" + +def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response + +output = query({"text_inputs": "Max is the best doggo."}) +``` + +You can also use libraries such as [espnet](https://huggingface.co/models?library=espnet&pipeline_tag=text-to-speech&sort=downloads) or [transformers](https://huggingface.co/models?pipeline_tag=text-to-speech&library=transformers&sort=trending) if you want to handle the Inference directly. + +## Direct Inference + +Now, you can also use the Text-to-Speech pipeline in Transformers to synthesise high quality voice. + +```python +from transformers import pipeline + +synthesizer = pipeline("text-to-speech", "suno/bark") + +synthesizer("Look I am generating speech in three lines of code!") +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer summarization models on Hugging Face Hub. 
+ +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.textToSpeech({ + model: "facebook/mms-tts", + inputs: "text to generate speech from", +}); +``` + +## Useful Resources + +- [Hugging Face Audio Course](https://huggingface.co/learn/audio-course/chapter6/introduction) +- [ML for Audio Study Group - Text to Speech Deep Dive](https://www.youtube.com/watch?v=aLBedWj-5CQ) +- [Speech Synthesis, Recognition, and More With SpeechT5](https://huggingface.co/blog/speecht5) +- [Optimizing a Text-To-Speech model using 🤗 Transformers](https://huggingface.co/blog/optimizing-bark) +- [Train your own TTS models with Parler-TTS](https://github.com/huggingface/parler-tts) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..d4e5c886fd37822f65c32acedc412c5fdefc718e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/data.ts @@ -0,0 +1,70 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + canonicalId: "text-to-audio", + datasets: [ + { + description: "10K hours of multi-speaker English dataset.", + id: "parler-tts/mls_eng_10k", + }, + { + description: "Multi-speaker English dataset.", + id: "LibriTTS", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "I love audio models on the Hub!", + type: "text", + }, + ], + outputs: [ + { + filename: "audio.wav", + type: "audio", + }, + ], + }, + metrics: [ + { + description: "The Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated speech.", + id: "mel cepstral distortion", + }, + ], + models: [ + { + description: "A powerful TTS model.", + id: "suno/bark", + }, + { + description: "A massively multi-lingual TTS model.", + id: "facebook/mms-tts", + }, + { + description: "A prompt based, 
powerful TTS model.", + id: "parler-tts/parler_tts_mini_v0.1", + }, + ], + spaces: [ + { + description: "An application for generate highly realistic, multilingual speech.", + id: "suno/bark", + }, + { + description: "XTTS is a Voice generation model that lets you clone voices into different languages.", + id: "coqui/xtts", + }, + { + description: "An application that synthesizes speech for diverse speaker prompts.", + id: "parler-tts/parler_tts_mini", + }, + ], + summary: + "Text-to-Speech (TTS) is the task of generating natural sounding speech given text input. TTS models can be extended to have a single model that generates speech for multiple speakers and multiple languages.", + widgetModels: ["suno/bark"], + youtubeId: "NW62DpzJ274", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..cdf778438337af9ec63f2dd0123d8f5723c62d35 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/inference.ts @@ -0,0 +1,147 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Text to Speech inference + * + * Inputs for Text To Audio inference + */ +export interface TextToSpeechInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: TextToAudioParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Text To Audio + */ +export interface TextToAudioParameters { + /** + * Parametrization of the text generation process + */ + generate?: GenerationParameters; + [property: string]: unknown; +} + +/** + * Parametrization of the text generation process + * + * Ad-hoc parametrization of the text generation process + */ 
+export interface GenerationParameters { + /** + * Whether to use sampling instead of greedy decoding when generating new tokens. + */ + do_sample?: boolean; + /** + * Controls the stopping condition for beam-based methods. + */ + early_stopping?: EarlyStoppingUnion; + /** + * If set to float strictly between 0 and 1, only tokens with a conditional probability + * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + * Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + */ + epsilon_cutoff?: number; + /** + * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + * float strictly between 0 and 1, a token is only considered if it is greater than either + * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. + * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + * for more details. + */ + eta_cutoff?: number; + /** + * The maximum length (in tokens) of the generated text, including the input. + */ + max_length?: number; + /** + * The maximum number of tokens to generate. Takes precedence over maxLength. + */ + max_new_tokens?: number; + /** + * The minimum length (in tokens) of the generated text, including the input. + */ + min_length?: number; + /** + * The minimum number of tokens to generate. Takes precedence over maxLength. + */ + min_new_tokens?: number; + /** + * Number of groups to divide num_beams into in order to ensure diversity among different + * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + */ + num_beam_groups?: number; + /** + * Number of beams to use for beam search. 
+ */ + num_beams?: number; + /** + * The value balances the model confidence and the degeneration penalty in contrastive + * search decoding. + */ + penalty_alpha?: number; + /** + * The value used to modulate the next token probabilities. + */ + temperature?: number; + /** + * The number of highest probability vocabulary tokens to keep for top-k-filtering. + */ + top_k?: number; + /** + * If set to float < 1, only the smallest set of most probable tokens with probabilities + * that add up to top_p or higher are kept for generation. + */ + top_p?: number; + /** + * Local typicality measures how similar the conditional probability of predicting a target + * token next is to the expected conditional probability of predicting a random token next, + * given the partial text already generated. If set to float < 1, the smallest set of the + * most locally typical tokens with probabilities that add up to typical_p or higher are + * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + */ + typical_p?: number; + /** + * Whether the model should use the past last key/values attentions to speed up decoding + */ + use_cache?: boolean; + [property: string]: unknown; +} + +/** + * Controls the stopping condition for beam-based methods. + */ +export type EarlyStoppingUnion = boolean | "never"; + +/** + * Outputs for Text to Speech inference + * + * Outputs of inference for the Text To Audio task + */ +export interface TextToSpeechOutput { + /** + * The generated audio waveform. + */ + audio: unknown; + samplingRate: unknown; + /** + * The sampling rate of the generated audio waveform. 
+ */ + sampling_rate?: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..7d2bac0924d743b9a077d122df1c734533fa73d4 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/spec/input.json @@ -0,0 +1,7 @@ +{ + "$ref": "/inference/schemas/text-to-audio/input.json", + "$id": "/inference/schemas/text-to-speech/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "title": "TextToSpeechInput", + "description": "Inputs for Text to Speech inference" +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..91654e2b506962a371791796abc0b862f6b73ce2 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-speech/spec/output.json @@ -0,0 +1,7 @@ +{ + "$ref": "/inference/schemas/text-to-audio/output.json", + "$id": "/inference/schemas/text-to-speech/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "title": "TextToSpeechOutput", + "description": "Outputs for Text to Speech inference" +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-video/about.md b/data/node_modules/@huggingface/tasks/src/tasks/text-to-video/about.md new file mode 100644 index 0000000000000000000000000000000000000000..898d638c264aa8219cdc3a71d1a4562de0d084b8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-video/about.md @@ -0,0 +1,41 @@ +## Use Cases + +### Script-based Video Generation + +Text-to-video models can be used to create short-form video content from a provided text script. These models can be used to create engaging and informative marketing videos. 
For example, a company could use a text-to-video model to create a video that explains how their product works. + +### Content format conversion + +Text-to-video models can be used to generate videos from long-form text, including blog posts, articles, and text files. Text-to-video models can be used to create educational videos that are more engaging and interactive. An example of this is creating a video that explains a complex concept from an article. + +### Voice-overs and Speech + +Text-to-video models can be used to create an AI newscaster to deliver daily news, or for a film-maker to create a short film or a music video. + +## Task Variants +Text-to-video models have different variants based on inputs and outputs. + +### Text-to-video Editing + +One text-to-video task is generating text-based video style and local attribute editing. Text-to-video editing models can make it easier to perform tasks like cropping, stabilization, color correction, resizing and audio editing consistently. + +### Text-to-video Search + +Text-to-video search is the task of retrieving videos that are relevant to a given text query. This can be challenging, as videos are a complex medium that can contain a lot of information. By using semantic analysis to extract the meaning of the text query, visual analysis to extract features from the videos, such as the objects and actions that are present in the video, and temporal analysis to categorize relationships between the objects and actions in the video, we can determine which videos are most likely to be relevant to the text query. + +### Text-driven Video Prediction + +Text-driven video prediction is the task of generating a video sequence from a text description. Text description can be anything from a simple sentence to a detailed story. The goal of this task is to generate a video that is both visually realistic and semantically consistent with the text description. 
+ +### Video Translation + +Text-to-video translation models can translate videos from one language to another or allow to query the multilingual text-video model with non-English sentences. This can be useful for people who want to watch videos in a language that they don't understand, especially when multi-lingual captions are available for training. + +## Inference +Contribute an inference snippet for text-to-video here! + +## Useful Resources + +In this area, you can insert useful resources about how to train or use a model for this task. + +- [Text-to-Video: The Task, Challenges and the Current State](https://huggingface.co/blog/text-to-video) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text-to-video/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/text-to-video/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..072d1394f28f1b792a47400ad9b8d9bc7f7b3903 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text-to-video/data.ts @@ -0,0 +1,102 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Microsoft Research Video to Text is a large-scale dataset for open domain video captioning", + id: "iejMac/CLIP-MSR-VTT", + }, + { + description: "UCF101 Human Actions dataset consists of 13,320 video clips from YouTube, with 101 classes.", + id: "quchenyuan/UCF101-ZIP", + }, + { + description: "A high-quality dataset for human action recognition in YouTube videos.", + id: "nateraw/kinetics", + }, + { + description: "A dataset of video clips of humans performing pre-defined basic actions with everyday objects.", + id: "HuggingFaceM4/something_something_v2", + }, + { + description: + "This dataset consists of text-video pairs and contains noisy samples with irrelevant video descriptions", + id: "HuggingFaceM4/webvid", + }, + { + description: "A dataset of short Flickr videos for the temporal localization of events with descriptions.", + id: 
"iejMac/CLIP-DiDeMo", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "Darth Vader is surfing on the waves.", + type: "text", + }, + ], + outputs: [ + { + filename: "text-to-video-output.gif", + type: "img", + }, + ], + }, + metrics: [ + { + description: + "Inception Score uses an image classification model that predicts class labels and evaluates how distinct and diverse the images are. A higher score indicates better video generation.", + id: "is", + }, + { + description: + "Frechet Inception Distance uses an image classification model to obtain image embeddings. The metric compares mean and standard deviation of the embeddings of real and generated images. A smaller score indicates better video generation.", + id: "fid", + }, + { + description: + "Frechet Video Distance uses a model that captures coherence for changes in frames and the quality of each frame. A smaller score indicates better video generation.", + id: "fvd", + }, + { + description: + "CLIPSIM measures similarity between video frames and text using an image-text similarity model. 
A higher score indicates better video generation.", + id: "clipsim", + }, + ], + models: [ + { + description: "A strong model for video generation.", + id: "Vchitect/LaVie", + }, + { + description: "A robust model for text-to-video generation.", + id: "damo-vilab/text-to-video-ms-1.7b", + }, + { + description: "A text-to-video generation model with high quality and smooth outputs.", + id: "hotshotco/Hotshot-XL", + }, + ], + spaces: [ + { + description: "An application that generates video from text.", + id: "fffiloni/zeroscope", + }, + { + description: "An application that generates video from image and text.", + id: "Vchitect/LaVie", + }, + { + description: "An application that generates videos from text and provides multi-model support.", + id: "ArtGAN/Video-Diffusion-WebUI", + }, + ], + summary: + "Text-to-video models can be used in any application that requires generating consistent sequence of images from text. ", + widgetModels: [], + youtubeId: undefined, +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..3fb690b702a87cea401f213ffbc038d0fb076def --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/inference.ts @@ -0,0 +1,55 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Text2text Generation inference + */ +export interface Text2TextGenerationInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: Text2TextGenerationParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Text2text Generation + */ +export interface Text2TextGenerationParameters { + /** + * Whether 
to clean up the potential extra spaces in the text output. + */ + clean_up_tokenization_spaces?: boolean; + /** + * Additional parametrization of the text generation algorithm + */ + generate_parameters?: { [key: string]: unknown }; + /** + * The truncation strategy to use + */ + truncation?: Text2TextGenerationTruncationStrategy; + [property: string]: unknown; +} + +export type Text2TextGenerationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second"; + +/** + * Outputs of inference for the Text2text Generation task + */ +export interface Text2TextGenerationOutput { + generatedText: unknown; + /** + * The generated text. + */ + generated_text?: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..0310d74787a56ae5dd306732487646ccf82cf907 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/spec/input.json @@ -0,0 +1,55 @@ +{ + "$id": "/inference/schemas/text2text-generation/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Text2text Generation inference", + "title": "Text2TextGenerationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input text data", + "type": "string" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/Text2textGenerationParameters" + } + }, + "$defs": { + "Text2textGenerationParameters": { + "title": "Text2textGenerationParameters", + "description": "Additional inference parameters for Text2text Generation", + "type": "object", + "properties": { + "clean_up_tokenization_spaces": { + "type": "boolean", + "description": "Whether to clean up the potential extra spaces in the text output." 
+ }, + "truncation": { + "title": "Text2textGenerationTruncationStrategy", + "type": "string", + "description": "The truncation strategy to use", + "oneOf": [ + { + "const": "do_not_truncate" + }, + { + "const": "longest_first" + }, + { + "const": "only_first" + }, + { + "const": "only_second" + } + ] + }, + "generate_parameters": { + "title": "generateParameters", + "type": "object", + "description": "Additional parametrization of the text generation algorithm" + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..0da61f103d4cb27c3f61c2c5d782f44906ca2120 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/text2text-generation/spec/output.json @@ -0,0 +1,14 @@ +{ + "$id": "/inference/schemas/text2text-generation/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Text2text Generation task", + "title": "Text2TextGenerationOutput", + "type": "object", + "properties": { + "generated_text": { + "type": "string", + "description": "The generated text." + } + }, + "required": ["generatedText"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/token-classification/about.md b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..9b0701385b5793f32bdafd890c476a4efb99b509 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/about.md @@ -0,0 +1,76 @@ +## Use Cases + +### Information Extraction from Invoices + +You can extract entities of interest from invoices automatically using Named Entity Recognition (NER) models. 
Invoices can be read with Optical Character Recognition models and the output can be used to do inference with NER models. In this way, important information such as date, company name, and other named entities can be extracted. + +## Task Variants + +### Named Entity Recognition (NER) + +NER is the task of recognizing named entities in a text. These entities can be the names of people, locations, or organizations. The task is formulated as labeling each token with a class for each named entity and a class named "0" for tokens that do not contain any entities. The input for this task is text and the output is the annotated text with named entities. + +#### Inference + +You can use the 🤗 Transformers library `ner` pipeline to infer with NER models. + +```python +from transformers import pipeline + +classifier = pipeline("ner") +classifier("Hello I'm Omar and I live in Zürich.") +``` + +### Part-of-Speech (PoS) Tagging +In PoS tagging, the model recognizes parts of speech, such as nouns, pronouns, adjectives, or verbs, in a given text. The task is formulated as labeling each word with a part of the speech. + +#### Inference + +You can use the 🤗 Transformers library `token-classification` pipeline with a POS tagging model of your choice. The model will return a json with PoS tags for each token. + +```python +from transformers import pipeline + +classifier = pipeline("token-classification", model = "vblagoje/bert-english-uncased-finetuned-pos") +classifier("Hello I'm Omar and I live in Zürich.") +``` + +This is not limited to transformers! You can also use other libraries such as Stanza, spaCy, and Flair to do inference! Here is an example using a canonical [spaCy](https://hf.co/blog/spacy) model. 
+ +```python +!pip install https://huggingface.co/spacy/en_core_web_sm/resolve/main/en_core_web_sm-any-py3-none-any.whl + +import en_core_web_sm + +nlp = en_core_web_sm.load() +doc = nlp("I'm Omar and I live in Zürich.") +for token in doc: + print(token.text, token.pos_, token.dep_, token.ent_type_) + +## I PRON nsubj +## 'm AUX ROOT +## Omar PROPN attr PERSON +### ... +``` + +## Useful Resources + +Would you like to learn more about token classification? Great! Here you can find some curated resources that you may find helpful! + +- [Course Chapter on Token Classification](https://huggingface.co/course/chapter7/2?fw=pt) +- [Blog post: Welcome spaCy to the Hugging Face Hub](https://huggingface.co/blog/spacy) + +### Notebooks + +- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb) +- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) +- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification) + +### Documentation + +- [Token classification task guide](https://huggingface.co/docs/transformers/tasks/token_classification) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/token-classification/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..d4510819fd11e9ea38b9e51c43e2fe551d404966 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/data.ts @@ -0,0 +1,84 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A widely used dataset useful to benchmark named entity recognition models.", + id: 
"conll2003", + }, + { + description: + "A multilingual dataset of Wikipedia articles annotated for named entity recognition in over 150 different languages.", + id: "wikiann", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "My name is Omar and I live in Zürich.", + type: "text", + }, + ], + outputs: [ + { + text: "My name is Omar and I live in Zürich.", + tokens: [ + { + type: "PERSON", + start: 11, + end: 15, + }, + { + type: "GPE", + start: 30, + end: 36, + }, + ], + type: "text-with-tokens", + }, + ], + }, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: "", + id: "recall", + }, + { + description: "", + id: "precision", + }, + { + description: "", + id: "f1", + }, + ], + models: [ + { + description: + "A robust performance model to identify people, locations, organizations and names of miscellaneous entities.", + id: "dslim/bert-base-NER", + }, + { + description: "Flair models are typically the state of the art in named entity recognition tasks.", + id: "flair/ner-english", + }, + ], + spaces: [ + { + description: + "An application that can recognizes entities, extracts noun chunks and recognizes various linguistic features of each token.", + id: "spacy/gradio_pipeline_visualizer", + }, + ], + summary: + "Token classification is a natural language understanding task in which a label is assigned to some tokens in a text. Some popular token classification subtasks are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. 
NER models could be trained to identify specific entities in a text, such as dates, individuals and places; and PoS tagging would identify, for example, which words in a text are verbs, nouns, and punctuation marks.", + widgetModels: ["dslim/bert-base-NER"], + youtubeId: "wVHdVlPScxA", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/token-classification/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..c89bf4e70e634c16400c766bbad761c0fdc53424 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/inference.ts @@ -0,0 +1,82 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Token Classification inference + */ +export interface TokenClassificationInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: TokenClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Token Classification + */ +export interface TokenClassificationParameters { + /** + * The strategy used to fuse tokens based on model predictions + */ + aggregation_strategy?: TokenClassificationAggregationStrategy; + /** + * A list of labels to ignore + */ + ignore_labels?: string[]; + /** + * The number of overlapping tokens between chunks when splitting the input text. + */ + stride?: number; + [property: string]: unknown; +} +/** + * Do not aggregate tokens + * + * Group consecutive tokens with the same label in a single entity. + * + * Similar to "simple", also preserves word integrity (use the label predicted for the first + * token in a word). 
+ * + * Similar to "simple", also preserves word integrity (uses the label with the highest + * score, averaged across the word's tokens). + * + * Similar to "simple", also preserves word integrity (uses the label with the highest score + * across the word's tokens). + */ +export type TokenClassificationAggregationStrategy = "none" | "simple" | "first" | "average" | "max"; +export type TokenClassificationOutput = TokenClassificationOutputElement[]; +/** + * Outputs of inference for the Token Classification task + */ +export interface TokenClassificationOutputElement { + /** + * The character position in the input where this group ends. + */ + end?: number; + /** + * The predicted label for that group of tokens + */ + entity_group?: string; + label: unknown; + /** + * The associated score / probability + */ + score: number; + /** + * The character position in the input where this group begins. + */ + start?: number; + /** + * The corresponding text + */ + word?: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/token-classification/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..30d6153d2ac99f11c79d378a2352dc85c1be3fb9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/spec/input.json @@ -0,0 +1,65 @@ +{ + "$id": "/inference/schemas/token-classification/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Token Classification inference", + "title": "TokenClassificationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input text data", + "type": "string" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/TokenClassificationParameters" + } + }, + "$defs": { + "TokenClassificationParameters": { + "title": "TokenClassificationParameters", + 
"description": "Additional inference parameters for Token Classification", + "type": "object", + "properties": { + "ignore_labels": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of labels to ignore" + }, + "stride": { + "type": "integer", + "description": "The number of overlapping tokens between chunks when splitting the input text." + }, + "aggregation_strategy": { + "title": "TokenClassificationAggregationStrategy", + "type": "string", + "description": "The strategy used to fuse tokens based on model predictions", + "oneOf": [ + { + "const": "none", + "description": "Do not aggregate tokens" + }, + { + "const": "simple", + "description": "Group consecutive tokens with the same label in a single entity." + }, + { + "const": "first", + "description": "Similar to \"simple\", also preserves word integrity (use the label predicted for the first token in a word)." + }, + { + "const": "average", + "description": "Similar to \"simple\", also preserves word integrity (uses the label with the highest score, averaged across the word's tokens)." + }, + { + "const": "max", + "description": "Similar to \"simple\", also preserves word integrity (uses the label with the highest score across the word's tokens)." 
+ } + ] + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/token-classification/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..95bdc06f531faec57d01f2bfcfb565ea6560f731 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/token-classification/spec/output.json @@ -0,0 +1,33 @@ +{ + "$id": "/inference/schemas/token-classification/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Token Classification task", + "title": "TokenClassificationOutput", + "type": "array", + "items": { + "type": "object", + "properties": { + "entity_group": { + "type": "string", + "description": "The predicted label for that group of tokens" + }, + "score": { + "type": "number", + "description": "The associated score / probability" + }, + "word": { + "type": "string", + "description": "The corresponding text" + }, + "start": { + "type": "integer", + "description": "The character position in the input where this group begins." + }, + "end": { + "type": "integer", + "description": "The character position in the input where this group ends." + } + }, + "required": ["label", "score"] + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/translation/about.md b/data/node_modules/@huggingface/tasks/src/tasks/translation/about.md new file mode 100644 index 0000000000000000000000000000000000000000..23fc48576f9cd472f99e39f8765e593dd096f2ab --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/translation/about.md @@ -0,0 +1,65 @@ +## Use Cases + +You can find over a thousand Translation models on the Hub, but sometimes you might not find a model for the language pair you are interested in. 
When this happen, you can use a pretrained multilingual Translation model like [mBART](https://huggingface.co/facebook/mbart-large-cc25) and further train it on your own data in a process called fine-tuning. + +### Multilingual conversational agents + +Translation models can be used to build conversational agents across different languages. This can be done in two ways. + +- **Translate the dataset to a new language.** You can translate a dataset of intents (inputs) and responses to the target language. You can then train a new intent classification model with this new dataset. This allows you to proofread responses in the target language and have better control of the chatbot's outputs. + +* **Translate the input and output of the agent.** You can use a Translation model in user inputs so that the chatbot can process it. You can then translate the output of the chatbot into the language of the user. This approach might be less reliable as the chatbot will generate responses that were not defined before. + +## Inference + +You can use the 🤗 Transformers library with the `translation_xx_to_yy` pattern where xx is the source language code and yy is the target language code. The default model for the pipeline is [t5-base](https://huggingface.co/t5-base) which under the hood adds a task prefix indicating the task itself, e.g. “translate: English to French”. + +```python +from transformers import pipeline +en_fr_translator = pipeline("translation_en_to_fr") +en_fr_translator("How old are you?") +## [{'translation_text': ' quel âge êtes-vous?'}] +``` + +If you’d like to use a specific model checkpoint that is from one specific language to another, you can also directly use the `translation` pipeline. 
+ +```python +from transformers import pipeline + +model_checkpoint = "Helsinki-NLP/opus-mt-en-fr" +translator = pipeline("translation", model=model_checkpoint) +translator("How are you?") +# [{'translation_text': 'Comment allez-vous ?'}] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer translation models on Hugging Face Hub. + +```javascript +import { HfInference } from "@huggingface/inference"; + +const inference = new HfInference(HF_TOKEN); +await inference.translation({ + model: "t5-base", + inputs: "My name is Wolfgang and I live in Berlin", +}); +``` + +## Useful Resources + +Would you like to learn more about Translation? Great! Here you can find some curated resources that you may find helpful! + +- [Course Chapter on Translation](https://huggingface.co/course/chapter7/4?fw=pt) + +### Notebooks + +- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/translation.ipynb) +- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/translation-tf.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/translation) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/translation) + +### Documentation + +- [Translation task guide](https://huggingface.co/docs/transformers/tasks/translation) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/translation/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/translation/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..0edfab7b889b9bf54b28873f3ffe922b6f2296a8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/translation/data.ts @@ -0,0 +1,69 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + canonicalId: "text2text-generation", + datasets: [ + { + description: "A dataset of copyright-free books translated into 16 different languages.", + id: 
"opus_books", + }, + { + description: + "An example of translation between programming languages. This dataset consists of functions in Java and C#.", + id: "code_x_glue_cc_code_to_code_trans", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "My name is Omar and I live in Zürich.", + type: "text", + }, + ], + outputs: [ + { + label: "Output", + content: "Mein Name ist Omar und ich wohne in Zürich.", + type: "text", + }, + ], + }, + metrics: [ + { + description: + "BLEU score is calculated by counting the number of shared single or subsequent tokens between the generated sequence and the reference. Subsequent n tokens are called “n-grams”. Unigram refers to a single token while bi-gram refers to token pairs and n-grams refer to n subsequent tokens. The score ranges from 0 to 1, where 1 means the translation perfectly matched and 0 did not match at all", + id: "bleu", + }, + { + description: "", + id: "sacrebleu", + }, + ], + models: [ + { + description: "A model that translates from English to French.", + id: "Helsinki-NLP/opus-mt-en-fr", + }, + { + description: + "A general-purpose Transformer that can be used to translate from English to German, French, or Romanian.", + id: "t5-base", + }, + ], + spaces: [ + { + description: "An application that can translate between 100 languages.", + id: "Iker/Translate-100-languages", + }, + { + description: "An application that can translate between English, Spanish and Hindi.", + id: "EuroPython2022/Translate-with-Bloom", + }, + ], + summary: "Translation is the task of converting text from one language to another.", + widgetModels: ["t5-small"], + youtubeId: "1JvfrvZgi6c", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/translation/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/translation/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..d5ea57eedf477cdb8d2940fde11d50cc5394e549 --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/src/tasks/translation/inference.ts @@ -0,0 +1,56 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ + +/** + * Inputs for Translation inference + * + * Inputs for Text2text Generation inference + */ +export interface TranslationInput { + /** + * The input text data + */ + inputs: string; + /** + * Additional inference parameters + */ + parameters?: Text2TextGenerationParameters; + [property: string]: unknown; +} + +/** + * Additional inference parameters + * + * Additional inference parameters for Text2text Generation + */ +export interface Text2TextGenerationParameters { + /** + * Whether to clean up the potential extra spaces in the text output. + */ + clean_up_tokenization_spaces?: boolean; + /** + * Additional parametrization of the text generation algorithm + */ + generate_parameters?: { [key: string]: unknown }; + /** + * The truncation strategy to use + */ + truncation?: Text2TextGenerationTruncationStrategy; + [property: string]: unknown; +} + +export type Text2TextGenerationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second"; + +/** + * Outputs of inference for the Translation task + */ +export interface TranslationOutput { + /** + * The translated text. 
+ */ + translation_text: string; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/translation/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/translation/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..0695bc6728994e3b5ff72e62c517ac038b6871ad --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/translation/spec/input.json @@ -0,0 +1,7 @@ +{ + "$ref": "/inference/schemas/text2text-generation/input.json", + "$id": "/inference/schemas/translation/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "title": "TranslationInput", + "description": "Inputs for Translation inference" +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/translation/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/translation/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..976c3641eb32a07865d2efc921242ade895da3c0 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/translation/spec/output.json @@ -0,0 +1,14 @@ +{ + "$id": "/inference/schemas/translation/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Translation task", + "title": "TranslationOutput", + "type": "object", + "properties": { + "translation_text": { + "type": "string", + "description": "The translated text." 
+ } + }, + "required": ["translation_text"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/unconditional-image-generation/about.md b/data/node_modules/@huggingface/tasks/src/tasks/unconditional-image-generation/about.md new file mode 100644 index 0000000000000000000000000000000000000000..e5a9585528ae0afd0bc779d0d8628ceca167376e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/unconditional-image-generation/about.md @@ -0,0 +1,50 @@ +## About the Task + +Unconditional image generation is the task of generating new images without any specific input. The main goal of this is to create novel, original images that are not based on existing images. +This can be used for a variety of applications, such as creating new artistic images, improving image recognition algorithms, or generating photorealistic images for virtual reality environments. + +Unconditional image generation models usually start with a _seed_ that generates a _random noise vector_. The model will then use this vector to create an output image similar to the images used for training the model. + +An example of unconditional image generation would be generating the image of a face on a model trained with the [CelebA dataset](https://huggingface.co/datasets/huggan/CelebA-HQ) or [generating a butterfly](https://huggingface.co/spaces/huggan/butterfly-gan) on a model trained with the [Smithsonian Butterflies dataset](https://huggingface.co/datasets/ceyda/smithsonian_butterflies). + +[Generative adversarial networks](https://en.wikipedia.org/wiki/Generative_adversarial_network) and [Diffusion](https://huggingface.co/docs/diffusers/index) are common architectures for this task. + +## Use Cases + +Unconditional image generation can be used for a variety of applications. + +### Artistic Expression + +Unconditional image generation can be used to create novel, original artwork that is not based on any existing images. 
This can be used to explore new creative possibilities and produce unique, imaginative images. + +### Data Augmentation + +Unconditional image generation models can be used to generate new images to improve the performance of image recognition algorithms. This makes algorithms more robust and able to handle a broader range of images. + +### Virtual Reality + +Unconditional image generation models can be used to create photorealistic images that can be used in virtual reality environments. This makes the VR experience more immersive and realistic. + +### Medical Imaging + +Unconditional image generation models can generate new medical images, such as CT or MRI scans, that can be used to train and evaluate medical imaging algorithms. This can improve the accuracy and reliability of these algorithms. + +### Industrial Design + +Unconditional image generation models can generate new designs for products, such as clothing or furniture, that are not based on any existing designs. This way, designers can explore new creative possibilities and produce unique, innovative designs. + +## Model Hosting and Inference + +This section should have useful information about Model Hosting and Inference + +## Useful Resources + +- [Hugging Face Diffusion Models Course](https://github.com/huggingface/diffusion-models-class) +- [Getting Started with Diffusers](https://huggingface.co/docs/diffusers/index) +- [Unconditional Image Generation Training](https://huggingface.co/docs/diffusers/training/unconditional_training) + +### Training your own model in just a few seconds + +In this area, you can insert useful information about training the model + +This page was made possible thanks to the efforts of [Someet Sahoo](https://huggingface.co/Someet24) and [Juan Carlos Piñeros](https://huggingface.co/juancopi81). 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/unconditional-image-generation/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/unconditional-image-generation/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..8cbf8a016807af5ca14e04d1416175c95d014757 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/unconditional-image-generation/data.ts @@ -0,0 +1,72 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: + "The CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with 600 images per class.", + id: "cifar100", + }, + { + description: "Multiple images of celebrities, used for facial expression translation.", + id: "CelebA", + }, + ], + demo: { + inputs: [ + { + label: "Seed", + content: "42", + type: "text", + }, + { + label: "Number of images to generate:", + content: "4", + type: "text", + }, + ], + outputs: [ + { + filename: "unconditional-image-generation-output.jpeg", + type: "img", + }, + ], + }, + metrics: [ + { + description: + "The inception score (IS) evaluates the quality of generated images. It measures the diversity of the generated images (the model predictions are evenly distributed across all possible labels) and their 'distinction' or 'sharpness' (the model confidently predicts a single label for each image).", + id: "Inception score (IS)", + }, + { + description: + "The Fréchet Inception Distance (FID) evaluates the quality of images created by a generative model by calculating the distance between feature vectors for real and generated images.", + id: "Frećhet Inception Distance (FID)", + }, + ], + models: [ + { + description: + "High-quality image generation model trained on the CIFAR-10 dataset. 
It synthesizes images of the ten classes presented in the dataset using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.", + id: "google/ddpm-cifar10-32", + }, + { + description: + "High-quality image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes images of faces using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.", + id: "google/ddpm-celebahq-256", + }, + ], + spaces: [ + { + description: "An application that can generate realistic faces.", + id: "CompVis/celeba-latent-diffusion", + }, + ], + summary: + "Unconditional image generation is the task of generating images with no condition in any context (like a prompt text or another image). Once trained, the model will create images that resemble its training data distribution.", + widgetModels: [""], + // TODO: Add related video + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/video-classification/about.md b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..0436a873d3989f5c54fbff555fbfc573d3bd43b9 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/about.md @@ -0,0 +1,37 @@ +## Use Cases + +Video classification models can be used to categorize what a video is all about. + +### Activity Recognition + +Video classification models are used to perform activity recognition which is useful for fitness applications. Activity recognition is also helpful for vision-impaired individuals especially when they're commuting. + +### Video Search + +Models trained in video classification can improve user experience by organizing and categorizing video galleries on the phone or in the cloud, on multiple keywords or tags. 
+ +## Inference + +Below you can find code for inferring with a pre-trained video classification model. + +```python +from transformers import pipeline + +pipe = pipeline(task = "video-classification", model="nateraw/videomae-base-finetuned-ucf101-subset") +pipe("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/basketball.avi?download=true") + +#[{'score': 0.90, 'label': 'BasketballDunk'}, +# {'score': 0.02, 'label': 'BalanceBeam'}, +# ... ] +``` + +## Useful Resources + +- [Developing a simple video classification model](https://keras.io/examples/vision/video_classification) +- [Video classification with Transformers](https://keras.io/examples/vision/video_transformers) +- [Building a video archive](https://www.youtube.com/watch?v=_IeS1m8r6SY) +- [Video classification task guide](https://huggingface.co/docs/transformers/tasks/video_classification) + +### Creating your own video classifier in minutes + +- [Fine-tuning tutorial notebook (PyTorch)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/video_classification.ipynb) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/video-classification/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..f02d2bbed4c826456b736fddb2e705b13bfce3a3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/data.ts @@ -0,0 +1,84 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "Benchmark dataset used for video classification with videos that belong to 400 classes.", + id: "kinetics400", + }, + ], + demo: { + inputs: [ + { + filename: "video-classification-input.gif", + type: "img", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Playing Guitar", + score: 0.514, + }, + { + label: "Playing Tennis", + 
score: 0.193, + }, + { + label: "Cooking", + score: 0.068, + }, + ], + }, + ], + }, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: "", + id: "recall", + }, + { + description: "", + id: "precision", + }, + { + description: "", + id: "f1", + }, + ], + models: [ + { + // TO DO: write description + description: "Strong Video Classification model trained on the Kinects 400 dataset.", + id: "MCG-NJU/videomae-base-finetuned-kinetics", + }, + { + // TO DO: write description + description: "Strong Video Classification model trained on the Kinects 400 dataset.", + id: "microsoft/xclip-base-patch32", + }, + ], + spaces: [ + { + description: "An application that classifies video at different timestamps.", + id: "nateraw/lavila", + }, + { + description: "An application that classifies video.", + id: "fcakyon/video-classification", + }, + ], + summary: + "Video classification is the task of assigning a label or class to an entire video. Videos are expected to have only one class for each video. 
Video classification models take a video as input and return a prediction about which class the video belongs to.", + widgetModels: [], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/video-classification/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..6615b8ddcbd0df5a4a7ebe67d89c93743ffa7d2c --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/inference.ts @@ -0,0 +1,59 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Video Classification inference + */ +export interface VideoClassificationInput { + /** + * The input video data + */ + inputs: unknown; + /** + * Additional inference parameters + */ + parameters?: VideoClassificationParameters; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Video Classification + */ +export interface VideoClassificationParameters { + /** + * The sampling rate used to select frames from the video. + */ + frame_sampling_rate?: number; + function_to_apply?: ClassificationOutputTransform; + /** + * The number of sampled frames to consider for classification. + */ + num_frames?: number; + /** + * When specified, limits the output to the top K most probable classes. + */ + top_k?: number; + [property: string]: unknown; +} +/** + * The function to apply to the model outputs in order to retrieve the scores. + */ +export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none"; +export type VideoClassificationOutput = VideoClassificationOutputElement[]; +/** + * Outputs of inference for the Video Classification task + */ +export interface VideoClassificationOutputElement { + /** + * The predicted class label. 
+ */ + label: string; + /** + * The corresponding probability. + */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/video-classification/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..1fb58e278364bda22840da44d3aedd295a6aa331 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/spec/input.json @@ -0,0 +1,42 @@ +{ + "$id": "/inference/schemas/video-classification/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Video Classification inference", + "title": "VideoClassificationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input video data" + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/VideoClassificationParameters" + } + }, + "$defs": { + "VideoClassificationParameters": { + "title": "VideoClassificationParameters", + "description": "Additional inference parameters for Video Classification", + "type": "object", + "properties": { + "function_to_apply": { + "title": "TextClassificationOutputTransform", + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform" + }, + "num_frames": { + "type": "integer", + "description": "The number of sampled frames to consider for classification." + }, + "frame_sampling_rate": { + "type": "integer", + "description": "The sampling rate used to select frames from the video." + }, + "top_k": { + "type": "integer", + "description": "When specified, limits the output to the top K most probable classes." 
+ } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/video-classification/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..4c24f5d577717994e0b4a8e329a7e063a967cb10 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/video-classification/spec/output.json @@ -0,0 +1,10 @@ +{ + "$id": "/inference/schemas/video-classification/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Video Classification task", + "title": "VideoClassificationOutput", + "type": "array", + "items": { + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutput" + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/about.md b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/about.md new file mode 100644 index 0000000000000000000000000000000000000000..7f96e1679b8a5b46042f5c6e2eb533e80749160f --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/about.md @@ -0,0 +1,48 @@ +## Use Cases + +### Aid the Visually Impaired Persons + +VQA models can be used to reduce visual barriers for visually impaired individuals by allowing them to get information about images from the web and the real world. + +### Education + +VQA models can be used to improve experiences at museums by allowing observers to directly ask questions they interested in. + +### Improved Image Retrieval + +Visual question answering models can be used to retrieve images with specific characteristics. For example, the user can ask "Is there a dog?" to find all images with dogs from a set of images. + +### Video Search + +Specific snippets/timestamps of a video can be retrieved based on search queries. 
For example, the user can ask "At which part of the video does the guitar appear?" and get a specific timestamp range from the whole video. + +## Task Variants + +### Video Question Answering + +Video Question Answering aims to answer questions asked about the content of a video. + +## Inference + +You can infer with Visual Question Answering models using the `vqa` (or `visual-question-answering`) pipeline. This pipeline requires [the Python Image Library (PIL)](https://pillow.readthedocs.io/en/stable/) to process images. You can install it with (`pip install pillow`). + +```python +from PIL import Image +from transformers import pipeline + +vqa_pipeline = pipeline("visual-question-answering") + +image = Image.open("elephant.jpeg") +question = "Is there an elephant?" + +vqa_pipeline(image, question, top_k=1) +#[{'score': 0.9998154044151306, 'answer': 'yes'}] +``` + +## Useful Resources + +- [An introduction to Visual Question Answering - AllenAI](https://blog.allenai.org/vanilla-vqa-adcaaaa94336) +- [Multi Modal Framework (MMF) - Meta Research](https://mmf.sh/docs/getting_started/video_overview/) + +The contents of this page are contributed by [ +Bharat Raghunathan](https://huggingface.co/bharat-raghunathan) and [Jose Londono Botero](https://huggingface.co/jlondonobo). 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..2d94edd425fca987459258877438747153618809 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/data.ts @@ -0,0 +1,97 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A widely used dataset containing questions (with answers) about images.", + id: "Graphcore/vqa", + }, + { + description: "A dataset to benchmark visual reasoning based on text in images.", + id: "textvqa", + }, + ], + demo: { + inputs: [ + { + filename: "elephant.jpeg", + type: "img", + }, + { + label: "Question", + content: "What is in this image?", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "elephant", + score: 0.97, + }, + { + label: "elephants", + score: 0.06, + }, + { + label: "animal", + score: 0.003, + }, + ], + }, + ], + }, + isPlaceholder: false, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: + "Measures how much a predicted answer differs from the ground truth based on the difference in their semantic meaning.", + id: "wu-palmer similarity", + }, + ], + models: [ + { + description: "A visual question answering model trained to convert charts and plots to text.", + id: "google/deplot", + }, + { + description: + "A visual question answering model trained for mathematical reasoning and chart derendering from images.", + id: "google/matcha-base ", + }, + { + description: "A strong visual question answering that answers questions from book covers.", + id: "google/pix2struct-ocrvqa-large", + }, + ], + spaces: [ + { + description: "An application that compares visual question answering models across different tasks.", + id: "merve/pix2struct", + }, + { + description: "An application 
that can answer questions based on images.", + id: "nielsr/vilt-vqa", + }, + { + description: "An application that can caption images and answer questions about a given image. ", + id: "Salesforce/BLIP", + }, + { + description: "An application that can caption images and answer questions about a given image. ", + id: "vumichien/Img2Prompt", + }, + ], + summary: + "Visual Question Answering is the task of answering open-ended questions based on an image. They output natural language responses to natural language questions.", + widgetModels: ["dandelin/vilt-b32-finetuned-vqa"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..7adc07ae02ab0993a6f40b8ecab7bceeb7be441e --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/inference.ts @@ -0,0 +1,63 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Visual Question Answering inference + */ +export interface VisualQuestionAnsweringInput { + /** + * One (image, question) pair to answer + */ + inputs: VisualQuestionAnsweringInputData; + /** + * Additional inference parameters + */ + parameters?: VisualQuestionAnsweringParameters; + [property: string]: unknown; +} +/** + * One (image, question) pair to answer + */ +export interface VisualQuestionAnsweringInputData { + /** + * The image. + */ + image: unknown; + /** + * The question to answer based on the image. 
+ */ + question: unknown; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Visual Question Answering + */ +export interface VisualQuestionAnsweringParameters { + /** + * The number of answers to return (will be chosen by order of likelihood). Note that we + * return less than topk answers if there are not enough options available within the + * context. + */ + top_k?: number; + [property: string]: unknown; +} +export type VisualQuestionAnsweringOutput = VisualQuestionAnsweringOutputElement[]; +/** + * Outputs of inference for the Visual Question Answering task + */ +export interface VisualQuestionAnsweringOutputElement { + /** + * The answer to the question + */ + answer?: string; + label: unknown; + /** + * The associated score / probability + */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..9f9dab121ca0f9d2290173b4cc9bf1f20de7bf15 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/spec/input.json @@ -0,0 +1,41 @@ +{ + "$id": "/inference/schemas/visual-question-answering/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Visual Question Answering inference", + "title": "VisualQuestionAnsweringInput", + "type": "object", + "properties": { + "inputs": { + "description": "One (image, question) pair to answer", + "type": "object", + "title": "VisualQuestionAnsweringInputData", + "properties": { + "image": { + "description": "The image." + }, + "question": { + "description": "The question to answer based on the image." 
+ } + }, + "required": ["question", "image"] + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/VisualQuestionAnsweringParameters" + } + }, + "$defs": { + "VisualQuestionAnsweringParameters": { + "title": "VisualQuestionAnsweringParameters", + "description": "Additional inference parameters for Visual Question Answering", + "type": "object", + "properties": { + "top_k": { + "type": "integer", + "description": "The number of answers to return (will be chosen by order of likelihood). Note that we return less than topk answers if there are not enough options available within the context." + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..32c9c6c26b8134412588731fdb894799a3f107e3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/visual-question-answering/spec/output.json @@ -0,0 +1,21 @@ +{ + "$id": "/inference/schemas/visual-question-answering/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Visual Question Answering task", + "title": "VisualQuestionAnsweringOutput", + "type": "array", + "items": { + "type": "object", + "properties": { + "answer": { + "type": "string", + "description": "The answer to the question" + }, + "score": { + "type": "number", + "description": "The associated score / probability" + } + }, + "required": ["label", "score"] + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/about.md b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..9b7ff3c48c931d3355c76aed20b891fe8f57c54b --- /dev/null +++ 
b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/about.md @@ -0,0 +1,40 @@ +## About the Task + +Zero Shot Classification is the task of predicting a class that wasn't seen by the model during training. This method, which leverages a pre-trained language model, can be thought of as an instance of [transfer learning](https://www.youtube.com/watch?v=BqqfQnyjmgg) which generally refers to using a model trained for one task in a different application than what it was originally trained for. This is particularly useful for situations where the amount of labeled data is small. + +In zero shot classification, we provide the model with a prompt and a sequence of text that describes what we want our model to do, in natural language. Zero-shot classification excludes any examples of the desired task being completed. This differs from single or few-shot classification, as these tasks include a single or a few examples of the selected task. + +Zero, single and few-shot classification seem to be an emergent feature of large language models. This feature seems to come about around model sizes of +100M parameters. The effectiveness of a model at a zero, single or few-shot task seems to scale with model size, meaning that larger models (models with more trainable parameters or layers) generally do better at this task. + +Here is an example of a zero-shot prompt for classifying the sentiment of a sequence of text: + +``` +Classify the following input text into one of the following three categories: [positive, negative, neutral] + +Input Text: Hugging Face is awesome for making all of these +state of the art models available! +Sentiment: positive + +``` + +One great example of this task with a nice off-the-shelf model is available at the widget of this page, where the user can input a sequence of text and candidate labels to the model. 
This is a _word level_ example of zero shot classification, more elaborate and lengthy generations are available with larger models. Testing these models out and getting a feel for prompt engineering is the best way to learn how to use them. + +## Inference + +You can use the 🤗 Transformers library zero-shot-classification pipeline to infer with zero shot text classification models. + +```python +from transformers import pipeline + +pipe = pipeline(model="facebook/bart-large-mnli") +pipe("I have a problem with my iphone that needs to be resolved asap!", + candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], +) +# output +>>> {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} +``` + +## Useful Resources + +- [Zero Shot Learning](https://joeddav.github.io/blog/2020/05/29/ZSL.html) +- [Hugging Face on Transfer Learning](https://huggingface.co/course/en/chapter1/4?fw=pt#transfer-learning) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..0def512406232d8dba2a448d531ca9196deee654 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/data.ts @@ -0,0 +1,66 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A widely used dataset used to benchmark multiple variants of text classification.", + id: "glue", + }, + { + description: + "The Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced collection of 433k sentence pairs annotated with textual entailment information.", + id: "MultiNLI", + }, + { + description: + "FEVER is a publicly available dataset for fact extraction and verification against 
textual sources.", + id: "FEVER", + }, + ], + demo: { + inputs: [ + { + label: "Text Input", + content: "Dune is the best movie ever.", + type: "text", + }, + { + label: "Candidate Labels", + content: "CINEMA, ART, MUSIC", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "CINEMA", + score: 0.9, + }, + { + label: "ART", + score: 0.1, + }, + { + label: "MUSIC", + score: 0.0, + }, + ], + }, + ], + }, + metrics: [], + models: [ + { + description: "Powerful zero-shot text classification model", + id: "facebook/bart-large-mnli", + }, + ], + spaces: [], + summary: + "Zero-shot text classification is a task in natural language processing where a model is trained on a set of labeled examples but is then able to classify new examples from previously unseen classes.", + widgetModels: ["facebook/bart-large-mnli"], +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..20e0d369a2cfdd1b4903e4817f611159ae8f8d57 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/inference.ts @@ -0,0 +1,67 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Zero Shot Classification inference + */ +export interface ZeroShotClassificationInput { + /** + * The input text data, with candidate labels + */ + inputs: ZeroShotClassificationInputData; + /** + * Additional inference parameters + */ + parameters?: ZeroShotClassificationParameters; + [property: string]: unknown; +} +/** + * The input text data, with candidate labels + */ +export interface ZeroShotClassificationInputData { + /** + * The set of possible class labels to classify the text into. 
+ */ + candidateLabels: string[]; + /** + * The text to classify + */ + text: string; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Zero Shot Classification + */ +export interface ZeroShotClassificationParameters { + /** + * The sentence used in conjunction with candidateLabels to attempt the text classification + * by replacing the placeholder with the candidate labels. + */ + hypothesis_template?: string; + /** + * Whether multiple candidate labels can be true. If false, the scores are normalized such + * that the sum of the label likelihoods for each sequence is 1. If true, the labels are + * considered independent and probabilities are normalized for each candidate. + */ + multi_label?: boolean; + [property: string]: unknown; +} +export type ZeroShotClassificationOutput = ZeroShotClassificationOutputElement[]; +/** + * Outputs of inference for the Zero Shot Classification task + */ +export interface ZeroShotClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. 
+ */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..c955f2769f4c44c34dcb2e021fd99010c036cc45 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/spec/input.json @@ -0,0 +1,50 @@ +{ + "$id": "/inference/schemas/zero-shot-classification/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Zero Shot Classification inference", + "title": "ZeroShotClassificationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input text data, with candidate labels", + "type": "object", + "title": "ZeroShotClassificationInputData", + "properties": { + "text": { + "type": "string", + "description": "The text to classify" + }, + "candidateLabels": { + "type": "array", + "description": "The set of possible class labels to classify the text into.", + "items": { + "type": "string" + } + } + }, + "required": ["text", "candidateLabels"] + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/ZeroShotClassificationParameters" + } + }, + "$defs": { + "ZeroShotClassificationParameters": { + "title": "ZeroShotClassificationParameters", + "description": "Additional inference parameters for Zero Shot Classification", + "type": "object", + "properties": { + "hypothesis_template": { + "type": "string", + "description": "The sentence used in conjunction with candidateLabels to attempt the text classification by replacing the placeholder with the candidate labels." + }, + "multi_label": { + "type": "boolean", + "description": "Whether multiple candidate labels can be true. If false, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. 
If true, the labels are considered independent and probabilities are normalized for each candidate." + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..83ed1098fd139fe5373a5c5065596d3c1fffd491 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-classification/spec/output.json @@ -0,0 +1,10 @@ +{ + "$id": "/inference/schemas/zero-shot-classification/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Zero Shot Classification task", + "title": "ZeroShotClassificationOutput", + "type": "array", + "items": { + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutput" + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/about.md b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..9cf273b299be7828ea2c75fd0da6f50d65c50029 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/about.md @@ -0,0 +1,75 @@ +## About the Task + +Zero-shot image classification is a computer vision task to classify images into one of several classes, without any prior training or knowledge of the classes. + +Zero shot image classification works by transferring knowledge learnt during training of one model, to classify novel classes that was not present in the training data. So this is a variation of [transfer learning](https://www.youtube.com/watch?v=BqqfQnyjmgg). For instance, a model trained to differentiate cars from airplanes can be used to classify images of ships. 
+ +The data in this learning paradigm consists of + +- Seen data - images and their corresponding labels +- Unseen data - only labels and no images +- Auxiliary information - additional information given to the model during training connecting the unseen and seen data. This can be in the form of textual description or word embeddings. + +## Use Cases + +### Image Retrieval + +Zero-shot learning resolves several challenges in image retrieval systems. For example, with the rapid growth of categories on the web, it is challenging to index images based on unseen categories. With zero-shot learning we can associate unseen categories to images by exploiting attributes to model the relationships among visual features and labels. + +### Action Recognition + +Action recognition is the task of identifying when a person in an image/video is performing a given action from a set of actions. If all the possible actions are not known beforehand, conventional deep learning models fail. With zero-shot learning, for a given domain of a set of actions, we can create a mapping connecting low-level features and a semantic description of auxiliary data to classify unknown classes of actions. + +## Task Variants + +You can contribute variants of this task [here](https://github.com/huggingface/hub-docs/blob/main/tasks/src/zero-shot-image-classification/about.md). + +## Inference + +The model can be loaded with the zero-shot-image-classification pipeline like so: + +```python +from transformers import pipeline +# More models in the model hub. +model_name = "openai/clip-vit-large-patch14-336" +classifier = pipeline("zero-shot-image-classification", model = model_name) +``` + +You can then use this pipeline to classify images into any of the class names you specify. You can specify more than two class labels too. 
+ +```python +image_to_classify = "path_to_cat_and_dog_image.jpeg" +labels_for_classification = ["cat and dog", + "lion and cheetah", + "rabbit and lion"] +scores = classifier(image_to_classify, + candidate_labels = labels_for_classification) +``` + +The classifier would return a list of dictionaries after the inference which is stored in the variable `scores` in the code snippet above. Variable `scores` would look as follows: + +```python +[{'score': 0.9950482249259949, 'label': 'cat and dog'}, +{'score': 0.004863627254962921, 'label': 'rabbit and lion'}, +{'score': 8.816882473183796e-05, 'label': 'lion and cheetah'}] +``` + +The dictionary at the zeroth index of the list will contain the label with the highest score. + +```python +print(f"The highest score is {scores[0]['score']:.3f} for the label {scores[0]['label']}") +``` + +The output from the print statement above would look as follows: + +``` +The highest probability is 0.995 for the label cat and dog +``` + +## Useful Resources + +- [Zero-shot image classification task guide](https://huggingface.co/docs/transformers/tasks/zero_shot_image_classification). +- [Image-text Similarity Search](https://huggingface.co/learn/cookbook/faiss_with_hf_datasets_and_clip) + +This page was made possible thanks to the efforts of [Shamima Hossain](https://huggingface.co/Shamima), [Haider Zaidi +](https://huggingface.co/chefhaider) and [Paarth Bhatnagar](https://huggingface.co/Paarth). 
diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..bc7b6aab30a9c2937d781f7d356bbc65d0dd36d8 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/data.ts @@ -0,0 +1,84 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "", + id: "", + }, + ], + demo: { + inputs: [ + { + filename: "image-classification-input.jpeg", + type: "img", + }, + { + label: "Classes", + content: "cat, dog, bird", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Cat", + score: 0.664, + }, + { + label: "Dog", + score: 0.329, + }, + { + label: "Bird", + score: 0.008, + }, + ], + }, + ], + }, + metrics: [ + { + description: "Computes the number of times the correct label appears in top K labels predicted", + id: "top-K accuracy", + }, + ], + models: [ + { + description: "Robust image classification model trained on publicly available image-caption data.", + id: "openai/clip-vit-base-patch16", + }, + { + description: "Strong zero-shot image classification model.", + id: "google/siglip-base-patch16-224", + }, + { + description: "Small yet powerful zero-shot image classification model that can run on edge devices.", + id: "apple/MobileCLIP-S1-OpenCLIP", + }, + { + description: "Strong image classification model for biomedical domain.", + id: "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224", + }, + ], + spaces: [ + { + description: + "An application that leverages zero-shot image classification to find best captions to generate an image. ", + id: "pharma/CLIP-Interrogator", + }, + { + description: "An application to compare different zero-shot image classification models. 
", + id: "merve/compare_clip_siglip", + }, + ], + summary: + "Zero-shot image classification is the task of classifying previously unseen classes during training of a model.", + widgetModels: ["openai/clip-vit-large-patch14-336"], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..44ce76173503e6403626b0ae1244e2121b0be2b1 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/inference.ts @@ -0,0 +1,61 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Zero Shot Image Classification inference + */ +export interface ZeroShotImageClassificationInput { + /** + * The input image data, with candidate labels + */ + inputs: ZeroShotImageClassificationInputData; + /** + * Additional inference parameters + */ + parameters?: ZeroShotImageClassificationParameters; + [property: string]: unknown; +} +/** + * The input image data, with candidate labels + */ +export interface ZeroShotImageClassificationInputData { + /** + * The candidate labels for this image + */ + candidateLabels: string[]; + /** + * The image data to classify + */ + image: unknown; + [property: string]: unknown; +} +/** + * Additional inference parameters + * + * Additional inference parameters for Zero Shot Image Classification + */ +export interface ZeroShotImageClassificationParameters { + /** + * The sentence used in conjunction with candidateLabels to attempt the text classification + * by replacing the placeholder with the candidate labels. 
+ */ + hypothesis_template?: string; + [property: string]: unknown; +} +export type ZeroShotImageClassificationOutput = ZeroShotImageClassificationOutputElement[]; +/** + * Outputs of inference for the Zero Shot Image Classification task + */ +export interface ZeroShotImageClassificationOutputElement { + /** + * The predicted class label. + */ + label: string; + /** + * The corresponding probability. + */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..dfdababc7018e9a46354813f77a839f6d48400c4 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/spec/input.json @@ -0,0 +1,45 @@ +{ + "$id": "/inference/schemas/zero-shot-image-classification/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Zero Shot Image Classification inference", + "title": "ZeroShotImageClassificationInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input image data, with candidate labels", + "type": "object", + "title": "ZeroShotImageClassificationInputData", + "properties": { + "image": { + "description": "The image data to classify" + }, + "candidateLabels": { + "description": "The candidate labels for this image", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["image", "candidateLabels"] + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/ZeroShotImageClassificationParameters" + } + }, + "$defs": { + "ZeroShotImageClassificationParameters": { + "title": "ZeroShotImageClassificationParameters", + "description": "Additional inference parameters for Zero Shot Image Classification", + "type": "object", + "properties": { + 
"hypothesis_template": { + "type": "string", + "description": "The sentence used in conjunction with candidateLabels to attempt the text classification by replacing the placeholder with the candidate labels." + } + } + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/spec/output.json new file mode 100644 index 0000000000000000000000000000000000000000..6b795fbdbae8b566845fb424f30a7d7908609358 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-image-classification/spec/output.json @@ -0,0 +1,10 @@ +{ + "$id": "/inference/schemas/zero-shot-image-classification/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Zero Shot Image Classification task", + "title": "ZeroShotImageClassificationOutput", + "type": "array", + "items": { + "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutput" + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/about.md b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/about.md new file mode 100644 index 0000000000000000000000000000000000000000..46c4bf7c169fc3664b89d6b1243b47d9b4cdc7aa --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/about.md @@ -0,0 +1,45 @@ +## Use Cases + +Zero-shot object detection models can be used in any object detection application where the detection involves text queries for objects of interest. + +### Object Search + +Zero-shot object detection models can be used in image search. Smartphones, for example, use zero-shot object detection models to detect entities (such as specific places or objects) and allow the user to search for the entity on the internet. 
+ +### Object Counting + +Zero-shot object detection models are used to count instances of objects in a given image. This can include counting the objects in warehouses or stores or the number of visitors in a store. They are also used to manage crowds at events to prevent disasters. + +### Object Tracking + +Zero-shot object detectors can track objects in videos. + +## Inference + +You can infer with zero-shot object detection models through the `zero-shot-object-detection` pipeline. When calling the pipeline, you just need to specify a path or HTTP link to an image and the candidate labels. + +```python +from transformers import pipeline +from PIL import Image + +image = Image.open("my-image.png").convert("RGB") + +detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection") + +predictions = detector( + image, + candidate_labels=["a photo of a cat", "a photo of a dog"], +) + +# [{'score': 0.95, +# 'label': 'a photo of a cat', +# 'box': {'xmin': 180, 'ymin': 71, 'xmax': 271, 'ymax': 178}}, +# ... 
+# ] +``` + +# Useful Resources + +- [Zero-shot object detection task guide](https://huggingface.co/docs/transformers/tasks/zero_shot_object_detection) + +This page was made possible thanks to the efforts of [Victor Guichard](https://huggingface.co/VictorGuichard) diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/data.ts b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..9e36cad4653bb8fcb8bba8b4dbb8c31c28546fe3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/data.ts @@ -0,0 +1,67 @@ +import type { TaskDataCustom } from ".."; + +const taskData: TaskDataCustom = { + datasets: [], + demo: { + inputs: [ + { + filename: "zero-shot-object-detection-input.jpg", + type: "img", + }, + { + label: "Classes", + content: "cat, dog, bird", + type: "text", + }, + ], + outputs: [ + { + filename: "zero-shot-object-detection-output.jpg", + type: "img", + }, + ], + }, + metrics: [ + { + description: + "The Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). 
It is calculated for each class separately", + id: "Average Precision", + }, + { + description: "The Mean Average Precision (mAP) metric is the overall average of the AP values", + id: "Mean Average Precision", + }, + { + description: + "The APα metric is the Average Precision at the IoU threshold of a α value, for example, AP50 and AP75", + id: "APα", + }, + ], + models: [ + { + description: "Solid zero-shot object detection model.", + id: "IDEA-Research/grounding-dino-base", + }, + { + description: "Cutting-edge zero-shot object detection model.", + id: "google/owlv2-base-patch16-ensemble", + }, + ], + spaces: [ + { + description: "A demo to try the state-of-the-art zero-shot object detection model, OWLv2.", + id: "merve/owlv2", + }, + { + description: + "A demo that combines a zero-shot object detection and mask generation model for zero-shot segmentation.", + id: "merve/OWLSAM", + }, + ], + summary: + "Zero-shot object detection is a computer vision task to detect objects and their classes in images, without any prior training or knowledge of the classes. 
Zero-shot object detection models receive an image as input, as well as a list of candidate classes, and output the bounding boxes and labels where the objects have been detected.", + widgetModels: [], + youtubeId: "", +}; + +export default taskData; diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/inference.ts b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/inference.ts new file mode 100644 index 0000000000000000000000000000000000000000..87447ca0adeaa5c155a0d220a0362938d73dbae0 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/inference.ts @@ -0,0 +1,66 @@ +/** + * Inference code generated from the JSON schema spec in ./spec + * + * Using src/scripts/inference-codegen + */ +/** + * Inputs for Zero Shot Object Detection inference + */ +export interface ZeroShotObjectDetectionInput { + /** + * The input image data, with candidate labels + */ + inputs: ZeroShotObjectDetectionInputData; + /** + * Additional inference parameters + */ + parameters?: { + [key: string]: unknown; + }; + [property: string]: unknown; +} +/** + * The input image data, with candidate labels + */ +export interface ZeroShotObjectDetectionInputData { + /** + * The candidate labels for this image + */ + candidateLabels: string[]; + /** + * The image data to generate bounding boxes from + */ + image: unknown; + [property: string]: unknown; +} +/** + * The predicted bounding box. Coordinates are relative to the top left corner of the input + * image. + */ +export interface BoundingBox { + xmax: number; + xmin: number; + ymax: number; + ymin: number; + [property: string]: unknown; +} +export type ZeroShotObjectDetectionOutput = ZeroShotObjectDetectionOutputElement[]; +/** + * Outputs of inference for the Zero Shot Object Detection task + */ +export interface ZeroShotObjectDetectionOutputElement { + /** + * The predicted bounding box. 
Coordinates are relative to the top left corner of the input + * image. + */ + box: BoundingBox; + /** + * A candidate label + */ + label: string; + /** + * The associated score / probability + */ + score: number; + [property: string]: unknown; +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/spec/input.json b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/spec/input.json new file mode 100644 index 0000000000000000000000000000000000000000..7c9aa15acb70a5c651764336374ce31883ee97ed --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/spec/input.json @@ -0,0 +1,40 @@ +{ + "$id": "/inference/schemas/zero-shot-object-detection/input.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Inputs for Zero Shot Object Detection inference", + "title": "ZeroShotObjectDetectionInput", + "type": "object", + "properties": { + "inputs": { + "description": "The input image data, with candidate labels", + "type": "object", + "title": "ZeroShotObjectDetectionInputData", + "properties": { + "image": { + "description": "The image data to generate bounding boxes from" + }, + "candidateLabels": { + "description": "The candidate labels for this image", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["image", "candidateLabels"] + }, + "parameters": { + "description": "Additional inference parameters", + "$ref": "#/$defs/ZeroShotObjectDetectionParameters" + } + }, + "$defs": { + "ZeroShotObjectDetectionParameters": { + "title": "ZeroShotObjectDetectionParameters", + "description": "Additional inference parameters for Zero Shot Object Detection", + "type": "object", + "properties": {} + } + }, + "required": ["inputs"] +} diff --git a/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/spec/output.json b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/spec/output.json new file mode 
100644 index 0000000000000000000000000000000000000000..8afa6052769f617ae365348d4d560ee43095ae4a --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tasks/zero-shot-object-detection/spec/output.json @@ -0,0 +1,47 @@ +{ + "$id": "/inference/schemas/zero-shot-object-detection/output.json", + "$schema": "http://json-schema.org/draft-06/schema#", + "description": "Outputs of inference for the Zero Shot Object Detection task", + "title": "ZeroShotObjectDetectionOutput", + "type": "array", + "items": { + "type": "object", + "title": "ZeroShotObjectDetectionOutputElement", + "properties": { + "label": { + "type": "string", + "description": "A candidate label" + }, + "score": { + "type": "number", + "description": "The associated score / probability" + }, + "box": { + "$ref": "#/$defs/BoundingBox", + "description": "The predicted bounding box. Coordinates are relative to the top left corner of the input image." + } + }, + "required": ["box", "label", "score"] + }, + "$defs": { + "BoundingBox": { + "title": "BoundingBox", + "type": "object", + "properties": { + "xmin": { + "type": "integer" + }, + "xmax": { + "type": "integer" + }, + "ymin": { + "type": "integer" + }, + "ymax": { + "type": "integer" + } + }, + "required": ["xmin", "xmax", "ymin", "ymax"] + } + } +} diff --git a/data/node_modules/@huggingface/tasks/src/tokenizer-data.ts b/data/node_modules/@huggingface/tasks/src/tokenizer-data.ts new file mode 100644 index 0000000000000000000000000000000000000000..6be41e8f60df763bf854feea8ca8ed689794a609 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/tokenizer-data.ts @@ -0,0 +1,32 @@ +export const SPECIAL_TOKENS_ATTRIBUTES = [ + "bos_token", + "eos_token", + "unk_token", + "sep_token", + "pad_token", + "cls_token", + "mask_token", + // additional_special_tokens (TODO) +] as const; + +/** + * Public interface for a tokenizer's special tokens mapping + */ +export interface AddedToken { + __type: "AddedToken"; + content?: string; + lstrip?: boolean; + 
normalized?: boolean; + rstrip?: boolean; + single_word?: boolean; +} +export type SpecialTokensMap = { + [key in (typeof SPECIAL_TOKENS_ATTRIBUTES)[number]]?: string | AddedToken | null; +}; +/** + * Public interface for tokenizer config + */ +export interface TokenizerConfig extends SpecialTokensMap { + use_default_system_prompt?: boolean; + chat_template?: string | Array<{ name: string; template: string }>; +} diff --git a/data/node_modules/@huggingface/tasks/src/widget-example.ts b/data/node_modules/@huggingface/tasks/src/widget-example.ts new file mode 100644 index 0000000000000000000000000000000000000000..3c47530f431fd77ba571aeace8186548969ea1da --- /dev/null +++ b/data/node_modules/@huggingface/tasks/src/widget-example.ts @@ -0,0 +1,125 @@ +/** + * See default-widget-inputs.ts for the default widget inputs, this files only contains the types + */ + +import type { ChatCompletionInputMessage } from "./tasks"; + +type TableData = Record; + +//#region outputs +export type WidgetExampleOutputLabels = Array<{ label: string; score: number }>; +export interface WidgetExampleOutputAnswerScore { + answer: string; + score: number; +} +export interface WidgetExampleOutputText { + text: string; +} +export interface WidgetExampleOutputUrl { + url: string; +} + +export type WidgetExampleOutput = + | WidgetExampleOutputLabels + | WidgetExampleOutputAnswerScore + | WidgetExampleOutputText + | WidgetExampleOutputUrl; +//#endregion + +export interface WidgetExampleBase { + example_title?: string; + group?: string; + /** + * Potential overrides to API parameters for this specific example + * (takes precedences over the model card metadata's inference.parameters) + */ + parameters?: { + /// token-classification + aggregation_strategy?: string; + /// text-generation + top_k?: number; + top_p?: number; + temperature?: number; + max_new_tokens?: number; + do_sample?: boolean; + /// text-to-image + negative_prompt?: string; + guidance_scale?: number; + num_inference_steps?: number; 
+ }; + /** + * Optional output + */ + output?: TOutput; +} + +export interface WidgetExampleChatInput extends WidgetExampleBase { + messages: ChatCompletionInputMessage[]; +} + +export interface WidgetExampleTextInput extends WidgetExampleBase { + text: string; +} + +export interface WidgetExampleTextAndContextInput + extends WidgetExampleTextInput { + context: string; +} + +export interface WidgetExampleTextAndTableInput extends WidgetExampleTextInput { + table: TableData; +} + +export interface WidgetExampleAssetInput extends WidgetExampleBase { + src: string; +} +export interface WidgetExampleAssetAndPromptInput + extends WidgetExampleAssetInput { + prompt: string; +} + +export type WidgetExampleAssetAndTextInput = WidgetExampleAssetInput & + WidgetExampleTextInput; + +export type WidgetExampleAssetAndZeroShotInput = WidgetExampleAssetInput & + WidgetExampleZeroShotTextInput; + +export interface WidgetExampleStructuredDataInput extends WidgetExampleBase { + structured_data: TableData; +} + +export interface WidgetExampleTableDataInput extends WidgetExampleBase { + table: TableData; +} + +export interface WidgetExampleZeroShotTextInput extends WidgetExampleTextInput { + text: string; + candidate_labels: string; + multi_class: boolean; +} + +export interface WidgetExampleSentenceSimilarityInput + extends WidgetExampleBase { + source_sentence: string; + sentences: string[]; +} + +//#endregion + +export type WidgetExample = + | WidgetExampleChatInput + | WidgetExampleTextInput + | WidgetExampleTextAndContextInput + | WidgetExampleTextAndTableInput + | WidgetExampleAssetInput + | WidgetExampleAssetAndPromptInput + | WidgetExampleAssetAndTextInput + | WidgetExampleAssetAndZeroShotInput + | WidgetExampleStructuredDataInput + | WidgetExampleTableDataInput + | WidgetExampleZeroShotTextInput + | WidgetExampleSentenceSimilarityInput; + +type KeysOfUnion = T extends unknown ? 
keyof T : never; + +export type WidgetExampleAttribute = KeysOfUnion; diff --git a/data/node_modules/@huggingface/tasks/tsconfig.json b/data/node_modules/@huggingface/tasks/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..6a2bad7587dc09288e0a2dfd18e068fa106d09b3 --- /dev/null +++ b/data/node_modules/@huggingface/tasks/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "allowSyntheticDefaultImports": true, + "lib": ["ES2022", "DOM"], + "module": "ESNext", + "target": "ESNext", + "moduleResolution": "node", + "forceConsistentCasingInFileNames": true, + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "skipLibCheck": true, + "noImplicitOverride": true, + "outDir": "./dist", + "declaration": true, + "declarationMap": true + }, + "include": ["src", "scripts"], + "exclude": ["dist"] +} diff --git a/data/node_modules/accepts/HISTORY.md b/data/node_modules/accepts/HISTORY.md new file mode 100644 index 0000000000000000000000000000000000000000..cb5990c7c3620f4936a3ac42b3bf335c95eef7e8 --- /dev/null +++ b/data/node_modules/accepts/HISTORY.md @@ -0,0 +1,243 @@ +1.3.8 / 2022-02-02 +================== + + * deps: mime-types@~2.1.34 + - deps: mime-db@~1.51.0 + * deps: negotiator@0.6.3 + +1.3.7 / 2019-04-29 +================== + + * deps: negotiator@0.6.2 + - Fix sorting charset, encoding, and language with extra parameters + +1.3.6 / 2019-04-28 +================== + + * deps: mime-types@~2.1.24 + - deps: mime-db@~1.40.0 + +1.3.5 / 2018-02-28 +================== + + * deps: mime-types@~2.1.18 + - deps: mime-db@~1.33.0 + +1.3.4 / 2017-08-22 +================== + + * deps: mime-types@~2.1.16 + - deps: mime-db@~1.29.0 + +1.3.3 / 2016-05-02 +================== + + * deps: mime-types@~2.1.11 + - deps: mime-db@~1.23.0 + * deps: negotiator@0.6.1 + - perf: improve `Accept` parsing speed + - perf: improve `Accept-Charset` parsing speed + - perf: improve `Accept-Encoding` parsing speed + - perf: improve `Accept-Language` 
parsing speed + +1.3.2 / 2016-03-08 +================== + + * deps: mime-types@~2.1.10 + - Fix extension of `application/dash+xml` + - Update primary extension for `audio/mp4` + - deps: mime-db@~1.22.0 + +1.3.1 / 2016-01-19 +================== + + * deps: mime-types@~2.1.9 + - deps: mime-db@~1.21.0 + +1.3.0 / 2015-09-29 +================== + + * deps: mime-types@~2.1.7 + - deps: mime-db@~1.19.0 + * deps: negotiator@0.6.0 + - Fix including type extensions in parameters in `Accept` parsing + - Fix parsing `Accept` parameters with quoted equals + - Fix parsing `Accept` parameters with quoted semicolons + - Lazy-load modules from main entry point + - perf: delay type concatenation until needed + - perf: enable strict mode + - perf: hoist regular expressions + - perf: remove closures getting spec properties + - perf: remove a closure from media type parsing + - perf: remove property delete from media type parsing + +1.2.13 / 2015-09-06 +=================== + + * deps: mime-types@~2.1.6 + - deps: mime-db@~1.18.0 + +1.2.12 / 2015-07-30 +=================== + + * deps: mime-types@~2.1.4 + - deps: mime-db@~1.16.0 + +1.2.11 / 2015-07-16 +=================== + + * deps: mime-types@~2.1.3 + - deps: mime-db@~1.15.0 + +1.2.10 / 2015-07-01 +=================== + + * deps: mime-types@~2.1.2 + - deps: mime-db@~1.14.0 + +1.2.9 / 2015-06-08 +================== + + * deps: mime-types@~2.1.1 + - perf: fix deopt during mapping + +1.2.8 / 2015-06-07 +================== + + * deps: mime-types@~2.1.0 + - deps: mime-db@~1.13.0 + * perf: avoid argument reassignment & argument slice + * perf: avoid negotiator recursive construction + * perf: enable strict mode + * perf: remove unnecessary bitwise operator + +1.2.7 / 2015-05-10 +================== + + * deps: negotiator@0.5.3 + - Fix media type parameter matching to be case-insensitive + +1.2.6 / 2015-05-07 +================== + + * deps: mime-types@~2.0.11 + - deps: mime-db@~1.9.1 + * deps: negotiator@0.5.2 + - Fix comparing media types with 
quoted values + - Fix splitting media types with quoted commas + +1.2.5 / 2015-03-13 +================== + + * deps: mime-types@~2.0.10 + - deps: mime-db@~1.8.0 + +1.2.4 / 2015-02-14 +================== + + * Support Node.js 0.6 + * deps: mime-types@~2.0.9 + - deps: mime-db@~1.7.0 + * deps: negotiator@0.5.1 + - Fix preference sorting to be stable for long acceptable lists + +1.2.3 / 2015-01-31 +================== + + * deps: mime-types@~2.0.8 + - deps: mime-db@~1.6.0 + +1.2.2 / 2014-12-30 +================== + + * deps: mime-types@~2.0.7 + - deps: mime-db@~1.5.0 + +1.2.1 / 2014-12-30 +================== + + * deps: mime-types@~2.0.5 + - deps: mime-db@~1.3.1 + +1.2.0 / 2014-12-19 +================== + + * deps: negotiator@0.5.0 + - Fix list return order when large accepted list + - Fix missing identity encoding when q=0 exists + - Remove dynamic building of Negotiator class + +1.1.4 / 2014-12-10 +================== + + * deps: mime-types@~2.0.4 + - deps: mime-db@~1.3.0 + +1.1.3 / 2014-11-09 +================== + + * deps: mime-types@~2.0.3 + - deps: mime-db@~1.2.0 + +1.1.2 / 2014-10-14 +================== + + * deps: negotiator@0.4.9 + - Fix error when media type has invalid parameter + +1.1.1 / 2014-09-28 +================== + + * deps: mime-types@~2.0.2 + - deps: mime-db@~1.1.0 + * deps: negotiator@0.4.8 + - Fix all negotiations to be case-insensitive + - Stable sort preferences of same quality according to client order + +1.1.0 / 2014-09-02 +================== + + * update `mime-types` + +1.0.7 / 2014-07-04 +================== + + * Fix wrong type returned from `type` when match after unknown extension + +1.0.6 / 2014-06-24 +================== + + * deps: negotiator@0.4.7 + +1.0.5 / 2014-06-20 +================== + + * fix crash when unknown extension given + +1.0.4 / 2014-06-19 +================== + + * use `mime-types` + +1.0.3 / 2014-06-11 +================== + + * deps: negotiator@0.4.6 + - Order by specificity when quality is the same + +1.0.2 / 2014-05-29 
+================== + + * Fix interpretation when header not in request + * deps: pin negotiator@0.4.5 + +1.0.1 / 2014-01-18 +================== + + * Identity encoding isn't always acceptable + * deps: negotiator@~0.4.0 + +1.0.0 / 2013-12-27 +================== + + * Genesis diff --git a/data/node_modules/accepts/LICENSE b/data/node_modules/accepts/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..06166077be4d1f620d89b9eb33c76d89e75857da --- /dev/null +++ b/data/node_modules/accepts/LICENSE @@ -0,0 +1,23 @@ +(The MIT License) + +Copyright (c) 2014 Jonathan Ong +Copyright (c) 2015 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/data/node_modules/accepts/README.md b/data/node_modules/accepts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..82680c530c3886540f630f643990e2ec707319d1 --- /dev/null +++ b/data/node_modules/accepts/README.md @@ -0,0 +1,140 @@ +# accepts + +[![NPM Version][npm-version-image]][npm-url] +[![NPM Downloads][npm-downloads-image]][npm-url] +[![Node.js Version][node-version-image]][node-version-url] +[![Build Status][github-actions-ci-image]][github-actions-ci-url] +[![Test Coverage][coveralls-image]][coveralls-url] + +Higher level content negotiation based on [negotiator](https://www.npmjs.com/package/negotiator). +Extracted from [koa](https://www.npmjs.com/package/koa) for general use. + +In addition to negotiator, it allows: + +- Allows types as an array or arguments list, ie `(['text/html', 'application/json'])` + as well as `('text/html', 'application/json')`. +- Allows type shorthands such as `json`. +- Returns `false` when no types match +- Treats non-existent headers as `*` + +## Installation + +This is a [Node.js](https://nodejs.org/en/) module available through the +[npm registry](https://www.npmjs.com/). Installation is done using the +[`npm install` command](https://docs.npmjs.com/getting-started/installing-npm-packages-locally): + +```sh +$ npm install accepts +``` + +## API + +```js +var accepts = require('accepts') +``` + +### accepts(req) + +Create a new `Accepts` object for the given `req`. + +#### .charset(charsets) + +Return the first accepted charset. If nothing in `charsets` is accepted, +then `false` is returned. + +#### .charsets() + +Return the charsets that the request accepts, in the order of the client's +preference (most preferred first). + +#### .encoding(encodings) + +Return the first accepted encoding. If nothing in `encodings` is accepted, +then `false` is returned. 
+ +#### .encodings() + +Return the encodings that the request accepts, in the order of the client's +preference (most preferred first). + +#### .language(languages) + +Return the first accepted language. If nothing in `languages` is accepted, +then `false` is returned. + +#### .languages() + +Return the languages that the request accepts, in the order of the client's +preference (most preferred first). + +#### .type(types) + +Return the first accepted type (and it is returned as the same text as what +appears in the `types` array). If nothing in `types` is accepted, then `false` +is returned. + +The `types` array can contain full MIME types or file extensions. Any value +that is not a full MIME types is passed to `require('mime-types').lookup`. + +#### .types() + +Return the types that the request accepts, in the order of the client's +preference (most preferred first). + +## Examples + +### Simple type negotiation + +This simple example shows how to use `accepts` to return a different typed +respond body based on what the client wants to accept. The server lists it's +preferences in order and will get back the best match between the client and +server. 
+ +```js +var accepts = require('accepts') +var http = require('http') + +function app (req, res) { + var accept = accepts(req) + + // the order of this list is significant; should be server preferred order + switch (accept.type(['json', 'html'])) { + case 'json': + res.setHeader('Content-Type', 'application/json') + res.write('{"hello":"world!"}') + break + case 'html': + res.setHeader('Content-Type', 'text/html') + res.write('hello, world!') + break + default: + // the fallback is text/plain, so no need to specify it above + res.setHeader('Content-Type', 'text/plain') + res.write('hello, world!') + break + } + + res.end() +} + +http.createServer(app).listen(3000) +``` + +You can test this out with the cURL program: +```sh +curl -I -H'Accept: text/html' http://localhost:3000/ +``` + +## License + +[MIT](LICENSE) + +[coveralls-image]: https://badgen.net/coveralls/c/github/jshttp/accepts/master +[coveralls-url]: https://coveralls.io/r/jshttp/accepts?branch=master +[github-actions-ci-image]: https://badgen.net/github/checks/jshttp/accepts/master?label=ci +[github-actions-ci-url]: https://github.com/jshttp/accepts/actions/workflows/ci.yml +[node-version-image]: https://badgen.net/npm/node/accepts +[node-version-url]: https://nodejs.org/en/download +[npm-downloads-image]: https://badgen.net/npm/dm/accepts +[npm-url]: https://npmjs.org/package/accepts +[npm-version-image]: https://badgen.net/npm/v/accepts diff --git a/data/node_modules/accepts/index.js b/data/node_modules/accepts/index.js new file mode 100644 index 0000000000000000000000000000000000000000..e9b2f63fb16f8ecdeb16c8eced302612794ccf65 --- /dev/null +++ b/data/node_modules/accepts/index.js @@ -0,0 +1,238 @@ +/*! + * accepts + * Copyright(c) 2014 Jonathan Ong + * Copyright(c) 2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module dependencies. + * @private + */ + +var Negotiator = require('negotiator') +var mime = require('mime-types') + +/** + * Module exports. 
+ * @public + */ + +module.exports = Accepts + +/** + * Create a new Accepts object for the given req. + * + * @param {object} req + * @public + */ + +function Accepts (req) { + if (!(this instanceof Accepts)) { + return new Accepts(req) + } + + this.headers = req.headers + this.negotiator = new Negotiator(req) +} + +/** + * Check if the given `type(s)` is acceptable, returning + * the best match when true, otherwise `undefined`, in which + * case you should respond with 406 "Not Acceptable". + * + * The `type` value may be a single mime type string + * such as "application/json", the extension name + * such as "json" or an array `["json", "html", "text/plain"]`. When a list + * or array is given the _best_ match, if any is returned. + * + * Examples: + * + * // Accept: text/html + * this.types('html'); + * // => "html" + * + * // Accept: text/*, application/json + * this.types('html'); + * // => "html" + * this.types('text/html'); + * // => "text/html" + * this.types('json', 'text'); + * // => "json" + * this.types('application/json'); + * // => "application/json" + * + * // Accept: text/*, application/json + * this.types('image/png'); + * this.types('png'); + * // => undefined + * + * // Accept: text/*;q=.5, application/json + * this.types(['html', 'json']); + * this.types('html', 'json'); + * // => "json" + * + * @param {String|Array} types... 
+ * @return {String|Array|Boolean} + * @public + */ + +Accepts.prototype.type = +Accepts.prototype.types = function (types_) { + var types = types_ + + // support flattened arguments + if (types && !Array.isArray(types)) { + types = new Array(arguments.length) + for (var i = 0; i < types.length; i++) { + types[i] = arguments[i] + } + } + + // no types, return all requested types + if (!types || types.length === 0) { + return this.negotiator.mediaTypes() + } + + // no accept header, return first given type + if (!this.headers.accept) { + return types[0] + } + + var mimes = types.map(extToMime) + var accepts = this.negotiator.mediaTypes(mimes.filter(validMime)) + var first = accepts[0] + + return first + ? types[mimes.indexOf(first)] + : false +} + +/** + * Return accepted encodings or best fit based on `encodings`. + * + * Given `Accept-Encoding: gzip, deflate` + * an array sorted by quality is returned: + * + * ['gzip', 'deflate'] + * + * @param {String|Array} encodings... + * @return {String|Array} + * @public + */ + +Accepts.prototype.encoding = +Accepts.prototype.encodings = function (encodings_) { + var encodings = encodings_ + + // support flattened arguments + if (encodings && !Array.isArray(encodings)) { + encodings = new Array(arguments.length) + for (var i = 0; i < encodings.length; i++) { + encodings[i] = arguments[i] + } + } + + // no encodings, return all requested encodings + if (!encodings || encodings.length === 0) { + return this.negotiator.encodings() + } + + return this.negotiator.encodings(encodings)[0] || false +} + +/** + * Return accepted charsets or best fit based on `charsets`. + * + * Given `Accept-Charset: utf-8, iso-8859-1;q=0.2, utf-7;q=0.5` + * an array sorted by quality is returned: + * + * ['utf-8', 'utf-7', 'iso-8859-1'] + * + * @param {String|Array} charsets... 
+ * @return {String|Array} + * @public + */ + +Accepts.prototype.charset = +Accepts.prototype.charsets = function (charsets_) { + var charsets = charsets_ + + // support flattened arguments + if (charsets && !Array.isArray(charsets)) { + charsets = new Array(arguments.length) + for (var i = 0; i < charsets.length; i++) { + charsets[i] = arguments[i] + } + } + + // no charsets, return all requested charsets + if (!charsets || charsets.length === 0) { + return this.negotiator.charsets() + } + + return this.negotiator.charsets(charsets)[0] || false +} + +/** + * Return accepted languages or best fit based on `langs`. + * + * Given `Accept-Language: en;q=0.8, es, pt` + * an array sorted by quality is returned: + * + * ['es', 'pt', 'en'] + * + * @param {String|Array} langs... + * @return {Array|String} + * @public + */ + +Accepts.prototype.lang = +Accepts.prototype.langs = +Accepts.prototype.language = +Accepts.prototype.languages = function (languages_) { + var languages = languages_ + + // support flattened arguments + if (languages && !Array.isArray(languages)) { + languages = new Array(arguments.length) + for (var i = 0; i < languages.length; i++) { + languages[i] = arguments[i] + } + } + + // no languages, return all requested languages + if (!languages || languages.length === 0) { + return this.negotiator.languages() + } + + return this.negotiator.languages(languages)[0] || false +} + +/** + * Convert extnames to mime. + * + * @param {String} type + * @return {String} + * @private + */ + +function extToMime (type) { + return type.indexOf('/') === -1 + ? mime.lookup(type) + : type +} + +/** + * Check if mime is valid. 
+ * + * @param {String} type + * @return {String} + * @private + */ + +function validMime (type) { + return typeof type === 'string' +} diff --git a/data/node_modules/accepts/package.json b/data/node_modules/accepts/package.json new file mode 100644 index 0000000000000000000000000000000000000000..0f2d15da92b29d328f4da484f494c5442c711b4d --- /dev/null +++ b/data/node_modules/accepts/package.json @@ -0,0 +1,47 @@ +{ + "name": "accepts", + "description": "Higher-level content negotiation", + "version": "1.3.8", + "contributors": [ + "Douglas Christopher Wilson ", + "Jonathan Ong (http://jongleberry.com)" + ], + "license": "MIT", + "repository": "jshttp/accepts", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "devDependencies": { + "deep-equal": "1.0.1", + "eslint": "7.32.0", + "eslint-config-standard": "14.1.1", + "eslint-plugin-import": "2.25.4", + "eslint-plugin-markdown": "2.2.1", + "eslint-plugin-node": "11.1.0", + "eslint-plugin-promise": "4.3.1", + "eslint-plugin-standard": "4.1.0", + "mocha": "9.2.0", + "nyc": "15.1.0" + }, + "files": [ + "LICENSE", + "HISTORY.md", + "index.js" + ], + "engines": { + "node": ">= 0.6" + }, + "scripts": { + "lint": "eslint .", + "test": "mocha --reporter spec --check-leaks --bail test/", + "test-ci": "nyc --reporter=lcov --reporter=text npm test", + "test-cov": "nyc --reporter=html --reporter=text npm test" + }, + "keywords": [ + "content", + "negotiation", + "accept", + "accepts" + ] +} diff --git a/data/node_modules/array-flatten/LICENSE b/data/node_modules/array-flatten/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..983fbe8aec3f4e2d4add592bb1083b00d7366f66 --- /dev/null +++ b/data/node_modules/array-flatten/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Blake Embrey (hello@blakeembrey.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal 
+in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/data/node_modules/array-flatten/README.md b/data/node_modules/array-flatten/README.md new file mode 100644 index 0000000000000000000000000000000000000000..91fa5b637ec2d2a492d6b5c4bf9ba2e76ff2f352 --- /dev/null +++ b/data/node_modules/array-flatten/README.md @@ -0,0 +1,43 @@ +# Array Flatten + +[![NPM version][npm-image]][npm-url] +[![NPM downloads][downloads-image]][downloads-url] +[![Build status][travis-image]][travis-url] +[![Test coverage][coveralls-image]][coveralls-url] + +> Flatten an array of nested arrays into a single flat array. Accepts an optional depth. 
+ +## Installation + +``` +npm install array-flatten --save +``` + +## Usage + +```javascript +var flatten = require('array-flatten') + +flatten([1, [2, [3, [4, [5], 6], 7], 8], 9]) +//=> [1, 2, 3, 4, 5, 6, 7, 8, 9] + +flatten([1, [2, [3, [4, [5], 6], 7], 8], 9], 2) +//=> [1, 2, 3, [4, [5], 6], 7, 8, 9] + +(function () { + flatten(arguments) //=> [1, 2, 3] +})(1, [2, 3]) +``` + +## License + +MIT + +[npm-image]: https://img.shields.io/npm/v/array-flatten.svg?style=flat +[npm-url]: https://npmjs.org/package/array-flatten +[downloads-image]: https://img.shields.io/npm/dm/array-flatten.svg?style=flat +[downloads-url]: https://npmjs.org/package/array-flatten +[travis-image]: https://img.shields.io/travis/blakeembrey/array-flatten.svg?style=flat +[travis-url]: https://travis-ci.org/blakeembrey/array-flatten +[coveralls-image]: https://img.shields.io/coveralls/blakeembrey/array-flatten.svg?style=flat +[coveralls-url]: https://coveralls.io/r/blakeembrey/array-flatten?branch=master diff --git a/data/node_modules/array-flatten/array-flatten.js b/data/node_modules/array-flatten/array-flatten.js new file mode 100644 index 0000000000000000000000000000000000000000..089117b322f5857b8bb6bccf7a659686aca067c0 --- /dev/null +++ b/data/node_modules/array-flatten/array-flatten.js @@ -0,0 +1,64 @@ +'use strict' + +/** + * Expose `arrayFlatten`. + */ +module.exports = arrayFlatten + +/** + * Recursive flatten function with depth. + * + * @param {Array} array + * @param {Array} result + * @param {Number} depth + * @return {Array} + */ +function flattenWithDepth (array, result, depth) { + for (var i = 0; i < array.length; i++) { + var value = array[i] + + if (depth > 0 && Array.isArray(value)) { + flattenWithDepth(value, result, depth - 1) + } else { + result.push(value) + } + } + + return result +} + +/** + * Recursive flatten function. Omitting depth is slightly faster. 
+ * + * @param {Array} array + * @param {Array} result + * @return {Array} + */ +function flattenForever (array, result) { + for (var i = 0; i < array.length; i++) { + var value = array[i] + + if (Array.isArray(value)) { + flattenForever(value, result) + } else { + result.push(value) + } + } + + return result +} + +/** + * Flatten an array, with the ability to define a depth. + * + * @param {Array} array + * @param {Number} depth + * @return {Array} + */ +function arrayFlatten (array, depth) { + if (depth == null) { + return flattenForever(array, []) + } + + return flattenWithDepth(array, [], depth) +} diff --git a/data/node_modules/array-flatten/package.json b/data/node_modules/array-flatten/package.json new file mode 100644 index 0000000000000000000000000000000000000000..1a24e2a1a1d3fbd694b77bf6673ab1e1c2fd5043 --- /dev/null +++ b/data/node_modules/array-flatten/package.json @@ -0,0 +1,39 @@ +{ + "name": "array-flatten", + "version": "1.1.1", + "description": "Flatten an array of nested arrays into a single flat array", + "main": "array-flatten.js", + "files": [ + "array-flatten.js", + "LICENSE" + ], + "scripts": { + "test": "istanbul cover _mocha -- -R spec" + }, + "repository": { + "type": "git", + "url": "git://github.com/blakeembrey/array-flatten.git" + }, + "keywords": [ + "array", + "flatten", + "arguments", + "depth" + ], + "author": { + "name": "Blake Embrey", + "email": "hello@blakeembrey.com", + "url": "http://blakeembrey.me" + }, + "license": "MIT", + "bugs": { + "url": "https://github.com/blakeembrey/array-flatten/issues" + }, + "homepage": "https://github.com/blakeembrey/array-flatten", + "devDependencies": { + "istanbul": "^0.3.13", + "mocha": "^2.2.4", + "pre-commit": "^1.0.7", + "standard": "^3.7.3" + } +} diff --git a/data/node_modules/body-parser/HISTORY.md b/data/node_modules/body-parser/HISTORY.md new file mode 100644 index 0000000000000000000000000000000000000000..b89249198a83f852a4d16db733697385317f929a --- /dev/null +++ 
b/data/node_modules/body-parser/HISTORY.md @@ -0,0 +1,665 @@ +1.20.2 / 2023-02-21 +=================== + + * Fix strict json error message on Node.js 19+ + * deps: content-type@~1.0.5 + - perf: skip value escaping when unnecessary + * deps: raw-body@2.5.2 + +1.20.1 / 2022-10-06 +=================== + + * deps: qs@6.11.0 + * perf: remove unnecessary object clone + +1.20.0 / 2022-04-02 +=================== + + * Fix error message for json parse whitespace in `strict` + * Fix internal error when inflated body exceeds limit + * Prevent loss of async hooks context + * Prevent hanging when request already read + * deps: depd@2.0.0 + - Replace internal `eval` usage with `Function` constructor + - Use instance methods on `process` to check for listeners + * deps: http-errors@2.0.0 + - deps: depd@2.0.0 + - deps: statuses@2.0.1 + * deps: on-finished@2.4.1 + * deps: qs@6.10.3 + * deps: raw-body@2.5.1 + - deps: http-errors@2.0.0 + +1.19.2 / 2022-02-15 +=================== + + * deps: bytes@3.1.2 + * deps: qs@6.9.7 + * Fix handling of `__proto__` keys + * deps: raw-body@2.4.3 + - deps: bytes@3.1.2 + +1.19.1 / 2021-12-10 +=================== + + * deps: bytes@3.1.1 + * deps: http-errors@1.8.1 + - deps: inherits@2.0.4 + - deps: toidentifier@1.0.1 + - deps: setprototypeof@1.2.0 + * deps: qs@6.9.6 + * deps: raw-body@2.4.2 + - deps: bytes@3.1.1 + - deps: http-errors@1.8.1 + * deps: safe-buffer@5.2.1 + * deps: type-is@~1.6.18 + +1.19.0 / 2019-04-25 +=================== + + * deps: bytes@3.1.0 + - Add petabyte (`pb`) support + * deps: http-errors@1.7.2 + - Set constructor name when possible + - deps: setprototypeof@1.1.1 + - deps: statuses@'>= 1.5.0 < 2' + * deps: iconv-lite@0.4.24 + - Added encoding MIK + * deps: qs@6.7.0 + - Fix parsing array brackets after index + * deps: raw-body@2.4.0 + - deps: bytes@3.1.0 + - deps: http-errors@1.7.2 + - deps: iconv-lite@0.4.24 + * deps: type-is@~1.6.17 + - deps: mime-types@~2.1.24 + - perf: prevent internal `throw` on invalid type + +1.18.3 / 
2018-05-14 +=================== + + * Fix stack trace for strict json parse error + * deps: depd@~1.1.2 + - perf: remove argument reassignment + * deps: http-errors@~1.6.3 + - deps: depd@~1.1.2 + - deps: setprototypeof@1.1.0 + - deps: statuses@'>= 1.3.1 < 2' + * deps: iconv-lite@0.4.23 + - Fix loading encoding with year appended + - Fix deprecation warnings on Node.js 10+ + * deps: qs@6.5.2 + * deps: raw-body@2.3.3 + - deps: http-errors@1.6.3 + - deps: iconv-lite@0.4.23 + * deps: type-is@~1.6.16 + - deps: mime-types@~2.1.18 + +1.18.2 / 2017-09-22 +=================== + + * deps: debug@2.6.9 + * perf: remove argument reassignment + +1.18.1 / 2017-09-12 +=================== + + * deps: content-type@~1.0.4 + - perf: remove argument reassignment + - perf: skip parameter parsing when no parameters + * deps: iconv-lite@0.4.19 + - Fix ISO-8859-1 regression + - Update Windows-1255 + * deps: qs@6.5.1 + - Fix parsing & compacting very deep objects + * deps: raw-body@2.3.2 + - deps: iconv-lite@0.4.19 + +1.18.0 / 2017-09-08 +=================== + + * Fix JSON strict violation error to match native parse error + * Include the `body` property on verify errors + * Include the `type` property on all generated errors + * Use `http-errors` to set status code on errors + * deps: bytes@3.0.0 + * deps: debug@2.6.8 + * deps: depd@~1.1.1 + - Remove unnecessary `Buffer` loading + * deps: http-errors@~1.6.2 + - deps: depd@1.1.1 + * deps: iconv-lite@0.4.18 + - Add support for React Native + - Add a warning if not loaded as utf-8 + - Fix CESU-8 decoding in Node.js 8 + - Improve speed of ISO-8859-1 encoding + * deps: qs@6.5.0 + * deps: raw-body@2.3.1 + - Use `http-errors` for standard emitted errors + - deps: bytes@3.0.0 + - deps: iconv-lite@0.4.18 + - perf: skip buffer decoding on overage chunk + * perf: prevent internal `throw` when missing charset + +1.17.2 / 2017-05-17 +=================== + + * deps: debug@2.6.7 + - Fix `DEBUG_MAX_ARRAY_LENGTH` + - deps: ms@2.0.0 + * deps: 
type-is@~1.6.15 + - deps: mime-types@~2.1.15 + +1.17.1 / 2017-03-06 +=================== + + * deps: qs@6.4.0 + - Fix regression parsing keys starting with `[` + +1.17.0 / 2017-03-01 +=================== + + * deps: http-errors@~1.6.1 + - Make `message` property enumerable for `HttpError`s + - deps: setprototypeof@1.0.3 + * deps: qs@6.3.1 + - Fix compacting nested arrays + +1.16.1 / 2017-02-10 +=================== + + * deps: debug@2.6.1 + - Fix deprecation messages in WebStorm and other editors + - Undeprecate `DEBUG_FD` set to `1` or `2` + +1.16.0 / 2017-01-17 +=================== + + * deps: debug@2.6.0 + - Allow colors in workers + - Deprecated `DEBUG_FD` environment variable + - Fix error when running under React Native + - Use same color for same namespace + - deps: ms@0.7.2 + * deps: http-errors@~1.5.1 + - deps: inherits@2.0.3 + - deps: setprototypeof@1.0.2 + - deps: statuses@'>= 1.3.1 < 2' + * deps: iconv-lite@0.4.15 + - Added encoding MS-31J + - Added encoding MS-932 + - Added encoding MS-936 + - Added encoding MS-949 + - Added encoding MS-950 + - Fix GBK/GB18030 handling of Euro character + * deps: qs@6.2.1 + - Fix array parsing from skipping empty values + * deps: raw-body@~2.2.0 + - deps: iconv-lite@0.4.15 + * deps: type-is@~1.6.14 + - deps: mime-types@~2.1.13 + +1.15.2 / 2016-06-19 +=================== + + * deps: bytes@2.4.0 + * deps: content-type@~1.0.2 + - perf: enable strict mode + * deps: http-errors@~1.5.0 + - Use `setprototypeof` module to replace `__proto__` setting + - deps: statuses@'>= 1.3.0 < 2' + - perf: enable strict mode + * deps: qs@6.2.0 + * deps: raw-body@~2.1.7 + - deps: bytes@2.4.0 + - perf: remove double-cleanup on happy path + * deps: type-is@~1.6.13 + - deps: mime-types@~2.1.11 + +1.15.1 / 2016-05-05 +=================== + + * deps: bytes@2.3.0 + - Drop partial bytes on all parsed units + - Fix parsing byte string that looks like hex + * deps: raw-body@~2.1.6 + - deps: bytes@2.3.0 + * deps: type-is@~1.6.12 + - deps: 
mime-types@~2.1.10 + +1.15.0 / 2016-02-10 +=================== + + * deps: http-errors@~1.4.0 + - Add `HttpError` export, for `err instanceof createError.HttpError` + - deps: inherits@2.0.1 + - deps: statuses@'>= 1.2.1 < 2' + * deps: qs@6.1.0 + * deps: type-is@~1.6.11 + - deps: mime-types@~2.1.9 + +1.14.2 / 2015-12-16 +=================== + + * deps: bytes@2.2.0 + * deps: iconv-lite@0.4.13 + * deps: qs@5.2.0 + * deps: raw-body@~2.1.5 + - deps: bytes@2.2.0 + - deps: iconv-lite@0.4.13 + * deps: type-is@~1.6.10 + - deps: mime-types@~2.1.8 + +1.14.1 / 2015-09-27 +=================== + + * Fix issue where invalid charset results in 400 when `verify` used + * deps: iconv-lite@0.4.12 + - Fix CESU-8 decoding in Node.js 4.x + * deps: raw-body@~2.1.4 + - Fix masking critical errors from `iconv-lite` + - deps: iconv-lite@0.4.12 + * deps: type-is@~1.6.9 + - deps: mime-types@~2.1.7 + +1.14.0 / 2015-09-16 +=================== + + * Fix JSON strict parse error to match syntax errors + * Provide static `require` analysis in `urlencoded` parser + * deps: depd@~1.1.0 + - Support web browser loading + * deps: qs@5.1.0 + * deps: raw-body@~2.1.3 + - Fix sync callback when attaching data listener causes sync read + * deps: type-is@~1.6.8 + - Fix type error when given invalid type to match against + - deps: mime-types@~2.1.6 + +1.13.3 / 2015-07-31 +=================== + + * deps: type-is@~1.6.6 + - deps: mime-types@~2.1.4 + +1.13.2 / 2015-07-05 +=================== + + * deps: iconv-lite@0.4.11 + * deps: qs@4.0.0 + - Fix dropping parameters like `hasOwnProperty` + - Fix user-visible incompatibilities from 3.1.0 + - Fix various parsing edge cases + * deps: raw-body@~2.1.2 + - Fix error stack traces to skip `makeError` + - deps: iconv-lite@0.4.11 + * deps: type-is@~1.6.4 + - deps: mime-types@~2.1.2 + - perf: enable strict mode + - perf: remove argument reassignment + +1.13.1 / 2015-06-16 +=================== + + * deps: qs@2.4.2 + - Downgraded from 3.1.0 because of user-visible 
incompatibilities + +1.13.0 / 2015-06-14 +=================== + + * Add `statusCode` property on `Error`s, in addition to `status` + * Change `type` default to `application/json` for JSON parser + * Change `type` default to `application/x-www-form-urlencoded` for urlencoded parser + * Provide static `require` analysis + * Use the `http-errors` module to generate errors + * deps: bytes@2.1.0 + - Slight optimizations + * deps: iconv-lite@0.4.10 + - The encoding UTF-16 without BOM now defaults to UTF-16LE when detection fails + - Leading BOM is now removed when decoding + * deps: on-finished@~2.3.0 + - Add defined behavior for HTTP `CONNECT` requests + - Add defined behavior for HTTP `Upgrade` requests + - deps: ee-first@1.1.1 + * deps: qs@3.1.0 + - Fix dropping parameters like `hasOwnProperty` + - Fix various parsing edge cases + - Parsed object now has `null` prototype + * deps: raw-body@~2.1.1 + - Use `unpipe` module for unpiping requests + - deps: iconv-lite@0.4.10 + * deps: type-is@~1.6.3 + - deps: mime-types@~2.1.1 + - perf: reduce try block size + - perf: remove bitwise operations + * perf: enable strict mode + * perf: remove argument reassignment + * perf: remove delete call + +1.12.4 / 2015-05-10 +=================== + + * deps: debug@~2.2.0 + * deps: qs@2.4.2 + - Fix allowing parameters like `constructor` + * deps: on-finished@~2.2.1 + * deps: raw-body@~2.0.1 + - Fix a false-positive when unpiping in Node.js 0.8 + - deps: bytes@2.0.1 + * deps: type-is@~1.6.2 + - deps: mime-types@~2.0.11 + +1.12.3 / 2015-04-15 +=================== + + * Slight efficiency improvement when not debugging + * deps: depd@~1.0.1 + * deps: iconv-lite@0.4.8 + - Add encoding alias UNICODE-1-1-UTF-7 + * deps: raw-body@1.3.4 + - Fix hanging callback if request aborts during read + - deps: iconv-lite@0.4.8 + +1.12.2 / 2015-03-16 +=================== + + * deps: qs@2.4.1 + - Fix error when parameter `hasOwnProperty` is present + +1.12.1 / 2015-03-15 +=================== + + * deps: 
debug@~2.1.3 + - Fix high intensity foreground color for bold + - deps: ms@0.7.0 + * deps: type-is@~1.6.1 + - deps: mime-types@~2.0.10 + +1.12.0 / 2015-02-13 +=================== + + * add `debug` messages + * accept a function for the `type` option + * use `content-type` to parse `Content-Type` headers + * deps: iconv-lite@0.4.7 + - Gracefully support enumerables on `Object.prototype` + * deps: raw-body@1.3.3 + - deps: iconv-lite@0.4.7 + * deps: type-is@~1.6.0 + - fix argument reassignment + - fix false-positives in `hasBody` `Transfer-Encoding` check + - support wildcard for both type and subtype (`*/*`) + - deps: mime-types@~2.0.9 + +1.11.0 / 2015-01-30 +=================== + + * make internal `extended: true` depth limit infinity + * deps: type-is@~1.5.6 + - deps: mime-types@~2.0.8 + +1.10.2 / 2015-01-20 +=================== + + * deps: iconv-lite@0.4.6 + - Fix rare aliases of single-byte encodings + * deps: raw-body@1.3.2 + - deps: iconv-lite@0.4.6 + +1.10.1 / 2015-01-01 +=================== + + * deps: on-finished@~2.2.0 + * deps: type-is@~1.5.5 + - deps: mime-types@~2.0.7 + +1.10.0 / 2014-12-02 +=================== + + * make internal `extended: true` array limit dynamic + +1.9.3 / 2014-11-21 +================== + + * deps: iconv-lite@0.4.5 + - Fix Windows-31J and X-SJIS encoding support + * deps: qs@2.3.3 + - Fix `arrayLimit` behavior + * deps: raw-body@1.3.1 + - deps: iconv-lite@0.4.5 + * deps: type-is@~1.5.3 + - deps: mime-types@~2.0.3 + +1.9.2 / 2014-10-27 +================== + + * deps: qs@2.3.2 + - Fix parsing of mixed objects and values + +1.9.1 / 2014-10-22 +================== + + * deps: on-finished@~2.1.1 + - Fix handling of pipelined requests + * deps: qs@2.3.0 + - Fix parsing of mixed implicit and explicit arrays + * deps: type-is@~1.5.2 + - deps: mime-types@~2.0.2 + +1.9.0 / 2014-09-24 +================== + + * include the charset in "unsupported charset" error message + * include the encoding in "unsupported content encoding" error message + * 
deps: depd@~1.0.0 + +1.8.4 / 2014-09-23 +================== + + * fix content encoding to be case-insensitive + +1.8.3 / 2014-09-19 +================== + + * deps: qs@2.2.4 + - Fix issue with object keys starting with numbers truncated + +1.8.2 / 2014-09-15 +================== + + * deps: depd@0.4.5 + +1.8.1 / 2014-09-07 +================== + + * deps: media-typer@0.3.0 + * deps: type-is@~1.5.1 + +1.8.0 / 2014-09-05 +================== + + * make empty-body-handling consistent between chunked requests + - empty `json` produces `{}` + - empty `raw` produces `new Buffer(0)` + - empty `text` produces `''` + - empty `urlencoded` produces `{}` + * deps: qs@2.2.3 + - Fix issue where first empty value in array is discarded + * deps: type-is@~1.5.0 + - fix `hasbody` to be true for `content-length: 0` + +1.7.0 / 2014-09-01 +================== + + * add `parameterLimit` option to `urlencoded` parser + * change `urlencoded` extended array limit to 100 + * respond with 413 when over `parameterLimit` in `urlencoded` + +1.6.7 / 2014-08-29 +================== + + * deps: qs@2.2.2 + - Remove unnecessary cloning + +1.6.6 / 2014-08-27 +================== + + * deps: qs@2.2.0 + - Array parsing fix + - Performance improvements + +1.6.5 / 2014-08-16 +================== + + * deps: on-finished@2.1.0 + +1.6.4 / 2014-08-14 +================== + + * deps: qs@1.2.2 + +1.6.3 / 2014-08-10 +================== + + * deps: qs@1.2.1 + +1.6.2 / 2014-08-07 +================== + + * deps: qs@1.2.0 + - Fix parsing array of objects + +1.6.1 / 2014-08-06 +================== + + * deps: qs@1.1.0 + - Accept urlencoded square brackets + - Accept empty values in implicit array notation + +1.6.0 / 2014-08-05 +================== + + * deps: qs@1.0.2 + - Complete rewrite + - Limits array length to 20 + - Limits object depth to 5 + - Limits parameters to 1,000 + +1.5.2 / 2014-07-27 +================== + + * deps: depd@0.4.4 + - Work-around v8 generating empty stack traces + +1.5.1 / 2014-07-26 
+================== + + * deps: depd@0.4.3 + - Fix exception when global `Error.stackTraceLimit` is too low + +1.5.0 / 2014-07-20 +================== + + * deps: depd@0.4.2 + - Add `TRACE_DEPRECATION` environment variable + - Remove non-standard grey color from color output + - Support `--no-deprecation` argument + - Support `--trace-deprecation` argument + * deps: iconv-lite@0.4.4 + - Added encoding UTF-7 + * deps: raw-body@1.3.0 + - deps: iconv-lite@0.4.4 + - Added encoding UTF-7 + - Fix `Cannot switch to old mode now` error on Node.js 0.10+ + * deps: type-is@~1.3.2 + +1.4.3 / 2014-06-19 +================== + + * deps: type-is@1.3.1 + - fix global variable leak + +1.4.2 / 2014-06-19 +================== + + * deps: type-is@1.3.0 + - improve type parsing + +1.4.1 / 2014-06-19 +================== + + * fix urlencoded extended deprecation message + +1.4.0 / 2014-06-19 +================== + + * add `text` parser + * add `raw` parser + * check accepted charset in content-type (accepts utf-8) + * check accepted encoding in content-encoding (accepts identity) + * deprecate `bodyParser()` middleware; use `.json()` and `.urlencoded()` as needed + * deprecate `urlencoded()` without provided `extended` option + * lazy-load urlencoded parsers + * parsers split into files for reduced mem usage + * support gzip and deflate bodies + - set `inflate: false` to turn off + * deps: raw-body@1.2.2 + - Support all encodings from `iconv-lite` + +1.3.1 / 2014-06-11 +================== + + * deps: type-is@1.2.1 + - Switch dependency from mime to mime-types@1.0.0 + +1.3.0 / 2014-05-31 +================== + + * add `extended` option to urlencoded parser + +1.2.2 / 2014-05-27 +================== + + * deps: raw-body@1.1.6 + - assert stream encoding on node.js 0.8 + - assert stream encoding on node.js < 0.10.6 + - deps: bytes@1 + +1.2.1 / 2014-05-26 +================== + + * invoke `next(err)` after request fully read + - prevents hung responses and socket hang ups + +1.2.0 / 2014-05-11 
+================== + + * add `verify` option + * deps: type-is@1.2.0 + - support suffix matching + +1.1.2 / 2014-05-11 +================== + + * improve json parser speed + +1.1.1 / 2014-05-11 +================== + + * fix repeated limit parsing with every request + +1.1.0 / 2014-05-10 +================== + + * add `type` option + * deps: pin for safety and consistency + +1.0.2 / 2014-04-14 +================== + + * use `type-is` module + +1.0.1 / 2014-03-20 +================== + + * lower default limits to 100kb diff --git a/data/node_modules/body-parser/LICENSE b/data/node_modules/body-parser/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..386b7b6946e47bc46f8138791049b4e6a7cef889 --- /dev/null +++ b/data/node_modules/body-parser/LICENSE @@ -0,0 +1,23 @@ +(The MIT License) + +Copyright (c) 2014 Jonathan Ong +Copyright (c) 2014-2015 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/data/node_modules/body-parser/README.md b/data/node_modules/body-parser/README.md new file mode 100644 index 0000000000000000000000000000000000000000..38553bf7987866e7d9d872acb0e4ceec9c826c4e --- /dev/null +++ b/data/node_modules/body-parser/README.md @@ -0,0 +1,465 @@ +# body-parser + +[![NPM Version][npm-version-image]][npm-url] +[![NPM Downloads][npm-downloads-image]][npm-url] +[![Build Status][ci-image]][ci-url] +[![Test Coverage][coveralls-image]][coveralls-url] + +Node.js body parsing middleware. + +Parse incoming request bodies in a middleware before your handlers, available +under the `req.body` property. + +**Note** As `req.body`'s shape is based on user-controlled input, all +properties and values in this object are untrusted and should be validated +before trusting. For example, `req.body.foo.toString()` may fail in multiple +ways, for example the `foo` property may not be there or may not be a string, +and `toString` may not be a function and instead a string or other user input. + +[Learn about the anatomy of an HTTP transaction in Node.js](https://nodejs.org/en/docs/guides/anatomy-of-an-http-transaction/). + +_This does not handle multipart bodies_, due to their complex and typically +large nature. 
For multipart bodies, you may be interested in the following +modules: + + * [busboy](https://www.npmjs.org/package/busboy#readme) and + [connect-busboy](https://www.npmjs.org/package/connect-busboy#readme) + * [multiparty](https://www.npmjs.org/package/multiparty#readme) and + [connect-multiparty](https://www.npmjs.org/package/connect-multiparty#readme) + * [formidable](https://www.npmjs.org/package/formidable#readme) + * [multer](https://www.npmjs.org/package/multer#readme) + +This module provides the following parsers: + + * [JSON body parser](#bodyparserjsonoptions) + * [Raw body parser](#bodyparserrawoptions) + * [Text body parser](#bodyparsertextoptions) + * [URL-encoded form body parser](#bodyparserurlencodedoptions) + +Other body parsers you might be interested in: + +- [body](https://www.npmjs.org/package/body#readme) +- [co-body](https://www.npmjs.org/package/co-body#readme) + +## Installation + +```sh +$ npm install body-parser +``` + +## API + +```js +var bodyParser = require('body-parser') +``` + +The `bodyParser` object exposes various factories to create middlewares. All +middlewares will populate the `req.body` property with the parsed body when +the `Content-Type` request header matches the `type` option, or an empty +object (`{}`) if there was no body to parse, the `Content-Type` was not matched, +or an error occurred. + +The various errors returned by this module are described in the +[errors section](#errors). + +### bodyParser.json([options]) + +Returns middleware that only parses `json` and only looks at requests where +the `Content-Type` header matches the `type` option. This parser accepts any +Unicode encoding of the body and supports automatic inflation of `gzip` and +`deflate` encodings. + +A new `body` object containing the parsed data is populated on the `request` +object after the middleware (i.e. `req.body`). 
+ +#### Options + +The `json` function takes an optional `options` object that may contain any of +the following keys: + +##### inflate + +When set to `true`, then deflated (compressed) bodies will be inflated; when +`false`, deflated bodies are rejected. Defaults to `true`. + +##### limit + +Controls the maximum request body size. If this is a number, then the value +specifies the number of bytes; if it is a string, the value is passed to the +[bytes](https://www.npmjs.com/package/bytes) library for parsing. Defaults +to `'100kb'`. + +##### reviver + +The `reviver` option is passed directly to `JSON.parse` as the second +argument. You can find more information on this argument +[in the MDN documentation about JSON.parse](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/parse#Example.3A_Using_the_reviver_parameter). + +##### strict + +When set to `true`, will only accept arrays and objects; when `false` will +accept anything `JSON.parse` accepts. Defaults to `true`. + +##### type + +The `type` option is used to determine what media type the middleware will +parse. This option can be a string, array of strings, or a function. If not a +function, `type` option is passed directly to the +[type-is](https://www.npmjs.org/package/type-is#readme) library and this can +be an extension name (like `json`), a mime type (like `application/json`), or +a mime type with a wildcard (like `*/*` or `*/json`). If a function, the `type` +option is called as `fn(req)` and the request is parsed if it returns a truthy +value. Defaults to `application/json`. + +##### verify + +The `verify` option, if supplied, is called as `verify(req, res, buf, encoding)`, +where `buf` is a `Buffer` of the raw request body and `encoding` is the +encoding of the request. The parsing can be aborted by throwing an error. 
+ +### bodyParser.raw([options]) + +Returns middleware that parses all bodies as a `Buffer` and only looks at +requests where the `Content-Type` header matches the `type` option. This +parser supports automatic inflation of `gzip` and `deflate` encodings. + +A new `body` object containing the parsed data is populated on the `request` +object after the middleware (i.e. `req.body`). This will be a `Buffer` object +of the body. + +#### Options + +The `raw` function takes an optional `options` object that may contain any of +the following keys: + +##### inflate + +When set to `true`, then deflated (compressed) bodies will be inflated; when +`false`, deflated bodies are rejected. Defaults to `true`. + +##### limit + +Controls the maximum request body size. If this is a number, then the value +specifies the number of bytes; if it is a string, the value is passed to the +[bytes](https://www.npmjs.com/package/bytes) library for parsing. Defaults +to `'100kb'`. + +##### type + +The `type` option is used to determine what media type the middleware will +parse. This option can be a string, array of strings, or a function. +If not a function, `type` option is passed directly to the +[type-is](https://www.npmjs.org/package/type-is#readme) library and this +can be an extension name (like `bin`), a mime type (like +`application/octet-stream`), or a mime type with a wildcard (like `*/*` or +`application/*`). If a function, the `type` option is called as `fn(req)` +and the request is parsed if it returns a truthy value. Defaults to +`application/octet-stream`. + +##### verify + +The `verify` option, if supplied, is called as `verify(req, res, buf, encoding)`, +where `buf` is a `Buffer` of the raw request body and `encoding` is the +encoding of the request. The parsing can be aborted by throwing an error. + +### bodyParser.text([options]) + +Returns middleware that parses all bodies as a string and only looks at +requests where the `Content-Type` header matches the `type` option. 
This +parser supports automatic inflation of `gzip` and `deflate` encodings. + +A new `body` string containing the parsed data is populated on the `request` +object after the middleware (i.e. `req.body`). This will be a string of the +body. + +#### Options + +The `text` function takes an optional `options` object that may contain any of +the following keys: + +##### defaultCharset + +Specify the default character set for the text content if the charset is not +specified in the `Content-Type` header of the request. Defaults to `utf-8`. + +##### inflate + +When set to `true`, then deflated (compressed) bodies will be inflated; when +`false`, deflated bodies are rejected. Defaults to `true`. + +##### limit + +Controls the maximum request body size. If this is a number, then the value +specifies the number of bytes; if it is a string, the value is passed to the +[bytes](https://www.npmjs.com/package/bytes) library for parsing. Defaults +to `'100kb'`. + +##### type + +The `type` option is used to determine what media type the middleware will +parse. This option can be a string, array of strings, or a function. If not +a function, `type` option is passed directly to the +[type-is](https://www.npmjs.org/package/type-is#readme) library and this can +be an extension name (like `txt`), a mime type (like `text/plain`), or a mime +type with a wildcard (like `*/*` or `text/*`). If a function, the `type` +option is called as `fn(req)` and the request is parsed if it returns a +truthy value. Defaults to `text/plain`. + +##### verify + +The `verify` option, if supplied, is called as `verify(req, res, buf, encoding)`, +where `buf` is a `Buffer` of the raw request body and `encoding` is the +encoding of the request. The parsing can be aborted by throwing an error. + +### bodyParser.urlencoded([options]) + +Returns middleware that only parses `urlencoded` bodies and only looks at +requests where the `Content-Type` header matches the `type` option. 
This +parser accepts only UTF-8 encoding of the body and supports automatic +inflation of `gzip` and `deflate` encodings. + +A new `body` object containing the parsed data is populated on the `request` +object after the middleware (i.e. `req.body`). This object will contain +key-value pairs, where the value can be a string or array (when `extended` is +`false`), or any type (when `extended` is `true`). + +#### Options + +The `urlencoded` function takes an optional `options` object that may contain +any of the following keys: + +##### extended + +The `extended` option allows to choose between parsing the URL-encoded data +with the `querystring` library (when `false`) or the `qs` library (when +`true`). The "extended" syntax allows for rich objects and arrays to be +encoded into the URL-encoded format, allowing for a JSON-like experience +with URL-encoded. For more information, please +[see the qs library](https://www.npmjs.org/package/qs#readme). + +Defaults to `true`, but using the default has been deprecated. Please +research into the difference between `qs` and `querystring` and choose the +appropriate setting. + +##### inflate + +When set to `true`, then deflated (compressed) bodies will be inflated; when +`false`, deflated bodies are rejected. Defaults to `true`. + +##### limit + +Controls the maximum request body size. If this is a number, then the value +specifies the number of bytes; if it is a string, the value is passed to the +[bytes](https://www.npmjs.com/package/bytes) library for parsing. Defaults +to `'100kb'`. + +##### parameterLimit + +The `parameterLimit` option controls the maximum number of parameters that +are allowed in the URL-encoded data. If a request contains more parameters +than this value, a 413 will be returned to the client. Defaults to `1000`. + +##### type + +The `type` option is used to determine what media type the middleware will +parse. This option can be a string, array of strings, or a function. 
If not +a function, `type` option is passed directly to the +[type-is](https://www.npmjs.org/package/type-is#readme) library and this can +be an extension name (like `urlencoded`), a mime type (like +`application/x-www-form-urlencoded`), or a mime type with a wildcard (like +`*/x-www-form-urlencoded`). If a function, the `type` option is called as +`fn(req)` and the request is parsed if it returns a truthy value. Defaults +to `application/x-www-form-urlencoded`. + +##### verify + +The `verify` option, if supplied, is called as `verify(req, res, buf, encoding)`, +where `buf` is a `Buffer` of the raw request body and `encoding` is the +encoding of the request. The parsing can be aborted by throwing an error. + +## Errors + +The middlewares provided by this module create errors using the +[`http-errors` module](https://www.npmjs.com/package/http-errors). The errors +will typically have a `status`/`statusCode` property that contains the suggested +HTTP response code, an `expose` property to determine if the `message` property +should be displayed to the client, a `type` property to determine the type of +error without matching against the `message`, and a `body` property containing +the read body, if available. + +The following are the common errors created, though any error can come through +for various reasons. + +### content encoding unsupported + +This error will occur when the request had a `Content-Encoding` header that +contained an encoding but the "inflation" option was set to `false`. The +`status` property is set to `415`, the `type` property is set to +`'encoding.unsupported'`, and the `charset` property will be set to the +encoding that is unsupported. + +### entity parse failed + +This error will occur when the request contained an entity that could not be +parsed by the middleware. The `status` property is set to `400`, the `type` +property is set to `'entity.parse.failed'`, and the `body` property is set to +the entity value that failed parsing. 
+ +### entity verify failed + +This error will occur when the request contained an entity that could not be +failed verification by the defined `verify` option. The `status` property is +set to `403`, the `type` property is set to `'entity.verify.failed'`, and the +`body` property is set to the entity value that failed verification. + +### request aborted + +This error will occur when the request is aborted by the client before reading +the body has finished. The `received` property will be set to the number of +bytes received before the request was aborted and the `expected` property is +set to the number of expected bytes. The `status` property is set to `400` +and `type` property is set to `'request.aborted'`. + +### request entity too large + +This error will occur when the request body's size is larger than the "limit" +option. The `limit` property will be set to the byte limit and the `length` +property will be set to the request body's length. The `status` property is +set to `413` and the `type` property is set to `'entity.too.large'`. + +### request size did not match content length + +This error will occur when the request's length did not match the length from +the `Content-Length` header. This typically occurs when the request is malformed, +typically when the `Content-Length` header was calculated based on characters +instead of bytes. The `status` property is set to `400` and the `type` property +is set to `'request.size.invalid'`. + +### stream encoding should not be set + +This error will occur when something called the `req.setEncoding` method prior +to this middleware. This module operates directly on bytes only and you cannot +call `req.setEncoding` when using this module. The `status` property is set to +`500` and the `type` property is set to `'stream.encoding.set'`. + +### stream is not readable + +This error will occur when the request is no longer readable when this middleware +attempts to read it. 
This typically means something other than a middleware from +this module read the request body already and the middleware was also configured to +read the same request. The `status` property is set to `500` and the `type` +property is set to `'stream.not.readable'`. + +### too many parameters + +This error will occur when the content of the request exceeds the configured +`parameterLimit` for the `urlencoded` parser. The `status` property is set to +`413` and the `type` property is set to `'parameters.too.many'`. + +### unsupported charset "BOGUS" + +This error will occur when the request had a charset parameter in the +`Content-Type` header, but the `iconv-lite` module does not support it OR the +parser does not support it. The charset is contained in the message as well +as in the `charset` property. The `status` property is set to `415`, the +`type` property is set to `'charset.unsupported'`, and the `charset` property +is set to the charset that is unsupported. + +### unsupported content encoding "bogus" + +This error will occur when the request had a `Content-Encoding` header that +contained an unsupported encoding. The encoding is contained in the message +as well as in the `encoding` property. The `status` property is set to `415`, +the `type` property is set to `'encoding.unsupported'`, and the `encoding` +property is set to the encoding that is unsupported. + +## Examples + +### Express/Connect top-level generic + +This example demonstrates adding a generic JSON and URL-encoded parser as a +top-level middleware, which will parse the bodies of all incoming requests. +This is the simplest setup. 
+ +```js +var express = require('express') +var bodyParser = require('body-parser') + +var app = express() + +// parse application/x-www-form-urlencoded +app.use(bodyParser.urlencoded({ extended: false })) + +// parse application/json +app.use(bodyParser.json()) + +app.use(function (req, res) { + res.setHeader('Content-Type', 'text/plain') + res.write('you posted:\n') + res.end(JSON.stringify(req.body, null, 2)) +}) +``` + +### Express route-specific + +This example demonstrates adding body parsers specifically to the routes that +need them. In general, this is the most recommended way to use body-parser with +Express. + +```js +var express = require('express') +var bodyParser = require('body-parser') + +var app = express() + +// create application/json parser +var jsonParser = bodyParser.json() + +// create application/x-www-form-urlencoded parser +var urlencodedParser = bodyParser.urlencoded({ extended: false }) + +// POST /login gets urlencoded bodies +app.post('/login', urlencodedParser, function (req, res) { + res.send('welcome, ' + req.body.username) +}) + +// POST /api/users gets JSON bodies +app.post('/api/users', jsonParser, function (req, res) { + // create user in req.body +}) +``` + +### Change accepted type for parsers + +All the parsers accept a `type` option which allows you to change the +`Content-Type` that the middleware will parse. 
+ +```js +var express = require('express') +var bodyParser = require('body-parser') + +var app = express() + +// parse various different custom JSON types as JSON +app.use(bodyParser.json({ type: 'application/*+json' })) + +// parse some custom thing into a Buffer +app.use(bodyParser.raw({ type: 'application/vnd.custom-type' })) + +// parse an HTML body into a string +app.use(bodyParser.text({ type: 'text/html' })) +``` + +## License + +[MIT](LICENSE) + +[ci-image]: https://badgen.net/github/checks/expressjs/body-parser/master?label=ci +[ci-url]: https://github.com/expressjs/body-parser/actions/workflows/ci.yml +[coveralls-image]: https://badgen.net/coveralls/c/github/expressjs/body-parser/master +[coveralls-url]: https://coveralls.io/r/expressjs/body-parser?branch=master +[node-version-image]: https://badgen.net/npm/node/body-parser +[node-version-url]: https://nodejs.org/en/download +[npm-downloads-image]: https://badgen.net/npm/dm/body-parser +[npm-url]: https://npmjs.org/package/body-parser +[npm-version-image]: https://badgen.net/npm/v/body-parser diff --git a/data/node_modules/body-parser/SECURITY.md b/data/node_modules/body-parser/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..9694d429616df706508285a1ef185d40e45cdfae --- /dev/null +++ b/data/node_modules/body-parser/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policies and Procedures + +## Reporting a Bug + +The Express team and community take all security bugs seriously. Thank you +for improving the security of Express. We appreciate your efforts and +responsible disclosure and will make every effort to acknowledge your +contributions. + +Report security bugs by emailing the current owner(s) of `body-parser`. This +information can be found in the npm registry using the command +`npm owner ls body-parser`. 
+If unsure or unable to get the information from the above, open an issue +in the [project issue tracker](https://github.com/expressjs/body-parser/issues) +asking for the current contact information. + +To ensure the timely response to your report, please ensure that the entirety +of the report is contained within the email body and not solely behind a web +link or an attachment. + +At least one owner will acknowledge your email within 48 hours, and will send a +more detailed response within 48 hours indicating the next steps in handling +your report. After the initial reply to your report, the owners will +endeavor to keep you informed of the progress towards a fix and full +announcement, and may ask for additional information or guidance. diff --git a/data/node_modules/body-parser/index.js b/data/node_modules/body-parser/index.js new file mode 100644 index 0000000000000000000000000000000000000000..bb24d739d9c5fade336cdf76b08b784ae2594d77 --- /dev/null +++ b/data/node_modules/body-parser/index.js @@ -0,0 +1,156 @@ +/*! + * body-parser + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module dependencies. + * @private + */ + +var deprecate = require('depd')('body-parser') + +/** + * Cache of loaded parsers. + * @private + */ + +var parsers = Object.create(null) + +/** + * @typedef Parsers + * @type {function} + * @property {function} json + * @property {function} raw + * @property {function} text + * @property {function} urlencoded + */ + +/** + * Module exports. + * @type {Parsers} + */ + +exports = module.exports = deprecate.function(bodyParser, + 'bodyParser: use individual json/urlencoded middlewares') + +/** + * JSON parser. + * @public + */ + +Object.defineProperty(exports, 'json', { + configurable: true, + enumerable: true, + get: createParserGetter('json') +}) + +/** + * Raw parser. 
+ * @public + */ + +Object.defineProperty(exports, 'raw', { + configurable: true, + enumerable: true, + get: createParserGetter('raw') +}) + +/** + * Text parser. + * @public + */ + +Object.defineProperty(exports, 'text', { + configurable: true, + enumerable: true, + get: createParserGetter('text') +}) + +/** + * URL-encoded parser. + * @public + */ + +Object.defineProperty(exports, 'urlencoded', { + configurable: true, + enumerable: true, + get: createParserGetter('urlencoded') +}) + +/** + * Create a middleware to parse json and urlencoded bodies. + * + * @param {object} [options] + * @return {function} + * @deprecated + * @public + */ + +function bodyParser (options) { + // use default type for parsers + var opts = Object.create(options || null, { + type: { + configurable: true, + enumerable: true, + value: undefined, + writable: true + } + }) + + var _urlencoded = exports.urlencoded(opts) + var _json = exports.json(opts) + + return function bodyParser (req, res, next) { + _json(req, res, function (err) { + if (err) return next(err) + _urlencoded(req, res, next) + }) + } +} + +/** + * Create a getter for loading a parser. + * @private + */ + +function createParserGetter (name) { + return function get () { + return loadParser(name) + } +} + +/** + * Load a parser module. 
+ * @private + */ + +function loadParser (parserName) { + var parser = parsers[parserName] + + if (parser !== undefined) { + return parser + } + + // this uses a switch for static require analysis + switch (parserName) { + case 'json': + parser = require('./lib/types/json') + break + case 'raw': + parser = require('./lib/types/raw') + break + case 'text': + parser = require('./lib/types/text') + break + case 'urlencoded': + parser = require('./lib/types/urlencoded') + break + } + + // store to prevent invoking require() + return (parsers[parserName] = parser) +} diff --git a/data/node_modules/body-parser/lib/read.js b/data/node_modules/body-parser/lib/read.js new file mode 100644 index 0000000000000000000000000000000000000000..fce6283f50961e68c2f576031ed5e3d4fdc39984 --- /dev/null +++ b/data/node_modules/body-parser/lib/read.js @@ -0,0 +1,205 @@ +/*! + * body-parser + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module dependencies. + * @private + */ + +var createError = require('http-errors') +var destroy = require('destroy') +var getBody = require('raw-body') +var iconv = require('iconv-lite') +var onFinished = require('on-finished') +var unpipe = require('unpipe') +var zlib = require('zlib') + +/** + * Module exports. + */ + +module.exports = read + +/** + * Read a request into a buffer and parse. + * + * @param {object} req + * @param {object} res + * @param {function} next + * @param {function} parse + * @param {function} debug + * @param {object} options + * @private + */ + +function read (req, res, next, parse, debug, options) { + var length + var opts = options + var stream + + // flag as parsed + req._body = true + + // read options + var encoding = opts.encoding !== null + ? 
opts.encoding + : null + var verify = opts.verify + + try { + // get the content stream + stream = contentstream(req, debug, opts.inflate) + length = stream.length + stream.length = undefined + } catch (err) { + return next(err) + } + + // set raw-body options + opts.length = length + opts.encoding = verify + ? null + : encoding + + // assert charset is supported + if (opts.encoding === null && encoding !== null && !iconv.encodingExists(encoding)) { + return next(createError(415, 'unsupported charset "' + encoding.toUpperCase() + '"', { + charset: encoding.toLowerCase(), + type: 'charset.unsupported' + })) + } + + // read body + debug('read body') + getBody(stream, opts, function (error, body) { + if (error) { + var _error + + if (error.type === 'encoding.unsupported') { + // echo back charset + _error = createError(415, 'unsupported charset "' + encoding.toUpperCase() + '"', { + charset: encoding.toLowerCase(), + type: 'charset.unsupported' + }) + } else { + // set status code on error + _error = createError(400, error) + } + + // unpipe from stream and destroy + if (stream !== req) { + unpipe(req) + destroy(stream, true) + } + + // read off entire request + dump(req, function onfinished () { + next(createError(400, _error)) + }) + return + } + + // verify + if (verify) { + try { + debug('verify body') + verify(req, res, body, encoding) + } catch (err) { + next(createError(403, err, { + body: body, + type: err.type || 'entity.verify.failed' + })) + return + } + } + + // parse + var str = body + try { + debug('parse body') + str = typeof body !== 'string' && encoding !== null + ? iconv.decode(body, encoding) + : body + req.body = parse(str) + } catch (err) { + next(createError(400, err, { + body: str, + type: err.type || 'entity.parse.failed' + })) + return + } + + next() + }) +} + +/** + * Get the content stream of the request. 
+ * + * @param {object} req + * @param {function} debug + * @param {boolean} [inflate=true] + * @return {object} + * @api private + */ + +function contentstream (req, debug, inflate) { + var encoding = (req.headers['content-encoding'] || 'identity').toLowerCase() + var length = req.headers['content-length'] + var stream + + debug('content-encoding "%s"', encoding) + + if (inflate === false && encoding !== 'identity') { + throw createError(415, 'content encoding unsupported', { + encoding: encoding, + type: 'encoding.unsupported' + }) + } + + switch (encoding) { + case 'deflate': + stream = zlib.createInflate() + debug('inflate body') + req.pipe(stream) + break + case 'gzip': + stream = zlib.createGunzip() + debug('gunzip body') + req.pipe(stream) + break + case 'identity': + stream = req + stream.length = length + break + default: + throw createError(415, 'unsupported content encoding "' + encoding + '"', { + encoding: encoding, + type: 'encoding.unsupported' + }) + } + + return stream +} + +/** + * Dump the contents of a request. + * + * @param {object} req + * @param {function} callback + * @api private + */ + +function dump (req, callback) { + if (onFinished.isFinished(req)) { + callback(null) + } else { + onFinished(req, callback) + req.resume() + } +} diff --git a/data/node_modules/body-parser/lib/types/json.js b/data/node_modules/body-parser/lib/types/json.js new file mode 100644 index 0000000000000000000000000000000000000000..59f3f7e28f2416cac957b5e35dc78839a5019e0d --- /dev/null +++ b/data/node_modules/body-parser/lib/types/json.js @@ -0,0 +1,247 @@ +/*! + * body-parser + * Copyright(c) 2014 Jonathan Ong + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module dependencies. 
+ * @private + */ + +var bytes = require('bytes') +var contentType = require('content-type') +var createError = require('http-errors') +var debug = require('debug')('body-parser:json') +var read = require('../read') +var typeis = require('type-is') + +/** + * Module exports. + */ + +module.exports = json + +/** + * RegExp to match the first non-space in a string. + * + * Allowed whitespace is defined in RFC 7159: + * + * ws = *( + * %x20 / ; Space + * %x09 / ; Horizontal tab + * %x0A / ; Line feed or New line + * %x0D ) ; Carriage return + */ + +var FIRST_CHAR_REGEXP = /^[\x20\x09\x0a\x0d]*([^\x20\x09\x0a\x0d])/ // eslint-disable-line no-control-regex + +var JSON_SYNTAX_CHAR = '#' +var JSON_SYNTAX_REGEXP = /#+/g + +/** + * Create a middleware to parse JSON bodies. + * + * @param {object} [options] + * @return {function} + * @public + */ + +function json (options) { + var opts = options || {} + + var limit = typeof opts.limit !== 'number' + ? bytes.parse(opts.limit || '100kb') + : opts.limit + var inflate = opts.inflate !== false + var reviver = opts.reviver + var strict = opts.strict !== false + var type = opts.type || 'application/json' + var verify = opts.verify || false + + if (verify !== false && typeof verify !== 'function') { + throw new TypeError('option verify must be function') + } + + // create the appropriate type checking function + var shouldParse = typeof type !== 'function' + ? 
typeChecker(type) + : type + + function parse (body) { + if (body.length === 0) { + // special-case empty json body, as it's a common client-side mistake + // TODO: maybe make this configurable or part of "strict" option + return {} + } + + if (strict) { + var first = firstchar(body) + + if (first !== '{' && first !== '[') { + debug('strict violation') + throw createStrictSyntaxError(body, first) + } + } + + try { + debug('parse json') + return JSON.parse(body, reviver) + } catch (e) { + throw normalizeJsonSyntaxError(e, { + message: e.message, + stack: e.stack + }) + } + } + + return function jsonParser (req, res, next) { + if (req._body) { + debug('body already parsed') + next() + return + } + + req.body = req.body || {} + + // skip requests without bodies + if (!typeis.hasBody(req)) { + debug('skip empty body') + next() + return + } + + debug('content-type %j', req.headers['content-type']) + + // determine if request should be parsed + if (!shouldParse(req)) { + debug('skip parsing') + next() + return + } + + // assert charset per RFC 7159 sec 8.1 + var charset = getCharset(req) || 'utf-8' + if (charset.slice(0, 4) !== 'utf-') { + debug('invalid charset') + next(createError(415, 'unsupported charset "' + charset.toUpperCase() + '"', { + charset: charset, + type: 'charset.unsupported' + })) + return + } + + // read + read(req, res, next, parse, debug, { + encoding: charset, + inflate: inflate, + limit: limit, + verify: verify + }) + } +} + +/** + * Create strict violation syntax error matching native error. 
+ * + * @param {string} str + * @param {string} char + * @return {Error} + * @private + */ + +function createStrictSyntaxError (str, char) { + var index = str.indexOf(char) + var partial = '' + + if (index !== -1) { + partial = str.substring(0, index) + JSON_SYNTAX_CHAR + + for (var i = index + 1; i < str.length; i++) { + partial += JSON_SYNTAX_CHAR + } + } + + try { + JSON.parse(partial); /* istanbul ignore next */ throw new SyntaxError('strict violation') + } catch (e) { + return normalizeJsonSyntaxError(e, { + message: e.message.replace(JSON_SYNTAX_REGEXP, function (placeholder) { + return str.substring(index, index + placeholder.length) + }), + stack: e.stack + }) + } +} + +/** + * Get the first non-whitespace character in a string. + * + * @param {string} str + * @return {function} + * @private + */ + +function firstchar (str) { + var match = FIRST_CHAR_REGEXP.exec(str) + + return match + ? match[1] + : undefined +} + +/** + * Get the charset of a request. + * + * @param {object} req + * @api private + */ + +function getCharset (req) { + try { + return (contentType.parse(req).parameters.charset || '').toLowerCase() + } catch (e) { + return undefined + } +} + +/** + * Normalize a SyntaxError for JSON.parse. + * + * @param {SyntaxError} error + * @param {object} obj + * @return {SyntaxError} + */ + +function normalizeJsonSyntaxError (error, obj) { + var keys = Object.getOwnPropertyNames(error) + + for (var i = 0; i < keys.length; i++) { + var key = keys[i] + if (key !== 'stack' && key !== 'message') { + delete error[key] + } + } + + // replace stack before message for Node.js 0.10 and below + error.stack = obj.stack.replace(error.message, obj.message) + error.message = obj.message + + return error +} + +/** + * Get the simple type checker. 
+ * + * @param {string} type + * @return {function} + */ + +function typeChecker (type) { + return function checkType (req) { + return Boolean(typeis(req, type)) + } +} diff --git a/data/node_modules/body-parser/lib/types/raw.js b/data/node_modules/body-parser/lib/types/raw.js new file mode 100644 index 0000000000000000000000000000000000000000..f5d1b67475405284e3dac312f92ade101571329f --- /dev/null +++ b/data/node_modules/body-parser/lib/types/raw.js @@ -0,0 +1,101 @@ +/*! + * body-parser + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module dependencies. + */ + +var bytes = require('bytes') +var debug = require('debug')('body-parser:raw') +var read = require('../read') +var typeis = require('type-is') + +/** + * Module exports. + */ + +module.exports = raw + +/** + * Create a middleware to parse raw bodies. + * + * @param {object} [options] + * @return {function} + * @api public + */ + +function raw (options) { + var opts = options || {} + + var inflate = opts.inflate !== false + var limit = typeof opts.limit !== 'number' + ? bytes.parse(opts.limit || '100kb') + : opts.limit + var type = opts.type || 'application/octet-stream' + var verify = opts.verify || false + + if (verify !== false && typeof verify !== 'function') { + throw new TypeError('option verify must be function') + } + + // create the appropriate type checking function + var shouldParse = typeof type !== 'function' + ? 
typeChecker(type) + : type + + function parse (buf) { + return buf + } + + return function rawParser (req, res, next) { + if (req._body) { + debug('body already parsed') + next() + return + } + + req.body = req.body || {} + + // skip requests without bodies + if (!typeis.hasBody(req)) { + debug('skip empty body') + next() + return + } + + debug('content-type %j', req.headers['content-type']) + + // determine if request should be parsed + if (!shouldParse(req)) { + debug('skip parsing') + next() + return + } + + // read + read(req, res, next, parse, debug, { + encoding: null, + inflate: inflate, + limit: limit, + verify: verify + }) + } +} + +/** + * Get the simple type checker. + * + * @param {string} type + * @return {function} + */ + +function typeChecker (type) { + return function checkType (req) { + return Boolean(typeis(req, type)) + } +} diff --git a/data/node_modules/body-parser/lib/types/text.js b/data/node_modules/body-parser/lib/types/text.js new file mode 100644 index 0000000000000000000000000000000000000000..083a00908a6299a8ef72f477983359f5675f82ef --- /dev/null +++ b/data/node_modules/body-parser/lib/types/text.js @@ -0,0 +1,121 @@ +/*! + * body-parser + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module dependencies. + */ + +var bytes = require('bytes') +var contentType = require('content-type') +var debug = require('debug')('body-parser:text') +var read = require('../read') +var typeis = require('type-is') + +/** + * Module exports. + */ + +module.exports = text + +/** + * Create a middleware to parse text bodies. + * + * @param {object} [options] + * @return {function} + * @api public + */ + +function text (options) { + var opts = options || {} + + var defaultCharset = opts.defaultCharset || 'utf-8' + var inflate = opts.inflate !== false + var limit = typeof opts.limit !== 'number' + ? 
bytes.parse(opts.limit || '100kb') + : opts.limit + var type = opts.type || 'text/plain' + var verify = opts.verify || false + + if (verify !== false && typeof verify !== 'function') { + throw new TypeError('option verify must be function') + } + + // create the appropriate type checking function + var shouldParse = typeof type !== 'function' + ? typeChecker(type) + : type + + function parse (buf) { + return buf + } + + return function textParser (req, res, next) { + if (req._body) { + debug('body already parsed') + next() + return + } + + req.body = req.body || {} + + // skip requests without bodies + if (!typeis.hasBody(req)) { + debug('skip empty body') + next() + return + } + + debug('content-type %j', req.headers['content-type']) + + // determine if request should be parsed + if (!shouldParse(req)) { + debug('skip parsing') + next() + return + } + + // get charset + var charset = getCharset(req) || defaultCharset + + // read + read(req, res, next, parse, debug, { + encoding: charset, + inflate: inflate, + limit: limit, + verify: verify + }) + } +} + +/** + * Get the charset of a request. + * + * @param {object} req + * @api private + */ + +function getCharset (req) { + try { + return (contentType.parse(req).parameters.charset || '').toLowerCase() + } catch (e) { + return undefined + } +} + +/** + * Get the simple type checker. + * + * @param {string} type + * @return {function} + */ + +function typeChecker (type) { + return function checkType (req) { + return Boolean(typeis(req, type)) + } +} diff --git a/data/node_modules/body-parser/lib/types/urlencoded.js b/data/node_modules/body-parser/lib/types/urlencoded.js new file mode 100644 index 0000000000000000000000000000000000000000..b2ca8f16d0c105424acd16282e629346698e140b --- /dev/null +++ b/data/node_modules/body-parser/lib/types/urlencoded.js @@ -0,0 +1,284 @@ +/*! 
+ * body-parser + * Copyright(c) 2014 Jonathan Ong + * Copyright(c) 2014-2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module dependencies. + * @private + */ + +var bytes = require('bytes') +var contentType = require('content-type') +var createError = require('http-errors') +var debug = require('debug')('body-parser:urlencoded') +var deprecate = require('depd')('body-parser') +var read = require('../read') +var typeis = require('type-is') + +/** + * Module exports. + */ + +module.exports = urlencoded + +/** + * Cache of parser modules. + */ + +var parsers = Object.create(null) + +/** + * Create a middleware to parse urlencoded bodies. + * + * @param {object} [options] + * @return {function} + * @public + */ + +function urlencoded (options) { + var opts = options || {} + + // notice because option default will flip in next major + if (opts.extended === undefined) { + deprecate('undefined extended: provide extended option') + } + + var extended = opts.extended !== false + var inflate = opts.inflate !== false + var limit = typeof opts.limit !== 'number' + ? bytes.parse(opts.limit || '100kb') + : opts.limit + var type = opts.type || 'application/x-www-form-urlencoded' + var verify = opts.verify || false + + if (verify !== false && typeof verify !== 'function') { + throw new TypeError('option verify must be function') + } + + // create the appropriate query parser + var queryparse = extended + ? extendedparser(opts) + : simpleparser(opts) + + // create the appropriate type checking function + var shouldParse = typeof type !== 'function' + ? typeChecker(type) + : type + + function parse (body) { + return body.length + ? 
queryparse(body) + : {} + } + + return function urlencodedParser (req, res, next) { + if (req._body) { + debug('body already parsed') + next() + return + } + + req.body = req.body || {} + + // skip requests without bodies + if (!typeis.hasBody(req)) { + debug('skip empty body') + next() + return + } + + debug('content-type %j', req.headers['content-type']) + + // determine if request should be parsed + if (!shouldParse(req)) { + debug('skip parsing') + next() + return + } + + // assert charset + var charset = getCharset(req) || 'utf-8' + if (charset !== 'utf-8') { + debug('invalid charset') + next(createError(415, 'unsupported charset "' + charset.toUpperCase() + '"', { + charset: charset, + type: 'charset.unsupported' + })) + return + } + + // read + read(req, res, next, parse, debug, { + debug: debug, + encoding: charset, + inflate: inflate, + limit: limit, + verify: verify + }) + } +} + +/** + * Get the extended query parser. + * + * @param {object} options + */ + +function extendedparser (options) { + var parameterLimit = options.parameterLimit !== undefined + ? options.parameterLimit + : 1000 + var parse = parser('qs') + + if (isNaN(parameterLimit) || parameterLimit < 1) { + throw new TypeError('option parameterLimit must be a positive number') + } + + if (isFinite(parameterLimit)) { + parameterLimit = parameterLimit | 0 + } + + return function queryparse (body) { + var paramCount = parameterCount(body, parameterLimit) + + if (paramCount === undefined) { + debug('too many parameters') + throw createError(413, 'too many parameters', { + type: 'parameters.too.many' + }) + } + + var arrayLimit = Math.max(100, paramCount) + + debug('parse extended urlencoding') + return parse(body, { + allowPrototypes: true, + arrayLimit: arrayLimit, + depth: Infinity, + parameterLimit: parameterLimit + }) + } +} + +/** + * Get the charset of a request. 
+ * + * @param {object} req + * @api private + */ + +function getCharset (req) { + try { + return (contentType.parse(req).parameters.charset || '').toLowerCase() + } catch (e) { + return undefined + } +} + +/** + * Count the number of parameters, stopping once limit reached + * + * @param {string} body + * @param {number} limit + * @api private + */ + +function parameterCount (body, limit) { + var count = 0 + var index = 0 + + while ((index = body.indexOf('&', index)) !== -1) { + count++ + index++ + + if (count === limit) { + return undefined + } + } + + return count +} + +/** + * Get parser for module name dynamically. + * + * @param {string} name + * @return {function} + * @api private + */ + +function parser (name) { + var mod = parsers[name] + + if (mod !== undefined) { + return mod.parse + } + + // this uses a switch for static require analysis + switch (name) { + case 'qs': + mod = require('qs') + break + case 'querystring': + mod = require('querystring') + break + } + + // store to prevent invoking require() + parsers[name] = mod + + return mod.parse +} + +/** + * Get the simple query parser. + * + * @param {object} options + */ + +function simpleparser (options) { + var parameterLimit = options.parameterLimit !== undefined + ? options.parameterLimit + : 1000 + var parse = parser('querystring') + + if (isNaN(parameterLimit) || parameterLimit < 1) { + throw new TypeError('option parameterLimit must be a positive number') + } + + if (isFinite(parameterLimit)) { + parameterLimit = parameterLimit | 0 + } + + return function queryparse (body) { + var paramCount = parameterCount(body, parameterLimit) + + if (paramCount === undefined) { + debug('too many parameters') + throw createError(413, 'too many parameters', { + type: 'parameters.too.many' + }) + } + + debug('parse urlencoding') + return parse(body, undefined, undefined, { maxKeys: parameterLimit }) + } +} + +/** + * Get the simple type checker. 
+ * + * @param {string} type + * @return {function} + */ + +function typeChecker (type) { + return function checkType (req) { + return Boolean(typeis(req, type)) + } +} diff --git a/data/node_modules/body-parser/package.json b/data/node_modules/body-parser/package.json new file mode 100644 index 0000000000000000000000000000000000000000..4637304389c5dc7a0a52974c120ea164b85e7c22 --- /dev/null +++ b/data/node_modules/body-parser/package.json @@ -0,0 +1,56 @@ +{ + "name": "body-parser", + "description": "Node.js body parsing middleware", + "version": "1.20.2", + "contributors": [ + "Douglas Christopher Wilson ", + "Jonathan Ong (http://jongleberry.com)" + ], + "license": "MIT", + "repository": "expressjs/body-parser", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "devDependencies": { + "eslint": "8.34.0", + "eslint-config-standard": "14.1.1", + "eslint-plugin-import": "2.27.5", + "eslint-plugin-markdown": "3.0.0", + "eslint-plugin-node": "11.1.0", + "eslint-plugin-promise": "6.1.1", + "eslint-plugin-standard": "4.1.0", + "methods": "1.1.2", + "mocha": "10.2.0", + "nyc": "15.1.0", + "safe-buffer": "5.2.1", + "supertest": "6.3.3" + }, + "files": [ + "lib/", + "LICENSE", + "HISTORY.md", + "SECURITY.md", + "index.js" + ], + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + }, + "scripts": { + "lint": "eslint .", + "test": "mocha --require test/support/env --reporter spec --check-leaks --bail test/", + "test-ci": "nyc --reporter=lcov --reporter=text npm test", + "test-cov": "nyc --reporter=html --reporter=text npm test" + } +} diff --git a/data/node_modules/bytes/History.md b/data/node_modules/bytes/History.md new file mode 100644 index 
0000000000000000000000000000000000000000..d60ce0e6df2efd3f83c08b098d1b7b683b96ec84 --- /dev/null +++ b/data/node_modules/bytes/History.md @@ -0,0 +1,97 @@ +3.1.2 / 2022-01-27 +================== + + * Fix return value for un-parsable strings + +3.1.1 / 2021-11-15 +================== + + * Fix "thousandsSeparator" incorrecting formatting fractional part + +3.1.0 / 2019-01-22 +================== + + * Add petabyte (`pb`) support + +3.0.0 / 2017-08-31 +================== + + * Change "kB" to "KB" in format output + * Remove support for Node.js 0.6 + * Remove support for ComponentJS + +2.5.0 / 2017-03-24 +================== + + * Add option "unit" + +2.4.0 / 2016-06-01 +================== + + * Add option "unitSeparator" + +2.3.0 / 2016-02-15 +================== + + * Drop partial bytes on all parsed units + * Fix non-finite numbers to `.format` to return `null` + * Fix parsing byte string that looks like hex + * perf: hoist regular expressions + +2.2.0 / 2015-11-13 +================== + + * add option "decimalPlaces" + * add option "fixedDecimals" + +2.1.0 / 2015-05-21 +================== + + * add `.format` export + * add `.parse` export + +2.0.2 / 2015-05-20 +================== + + * remove map recreation + * remove unnecessary object construction + +2.0.1 / 2015-05-07 +================== + + * fix browserify require + * remove node.extend dependency + +2.0.0 / 2015-04-12 +================== + + * add option "case" + * add option "thousandsSeparator" + * return "null" on invalid parse input + * support proper round-trip: bytes(bytes(num)) === num + * units no longer case sensitive when parsing + +1.0.0 / 2014-05-05 +================== + + * add negative support. 
fixes #6 + +0.3.0 / 2014-03-19 +================== + + * added terabyte support + +0.2.1 / 2013-04-01 +================== + + * add .component + +0.2.0 / 2012-10-28 +================== + + * bytes(200).should.eql('200b') + +0.1.0 / 2012-07-04 +================== + + * add bytes to string conversion [yields] diff --git a/data/node_modules/bytes/LICENSE b/data/node_modules/bytes/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..63e95a96338a608c218a7ef5805629878aaa951f --- /dev/null +++ b/data/node_modules/bytes/LICENSE @@ -0,0 +1,23 @@ +(The MIT License) + +Copyright (c) 2012-2014 TJ Holowaychuk +Copyright (c) 2015 Jed Watson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/data/node_modules/bytes/Readme.md b/data/node_modules/bytes/Readme.md new file mode 100644 index 0000000000000000000000000000000000000000..5790e23e328e045e66ec6f0b98526157b6c2abcf --- /dev/null +++ b/data/node_modules/bytes/Readme.md @@ -0,0 +1,152 @@ +# Bytes utility + +[![NPM Version][npm-image]][npm-url] +[![NPM Downloads][downloads-image]][downloads-url] +[![Build Status][ci-image]][ci-url] +[![Test Coverage][coveralls-image]][coveralls-url] + +Utility to parse a string bytes (ex: `1TB`) to bytes (`1099511627776`) and vice-versa. + +## Installation + +This is a [Node.js](https://nodejs.org/en/) module available through the +[npm registry](https://www.npmjs.com/). Installation is done using the +[`npm install` command](https://docs.npmjs.com/getting-started/installing-npm-packages-locally): + +```bash +$ npm install bytes +``` + +## Usage + +```js +var bytes = require('bytes'); +``` + +#### bytes(number|string value, [options]): number|string|null + +Default export function. Delegates to either `bytes.format` or `bytes.parse` based on the type of `value`. + +**Arguments** + +| Name | Type | Description | +|---------|----------|--------------------| +| value | `number`|`string` | Number value to format or string value to parse | +| options | `Object` | Conversion options for `format` | + +**Returns** + +| Name | Type | Description | +|---------|------------------|-------------------------------------------------| +| results | `string`|`number`|`null` | Return null upon error. Numeric value in bytes, or string value otherwise. | + +**Example** + +```js +bytes(1024); +// output: '1KB' + +bytes('1KB'); +// output: 1024 +``` + +#### bytes.format(number value, [options]): string|null + +Format the given value in bytes into a string. If the value is negative, it is kept as such. If it is a float, it is + rounded. 
+ +**Arguments** + +| Name | Type | Description | +|---------|----------|--------------------| +| value | `number` | Value in bytes | +| options | `Object` | Conversion options | + +**Options** + +| Property | Type | Description | +|-------------------|--------|-----------------------------------------------------------------------------------------| +| decimalPlaces | `number`|`null` | Maximum number of decimal places to include in output. Default value to `2`. | +| fixedDecimals | `boolean`|`null` | Whether to always display the maximum number of decimal places. Default value to `false` | +| thousandsSeparator | `string`|`null` | Example of values: `' '`, `','` and `'.'`... Default value to `''`. | +| unit | `string`|`null` | The unit in which the result will be returned (B/KB/MB/GB/TB). Default value to `''` (which means auto detect). | +| unitSeparator | `string`|`null` | Separator to use between number and unit. Default value to `''`. | + +**Returns** + +| Name | Type | Description | +|---------|------------------|-------------------------------------------------| +| results | `string`|`null` | Return null upon error. String value otherwise. | + +**Example** + +```js +bytes.format(1024); +// output: '1KB' + +bytes.format(1000); +// output: '1000B' + +bytes.format(1000, {thousandsSeparator: ' '}); +// output: '1 000B' + +bytes.format(1024 * 1.7, {decimalPlaces: 0}); +// output: '2KB' + +bytes.format(1024, {unitSeparator: ' '}); +// output: '1 KB' +``` + +#### bytes.parse(string|number value): number|null + +Parse the string value into an integer in bytes. If no unit is given, or `value` +is a number, it is assumed the value is in bytes. + +Supported units and abbreviations are as follows and are case-insensitive: + + * `b` for bytes + * `kb` for kilobytes + * `mb` for megabytes + * `gb` for gigabytes + * `tb` for terabytes + * `pb` for petabytes + +The units are in powers of two, not ten. This means 1kb = 1024b according to this parser. 
+ +**Arguments** + +| Name | Type | Description | +|---------------|--------|--------------------| +| value | `string`|`number` | String to parse, or number in bytes. | + +**Returns** + +| Name | Type | Description | +|---------|-------------|-------------------------| +| results | `number`|`null` | Return null upon error. Value in bytes otherwise. | + +**Example** + +```js +bytes.parse('1KB'); +// output: 1024 + +bytes.parse('1024'); +// output: 1024 + +bytes.parse(1024); +// output: 1024 +``` + +## License + +[MIT](LICENSE) + +[ci-image]: https://badgen.net/github/checks/visionmedia/bytes.js/master?label=ci +[ci-url]: https://github.com/visionmedia/bytes.js/actions?query=workflow%3Aci +[coveralls-image]: https://badgen.net/coveralls/c/github/visionmedia/bytes.js/master +[coveralls-url]: https://coveralls.io/r/visionmedia/bytes.js?branch=master +[downloads-image]: https://badgen.net/npm/dm/bytes +[downloads-url]: https://npmjs.org/package/bytes +[npm-image]: https://badgen.net/npm/v/bytes +[npm-url]: https://npmjs.org/package/bytes diff --git a/data/node_modules/bytes/index.js b/data/node_modules/bytes/index.js new file mode 100644 index 0000000000000000000000000000000000000000..6f2d0f89e1258564bad95175159e1d8a6abd9ddf --- /dev/null +++ b/data/node_modules/bytes/index.js @@ -0,0 +1,170 @@ +/*! + * bytes + * Copyright(c) 2012-2014 TJ Holowaychuk + * Copyright(c) 2015 Jed Watson + * MIT Licensed + */ + +'use strict'; + +/** + * Module exports. + * @public + */ + +module.exports = bytes; +module.exports.format = format; +module.exports.parse = parse; + +/** + * Module variables. 
+ * @private + */ + +var formatThousandsRegExp = /\B(?=(\d{3})+(?!\d))/g; + +var formatDecimalsRegExp = /(?:\.0*|(\.[^0]+)0+)$/; + +var map = { + b: 1, + kb: 1 << 10, + mb: 1 << 20, + gb: 1 << 30, + tb: Math.pow(1024, 4), + pb: Math.pow(1024, 5), +}; + +var parseRegExp = /^((-|\+)?(\d+(?:\.\d+)?)) *(kb|mb|gb|tb|pb)$/i; + +/** + * Convert the given value in bytes into a string or parse to string to an integer in bytes. + * + * @param {string|number} value + * @param {{ + * case: [string], + * decimalPlaces: [number] + * fixedDecimals: [boolean] + * thousandsSeparator: [string] + * unitSeparator: [string] + * }} [options] bytes options. + * + * @returns {string|number|null} + */ + +function bytes(value, options) { + if (typeof value === 'string') { + return parse(value); + } + + if (typeof value === 'number') { + return format(value, options); + } + + return null; +} + +/** + * Format the given value in bytes into a string. + * + * If the value is negative, it is kept as such. If it is a float, + * it is rounded. + * + * @param {number} value + * @param {object} [options] + * @param {number} [options.decimalPlaces=2] + * @param {number} [options.fixedDecimals=false] + * @param {string} [options.thousandsSeparator=] + * @param {string} [options.unit=] + * @param {string} [options.unitSeparator=] + * + * @returns {string|null} + * @public + */ + +function format(value, options) { + if (!Number.isFinite(value)) { + return null; + } + + var mag = Math.abs(value); + var thousandsSeparator = (options && options.thousandsSeparator) || ''; + var unitSeparator = (options && options.unitSeparator) || ''; + var decimalPlaces = (options && options.decimalPlaces !== undefined) ? 
options.decimalPlaces : 2; + var fixedDecimals = Boolean(options && options.fixedDecimals); + var unit = (options && options.unit) || ''; + + if (!unit || !map[unit.toLowerCase()]) { + if (mag >= map.pb) { + unit = 'PB'; + } else if (mag >= map.tb) { + unit = 'TB'; + } else if (mag >= map.gb) { + unit = 'GB'; + } else if (mag >= map.mb) { + unit = 'MB'; + } else if (mag >= map.kb) { + unit = 'KB'; + } else { + unit = 'B'; + } + } + + var val = value / map[unit.toLowerCase()]; + var str = val.toFixed(decimalPlaces); + + if (!fixedDecimals) { + str = str.replace(formatDecimalsRegExp, '$1'); + } + + if (thousandsSeparator) { + str = str.split('.').map(function (s, i) { + return i === 0 + ? s.replace(formatThousandsRegExp, thousandsSeparator) + : s + }).join('.'); + } + + return str + unitSeparator + unit; +} + +/** + * Parse the string value into an integer in bytes. + * + * If no unit is given, it is assumed the value is in bytes. + * + * @param {number|string} val + * + * @returns {number|null} + * @public + */ + +function parse(val) { + if (typeof val === 'number' && !isNaN(val)) { + return val; + } + + if (typeof val !== 'string') { + return null; + } + + // Test if the string passed is valid + var results = parseRegExp.exec(val); + var floatValue; + var unit = 'b'; + + if (!results) { + // Nothing could be extracted from the given string + floatValue = parseInt(val, 10); + unit = 'b' + } else { + // Retrieve the value and the unit + floatValue = parseFloat(results[1]); + unit = results[4].toLowerCase(); + } + + if (isNaN(floatValue)) { + return null; + } + + return Math.floor(map[unit] * floatValue); +} diff --git a/data/node_modules/bytes/package.json b/data/node_modules/bytes/package.json new file mode 100644 index 0000000000000000000000000000000000000000..f2b6a8b0e3c9020746409617bcd562c9368be451 --- /dev/null +++ b/data/node_modules/bytes/package.json @@ -0,0 +1,42 @@ +{ + "name": "bytes", + "description": "Utility to parse a string bytes to bytes and 
vice-versa", + "version": "3.1.2", + "author": "TJ Holowaychuk (http://tjholowaychuk.com)", + "contributors": [ + "Jed Watson ", + "Théo FIDRY " + ], + "license": "MIT", + "keywords": [ + "byte", + "bytes", + "utility", + "parse", + "parser", + "convert", + "converter" + ], + "repository": "visionmedia/bytes.js", + "devDependencies": { + "eslint": "7.32.0", + "eslint-plugin-markdown": "2.2.1", + "mocha": "9.2.0", + "nyc": "15.1.0" + }, + "files": [ + "History.md", + "LICENSE", + "Readme.md", + "index.js" + ], + "engines": { + "node": ">= 0.8" + }, + "scripts": { + "lint": "eslint .", + "test": "mocha --check-leaks --reporter spec", + "test-ci": "nyc --reporter=lcov --reporter=text npm test", + "test-cov": "nyc --reporter=html --reporter=text npm test" + } +} diff --git a/data/node_modules/call-bind/.eslintignore b/data/node_modules/call-bind/.eslintignore new file mode 100644 index 0000000000000000000000000000000000000000..404abb22121cdcbb710c56f4ba2684789cb2868c --- /dev/null +++ b/data/node_modules/call-bind/.eslintignore @@ -0,0 +1 @@ +coverage/ diff --git a/data/node_modules/call-bind/.eslintrc b/data/node_modules/call-bind/.eslintrc new file mode 100644 index 0000000000000000000000000000000000000000..dfa9a6cdcf03e3f611cb91a59c68327d040a5d58 --- /dev/null +++ b/data/node_modules/call-bind/.eslintrc @@ -0,0 +1,16 @@ +{ + "root": true, + + "extends": "@ljharb", + + "rules": { + "func-name-matching": 0, + "id-length": 0, + "new-cap": [2, { + "capIsNewExceptions": [ + "GetIntrinsic", + ], + }], + "no-magic-numbers": 0, + }, +} diff --git a/data/node_modules/call-bind/.github/FUNDING.yml b/data/node_modules/call-bind/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..c70c2ecdb216df098ffbaba7773e6d8cf52e9bf3 --- /dev/null +++ b/data/node_modules/call-bind/.github/FUNDING.yml @@ -0,0 +1,12 @@ +# These are supported funding model platforms + +github: [ljharb] +patreon: # Replace with a single Patreon username +open_collective: # 
Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: npm/call-bind +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/data/node_modules/call-bind/.nycrc b/data/node_modules/call-bind/.nycrc new file mode 100644 index 0000000000000000000000000000000000000000..bdd626ce91477abbdd489b79988baebadbd3c897 --- /dev/null +++ b/data/node_modules/call-bind/.nycrc @@ -0,0 +1,9 @@ +{ + "all": true, + "check-coverage": false, + "reporter": ["text-summary", "text", "html", "json"], + "exclude": [ + "coverage", + "test" + ] +} diff --git a/data/node_modules/call-bind/CHANGELOG.md b/data/node_modules/call-bind/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..c653f701aff57965c171364c13205bb09678ea14 --- /dev/null +++ b/data/node_modules/call-bind/CHANGELOG.md @@ -0,0 +1,93 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [v1.0.7](https://github.com/ljharb/call-bind/compare/v1.0.6...v1.0.7) - 2024-02-12 + +### Commits + +- [Refactor] use `es-define-property` [`09b76a0`](https://github.com/ljharb/call-bind/commit/09b76a01634440461d44a80c9924ec4b500f3b03) +- [Deps] update `get-intrinsic`, `set-function-length` [`ad5136d`](https://github.com/ljharb/call-bind/commit/ad5136ddda2a45c590959829ad3dce0c9f4e3590) + +## [v1.0.6](https://github.com/ljharb/call-bind/compare/v1.0.5...v1.0.6) - 2024-02-05 + +### Commits + +- [Dev Deps] update `aud`, `npmignore`, `tape` [`d564d5c`](https://github.com/ljharb/call-bind/commit/d564d5ce3e06a19df4d499c77f8d1a9da44e77aa) +- [Deps] update `get-intrinsic`, `set-function-length` [`cfc2bdc`](https://github.com/ljharb/call-bind/commit/cfc2bdca7b633df0e0e689e6b637f668f1c6792e) +- [Refactor] use `es-errors`, so things that only need those do not need `get-intrinsic` [`64cd289`](https://github.com/ljharb/call-bind/commit/64cd289ae5862c250a4ca80aa8d461047c166af5) +- [meta] add missing `engines.node` [`32a4038`](https://github.com/ljharb/call-bind/commit/32a4038857b62179f7f9b7b3df2c5260036be582) + +## [v1.0.5](https://github.com/ljharb/call-bind/compare/v1.0.4...v1.0.5) - 2023-10-19 + +### Commits + +- [Fix] throw an error on non-functions as early as possible [`f262408`](https://github.com/ljharb/call-bind/commit/f262408f822c840fbc268080f3ad7c429611066d) +- [Deps] update `set-function-length` [`3fff271`](https://github.com/ljharb/call-bind/commit/3fff27145a1e3a76a5b74f1d7c3c43d0fa3b9871) + +## [v1.0.4](https://github.com/ljharb/call-bind/compare/v1.0.3...v1.0.4) - 2023-10-19 + +## [v1.0.3](https://github.com/ljharb/call-bind/compare/v1.0.2...v1.0.3) - 2023-10-19 + +### Commits + +- [actions] reuse common workflows [`a994df6`](https://github.com/ljharb/call-bind/commit/a994df69f401f4bf735a4ccd77029b85d1549453) +- [meta] use `npmignore` to autogenerate an npmignore file 
[`eef3ef2`](https://github.com/ljharb/call-bind/commit/eef3ef21e1f002790837fedb8af2679c761fbdf5) +- [readme] flesh out content [`1845ccf`](https://github.com/ljharb/call-bind/commit/1845ccfd9976a607884cfc7157c93192cc16cf22) +- [actions] use `node/install` instead of `node/run`; use `codecov` action [`5b47d53`](https://github.com/ljharb/call-bind/commit/5b47d53d2fd74af5ea0a44f1d51e503cd42f7a90) +- [Refactor] use `set-function-length` [`a0e165c`](https://github.com/ljharb/call-bind/commit/a0e165c5dc61db781cbc919b586b1c2b8da0b150) +- [Dev Deps] update `@ljharb/eslint-config`, `aud`, `tape` [`9c50103`](https://github.com/ljharb/call-bind/commit/9c50103f44137279a817317cf6cc421a658f85b4) +- [meta] simplify "exports" [`019c6d0`](https://github.com/ljharb/call-bind/commit/019c6d06b0e1246ceed8e579f57e44441cbbf6d9) +- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `aud`, `auto-changelog`, `safe-publish-latest`, `tape` [`23bd718`](https://github.com/ljharb/call-bind/commit/23bd718a288d3b03042062b4ef5153b3cea83f11) +- [actions] update codecov uploader [`62552d7`](https://github.com/ljharb/call-bind/commit/62552d79cc79e05825e99aaba134ae5b37f33da5) +- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `aud`, `auto-changelog`, `tape` [`ec81665`](https://github.com/ljharb/call-bind/commit/ec81665b300f87eabff597afdc8b8092adfa7afd) +- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `safe-publish-latest`, `tape` [`35d67fc`](https://github.com/ljharb/call-bind/commit/35d67fcea883e686650f736f61da5ddca2592de8) +- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `aud`, `tape` [`0266d8d`](https://github.com/ljharb/call-bind/commit/0266d8d2a45086a922db366d0c2932fa463662ff) +- [Dev Deps] update `@ljharb/eslint-config`, `aud`, `tape` [`43a5b28`](https://github.com/ljharb/call-bind/commit/43a5b28a444e710e1bbf92adb8afb5cf7523a223) +- [Deps] update `define-data-property`, `function-bind`, `get-intrinsic` 
[`780eb36`](https://github.com/ljharb/call-bind/commit/780eb36552514f8cc99c70821ce698697c2726a5) +- [Dev Deps] update `aud`, `tape` [`90d50ad`](https://github.com/ljharb/call-bind/commit/90d50ad03b061e0268b3380b0065fcaec183dc05) +- [meta] use `prepublishOnly` script for npm 7+ [`44c5433`](https://github.com/ljharb/call-bind/commit/44c5433b7980e02b4870007046407cf6fc543329) +- [Deps] update `get-intrinsic` [`86bfbfc`](https://github.com/ljharb/call-bind/commit/86bfbfcf34afdc6eabc93ce3d408548d0e27d958) +- [Deps] update `get-intrinsic` [`5c53354`](https://github.com/ljharb/call-bind/commit/5c5335489be0294c18cd7a8bb6e08226ee019ff5) +- [actions] update checkout action [`4c393a8`](https://github.com/ljharb/call-bind/commit/4c393a8173b3c8e5b30d5b3297b3b94d48bf87f3) +- [Deps] update `get-intrinsic` [`4e70bde`](https://github.com/ljharb/call-bind/commit/4e70bdec0626acb11616d66250fc14565e716e91) +- [Deps] update `get-intrinsic` [`55ae803`](https://github.com/ljharb/call-bind/commit/55ae803a920bd93c369cd798c20de31f91e9fc60) + +## [v1.0.2](https://github.com/ljharb/call-bind/compare/v1.0.1...v1.0.2) - 2021-01-11 + +### Commits + +- [Fix] properly include the receiver in the bound length [`dbae7bc`](https://github.com/ljharb/call-bind/commit/dbae7bc676c079a0d33c0a43e9ef92cb7b01345d) + +## [v1.0.1](https://github.com/ljharb/call-bind/compare/v1.0.0...v1.0.1) - 2021-01-08 + +### Commits + +- [Tests] migrate tests to Github Actions [`b6db284`](https://github.com/ljharb/call-bind/commit/b6db284c36f8ccd195b88a6764fe84b7223a0da1) +- [meta] do not publish github action workflow files [`ec7fe46`](https://github.com/ljharb/call-bind/commit/ec7fe46e60cfa4764ee943d2755f5e5a366e578e) +- [Fix] preserve original function’s length when possible [`adbceaa`](https://github.com/ljharb/call-bind/commit/adbceaa3cac4b41ea78bb19d7ccdbaaf7e0bdadb) +- [Tests] gather coverage data on every job [`d69e23c`](https://github.com/ljharb/call-bind/commit/d69e23cc65f101ba1d4c19bb07fa8eb0ec624be8) +- [Dev Deps] 
update `eslint`, `@ljharb/eslint-config`, `aud`, `tape` [`2fd3586`](https://github.com/ljharb/call-bind/commit/2fd3586c5d47b335364c14293114c6b625ae1f71) +- [Deps] update `get-intrinsic` [`f23e931`](https://github.com/ljharb/call-bind/commit/f23e9318cc271c2add8bb38cfded85ee7baf8eee) +- [Deps] update `get-intrinsic` [`72d9f44`](https://github.com/ljharb/call-bind/commit/72d9f44e184465ba8dd3fb48260bbcff234985f2) +- [meta] fix FUNDING.yml [`e723573`](https://github.com/ljharb/call-bind/commit/e723573438c5a68dcec31fb5d96ea6b7e4a93be8) +- [eslint] ignore coverage output [`15e76d2`](https://github.com/ljharb/call-bind/commit/15e76d28a5f43e504696401e5b31ebb78ee1b532) +- [meta] add Automatic Rebase and Require Allow Edits workflows [`8fa4dab`](https://github.com/ljharb/call-bind/commit/8fa4dabb23ba3dd7bb92c9571c1241c08b56e4b6) + +## v1.0.0 - 2020-10-30 + +### Commits + +- Initial commit [`306cf98`](https://github.com/ljharb/call-bind/commit/306cf98c7ec9e7ef66b653ec152277ac1381eb50) +- Tests [`e10d0bb`](https://github.com/ljharb/call-bind/commit/e10d0bbdadc7a10ecedc9a1c035112d3e368b8df) +- Implementation [`43852ed`](https://github.com/ljharb/call-bind/commit/43852eda0f187327b7fad2423ca972149a52bd65) +- npm init [`408f860`](https://github.com/ljharb/call-bind/commit/408f860b773a2f610805fd3613d0d71bac1b6249) +- [meta] add Automatic Rebase and Require Allow Edits workflows [`fb349b2`](https://github.com/ljharb/call-bind/commit/fb349b2e48defbec8b5ec8a8395cc8f69f220b13) +- [meta] add `auto-changelog` [`c4001fc`](https://github.com/ljharb/call-bind/commit/c4001fc43031799ef908211c98d3b0fb2b60fde4) +- [meta] add "funding"; create `FUNDING.yml` [`d4d6d29`](https://github.com/ljharb/call-bind/commit/d4d6d2974a14bc2e98830468eda7fe6d6a776717) +- [Tests] add `npm run lint` [`dedfb98`](https://github.com/ljharb/call-bind/commit/dedfb98bd0ecefb08ddb9a94061bd10cde4332af) +- Only apps should have lockfiles 
[`54ac776`](https://github.com/ljharb/call-bind/commit/54ac77653db45a7361dc153d2f478e743f110650) +- [meta] add `safe-publish-latest` [`9ea8e43`](https://github.com/ljharb/call-bind/commit/9ea8e435b950ce9b705559cd651039f9bf40140f) diff --git a/data/node_modules/call-bind/LICENSE b/data/node_modules/call-bind/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..48f05d01d0acae75acada5bd42a3442d0699d067 --- /dev/null +++ b/data/node_modules/call-bind/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/data/node_modules/call-bind/README.md b/data/node_modules/call-bind/README.md new file mode 100644 index 0000000000000000000000000000000000000000..48e9047f0c02b482bdd43a9a1c08192f9d6a8208 --- /dev/null +++ b/data/node_modules/call-bind/README.md @@ -0,0 +1,64 @@ +# call-bind [![Version Badge][npm-version-svg]][package-url] + +[![github actions][actions-image]][actions-url] +[![coverage][codecov-image]][codecov-url] +[![dependency status][deps-svg]][deps-url] +[![dev dependency status][dev-deps-svg]][dev-deps-url] +[![License][license-image]][license-url] +[![Downloads][downloads-image]][downloads-url] + +[![npm badge][npm-badge-png]][package-url] + +Robustly `.call.bind()` a function. + +## Getting started + +```sh +npm install --save call-bind +``` + +## Usage/Examples + +```js +const assert = require('assert'); +const callBind = require('call-bind'); +const callBound = require('call-bind/callBound'); + +function f(a, b) { + assert.equal(this, 1); + assert.equal(a, 2); + assert.equal(b, 3); + assert.equal(arguments.length, 2); +} + +const fBound = callBind(f); + +const slice = callBound('Array.prototype.slice'); + +delete Function.prototype.call; +delete Function.prototype.bind; + +fBound(1, 2, 3); + +assert.deepEqual(slice([1, 2, 3, 4], 1, -1), [2, 3]); +``` + +## Tests + +Clone the repo, `npm install`, and run `npm test` + +[package-url]: https://npmjs.org/package/call-bind +[npm-version-svg]: https://versionbadg.es/ljharb/call-bind.svg +[deps-svg]: https://david-dm.org/ljharb/call-bind.svg +[deps-url]: https://david-dm.org/ljharb/call-bind +[dev-deps-svg]: https://david-dm.org/ljharb/call-bind/dev-status.svg +[dev-deps-url]: https://david-dm.org/ljharb/call-bind#info=devDependencies +[npm-badge-png]: https://nodei.co/npm/call-bind.png?downloads=true&stars=true +[license-image]: https://img.shields.io/npm/l/call-bind.svg +[license-url]: LICENSE +[downloads-image]: https://img.shields.io/npm/dm/call-bind.svg +[downloads-url]: 
https://npm-stat.com/charts.html?package=call-bind +[codecov-image]: https://codecov.io/gh/ljharb/call-bind/branch/main/graphs/badge.svg +[codecov-url]: https://app.codecov.io/gh/ljharb/call-bind/ +[actions-image]: https://img.shields.io/endpoint?url=https://github-actions-badge-u3jn4tfpocch.runkit.sh/ljharb/call-bind +[actions-url]: https://github.com/ljharb/call-bind/actions diff --git a/data/node_modules/call-bind/callBound.js b/data/node_modules/call-bind/callBound.js new file mode 100644 index 0000000000000000000000000000000000000000..8374adfd0549fef7cc678ab66089596afb7f8172 --- /dev/null +++ b/data/node_modules/call-bind/callBound.js @@ -0,0 +1,15 @@ +'use strict'; + +var GetIntrinsic = require('get-intrinsic'); + +var callBind = require('./'); + +var $indexOf = callBind(GetIntrinsic('String.prototype.indexOf')); + +module.exports = function callBoundIntrinsic(name, allowMissing) { + var intrinsic = GetIntrinsic(name, !!allowMissing); + if (typeof intrinsic === 'function' && $indexOf(name, '.prototype.') > -1) { + return callBind(intrinsic); + } + return intrinsic; +}; diff --git a/data/node_modules/call-bind/index.js b/data/node_modules/call-bind/index.js new file mode 100644 index 0000000000000000000000000000000000000000..01c5b3d4edaf9e0efad0a7b5c6df0b020750b004 --- /dev/null +++ b/data/node_modules/call-bind/index.js @@ -0,0 +1,35 @@ +'use strict'; + +var bind = require('function-bind'); +var GetIntrinsic = require('get-intrinsic'); +var setFunctionLength = require('set-function-length'); + +var $TypeError = require('es-errors/type'); +var $apply = GetIntrinsic('%Function.prototype.apply%'); +var $call = GetIntrinsic('%Function.prototype.call%'); +var $reflectApply = GetIntrinsic('%Reflect.apply%', true) || bind.call($call, $apply); + +var $defineProperty = require('es-define-property'); +var $max = GetIntrinsic('%Math.max%'); + +module.exports = function callBind(originalFunction) { + if (typeof originalFunction !== 'function') { + throw new $TypeError('a 
function is required'); + } + var func = $reflectApply(bind, $call, arguments); + return setFunctionLength( + func, + 1 + $max(0, originalFunction.length - (arguments.length - 1)), + true + ); +}; + +var applyBind = function applyBind() { + return $reflectApply(bind, $apply, arguments); +}; + +if ($defineProperty) { + $defineProperty(module.exports, 'apply', { value: applyBind }); +} else { + module.exports.apply = applyBind; +} diff --git a/data/node_modules/call-bind/package.json b/data/node_modules/call-bind/package.json new file mode 100644 index 0000000000000000000000000000000000000000..5ba88ff8521a50fe38222783ebfc36f8cc8be3df --- /dev/null +++ b/data/node_modules/call-bind/package.json @@ -0,0 +1,95 @@ +{ + "name": "call-bind", + "version": "1.0.7", + "description": "Robustly `.call.bind()` a function", + "main": "index.js", + "exports": { + ".": "./index.js", + "./callBound": "./callBound.js", + "./package.json": "./package.json" + }, + "scripts": { + "prepack": "npmignore --auto --commentLines=auto", + "prepublish": "not-in-publish || npm run prepublishOnly", + "prepublishOnly": "safe-publish-latest", + "lint": "eslint --ext=.js,.mjs .", + "postlint": "evalmd README.md", + "pretest": "npm run lint", + "tests-only": "nyc tape 'test/**/*.js'", + "test": "npm run tests-only", + "posttest": "aud --production", + "version": "auto-changelog && git add CHANGELOG.md", + "postversion": "auto-changelog && git add CHANGELOG.md && git commit --no-edit --amend && git tag -f \"v$(node -e \"console.log(require('./package.json').version)\")\"" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/ljharb/call-bind.git" + }, + "keywords": [ + "javascript", + "ecmascript", + "es", + "js", + "callbind", + "callbound", + "call", + "bind", + "bound", + "call-bind", + "call-bound", + "function", + "es-abstract" + ], + "author": "Jordan Harband ", + "funding": { + "url": "https://github.com/sponsors/ljharb" + }, + "license": "MIT", + "bugs": { + "url": 
"https://github.com/ljharb/call-bind/issues" + }, + "homepage": "https://github.com/ljharb/call-bind#readme", + "devDependencies": { + "@ljharb/eslint-config": "^21.1.0", + "aud": "^2.0.4", + "auto-changelog": "^2.4.0", + "es-value-fixtures": "^1.4.2", + "eslint": "=8.8.0", + "evalmd": "^0.0.19", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-strict-mode": "^1.0.1", + "in-publish": "^2.0.1", + "npmignore": "^0.3.1", + "nyc": "^10.3.2", + "object-inspect": "^1.13.1", + "safe-publish-latest": "^2.0.0", + "tape": "^5.7.4" + }, + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "testling": { + "files": "test/index.js" + }, + "auto-changelog": { + "output": "CHANGELOG.md", + "template": "keepachangelog", + "unreleased": false, + "commitLimit": false, + "backfillLimit": false, + "hideCredit": true + }, + "publishConfig": { + "ignore": [ + ".github/workflows" + ] + }, + "engines": { + "node": ">= 0.4" + } +} diff --git a/data/node_modules/call-bind/test/callBound.js b/data/node_modules/call-bind/test/callBound.js new file mode 100644 index 0000000000000000000000000000000000000000..c32319d70d3e3f75b6c9b046666af68ed52e8ce6 --- /dev/null +++ b/data/node_modules/call-bind/test/callBound.js @@ -0,0 +1,54 @@ +'use strict'; + +var test = require('tape'); + +var callBound = require('../callBound'); + +test('callBound', function (t) { + // static primitive + t.equal(callBound('Array.length'), Array.length, 'Array.length yields itself'); + t.equal(callBound('%Array.length%'), Array.length, '%Array.length% yields itself'); + + // static non-function object + t.equal(callBound('Array.prototype'), Array.prototype, 'Array.prototype yields itself'); + t.equal(callBound('%Array.prototype%'), Array.prototype, '%Array.prototype% yields itself'); + t.equal(callBound('Array.constructor'), Array.constructor, 'Array.constructor yields itself'); + 
t.equal(callBound('%Array.constructor%'), Array.constructor, '%Array.constructor% yields itself'); + + // static function + t.equal(callBound('Date.parse'), Date.parse, 'Date.parse yields itself'); + t.equal(callBound('%Date.parse%'), Date.parse, '%Date.parse% yields itself'); + + // prototype primitive + t.equal(callBound('Error.prototype.message'), Error.prototype.message, 'Error.prototype.message yields itself'); + t.equal(callBound('%Error.prototype.message%'), Error.prototype.message, '%Error.prototype.message% yields itself'); + + // prototype function + t.notEqual(callBound('Object.prototype.toString'), Object.prototype.toString, 'Object.prototype.toString does not yield itself'); + t.notEqual(callBound('%Object.prototype.toString%'), Object.prototype.toString, '%Object.prototype.toString% does not yield itself'); + t.equal(callBound('Object.prototype.toString')(true), Object.prototype.toString.call(true), 'call-bound Object.prototype.toString calls into the original'); + t.equal(callBound('%Object.prototype.toString%')(true), Object.prototype.toString.call(true), 'call-bound %Object.prototype.toString% calls into the original'); + + t['throws']( + function () { callBound('does not exist'); }, + SyntaxError, + 'nonexistent intrinsic throws' + ); + t['throws']( + function () { callBound('does not exist', true); }, + SyntaxError, + 'allowMissing arg still throws for unknown intrinsic' + ); + + t.test('real but absent intrinsic', { skip: typeof WeakRef !== 'undefined' }, function (st) { + st['throws']( + function () { callBound('WeakRef'); }, + TypeError, + 'real but absent intrinsic throws' + ); + st.equal(callBound('WeakRef', true), undefined, 'allowMissing arg avoids exception'); + st.end(); + }); + + t.end(); +}); diff --git a/data/node_modules/call-bind/test/index.js b/data/node_modules/call-bind/test/index.js new file mode 100644 index 0000000000000000000000000000000000000000..1fd46689ef778887c4e91ca705529c79b2ebb05e --- /dev/null +++ 
b/data/node_modules/call-bind/test/index.js @@ -0,0 +1,80 @@ +'use strict'; + +var callBind = require('../'); +var bind = require('function-bind'); +var gOPD = require('gopd'); +var hasStrictMode = require('has-strict-mode')(); +var forEach = require('for-each'); +var inspect = require('object-inspect'); +var v = require('es-value-fixtures'); + +var test = require('tape'); + +/* + * older engines have length nonconfigurable + * in io.js v3, it is configurable except on bound functions, hence the .bind() + */ +var functionsHaveConfigurableLengths = !!( + gOPD + && Object.getOwnPropertyDescriptor + && Object.getOwnPropertyDescriptor(bind.call(function () {}), 'length').configurable +); + +test('callBind', function (t) { + forEach(v.nonFunctions, function (nonFunction) { + t['throws']( + function () { callBind(nonFunction); }, + TypeError, + inspect(nonFunction) + ' is not a function' + ); + }); + + var sentinel = { sentinel: true }; + var func = function (a, b) { + // eslint-disable-next-line no-invalid-this + return [!hasStrictMode && this === global ? undefined : this, a, b]; + }; + t.equal(func.length, 2, 'original function length is 2'); + t.deepEqual(func(), [undefined, undefined, undefined], 'unbound func with too few args'); + t.deepEqual(func(1, 2), [undefined, 1, 2], 'unbound func with right args'); + t.deepEqual(func(1, 2, 3), [undefined, 1, 2], 'unbound func with too many args'); + + var bound = callBind(func); + t.equal(bound.length, func.length + 1, 'function length is preserved', { skip: !functionsHaveConfigurableLengths }); + t.deepEqual(bound(), [undefined, undefined, undefined], 'bound func with too few args'); + t.deepEqual(bound(1, 2), [hasStrictMode ? 1 : Object(1), 2, undefined], 'bound func with right args'); + t.deepEqual(bound(1, 2, 3), [hasStrictMode ? 
1 : Object(1), 2, 3], 'bound func with too many args'); + + var boundR = callBind(func, sentinel); + t.equal(boundR.length, func.length, 'function length is preserved', { skip: !functionsHaveConfigurableLengths }); + t.deepEqual(boundR(), [sentinel, undefined, undefined], 'bound func with receiver, with too few args'); + t.deepEqual(boundR(1, 2), [sentinel, 1, 2], 'bound func with receiver, with right args'); + t.deepEqual(boundR(1, 2, 3), [sentinel, 1, 2], 'bound func with receiver, with too many args'); + + var boundArg = callBind(func, sentinel, 1); + t.equal(boundArg.length, func.length - 1, 'function length is preserved', { skip: !functionsHaveConfigurableLengths }); + t.deepEqual(boundArg(), [sentinel, 1, undefined], 'bound func with receiver and arg, with too few args'); + t.deepEqual(boundArg(2), [sentinel, 1, 2], 'bound func with receiver and arg, with right arg'); + t.deepEqual(boundArg(2, 3), [sentinel, 1, 2], 'bound func with receiver and arg, with too many args'); + + t.test('callBind.apply', function (st) { + var aBound = callBind.apply(func); + st.deepEqual(aBound(sentinel), [sentinel, undefined, undefined], 'apply-bound func with no args'); + st.deepEqual(aBound(sentinel, [1], 4), [sentinel, 1, undefined], 'apply-bound func with too few args'); + st.deepEqual(aBound(sentinel, [1, 2], 4), [sentinel, 1, 2], 'apply-bound func with right args'); + + var aBoundArg = callBind.apply(func); + st.deepEqual(aBoundArg(sentinel, [1, 2, 3], 4), [sentinel, 1, 2], 'apply-bound func with too many args'); + st.deepEqual(aBoundArg(sentinel, [1, 2], 4), [sentinel, 1, 2], 'apply-bound func with right args'); + st.deepEqual(aBoundArg(sentinel, [1], 4), [sentinel, 1, undefined], 'apply-bound func with too few args'); + + var aBoundR = callBind.apply(func, sentinel); + st.deepEqual(aBoundR([1, 2, 3], 4), [sentinel, 1, 2], 'apply-bound func with receiver and too many args'); + st.deepEqual(aBoundR([1, 2], 4), [sentinel, 1, 2], 'apply-bound func with receiver and right 
args'); + st.deepEqual(aBoundR([1], 4), [sentinel, 1, undefined], 'apply-bound func with receiver and too few args'); + + st.end(); + }); + + t.end(); +}); diff --git a/data/node_modules/content-disposition/HISTORY.md b/data/node_modules/content-disposition/HISTORY.md new file mode 100644 index 0000000000000000000000000000000000000000..488effa0c9440f4e214102980665781a62ba7059 --- /dev/null +++ b/data/node_modules/content-disposition/HISTORY.md @@ -0,0 +1,60 @@ +0.5.4 / 2021-12-10 +================== + + * deps: safe-buffer@5.2.1 + +0.5.3 / 2018-12-17 +================== + + * Use `safe-buffer` for improved Buffer API + +0.5.2 / 2016-12-08 +================== + + * Fix `parse` to accept any linear whitespace character + +0.5.1 / 2016-01-17 +================== + + * perf: enable strict mode + +0.5.0 / 2014-10-11 +================== + + * Add `parse` function + +0.4.0 / 2014-09-21 +================== + + * Expand non-Unicode `filename` to the full ISO-8859-1 charset + +0.3.0 / 2014-09-20 +================== + + * Add `fallback` option + * Add `type` option + +0.2.0 / 2014-09-19 +================== + + * Reduce ambiguity of file names with hex escape in buggy browsers + +0.1.2 / 2014-09-19 +================== + + * Fix periodic invalid Unicode filename header + +0.1.1 / 2014-09-19 +================== + + * Fix invalid characters appearing in `filename*` parameter + +0.1.0 / 2014-09-18 +================== + + * Make the `filename` argument optional + +0.0.0 / 2014-09-18 +================== + + * Initial release diff --git a/data/node_modules/content-disposition/LICENSE b/data/node_modules/content-disposition/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..84441fbb5709262c2bfc9b5ff0166ad4f024a1b8 --- /dev/null +++ b/data/node_modules/content-disposition/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2014-2017 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this 
software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/data/node_modules/content-disposition/README.md b/data/node_modules/content-disposition/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3a0bb055949cdaed008f0f85e111624214213873 --- /dev/null +++ b/data/node_modules/content-disposition/README.md @@ -0,0 +1,142 @@ +# content-disposition + +[![NPM Version][npm-image]][npm-url] +[![NPM Downloads][downloads-image]][downloads-url] +[![Node.js Version][node-version-image]][node-version-url] +[![Build Status][github-actions-ci-image]][github-actions-ci-url] +[![Test Coverage][coveralls-image]][coveralls-url] + +Create and parse HTTP `Content-Disposition` header + +## Installation + +```sh +$ npm install content-disposition +``` + +## API + +```js +var contentDisposition = require('content-disposition') +``` + +### contentDisposition(filename, options) + +Create an attachment `Content-Disposition` header value using the given file name, +if supplied. 
The `filename` is optional and if no file name is desired, but you +want to specify `options`, set `filename` to `undefined`. + +```js +res.setHeader('Content-Disposition', contentDisposition('∫ maths.pdf')) +``` + +**note** HTTP headers are of the ISO-8859-1 character set. If you are writing this +header through a means different from `setHeader` in Node.js, you'll want to specify +the `'binary'` encoding in Node.js. + +#### Options + +`contentDisposition` accepts these properties in the options object. + +##### fallback + +If the `filename` option is outside ISO-8859-1, then the file name is actually +stored in a supplemental field for clients that support Unicode file names and +a ISO-8859-1 version of the file name is automatically generated. + +This specifies the ISO-8859-1 file name to override the automatic generation or +disables the generation all together, defaults to `true`. + + - A string will specify the ISO-8859-1 file name to use in place of automatic + generation. + - `false` will disable including a ISO-8859-1 file name and only include the + Unicode version (unless the file name is already ISO-8859-1). + - `true` will enable automatic generation if the file name is outside ISO-8859-1. + +If the `filename` option is ISO-8859-1 and this option is specified and has a +different value, then the `filename` option is encoded in the extended field +and this set as the fallback field, even though they are both ISO-8859-1. + +##### type + +Specifies the disposition type, defaults to `"attachment"`. This can also be +`"inline"`, or any other value (all values except inline are treated like +`attachment`, but can convey additional information if both parties agree to +it). The type is normalized to lower-case. + +### contentDisposition.parse(string) + +```js +var disposition = contentDisposition.parse('attachment; filename="EURO rates.txt"; filename*=UTF-8\'\'%e2%82%ac%20rates.txt') +``` + +Parse a `Content-Disposition` header string. 
This automatically handles extended +("Unicode") parameters by decoding them and providing them under the standard +parameter name. This will return an object with the following properties (examples +are shown for the string `'attachment; filename="EURO rates.txt"; filename*=UTF-8\'\'%e2%82%ac%20rates.txt'`): + + - `type`: The disposition type (always lower case). Example: `'attachment'` + + - `parameters`: An object of the parameters in the disposition (name of parameter + always lower case and extended versions replace non-extended versions). Example: + `{filename: "€ rates.txt"}` + +## Examples + +### Send a file for download + +```js +var contentDisposition = require('content-disposition') +var destroy = require('destroy') +var fs = require('fs') +var http = require('http') +var onFinished = require('on-finished') + +var filePath = '/path/to/public/plans.pdf' + +http.createServer(function onRequest (req, res) { + // set headers + res.setHeader('Content-Type', 'application/pdf') + res.setHeader('Content-Disposition', contentDisposition(filePath)) + + // send file + var stream = fs.createReadStream(filePath) + stream.pipe(res) + onFinished(res, function () { + destroy(stream) + }) +}) +``` + +## Testing + +```sh +$ npm test +``` + +## References + +- [RFC 2616: Hypertext Transfer Protocol -- HTTP/1.1][rfc-2616] +- [RFC 5987: Character Set and Language Encoding for Hypertext Transfer Protocol (HTTP) Header Field Parameters][rfc-5987] +- [RFC 6266: Use of the Content-Disposition Header Field in the Hypertext Transfer Protocol (HTTP)][rfc-6266] +- [Test Cases for HTTP Content-Disposition header field (RFC 6266) and the Encodings defined in RFCs 2047, 2231 and 5987][tc-2231] + +[rfc-2616]: https://tools.ietf.org/html/rfc2616 +[rfc-5987]: https://tools.ietf.org/html/rfc5987 +[rfc-6266]: https://tools.ietf.org/html/rfc6266 +[tc-2231]: http://greenbytes.de/tech/tc2231/ + +## License + +[MIT](LICENSE) + +[npm-image]: https://img.shields.io/npm/v/content-disposition.svg 
+[npm-url]: https://npmjs.org/package/content-disposition +[node-version-image]: https://img.shields.io/node/v/content-disposition.svg +[node-version-url]: https://nodejs.org/en/download +[coveralls-image]: https://img.shields.io/coveralls/jshttp/content-disposition.svg +[coveralls-url]: https://coveralls.io/r/jshttp/content-disposition?branch=master +[downloads-image]: https://img.shields.io/npm/dm/content-disposition.svg +[downloads-url]: https://npmjs.org/package/content-disposition +[github-actions-ci-image]: https://img.shields.io/github/workflow/status/jshttp/content-disposition/ci/master?label=ci +[github-actions-ci-url]: https://github.com/jshttp/content-disposition?query=workflow%3Aci diff --git a/data/node_modules/content-disposition/index.js b/data/node_modules/content-disposition/index.js new file mode 100644 index 0000000000000000000000000000000000000000..ecec899a992d46f2e903a87475b1c342f2ce4d30 --- /dev/null +++ b/data/node_modules/content-disposition/index.js @@ -0,0 +1,458 @@ +/*! + * content-disposition + * Copyright(c) 2014-2017 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module exports. + * @public + */ + +module.exports = contentDisposition +module.exports.parse = parse + +/** + * Module dependencies. + * @private + */ + +var basename = require('path').basename +var Buffer = require('safe-buffer').Buffer + +/** + * RegExp to match non attr-char, *after* encodeURIComponent (i.e. not including "%") + * @private + */ + +var ENCODE_URL_ATTR_CHAR_REGEXP = /[\x00-\x20"'()*,/:;<=>?@[\\\]{}\x7f]/g // eslint-disable-line no-control-regex + +/** + * RegExp to match percent encoding escape. + * @private + */ + +var HEX_ESCAPE_REGEXP = /%[0-9A-Fa-f]{2}/ +var HEX_ESCAPE_REPLACE_REGEXP = /%([0-9A-Fa-f]{2})/g + +/** + * RegExp to match non-latin1 characters. 
+ * @private + */ + +var NON_LATIN1_REGEXP = /[^\x20-\x7e\xa0-\xff]/g + +/** + * RegExp to match quoted-pair in RFC 2616 + * + * quoted-pair = "\" CHAR + * CHAR = + * @private + */ + +var QESC_REGEXP = /\\([\u0000-\u007f])/g // eslint-disable-line no-control-regex + +/** + * RegExp to match chars that must be quoted-pair in RFC 2616 + * @private + */ + +var QUOTE_REGEXP = /([\\"])/g + +/** + * RegExp for various RFC 2616 grammar + * + * parameter = token "=" ( token | quoted-string ) + * token = 1* + * separators = "(" | ")" | "<" | ">" | "@" + * | "," | ";" | ":" | "\" | <"> + * | "/" | "[" | "]" | "?" | "=" + * | "{" | "}" | SP | HT + * quoted-string = ( <"> *(qdtext | quoted-pair ) <"> ) + * qdtext = > + * quoted-pair = "\" CHAR + * CHAR = + * TEXT = + * LWS = [CRLF] 1*( SP | HT ) + * CRLF = CR LF + * CR = + * LF = + * SP = + * HT = + * CTL = + * OCTET = + * @private + */ + +var PARAM_REGEXP = /;[\x09\x20]*([!#$%&'*+.0-9A-Z^_`a-z|~-]+)[\x09\x20]*=[\x09\x20]*("(?:[\x20!\x23-\x5b\x5d-\x7e\x80-\xff]|\\[\x20-\x7e])*"|[!#$%&'*+.0-9A-Z^_`a-z|~-]+)[\x09\x20]*/g // eslint-disable-line no-control-regex +var TEXT_REGEXP = /^[\x20-\x7e\x80-\xff]+$/ +var TOKEN_REGEXP = /^[!#$%&'*+.0-9A-Z^_`a-z|~-]+$/ + +/** + * RegExp for various RFC 5987 grammar + * + * ext-value = charset "'" [ language ] "'" value-chars + * charset = "UTF-8" / "ISO-8859-1" / mime-charset + * mime-charset = 1*mime-charsetc + * mime-charsetc = ALPHA / DIGIT + * / "!" / "#" / "$" / "%" / "&" + * / "+" / "-" / "^" / "_" / "`" + * / "{" / "}" / "~" + * language = ( 2*3ALPHA [ extlang ] ) + * / 4ALPHA + * / 5*8ALPHA + * extlang = *3( "-" 3ALPHA ) + * value-chars = *( pct-encoded / attr-char ) + * pct-encoded = "%" HEXDIG HEXDIG + * attr-char = ALPHA / DIGIT + * / "!" / "#" / "$" / "&" / "+" / "-" / "." 
+ * / "^" / "_" / "`" / "|" / "~" + * @private + */ + +var EXT_VALUE_REGEXP = /^([A-Za-z0-9!#$%&+\-^_`{}~]+)'(?:[A-Za-z]{2,3}(?:-[A-Za-z]{3}){0,3}|[A-Za-z]{4,8}|)'((?:%[0-9A-Fa-f]{2}|[A-Za-z0-9!#$&+.^_`|~-])+)$/ + +/** + * RegExp for various RFC 6266 grammar + * + * disposition-type = "inline" | "attachment" | disp-ext-type + * disp-ext-type = token + * disposition-parm = filename-parm | disp-ext-parm + * filename-parm = "filename" "=" value + * | "filename*" "=" ext-value + * disp-ext-parm = token "=" value + * | ext-token "=" ext-value + * ext-token = + * @private + */ + +var DISPOSITION_TYPE_REGEXP = /^([!#$%&'*+.0-9A-Z^_`a-z|~-]+)[\x09\x20]*(?:$|;)/ // eslint-disable-line no-control-regex + +/** + * Create an attachment Content-Disposition header. + * + * @param {string} [filename] + * @param {object} [options] + * @param {string} [options.type=attachment] + * @param {string|boolean} [options.fallback=true] + * @return {string} + * @public + */ + +function contentDisposition (filename, options) { + var opts = options || {} + + // get type + var type = opts.type || 'attachment' + + // get parameters + var params = createparams(filename, opts.fallback) + + // format into string + return format(new ContentDisposition(type, params)) +} + +/** + * Create parameters object from filename and fallback. 
+ * + * @param {string} [filename] + * @param {string|boolean} [fallback=true] + * @return {object} + * @private + */ + +function createparams (filename, fallback) { + if (filename === undefined) { + return + } + + var params = {} + + if (typeof filename !== 'string') { + throw new TypeError('filename must be a string') + } + + // fallback defaults to true + if (fallback === undefined) { + fallback = true + } + + if (typeof fallback !== 'string' && typeof fallback !== 'boolean') { + throw new TypeError('fallback must be a string or boolean') + } + + if (typeof fallback === 'string' && NON_LATIN1_REGEXP.test(fallback)) { + throw new TypeError('fallback must be ISO-8859-1 string') + } + + // restrict to file base name + var name = basename(filename) + + // determine if name is suitable for quoted string + var isQuotedString = TEXT_REGEXP.test(name) + + // generate fallback name + var fallbackName = typeof fallback !== 'string' + ? fallback && getlatin1(name) + : basename(fallback) + var hasFallback = typeof fallbackName === 'string' && fallbackName !== name + + // set extended filename parameter + if (hasFallback || !isQuotedString || HEX_ESCAPE_REGEXP.test(name)) { + params['filename*'] = name + } + + // set filename parameter + if (isQuotedString || hasFallback) { + params.filename = hasFallback + ? fallbackName + : name + } + + return params +} + +/** + * Format object to Content-Disposition header. 
+ * + * @param {object} obj + * @param {string} obj.type + * @param {object} [obj.parameters] + * @return {string} + * @private + */ + +function format (obj) { + var parameters = obj.parameters + var type = obj.type + + if (!type || typeof type !== 'string' || !TOKEN_REGEXP.test(type)) { + throw new TypeError('invalid type') + } + + // start with normalized type + var string = String(type).toLowerCase() + + // append parameters + if (parameters && typeof parameters === 'object') { + var param + var params = Object.keys(parameters).sort() + + for (var i = 0; i < params.length; i++) { + param = params[i] + + var val = param.substr(-1) === '*' + ? ustring(parameters[param]) + : qstring(parameters[param]) + + string += '; ' + param + '=' + val + } + } + + return string +} + +/** + * Decode a RFC 5987 field value (gracefully). + * + * @param {string} str + * @return {string} + * @private + */ + +function decodefield (str) { + var match = EXT_VALUE_REGEXP.exec(str) + + if (!match) { + throw new TypeError('invalid extended field value') + } + + var charset = match[1].toLowerCase() + var encoded = match[2] + var value + + // to binary string + var binary = encoded.replace(HEX_ESCAPE_REPLACE_REGEXP, pdecode) + + switch (charset) { + case 'iso-8859-1': + value = getlatin1(binary) + break + case 'utf-8': + value = Buffer.from(binary, 'binary').toString('utf8') + break + default: + throw new TypeError('unsupported charset in extended field') + } + + return value +} + +/** + * Get ISO-8859-1 version of string. + * + * @param {string} val + * @return {string} + * @private + */ + +function getlatin1 (val) { + // simple Unicode -> ISO-8859-1 transformation + return String(val).replace(NON_LATIN1_REGEXP, '?') +} + +/** + * Parse Content-Disposition header string. 
+ * + * @param {string} string + * @return {object} + * @public + */ + +function parse (string) { + if (!string || typeof string !== 'string') { + throw new TypeError('argument string is required') + } + + var match = DISPOSITION_TYPE_REGEXP.exec(string) + + if (!match) { + throw new TypeError('invalid type format') + } + + // normalize type + var index = match[0].length + var type = match[1].toLowerCase() + + var key + var names = [] + var params = {} + var value + + // calculate index to start at + index = PARAM_REGEXP.lastIndex = match[0].substr(-1) === ';' + ? index - 1 + : index + + // match parameters + while ((match = PARAM_REGEXP.exec(string))) { + if (match.index !== index) { + throw new TypeError('invalid parameter format') + } + + index += match[0].length + key = match[1].toLowerCase() + value = match[2] + + if (names.indexOf(key) !== -1) { + throw new TypeError('invalid duplicate parameter') + } + + names.push(key) + + if (key.indexOf('*') + 1 === key.length) { + // decode extended value + key = key.slice(0, -1) + value = decodefield(value) + + // overwrite existing value + params[key] = value + continue + } + + if (typeof params[key] === 'string') { + continue + } + + if (value[0] === '"') { + // remove quotes and escapes + value = value + .substr(1, value.length - 2) + .replace(QESC_REGEXP, '$1') + } + + params[key] = value + } + + if (index !== -1 && index !== string.length) { + throw new TypeError('invalid parameter format') + } + + return new ContentDisposition(type, params) +} + +/** + * Percent decode a single character. + * + * @param {string} str + * @param {string} hex + * @return {string} + * @private + */ + +function pdecode (str, hex) { + return String.fromCharCode(parseInt(hex, 16)) +} + +/** + * Percent encode a single character. 
+ * + * @param {string} char + * @return {string} + * @private + */ + +function pencode (char) { + return '%' + String(char) + .charCodeAt(0) + .toString(16) + .toUpperCase() +} + +/** + * Quote a string for HTTP. + * + * @param {string} val + * @return {string} + * @private + */ + +function qstring (val) { + var str = String(val) + + return '"' + str.replace(QUOTE_REGEXP, '\\$1') + '"' +} + +/** + * Encode a Unicode string for HTTP (RFC 5987). + * + * @param {string} val + * @return {string} + * @private + */ + +function ustring (val) { + var str = String(val) + + // percent encode as UTF-8 + var encoded = encodeURIComponent(str) + .replace(ENCODE_URL_ATTR_CHAR_REGEXP, pencode) + + return 'UTF-8\'\'' + encoded +} + +/** + * Class for parsed Content-Disposition header for v8 optimization + * + * @public + * @param {string} type + * @param {object} parameters + * @constructor + */ + +function ContentDisposition (type, parameters) { + this.type = type + this.parameters = parameters +} diff --git a/data/node_modules/content-disposition/package.json b/data/node_modules/content-disposition/package.json new file mode 100644 index 0000000000000000000000000000000000000000..43c70ce24a45a9a8f9eec7c6b6a30e0324d3078d --- /dev/null +++ b/data/node_modules/content-disposition/package.json @@ -0,0 +1,44 @@ +{ + "name": "content-disposition", + "description": "Create and parse Content-Disposition header", + "version": "0.5.4", + "author": "Douglas Christopher Wilson ", + "license": "MIT", + "keywords": [ + "content-disposition", + "http", + "rfc6266", + "res" + ], + "repository": "jshttp/content-disposition", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "devDependencies": { + "deep-equal": "1.0.1", + "eslint": "7.32.0", + "eslint-config-standard": "13.0.1", + "eslint-plugin-import": "2.25.3", + "eslint-plugin-markdown": "2.2.1", + "eslint-plugin-node": "11.1.0", + "eslint-plugin-promise": "5.2.0", + "eslint-plugin-standard": "4.1.0", + "istanbul": "0.4.5", + "mocha": 
"9.1.3" + }, + "files": [ + "LICENSE", + "HISTORY.md", + "README.md", + "index.js" + ], + "engines": { + "node": ">= 0.6" + }, + "scripts": { + "lint": "eslint .", + "test": "mocha --reporter spec --bail --check-leaks test/", + "test-ci": "istanbul cover node_modules/mocha/bin/_mocha --report lcovonly -- --reporter spec --check-leaks test/", + "test-cov": "istanbul cover node_modules/mocha/bin/_mocha -- --reporter dot --check-leaks test/" + } +} diff --git a/data/node_modules/content-type/HISTORY.md b/data/node_modules/content-type/HISTORY.md new file mode 100644 index 0000000000000000000000000000000000000000..458367139eb9f0af3daa5449ff0a3d9e2e189582 --- /dev/null +++ b/data/node_modules/content-type/HISTORY.md @@ -0,0 +1,29 @@ +1.0.5 / 2023-01-29 +================== + + * perf: skip value escaping when unnecessary + +1.0.4 / 2017-09-11 +================== + + * perf: skip parameter parsing when no parameters + +1.0.3 / 2017-09-10 +================== + + * perf: remove argument reassignment + +1.0.2 / 2016-05-09 +================== + + * perf: enable strict mode + +1.0.1 / 2015-02-13 +================== + + * Improve missing `Content-Type` header error message + +1.0.0 / 2015-02-01 +================== + + * Initial implementation, derived from `media-typer@0.3.0` diff --git a/data/node_modules/content-type/LICENSE b/data/node_modules/content-type/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..34b1a2de37216b60b749c23b6f894e51d701ecf0 --- /dev/null +++ b/data/node_modules/content-type/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom 
the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/data/node_modules/content-type/README.md b/data/node_modules/content-type/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c1a922a9afba84293f449dc4b661124fbac2fd5d --- /dev/null +++ b/data/node_modules/content-type/README.md @@ -0,0 +1,94 @@ +# content-type + +[![NPM Version][npm-version-image]][npm-url] +[![NPM Downloads][npm-downloads-image]][npm-url] +[![Node.js Version][node-image]][node-url] +[![Build Status][ci-image]][ci-url] +[![Coverage Status][coveralls-image]][coveralls-url] + +Create and parse HTTP Content-Type header according to RFC 7231 + +## Installation + +```sh +$ npm install content-type +``` + +## API + +```js +var contentType = require('content-type') +``` + +### contentType.parse(string) + +```js +var obj = contentType.parse('image/svg+xml; charset=utf-8') +``` + +Parse a `Content-Type` header. This will return an object with the following +properties (examples are shown for the string `'image/svg+xml; charset=utf-8'`): + + - `type`: The media type (the type and subtype, always lower case). + Example: `'image/svg+xml'` + + - `parameters`: An object of the parameters in the media type (name of parameter + always lower case). Example: `{charset: 'utf-8'}` + +Throws a `TypeError` if the string is missing or invalid. 
+ +### contentType.parse(req) + +```js +var obj = contentType.parse(req) +``` + +Parse the `Content-Type` header from the given `req`. Short-cut for +`contentType.parse(req.headers['content-type'])`. + +Throws a `TypeError` if the `Content-Type` header is missing or invalid. + +### contentType.parse(res) + +```js +var obj = contentType.parse(res) +``` + +Parse the `Content-Type` header set on the given `res`. Short-cut for +`contentType.parse(res.getHeader('content-type'))`. + +Throws a `TypeError` if the `Content-Type` header is missing or invalid. + +### contentType.format(obj) + +```js +var str = contentType.format({ + type: 'image/svg+xml', + parameters: { charset: 'utf-8' } +}) +``` + +Format an object into a `Content-Type` header. This will return a string of the +content type for the given object with the following properties (examples are +shown that produce the string `'image/svg+xml; charset=utf-8'`): + + - `type`: The media type (will be lower-cased). Example: `'image/svg+xml'` + + - `parameters`: An object of the parameters in the media type (name of the + parameter will be lower-cased). Example: `{charset: 'utf-8'}` + +Throws a `TypeError` if the object contains an invalid type or parameter names. 
+ +## License + +[MIT](LICENSE) + +[ci-image]: https://badgen.net/github/checks/jshttp/content-type/master?label=ci +[ci-url]: https://github.com/jshttp/content-type/actions/workflows/ci.yml +[coveralls-image]: https://badgen.net/coveralls/c/github/jshttp/content-type/master +[coveralls-url]: https://coveralls.io/r/jshttp/content-type?branch=master +[node-image]: https://badgen.net/npm/node/content-type +[node-url]: https://nodejs.org/en/download +[npm-downloads-image]: https://badgen.net/npm/dm/content-type +[npm-url]: https://npmjs.org/package/content-type +[npm-version-image]: https://badgen.net/npm/v/content-type diff --git a/data/node_modules/content-type/index.js b/data/node_modules/content-type/index.js new file mode 100644 index 0000000000000000000000000000000000000000..41840e7bc3e48cda894597cd18e562a37a174f7c --- /dev/null +++ b/data/node_modules/content-type/index.js @@ -0,0 +1,225 @@ +/*! + * content-type + * Copyright(c) 2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * RegExp to match *( ";" parameter ) in RFC 7231 sec 3.1.1.1 + * + * parameter = token "=" ( token / quoted-string ) + * token = 1*tchar + * tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" + * / "+" / "-" / "." 
/ "^" / "_" / "`" / "|" / "~" + * / DIGIT / ALPHA + * ; any VCHAR, except delimiters + * quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE + * qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text + * obs-text = %x80-FF + * quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) + */ +var PARAM_REGEXP = /; *([!#$%&'*+.^_`|~0-9A-Za-z-]+) *= *("(?:[\u000b\u0020\u0021\u0023-\u005b\u005d-\u007e\u0080-\u00ff]|\\[\u000b\u0020-\u00ff])*"|[!#$%&'*+.^_`|~0-9A-Za-z-]+) */g // eslint-disable-line no-control-regex +var TEXT_REGEXP = /^[\u000b\u0020-\u007e\u0080-\u00ff]+$/ // eslint-disable-line no-control-regex +var TOKEN_REGEXP = /^[!#$%&'*+.^_`|~0-9A-Za-z-]+$/ + +/** + * RegExp to match quoted-pair in RFC 7230 sec 3.2.6 + * + * quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) + * obs-text = %x80-FF + */ +var QESC_REGEXP = /\\([\u000b\u0020-\u00ff])/g // eslint-disable-line no-control-regex + +/** + * RegExp to match chars that must be quoted-pair in RFC 7230 sec 3.2.6 + */ +var QUOTE_REGEXP = /([\\"])/g + +/** + * RegExp to match type in RFC 7231 sec 3.1.1.1 + * + * media-type = type "/" subtype + * type = token + * subtype = token + */ +var TYPE_REGEXP = /^[!#$%&'*+.^_`|~0-9A-Za-z-]+\/[!#$%&'*+.^_`|~0-9A-Za-z-]+$/ + +/** + * Module exports. + * @public + */ + +exports.format = format +exports.parse = parse + +/** + * Format object to media type. 
+ * + * @param {object} obj + * @return {string} + * @public + */ + +function format (obj) { + if (!obj || typeof obj !== 'object') { + throw new TypeError('argument obj is required') + } + + var parameters = obj.parameters + var type = obj.type + + if (!type || !TYPE_REGEXP.test(type)) { + throw new TypeError('invalid type') + } + + var string = type + + // append parameters + if (parameters && typeof parameters === 'object') { + var param + var params = Object.keys(parameters).sort() + + for (var i = 0; i < params.length; i++) { + param = params[i] + + if (!TOKEN_REGEXP.test(param)) { + throw new TypeError('invalid parameter name') + } + + string += '; ' + param + '=' + qstring(parameters[param]) + } + } + + return string +} + +/** + * Parse media type to object. + * + * @param {string|object} string + * @return {Object} + * @public + */ + +function parse (string) { + if (!string) { + throw new TypeError('argument string is required') + } + + // support req/res-like objects as argument + var header = typeof string === 'object' + ? getcontenttype(string) + : string + + if (typeof header !== 'string') { + throw new TypeError('argument string is required to be a string') + } + + var index = header.indexOf(';') + var type = index !== -1 + ? 
header.slice(0, index).trim() + : header.trim() + + if (!TYPE_REGEXP.test(type)) { + throw new TypeError('invalid media type') + } + + var obj = new ContentType(type.toLowerCase()) + + // parse parameters + if (index !== -1) { + var key + var match + var value + + PARAM_REGEXP.lastIndex = index + + while ((match = PARAM_REGEXP.exec(header))) { + if (match.index !== index) { + throw new TypeError('invalid parameter format') + } + + index += match[0].length + key = match[1].toLowerCase() + value = match[2] + + if (value.charCodeAt(0) === 0x22 /* " */) { + // remove quotes + value = value.slice(1, -1) + + // remove escapes + if (value.indexOf('\\') !== -1) { + value = value.replace(QESC_REGEXP, '$1') + } + } + + obj.parameters[key] = value + } + + if (index !== header.length) { + throw new TypeError('invalid parameter format') + } + } + + return obj +} + +/** + * Get content-type from req/res objects. + * + * @param {object} + * @return {Object} + * @private + */ + +function getcontenttype (obj) { + var header + + if (typeof obj.getHeader === 'function') { + // res-like + header = obj.getHeader('content-type') + } else if (typeof obj.headers === 'object') { + // req-like + header = obj.headers && obj.headers['content-type'] + } + + if (typeof header !== 'string') { + throw new TypeError('content-type header is missing from object') + } + + return header +} + +/** + * Quote a string if necessary. + * + * @param {string} val + * @return {string} + * @private + */ + +function qstring (val) { + var str = String(val) + + // no need to quote tokens + if (TOKEN_REGEXP.test(str)) { + return str + } + + if (str.length > 0 && !TEXT_REGEXP.test(str)) { + throw new TypeError('invalid parameter value') + } + + return '"' + str.replace(QUOTE_REGEXP, '\\$1') + '"' +} + +/** + * Class to represent a content type. 
+ * @private + */ +function ContentType (type) { + this.parameters = Object.create(null) + this.type = type +} diff --git a/data/node_modules/content-type/package.json b/data/node_modules/content-type/package.json new file mode 100644 index 0000000000000000000000000000000000000000..9db19f63fb96592d8d3bced654a72d47c12cef97 --- /dev/null +++ b/data/node_modules/content-type/package.json @@ -0,0 +1,42 @@ +{ + "name": "content-type", + "description": "Create and parse HTTP Content-Type header", + "version": "1.0.5", + "author": "Douglas Christopher Wilson ", + "license": "MIT", + "keywords": [ + "content-type", + "http", + "req", + "res", + "rfc7231" + ], + "repository": "jshttp/content-type", + "devDependencies": { + "deep-equal": "1.0.1", + "eslint": "8.32.0", + "eslint-config-standard": "15.0.1", + "eslint-plugin-import": "2.27.5", + "eslint-plugin-node": "11.1.0", + "eslint-plugin-promise": "6.1.1", + "eslint-plugin-standard": "4.1.0", + "mocha": "10.2.0", + "nyc": "15.1.0" + }, + "files": [ + "LICENSE", + "HISTORY.md", + "README.md", + "index.js" + ], + "engines": { + "node": ">= 0.6" + }, + "scripts": { + "lint": "eslint .", + "test": "mocha --reporter spec --check-leaks --bail test/", + "test-ci": "nyc --reporter=lcovonly --reporter=text npm test", + "test-cov": "nyc --reporter=html --reporter=text npm test", + "version": "node scripts/version-history.js && git add HISTORY.md" + } +} diff --git a/data/node_modules/cookie-signature/.npmignore b/data/node_modules/cookie-signature/.npmignore new file mode 100644 index 0000000000000000000000000000000000000000..f1250e584c94b80208b61cf7cae29db8e486a5c7 --- /dev/null +++ b/data/node_modules/cookie-signature/.npmignore @@ -0,0 +1,4 @@ +support +test +examples +*.sock diff --git a/data/node_modules/cookie-signature/History.md b/data/node_modules/cookie-signature/History.md new file mode 100644 index 0000000000000000000000000000000000000000..78513cc3d28ce3516c93b4d425f83df247486ae5 --- /dev/null +++ 
b/data/node_modules/cookie-signature/History.md @@ -0,0 +1,38 @@ +1.0.6 / 2015-02-03 +================== + +* use `npm test` instead of `make test` to run tests +* clearer assertion messages when checking input + + +1.0.5 / 2014-09-05 +================== + +* add license to package.json + +1.0.4 / 2014-06-25 +================== + + * corrected avoidance of timing attacks (thanks @tenbits!) + +1.0.3 / 2014-01-28 +================== + + * [incorrect] fix for timing attacks + +1.0.2 / 2014-01-28 +================== + + * fix missing repository warning + * fix typo in test + +1.0.1 / 2013-04-15 +================== + + * Revert "Changed underlying HMAC algo. to sha512." + * Revert "Fix for timing attacks on MAC verification." + +0.0.1 / 2010-01-03 +================== + + * Initial release diff --git a/data/node_modules/cookie-signature/Readme.md b/data/node_modules/cookie-signature/Readme.md new file mode 100644 index 0000000000000000000000000000000000000000..2559e841b02edfdc128176bfbdc0b938209a99ea --- /dev/null +++ b/data/node_modules/cookie-signature/Readme.md @@ -0,0 +1,42 @@ + +# cookie-signature + + Sign and unsign cookies. 
+ +## Example + +```js +var cookie = require('cookie-signature'); + +var val = cookie.sign('hello', 'tobiiscool'); +val.should.equal('hello.DGDUkGlIkCzPz+C0B064FNgHdEjox7ch8tOBGslZ5QI'); + +var val = cookie.sign('hello', 'tobiiscool'); +cookie.unsign(val, 'tobiiscool').should.equal('hello'); +cookie.unsign(val, 'luna').should.be.false; +``` + +## License + +(The MIT License) + +Copyright (c) 2012 LearnBoost <tj@learnboost.com> + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/data/node_modules/cookie-signature/index.js b/data/node_modules/cookie-signature/index.js new file mode 100644 index 0000000000000000000000000000000000000000..b8c9463a238b7ec090ff9090234e3f34322a36df --- /dev/null +++ b/data/node_modules/cookie-signature/index.js @@ -0,0 +1,51 @@ +/** + * Module dependencies. + */ + +var crypto = require('crypto'); + +/** + * Sign the given `val` with `secret`. 
+ * + * @param {String} val + * @param {String} secret + * @return {String} + * @api private + */ + +exports.sign = function(val, secret){ + if ('string' != typeof val) throw new TypeError("Cookie value must be provided as a string."); + if ('string' != typeof secret) throw new TypeError("Secret string must be provided."); + return val + '.' + crypto + .createHmac('sha256', secret) + .update(val) + .digest('base64') + .replace(/\=+$/, ''); +}; + +/** + * Unsign and decode the given `val` with `secret`, + * returning `false` if the signature is invalid. + * + * @param {String} val + * @param {String} secret + * @return {String|Boolean} + * @api private + */ + +exports.unsign = function(val, secret){ + if ('string' != typeof val) throw new TypeError("Signed cookie string must be provided."); + if ('string' != typeof secret) throw new TypeError("Secret string must be provided."); + var str = val.slice(0, val.lastIndexOf('.')) + , mac = exports.sign(str, secret); + + return sha1(mac) == sha1(val) ? 
str : false; +}; + +/** + * Private + */ + +function sha1(str){ + return crypto.createHash('sha1').update(str).digest('hex'); +} diff --git a/data/node_modules/cookie-signature/package.json b/data/node_modules/cookie-signature/package.json new file mode 100644 index 0000000000000000000000000000000000000000..29c4498e07ab1ae43692d7a27f959771a459815c --- /dev/null +++ b/data/node_modules/cookie-signature/package.json @@ -0,0 +1,18 @@ +{ + "name": "cookie-signature", + "version": "1.0.6", + "description": "Sign and unsign cookies", + "keywords": ["cookie", "sign", "unsign"], + "author": "TJ Holowaychuk ", + "license": "MIT", + "repository": { "type": "git", "url": "https://github.com/visionmedia/node-cookie-signature.git"}, + "dependencies": {}, + "devDependencies": { + "mocha": "*", + "should": "*" + }, + "scripts": { + "test": "mocha --require should --reporter spec" + }, + "main": "index" +} diff --git a/data/node_modules/cookie/HISTORY.md b/data/node_modules/cookie/HISTORY.md new file mode 100644 index 0000000000000000000000000000000000000000..41ae4b013c5d07b90c13e59a01009e4fd6bb2038 --- /dev/null +++ b/data/node_modules/cookie/HISTORY.md @@ -0,0 +1,147 @@ +0.6.0 / 2023-11-06 +================== + + * Add `partitioned` option + +0.5.0 / 2022-04-11 +================== + + * Add `priority` option + * Fix `expires` option to reject invalid dates + * perf: improve default decode speed + * perf: remove slow string split in parse + +0.4.2 / 2022-02-02 +================== + + * perf: read value only when assigning in parse + * perf: remove unnecessary regexp in parse + +0.4.1 / 2020-04-21 +================== + + * Fix `maxAge` option to reject invalid values + +0.4.0 / 2019-05-15 +================== + + * Add `SameSite=None` support + +0.3.1 / 2016-05-26 +================== + + * Fix `sameSite: true` to work with draft-7 clients + - `true` now sends `SameSite=Strict` instead of `SameSite` + +0.3.0 / 2016-05-26 +================== + + * Add `sameSite` option + - Replaces 
`firstPartyOnly` option, never implemented by browsers + * Improve error message when `encode` is not a function + * Improve error message when `expires` is not a `Date` + +0.2.4 / 2016-05-20 +================== + + * perf: enable strict mode + * perf: use for loop in parse + * perf: use string concatenation for serialization + +0.2.3 / 2015-10-25 +================== + + * Fix cookie `Max-Age` to never be a floating point number + +0.2.2 / 2015-09-17 +================== + + * Fix regression when setting empty cookie value + - Ease the new restriction, which is just basic header-level validation + * Fix typo in invalid value errors + +0.2.1 / 2015-09-17 +================== + + * Throw on invalid values provided to `serialize` + - Ensures the resulting string is a valid HTTP header value + +0.2.0 / 2015-08-13 +================== + + * Add `firstPartyOnly` option + * Throw better error for invalid argument to parse + * perf: hoist regular expression + +0.1.5 / 2015-09-17 +================== + + * Fix regression when setting empty cookie value + - Ease the new restriction, which is just basic header-level validation + * Fix typo in invalid value errors + +0.1.4 / 2015-09-17 +================== + + * Throw better error for invalid argument to parse + * Throw on invalid values provided to `serialize` + - Ensures the resulting string is a valid HTTP header value + +0.1.3 / 2015-05-19 +================== + + * Reduce the scope of try-catch deopt + * Remove argument reassignments + +0.1.2 / 2014-04-16 +================== + + * Remove unnecessary files from npm package + +0.1.1 / 2014-02-23 +================== + + * Fix bad parse when cookie value contained a comma + * Fix support for `maxAge` of `0` + +0.1.0 / 2013-05-01 +================== + + * Add `decode` option + * Add `encode` option + +0.0.6 / 2013-04-08 +================== + + * Ignore cookie parts missing `=` + +0.0.5 / 2012-10-29 +================== + + * Return raw cookie value if value unescape errors + +0.0.4 / 
2012-06-21 +================== + + * Use encode/decodeURIComponent for cookie encoding/decoding + - Improve server/client interoperability + +0.0.3 / 2012-06-06 +================== + + * Only escape special characters per the cookie RFC + +0.0.2 / 2012-06-01 +================== + + * Fix `maxAge` option to not throw error + +0.0.1 / 2012-05-28 +================== + + * Add more tests + +0.0.0 / 2012-05-28 +================== + + * Initial release diff --git a/data/node_modules/cookie/LICENSE b/data/node_modules/cookie/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..058b6b4efa3f45896ae691f2558a2a1aca05bebd --- /dev/null +++ b/data/node_modules/cookie/LICENSE @@ -0,0 +1,24 @@ +(The MIT License) + +Copyright (c) 2012-2014 Roman Shtylman +Copyright (c) 2015 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/data/node_modules/cookie/README.md b/data/node_modules/cookie/README.md new file mode 100644 index 0000000000000000000000000000000000000000..71fdac1110bba222a716c4b56b90028a6a9e5af4 --- /dev/null +++ b/data/node_modules/cookie/README.md @@ -0,0 +1,317 @@ +# cookie + +[![NPM Version][npm-version-image]][npm-url] +[![NPM Downloads][npm-downloads-image]][npm-url] +[![Node.js Version][node-image]][node-url] +[![Build Status][ci-image]][ci-url] +[![Coverage Status][coveralls-image]][coveralls-url] + +Basic HTTP cookie parser and serializer for HTTP servers. + +## Installation + +This is a [Node.js](https://nodejs.org/en/) module available through the +[npm registry](https://www.npmjs.com/). Installation is done using the +[`npm install` command](https://docs.npmjs.com/getting-started/installing-npm-packages-locally): + +```sh +$ npm install cookie +``` + +## API + +```js +var cookie = require('cookie'); +``` + +### cookie.parse(str, options) + +Parse an HTTP `Cookie` header string and returning an object of all cookie name-value pairs. +The `str` argument is the string representing a `Cookie` header value and `options` is an +optional object containing additional parsing options. + +```js +var cookies = cookie.parse('foo=bar; equation=E%3Dmc%5E2'); +// { foo: 'bar', equation: 'E=mc^2' } +``` + +#### Options + +`cookie.parse` accepts these properties in the options object. + +##### decode + +Specifies a function that will be used to decode a cookie's value. Since the value of a cookie +has a limited character set (and must be a simple string), this function can be used to decode +a previously-encoded cookie value into a JavaScript string or other object. + +The default function is the global `decodeURIComponent`, which will decode any URL-encoded +sequences into their byte representations. + +**note** if an error is thrown from this function, the original, non-decoded cookie value will +be returned as the cookie's value. 
+ +### cookie.serialize(name, value, options) + +Serialize a cookie name-value pair into a `Set-Cookie` header string. The `name` argument is the +name for the cookie, the `value` argument is the value to set the cookie to, and the `options` +argument is an optional object containing additional serialization options. + +```js +var setCookie = cookie.serialize('foo', 'bar'); +// foo=bar +``` + +#### Options + +`cookie.serialize` accepts these properties in the options object. + +##### domain + +Specifies the value for the [`Domain` `Set-Cookie` attribute][rfc-6265-5.2.3]. By default, no +domain is set, and most clients will consider the cookie to apply to only the current domain. + +##### encode + +Specifies a function that will be used to encode a cookie's value. Since value of a cookie +has a limited character set (and must be a simple string), this function can be used to encode +a value into a string suited for a cookie's value. + +The default function is the global `encodeURIComponent`, which will encode a JavaScript string +into UTF-8 byte sequences and then URL-encode any that fall outside of the cookie range. + +##### expires + +Specifies the `Date` object to be the value for the [`Expires` `Set-Cookie` attribute][rfc-6265-5.2.1]. +By default, no expiration is set, and most clients will consider this a "non-persistent cookie" and +will delete it on a condition like exiting a web browser application. + +**note** the [cookie storage model specification][rfc-6265-5.3] states that if both `expires` and +`maxAge` are set, then `maxAge` takes precedence, but it is possible not all clients by obey this, +so if both are set, they should point to the same date and time. + +##### httpOnly + +Specifies the `boolean` value for the [`HttpOnly` `Set-Cookie` attribute][rfc-6265-5.2.6]. When truthy, +the `HttpOnly` attribute is set, otherwise it is not. By default, the `HttpOnly` attribute is not set. 
+ +**note** be careful when setting this to `true`, as compliant clients will not allow client-side +JavaScript to see the cookie in `document.cookie`. + +##### maxAge + +Specifies the `number` (in seconds) to be the value for the [`Max-Age` `Set-Cookie` attribute][rfc-6265-5.2.2]. +The given number will be converted to an integer by rounding down. By default, no maximum age is set. + +**note** the [cookie storage model specification][rfc-6265-5.3] states that if both `expires` and +`maxAge` are set, then `maxAge` takes precedence, but it is possible not all clients by obey this, +so if both are set, they should point to the same date and time. + +##### partitioned + +Specifies the `boolean` value for the [`Partitioned` `Set-Cookie`](rfc-cutler-httpbis-partitioned-cookies) +attribute. When truthy, the `Partitioned` attribute is set, otherwise it is not. By default, the +`Partitioned` attribute is not set. + +**note** This is an attribute that has not yet been fully standardized, and may change in the future. +This also means many clients may ignore this attribute until they understand it. + +More information about can be found in [the proposal](https://github.com/privacycg/CHIPS). + +##### path + +Specifies the value for the [`Path` `Set-Cookie` attribute][rfc-6265-5.2.4]. By default, the path +is considered the ["default path"][rfc-6265-5.1.4]. + +##### priority + +Specifies the `string` to be the value for the [`Priority` `Set-Cookie` attribute][rfc-west-cookie-priority-00-4.1]. + + - `'low'` will set the `Priority` attribute to `Low`. + - `'medium'` will set the `Priority` attribute to `Medium`, the default priority when not set. + - `'high'` will set the `Priority` attribute to `High`. + +More information about the different priority levels can be found in +[the specification][rfc-west-cookie-priority-00-4.1]. + +**note** This is an attribute that has not yet been fully standardized, and may change in the future. 
+This also means many clients may ignore this attribute until they understand it. + +##### sameSite + +Specifies the `boolean` or `string` to be the value for the [`SameSite` `Set-Cookie` attribute][rfc-6265bis-09-5.4.7]. + + - `true` will set the `SameSite` attribute to `Strict` for strict same site enforcement. + - `false` will not set the `SameSite` attribute. + - `'lax'` will set the `SameSite` attribute to `Lax` for lax same site enforcement. + - `'none'` will set the `SameSite` attribute to `None` for an explicit cross-site cookie. + - `'strict'` will set the `SameSite` attribute to `Strict` for strict same site enforcement. + +More information about the different enforcement levels can be found in +[the specification][rfc-6265bis-09-5.4.7]. + +**note** This is an attribute that has not yet been fully standardized, and may change in the future. +This also means many clients may ignore this attribute until they understand it. + +##### secure + +Specifies the `boolean` value for the [`Secure` `Set-Cookie` attribute][rfc-6265-5.2.5]. When truthy, +the `Secure` attribute is set, otherwise it is not. By default, the `Secure` attribute is not set. + +**note** be careful when setting this to `true`, as compliant clients will not send the cookie back to +the server in the future if the browser does not have an HTTPS connection. + +## Example + +The following example uses this module in conjunction with the Node.js core HTTP server +to prompt a user for their name and display it back on future visits. 
+ +```js +var cookie = require('cookie'); +var escapeHtml = require('escape-html'); +var http = require('http'); +var url = require('url'); + +function onRequest(req, res) { + // Parse the query string + var query = url.parse(req.url, true, true).query; + + if (query && query.name) { + // Set a new cookie with the name + res.setHeader('Set-Cookie', cookie.serialize('name', String(query.name), { + httpOnly: true, + maxAge: 60 * 60 * 24 * 7 // 1 week + })); + + // Redirect back after setting cookie + res.statusCode = 302; + res.setHeader('Location', req.headers.referer || '/'); + res.end(); + return; + } + + // Parse the cookies on the request + var cookies = cookie.parse(req.headers.cookie || ''); + + // Get the visitor name set in the cookie + var name = cookies.name; + + res.setHeader('Content-Type', 'text/html; charset=UTF-8'); + + if (name) { + res.write('

Welcome back, ' + escapeHtml(name) + '!

'); + } else { + res.write('

Hello, new visitor!

'); + } + + res.write('
'); + res.write(' '); + res.end('
'); +} + +http.createServer(onRequest).listen(3000); +``` + +## Testing + +```sh +$ npm test +``` + +## Benchmark + +``` +$ npm run bench + +> cookie@0.5.0 bench +> node benchmark/index.js + + node@18.18.2 + acorn@8.10.0 + ada@2.6.0 + ares@1.19.1 + brotli@1.0.9 + cldr@43.1 + icu@73.2 + llhttp@6.0.11 + modules@108 + napi@9 + nghttp2@1.57.0 + nghttp3@0.7.0 + ngtcp2@0.8.1 + openssl@3.0.10+quic + simdutf@3.2.14 + tz@2023c + undici@5.26.3 + unicode@15.0 + uv@1.44.2 + uvwasi@0.0.18 + v8@10.2.154.26-node.26 + zlib@1.2.13.1-motley + +> node benchmark/parse-top.js + + cookie.parse - top sites + + 14 tests completed. + + parse accounts.google.com x 2,588,913 ops/sec ±0.74% (186 runs sampled) + parse apple.com x 2,370,002 ops/sec ±0.69% (186 runs sampled) + parse cloudflare.com x 2,213,102 ops/sec ±0.88% (188 runs sampled) + parse docs.google.com x 2,194,157 ops/sec ±1.03% (184 runs sampled) + parse drive.google.com x 2,265,084 ops/sec ±0.79% (187 runs sampled) + parse en.wikipedia.org x 457,099 ops/sec ±0.81% (186 runs sampled) + parse linkedin.com x 504,407 ops/sec ±0.89% (186 runs sampled) + parse maps.google.com x 1,230,959 ops/sec ±0.98% (186 runs sampled) + parse microsoft.com x 926,294 ops/sec ±0.88% (184 runs sampled) + parse play.google.com x 2,311,338 ops/sec ±0.83% (185 runs sampled) + parse support.google.com x 1,508,850 ops/sec ±0.86% (186 runs sampled) + parse www.google.com x 1,022,582 ops/sec ±1.32% (182 runs sampled) + parse youtu.be x 332,136 ops/sec ±1.02% (185 runs sampled) + parse youtube.com x 323,833 ops/sec ±0.77% (183 runs sampled) + +> node benchmark/parse.js + + cookie.parse - generic + + 6 tests completed. 
+ + simple x 3,214,032 ops/sec ±1.61% (183 runs sampled) + decode x 587,237 ops/sec ±1.16% (187 runs sampled) + unquote x 2,954,618 ops/sec ±1.35% (183 runs sampled) + duplicates x 857,008 ops/sec ±0.89% (187 runs sampled) + 10 cookies x 292,133 ops/sec ±0.89% (187 runs sampled) + 100 cookies x 22,610 ops/sec ±0.68% (187 runs sampled) +``` + +## References + +- [RFC 6265: HTTP State Management Mechanism][rfc-6265] +- [Same-site Cookies][rfc-6265bis-09-5.4.7] + +[rfc-cutler-httpbis-partitioned-cookies]: https://tools.ietf.org/html/draft-cutler-httpbis-partitioned-cookies/ +[rfc-west-cookie-priority-00-4.1]: https://tools.ietf.org/html/draft-west-cookie-priority-00#section-4.1 +[rfc-6265bis-09-5.4.7]: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-09#section-5.4.7 +[rfc-6265]: https://tools.ietf.org/html/rfc6265 +[rfc-6265-5.1.4]: https://tools.ietf.org/html/rfc6265#section-5.1.4 +[rfc-6265-5.2.1]: https://tools.ietf.org/html/rfc6265#section-5.2.1 +[rfc-6265-5.2.2]: https://tools.ietf.org/html/rfc6265#section-5.2.2 +[rfc-6265-5.2.3]: https://tools.ietf.org/html/rfc6265#section-5.2.3 +[rfc-6265-5.2.4]: https://tools.ietf.org/html/rfc6265#section-5.2.4 +[rfc-6265-5.2.5]: https://tools.ietf.org/html/rfc6265#section-5.2.5 +[rfc-6265-5.2.6]: https://tools.ietf.org/html/rfc6265#section-5.2.6 +[rfc-6265-5.3]: https://tools.ietf.org/html/rfc6265#section-5.3 + +## License + +[MIT](LICENSE) + +[ci-image]: https://badgen.net/github/checks/jshttp/cookie/master?label=ci +[ci-url]: https://github.com/jshttp/cookie/actions/workflows/ci.yml +[coveralls-image]: https://badgen.net/coveralls/c/github/jshttp/cookie/master +[coveralls-url]: https://coveralls.io/r/jshttp/cookie?branch=master +[node-image]: https://badgen.net/npm/node/cookie +[node-url]: https://nodejs.org/en/download +[npm-downloads-image]: https://badgen.net/npm/dm/cookie +[npm-url]: https://npmjs.org/package/cookie +[npm-version-image]: https://badgen.net/npm/v/cookie diff --git 
a/data/node_modules/cookie/SECURITY.md b/data/node_modules/cookie/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..fd4a6c53a9cd1abacf91125dab3fde3163b4c412 --- /dev/null +++ b/data/node_modules/cookie/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policies and Procedures + +## Reporting a Bug + +The `cookie` team and community take all security bugs seriously. Thank +you for improving the security of the project. We appreciate your efforts and +responsible disclosure and will make every effort to acknowledge your +contributions. + +Report security bugs by emailing the current owner(s) of `cookie`. This +information can be found in the npm registry using the command +`npm owner ls cookie`. +If unsure or unable to get the information from the above, open an issue +in the [project issue tracker](https://github.com/jshttp/cookie/issues) +asking for the current contact information. + +To ensure the timely response to your report, please ensure that the entirety +of the report is contained within the email body and not solely behind a web +link or an attachment. + +At least one owner will acknowledge your email within 48 hours, and will send a +more detailed response within 48 hours indicating the next steps in handling +your report. After the initial reply to your report, the owners will +endeavor to keep you informed of the progress towards a fix and full +announcement, and may ask for additional information or guidance. diff --git a/data/node_modules/cookie/index.js b/data/node_modules/cookie/index.js new file mode 100644 index 0000000000000000000000000000000000000000..03d4c386b0120ff3cc787b601958a3f77366cebe --- /dev/null +++ b/data/node_modules/cookie/index.js @@ -0,0 +1,274 @@ +/*! + * cookie + * Copyright(c) 2012-2014 Roman Shtylman + * Copyright(c) 2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict'; + +/** + * Module exports. 
+ * @public + */ + +exports.parse = parse; +exports.serialize = serialize; + +/** + * Module variables. + * @private + */ + +var __toString = Object.prototype.toString + +/** + * RegExp to match field-content in RFC 7230 sec 3.2 + * + * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] + * field-vchar = VCHAR / obs-text + * obs-text = %x80-FF + */ + +var fieldContentRegExp = /^[\u0009\u0020-\u007e\u0080-\u00ff]+$/; + +/** + * Parse a cookie header. + * + * Parse the given cookie header string into an object + * The object has the various cookies as keys(names) => values + * + * @param {string} str + * @param {object} [options] + * @return {object} + * @public + */ + +function parse(str, options) { + if (typeof str !== 'string') { + throw new TypeError('argument str must be a string'); + } + + var obj = {} + var opt = options || {}; + var dec = opt.decode || decode; + + var index = 0 + while (index < str.length) { + var eqIdx = str.indexOf('=', index) + + // no more cookie pairs + if (eqIdx === -1) { + break + } + + var endIdx = str.indexOf(';', index) + + if (endIdx === -1) { + endIdx = str.length + } else if (endIdx < eqIdx) { + // backtrack on prior semicolon + index = str.lastIndexOf(';', eqIdx - 1) + 1 + continue + } + + var key = str.slice(index, eqIdx).trim() + + // only assign once + if (undefined === obj[key]) { + var val = str.slice(eqIdx + 1, endIdx).trim() + + // quoted values + if (val.charCodeAt(0) === 0x22) { + val = val.slice(1, -1) + } + + obj[key] = tryDecode(val, dec); + } + + index = endIdx + 1 + } + + return obj; +} + +/** + * Serialize data into a cookie header. + * + * Serialize the a name value pair into a cookie string suitable for + * http headers. An optional options object specified cookie parameters. 
+ * + * serialize('foo', 'bar', { httpOnly: true }) + * => "foo=bar; httpOnly" + * + * @param {string} name + * @param {string} val + * @param {object} [options] + * @return {string} + * @public + */ + +function serialize(name, val, options) { + var opt = options || {}; + var enc = opt.encode || encode; + + if (typeof enc !== 'function') { + throw new TypeError('option encode is invalid'); + } + + if (!fieldContentRegExp.test(name)) { + throw new TypeError('argument name is invalid'); + } + + var value = enc(val); + + if (value && !fieldContentRegExp.test(value)) { + throw new TypeError('argument val is invalid'); + } + + var str = name + '=' + value; + + if (null != opt.maxAge) { + var maxAge = opt.maxAge - 0; + + if (isNaN(maxAge) || !isFinite(maxAge)) { + throw new TypeError('option maxAge is invalid') + } + + str += '; Max-Age=' + Math.floor(maxAge); + } + + if (opt.domain) { + if (!fieldContentRegExp.test(opt.domain)) { + throw new TypeError('option domain is invalid'); + } + + str += '; Domain=' + opt.domain; + } + + if (opt.path) { + if (!fieldContentRegExp.test(opt.path)) { + throw new TypeError('option path is invalid'); + } + + str += '; Path=' + opt.path; + } + + if (opt.expires) { + var expires = opt.expires + + if (!isDate(expires) || isNaN(expires.valueOf())) { + throw new TypeError('option expires is invalid'); + } + + str += '; Expires=' + expires.toUTCString() + } + + if (opt.httpOnly) { + str += '; HttpOnly'; + } + + if (opt.secure) { + str += '; Secure'; + } + + if (opt.partitioned) { + str += '; Partitioned' + } + + if (opt.priority) { + var priority = typeof opt.priority === 'string' + ? 
opt.priority.toLowerCase() + : opt.priority + + switch (priority) { + case 'low': + str += '; Priority=Low' + break + case 'medium': + str += '; Priority=Medium' + break + case 'high': + str += '; Priority=High' + break + default: + throw new TypeError('option priority is invalid') + } + } + + if (opt.sameSite) { + var sameSite = typeof opt.sameSite === 'string' + ? opt.sameSite.toLowerCase() : opt.sameSite; + + switch (sameSite) { + case true: + str += '; SameSite=Strict'; + break; + case 'lax': + str += '; SameSite=Lax'; + break; + case 'strict': + str += '; SameSite=Strict'; + break; + case 'none': + str += '; SameSite=None'; + break; + default: + throw new TypeError('option sameSite is invalid'); + } + } + + return str; +} + +/** + * URL-decode string value. Optimized to skip native call when no %. + * + * @param {string} str + * @returns {string} + */ + +function decode (str) { + return str.indexOf('%') !== -1 + ? decodeURIComponent(str) + : str +} + +/** + * URL-encode value. + * + * @param {string} val + * @returns {string} + */ + +function encode (val) { + return encodeURIComponent(val) +} + +/** + * Determine if value is a Date. + * + * @param {*} val + * @private + */ + +function isDate (val) { + return __toString.call(val) === '[object Date]' || + val instanceof Date +} + +/** + * Try decoding a string using a decoding function. 
+ * + * @param {string} str + * @param {function} decode + * @private + */ + +function tryDecode(str, decode) { + try { + return decode(str); + } catch (e) { + return str; + } +} diff --git a/data/node_modules/cookie/package.json b/data/node_modules/cookie/package.json new file mode 100644 index 0000000000000000000000000000000000000000..0c3f0063de63b33307793d247338efcbae8929d5 --- /dev/null +++ b/data/node_modules/cookie/package.json @@ -0,0 +1,44 @@ +{ + "name": "cookie", + "description": "HTTP server cookie parsing and serialization", + "version": "0.6.0", + "author": "Roman Shtylman ", + "contributors": [ + "Douglas Christopher Wilson " + ], + "license": "MIT", + "keywords": [ + "cookie", + "cookies" + ], + "repository": "jshttp/cookie", + "devDependencies": { + "beautify-benchmark": "0.2.4", + "benchmark": "2.1.4", + "eslint": "8.53.0", + "eslint-plugin-markdown": "3.0.1", + "mocha": "10.2.0", + "nyc": "15.1.0", + "safe-buffer": "5.2.1", + "top-sites": "1.1.194" + }, + "files": [ + "HISTORY.md", + "LICENSE", + "README.md", + "SECURITY.md", + "index.js" + ], + "engines": { + "node": ">= 0.6" + }, + "scripts": { + "bench": "node benchmark/index.js", + "lint": "eslint .", + "test": "mocha --reporter spec --bail --check-leaks test/", + "test-ci": "nyc --reporter=lcov --reporter=text npm test", + "test-cov": "nyc --reporter=html --reporter=text npm test", + "update-bench": "node scripts/update-benchmark.js", + "version": "node scripts/version-history.js && git add HISTORY.md" + } +} diff --git a/data/node_modules/debug/.coveralls.yml b/data/node_modules/debug/.coveralls.yml new file mode 100644 index 0000000000000000000000000000000000000000..20a7068581791335487166ddc5001a2ca3a3b060 --- /dev/null +++ b/data/node_modules/debug/.coveralls.yml @@ -0,0 +1 @@ +repo_token: SIAeZjKYlHK74rbcFvNHMUzjRiMpflxve diff --git a/data/node_modules/debug/.eslintrc b/data/node_modules/debug/.eslintrc new file mode 100644 index 
0000000000000000000000000000000000000000..8a37ae2c2e5a35db74b4607b4c74e0f4fe39a3e4 --- /dev/null +++ b/data/node_modules/debug/.eslintrc @@ -0,0 +1,11 @@ +{ + "env": { + "browser": true, + "node": true + }, + "rules": { + "no-console": 0, + "no-empty": [1, { "allowEmptyCatch": true }] + }, + "extends": "eslint:recommended" +} diff --git a/data/node_modules/debug/.npmignore b/data/node_modules/debug/.npmignore new file mode 100644 index 0000000000000000000000000000000000000000..5f60eecc84e219e52554407ad38d04abd1cf2111 --- /dev/null +++ b/data/node_modules/debug/.npmignore @@ -0,0 +1,9 @@ +support +test +examples +example +*.sock +dist +yarn.lock +coverage +bower.json diff --git a/data/node_modules/debug/.travis.yml b/data/node_modules/debug/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..6c6090c3b09f2e45d8c0a1dc77ff5f4a81e78a3c --- /dev/null +++ b/data/node_modules/debug/.travis.yml @@ -0,0 +1,14 @@ + +language: node_js +node_js: + - "6" + - "5" + - "4" + +install: + - make node_modules + +script: + - make lint + - make test + - make coveralls diff --git a/data/node_modules/debug/CHANGELOG.md b/data/node_modules/debug/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..eadaa189517bbcfb2a6784a48ac8d05d2edafe7c --- /dev/null +++ b/data/node_modules/debug/CHANGELOG.md @@ -0,0 +1,362 @@ + +2.6.9 / 2017-09-22 +================== + + * remove ReDoS regexp in %o formatter (#504) + +2.6.8 / 2017-05-18 +================== + + * Fix: Check for undefined on browser globals (#462, @marbemac) + +2.6.7 / 2017-05-16 +================== + + * Fix: Update ms to 2.0.0 to fix regular expression denial of service vulnerability (#458, @hubdotcom) + * Fix: Inline extend function in node implementation (#452, @dougwilson) + * Docs: Fix typo (#455, @msasad) + +2.6.5 / 2017-04-27 +================== + + * Fix: null reference check on window.documentElement.style.WebkitAppearance (#447, @thebigredgeek) + * Misc: clean up browser 
reference checks (#447, @thebigredgeek) + * Misc: add npm-debug.log to .gitignore (@thebigredgeek) + + +2.6.4 / 2017-04-20 +================== + + * Fix: bug that would occure if process.env.DEBUG is a non-string value. (#444, @LucianBuzzo) + * Chore: ignore bower.json in npm installations. (#437, @joaovieira) + * Misc: update "ms" to v0.7.3 (@tootallnate) + +2.6.3 / 2017-03-13 +================== + + * Fix: Electron reference to `process.env.DEBUG` (#431, @paulcbetts) + * Docs: Changelog fix (@thebigredgeek) + +2.6.2 / 2017-03-10 +================== + + * Fix: DEBUG_MAX_ARRAY_LENGTH (#420, @slavaGanzin) + * Docs: Add backers and sponsors from Open Collective (#422, @piamancini) + * Docs: Add Slackin invite badge (@tootallnate) + +2.6.1 / 2017-02-10 +================== + + * Fix: Module's `export default` syntax fix for IE8 `Expected identifier` error + * Fix: Whitelist DEBUG_FD for values 1 and 2 only (#415, @pi0) + * Fix: IE8 "Expected identifier" error (#414, @vgoma) + * Fix: Namespaces would not disable once enabled (#409, @musikov) + +2.6.0 / 2016-12-28 +================== + + * Fix: added better null pointer checks for browser useColors (@thebigredgeek) + * Improvement: removed explicit `window.debug` export (#404, @tootallnate) + * Improvement: deprecated `DEBUG_FD` environment variable (#405, @tootallnate) + +2.5.2 / 2016-12-25 +================== + + * Fix: reference error on window within webworkers (#393, @KlausTrainer) + * Docs: fixed README typo (#391, @lurch) + * Docs: added notice about v3 api discussion (@thebigredgeek) + +2.5.1 / 2016-12-20 +================== + + * Fix: babel-core compatibility + +2.5.0 / 2016-12-20 +================== + + * Fix: wrong reference in bower file (@thebigredgeek) + * Fix: webworker compatibility (@thebigredgeek) + * Fix: output formatting issue (#388, @kribblo) + * Fix: babel-loader compatibility (#383, @escwald) + * Misc: removed built asset from repo and publications (@thebigredgeek) + * Misc: moved source files to 
/src (#378, @yamikuronue) + * Test: added karma integration and replaced babel with browserify for browser tests (#378, @yamikuronue) + * Test: coveralls integration (#378, @yamikuronue) + * Docs: simplified language in the opening paragraph (#373, @yamikuronue) + +2.4.5 / 2016-12-17 +================== + + * Fix: `navigator` undefined in Rhino (#376, @jochenberger) + * Fix: custom log function (#379, @hsiliev) + * Improvement: bit of cleanup + linting fixes (@thebigredgeek) + * Improvement: rm non-maintainted `dist/` dir (#375, @freewil) + * Docs: simplified language in the opening paragraph. (#373, @yamikuronue) + +2.4.4 / 2016-12-14 +================== + + * Fix: work around debug being loaded in preload scripts for electron (#368, @paulcbetts) + +2.4.3 / 2016-12-14 +================== + + * Fix: navigation.userAgent error for react native (#364, @escwald) + +2.4.2 / 2016-12-14 +================== + + * Fix: browser colors (#367, @tootallnate) + * Misc: travis ci integration (@thebigredgeek) + * Misc: added linting and testing boilerplate with sanity check (@thebigredgeek) + +2.4.1 / 2016-12-13 +================== + + * Fix: typo that broke the package (#356) + +2.4.0 / 2016-12-13 +================== + + * Fix: bower.json references unbuilt src entry point (#342, @justmatt) + * Fix: revert "handle regex special characters" (@tootallnate) + * Feature: configurable util.inspect()`options for NodeJS (#327, @tootallnate) + * Feature: %O`(big O) pretty-prints objects (#322, @tootallnate) + * Improvement: allow colors in workers (#335, @botverse) + * Improvement: use same color for same namespace. 
(#338, @lchenay) + +2.3.3 / 2016-11-09 +================== + + * Fix: Catch `JSON.stringify()` errors (#195, Jovan Alleyne) + * Fix: Returning `localStorage` saved values (#331, Levi Thomason) + * Improvement: Don't create an empty object when no `process` (Nathan Rajlich) + +2.3.2 / 2016-11-09 +================== + + * Fix: be super-safe in index.js as well (@TooTallNate) + * Fix: should check whether process exists (Tom Newby) + +2.3.1 / 2016-11-09 +================== + + * Fix: Added electron compatibility (#324, @paulcbetts) + * Improvement: Added performance optimizations (@tootallnate) + * Readme: Corrected PowerShell environment variable example (#252, @gimre) + * Misc: Removed yarn lock file from source control (#321, @fengmk2) + +2.3.0 / 2016-11-07 +================== + + * Fix: Consistent placement of ms diff at end of output (#215, @gorangajic) + * Fix: Escaping of regex special characters in namespace strings (#250, @zacronos) + * Fix: Fixed bug causing crash on react-native (#282, @vkarpov15) + * Feature: Enabled ES6+ compatible import via default export (#212 @bucaran) + * Feature: Added %O formatter to reflect Chrome's console.log capability (#279, @oncletom) + * Package: Update "ms" to 0.7.2 (#315, @DevSide) + * Package: removed superfluous version property from bower.json (#207 @kkirsche) + * Readme: fix USE_COLORS to DEBUG_COLORS + * Readme: Doc fixes for format string sugar (#269, @mlucool) + * Readme: Updated docs for DEBUG_FD and DEBUG_COLORS environment variables (#232, @mattlyons0) + * Readme: doc fixes for PowerShell (#271 #243, @exoticknight @unreadable) + * Readme: better docs for browser support (#224, @matthewmueller) + * Tooling: Added yarn integration for development (#317, @thebigredgeek) + * Misc: Renamed History.md to CHANGELOG.md (@thebigredgeek) + * Misc: Added license file (#226 #274, @CantemoInternal @sdaitzman) + * Misc: Updated contributors (@thebigredgeek) + +2.2.0 / 2015-05-09 +================== + + * package: update "ms" 
to v0.7.1 (#202, @dougwilson) + * README: add logging to file example (#193, @DanielOchoa) + * README: fixed a typo (#191, @amir-s) + * browser: expose `storage` (#190, @stephenmathieson) + * Makefile: add a `distclean` target (#189, @stephenmathieson) + +2.1.3 / 2015-03-13 +================== + + * Updated stdout/stderr example (#186) + * Updated example/stdout.js to match debug current behaviour + * Renamed example/stderr.js to stdout.js + * Update Readme.md (#184) + * replace high intensity foreground color for bold (#182, #183) + +2.1.2 / 2015-03-01 +================== + + * dist: recompile + * update "ms" to v0.7.0 + * package: update "browserify" to v9.0.3 + * component: fix "ms.js" repo location + * changed bower package name + * updated documentation about using debug in a browser + * fix: security error on safari (#167, #168, @yields) + +2.1.1 / 2014-12-29 +================== + + * browser: use `typeof` to check for `console` existence + * browser: check for `console.log` truthiness (fix IE 8/9) + * browser: add support for Chrome apps + * Readme: added Windows usage remarks + * Add `bower.json` to properly support bower install + +2.1.0 / 2014-10-15 +================== + + * node: implement `DEBUG_FD` env variable support + * package: update "browserify" to v6.1.0 + * package: add "license" field to package.json (#135, @panuhorsmalahti) + +2.0.0 / 2014-09-01 +================== + + * package: update "browserify" to v5.11.0 + * node: use stderr rather than stdout for logging (#29, @stephenmathieson) + +1.0.4 / 2014-07-15 +================== + + * dist: recompile + * example: remove `console.info()` log usage + * example: add "Content-Type" UTF-8 header to browser example + * browser: place %c marker after the space character + * browser: reset the "content" color via `color: inherit` + * browser: add colors support for Firefox >= v31 + * debug: prefer an instance `log()` function over the global one (#119) + * Readme: update documentation about styled 
console logs for FF v31 (#116, @wryk) + +1.0.3 / 2014-07-09 +================== + + * Add support for multiple wildcards in namespaces (#122, @seegno) + * browser: fix lint + +1.0.2 / 2014-06-10 +================== + + * browser: update color palette (#113, @gscottolson) + * common: make console logging function configurable (#108, @timoxley) + * node: fix %o colors on old node <= 0.8.x + * Makefile: find node path using shell/which (#109, @timoxley) + +1.0.1 / 2014-06-06 +================== + + * browser: use `removeItem()` to clear localStorage + * browser, node: don't set DEBUG if namespaces is undefined (#107, @leedm777) + * package: add "contributors" section + * node: fix comment typo + * README: list authors + +1.0.0 / 2014-06-04 +================== + + * make ms diff be global, not be scope + * debug: ignore empty strings in enable() + * node: make DEBUG_COLORS able to disable coloring + * *: export the `colors` array + * npmignore: don't publish the `dist` dir + * Makefile: refactor to use browserify + * package: add "browserify" as a dev dependency + * Readme: add Web Inspector Colors section + * node: reset terminal color for the debug content + * node: map "%o" to `util.inspect()` + * browser: map "%j" to `JSON.stringify()` + * debug: add custom "formatters" + * debug: use "ms" module for humanizing the diff + * Readme: add "bash" syntax highlighting + * browser: add Firebug color support + * browser: add colors for WebKit browsers + * node: apply log to `console` + * rewrite: abstract common logic for Node & browsers + * add .jshintrc file + +0.8.1 / 2014-04-14 +================== + + * package: re-add the "component" section + +0.8.0 / 2014-03-30 +================== + + * add `enable()` method for nodejs. 
Closes #27 + * change from stderr to stdout + * remove unnecessary index.js file + +0.7.4 / 2013-11-13 +================== + + * remove "browserify" key from package.json (fixes something in browserify) + +0.7.3 / 2013-10-30 +================== + + * fix: catch localStorage security error when cookies are blocked (Chrome) + * add debug(err) support. Closes #46 + * add .browser prop to package.json. Closes #42 + +0.7.2 / 2013-02-06 +================== + + * fix package.json + * fix: Mobile Safari (private mode) is broken with debug + * fix: Use unicode to send escape character to shell instead of octal to work with strict mode javascript + +0.7.1 / 2013-02-05 +================== + + * add repository URL to package.json + * add DEBUG_COLORED to force colored output + * add browserify support + * fix component. Closes #24 + +0.7.0 / 2012-05-04 +================== + + * Added .component to package.json + * Added debug.component.js build + +0.6.0 / 2012-03-16 +================== + + * Added support for "-" prefix in DEBUG [Vinay Pulim] + * Added `.enabled` flag to the node version [TooTallNate] + +0.5.0 / 2012-02-02 +================== + + * Added: humanize diffs. Closes #8 + * Added `debug.disable()` to the CS variant + * Removed padding. Closes #10 + * Fixed: persist client-side variant again. 
Closes #9 + +0.4.0 / 2012-02-01 +================== + + * Added browser variant support for older browsers [TooTallNate] + * Added `debug.enable('project:*')` to browser variant [TooTallNate] + * Added padding to diff (moved it to the right) + +0.3.0 / 2012-01-26 +================== + + * Added millisecond diff when isatty, otherwise UTC string + +0.2.0 / 2012-01-22 +================== + + * Added wildcard support + +0.1.0 / 2011-12-02 +================== + + * Added: remove colors unless stderr isatty [TooTallNate] + +0.0.1 / 2010-01-03 +================== + + * Initial release diff --git a/data/node_modules/debug/LICENSE b/data/node_modules/debug/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..658c933d28255e8c716899789e8c0f846e5dc125 --- /dev/null +++ b/data/node_modules/debug/LICENSE @@ -0,0 +1,19 @@ +(The MIT License) + +Copyright (c) 2014 TJ Holowaychuk + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the 'Software'), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/data/node_modules/debug/Makefile b/data/node_modules/debug/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..584da8bf938e639ece3ba2bd4105c215c2b1ff51 --- /dev/null +++ b/data/node_modules/debug/Makefile @@ -0,0 +1,50 @@ +# get Makefile directory name: http://stackoverflow.com/a/5982798/376773 +THIS_MAKEFILE_PATH:=$(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +THIS_DIR:=$(shell cd $(dir $(THIS_MAKEFILE_PATH));pwd) + +# BIN directory +BIN := $(THIS_DIR)/node_modules/.bin + +# Path +PATH := node_modules/.bin:$(PATH) +SHELL := /bin/bash + +# applications +NODE ?= $(shell which node) +YARN ?= $(shell which yarn) +PKG ?= $(if $(YARN),$(YARN),$(NODE) $(shell which npm)) +BROWSERIFY ?= $(NODE) $(BIN)/browserify + +.FORCE: + +install: node_modules + +node_modules: package.json + @NODE_ENV= $(PKG) install + @touch node_modules + +lint: .FORCE + eslint browser.js debug.js index.js node.js + +test-node: .FORCE + istanbul cover node_modules/mocha/bin/_mocha -- test/**.js + +test-browser: .FORCE + mkdir -p dist + + @$(BROWSERIFY) \ + --standalone debug \ + . 
> dist/debug.js + + karma start --single-run + rimraf dist + +test: .FORCE + concurrently \ + "make test-node" \ + "make test-browser" + +coveralls: + cat ./coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js + +.PHONY: all install clean distclean diff --git a/data/node_modules/debug/README.md b/data/node_modules/debug/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f67be6b317c19952bb506a9e15e797615eea4533 --- /dev/null +++ b/data/node_modules/debug/README.md @@ -0,0 +1,312 @@ +# debug +[![Build Status](https://travis-ci.org/visionmedia/debug.svg?branch=master)](https://travis-ci.org/visionmedia/debug) [![Coverage Status](https://coveralls.io/repos/github/visionmedia/debug/badge.svg?branch=master)](https://coveralls.io/github/visionmedia/debug?branch=master) [![Slack](https://visionmedia-community-slackin.now.sh/badge.svg)](https://visionmedia-community-slackin.now.sh/) [![OpenCollective](https://opencollective.com/debug/backers/badge.svg)](#backers) +[![OpenCollective](https://opencollective.com/debug/sponsors/badge.svg)](#sponsors) + + + +A tiny node.js debugging utility modelled after node core's debugging technique. + +**Discussion around the V3 API is under way [here](https://github.com/visionmedia/debug/issues/370)** + +## Installation + +```bash +$ npm install debug +``` + +## Usage + +`debug` exposes a function; simply pass this function the name of your module, and it will return a decorated version of `console.error` for you to pass debug statements to. This will allow you to toggle the debug output for different parts of your module as well as the module as a whole. 
+ +Example _app.js_: + +```js +var debug = require('debug')('http') + , http = require('http') + , name = 'My App'; + +// fake app + +debug('booting %s', name); + +http.createServer(function(req, res){ + debug(req.method + ' ' + req.url); + res.end('hello\n'); +}).listen(3000, function(){ + debug('listening'); +}); + +// fake worker of some kind + +require('./worker'); +``` + +Example _worker.js_: + +```js +var debug = require('debug')('worker'); + +setInterval(function(){ + debug('doing some work'); +}, 1000); +``` + + The __DEBUG__ environment variable is then used to enable these based on space or comma-delimited names. Here are some examples: + + ![debug http and worker](http://f.cl.ly/items/18471z1H402O24072r1J/Screenshot.png) + + ![debug worker](http://f.cl.ly/items/1X413v1a3M0d3C2c1E0i/Screenshot.png) + +#### Windows note + + On Windows the environment variable is set using the `set` command. + + ```cmd + set DEBUG=*,-not_this + ``` + + Note that PowerShell uses different syntax to set environment variables. + + ```cmd + $env:DEBUG = "*,-not_this" + ``` + +Then, run the program to be debugged as usual. + +## Millisecond diff + + When actively developing an application it can be useful to see when the time spent between one `debug()` call and the next. Suppose for example you invoke `debug()` before requesting a resource, and after as well, the "+NNNms" will show you how much time was spent between calls. + + ![](http://f.cl.ly/items/2i3h1d3t121M2Z1A3Q0N/Screenshot.png) + + When stdout is not a TTY, `Date#toUTCString()` is used, making it more useful for logging the debug information as shown below: + + ![](http://f.cl.ly/items/112H3i0e0o0P0a2Q2r11/Screenshot.png) + +## Conventions + + If you're using this in one or more of your libraries, you _should_ use the name of your library so that developers may toggle debugging as desired without guessing names. 
If you have more than one debuggers you _should_ prefix them with your library name and use ":" to separate features. For example "bodyParser" from Connect would then be "connect:bodyParser". + +## Wildcards + + The `*` character may be used as a wildcard. Suppose for example your library has debuggers named "connect:bodyParser", "connect:compress", "connect:session", instead of listing all three with `DEBUG=connect:bodyParser,connect:compress,connect:session`, you may simply do `DEBUG=connect:*`, or to run everything using this module simply use `DEBUG=*`. + + You can also exclude specific debuggers by prefixing them with a "-" character. For example, `DEBUG=*,-connect:*` would include all debuggers except those starting with "connect:". + +## Environment Variables + + When running through Node.js, you can set a few environment variables that will + change the behavior of the debug logging: + +| Name | Purpose | +|-----------|-------------------------------------------------| +| `DEBUG` | Enables/disables specific debugging namespaces. | +| `DEBUG_COLORS`| Whether or not to use colors in the debug output. | +| `DEBUG_DEPTH` | Object inspection depth. | +| `DEBUG_SHOW_HIDDEN` | Shows hidden properties on inspected objects. | + + + __Note:__ The environment variables beginning with `DEBUG_` end up being + converted into an Options object that gets used with `%o`/`%O` formatters. + See the Node.js documentation for + [`util.inspect()`](https://nodejs.org/api/util.html#util_util_inspect_object_options) + for the complete list. + +## Formatters + + + Debug uses [printf-style](https://wikipedia.org/wiki/Printf_format_string) formatting. Below are the officially supported formatters: + +| Formatter | Representation | +|-----------|----------------| +| `%O` | Pretty-print an Object on multiple lines. | +| `%o` | Pretty-print an Object all on a single line. | +| `%s` | String. | +| `%d` | Number (both integer and float). | +| `%j` | JSON. 
Replaced with the string '[Circular]' if the argument contains circular references. | +| `%%` | Single percent sign ('%'). This does not consume an argument. | + +### Custom formatters + + You can add custom formatters by extending the `debug.formatters` object. For example, if you wanted to add support for rendering a Buffer as hex with `%h`, you could do something like: + +```js +const createDebug = require('debug') +createDebug.formatters.h = (v) => { + return v.toString('hex') +} + +// …elsewhere +const debug = createDebug('foo') +debug('this is hex: %h', new Buffer('hello world')) +// foo this is hex: 68656c6c6f20776f726c6421 +0ms +``` + +## Browser support + You can build a browser-ready script using [browserify](https://github.com/substack/node-browserify), + or just use the [browserify-as-a-service](https://wzrd.in/) [build](https://wzrd.in/standalone/debug@latest), + if you don't want to build it yourself. + + Debug's enable state is currently persisted by `localStorage`. + Consider the situation shown below where you have `worker:a` and `worker:b`, + and wish to debug both. You can enable this using `localStorage.debug`: + +```js +localStorage.debug = 'worker:*' +``` + +And then refresh the page. + +```js +a = debug('worker:a'); +b = debug('worker:b'); + +setInterval(function(){ + a('doing some work'); +}, 1000); + +setInterval(function(){ + b('doing some work'); +}, 1200); +``` + +#### Web Inspector Colors + + Colors are also enabled on "Web Inspectors" that understand the `%c` formatting + option. These are WebKit web inspectors, Firefox ([since version + 31](https://hacks.mozilla.org/2014/05/editable-box-model-multiple-selection-sublime-text-keys-much-more-firefox-developer-tools-episode-31/)) + and the Firebug plugin for Firefox (any version). 
+ + Colored output looks something like: + + ![](https://cloud.githubusercontent.com/assets/71256/3139768/b98c5fd8-e8ef-11e3-862a-f7253b6f47c6.png) + + +## Output streams + + By default `debug` will log to stderr, however this can be configured per-namespace by overriding the `log` method: + +Example _stdout.js_: + +```js +var debug = require('debug'); +var error = debug('app:error'); + +// by default stderr is used +error('goes to stderr!'); + +var log = debug('app:log'); +// set this namespace to log via console.log +log.log = console.log.bind(console); // don't forget to bind to console! +log('goes to stdout'); +error('still goes to stderr!'); + +// set all output to go via console.info +// overrides all per-namespace log settings +debug.log = console.info.bind(console); +error('now goes to stdout via console.info'); +log('still goes to stdout, but via console.info now'); +``` + + +## Authors + + - TJ Holowaychuk + - Nathan Rajlich + - Andrew Rhyne + +## Backers + +Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/debug#backer)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Sponsors + +Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/debug#sponsor)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## License + +(The MIT License) + +Copyright (c) 2014-2016 TJ Holowaychuk <tj@vision-media.ca> + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/data/node_modules/debug/component.json b/data/node_modules/debug/component.json new file mode 100644 index 0000000000000000000000000000000000000000..9de26410f0d0bba2e48a07f094407d602eb5dd89 --- /dev/null +++ b/data/node_modules/debug/component.json @@ -0,0 +1,19 @@ +{ + "name": "debug", + "repo": "visionmedia/debug", + "description": "small debugging utility", + "version": "2.6.9", + "keywords": [ + "debug", + "log", + "debugger" + ], + "main": "src/browser.js", + "scripts": [ + "src/browser.js", + "src/debug.js" + ], + "dependencies": { + "rauchg/ms.js": "0.7.1" + } +} diff --git a/data/node_modules/debug/karma.conf.js b/data/node_modules/debug/karma.conf.js new file mode 100644 index 0000000000000000000000000000000000000000..103a82d15bd72b3cdf9ba4108272985f7e0bfdb3 --- /dev/null +++ b/data/node_modules/debug/karma.conf.js @@ -0,0 +1,70 @@ +// Karma configuration +// Generated on Fri Dec 16 2016 13:09:51 GMT+0000 (UTC) + +module.exports = function(config) { + config.set({ + + // base path that will be used to resolve all patterns (eg. 
files, exclude) + basePath: '', + + + // frameworks to use + // available frameworks: https://npmjs.org/browse/keyword/karma-adapter + frameworks: ['mocha', 'chai', 'sinon'], + + + // list of files / patterns to load in the browser + files: [ + 'dist/debug.js', + 'test/*spec.js' + ], + + + // list of files to exclude + exclude: [ + 'src/node.js' + ], + + + // preprocess matching files before serving them to the browser + // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor + preprocessors: { + }, + + // test results reporter to use + // possible values: 'dots', 'progress' + // available reporters: https://npmjs.org/browse/keyword/karma-reporter + reporters: ['progress'], + + + // web server port + port: 9876, + + + // enable / disable colors in the output (reporters and logs) + colors: true, + + + // level of logging + // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG + logLevel: config.LOG_INFO, + + + // enable / disable watching file and executing tests whenever any file changes + autoWatch: true, + + + // start these browsers + // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher + browsers: ['PhantomJS'], + + + // Continuous Integration mode + // if true, Karma captures browsers, runs the tests and exits + singleRun: false, + + // Concurrency level + // how many browser should be started simultaneous + concurrency: Infinity + }) +} diff --git a/data/node_modules/debug/node.js b/data/node_modules/debug/node.js new file mode 100644 index 0000000000000000000000000000000000000000..7fc36fe6dbecbfd41530c5a490cc738ec2968653 --- /dev/null +++ b/data/node_modules/debug/node.js @@ -0,0 +1 @@ +module.exports = require('./src/node'); diff --git a/data/node_modules/debug/package.json b/data/node_modules/debug/package.json new file mode 100644 index 0000000000000000000000000000000000000000..dc787ba76781de4c1d4721b69aa881a548365a90 --- /dev/null +++ 
b/data/node_modules/debug/package.json @@ -0,0 +1,49 @@ +{ + "name": "debug", + "version": "2.6.9", + "repository": { + "type": "git", + "url": "git://github.com/visionmedia/debug.git" + }, + "description": "small debugging utility", + "keywords": [ + "debug", + "log", + "debugger" + ], + "author": "TJ Holowaychuk ", + "contributors": [ + "Nathan Rajlich (http://n8.io)", + "Andrew Rhyne " + ], + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + }, + "devDependencies": { + "browserify": "9.0.3", + "chai": "^3.5.0", + "concurrently": "^3.1.0", + "coveralls": "^2.11.15", + "eslint": "^3.12.1", + "istanbul": "^0.4.5", + "karma": "^1.3.0", + "karma-chai": "^0.1.0", + "karma-mocha": "^1.3.0", + "karma-phantomjs-launcher": "^1.0.2", + "karma-sinon": "^1.0.5", + "mocha": "^3.2.0", + "mocha-lcov-reporter": "^1.2.0", + "rimraf": "^2.5.4", + "sinon": "^1.17.6", + "sinon-chai": "^2.8.0" + }, + "main": "./src/index.js", + "browser": "./src/browser.js", + "component": { + "scripts": { + "debug/index.js": "browser.js", + "debug/debug.js": "debug.js" + } + } +} diff --git a/data/node_modules/debug/src/browser.js b/data/node_modules/debug/src/browser.js new file mode 100644 index 0000000000000000000000000000000000000000..7106924934501fd4035efe78678281020328acc5 --- /dev/null +++ b/data/node_modules/debug/src/browser.js @@ -0,0 +1,185 @@ +/** + * This is the web browser implementation of `debug()`. + * + * Expose `debug()` as the module. + */ + +exports = module.exports = require('./debug'); +exports.log = log; +exports.formatArgs = formatArgs; +exports.save = save; +exports.load = load; +exports.useColors = useColors; +exports.storage = 'undefined' != typeof chrome + && 'undefined' != typeof chrome.storage + ? chrome.storage.local + : localstorage(); + +/** + * Colors. 
+ */ + +exports.colors = [ + 'lightseagreen', + 'forestgreen', + 'goldenrod', + 'dodgerblue', + 'darkorchid', + 'crimson' +]; + +/** + * Currently only WebKit-based Web Inspectors, Firefox >= v31, + * and the Firebug extension (any Firefox version) are known + * to support "%c" CSS customizations. + * + * TODO: add a `localStorage` variable to explicitly enable/disable colors + */ + +function useColors() { + // NB: In an Electron preload script, document will be defined but not fully + // initialized. Since we know we're in Chrome, we'll just detect this case + // explicitly + if (typeof window !== 'undefined' && window.process && window.process.type === 'renderer') { + return true; + } + + // is webkit? http://stackoverflow.com/a/16459606/376773 + // document is undefined in react-native: https://github.com/facebook/react-native/pull/1632 + return (typeof document !== 'undefined' && document.documentElement && document.documentElement.style && document.documentElement.style.WebkitAppearance) || + // is firebug? http://stackoverflow.com/a/398120/376773 + (typeof window !== 'undefined' && window.console && (window.console.firebug || (window.console.exception && window.console.table))) || + // is firefox >= v31? + // https://developer.mozilla.org/en-US/docs/Tools/Web_Console#Styling_messages + (typeof navigator !== 'undefined' && navigator.userAgent && navigator.userAgent.toLowerCase().match(/firefox\/(\d+)/) && parseInt(RegExp.$1, 10) >= 31) || + // double check webkit in userAgent just in case we are in a worker + (typeof navigator !== 'undefined' && navigator.userAgent && navigator.userAgent.toLowerCase().match(/applewebkit\/(\d+)/)); +} + +/** + * Map %j to `JSON.stringify()`, since no Web Inspectors do that by default. + */ + +exports.formatters.j = function(v) { + try { + return JSON.stringify(v); + } catch (err) { + return '[UnexpectedJSONParseError]: ' + err.message; + } +}; + + +/** + * Colorize log arguments if enabled. 
+ * + * @api public + */ + +function formatArgs(args) { + var useColors = this.useColors; + + args[0] = (useColors ? '%c' : '') + + this.namespace + + (useColors ? ' %c' : ' ') + + args[0] + + (useColors ? '%c ' : ' ') + + '+' + exports.humanize(this.diff); + + if (!useColors) return; + + var c = 'color: ' + this.color; + args.splice(1, 0, c, 'color: inherit') + + // the final "%c" is somewhat tricky, because there could be other + // arguments passed either before or after the %c, so we need to + // figure out the correct index to insert the CSS into + var index = 0; + var lastC = 0; + args[0].replace(/%[a-zA-Z%]/g, function(match) { + if ('%%' === match) return; + index++; + if ('%c' === match) { + // we only are interested in the *last* %c + // (the user may have provided their own) + lastC = index; + } + }); + + args.splice(lastC, 0, c); +} + +/** + * Invokes `console.log()` when available. + * No-op when `console.log` is not a "function". + * + * @api public + */ + +function log() { + // this hackery is required for IE8/9, where + // the `console.log` function doesn't have 'apply' + return 'object' === typeof console + && console.log + && Function.prototype.apply.call(console.log, console, arguments); +} + +/** + * Save `namespaces`. + * + * @param {String} namespaces + * @api private + */ + +function save(namespaces) { + try { + if (null == namespaces) { + exports.storage.removeItem('debug'); + } else { + exports.storage.debug = namespaces; + } + } catch(e) {} +} + +/** + * Load `namespaces`. + * + * @return {String} returns the previously persisted debug modes + * @api private + */ + +function load() { + var r; + try { + r = exports.storage.debug; + } catch(e) {} + + // If debug isn't set in LS, and we're in Electron, try to load $DEBUG + if (!r && typeof process !== 'undefined' && 'env' in process) { + r = process.env.DEBUG; + } + + return r; +} + +/** + * Enable namespaces listed in `localStorage.debug` initially. 
+ */ + +exports.enable(load()); + +/** + * Localstorage attempts to return the localstorage. + * + * This is necessary because safari throws + * when a user disables cookies/localstorage + * and you attempt to access it. + * + * @return {LocalStorage} + * @api private + */ + +function localstorage() { + try { + return window.localStorage; + } catch (e) {} +} diff --git a/data/node_modules/debug/src/debug.js b/data/node_modules/debug/src/debug.js new file mode 100644 index 0000000000000000000000000000000000000000..6a5e3fc94c3ab80e123c3056b6c5dbe056d21658 --- /dev/null +++ b/data/node_modules/debug/src/debug.js @@ -0,0 +1,202 @@ + +/** + * This is the common logic for both the Node.js and web browser + * implementations of `debug()`. + * + * Expose `debug()` as the module. + */ + +exports = module.exports = createDebug.debug = createDebug['default'] = createDebug; +exports.coerce = coerce; +exports.disable = disable; +exports.enable = enable; +exports.enabled = enabled; +exports.humanize = require('ms'); + +/** + * The currently active debug mode names, and names to skip. + */ + +exports.names = []; +exports.skips = []; + +/** + * Map of special "%n" handling functions, for the debug "format" argument. + * + * Valid key names are a single, lower or upper-case letter, i.e. "n" and "N". + */ + +exports.formatters = {}; + +/** + * Previous log timestamp. + */ + +var prevTime; + +/** + * Select a color. + * @param {String} namespace + * @return {Number} + * @api private + */ + +function selectColor(namespace) { + var hash = 0, i; + + for (i in namespace) { + hash = ((hash << 5) - hash) + namespace.charCodeAt(i); + hash |= 0; // Convert to 32bit integer + } + + return exports.colors[Math.abs(hash) % exports.colors.length]; +} + +/** + * Create a debugger with the given `namespace`. + * + * @param {String} namespace + * @return {Function} + * @api public + */ + +function createDebug(namespace) { + + function debug() { + // disabled? 
+ if (!debug.enabled) return; + + var self = debug; + + // set `diff` timestamp + var curr = +new Date(); + var ms = curr - (prevTime || curr); + self.diff = ms; + self.prev = prevTime; + self.curr = curr; + prevTime = curr; + + // turn the `arguments` into a proper Array + var args = new Array(arguments.length); + for (var i = 0; i < args.length; i++) { + args[i] = arguments[i]; + } + + args[0] = exports.coerce(args[0]); + + if ('string' !== typeof args[0]) { + // anything else let's inspect with %O + args.unshift('%O'); + } + + // apply any `formatters` transformations + var index = 0; + args[0] = args[0].replace(/%([a-zA-Z%])/g, function(match, format) { + // if we encounter an escaped % then don't increase the array index + if (match === '%%') return match; + index++; + var formatter = exports.formatters[format]; + if ('function' === typeof formatter) { + var val = args[index]; + match = formatter.call(self, val); + + // now we need to remove `args[index]` since it's inlined in the `format` + args.splice(index, 1); + index--; + } + return match; + }); + + // apply env-specific formatting (colors, etc.) + exports.formatArgs.call(self, args); + + var logFn = debug.log || exports.log || console.log.bind(console); + logFn.apply(self, args); + } + + debug.namespace = namespace; + debug.enabled = exports.enabled(namespace); + debug.useColors = exports.useColors(); + debug.color = selectColor(namespace); + + // env-specific initialization logic for debug instances + if ('function' === typeof exports.init) { + exports.init(debug); + } + + return debug; +} + +/** + * Enables a debug mode by namespaces. This can include modes + * separated by a colon and wildcards. + * + * @param {String} namespaces + * @api public + */ + +function enable(namespaces) { + exports.save(namespaces); + + exports.names = []; + exports.skips = []; + + var split = (typeof namespaces === 'string' ? 
namespaces : '').split(/[\s,]+/); + var len = split.length; + + for (var i = 0; i < len; i++) { + if (!split[i]) continue; // ignore empty strings + namespaces = split[i].replace(/\*/g, '.*?'); + if (namespaces[0] === '-') { + exports.skips.push(new RegExp('^' + namespaces.substr(1) + '$')); + } else { + exports.names.push(new RegExp('^' + namespaces + '$')); + } + } +} + +/** + * Disable debug output. + * + * @api public + */ + +function disable() { + exports.enable(''); +} + +/** + * Returns true if the given mode name is enabled, false otherwise. + * + * @param {String} name + * @return {Boolean} + * @api public + */ + +function enabled(name) { + var i, len; + for (i = 0, len = exports.skips.length; i < len; i++) { + if (exports.skips[i].test(name)) { + return false; + } + } + for (i = 0, len = exports.names.length; i < len; i++) { + if (exports.names[i].test(name)) { + return true; + } + } + return false; +} + +/** + * Coerce `val`. + * + * @param {Mixed} val + * @return {Mixed} + * @api private + */ + +function coerce(val) { + if (val instanceof Error) return val.stack || val.message; + return val; +} diff --git a/data/node_modules/debug/src/index.js b/data/node_modules/debug/src/index.js new file mode 100644 index 0000000000000000000000000000000000000000..e12cf4d58c9f2d6d2d2e656f9cbb0f703cb5fa29 --- /dev/null +++ b/data/node_modules/debug/src/index.js @@ -0,0 +1,10 @@ +/** + * Detect Electron renderer process, which is node, but we should + * treat as a browser. 
+ */ + +if (typeof process !== 'undefined' && process.type === 'renderer') { + module.exports = require('./browser.js'); +} else { + module.exports = require('./node.js'); +} diff --git a/data/node_modules/debug/src/inspector-log.js b/data/node_modules/debug/src/inspector-log.js new file mode 100644 index 0000000000000000000000000000000000000000..60ea6c04aafd41d0ea3bcd78f58312ecf0eda436 --- /dev/null +++ b/data/node_modules/debug/src/inspector-log.js @@ -0,0 +1,15 @@ +module.exports = inspectorLog; + +// black hole +const nullStream = new (require('stream').Writable)(); +nullStream._write = () => {}; + +/** + * Outputs a `console.log()` to the Node.js Inspector console *only*. + */ +function inspectorLog() { + const stdout = console._stdout; + console._stdout = nullStream; + console.log.apply(console, arguments); + console._stdout = stdout; +} diff --git a/data/node_modules/debug/src/node.js b/data/node_modules/debug/src/node.js new file mode 100644 index 0000000000000000000000000000000000000000..b15109c905a45bcb5db701cf37cf4e19385c3167 --- /dev/null +++ b/data/node_modules/debug/src/node.js @@ -0,0 +1,248 @@ +/** + * Module dependencies. + */ + +var tty = require('tty'); +var util = require('util'); + +/** + * This is the Node.js implementation of `debug()`. + * + * Expose `debug()` as the module. + */ + +exports = module.exports = require('./debug'); +exports.init = init; +exports.log = log; +exports.formatArgs = formatArgs; +exports.save = save; +exports.load = load; +exports.useColors = useColors; + +/** + * Colors. + */ + +exports.colors = [6, 2, 3, 4, 5, 1]; + +/** + * Build up the default `inspectOpts` object from the environment variables. 
+ * + * $ DEBUG_COLORS=no DEBUG_DEPTH=10 DEBUG_SHOW_HIDDEN=enabled node script.js + */ + +exports.inspectOpts = Object.keys(process.env).filter(function (key) { + return /^debug_/i.test(key); +}).reduce(function (obj, key) { + // camel-case + var prop = key + .substring(6) + .toLowerCase() + .replace(/_([a-z])/g, function (_, k) { return k.toUpperCase() }); + + // coerce string value into JS value + var val = process.env[key]; + if (/^(yes|on|true|enabled)$/i.test(val)) val = true; + else if (/^(no|off|false|disabled)$/i.test(val)) val = false; + else if (val === 'null') val = null; + else val = Number(val); + + obj[prop] = val; + return obj; +}, {}); + +/** + * The file descriptor to write the `debug()` calls to. + * Set the `DEBUG_FD` env variable to override with another value. i.e.: + * + * $ DEBUG_FD=3 node script.js 3>debug.log + */ + +var fd = parseInt(process.env.DEBUG_FD, 10) || 2; + +if (1 !== fd && 2 !== fd) { + util.deprecate(function(){}, 'except for stderr(2) and stdout(1), any other usage of DEBUG_FD is deprecated. Override debug.log if you want to use a different log function (https://git.io/debug_fd)')() +} + +var stream = 1 === fd ? process.stdout : + 2 === fd ? process.stderr : + createWritableStdioStream(fd); + +/** + * Is stdout a TTY? Colored output is enabled when `true`. + */ + +function useColors() { + return 'colors' in exports.inspectOpts + ? Boolean(exports.inspectOpts.colors) + : tty.isatty(fd); +} + +/** + * Map %o to `util.inspect()`, all on a single line. + */ + +exports.formatters.o = function(v) { + this.inspectOpts.colors = this.useColors; + return util.inspect(v, this.inspectOpts) + .split('\n').map(function(str) { + return str.trim() + }).join(' '); +}; + +/** + * Map %o to `util.inspect()`, allowing multiple lines if needed. + */ + +exports.formatters.O = function(v) { + this.inspectOpts.colors = this.useColors; + return util.inspect(v, this.inspectOpts); +}; + +/** + * Adds ANSI color escape codes if enabled. 
+ * + * @api public + */ + +function formatArgs(args) { + var name = this.namespace; + var useColors = this.useColors; + + if (useColors) { + var c = this.color; + var prefix = ' \u001b[3' + c + ';1m' + name + ' ' + '\u001b[0m'; + + args[0] = prefix + args[0].split('\n').join('\n' + prefix); + args.push('\u001b[3' + c + 'm+' + exports.humanize(this.diff) + '\u001b[0m'); + } else { + args[0] = new Date().toUTCString() + + ' ' + name + ' ' + args[0]; + } +} + +/** + * Invokes `util.format()` with the specified arguments and writes to `stream`. + */ + +function log() { + return stream.write(util.format.apply(util, arguments) + '\n'); +} + +/** + * Save `namespaces`. + * + * @param {String} namespaces + * @api private + */ + +function save(namespaces) { + if (null == namespaces) { + // If you set a process.env field to null or undefined, it gets cast to the + // string 'null' or 'undefined'. Just delete instead. + delete process.env.DEBUG; + } else { + process.env.DEBUG = namespaces; + } +} + +/** + * Load `namespaces`. + * + * @return {String} returns the previously persisted debug modes + * @api private + */ + +function load() { + return process.env.DEBUG; +} + +/** + * Copied from `node/src/node.js`. + * + * XXX: It's lame that node doesn't expose this API out-of-the-box. It also + * relies on the undocumented `tty_wrap.guessHandleType()` which is also lame. + */ + +function createWritableStdioStream (fd) { + var stream; + var tty_wrap = process.binding('tty_wrap'); + + // Note stream._type is used for test-module-load-list.js + + switch (tty_wrap.guessHandleType(fd)) { + case 'TTY': + stream = new tty.WriteStream(fd); + stream._type = 'tty'; + + // Hack to have stream not keep the event loop alive. 
+ // See https://github.com/joyent/node/issues/1726 + if (stream._handle && stream._handle.unref) { + stream._handle.unref(); + } + break; + + case 'FILE': + var fs = require('fs'); + stream = new fs.SyncWriteStream(fd, { autoClose: false }); + stream._type = 'fs'; + break; + + case 'PIPE': + case 'TCP': + var net = require('net'); + stream = new net.Socket({ + fd: fd, + readable: false, + writable: true + }); + + // FIXME Should probably have an option in net.Socket to create a + // stream from an existing fd which is writable only. But for now + // we'll just add this hack and set the `readable` member to false. + // Test: ./node test/fixtures/echo.js < /etc/passwd + stream.readable = false; + stream.read = null; + stream._type = 'pipe'; + + // FIXME Hack to have stream not keep the event loop alive. + // See https://github.com/joyent/node/issues/1726 + if (stream._handle && stream._handle.unref) { + stream._handle.unref(); + } + break; + + default: + // Probably an error on in uv_guess_handle() + throw new Error('Implement me. Unknown stream file type!'); + } + + // For supporting legacy API we put the FD here. + stream.fd = fd; + + stream._isStdio = true; + + return stream; +} + +/** + * Init logic for `debug` instances. + * + * Create a new `inspectOpts` object in case `useColors` is set + * differently for a particular `debug` instance. + */ + +function init (debug) { + debug.inspectOpts = {}; + + var keys = Object.keys(exports.inspectOpts); + for (var i = 0; i < keys.length; i++) { + debug.inspectOpts[keys[i]] = exports.inspectOpts[keys[i]]; + } +} + +/** + * Enable namespaces listed in `process.env.DEBUG` initially. 
+ */ + +exports.enable(load()); diff --git a/data/node_modules/define-data-property/.eslintrc b/data/node_modules/define-data-property/.eslintrc new file mode 100644 index 0000000000000000000000000000000000000000..75443e81ee0f6180b9ceee33bc69fa8696162783 --- /dev/null +++ b/data/node_modules/define-data-property/.eslintrc @@ -0,0 +1,24 @@ +{ + "root": true, + + "extends": "@ljharb", + + "rules": { + "complexity": 0, + "id-length": 0, + "new-cap": ["error", { + "capIsNewExceptions": [ + "GetIntrinsic", + ], + }], + }, + + "overrides": [ + { + "files": "test/**", + "rules": { + "max-lines-per-function": "off", + }, + }, + ], +} diff --git a/data/node_modules/define-data-property/.github/FUNDING.yml b/data/node_modules/define-data-property/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..3e17725ddfc684e0a254b7eed5adb8d05bfdeaa5 --- /dev/null +++ b/data/node_modules/define-data-property/.github/FUNDING.yml @@ -0,0 +1,12 @@ +# These are supported funding model platforms + +github: [ljharb] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: npm/define-data-property +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/data/node_modules/define-data-property/.nycrc b/data/node_modules/define-data-property/.nycrc new file mode 100644 index 0000000000000000000000000000000000000000..1826526e091b89c896e7099ccd891db79165e329 --- /dev/null +++ b/data/node_modules/define-data-property/.nycrc @@ -0,0 +1,13 @@ +{ + "all": true, + "check-coverage": false, + "reporter": ["text-summary", "text", "html", "json"], + "lines": 86, + 
"statements": 85.93, + "functions": 82.43, + "branches": 76.06, + "exclude": [ + "coverage", + "test" + ] +} diff --git a/data/node_modules/define-data-property/CHANGELOG.md b/data/node_modules/define-data-property/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..4eed75ea9096735ec95f1158a6f30461cec02430 --- /dev/null +++ b/data/node_modules/define-data-property/CHANGELOG.md @@ -0,0 +1,70 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [v1.1.4](https://github.com/ljharb/define-data-property/compare/v1.1.3...v1.1.4) - 2024-02-13 + +### Commits + +- [Refactor] use `es-define-property` [`90f2f4c`](https://github.com/ljharb/define-data-property/commit/90f2f4cc20298401e71c28e1e08888db12021453) +- [Dev Deps] update `@types/object.getownpropertydescriptors` [`cd929d9`](https://github.com/ljharb/define-data-property/commit/cd929d9a04f5f2fdcfa9d5be140940b91a083153) + +## [v1.1.3](https://github.com/ljharb/define-data-property/compare/v1.1.2...v1.1.3) - 2024-02-12 + +### Commits + +- [types] hand-write d.ts instead of emitting it [`0cbc988`](https://github.com/ljharb/define-data-property/commit/0cbc988203c105f2d97948327c7167ebd33bd318) +- [meta] simplify `exports` [`690781e`](https://github.com/ljharb/define-data-property/commit/690781eed28bbf2d6766237efda0ba6dd591609e) +- [Dev Deps] update `hasown`; clean up DT packages [`6cdfd1c`](https://github.com/ljharb/define-data-property/commit/6cdfd1cb2d91d791bfd18cda5d5cab232fd5d8fc) +- [actions] cleanup [`3142bc6`](https://github.com/ljharb/define-data-property/commit/3142bc6a4bc406a51f5b04f31e98562a27f35ffd) +- [meta] add `funding` [`8474423`](https://github.com/ljharb/define-data-property/commit/847442391a79779af3e0f1bf0b5bb923552b7804) +- [Deps] update `get-intrinsic` 
[`3e9be00`](https://github.com/ljharb/define-data-property/commit/3e9be00e07784ba34e7c77d8bc0fdbc832ad61de) + +## [v1.1.2](https://github.com/ljharb/define-data-property/compare/v1.1.1...v1.1.2) - 2024-02-05 + +### Commits + +- [Dev Deps] update @types packages, `object-inspect`, `tape`, `typescript` [`df41bf8`](https://github.com/ljharb/define-data-property/commit/df41bf84ca3456be6226055caab44e38e3a7fd2f) +- [Dev Deps] update DT packages, `aud`, `npmignore`, `tape`, typescript` [`fab0e4e`](https://github.com/ljharb/define-data-property/commit/fab0e4ec709ee02b79f42d6db3ee5f26e0a34b8a) +- [Dev Deps] use `hasown` instead of `has` [`aa51ef9`](https://github.com/ljharb/define-data-property/commit/aa51ef93f6403d49d9bb72a807bcdb6e418978c0) +- [Refactor] use `es-errors`, so things that only need those do not need `get-intrinsic` [`d89be50`](https://github.com/ljharb/define-data-property/commit/d89be50571175888d391238605122679f7e65ffc) +- [Deps] update `has-property-descriptors` [`7af887c`](https://github.com/ljharb/define-data-property/commit/7af887c9083b59b195b0079e04815cfed9fcee2b) +- [Deps] update `get-intrinsic` [`bb8728e`](https://github.com/ljharb/define-data-property/commit/bb8728ec42cd998505a7157ae24853a560c20646) + +## [v1.1.1](https://github.com/ljharb/define-data-property/compare/v1.1.0...v1.1.1) - 2023-10-12 + +### Commits + +- [Tests] fix tests in ES3 engines [`5c6920e`](https://github.com/ljharb/define-data-property/commit/5c6920edd1f52f675b02f417e539c28135b43f94) +- [Dev Deps] update `@types/es-value-fixtures`, `@types/for-each`, `@types/gopd`, `@types/has-property-descriptors`, `tape`, `typescript` [`7d82dfc`](https://github.com/ljharb/define-data-property/commit/7d82dfc20f778b4465bba06335dd53f6f431aea3) +- [Fix] IE 8 has a broken `Object.defineProperty` [`0672e1a`](https://github.com/ljharb/define-data-property/commit/0672e1af2a9fcc787e7c23b96dea60d290df5548) +- [meta] emit types on prepack 
[`73acb1f`](https://github.com/ljharb/define-data-property/commit/73acb1f903c21b314ec7156bf10f73c7910530c0) +- [Dev Deps] update `tape`, `typescript` [`9489a77`](https://github.com/ljharb/define-data-property/commit/9489a7738bf2ecf0ac71d5b78ec4ca6ad7ba0142) + +## [v1.1.0](https://github.com/ljharb/define-data-property/compare/v1.0.1...v1.1.0) - 2023-09-13 + +### Commits + +- [New] add `loose` arg [`155235a`](https://github.com/ljharb/define-data-property/commit/155235a4c4d7741f6de01cd87c99599a56654b72) +- [New] allow `null` to be passed for the non* args [`7d2fa5f`](https://github.com/ljharb/define-data-property/commit/7d2fa5f06be0392736c13b126f7cd38979f34792) + +## [v1.0.1](https://github.com/ljharb/define-data-property/compare/v1.0.0...v1.0.1) - 2023-09-12 + +### Commits + +- [meta] add TS types [`43d763c`](https://github.com/ljharb/define-data-property/commit/43d763c6c883f652de1c9c02ef6216ee507ffa69) +- [Dev Deps] update `@types/tape`, `typescript` [`f444985`](https://github.com/ljharb/define-data-property/commit/f444985811c36f3e6448a03ad2f9b7898917f4c7) +- [meta] add `safe-publish-latest`, [`172bb10`](https://github.com/ljharb/define-data-property/commit/172bb10890896ebb160e64398f6ee55760107bee) + +## v1.0.0 - 2023-09-12 + +### Commits + +- Initial implementation, tests, readme [`5b43d6b`](https://github.com/ljharb/define-data-property/commit/5b43d6b44e675a904810467a7d4e0adb7efc3196) +- Initial commit [`35e577a`](https://github.com/ljharb/define-data-property/commit/35e577a6ba59a98befa97776d70d90f3bea9009d) +- npm init [`82a0a04`](https://github.com/ljharb/define-data-property/commit/82a0a04a321ca7de220af02d41e2745e8a9962ed) +- Only apps should have lockfiles [`96df244`](https://github.com/ljharb/define-data-property/commit/96df244a3c6f426f9a2437be825d1c6f5dd7158e) +- [meta] use `npmignore` to autogenerate an npmignore file [`a87ff18`](https://github.com/ljharb/define-data-property/commit/a87ff18cb79e14c2eb5720486c4759fd9a189375) diff --git 
a/data/node_modules/define-data-property/LICENSE b/data/node_modules/define-data-property/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b4213ac640ad53446325d2dab2c2bc8620406b72 --- /dev/null +++ b/data/node_modules/define-data-property/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/data/node_modules/define-data-property/README.md b/data/node_modules/define-data-property/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f2304daef9b105844c4495414f1b8164d86befa2 --- /dev/null +++ b/data/node_modules/define-data-property/README.md @@ -0,0 +1,67 @@ +# define-data-property [![Version Badge][npm-version-svg]][package-url] + +[![github actions][actions-image]][actions-url] +[![coverage][codecov-image]][codecov-url] +[![License][license-image]][license-url] +[![Downloads][downloads-image]][downloads-url] + +[![npm badge][npm-badge-png]][package-url] + +Define a data property on an object. Will fall back to assignment in an engine without descriptors. + +The three `non*` argument can also be passed `null`, which will use the existing state if available. + +The `loose` argument will mean that if you attempt to set a non-normal data property, in an environment without descriptor support, it will fall back to normal assignment. + +## Usage + +```javascript +var defineDataProperty = require('define-data-property'); +var assert = require('assert'); + +var obj = {}; +defineDataProperty(obj, 'key', 'value'); +defineDataProperty( + obj, + 'key2', + 'value', + true, // nonEnumerable, optional + false, // nonWritable, optional + true, // nonConfigurable, optional + false // loose, optional +); + +assert.deepEqual( + Object.getOwnPropertyDescriptors(obj), + { + key: { + configurable: true, + enumerable: true, + value: 'value', + writable: true, + }, + key2: { + configurable: false, + enumerable: false, + value: 'value', + writable: true, + }, + } +); +``` + +[package-url]: https://npmjs.org/package/define-data-property +[npm-version-svg]: https://versionbadg.es/ljharb/define-data-property.svg +[deps-svg]: https://david-dm.org/ljharb/define-data-property.svg +[deps-url]: https://david-dm.org/ljharb/define-data-property +[dev-deps-svg]: https://david-dm.org/ljharb/define-data-property/dev-status.svg +[dev-deps-url]: 
https://david-dm.org/ljharb/define-data-property#info=devDependencies +[npm-badge-png]: https://nodei.co/npm/define-data-property.png?downloads=true&stars=true +[license-image]: https://img.shields.io/npm/l/define-data-property.svg +[license-url]: LICENSE +[downloads-image]: https://img.shields.io/npm/dm/define-data-property.svg +[downloads-url]: https://npm-stat.com/charts.html?package=define-data-property +[codecov-image]: https://codecov.io/gh/ljharb/define-data-property/branch/main/graphs/badge.svg +[codecov-url]: https://app.codecov.io/gh/ljharb/define-data-property/ +[actions-image]: https://img.shields.io/endpoint?url=https://github-actions-badge-u3jn4tfpocch.runkit.sh/ljharb/define-data-property +[actions-url]: https://github.com/ljharb/define-data-property/actions diff --git a/data/node_modules/define-data-property/index.d.ts b/data/node_modules/define-data-property/index.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..b56a77da820232226188558bb7c86772c5b2c834 --- /dev/null +++ b/data/node_modules/define-data-property/index.d.ts @@ -0,0 +1,12 @@ + +declare function defineDataProperty( + obj: Record, + property: keyof typeof obj, + value: typeof obj[typeof property], + nonEnumerable?: boolean | null, + nonWritable?: boolean | null, + nonConfigurable?: boolean | null, + loose?: boolean +): void; + +export = defineDataProperty; \ No newline at end of file diff --git a/data/node_modules/define-data-property/index.js b/data/node_modules/define-data-property/index.js new file mode 100644 index 0000000000000000000000000000000000000000..e1a38c07bb14bb1ca31acdf7b9d677abff94d34b --- /dev/null +++ b/data/node_modules/define-data-property/index.js @@ -0,0 +1,56 @@ +'use strict'; + +var $defineProperty = require('es-define-property'); + +var $SyntaxError = require('es-errors/syntax'); +var $TypeError = require('es-errors/type'); + +var gopd = require('gopd'); + +/** @type {import('.')} */ +module.exports = function defineDataProperty( + obj, + 
property, + value +) { + if (!obj || (typeof obj !== 'object' && typeof obj !== 'function')) { + throw new $TypeError('`obj` must be an object or a function`'); + } + if (typeof property !== 'string' && typeof property !== 'symbol') { + throw new $TypeError('`property` must be a string or a symbol`'); + } + if (arguments.length > 3 && typeof arguments[3] !== 'boolean' && arguments[3] !== null) { + throw new $TypeError('`nonEnumerable`, if provided, must be a boolean or null'); + } + if (arguments.length > 4 && typeof arguments[4] !== 'boolean' && arguments[4] !== null) { + throw new $TypeError('`nonWritable`, if provided, must be a boolean or null'); + } + if (arguments.length > 5 && typeof arguments[5] !== 'boolean' && arguments[5] !== null) { + throw new $TypeError('`nonConfigurable`, if provided, must be a boolean or null'); + } + if (arguments.length > 6 && typeof arguments[6] !== 'boolean') { + throw new $TypeError('`loose`, if provided, must be a boolean'); + } + + var nonEnumerable = arguments.length > 3 ? arguments[3] : null; + var nonWritable = arguments.length > 4 ? arguments[4] : null; + var nonConfigurable = arguments.length > 5 ? arguments[5] : null; + var loose = arguments.length > 6 ? arguments[6] : false; + + /* @type {false | TypedPropertyDescriptor} */ + var desc = !!gopd && gopd(obj, property); + + if ($defineProperty) { + $defineProperty(obj, property, { + configurable: nonConfigurable === null && desc ? desc.configurable : !nonConfigurable, + enumerable: nonEnumerable === null && desc ? desc.enumerable : !nonEnumerable, + value: value, + writable: nonWritable === null && desc ? 
desc.writable : !nonWritable + }); + } else if (loose || (!nonEnumerable && !nonWritable && !nonConfigurable)) { + // must fall back to [[Set]], and was not explicitly asked to make non-enumerable, non-writable, or non-configurable + obj[property] = value; // eslint-disable-line no-param-reassign + } else { + throw new $SyntaxError('This environment does not support defining a property as non-configurable, non-writable, or non-enumerable.'); + } +}; diff --git a/data/node_modules/define-data-property/package.json b/data/node_modules/define-data-property/package.json new file mode 100644 index 0000000000000000000000000000000000000000..eec40971ebb115846a8c6f7d2d8a880b6fb92bfe --- /dev/null +++ b/data/node_modules/define-data-property/package.json @@ -0,0 +1,106 @@ +{ + "name": "define-data-property", + "version": "1.1.4", + "description": "Define a data property on an object. Will fall back to assignment in an engine without descriptors.", + "main": "index.js", + "types": "./index.d.ts", + "exports": { + ".": "./index.js", + "./package.json": "./package.json" + }, + "sideEffects": false, + "scripts": { + "prepack": "npmignore --auto --commentLines=autogenerated", + "prepublish": "not-in-publish || npm run prepublishOnly", + "prepublishOnly": "safe-publish-latest", + "tsc": "tsc -p .", + "prelint": "evalmd README.md", + "lint": "eslint --ext=js,mjs .", + "postlint": "npm run tsc", + "pretest": "npm run lint", + "tests-only": "nyc tape 'test/**/*.js'", + "test": "npm run tests-only", + "posttest": "aud --production", + "version": "auto-changelog && git add CHANGELOG.md", + "postversion": "auto-changelog && git add CHANGELOG.md && git commit --no-edit --amend && git tag -f \"v$(node -e \"console.log(require('./package.json').version)\")\"" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/ljharb/define-data-property.git" + }, + "keywords": [ + "define", + "data", + "property", + "object", + "accessor", + "javascript", + "ecmascript", + 
"enumerable", + "configurable", + "writable" + ], + "author": "Jordan Harband ", + "funding": { + "url": "https://github.com/sponsors/ljharb" + }, + "license": "MIT", + "bugs": { + "url": "https://github.com/ljharb/define-data-property/issues" + }, + "homepage": "https://github.com/ljharb/define-data-property#readme", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "devDependencies": { + "@ljharb/eslint-config": "^21.1.0", + "@types/call-bind": "^1.0.5", + "@types/define-properties": "^1.1.5", + "@types/es-value-fixtures": "^1.4.4", + "@types/for-each": "^0.3.3", + "@types/get-intrinsic": "^1.2.2", + "@types/gopd": "^1.0.3", + "@types/has-property-descriptors": "^1.0.3", + "@types/object-inspect": "^1.8.4", + "@types/object.getownpropertydescriptors": "^2.1.4", + "@types/tape": "^5.6.4", + "aud": "^2.0.4", + "auto-changelog": "^2.4.0", + "es-value-fixtures": "^1.4.2", + "eslint": "=8.8.0", + "evalmd": "^0.0.19", + "for-each": "^0.3.3", + "hasown": "^2.0.1", + "in-publish": "^2.0.1", + "npmignore": "^0.3.1", + "nyc": "^10.3.2", + "object-inspect": "^1.13.1", + "object.getownpropertydescriptors": "^2.1.7", + "reflect.ownkeys": "^1.1.4", + "safe-publish-latest": "^2.0.0", + "tape": "^5.7.4", + "typescript": "next" + }, + "engines": { + "node": ">= 0.4" + }, + "testling": { + "files": "test/index.js" + }, + "auto-changelog": { + "output": "CHANGELOG.md", + "template": "keepachangelog", + "unreleased": false, + "commitLimit": false, + "backfillLimit": false, + "hideCredit": true + }, + "publishConfig": { + "ignore": [ + ".github/workflows", + "types/reflect.ownkeys" + ] + } +} diff --git a/data/node_modules/define-data-property/test/index.js b/data/node_modules/define-data-property/test/index.js new file mode 100644 index 0000000000000000000000000000000000000000..68204c66b26697b2d2b1ae9ef97eb5de63c55e2d --- /dev/null +++ b/data/node_modules/define-data-property/test/index.js @@ -0,0 +1,392 @@ +'use strict'; + +var 
test = require('tape'); +var v = require('es-value-fixtures'); +var forEach = require('for-each'); +var inspect = require('object-inspect'); +var hasOwn = require('hasown'); +var hasPropertyDescriptors = require('has-property-descriptors')(); +var getOwnPropertyDescriptors = require('object.getownpropertydescriptors'); +var ownKeys = require('reflect.ownkeys'); + +var defineDataProperty = require('../'); + +test('defineDataProperty', function (t) { + t.test('argument validation', function (st) { + forEach(v.primitives, function (nonObject) { + st['throws']( + // @ts-expect-error + function () { defineDataProperty(nonObject, 'key', 'value'); }, + TypeError, + 'throws on non-object input: ' + inspect(nonObject) + ); + }); + + forEach(v.nonPropertyKeys, function (nonPropertyKey) { + st['throws']( + // @ts-expect-error + function () { defineDataProperty({}, nonPropertyKey, 'value'); }, + TypeError, + 'throws on non-PropertyKey input: ' + inspect(nonPropertyKey) + ); + }); + + forEach(v.nonBooleans, function (nonBoolean) { + if (nonBoolean !== null) { + st['throws']( + // @ts-expect-error + function () { defineDataProperty({}, 'key', 'value', nonBoolean); }, + TypeError, + 'throws on non-boolean nonEnumerable: ' + inspect(nonBoolean) + ); + + st['throws']( + // @ts-expect-error + function () { defineDataProperty({}, 'key', 'value', false, nonBoolean); }, + TypeError, + 'throws on non-boolean nonWritable: ' + inspect(nonBoolean) + ); + + st['throws']( + // @ts-expect-error + function () { defineDataProperty({}, 'key', 'value', false, false, nonBoolean); }, + TypeError, + 'throws on non-boolean nonConfigurable: ' + inspect(nonBoolean) + ); + } + }); + + st.end(); + }); + + t.test('normal data property', function (st) { + /** @type {Record} */ + var obj = { existing: 'existing property' }; + st.ok(hasOwn(obj, 'existing'), 'has initial own property'); + st.equal(obj.existing, 'existing property', 'has expected initial value'); + + var res = defineDataProperty(obj, 'added', 
'added property'); + st.equal(res, void undefined, 'returns `undefined`'); + st.ok(hasOwn(obj, 'added'), 'has expected own property'); + st.equal(obj.added, 'added property', 'has expected value'); + + defineDataProperty(obj, 'existing', 'new value'); + st.ok(hasOwn(obj, 'existing'), 'still has expected own property'); + st.equal(obj.existing, 'new value', 'has new expected value'); + + defineDataProperty(obj, 'explicit1', 'new value', false); + st.ok(hasOwn(obj, 'explicit1'), 'has expected own property (explicit enumerable)'); + st.equal(obj.explicit1, 'new value', 'has new expected value (explicit enumerable)'); + + defineDataProperty(obj, 'explicit2', 'new value', false, false); + st.ok(hasOwn(obj, 'explicit2'), 'has expected own property (explicit writable)'); + st.equal(obj.explicit2, 'new value', 'has new expected value (explicit writable)'); + + defineDataProperty(obj, 'explicit3', 'new value', false, false, false); + st.ok(hasOwn(obj, 'explicit3'), 'has expected own property (explicit configurable)'); + st.equal(obj.explicit3, 'new value', 'has new expected value (explicit configurable)'); + + st.end(); + }); + + t.test('loose mode', { skip: !hasPropertyDescriptors }, function (st) { + var obj = { existing: 'existing property' }; + + defineDataProperty(obj, 'added', 'added value 1', true, null, null, true); + st.deepEqual( + getOwnPropertyDescriptors(obj), + { + existing: { + configurable: true, + enumerable: true, + value: 'existing property', + writable: true + }, + added: { + configurable: true, + enumerable: !hasPropertyDescriptors, + value: 'added value 1', + writable: true + } + }, + 'in loose mode, obj still adds property 1' + ); + + defineDataProperty(obj, 'added', 'added value 2', false, true, null, true); + st.deepEqual( + getOwnPropertyDescriptors(obj), + { + existing: { + configurable: true, + enumerable: true, + value: 'existing property', + writable: true + }, + added: { + configurable: true, + enumerable: true, + value: 'added value 2', + 
writable: !hasPropertyDescriptors + } + }, + 'in loose mode, obj still adds property 2' + ); + + defineDataProperty(obj, 'added', 'added value 3', false, false, true, true); + st.deepEqual( + getOwnPropertyDescriptors(obj), + { + existing: { + configurable: true, + enumerable: true, + value: 'existing property', + writable: true + }, + added: { + configurable: !hasPropertyDescriptors, + enumerable: true, + value: 'added value 3', + writable: true + } + }, + 'in loose mode, obj still adds property 3' + ); + + st.end(); + }); + + t.test('non-normal data property, ES3', { skip: hasPropertyDescriptors }, function (st) { + /** @type {Record} */ + var obj = { existing: 'existing property' }; + + st['throws']( + function () { defineDataProperty(obj, 'added', 'added value', true); }, + SyntaxError, + 'nonEnumerable throws a Syntax Error' + ); + + st['throws']( + function () { defineDataProperty(obj, 'added', 'added value', false, true); }, + SyntaxError, + 'nonWritable throws a Syntax Error' + ); + + st['throws']( + function () { defineDataProperty(obj, 'added', 'added value', false, false, true); }, + SyntaxError, + 'nonWritable throws a Syntax Error' + ); + + st.deepEqual( + ownKeys(obj), + ['existing'], + 'obj still has expected keys' + ); + st.equal(obj.existing, 'existing property', 'obj still has expected values'); + + st.end(); + }); + + t.test('new non-normal data property, ES5+', { skip: !hasPropertyDescriptors }, function (st) { + /** @type {Record} */ + var obj = { existing: 'existing property' }; + + defineDataProperty(obj, 'nonEnum', null, true); + defineDataProperty(obj, 'nonWrit', null, false, true); + defineDataProperty(obj, 'nonConf', null, false, false, true); + + st.deepEqual( + getOwnPropertyDescriptors(obj), + { + existing: { + configurable: true, + enumerable: true, + value: 'existing property', + writable: true + }, + nonEnum: { + configurable: true, + enumerable: false, + value: null, + writable: true + }, + nonWrit: { + configurable: true, + 
enumerable: true, + value: null, + writable: false + }, + nonConf: { + configurable: false, + enumerable: true, + value: null, + writable: true + } + }, + 'obj has expected property descriptors' + ); + + st.end(); + }); + + t.test('existing non-normal data property, ES5+', { skip: !hasPropertyDescriptors }, function (st) { + // test case changing an existing non-normal property + + /** @type {Record} */ + var obj = {}; + Object.defineProperty(obj, 'nonEnum', { configurable: true, enumerable: false, value: null, writable: true }); + Object.defineProperty(obj, 'nonWrit', { configurable: true, enumerable: true, value: null, writable: false }); + Object.defineProperty(obj, 'nonConf', { configurable: false, enumerable: true, value: null, writable: true }); + + st.deepEqual( + getOwnPropertyDescriptors(obj), + { + nonEnum: { + configurable: true, + enumerable: false, + value: null, + writable: true + }, + nonWrit: { + configurable: true, + enumerable: true, + value: null, + writable: false + }, + nonConf: { + configurable: false, + enumerable: true, + value: null, + writable: true + } + }, + 'obj initially has expected property descriptors' + ); + + defineDataProperty(obj, 'nonEnum', 'new value', false); + defineDataProperty(obj, 'nonWrit', 'new value', false, false); + st['throws']( + function () { defineDataProperty(obj, 'nonConf', 'new value', false, false, false); }, + TypeError, + 'can not alter a nonconfigurable property' + ); + + st.deepEqual( + getOwnPropertyDescriptors(obj), + { + nonEnum: { + configurable: true, + enumerable: true, + value: 'new value', + writable: true + }, + nonWrit: { + configurable: true, + enumerable: true, + value: 'new value', + writable: true + }, + nonConf: { + configurable: false, + enumerable: true, + value: null, + writable: true + } + }, + 'obj ends up with expected property descriptors' + ); + + st.end(); + }); + + t.test('frozen object, ES5+', { skip: !hasPropertyDescriptors }, function (st) { + var frozen = Object.freeze({ 
existing: true }); + + st['throws']( + function () { defineDataProperty(frozen, 'existing', 'new value'); }, + TypeError, + 'frozen object can not modify an existing property' + ); + + st['throws']( + function () { defineDataProperty(frozen, 'new', 'new property'); }, + TypeError, + 'frozen object can not add a new property' + ); + + st.end(); + }); + + t.test('sealed object, ES5+', { skip: !hasPropertyDescriptors }, function (st) { + var sealed = Object.seal({ existing: true }); + st.deepEqual( + Object.getOwnPropertyDescriptor(sealed, 'existing'), + { + configurable: false, + enumerable: true, + value: true, + writable: true + }, + 'existing value on sealed object has expected descriptor' + ); + + defineDataProperty(sealed, 'existing', 'new value'); + + st.deepEqual( + Object.getOwnPropertyDescriptor(sealed, 'existing'), + { + configurable: false, + enumerable: true, + value: 'new value', + writable: true + }, + 'existing value on sealed object has changed descriptor' + ); + + st['throws']( + function () { defineDataProperty(sealed, 'new', 'new property'); }, + TypeError, + 'sealed object can not add a new property' + ); + + st.end(); + }); + + t.test('nonextensible object, ES5+', { skip: !hasPropertyDescriptors }, function (st) { + var nonExt = Object.preventExtensions({ existing: true }); + + st.deepEqual( + Object.getOwnPropertyDescriptor(nonExt, 'existing'), + { + configurable: true, + enumerable: true, + value: true, + writable: true + }, + 'existing value on non-extensible object has expected descriptor' + ); + + defineDataProperty(nonExt, 'existing', 'new value', true); + + st.deepEqual( + Object.getOwnPropertyDescriptor(nonExt, 'existing'), + { + configurable: true, + enumerable: false, + value: 'new value', + writable: true + }, + 'existing value on non-extensible object has changed descriptor' + ); + + st['throws']( + function () { defineDataProperty(nonExt, 'new', 'new property'); }, + TypeError, + 'non-extensible object can not add a new property' + 
); + + st.end(); + }); + + t.end(); +}); diff --git a/data/node_modules/define-data-property/tsconfig.json b/data/node_modules/define-data-property/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..69f060dccdff2bc2089bd1056026de02fe6c760a --- /dev/null +++ b/data/node_modules/define-data-property/tsconfig.json @@ -0,0 +1,59 @@ +{ + "compilerOptions": { + /* Visit https://aka.ms/tsconfig to read more about this file */ + + /* Projects */ + + /* Language and Environment */ + "target": "es2022", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ + // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ + // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ + "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ + // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */ + + /* Modules */ + "module": "commonjs", /* Specify what module code is generated. */ + // "rootDir": "./", /* Specify the root folder within your source files. */ + // "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. */ + // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ + // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */ + // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ + "typeRoots": ["types"], /* Specify multiple folders that act like './node_modules/@types'. */ + "resolveJsonModule": true, /* Enable importing .json files. */ + + /* JavaScript Support */ + "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. 
*/ + "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */ + "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */ + + /* Emit */ + "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ + "declarationMap": true, /* Create sourcemaps for d.ts files. */ + // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ + "noEmit": true, /* Disable emitting files from a compilation. */ + + /* Interop Constraints */ + "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ + "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */ + "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */ + + /* Type Checking */ + "strict": true, /* Enable all strict type-checking options. */ + "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */ + "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */ + "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */ + "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */ + "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */ + "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ + "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ + "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. 
*/ + "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ + // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */ + + /* Completeness */ + // "skipLibCheck": true /* Skip type checking all .d.ts files. */ + }, + "exclude": [ + "coverage" + ] +} diff --git a/data/node_modules/depd/History.md b/data/node_modules/depd/History.md new file mode 100644 index 0000000000000000000000000000000000000000..cd9ebaaa9963f794167f74e00a37d9ceb42e7b91 --- /dev/null +++ b/data/node_modules/depd/History.md @@ -0,0 +1,103 @@ +2.0.0 / 2018-10-26 +================== + + * Drop support for Node.js 0.6 + * Replace internal `eval` usage with `Function` constructor + * Use instance methods on `process` to check for listeners + +1.1.2 / 2018-01-11 +================== + + * perf: remove argument reassignment + * Support Node.js 0.6 to 9.x + +1.1.1 / 2017-07-27 +================== + + * Remove unnecessary `Buffer` loading + * Support Node.js 0.6 to 8.x + +1.1.0 / 2015-09-14 +================== + + * Enable strict mode in more places + * Support io.js 3.x + * Support io.js 2.x + * Support web browser loading + - Requires bundler like Browserify or webpack + +1.0.1 / 2015-04-07 +================== + + * Fix `TypeError`s when under `'use strict'` code + * Fix useless type name on auto-generated messages + * Support io.js 1.x + * Support Node.js 0.12 + +1.0.0 / 2014-09-17 +================== + + * No changes + +0.4.5 / 2014-09-09 +================== + + * Improve call speed to functions using the function wrapper + * Support Node.js 0.6 + +0.4.4 / 2014-07-27 +================== + + * Work-around v8 generating empty stack traces + +0.4.3 / 2014-07-26 +================== + + * Fix exception when global `Error.stackTraceLimit` is too low + +0.4.2 / 2014-07-19 +================== + + * Correct call site for wrapped functions and properties + +0.4.1 / 2014-07-19 
+================== + + * Improve automatic message generation for function properties + +0.4.0 / 2014-07-19 +================== + + * Add `TRACE_DEPRECATION` environment variable + * Remove non-standard grey color from color output + * Support `--no-deprecation` argument + * Support `--trace-deprecation` argument + * Support `deprecate.property(fn, prop, message)` + +0.3.0 / 2014-06-16 +================== + + * Add `NO_DEPRECATION` environment variable + +0.2.0 / 2014-06-15 +================== + + * Add `deprecate.property(obj, prop, message)` + * Remove `supports-color` dependency for node.js 0.8 + +0.1.0 / 2014-06-15 +================== + + * Add `deprecate.function(fn, message)` + * Add `process.on('deprecation', fn)` emitter + * Automatically generate message when omitted from `deprecate()` + +0.0.1 / 2014-06-15 +================== + + * Fix warning for dynamic calls at singe call site + +0.0.0 / 2014-06-15 +================== + + * Initial implementation diff --git a/data/node_modules/depd/LICENSE b/data/node_modules/depd/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..248de7af2bd16cc7f2b4d8017bbeb9e7a0b2ccd6 --- /dev/null +++ b/data/node_modules/depd/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2014-2018 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/data/node_modules/depd/Readme.md b/data/node_modules/depd/Readme.md new file mode 100644 index 0000000000000000000000000000000000000000..043d1ca28403a9460a22fcd403b559b90244458f --- /dev/null +++ b/data/node_modules/depd/Readme.md @@ -0,0 +1,280 @@ +# depd + +[![NPM Version][npm-version-image]][npm-url] +[![NPM Downloads][npm-downloads-image]][npm-url] +[![Node.js Version][node-image]][node-url] +[![Linux Build][travis-image]][travis-url] +[![Windows Build][appveyor-image]][appveyor-url] +[![Coverage Status][coveralls-image]][coveralls-url] + +Deprecate all the things + +> With great modules comes great responsibility; mark things deprecated! + +## Install + +This module is installed directly using `npm`: + +```sh +$ npm install depd +``` + +This module can also be bundled with systems like +[Browserify](http://browserify.org/) or [webpack](https://webpack.github.io/), +though by default this module will alter it's API to no longer display or +track deprecations. + +## API + + + +```js +var deprecate = require('depd')('my-module') +``` + +This library allows you to display deprecation messages to your users. +This library goes above and beyond with deprecation warnings by +introspection of the call stack (but only the bits that it is interested +in). 
+ +Instead of just warning on the first invocation of a deprecated +function and never again, this module will warn on the first invocation +of a deprecated function per unique call site, making it ideal to alert +users of all deprecated uses across the code base, rather than just +whatever happens to execute first. + +The deprecation warnings from this module also include the file and line +information for the call into the module that the deprecated function was +in. + +**NOTE** this library has a similar interface to the `debug` module, and +this module uses the calling file to get the boundary for the call stacks, +so you should always create a new `deprecate` object in each file and not +within some central file. + +### depd(namespace) + +Create a new deprecate function that uses the given namespace name in the +messages and will display the call site prior to the stack entering the +file this function was called from. It is highly suggested you use the +name of your module as the namespace. + +### deprecate(message) + +Call this function from deprecated code to display a deprecation message. +This message will appear once per unique caller site. Caller site is the +first call site in the stack in a different file from the caller of this +function. + +If the message is omitted, a message is generated for you based on the site +of the `deprecate()` call and will display the name of the function called, +similar to the name displayed in a stack trace. + +### deprecate.function(fn, message) + +Call this function to wrap a given function in a deprecation message on any +call to the function. An optional message can be supplied to provide a custom +message. + +### deprecate.property(obj, prop, message) + +Call this function to wrap a given property on object in a deprecation message +on any accessing or setting of the property. An optional message can be supplied +to provide a custom message. 
+ +The method must be called on the object where the property belongs (not +inherited from the prototype). + +If the property is a data descriptor, it will be converted to an accessor +descriptor in order to display the deprecation message. + +### process.on('deprecation', fn) + +This module will allow easy capturing of deprecation errors by emitting the +errors as the type "deprecation" on the global `process`. If there are no +listeners for this type, the errors are written to STDERR as normal, but if +there are any listeners, nothing will be written to STDERR and instead only +emitted. From there, you can write the errors in a different format or to a +logging source. + +The error represents the deprecation and is emitted only once with the same +rules as writing to STDERR. The error has the following properties: + + - `message` - This is the message given by the library + - `name` - This is always `'DeprecationError'` + - `namespace` - This is the namespace the deprecation came from + - `stack` - This is the stack of the call to the deprecated thing + +Example `error.stack` output: + +``` +DeprecationError: my-cool-module deprecated oldfunction + at Object. ([eval]-wrapper:6:22) + at Module._compile (module.js:456:26) + at evalScript (node.js:532:25) + at startup (node.js:80:7) + at node.js:902:3 +``` + +### process.env.NO_DEPRECATION + +As a user of modules that are deprecated, the environment variable `NO_DEPRECATION` +is provided as a quick solution to silencing deprecation warnings from being +output. The format of this is similar to that of `DEBUG`: + +```sh +$ NO_DEPRECATION=my-module,othermod node app.js +``` + +This will suppress deprecations from being output for "my-module" and "othermod". +The value is a list of comma-separated namespaces. To suppress every warning +across all namespaces, use the value `*` for a namespace. 
+ +Providing the argument `--no-deprecation` to the `node` executable will suppress +all deprecations (only available in Node.js 0.8 or higher). + +**NOTE** This will not suppress the deperecations given to any "deprecation" +event listeners, just the output to STDERR. + +### process.env.TRACE_DEPRECATION + +As a user of modules that are deprecated, the environment variable `TRACE_DEPRECATION` +is provided as a solution to getting more detailed location information in deprecation +warnings by including the entire stack trace. The format of this is the same as +`NO_DEPRECATION`: + +```sh +$ TRACE_DEPRECATION=my-module,othermod node app.js +``` + +This will include stack traces for deprecations being output for "my-module" and +"othermod". The value is a list of comma-separated namespaces. To trace every +warning across all namespaces, use the value `*` for a namespace. + +Providing the argument `--trace-deprecation` to the `node` executable will trace +all deprecations (only available in Node.js 0.8 or higher). + +**NOTE** This will not trace the deperecations silenced by `NO_DEPRECATION`. 
+ +## Display + +![message](files/message.png) + +When a user calls a function in your library that you mark deprecated, they +will see the following written to STDERR (in the given colors, similar colors +and layout to the `debug` module): + +``` +bright cyan bright yellow +| | reset cyan +| | | | +▼ ▼ ▼ ▼ +my-cool-module deprecated oldfunction [eval]-wrapper:6:22 +▲ ▲ ▲ ▲ +| | | | +namespace | | location of mycoolmod.oldfunction() call + | deprecation message + the word "deprecated" +``` + +If the user redirects their STDERR to a file or somewhere that does not support +colors, they see (similar layout to the `debug` module): + +``` +Sun, 15 Jun 2014 05:21:37 GMT my-cool-module deprecated oldfunction at [eval]-wrapper:6:22 +▲ ▲ ▲ ▲ ▲ +| | | | | +timestamp of message namespace | | location of mycoolmod.oldfunction() call + | deprecation message + the word "deprecated" +``` + +## Examples + +### Deprecating all calls to a function + +This will display a deprecated message about "oldfunction" being deprecated +from "my-module" on STDERR. + +```js +var deprecate = require('depd')('my-cool-module') + +// message automatically derived from function name +// Object.oldfunction +exports.oldfunction = deprecate.function(function oldfunction () { + // all calls to function are deprecated +}) + +// specific message +exports.oldfunction = deprecate.function(function () { + // all calls to function are deprecated +}, 'oldfunction') +``` + +### Conditionally deprecating a function call + +This will display a deprecated message about "weirdfunction" being deprecated +from "my-module" on STDERR when called with less than 2 arguments. 
+ +```js +var deprecate = require('depd')('my-cool-module') + +exports.weirdfunction = function () { + if (arguments.length < 2) { + // calls with 0 or 1 args are deprecated + deprecate('weirdfunction args < 2') + } +} +``` + +When calling `deprecate` as a function, the warning is counted per call site +within your own module, so you can display different deprecations depending +on different situations and the users will still get all the warnings: + +```js +var deprecate = require('depd')('my-cool-module') + +exports.weirdfunction = function () { + if (arguments.length < 2) { + // calls with 0 or 1 args are deprecated + deprecate('weirdfunction args < 2') + } else if (typeof arguments[0] !== 'string') { + // calls with non-string first argument are deprecated + deprecate('weirdfunction non-string first arg') + } +} +``` + +### Deprecating property access + +This will display a deprecated message about "oldprop" being deprecated +from "my-module" on STDERR when accessed. A deprecation will be displayed +when setting the value and when getting the value. 
+ +```js +var deprecate = require('depd')('my-cool-module') + +exports.oldprop = 'something' + +// message automatically derives from property name +deprecate.property(exports, 'oldprop') + +// explicit message +deprecate.property(exports, 'oldprop', 'oldprop >= 0.10') +``` + +## License + +[MIT](LICENSE) + +[appveyor-image]: https://badgen.net/appveyor/ci/dougwilson/nodejs-depd/master?label=windows +[appveyor-url]: https://ci.appveyor.com/project/dougwilson/nodejs-depd +[coveralls-image]: https://badgen.net/coveralls/c/github/dougwilson/nodejs-depd/master +[coveralls-url]: https://coveralls.io/r/dougwilson/nodejs-depd?branch=master +[node-image]: https://badgen.net/npm/node/depd +[node-url]: https://nodejs.org/en/download/ +[npm-downloads-image]: https://badgen.net/npm/dm/depd +[npm-url]: https://npmjs.org/package/depd +[npm-version-image]: https://badgen.net/npm/v/depd +[travis-image]: https://badgen.net/travis/dougwilson/nodejs-depd/master?label=linux +[travis-url]: https://travis-ci.org/dougwilson/nodejs-depd diff --git a/data/node_modules/depd/index.js b/data/node_modules/depd/index.js new file mode 100644 index 0000000000000000000000000000000000000000..1bf2fcfdeffc984e5ad792eec08744c29d4a4590 --- /dev/null +++ b/data/node_modules/depd/index.js @@ -0,0 +1,538 @@ +/*! + * depd + * Copyright(c) 2014-2018 Douglas Christopher Wilson + * MIT Licensed + */ + +/** + * Module dependencies. + */ + +var relative = require('path').relative + +/** + * Module exports. + */ + +module.exports = depd + +/** + * Get the path to base files on. + */ + +var basePath = process.cwd() + +/** + * Determine if namespace is contained in the string. 
+ */ + +function containsNamespace (str, namespace) { + var vals = str.split(/[ ,]+/) + var ns = String(namespace).toLowerCase() + + for (var i = 0; i < vals.length; i++) { + var val = vals[i] + + // namespace contained + if (val && (val === '*' || val.toLowerCase() === ns)) { + return true + } + } + + return false +} + +/** + * Convert a data descriptor to accessor descriptor. + */ + +function convertDataDescriptorToAccessor (obj, prop, message) { + var descriptor = Object.getOwnPropertyDescriptor(obj, prop) + var value = descriptor.value + + descriptor.get = function getter () { return value } + + if (descriptor.writable) { + descriptor.set = function setter (val) { return (value = val) } + } + + delete descriptor.value + delete descriptor.writable + + Object.defineProperty(obj, prop, descriptor) + + return descriptor +} + +/** + * Create arguments string to keep arity. + */ + +function createArgumentsString (arity) { + var str = '' + + for (var i = 0; i < arity; i++) { + str += ', arg' + i + } + + return str.substr(2) +} + +/** + * Create stack string from stack. + */ + +function createStackString (stack) { + var str = this.name + ': ' + this.namespace + + if (this.message) { + str += ' deprecated ' + this.message + } + + for (var i = 0; i < stack.length; i++) { + str += '\n at ' + stack[i].toString() + } + + return str +} + +/** + * Create deprecate for namespace in caller. 
+ */ + +function depd (namespace) { + if (!namespace) { + throw new TypeError('argument namespace is required') + } + + var stack = getStack() + var site = callSiteLocation(stack[1]) + var file = site[0] + + function deprecate (message) { + // call to self as log + log.call(deprecate, message) + } + + deprecate._file = file + deprecate._ignored = isignored(namespace) + deprecate._namespace = namespace + deprecate._traced = istraced(namespace) + deprecate._warned = Object.create(null) + + deprecate.function = wrapfunction + deprecate.property = wrapproperty + + return deprecate +} + +/** + * Determine if event emitter has listeners of a given type. + * + * The way to do this check is done three different ways in Node.js >= 0.8 + * so this consolidates them into a minimal set using instance methods. + * + * @param {EventEmitter} emitter + * @param {string} type + * @returns {boolean} + * @private + */ + +function eehaslisteners (emitter, type) { + var count = typeof emitter.listenerCount !== 'function' + ? emitter.listeners(type).length + : emitter.listenerCount(type) + + return count > 0 +} + +/** + * Determine if namespace is ignored. + */ + +function isignored (namespace) { + if (process.noDeprecation) { + // --no-deprecation support + return true + } + + var str = process.env.NO_DEPRECATION || '' + + // namespace ignored + return containsNamespace(str, namespace) +} + +/** + * Determine if namespace is traced. + */ + +function istraced (namespace) { + if (process.traceDeprecation) { + // --trace-deprecation support + return true + } + + var str = process.env.TRACE_DEPRECATION || '' + + // namespace traced + return containsNamespace(str, namespace) +} + +/** + * Display deprecation message. 
+ */ + +function log (message, site) { + var haslisteners = eehaslisteners(process, 'deprecation') + + // abort early if no destination + if (!haslisteners && this._ignored) { + return + } + + var caller + var callFile + var callSite + var depSite + var i = 0 + var seen = false + var stack = getStack() + var file = this._file + + if (site) { + // provided site + depSite = site + callSite = callSiteLocation(stack[1]) + callSite.name = depSite.name + file = callSite[0] + } else { + // get call site + i = 2 + depSite = callSiteLocation(stack[i]) + callSite = depSite + } + + // get caller of deprecated thing in relation to file + for (; i < stack.length; i++) { + caller = callSiteLocation(stack[i]) + callFile = caller[0] + + if (callFile === file) { + seen = true + } else if (callFile === this._file) { + file = this._file + } else if (seen) { + break + } + } + + var key = caller + ? depSite.join(':') + '__' + caller.join(':') + : undefined + + if (key !== undefined && key in this._warned) { + // already warned + return + } + + this._warned[key] = true + + // generate automatic message from call site + var msg = message + if (!msg) { + msg = callSite === depSite || !callSite.name + ? defaultMessage(depSite) + : defaultMessage(callSite) + } + + // emit deprecation if listeners exist + if (haslisteners) { + var err = DeprecationError(this._namespace, msg, stack.slice(i)) + process.emit('deprecation', err) + return + } + + // format and write message + var format = process.stderr.isTTY + ? formatColor + : formatPlain + var output = format.call(this, msg, caller, stack.slice(i)) + process.stderr.write(output + '\n', 'utf8') +} + +/** + * Get call site location as array. 
+ */ + +function callSiteLocation (callSite) { + var file = callSite.getFileName() || '' + var line = callSite.getLineNumber() + var colm = callSite.getColumnNumber() + + if (callSite.isEval()) { + file = callSite.getEvalOrigin() + ', ' + file + } + + var site = [file, line, colm] + + site.callSite = callSite + site.name = callSite.getFunctionName() + + return site +} + +/** + * Generate a default message from the site. + */ + +function defaultMessage (site) { + var callSite = site.callSite + var funcName = site.name + + // make useful anonymous name + if (!funcName) { + funcName = '' + } + + var context = callSite.getThis() + var typeName = context && callSite.getTypeName() + + // ignore useless type name + if (typeName === 'Object') { + typeName = undefined + } + + // make useful type name + if (typeName === 'Function') { + typeName = context.name || typeName + } + + return typeName && callSite.getMethodName() + ? typeName + '.' + funcName + : funcName +} + +/** + * Format deprecation message without color. + */ + +function formatPlain (msg, caller, stack) { + var timestamp = new Date().toUTCString() + + var formatted = timestamp + + ' ' + this._namespace + + ' deprecated ' + msg + + // add stack trace + if (this._traced) { + for (var i = 0; i < stack.length; i++) { + formatted += '\n at ' + stack[i].toString() + } + + return formatted + } + + if (caller) { + formatted += ' at ' + formatLocation(caller) + } + + return formatted +} + +/** + * Format deprecation message with color. 
+ */ + +function formatColor (msg, caller, stack) { + var formatted = '\x1b[36;1m' + this._namespace + '\x1b[22;39m' + // bold cyan + ' \x1b[33;1mdeprecated\x1b[22;39m' + // bold yellow + ' \x1b[0m' + msg + '\x1b[39m' // reset + + // add stack trace + if (this._traced) { + for (var i = 0; i < stack.length; i++) { + formatted += '\n \x1b[36mat ' + stack[i].toString() + '\x1b[39m' // cyan + } + + return formatted + } + + if (caller) { + formatted += ' \x1b[36m' + formatLocation(caller) + '\x1b[39m' // cyan + } + + return formatted +} + +/** + * Format call site location. + */ + +function formatLocation (callSite) { + return relative(basePath, callSite[0]) + + ':' + callSite[1] + + ':' + callSite[2] +} + +/** + * Get the stack as array of call sites. + */ + +function getStack () { + var limit = Error.stackTraceLimit + var obj = {} + var prep = Error.prepareStackTrace + + Error.prepareStackTrace = prepareObjectStackTrace + Error.stackTraceLimit = Math.max(10, limit) + + // capture the stack + Error.captureStackTrace(obj) + + // slice this function off the top + var stack = obj.stack.slice(1) + + Error.prepareStackTrace = prep + Error.stackTraceLimit = limit + + return stack +} + +/** + * Capture call site stack from v8. + */ + +function prepareObjectStackTrace (obj, stack) { + return stack +} + +/** + * Return a wrapped function in a deprecation message. 
+ */ + +function wrapfunction (fn, message) { + if (typeof fn !== 'function') { + throw new TypeError('argument fn must be a function') + } + + var args = createArgumentsString(fn.length) + var stack = getStack() + var site = callSiteLocation(stack[1]) + + site.name = fn.name + + // eslint-disable-next-line no-new-func + var deprecatedfn = new Function('fn', 'log', 'deprecate', 'message', 'site', + '"use strict"\n' + + 'return function (' + args + ') {' + + 'log.call(deprecate, message, site)\n' + + 'return fn.apply(this, arguments)\n' + + '}')(fn, log, this, message, site) + + return deprecatedfn +} + +/** + * Wrap property in a deprecation message. + */ + +function wrapproperty (obj, prop, message) { + if (!obj || (typeof obj !== 'object' && typeof obj !== 'function')) { + throw new TypeError('argument obj must be object') + } + + var descriptor = Object.getOwnPropertyDescriptor(obj, prop) + + if (!descriptor) { + throw new TypeError('must call property on owner object') + } + + if (!descriptor.configurable) { + throw new TypeError('property must be configurable') + } + + var deprecate = this + var stack = getStack() + var site = callSiteLocation(stack[1]) + + // set site name + site.name = prop + + // convert data descriptor + if ('value' in descriptor) { + descriptor = convertDataDescriptorToAccessor(obj, prop, message) + } + + var get = descriptor.get + var set = descriptor.set + + // wrap getter + if (typeof get === 'function') { + descriptor.get = function getter () { + log.call(deprecate, message, site) + return get.apply(this, arguments) + } + } + + // wrap setter + if (typeof set === 'function') { + descriptor.set = function setter () { + log.call(deprecate, message, site) + return set.apply(this, arguments) + } + } + + Object.defineProperty(obj, prop, descriptor) +} + +/** + * Create DeprecationError for deprecation + */ + +function DeprecationError (namespace, message, stack) { + var error = new Error() + var stackString + + Object.defineProperty(error, 
'constructor', { + value: DeprecationError + }) + + Object.defineProperty(error, 'message', { + configurable: true, + enumerable: false, + value: message, + writable: true + }) + + Object.defineProperty(error, 'name', { + enumerable: false, + configurable: true, + value: 'DeprecationError', + writable: true + }) + + Object.defineProperty(error, 'namespace', { + configurable: true, + enumerable: false, + value: namespace, + writable: true + }) + + Object.defineProperty(error, 'stack', { + configurable: true, + enumerable: false, + get: function () { + if (stackString !== undefined) { + return stackString + } + + // prepare stack trace + return (stackString = createStackString.call(this, stack)) + }, + set: function setter (val) { + stackString = val + } + }) + + return error +} diff --git a/data/node_modules/depd/lib/browser/index.js b/data/node_modules/depd/lib/browser/index.js new file mode 100644 index 0000000000000000000000000000000000000000..6be45cc20b33f20dcdc580b9709f1a4a20bb87a1 --- /dev/null +++ b/data/node_modules/depd/lib/browser/index.js @@ -0,0 +1,77 @@ +/*! + * depd + * Copyright(c) 2015 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module exports. + * @public + */ + +module.exports = depd + +/** + * Create deprecate for namespace in caller. + */ + +function depd (namespace) { + if (!namespace) { + throw new TypeError('argument namespace is required') + } + + function deprecate (message) { + // no-op in browser + } + + deprecate._file = undefined + deprecate._ignored = true + deprecate._namespace = namespace + deprecate._traced = false + deprecate._warned = Object.create(null) + + deprecate.function = wrapfunction + deprecate.property = wrapproperty + + return deprecate +} + +/** + * Return a wrapped function in a deprecation message. + * + * This is a no-op version of the wrapper, which does nothing but call + * validation. 
+ */ + +function wrapfunction (fn, message) { + if (typeof fn !== 'function') { + throw new TypeError('argument fn must be a function') + } + + return fn +} + +/** + * Wrap property in a deprecation message. + * + * This is a no-op version of the wrapper, which does nothing but call + * validation. + */ + +function wrapproperty (obj, prop, message) { + if (!obj || (typeof obj !== 'object' && typeof obj !== 'function')) { + throw new TypeError('argument obj must be object') + } + + var descriptor = Object.getOwnPropertyDescriptor(obj, prop) + + if (!descriptor) { + throw new TypeError('must call property on owner object') + } + + if (!descriptor.configurable) { + throw new TypeError('property must be configurable') + } +} diff --git a/data/node_modules/depd/package.json b/data/node_modules/depd/package.json new file mode 100644 index 0000000000000000000000000000000000000000..3857e199184a0a3f0d921e740525278af0f86ff3 --- /dev/null +++ b/data/node_modules/depd/package.json @@ -0,0 +1,45 @@ +{ + "name": "depd", + "description": "Deprecate all the things", + "version": "2.0.0", + "author": "Douglas Christopher Wilson ", + "license": "MIT", + "keywords": [ + "deprecate", + "deprecated" + ], + "repository": "dougwilson/nodejs-depd", + "browser": "lib/browser/index.js", + "devDependencies": { + "benchmark": "2.1.4", + "beautify-benchmark": "0.2.4", + "eslint": "5.7.0", + "eslint-config-standard": "12.0.0", + "eslint-plugin-import": "2.14.0", + "eslint-plugin-markdown": "1.0.0-beta.7", + "eslint-plugin-node": "7.0.1", + "eslint-plugin-promise": "4.0.1", + "eslint-plugin-standard": "4.0.0", + "istanbul": "0.4.5", + "mocha": "5.2.0", + "safe-buffer": "5.1.2", + "uid-safe": "2.1.5" + }, + "files": [ + "lib/", + "History.md", + "LICENSE", + "index.js", + "Readme.md" + ], + "engines": { + "node": ">= 0.8" + }, + "scripts": { + "bench": "node benchmark/index.js", + "lint": "eslint --plugin markdown --ext js,md .", + "test": "mocha --reporter spec --bail test/", + "test-ci": 
"istanbul cover --print=none node_modules/mocha/bin/_mocha -- --reporter spec test/ && istanbul report lcovonly text-summary", + "test-cov": "istanbul cover --print=none node_modules/mocha/bin/_mocha -- --reporter dot test/ && istanbul report lcov text-summary" + } +} diff --git a/data/node_modules/destroy/LICENSE b/data/node_modules/destroy/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..0e2c35f0ea23b51310f40689c96e3f8e1da8d3d4 --- /dev/null +++ b/data/node_modules/destroy/LICENSE @@ -0,0 +1,23 @@ + +The MIT License (MIT) + +Copyright (c) 2014 Jonathan Ong me@jongleberry.com +Copyright (c) 2015-2022 Douglas Christopher Wilson doug@somethingdoug.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/data/node_modules/destroy/README.md b/data/node_modules/destroy/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e7701aee746cafca9d5d09056d0b91071b4b5dc3 --- /dev/null +++ b/data/node_modules/destroy/README.md @@ -0,0 +1,63 @@ +# destroy + +[![NPM version][npm-image]][npm-url] +[![Build Status][github-actions-ci-image]][github-actions-ci-url] +[![Test coverage][coveralls-image]][coveralls-url] +[![License][license-image]][license-url] +[![Downloads][downloads-image]][downloads-url] + +Destroy a stream. + +This module is meant to ensure a stream gets destroyed, handling different APIs +and Node.js bugs. + +## API + +```js +var destroy = require('destroy') +``` + +### destroy(stream [, suppress]) + +Destroy the given stream, and optionally suppress any future `error` events. + +In most cases, this is identical to a simple `stream.destroy()` call. The rules +are as follows for a given stream: + + 1. If the `stream` is an instance of `ReadStream`, then call `stream.destroy()` + and add a listener to the `open` event to call `stream.close()` if it is + fired. This is for a Node.js bug that will leak a file descriptor if + `.destroy()` is called before `open`. + 2. If the `stream` is an instance of a zlib stream, then call `stream.destroy()` + and close the underlying zlib handle if open, otherwise call `stream.close()`. + This is for consistency across Node.js versions and a Node.js bug that will + leak a native zlib handle. + 3. If the `stream` is not an instance of `Stream`, then nothing happens. + 4. If the `stream` has a `.destroy()` method, then call it. + +The function returns the `stream` passed in as the argument. + +## Example + +```js +var destroy = require('destroy') + +var fs = require('fs') +var stream = fs.createReadStream('package.json') + +// ... 
and later +destroy(stream) +``` + +[npm-image]: https://img.shields.io/npm/v/destroy.svg?style=flat-square +[npm-url]: https://npmjs.org/package/destroy +[github-tag]: http://img.shields.io/github/tag/stream-utils/destroy.svg?style=flat-square +[github-url]: https://github.com/stream-utils/destroy/tags +[coveralls-image]: https://img.shields.io/coveralls/stream-utils/destroy.svg?style=flat-square +[coveralls-url]: https://coveralls.io/r/stream-utils/destroy?branch=master +[license-image]: http://img.shields.io/npm/l/destroy.svg?style=flat-square +[license-url]: LICENSE.md +[downloads-image]: http://img.shields.io/npm/dm/destroy.svg?style=flat-square +[downloads-url]: https://npmjs.org/package/destroy +[github-actions-ci-image]: https://img.shields.io/github/workflow/status/stream-utils/destroy/ci/master?label=ci&style=flat-square +[github-actions-ci-url]: https://github.com/stream-utils/destroy/actions/workflows/ci.yml diff --git a/data/node_modules/destroy/index.js b/data/node_modules/destroy/index.js new file mode 100644 index 0000000000000000000000000000000000000000..7fd5c09363796a9276e332ce6bb225d238fb0a85 --- /dev/null +++ b/data/node_modules/destroy/index.js @@ -0,0 +1,209 @@ +/*! + * destroy + * Copyright(c) 2014 Jonathan Ong + * Copyright(c) 2015-2022 Douglas Christopher Wilson + * MIT Licensed + */ + +'use strict' + +/** + * Module dependencies. + * @private + */ + +var EventEmitter = require('events').EventEmitter +var ReadStream = require('fs').ReadStream +var Stream = require('stream') +var Zlib = require('zlib') + +/** + * Module exports. + * @public + */ + +module.exports = destroy + +/** + * Destroy the given stream, and optionally suppress any future `error` events. 
+ * + * @param {object} stream + * @param {boolean} suppress + * @public + */ + +function destroy (stream, suppress) { + if (isFsReadStream(stream)) { + destroyReadStream(stream) + } else if (isZlibStream(stream)) { + destroyZlibStream(stream) + } else if (hasDestroy(stream)) { + stream.destroy() + } + + if (isEventEmitter(stream) && suppress) { + stream.removeAllListeners('error') + stream.addListener('error', noop) + } + + return stream +} + +/** + * Destroy a ReadStream. + * + * @param {object} stream + * @private + */ + +function destroyReadStream (stream) { + stream.destroy() + + if (typeof stream.close === 'function') { + // node.js core bug work-around + stream.on('open', onOpenClose) + } +} + +/** + * Close a Zlib stream. + * + * Zlib streams below Node.js 4.5.5 have a buggy implementation + * of .close() when zlib encountered an error. + * + * @param {object} stream + * @private + */ + +function closeZlibStream (stream) { + if (stream._hadError === true) { + var prop = stream._binding === null + ? '_binding' + : '_handle' + + stream[prop] = { + close: function () { this[prop] = null } + } + } + + stream.close() +} + +/** + * Destroy a Zlib stream. + * + * Zlib streams don't have a destroy function in Node.js 6. On top of that + * simply calling destroy on a zlib stream in Node.js 8+ will result in a + * memory leak. So until that is fixed, we need to call both close AND destroy. + * + * PR to fix memory leak: https://github.com/nodejs/node/pull/23734 + * + * In Node.js 6+8, it's important that destroy is called before close as the + * stream would otherwise emit the error 'zlib binding closed'. 
+ * + * @param {object} stream + * @private + */ + +function destroyZlibStream (stream) { + if (typeof stream.destroy === 'function') { + // node.js core bug work-around + // istanbul ignore if: node.js 0.8 + if (stream._binding) { + // node.js < 0.10.0 + stream.destroy() + if (stream._processing) { + stream._needDrain = true + stream.once('drain', onDrainClearBinding) + } else { + stream._binding.clear() + } + } else if (stream._destroy && stream._destroy !== Stream.Transform.prototype._destroy) { + // node.js >= 12, ^11.1.0, ^10.15.1 + stream.destroy() + } else if (stream._destroy && typeof stream.close === 'function') { + // node.js 7, 8 + stream.destroyed = true + stream.close() + } else { + // fallback + // istanbul ignore next + stream.destroy() + } + } else if (typeof stream.close === 'function') { + // node.js < 8 fallback + closeZlibStream(stream) + } +} + +/** + * Determine if stream has destroy. + * @private + */ + +function hasDestroy (stream) { + return stream instanceof Stream && + typeof stream.destroy === 'function' +} + +/** + * Determine if val is EventEmitter. + * @private + */ + +function isEventEmitter (val) { + return val instanceof EventEmitter +} + +/** + * Determine if stream is fs.ReadStream stream. + * @private + */ + +function isFsReadStream (stream) { + return stream instanceof ReadStream +} + +/** + * Determine if stream is Zlib stream. + * @private + */ + +function isZlibStream (stream) { + return stream instanceof Zlib.Gzip || + stream instanceof Zlib.Gunzip || + stream instanceof Zlib.Deflate || + stream instanceof Zlib.DeflateRaw || + stream instanceof Zlib.Inflate || + stream instanceof Zlib.InflateRaw || + stream instanceof Zlib.Unzip +} + +/** + * No-op function. + * @private + */ + +function noop () {} + +/** + * On drain handler to clear binding. + * @private + */ + +// istanbul ignore next: node.js 0.8 +function onDrainClearBinding () { + this._binding.clear() +} + +/** + * On open handler to close stream. 
+ * @private + */ + +function onOpenClose () { + if (typeof this.fd === 'number') { + // actually close down the fd + this.close() + } +} diff --git a/data/node_modules/destroy/package.json b/data/node_modules/destroy/package.json new file mode 100644 index 0000000000000000000000000000000000000000..c85e438378a65c39b3ccd4e0316d1855cf5b7887 --- /dev/null +++ b/data/node_modules/destroy/package.json @@ -0,0 +1,48 @@ +{ + "name": "destroy", + "description": "destroy a stream if possible", + "version": "1.2.0", + "author": { + "name": "Jonathan Ong", + "email": "me@jongleberry.com", + "url": "http://jongleberry.com", + "twitter": "https://twitter.com/jongleberry" + }, + "contributors": [ + "Douglas Christopher Wilson " + ], + "license": "MIT", + "repository": "stream-utils/destroy", + "devDependencies": { + "eslint": "7.32.0", + "eslint-config-standard": "14.1.1", + "eslint-plugin-import": "2.25.4", + "eslint-plugin-node": "11.1.0", + "eslint-plugin-promise": "5.2.0", + "eslint-plugin-standard": "4.1.0", + "mocha": "9.2.2", + "nyc": "15.1.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + }, + "scripts": { + "lint": "eslint .", + "test": "mocha --reporter spec", + "test-ci": "nyc --reporter=lcovonly --reporter=text npm test", + "test-cov": "nyc --reporter=html --reporter=text npm test" + }, + "files": [ + "index.js", + "LICENSE" + ], + "keywords": [ + "stream", + "streams", + "destroy", + "cleanup", + "leak", + "fd" + ] +} diff --git a/data/node_modules/dotenv/CHANGELOG.md b/data/node_modules/dotenv/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..e35152ae27046ea41693eb448deecc7efced6c83 --- /dev/null +++ b/data/node_modules/dotenv/CHANGELOG.md @@ -0,0 +1,475 @@ +# Changelog + +All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
+ +## [Unreleased](https://github.com/motdotla/dotenv/compare/v16.4.5...master) + +## [16.4.5](https://github.com/motdotla/dotenv/compare/v16.4.4...v16.4.5) (2024-02-19) + +### Changed + +- 🐞 fix recent regression when using `path` option. return to historical behavior: do not attempt to auto find `.env` if `path` set. (regression was introduced in `16.4.3`) [#814](https://github.com/motdotla/dotenv/pull/814) + +## [16.4.4](https://github.com/motdotla/dotenv/compare/v16.4.3...v16.4.4) (2024-02-13) + +### Changed + +- 🐞 Replaced chaining operator `?.` with old school `&&` (fixing node 12 failures) [#812](https://github.com/motdotla/dotenv/pull/812) + +## [16.4.3](https://github.com/motdotla/dotenv/compare/v16.4.2...v16.4.3) (2024-02-12) + +### Changed + +- Fixed processing of multiple files in `options.path` [#805](https://github.com/motdotla/dotenv/pull/805) + +## [16.4.2](https://github.com/motdotla/dotenv/compare/v16.4.1...v16.4.2) (2024-02-10) + +### Changed + +- Changed funding link in package.json to [`dotenvx.com`](https://dotenvx.com) + +## [16.4.1](https://github.com/motdotla/dotenv/compare/v16.4.0...v16.4.1) (2024-01-24) + +- Patch support for array as `path` option [#797](https://github.com/motdotla/dotenv/pull/797) + +## [16.4.0](https://github.com/motdotla/dotenv/compare/v16.3.2...v16.4.0) (2024-01-23) + +- Add `error.code` to error messages around `.env.vault` decryption handling [#795](https://github.com/motdotla/dotenv/pull/795) +- Add ability to find `.env.vault` file when filename(s) passed as an array [#784](https://github.com/motdotla/dotenv/pull/784) + +## [16.3.2](https://github.com/motdotla/dotenv/compare/v16.3.1...v16.3.2) (2024-01-18) + +### Added + +- Add debug message when no encoding set [#735](https://github.com/motdotla/dotenv/pull/735) + +### Changed + +- Fix output typing for `populate` [#792](https://github.com/motdotla/dotenv/pull/792) +- Use subarray instead of slice [#793](https://github.com/motdotla/dotenv/pull/793) + +## 
[16.3.1](https://github.com/motdotla/dotenv/compare/v16.3.0...v16.3.1) (2023-06-17) + +### Added + +- Add missing type definitions for `processEnv` and `DOTENV_KEY` options. [#756](https://github.com/motdotla/dotenv/pull/756) + +## [16.3.0](https://github.com/motdotla/dotenv/compare/v16.2.0...v16.3.0) (2023-06-16) + +### Added + +- Optionally pass `DOTENV_KEY` to options rather than relying on `process.env.DOTENV_KEY`. Defaults to `process.env.DOTENV_KEY` [#754](https://github.com/motdotla/dotenv/pull/754) + +## [16.2.0](https://github.com/motdotla/dotenv/compare/v16.1.4...v16.2.0) (2023-06-15) + +### Added + +- Optionally write to your own target object rather than `process.env`. Defaults to `process.env`. [#753](https://github.com/motdotla/dotenv/pull/753) +- Add import type URL to types file [#751](https://github.com/motdotla/dotenv/pull/751) + +## [16.1.4](https://github.com/motdotla/dotenv/compare/v16.1.3...v16.1.4) (2023-06-04) + +### Added + +- Added `.github/` to `.npmignore` [#747](https://github.com/motdotla/dotenv/pull/747) + +## [16.1.3](https://github.com/motdotla/dotenv/compare/v16.1.2...v16.1.3) (2023-05-31) + +### Removed + +- Removed `browser` keys for `path`, `os`, and `crypto` in package.json. These were set to false incorrectly as of 16.1. Instead, if using dotenv on the front-end make sure to include polyfills for `path`, `os`, and `crypto`. [node-polyfill-webpack-plugin](https://github.com/Richienb/node-polyfill-webpack-plugin) provides these. + +## [16.1.2](https://github.com/motdotla/dotenv/compare/v16.1.1...v16.1.2) (2023-05-31) + +### Changed + +- Exposed private function `_configDotenv` as `configDotenv`. 
[#744](https://github.com/motdotla/dotenv/pull/744) + +## [16.1.1](https://github.com/motdotla/dotenv/compare/v16.1.0...v16.1.1) (2023-05-30) + +### Added + +- Added type definition for `decrypt` function + +### Changed + +- Fixed `{crypto: false}` in `packageJson.browser` + +## [16.1.0](https://github.com/motdotla/dotenv/compare/v16.0.3...v16.1.0) (2023-05-30) + +### Added + +- Add `populate` convenience method [#733](https://github.com/motdotla/dotenv/pull/733) +- Accept URL as path option [#720](https://github.com/motdotla/dotenv/pull/720) +- Add dotenv to `npm fund` command +- Spanish language README [#698](https://github.com/motdotla/dotenv/pull/698) +- Add `.env.vault` support. 🎉 ([#730](https://github.com/motdotla/dotenv/pull/730)) + +ℹ️ `.env.vault` extends the `.env` file format standard with a localized encrypted vault file. Package it securely with your production code deploys. It's cloud agnostic so that you can deploy your secrets anywhere – without [risky third-party integrations](https://techcrunch.com/2023/01/05/circleci-breach/). 
[read more](https://github.com/motdotla/dotenv#-deploying) + +### Changed + +- Fixed "cannot resolve 'fs'" error on tools like Replit [#693](https://github.com/motdotla/dotenv/pull/693) + +## [16.0.3](https://github.com/motdotla/dotenv/compare/v16.0.2...v16.0.3) (2022-09-29) + +### Changed + +- Added library version to debug logs ([#682](https://github.com/motdotla/dotenv/pull/682)) + +## [16.0.2](https://github.com/motdotla/dotenv/compare/v16.0.1...v16.0.2) (2022-08-30) + +### Added + +- Export `env-options.js` and `cli-options.js` in package.json for use with downstream [dotenv-expand](https://github.com/motdotla/dotenv-expand) module + +## [16.0.1](https://github.com/motdotla/dotenv/compare/v16.0.0...v16.0.1) (2022-05-10) + +### Changed + +- Minor README clarifications +- Development ONLY: updated devDependencies as recommended for development only security risks ([#658](https://github.com/motdotla/dotenv/pull/658)) + +## [16.0.0](https://github.com/motdotla/dotenv/compare/v15.0.1...v16.0.0) (2022-02-02) + +### Added + +- _Breaking:_ Backtick support 🎉 ([#615](https://github.com/motdotla/dotenv/pull/615)) + +If you had values containing the backtick character, please quote those values with either single or double quotes. + +## [15.0.1](https://github.com/motdotla/dotenv/compare/v15.0.0...v15.0.1) (2022-02-02) + +### Changed + +- Properly parse empty single or double quoted values 🐞 ([#614](https://github.com/motdotla/dotenv/pull/614)) + +## [15.0.0](https://github.com/motdotla/dotenv/compare/v14.3.2...v15.0.0) (2022-01-31) + +`v15.0.0` is a major new release with some important breaking changes. + +### Added + +- _Breaking:_ Multiline parsing support (just works. no need for the flag.) + +### Changed + +- _Breaking:_ `#` marks the beginning of a comment (UNLESS the value is wrapped in quotes. Please update your `.env` files to wrap in quotes any values containing `#`. For example: `SECRET_HASH="something-with-a-#-hash"`). 
+ +..Understandably, (as some teams have noted) this is tedious to do across the entire team. To make it less tedious, we recommend using [dotenv cli](https://github.com/dotenv-org/cli) going forward. It's an optional plugin that will keep your `.env` files in sync between machines, environments, or team members. + +### Removed + +- _Breaking:_ Remove multiline option (just works out of the box now. no need for the flag.) + +## [14.3.2](https://github.com/motdotla/dotenv/compare/v14.3.1...v14.3.2) (2022-01-25) + +### Changed + +- Preserve backwards compatibility on values containing `#` 🐞 ([#603](https://github.com/motdotla/dotenv/pull/603)) + +## [14.3.1](https://github.com/motdotla/dotenv/compare/v14.3.0...v14.3.1) (2022-01-25) + +### Changed + +- Preserve backwards compatibility on exports by re-introducing the prior in-place exports 🐞 ([#606](https://github.com/motdotla/dotenv/pull/606)) + +## [14.3.0](https://github.com/motdotla/dotenv/compare/v14.2.0...v14.3.0) (2022-01-24) + +### Added + +- Add `multiline` option 🎉 ([#486](https://github.com/motdotla/dotenv/pull/486)) + +## [14.2.0](https://github.com/motdotla/dotenv/compare/v14.1.1...v14.2.0) (2022-01-17) + +### Added + +- Add `dotenv_config_override` cli option +- Add `DOTENV_CONFIG_OVERRIDE` command line env option + +## [14.1.1](https://github.com/motdotla/dotenv/compare/v14.1.0...v14.1.1) (2022-01-17) + +### Added + +- Add React gotcha to FAQ on README + +## [14.1.0](https://github.com/motdotla/dotenv/compare/v14.0.1...v14.1.0) (2022-01-17) + +### Added + +- Add `override` option 🎉 ([#595](https://github.com/motdotla/dotenv/pull/595)) + +## [14.0.1](https://github.com/motdotla/dotenv/compare/v14.0.0...v14.0.1) (2022-01-16) + +### Added + +- Log error on failure to load `.env` file ([#594](https://github.com/motdotla/dotenv/pull/594)) + +## [14.0.0](https://github.com/motdotla/dotenv/compare/v13.0.1...v14.0.0) (2022-01-16) + +### Added + +- _Breaking:_ Support inline comments for the parser 🎉 
([#568](https://github.com/motdotla/dotenv/pull/568)) + +## [13.0.1](https://github.com/motdotla/dotenv/compare/v13.0.0...v13.0.1) (2022-01-16) + +### Changed + +* Hide comments and newlines from debug output ([#404](https://github.com/motdotla/dotenv/pull/404)) + +## [13.0.0](https://github.com/motdotla/dotenv/compare/v12.0.4...v13.0.0) (2022-01-16) + +### Added + +* _Breaking:_ Add type file for `config.js` ([#539](https://github.com/motdotla/dotenv/pull/539)) + +## [12.0.4](https://github.com/motdotla/dotenv/compare/v12.0.3...v12.0.4) (2022-01-16) + +### Changed + +* README updates +* Minor order adjustment to package json format + +## [12.0.3](https://github.com/motdotla/dotenv/compare/v12.0.2...v12.0.3) (2022-01-15) + +### Changed + +* Simplified jsdoc for consistency across editors + +## [12.0.2](https://github.com/motdotla/dotenv/compare/v12.0.1...v12.0.2) (2022-01-15) + +### Changed + +* Improve embedded jsdoc type documentation + +## [12.0.1](https://github.com/motdotla/dotenv/compare/v12.0.0...v12.0.1) (2022-01-15) + +### Changed + +* README updates and clarifications + +## [12.0.0](https://github.com/motdotla/dotenv/compare/v11.0.0...v12.0.0) (2022-01-15) + +### Removed + +- _Breaking:_ drop support for Flow static type checker ([#584](https://github.com/motdotla/dotenv/pull/584)) + +### Changed + +- Move types/index.d.ts to lib/main.d.ts ([#585](https://github.com/motdotla/dotenv/pull/585)) +- Typescript cleanup ([#587](https://github.com/motdotla/dotenv/pull/587)) +- Explicit typescript inclusion in package.json ([#566](https://github.com/motdotla/dotenv/pull/566)) + +## [11.0.0](https://github.com/motdotla/dotenv/compare/v10.0.0...v11.0.0) (2022-01-11) + +### Changed + +- _Breaking:_ drop support for Node v10 ([#558](https://github.com/motdotla/dotenv/pull/558)) +- Patch debug option ([#550](https://github.com/motdotla/dotenv/pull/550)) + +## [10.0.0](https://github.com/motdotla/dotenv/compare/v9.0.2...v10.0.0) (2021-05-20) + +### Added + +- Add 
generic support to parse function +- Allow for import "dotenv/config.js" +- Add support to resolve home directory in path via ~ + +## [9.0.2](https://github.com/motdotla/dotenv/compare/v9.0.1...v9.0.2) (2021-05-10) + +### Changed + +- Support windows newlines with debug mode + +## [9.0.1](https://github.com/motdotla/dotenv/compare/v9.0.0...v9.0.1) (2021-05-08) + +### Changed + +- Updates to README + +## [9.0.0](https://github.com/motdotla/dotenv/compare/v8.6.0...v9.0.0) (2021-05-05) + +### Changed + +- _Breaking:_ drop support for Node v8 + +## [8.6.0](https://github.com/motdotla/dotenv/compare/v8.5.1...v8.6.0) (2021-05-05) + +### Added + +- define package.json in exports + +## [8.5.1](https://github.com/motdotla/dotenv/compare/v8.5.0...v8.5.1) (2021-05-05) + +### Changed + +- updated dev dependencies via npm audit + +## [8.5.0](https://github.com/motdotla/dotenv/compare/v8.4.0...v8.5.0) (2021-05-05) + +### Added + +- allow for `import "dotenv/config"` + +## [8.4.0](https://github.com/motdotla/dotenv/compare/v8.3.0...v8.4.0) (2021-05-05) + +### Changed + +- point to exact types file to work with VS Code + +## [8.3.0](https://github.com/motdotla/dotenv/compare/v8.2.0...v8.3.0) (2021-05-05) + +### Changed + +- _Breaking:_ drop support for Node v8 (mistake to be released as minor bump. later bumped to 9.0.0. see above.) 
+ +## [8.2.0](https://github.com/motdotla/dotenv/compare/v8.1.0...v8.2.0) (2019-10-16) + +### Added + +- TypeScript types + +## [8.1.0](https://github.com/motdotla/dotenv/compare/v8.0.0...v8.1.0) (2019-08-18) + +### Changed + +- _Breaking:_ drop support for Node v6 ([#392](https://github.com/motdotla/dotenv/issues/392)) + +# [8.0.0](https://github.com/motdotla/dotenv/compare/v7.0.0...v8.0.0) (2019-05-02) + +### Changed + +- _Breaking:_ drop support for Node v6 ([#302](https://github.com/motdotla/dotenv/issues/392)) + +## [7.0.0] - 2019-03-12 + +### Fixed + +- Fix removing unbalanced quotes ([#376](https://github.com/motdotla/dotenv/pull/376)) + +### Removed + +- Removed `load` alias for `config` for consistency throughout code and documentation. + +## [6.2.0] - 2018-12-03 + +### Added + +- Support preload configuration via environment variables ([#351](https://github.com/motdotla/dotenv/issues/351)) + +## [6.1.0] - 2018-10-08 + +### Added + +- `debug` option for `config` and `parse` methods will turn on logging + +## [6.0.0] - 2018-06-02 + +### Changed + +- _Breaking:_ drop support for Node v4 ([#304](https://github.com/motdotla/dotenv/pull/304)) + +## [5.0.0] - 2018-01-29 + +### Added + +- Testing against Node v8 and v9 +- Documentation on trim behavior of values +- Documentation on how to use with `import` + +### Changed + +- _Breaking_: default `path` is now `path.resolve(process.cwd(), '.env')` +- _Breaking_: does not write over keys already in `process.env` if the key has a falsy value +- using `const` and `let` instead of `var` + +### Removed + +- Testing against Node v7 + +## [4.0.0] - 2016-12-23 + +### Changed + +- Return Object with parsed content or error instead of false ([#165](https://github.com/motdotla/dotenv/pull/165)). + +### Removed + +- `verbose` option removed in favor of returning result. + +## [3.0.0] - 2016-12-20 + +### Added + +- `verbose` option will log any error messages. Off by default. 
+- parses email addresses correctly +- allow importing config method directly in ES6 + +### Changed + +- Suppress error messages by default ([#154](https://github.com/motdotla/dotenv/pull/154)) +- Ignoring more files for NPM to make package download smaller + +### Fixed + +- False positive test due to case-sensitive variable ([#124](https://github.com/motdotla/dotenv/pull/124)) + +### Removed + +- `silent` option removed in favor of `verbose` + +## [2.0.0] - 2016-01-20 + +### Added + +- CHANGELOG to ["make it easier for users and contributors to see precisely what notable changes have been made between each release"](http://keepachangelog.com/). Linked to from README +- LICENSE to be more explicit about what was defined in `package.json`. Linked to from README +- Testing nodejs v4 on travis-ci +- added examples of how to use dotenv in different ways +- return parsed object on success rather than boolean true + +### Changed + +- README has shorter description not referencing ruby gem since we don't have or want feature parity + +### Removed + +- Variable expansion and escaping so environment variables are encouraged to be fully orthogonal + +## [1.2.0] - 2015-06-20 + +### Added + +- Preload hook to require dotenv without including it in your code + +### Changed + +- clarified license to be "BSD-2-Clause" in `package.json` + +### Fixed + +- retain spaces in string vars + +## [1.1.0] - 2015-03-31 + +### Added + +- Silent option to silence `console.log` when `.env` missing + +## [1.0.0] - 2015-03-13 + +### Removed + +- support for multiple `.env` files. 
should always use one `.env` file for the current environment + +[7.0.0]: https://github.com/motdotla/dotenv/compare/v6.2.0...v7.0.0 +[6.2.0]: https://github.com/motdotla/dotenv/compare/v6.1.0...v6.2.0 +[6.1.0]: https://github.com/motdotla/dotenv/compare/v6.0.0...v6.1.0 +[6.0.0]: https://github.com/motdotla/dotenv/compare/v5.0.0...v6.0.0 +[5.0.0]: https://github.com/motdotla/dotenv/compare/v4.0.0...v5.0.0 +[4.0.0]: https://github.com/motdotla/dotenv/compare/v3.0.0...v4.0.0 +[3.0.0]: https://github.com/motdotla/dotenv/compare/v2.0.0...v3.0.0 +[2.0.0]: https://github.com/motdotla/dotenv/compare/v1.2.0...v2.0.0 +[1.2.0]: https://github.com/motdotla/dotenv/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/motdotla/dotenv/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/motdotla/dotenv/compare/v0.4.0...v1.0.0 diff --git a/data/node_modules/dotenv/LICENSE b/data/node_modules/dotenv/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c430ad8bd06f2c6495641c83b78c910f2f53c837 --- /dev/null +++ b/data/node_modules/dotenv/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Scott Motte +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/data/node_modules/dotenv/README-es.md b/data/node_modules/dotenv/README-es.md new file mode 100644 index 0000000000000000000000000000000000000000..154c1390944f88cd012f59c271140c2df0231d8c --- /dev/null +++ b/data/node_modules/dotenv/README-es.md @@ -0,0 +1,448 @@ +
+🎉 announcing dotenvx. run anywhere, multi-environment, encrypted envs. +
+ +  + +
+ +

+ + Dotenv es apoyado por la comunidad. + +

+Gracias especiales a: +
+
+ +
+ Warp +
+ Warp es una rápida e impresionante terminal basada en Rust, reinventada para funcionar como una aplicación moderna. +
+ Haga más en la CLI con edición de texto real, resultado basado en bloques, y búsqueda de comandos de IA. +
+
+
+ +
+ Retool +
+ Retool ayuda a los desarrolladores a crear software interno personalizado, como aplicaciones CRUD y paneles de administración, realmente rápido. +
+ Construya Interfaces de Usuario de forma visual con componentes flexibles, conéctese a cualquier fuente de datos, y escriba lógica de negocio en JavaScript. +
+
+
+ +
+ WorkOS +
+ Su Aplicación, Lista para la Empresa. +
+ Agrega Inicio de Sesión Único, Autenticación Multi-Factor, y mucho más, en minutos en lugar de meses. +
+
+
+
+
+
+
+ +
+ +# dotenv [![NPM version](https://img.shields.io/npm/v/dotenv.svg?style=flat-square)](https://www.npmjs.com/package/dotenv) + +dotenv + +Dotenv es un módulo de dependencia cero que carga las variables de entorno desde un archivo `.env` en [`process.env`](https://nodejs.org/docs/latest/api/process.html#process_process_env). El almacenamiento de la configuración del entorno separado del código está basado en la metodología [The Twelve-Factor App](http://12factor.net/config). + +[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/feross/standard) +[![LICENSE](https://img.shields.io/github/license/motdotla/dotenv.svg)](LICENSE) + +## Instalación + +```bash +# instalación local (recomendado) +npm install dotenv --save +``` + +O installación con yarn? `yarn add dotenv` + +## Uso + +Cree un archivo `.env` en la raíz de su proyecto: + +```dosini +S3_BUCKET="YOURS3BUCKET" +SECRET_KEY="YOURSECRETKEYGOESHERE" +``` + +Tan prónto como sea posible en su aplicación, importe y configure dotenv: + +```javascript +require('dotenv').config() +console.log(process.env) // elimine esto después que haya confirmado que esta funcionando +``` + +.. o usa ES6? + +```javascript +import * as dotenv from 'dotenv' // vea en https://github.com/motdotla/dotenv#como-uso-dotenv-con-import +// REVISAR LINK DE REFERENCIA DE IMPORTACIÓN +dotenv.config() +import express from 'express' +``` + +Eso es todo. `process.env` ahora tiene las claves y los valores que definiste en tu archivo `.env`: + +```javascript +require('dotenv').config() + +... + +s3.getBucketCors({Bucket: process.env.S3_BUCKET}, function(err, data) {}) +``` + +### Valores multilínea + +Si necesita variables de varias líneas, por ejemplo, claves privadas, ahora se admiten en la versión (`>= v15.0.0`) con saltos de línea: + +```dosini +PRIVATE_KEY="-----BEGIN RSA PRIVATE KEY----- +... +Kh9NV... +... 
+-----END RSA PRIVATE KEY-----" +``` + +Alternativamente, puede usar comillas dobles y usar el carácter `\n`: + +```dosini +PRIVATE_KEY="-----BEGIN RSA PRIVATE KEY-----\nKh9NV...\n-----END RSA PRIVATE KEY-----\n" +``` + +### Comentarios + +Los comentarios pueden ser agregados en tu archivo o en la misma línea: + +```dosini +# This is a comment +SECRET_KEY=YOURSECRETKEYGOESHERE # comment +SECRET_HASH="something-with-a-#-hash" +``` + +Los comentarios comienzan donde existe un `#`, entonces, si su valor contiene un `#`, enciérrelo entre comillas. Este es un cambio importante desde la versión `>= v15.0.0` en adelante. + +### Análisis + +El motor que analiza el contenido de su archivo que contiene variables de entorno está disponible para su uso. Este Acepta una Cadena o un Búfer y devolverá un Objeto con las claves y los valores analizados. + +```javascript +const dotenv = require('dotenv') +const buf = Buffer.from('BASICO=basico') +const config = dotenv.parse(buf) // devolverá un objeto +console.log(typeof config, config) // objeto { BASICO : 'basico' } +``` + +### Precarga + +Puede usar el `--require` (`-r`) [opción de línea de comando](https://nodejs.org/api/cli.html#-r---require-module) para precargar dotenv. Al hacer esto, no necesita requerir ni cargar dotnev en el código de su aplicación. + +```bash +$ node -r dotenv/config tu_script.js +``` + +Las opciones de configuración a continuación se admiten como argumentos de línea de comandos en el formato `dotenv_config_