id
stringlengths
53
86
api_name
stringlengths
2
76
api_description
stringlengths
1
500
api_score
float64
0
10
endpoint_name
stringlengths
1
190
endpoint_description
stringlengths
0
500
response_status_code
int64
100
505
response_summary
stringlengths
1
68
response_json
stringlengths
6
50k
response_json_schema
stringlengths
14
150k
f7a8fe41-af32-4441-b399-80de9b7a6fd1/42e8e6d2-70a9-4062-bd5a-4699d5182c9c/0/0
Remoote Job Search
Remoote Job Search
null
list_countries_countries_get
401
null
{"code": 0, "message": "Unauthorized!"}
{"properties": {"code": {"default": -1, "description": "Status Code", "title": "Code", "type": "integer"}, "message": {"default": "Unauthorized!", "title": "Message", "type": "string"}}, "title": "Unauthorized", "type": "object"}
f7a8fe41-af32-4441-b399-80de9b7a6fd1/42e8e6d2-70a9-4062-bd5a-4699d5182c9c/1/0
Remoote Job Search
Remoote Job Search
null
list_countries_countries_get
200
null
{"countries": [{"alpha2": "", "id": 0, "name": ""}]}
{"$defs": {"CountryModel": {"properties": {"alpha2": {"title": "Alpha2", "type": "string"}, "id": {"title": "Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["id", "alpha2", "name"], "title": "CountryModel", "type": "object"}}, "properties": {"countries": {"items": {"properties": {"alpha2": {"title": "Alpha2", "type": "string"}, "id": {"title": "Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["id", "alpha2", "name"], "title": "CountryModel", "type": "object"}, "title": "Countries", "type": "array"}}, "required": ["countries"], "title": "CountryListResponseModel", "type": "object"}
f7a8fe41-af32-4441-b399-80de9b7a6fd1/42e8e6d2-70a9-4062-bd5a-4699d5182c9c/2/0
Remoote Job Search
Remoote Job Search
null
list_countries_countries_get
422
null
[{"ctx": {}, "loc": {}, "msg": {}, "type_": {}}]
{"items": {"properties": {"ctx": {"anyOf": [{"type": "object"}, {"type": "null"}], "description": "an optional object which contains values required to render the error message.", "title": "Error context"}, "loc": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "description": "the error's location as a list. ", "title": "Location"}, "msg": {"anyOf": [{"type": "string"}, {"type": "null"}], "description": "a computer-readable identifier of the error type.", "title": "Message"}, "type_": {"anyOf": [{"type": "string"}, {"type": "null"}], "description": "a human readable explanation of the error.", "title": "Error Type"}}, "title": "ValidationErrorModel", "type": "object"}, "type": "array"}
f7a8fe41-af32-4441-b399-80de9b7a6fd1/5f7a7802-3cec-4845-8674-745089085430/0/0
Remoote Job Search
Remoote Job Search
null
list_jobs_jobs_get
422
null
[{"ctx": {}, "loc": {}, "msg": {}, "type_": {}}]
{"items": {"properties": {"ctx": {"anyOf": [{"type": "object"}, {"type": "null"}], "description": "an optional object which contains values required to render the error message.", "title": "Error context"}, "loc": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "description": "the error's location as a list. ", "title": "Location"}, "msg": {"anyOf": [{"type": "string"}, {"type": "null"}], "description": "a computer-readable identifier of the error type.", "title": "Message"}, "type_": {"anyOf": [{"type": "string"}, {"type": "null"}], "description": "a human readable explanation of the error.", "title": "Error Type"}}, "title": "ValidationErrorModel", "type": "object"}, "type": "array"}
f7a8fe41-af32-4441-b399-80de9b7a6fd1/5f7a7802-3cec-4845-8674-745089085430/1/0
Remoote Job Search
Remoote Job Search
null
list_jobs_jobs_get
200
null
{"jobs": [{"apply_url": {}, "countries": [{"alpha2": "", "id": 0, "name": ""}], "currency": {}, "description": {}, "geo_raw": {}, "id": 0, "industry_raw": {}, "is_remote": {}, "max_salary": {}, "min_salary": {}, "requirements_raw": {}, "responsibilities_raw": {}, "salary_range_raw": {}, "skills": [{"id": 0, "name": ""}], "title": {}, "url": {}}], "search_id": {}, "total": 0}
{"$defs": {"CountryModel": {"properties": {"alpha2": {"title": "Alpha2", "type": "string"}, "id": {"title": "Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["id", "alpha2", "name"], "title": "CountryModel", "type": "object"}, "JobModel": {"properties": {"apply_url": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Apply Url"}, "countries": {"items": {"properties": {"alpha2": {"title": "Alpha2", "type": "string"}, "id": {"title": "Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["id", "alpha2", "name"], "title": "CountryModel", "type": "object"}, "title": "Countries", "type": "array"}, "currency": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Currency"}, "description": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Description"}, "geo_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Geo Raw"}, "id": {"title": "Id", "type": "integer"}, "industry_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Industry Raw"}, "is_remote": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Is Remote"}, "max_salary": {"anyOf": [{"type": "number"}, {"type": "null"}], "title": "Max Salary"}, "min_salary": {"anyOf": [{"type": "number"}, {"type": "null"}], "title": "Min Salary"}, "requirements_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Requirements Raw"}, "responsibilities_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Responsibilities Raw"}, "salary_range_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Salary Range Raw"}, "skills": {"items": {"properties": {"id": {"title": "Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["id", "name"], "title": "SkillModel", "type": "object"}, "title": "Skills", "type": "array"}, "title": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Title"}, "url": {"anyOf": [{"type": "string"}, {"type": 
"null"}], "title": "Url"}}, "required": ["id", "url", "apply_url", "title", "description", "salary_range_raw", "is_remote", "min_salary", "max_salary", "currency", "requirements_raw", "responsibilities_raw", "industry_raw", "geo_raw", "skills", "countries"], "title": "JobModel", "type": "object"}, "SkillModel": {"properties": {"id": {"title": "Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["id", "name"], "title": "SkillModel", "type": "object"}}, "properties": {"jobs": {"items": {"properties": {"apply_url": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Apply Url"}, "countries": {"items": {"properties": {"alpha2": {"title": "Alpha2", "type": "string"}, "id": {"title": "Id", "type": "integer"}, "name": {"title": "Name", "type": "string"}}, "required": ["id", "alpha2", "name"], "title": "CountryModel", "type": "object"}, "title": "Countries", "type": "array"}, "currency": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Currency"}, "description": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Description"}, "geo_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Geo Raw"}, "id": {"title": "Id", "type": "integer"}, "industry_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Industry Raw"}, "is_remote": {"anyOf": [{"type": "boolean"}, {"type": "null"}], "title": "Is Remote"}, "max_salary": {"anyOf": [{"type": "number"}, {"type": "null"}], "title": "Max Salary"}, "min_salary": {"anyOf": [{"type": "number"}, {"type": "null"}], "title": "Min Salary"}, "requirements_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Requirements Raw"}, "responsibilities_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Responsibilities Raw"}, "salary_range_raw": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Salary Range Raw"}, "skills": {"items": {"properties": {"id": {"title": "Id", "type": "integer"}, "name": {"title": "Name", "type": 
"string"}}, "required": ["id", "name"], "title": "SkillModel", "type": "object"}, "title": "Skills", "type": "array"}, "title": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Title"}, "url": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Url"}}, "required": ["id", "url", "apply_url", "title", "description", "salary_range_raw", "is_remote", "min_salary", "max_salary", "currency", "requirements_raw", "responsibilities_raw", "industry_raw", "geo_raw", "skills", "countries"], "title": "JobModel", "type": "object"}, "title": "Jobs", "type": "array"}, "search_id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Search Id"}, "total": {"title": "Total", "type": "integer"}}, "required": ["jobs", "total", "search_id"], "title": "JobListResponseModel", "type": "object"}
f7a8fe41-af32-4441-b399-80de9b7a6fd1/5f7a7802-3cec-4845-8674-745089085430/2/0
Remoote Job Search
Remoote Job Search
null
list_jobs_jobs_get
401
null
{"code": 0, "message": "Unauthorized!"}
{"properties": {"code": {"default": -1, "description": "Status Code", "title": "Code", "type": "integer"}, "message": {"default": "Unauthorized!", "title": "Message", "type": "string"}}, "title": "Unauthorized", "type": "object"}
f7a8fe41-af32-4441-b399-80de9b7a6fd1/a386fccf-552b-43d8-8062-a91f4dc84eeb/0/0
Remoote Job Search
Remoote Job Search
null
list_titles_titles_get
422
null
[{"ctx": {}, "loc": {}, "msg": {}, "type_": {}}]
{"items": {"properties": {"ctx": {"anyOf": [{"type": "object"}, {"type": "null"}], "description": "an optional object which contains values required to render the error message.", "title": "Error context"}, "loc": {"anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], "description": "the error's location as a list. ", "title": "Location"}, "msg": {"anyOf": [{"type": "string"}, {"type": "null"}], "description": "a computer-readable identifier of the error type.", "title": "Message"}, "type_": {"anyOf": [{"type": "string"}, {"type": "null"}], "description": "a human readable explanation of the error.", "title": "Error Type"}}, "title": "ValidationErrorModel", "type": "object"}, "type": "array"}
f7a8fe41-af32-4441-b399-80de9b7a6fd1/a386fccf-552b-43d8-8062-a91f4dc84eeb/1/0
Remoote Job Search
Remoote Job Search
null
list_titles_titles_get
200
null
{"titles": [{"name": ""}]}
{"$defs": {"TitleModel": {"properties": {"name": {"title": "Name", "type": "string"}}, "required": ["name"], "title": "TitleModel", "type": "object"}}, "properties": {"titles": {"items": {"properties": {"name": {"title": "Name", "type": "string"}}, "required": ["name"], "title": "TitleModel", "type": "object"}, "title": "Titles", "type": "array"}}, "required": ["titles"], "title": "TitleListResponseModel", "type": "object"}
f7a8fe41-af32-4441-b399-80de9b7a6fd1/a386fccf-552b-43d8-8062-a91f4dc84eeb/2/0
Remoote Job Search
Remoote Job Search
null
list_titles_titles_get
401
null
{"code": 0, "message": "Unauthorized!"}
{"properties": {"code": {"default": -1, "description": "Status Code", "title": "Code", "type": "integer"}, "message": {"default": "Unauthorized!", "title": "Message", "type": "string"}}, "title": "Unauthorized", "type": "object"}
b54ee136-3c62-43e4-a700-95245357e32c/a4e05985-fc21-4687-b79a-994b5d157583/0/0
name-gender
name to gender prediction
7.1
predict
Run predictions
200
Response
{"response": [{"name": "Joe", "boy_or_girl": "boy", "probability": 0.56}, {"name": "Biden", "boy_or_girl": "boy", "probability": 0.71}, {"name": "kamala", "boy_or_girl": "girl", "probability": 0.91}, {"name": "harris", "boy_or_girl": "boy", "probability": 0.74}]}
{"type": "object", "properties": {"response": {"type": "array", "items": {"type": "object", "properties": {"name": {"type": "string"}, "boy_or_girl": {"type": "string"}, "probability": {"type": "number"}}}}}}
27391a93-c2b7-4152-843b-3bc9e54f8ed8/11bf44ee-fd24-470e-b95e-20075e09b3b6/0/0
Keyword Extractor API
Extract important keywords or key phrases from textual content. With this API, you can easily integrate keyword extraction capabilities into your applications, platforms, or services, enabling you to extract relevant keywords that can be used for various purposes such as content analysis, search engine optimization (SEO), text summarization, or data categorization.
8.1
Extract Keywords URL
Extract Keywords from a URL
200
Success
{"status": "ok", "error": null, "data": ["tiberius", "wikipedia", "short", "descriptionshort", "description", "matches", "wikidatafeatured", "articlesarticles", "textarticles", "textinstances", "unnamed", "sources", "dearticles", "viaf", "identifiersarticles", "worldcat", "bnfdata", "byzantine", "emperorsbyzantine", "deathstwenty", "years", "empireexecuted", "monarchs", "people", "executed", "jump", "content", "main", "menu", "move", "sidebar", "hide", "navigation", "pagecontentscurrent", "eventsrandom", "articleabout", "wikipediacontact", "usdonate", "contribute", "helplearn", "editcommunity", "portalrecent", "changesupload", "file", "languages", "language", "links", "page", "title", "search", "create", "accountlog", "personal", "tools", "account", "pages", "logged", "editors", "learn", "contributionstalk", "contents", "toggle", "history", "subsection", "life", "references", "table", "srpskisrpskohrvatski", "edit", "article", "talk", "english", "read", "view", "source", "actions", "readview", "sourceview", "general", "hererelated", "filespecial", "pagespermanent", "linkpage", "informationcite", "pagewikidata", "item", "download", "pdfprintable", "version", "projects", "wikimedia", "commons", "free", "encyclopedia", "emperor", "infobox", "iiiemperor", "romanssolidus", "bearing", "image"]}
{"type": "object", "properties": {"status": {"type": "string"}, "error": {"type": "null"}, "data": {"type": "array", "items": {"type": "string"}}}}
27391a93-c2b7-4152-843b-3bc9e54f8ed8/c496e4a5-3abd-4c53-b634-7243f82dae58/0/0
Keyword Extractor API
Extract important keywords or key phrases from textual content. With this API, you can easily integrate keyword extraction capabilities into your applications, platforms, or services, enabling you to extract relevant keywords that can be used for various purposes such as content analysis, search engine optimization (SEO), text summarization, or data categorization.
8.1
Extract Keywords Text
Extract Keywords from a body of text
200
Success
{"status": "ok", "error": null, "data": ["lawsuit", "proceeding", "parties", "plaintiff", "claimant", "defendant", "civil", "court", "archaic", "term", "suit", "found", "small", "number", "laws", "effect", "today", "respect", "action", "brought", "party", "claims", "incurred", "loss", "result", "actions", "requests", "legal", "remedy", "equitable", "required", "respond", "complaint", "risk", "default", "judgment", "successful", "entered", "favor", "variety", "orders", "issued", "connection", "part", "enforce", "award", "damages", "restitution", "impose", "temporary", "permanent", "injunction", "prevent", "compel", "declaratory", "future", "disputes"]}
{"type": "object", "properties": {"status": {"type": "string"}, "error": {"type": "null"}, "data": {"type": "array", "items": {"type": "string"}}}}
d17524a2-c4b9-4902-b50b-ae30d2c2bb37/4af1f690-b873-4e80-b67b-32e2517c2ddc/0/0
DocuExprt
DocuExprt
null
/CustomDocument/Verify
Used To Verify Custom Documents.
200
Response
{"SeatNumber": "F125643"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"SeatNumber": {"type": "string"}}, "required": ["SeatNumber"]}
d17524a2-c4b9-4902-b50b-ae30d2c2bb37/f8b0b25b-2b93-4c4a-92d2-81ec3abaf412/0/0
DocuExprt
DocuExprt
null
/Document/PanCardVerify
Used to Verify Pan Card.
200
Response
{"FileName": "https://eklavvyablob.blob.core.windows.net/ssc-result/AGRIUG21024276_28968_XMarksheet.pdf", "status": "Success", "ApplicationID": null, "SeatNumber": "F125643", "CenterNumber": "5205", "DistAndSchoolNumber": "23.06.052", "MonthAndYearExam": "MARCH-2019", "SrNumberOfStatement": "101100", "CandidateFullName": "Chougule Priyanka Rajaram", "CandidateMotherName": "Manisha", "Percentage": 82.8, "Result": "PASS", "ObtainedMarks": "414", "TotalMarks": 500, "SubjectList": [{"Subject": "MARATHI", "Marks": 77}, {"Subject": "HINDI", "Marks": 82}, {"Subject": "ENGLISH", "Marks": 89}, {"Subject": "MATHEMATICS", "Marks": 75}, {"Subject": "SCIENCE & TECHNOLOGY", "Marks": 68}, {"Subject": "SOCIAL SCIENCES", "Marks": 91}]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"FileName": {"type": "string"}, "status": {"type": "string"}, "ApplicationID": {"type": "null"}, "SeatNumber": {"type": "string"}, "CenterNumber": {"type": "string"}, "DistAndSchoolNumber": {"type": "string"}, "MonthAndYearExam": {"type": "string"}, "SrNumberOfStatement": {"type": "string"}, "CandidateFullName": {"type": "string"}, "CandidateMotherName": {"type": "string"}, "Percentage": {"type": "number"}, "Result": {"type": "string"}, "ObtainedMarks": {"type": "string"}, "TotalMarks": {"type": "integer"}, "SubjectList": {"type": "array", "items": {"type": "object", "properties": {"Subject": {"type": "string"}, "Marks": {"type": "integer"}}, "required": ["Marks", "Subject"]}}}, "required": ["ApplicationID", "CandidateFullName", "CandidateMotherName", "CenterNumber", "DistAndSchoolNumber", "FileName", "MonthAndYearExam", "ObtainedMarks", "Percentage", "Result", "SeatNumber", "SrNumberOfStatement", "SubjectList", "TotalMarks", "status"]}
d17524a2-c4b9-4902-b50b-ae30d2c2bb37/0a38c7be-2a63-4c73-9b5e-5e9827fc30a5/0/0
DocuExprt
DocuExprt
null
/Document/AadharCardVerify
Used to Verify Aadhar Card.
200
Response
{"FileName": "https://eklavvyablob.blob.core.windows.net/ssc-result/AGRIUG21024276_28968_XMarksheet.pdf", "status": "Success", "ApplicationID": null, "SeatNumber": "F125643", "CenterNumber": "5205", "DistAndSchoolNumber": "23.06.052", "MonthAndYearExam": "MARCH-2019", "SrNumberOfStatement": "101100", "CandidateFullName": "Chougule Priyanka Rajaram", "CandidateMotherName": "Manisha", "Percentage": 82.8, "Result": "PASS", "ObtainedMarks": "414", "TotalMarks": 500, "SubjectList": [{"Subject": "MARATHI", "Marks": 77}, {"Subject": "HINDI", "Marks": 82}, {"Subject": "ENGLISH", "Marks": 89}, {"Subject": "MATHEMATICS", "Marks": 75}, {"Subject": "SCIENCE & TECHNOLOGY", "Marks": 68}, {"Subject": "SOCIAL SCIENCES", "Marks": 91}]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"FileName": {"type": "string"}, "status": {"type": "string"}, "ApplicationID": {"type": "null"}, "SeatNumber": {"type": "string"}, "CenterNumber": {"type": "string"}, "DistAndSchoolNumber": {"type": "string"}, "MonthAndYearExam": {"type": "string"}, "SrNumberOfStatement": {"type": "string"}, "CandidateFullName": {"type": "string"}, "CandidateMotherName": {"type": "string"}, "Percentage": {"type": "number"}, "Result": {"type": "string"}, "ObtainedMarks": {"type": "string"}, "TotalMarks": {"type": "integer"}, "SubjectList": {"type": "array", "items": {"type": "object", "properties": {"Subject": {"type": "string"}, "Marks": {"type": "integer"}}, "required": ["Marks", "Subject"]}}}, "required": ["ApplicationID", "CandidateFullName", "CandidateMotherName", "CenterNumber", "DistAndSchoolNumber", "FileName", "MonthAndYearExam", "ObtainedMarks", "Percentage", "Result", "SeatNumber", "SrNumberOfStatement", "SubjectList", "TotalMarks", "status"]}
d17524a2-c4b9-4902-b50b-ae30d2c2bb37/f2f7818e-b706-4227-8732-1b2f28cb3b10/0/0
DocuExprt
DocuExprt
null
/Document/GetDocumentList
Get Authorized Document List.
200
Response
{"Document Names": "DoNotMessAround,SSC"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"Document Names": {"type": "string"}}, "required": ["Document Names"]}
d17524a2-c4b9-4902-b50b-ae30d2c2bb37/c04990f9-ca09-4186-97be-388402f1ec89/0/0
DocuExprt
DocuExprt
null
/Document/Verify
Used to Verify Document and Extract Data
200
Response
{"FileName": "https://eklavvyablob.blob.core.windows.net/ssc-result/AGRIUG21024276_28968_XMarksheet.pdf", "status": "Success", "ApplicationID": null, "SeatNumber": "F125643", "CenterNumber": "5205", "DistAndSchoolNumber": "23.06.052", "MonthAndYearExam": "MARCH-2019", "SrNumberOfStatement": "101100", "CandidateFullName": "Chougule Priyanka Rajaram", "CandidateMotherName": "Manisha", "Percentage": 82.8, "Result": "PASS", "ObtainedMarks": "414", "TotalMarks": 500, "SubjectList": [{"Subject": "MARATHI", "Marks": 77}, {"Subject": "HINDI", "Marks": 82}, {"Subject": "ENGLISH", "Marks": 89}, {"Subject": "MATHEMATICS", "Marks": 75}, {"Subject": "SCIENCE & TECHNOLOGY", "Marks": 68}, {"Subject": "SOCIAL SCIENCES", "Marks": 91}]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"FileName": {"type": "string"}, "status": {"type": "string"}, "ApplicationID": {"type": "null"}, "SeatNumber": {"type": "string"}, "CenterNumber": {"type": "string"}, "DistAndSchoolNumber": {"type": "string"}, "MonthAndYearExam": {"type": "string"}, "SrNumberOfStatement": {"type": "string"}, "CandidateFullName": {"type": "string"}, "CandidateMotherName": {"type": "string"}, "Percentage": {"type": "number"}, "Result": {"type": "string"}, "ObtainedMarks": {"type": "string"}, "TotalMarks": {"type": "integer"}, "SubjectList": {"type": "array", "items": {"type": "object", "properties": {"Subject": {"type": "string"}, "Marks": {"type": "integer"}}, "required": ["Marks", "Subject"]}}}, "required": ["ApplicationID", "CandidateFullName", "CandidateMotherName", "CenterNumber", "DistAndSchoolNumber", "FileName", "MonthAndYearExam", "ObtainedMarks", "Percentage", "Result", "SeatNumber", "SrNumberOfStatement", "SubjectList", "TotalMarks", "status"]}
d17524a2-c4b9-4902-b50b-ae30d2c2bb37/822173c7-3a3d-4681-a164-2fb7114f39a9/0/0
DocuExprt
DocuExprt
null
/Document/Login
Used to verify Identity.
200
Response
{"token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1bmlxdWVfbmFtZSI6ImRlbW9Ac3BsYXNoZ2Fpbi5jb20iLCJJSUQiOiI0IiwianRpIjoiNGM0ZmQwMTAtMzE2NC00YmRlLThiNzItNzc1ZjVlZjVjMmM3IiwiVmFsaWREb2N1bWVudCI6IkRvTm90TWVzc0Fyb3VuZCxTU0MiLCJuYmYiOjE2NTc3MjgxMTksImV4cCI6MTY1Nzc0NjExOSwiaWF0IjoxNjU3NzI4MTE5fQ.Vs40JPgZaLPek8ck9EbZ4TlUdsR3_4qDjJ9gutozI2k"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"token": {"type": "string"}}, "required": ["token"]}
53aa5088e4b07e1f4ebeb439/53aa5089e4b0a798dbd1a61f/0/0
Word Dictionary
A dictionary and so much more! Rich information for a word with definitions, associations, examples, and much more.
9.6
Theme
Get the themes of a word.
200
Response
{"author": "twinword inc.", "email": "help@twinword.com", "entry": "mask", "request": "mask", "response": "mask", "result_code": "200", "result_msg": "Success", "theme": ["cover", "search", "secret", "wear", "skirt", "boot", "skin", "image", "human"], "version": "4.0.0"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"author": {"type": "string"}, "email": {"type": "string"}, "entry": {"type": "string"}, "request": {"type": "string"}, "response": {"type": "string"}, "result_code": {"type": "string"}, "result_msg": {"type": "string"}, "theme": {"type": "array", "items": {"type": "string"}}, "version": {"type": "string"}}, "required": ["author", "email", "entry", "request", "response", "result_code", "result_msg", "theme", "version"]}
53aa5088e4b07e1f4ebeb439/53aa5089e4b0a9b1348da6ad/0/0
Word Dictionary
A dictionary and so much more! Rich information for a word with definitions, associations, examples, and much more.
9.6
Difficulty
Get the difficulty level of a word.
200
Response
{"author": "twinword inc.", "email": "help@twinword.com", "entry": "mask", "request": "mask", "response": "mask", "result_code": "200", "result_msg": "Success", "ten_degree": 3, "version": "4.0.0"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"author": {"type": "string"}, "email": {"type": "string"}, "entry": {"type": "string"}, "request": {"type": "string"}, "response": {"type": "string"}, "result_code": {"type": "string"}, "result_msg": {"type": "string"}, "ten_degree": {"type": "integer"}, "version": {"type": "string"}}, "required": ["author", "email", "entry", "request", "response", "result_code", "result_msg", "ten_degree", "version"]}
53aa5088e4b07e1f4ebeb439/53aa5089e4b0a798dbd1a61d/0/0
Word Dictionary
A dictionary and so much more! Rich information for a word with definitions, associations, examples, and much more.
9.6
Example
See examples of a word used in a sentence
200
Response
{"author": "twinword inc.", "email": "help@twinword.com", "entry": "mask", "example": ["The red figure wears the same hat and mask in the background.", "The conformable mask is then stripped and a new conformable mask laid down.", "She was wearing a Kachina dancer's mask.", "The grandiosity is just a momentary mask.", "The mask is for protection and intimidation.", "The grating in the mask is adjusted.", "The mask of the male mummy is gilded.", "The mask covers the aperture of the telescope.", "Mask and wig celebrates the centennial of the clubhouse.", "The latter disguised his scarred visage with a steel mask."], "request": "mask", "response": "mask", "result_code": "200", "result_msg": "Success", "version": "4.0.0"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"author": {"type": "string"}, "email": {"type": "string"}, "entry": {"type": "string"}, "example": {"type": "array", "items": {"type": "string"}}, "request": {"type": "string"}, "response": {"type": "string"}, "result_code": {"type": "string"}, "result_msg": {"type": "string"}, "version": {"type": "string"}}, "required": ["author", "email", "entry", "example", "request", "response", "result_code", "result_msg", "version"]}
53aa5088e4b07e1f4ebeb439/53aa5089e4b07e1f4ebeb443/0/0
Word Dictionary
A dictionary and so much more! Rich information for a word with definitions, associations, examples, and much more.
9.6
Reference
Get the broad terms, narrow terms, related terms, evocations, synonyms, associations, and derived terms of a word.
200
Response
{"author": "twinword inc.", "email": "help@twinword.com", "entry": "mask", "relation": {"associations": "", "broad_terms": "protective covering, protective cover, protection, party, mask, hiding, hide, disguise, covering, cover, concealment, concealing, conceal", "derived_terms": "death mask, 'Mask house', , a house for masquerades, sleep mask, unmask", "evocations": "disguise", "narrow_terms": "welder's mask, respirator, masquerade ball, masquerade, masked ball, mask, half mask, gasmask, gas helmet, fancy-dress ball, false face, face mask, eye mask, domino, dissimulate, dissemble, cloak, camouflage", "related_terms": "masquerade party, masquerade, masque, dissemble, disguise, cook, cloak, block out", "synonyms": ""}, "request": "mask", "response": "mask", "result_code": "200", "result_msg": "Success", "version": "4.0.0"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"author": {"type": "string"}, "email": {"type": "string"}, "entry": {"type": "string"}, "relation": {"type": "object", "properties": {"associations": {"type": "string"}, "broad_terms": {"type": "string"}, "derived_terms": {"type": "string"}, "evocations": {"type": "string"}, "narrow_terms": {"type": "string"}, "related_terms": {"type": "string"}, "synonyms": {"type": "string"}}, "required": ["associations", "broad_terms", "derived_terms", "evocations", "narrow_terms", "related_terms", "synonyms"]}, "request": {"type": "string"}, "response": {"type": "string"}, "result_code": {"type": "string"}, "result_msg": {"type": "string"}, "version": {"type": "string"}}, "required": ["author", "email", "entry", "relation", "request", "response", "result_code", "result_msg", "version"]}
53aa5088e4b07e1f4ebeb439/53aa5089e4b0596140340685/0/0
Word Dictionary
A dictionary and so much more! Rich information for a word with definitions, associations, examples, and much more.
9.6
Association
Get the associations of a word.
200
Response
{"assoc_word": ["hide", "hat", "face"], "assoc_word_ex": ["hide", "hat", "face", "veil", "disguise", "camouflage"], "author": "twinword inc.", "email": "help@twinword.com", "entry": "mask", "request": "mask", "response": "mask", "result_code": "200", "result_msg": "Success", "version": "4.0.0"}
{"properties": {"assoc_word": {"items": {"type": "string"}, "type": "array"}, "assoc_word_ex": {"items": {"type": "string"}, "type": "array"}, "author": {"type": "string"}, "email": {"format": "email", "type": "string"}, "entry": {"type": "string"}, "request": {"type": "string"}, "response": {"type": "string"}, "result_code": {"format": "color", "type": "string"}, "result_msg": {"type": "string"}, "version": {"type": "string"}}, "type": "object"}
53aa5088e4b07e1f4ebeb439/53aa5089e4b0a798dbd1a61c/0/0
Word Dictionary
A dictionary and so much more! Rich information for a word with definitions, associations, examples, and much more.
9.6
Exam History
See which exams a word has been on
200
Response
{"author": "twinword inc.", "email": "help@twinword.com", "entry": "mask", "exam": ["toeic", "toefl", "ielts", "gre", "gmat"], "request": "mask", "response": "mask", "result_code": "200", "result_msg": "Success", "version": "4.0.0"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"author": {"type": "string"}, "email": {"type": "string"}, "entry": {"type": "string"}, "exam": {"type": "array", "items": {"type": "string"}}, "request": {"type": "string"}, "response": {"type": "string"}, "result_code": {"type": "string"}, "result_msg": {"type": "string"}, "version": {"type": "string"}}, "required": ["author", "email", "entry", "exam", "request", "response", "result_code", "result_msg", "version"]}
53aa5088e4b07e1f4ebeb439/53aa5089e4b0a798dbd1a61b/0/0
Word Dictionary
A dictionary and so much more! Rich information for a word with definitions, associations, examples, and much more.
9.6
Definition
Get the definitions of a word.
200
Response
{"author": "twinword inc.", "email": "help@twinword.com", "entry": "mask", "ipa": "m\u0251\u02d0sk", "meaning": {"adjective": "", "adverb": "", "noun": "(nou) a covering to disguise or conceal the face\n(nou) activity that tries to conceal something\n(nou) a party of guests wearing costumes and masks\n(nou) a protective covering worn over the face", "verb": "(vrb) hide under a false appearance\n(vrb) put a mask on or cover with a mask\n(vrb) make unrecognizable\n(vrb) cover with a sauce\n(vrb) shield from light"}, "request": "mask", "response": "mask", "result_code": "200", "result_msg": "Success", "version": "4.0.0"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"author": {"type": "string"}, "email": {"type": "string"}, "entry": {"type": "string"}, "ipa": {"type": "string"}, "meaning": {"type": "object", "properties": {"adjective": {"type": "string"}, "adverb": {"type": "string"}, "noun": {"type": "string"}, "verb": {"type": "string"}}, "required": ["adjective", "adverb", "noun", "verb"]}, "request": {"type": "string"}, "response": {"type": "string"}, "result_code": {"type": "string"}, "result_msg": {"type": "string"}, "version": {"type": "string"}}, "required": ["author", "email", "entry", "ipa", "meaning", "request", "response", "result_code", "result_msg", "version"]}
18e13722-ad38-4c09-aa48-0b93bf75a459/47ad4f74-3cfe-4b1d-87d5-4e16bd004c79/0/0
French Entity Recognition
Lettria's French Named Entity Recognition.
null
French Named Entity Recognition
This API automatically identifies names entities within your text. Extract more than 40 entities from a wide range of information : name, date, phone number, email, postal address, mass, organization, IP address, social security number etc.
200
Extract NER Standard Return
[[{"source": "1kg", "type": "mass", "value": {"centigramme": 100000, "confidence": 0.99, "gramme": 1000, "kilogramme": 1, "onces": 35.27399072294044, "pounds": 2.2046244201837775, "scalar": 1, "stone": 0.1574731232746851, "ton": 9.842035332906845e-05, "tonnes": 0.001, "unit": "kg"}}], [{"source": "3kg", "type": "mass", "value": {"centigramme": 300000, "confidence": 0.99, "gramme": 3000, "kilogramme": 3, "onces": 105.82197216882132, "pounds": 6.613873260551332, "scalar": 3, "stone": 0.4724193698240553, "ton": 0.00029526105998720537, "tonnes": 0.003, "unit": "kg"}}, {"source": "7kg", "type": "mass", "value": {"centigramme": 700000, "confidence": 0.99, "gramme": 7000, "kilogramme": 7, "onces": 246.9179350605831, "pounds": 15.432370941286443, "scalar": 7, "stone": 1.1023118629227957, "ton": 0.0006889424733034792, "tonnes": 0.007, "unit": "kg"}}]]
{"items": {"items": {"properties": {"source": {"type": "string"}, "type": {"type": "string"}, "value": {"properties": {"centigramme": {"type": "integer"}, "confidence": {"type": "number"}, "gramme": {"type": "integer"}, "kilogramme": {"type": "integer"}, "onces": {"type": "number"}, "pounds": {"type": "number"}, "scalar": {"type": "integer"}, "stone": {"type": "number"}, "ton": {"type": "number"}, "tonnes": {"type": "number"}, "unit": {"type": "string"}}, "type": "object"}}, "type": "object"}, "type": "array"}, "type": "array"}
d742c702-1808-481b-93ef-2b6222368055/1687b75a-4a7f-41ec-9a1a-2c888c9f0d7b/0/0
Stacks Patent Similarity
This is a useful API for finding claims similar to the user's input text. The "Infringement Research" is a tool for returning similar claims that the text is likely to infringe. The "101 Eligibility Analyzer" is a tool for testing a claim for eligibility under both "Alice Test Step One" and "Alice Test Step Two". The "Patent Analytics" provides multiple useful endpoints powered by the Stacks Similarity Engine for exploring IP infringement and clearance, client prospecting, finding patent lawy...
0.2
post_infringement_v1_infringement_research_predict_post
Create a request item with the following information: text: should be minimum length of 20 words for better performance. The maximum limit is 512 words, so longer queries will be truncated. (required). claims: is a list of claims on which you're testing to see if the text is infringing. The maximum number of entries is 25 (optional). patent_number: has the same format and limits as in the GET request (optional). model_name: (required).
200
Example_1
{"data": [{"patent_number": "US9141433B2", "document_url": "https://patents.google.com/patent/US9141433B2", "predictions": [{"claim_number": 1, "claim_text": "A system, comprising: a memory; and a processor programmed to: retrieve a computing job comprising a plurality of prioritized tasks from the memory, where the prioritized tasks comprise associated priority information usable to identify relative priorities among the prioritized tasks; ...", "infringement_prob": 0.8490521907806396}, {"claim_number": 8, "claim_text": "A computer program product comprises a computer readable storage medium including computer readable program code, wherein the computer readable program code when executed on a computer causes the computer to: retrieve a computing job comprising a plurality of prioritized tasks, where the prioritized tasks comprise associated priority information usable to identify relative priorities among the prioritized tasks; ...", "infringement_prob": 0.8665629625320435}]}, "..."], "model": "stk_nova", "messages": []}
{"title": "InfringementPostResponse", "required": ["data", "messages"], "type": "object", "properties": {"data": {"title": "Data", "type": "array", "items": {}}, "model": {"title": "Model", "type": "string"}, "messages": {"title": "Messages", "type": "array", "items": {"type": "string"}}}, "description": "Returned response has the following information:\n\n- **data**: is a list of results each containing a dictionary with the following keys:\n - **patent_number**: shows the patent number,\n - **document_url**: is the address to the patent,\n - **predictions**:\n - **claim_number**: is the claim number. If a list of claims is submitted, it would be\n the same as the index of the item in the list.\n - **claim_text**: is the text of the claim.\n - **infringement_prob**: is the likelihood of infringing the claim. Any probability below a threshold of `0.3` will be considered too low for infringement to take place regardless of the model in action.\n- **model**: is the model used in Stacks to make predictions.\n- **messages**: is the list of messages sent from the backend."}
d742c702-1808-481b-93ef-2b6222368055/1687b75a-4a7f-41ec-9a1a-2c888c9f0d7b/1/0
Stacks Patent Similarity
This is a useful API for finding claims similar to the user's input text. The "Infringement Research" is a tool for returning similar claims that the text is likely to infringe. The "101 Eligibility Analyzer" is a tool for testing a claim for eligibility under both "Alice Test Step One" and "Alice Test Step Two". The "Patent Analytics" provides multiple useful endpoints powered by the Stacks Similarity Engine for exploring IP infringement and clearance, client prospecting, finding patent lawy...
0.2
post_infringement_v1_infringement_research_predict_post
Create a request item with the following information: text: should be minimum length of 20 words for better performance. The maximum limit is 512 words, so longer queries will be truncated. (required). claims: is a list of claims on which you're testing to see if the text is infringing. The maximum number of entries is 25 (optional). patent_number: has the same format and limits as in the GET request (optional). model_name: (required).
422
Example_1
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"type": "string"}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
d742c702-1808-481b-93ef-2b6222368055/e0a09bc5-6d5f-4dfc-9911-68e44ab66fb6/0/0
Stacks Patent Similarity
This is a useful API for finding claims similar to the user's input text. The "Infringement Research" is a tool for returning similar claims that the text is likely to infringe. The "101 Eligibility Analyzer" is a tool for testing a claim for eligibility under both "Alice Test Step One" and "Alice Test Step Two". The "Patent Analytics" provides multiple useful endpoints powered by the Stacks Similarity Engine for exploring IP infringement and clearance, client prospecting, finding patent lawy...
0.2
post_infringement_clearance_v1_patent_analytics_clearance_post
Create a request item with the following information: text: should be minimum length of 20 words for better performance. The maximum limit is 512 words, so longer queries will be truncated. (required). max_limit: is the maximum number of results to be returned (optional). Default number is 10 and it cannot go over 100. model_name: (required).
422
Example_1
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"type": "string"}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
d742c702-1808-481b-93ef-2b6222368055/e0a09bc5-6d5f-4dfc-9911-68e44ab66fb6/1/0
Stacks Patent Similarity
This is a useful API for finding claims similar to the user's input text. The "Infringement Research" is a tool for returning similar claims that the text is likely to infringe. The "101 Eligibility Analyzer" is a tool for testing a claim for eligibility under both "Alice Test Step One" and "Alice Test Step Two". The "Patent Analytics" provides multiple useful endpoints powered by the Stacks Similarity Engine for exploring IP infringement and clearance, client prospecting, finding patent lawy...
0.2
post_infringement_clearance_v1_patent_analytics_clearance_post
Create a request item with the following information: text: should be minimum length of 20 words for better performance. The maximum limit is 512 words, so longer queries will be truncated. (required). max_limit: is the maximum number of results to be returned (optional). Default number is 10 and it cannot go over 100. model_name: (required).
200
Example_1
{"data": [{"patent_id": "8570814", "type": "utility", "number": "8570814", "country": "US", "date": "2013-10-29", "year": 2013, "abstract": "Memory states of a multi-bit memory cell are demarcated by generating read reference signals having levels that constitute boundaries of the memory states. The read reference signals may be dependent upon the levels of programming reference signals used for controlling the programming of the memory cell. The memory cell can thus be programmed without reading out its memory state during the programming process, with programming margins being assured by the dependence of the read reference signals on the programming reference signals. Both sets of reference signals may be generated by reference cells which track variations in the operating characteristics of the memory cell with changes in conditions, such as temperature and system voltages, to enhance the reliability of memory programming and readout.", "title": "Memory apparatus including programmable non-volatile multi-bit memory cell, and apparatus and method for demarcating memory states of the cell", "kind": "B2", "num_claims": 19, "firstnamed_assignee_id": 9289, "firstnamed_assignee_persistent_id": "6ef12aaf-f924-4065-9af6-11374dee7909", "firstnamed_assignee_location_id": 33512, "firstnamed_assignee_persistent_location_id": "37.3818|-122.179", "firstnamed_assignee_city": "Palo Alto", "firstnamed_assignee_state": "CA", "firstnamed_assignee_country": "US", "firstnamed_assignee_latitude": 37.3818, "firstnamed_assignee_longitude": -122.179, "firstnamed_inventor_id": 252038, "firstnamed_inventor_persistent_id": "fl:g_ln:banks-6", "firstnamed_inventor_location_id": 34016, "firstnamed_inventor_persistent_location_id": "37.5308|-121.942", "firstnamed_inventor_city": "Fremont", "firstnamed_inventor_state": "CA", "firstnamed_inventor_country": "US", "firstnamed_inventor_latitude": 37.5308, "firstnamed_inventor_longitude": -121.942, "num_foreign_documents_cited": 40, 
"num_us_applications_cited": 0, "num_us_patents_cited": 191, "num_total_documents_cited": 231, "num_times_cited_by_us_patents": 0, "earliest_application_date": "2011-03-04", "patent_processing_days": 970, "uspc_current_mainclass_average_patent_processing_days": 769, "cpc_current_group_average_patent_processing_days": 829, "detail_desc_length": 50394, "claim_number": [1], "confidence_level": [0.7604705393314362]}, "..."], "model": "stk_nova", "messages": []}
{"title": "AnalyticsClearancePostResponse", "required": ["data", "messages"], "type": "object", "properties": {"data": {"title": "Data", "type": "array", "items": {}}, "model": {"title": "Model", "type": "string"}, "messages": {"title": "Messages", "type": "array", "items": {"type": "string"}}}, "description": "Returned response has the following information:\n\n- **data**: is a list of results\n- **model**: is the model used in Stacks to analyze the text.\n- **messages**: is the list of messages sent from the backend."}
37767073-c86d-4c9d-925d-e9235f160f98/6e48f84d-4263-4914-a9f7-f1a1dcb1e4ea/0/0
Sentiment Analysis API
English text sentiment analysis API
6
Text Input
Please input plain text
200
Response
{"sentiment": "negative", "score": {"negative": 0.343, "neutral": 0.493, "positive": 0.164, "compound": -0.4915}}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"sentiment": {"type": "string"}, "score": {"type": "object", "properties": {"negative": {"type": "number"}, "neutral": {"type": "number"}, "positive": {"type": "number"}, "compound": {"type": "number"}}, "required": ["compound", "negative", "neutral", "positive"]}}, "required": ["score", "sentiment"]}
028d1d3d-7b88-48d7-ab3b-2f7ba2df022f/14f7a292-198f-4e05-9b36-a40d10fb2bf5/0/0
Poem-Rap Wizard
Poem-Rap Wiz API harnesses AI to create custom poems and raps from user-provided details. Input a name, relevant info, and choose between a poem or rap for a unique, AI-generated piece, perfect for special occasions, personalized messages, or discovering AI-powered creative writing.
null
/generate_poem_rap
200
null
{"text": ""}
{"type": "object", "properties": {"text": {"type": "string", "description": "The generated poem or rap text."}}}
087a2128-28d2-43af-b884-c84bafc5fd60/49951cb0-2760-4adc-aa25-889b3bf4290f/0/0
Classify any Webpage with Google Cloud Natural Language
Using Google's Natural Language Processing (NLP) technology is a highly effective approach to effectively categorizing content to optimize Search Engine Optimization (SEO) and establish topical authority. By utilizing this cutting-edge technology, businesses can effectively organize and optimize their content to ensure it is easily discoverable and relevant to their target audience. This approach is highly recommended for those looking to establish a solid online presence and drive traffic to...
5.7
Classify any URL with Google NLP
Classify any URL with Google NLP
200
New Example
[{"confidence": 0.70254016, "name": "/Computers & Electronics/Programming/Development Tools"}, {"confidence": 0.6787314, "name": "/Computers & Electronics/Software/Business & Productivity Software"}, {"confidence": 0.24937595, "name": "/Computers & Electronics/Programming/Other"}, {"confidence": 0.15354379, "name": "/Internet & Telecom/Other"}, {"confidence": 0.12731557, "name": "/Business & Industrial/Business Operations/Management"}, {"confidence": 0.11253812, "name": "/Computers & Electronics/Enterprise Technology/Other"}, {"confidence": 0.10599707, "name": "/Internet & Telecom/Mobile & Wireless/Mobile Apps & Add-Ons"}, {"confidence": 0.10380578, "name": "/Internet & Telecom/Web Services/Web Design & Development"}]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "object", "properties": {"confidence": {"type": "number"}, "name": {"type": "string"}}, "required": ["confidence", "name"]}}
0d3bb6c6-9dc4-4c7e-b81f-6141d886c18e/5acaa4dc-1d4c-4bf1-a7ff-c3a89b950753/0/0
Translator
Translate any text you want with our Translator; we support ~100 languages. Please note that the expected time for processing a request can be 10-30 seconds during the beta testing period.
9.2
translate_language
Translate the provided text into the chosen language. The input language is auto-detected.
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"type": "string"}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
0d3bb6c6-9dc4-4c7e-b81f-6141d886c18e/5acaa4dc-1d4c-4bf1-a7ff-c3a89b950753/1/0
Translator
Translate any text you want with our Translator; we support ~100 languages. Please note that the expected time for processing a request can be 10-30 seconds during the beta testing period.
9.2
translate_language
Translate the provided text into the chosen language. The input language is auto-detected.
200
null
{"result": {}, "model_used": "", "time": ""}
{"title": "Response", "required": ["result", "model_used", "time"], "type": "object", "properties": {"result": {"title": "Result", "anyOf": [{"type": "string"}, {"type": "object", "additionalProperties": {"type": "string"}}, {"type": "object", "additionalProperties": {"type": "object"}}]}, "model_used": {"title": "Model Used", "type": "string"}, "time": {"title": "Time", "type": "string"}}}
f2d5ddff-5f6a-4ffc-bc73-0546960ef384/33daeb04-a8b8-4cd6-94cb-72b1f30a3e79/0/0
Sentiment Analysis
Classify text into three class sentiments (neutral, positive and negative). The api currently supports English and Arabic dialects.
null
en_sentiment
Receive a new sentiment extraction job ... Parameters payload: str string object that can be read as a dictionary containing the input text and an optional threshold
200
New Example
{"text": "It's perfect ", "normalized_text": "It s perfect", "negative_probability": 0.12482409924268723, "neutral_probability": 0.19524116814136505, "positive_probability": 0.6799347400665283, "predicted_sentiment": 1, "confidence": 0.6799347400665283}
{"type": "object", "properties": {"text": {"type": "string"}, "normalized_text": {"type": "string"}, "negative_probability": {"type": "number"}, "neutral_probability": {"type": "number"}, "positive_probability": {"type": "number"}, "predicted_sentiment": {"type": "integer"}, "confidence": {"type": "number"}}}
f2d5ddff-5f6a-4ffc-bc73-0546960ef384/0138f907-ae34-4be8-9986-6a9968554f4f/0/0
Sentiment Analysis
Classify text into three class sentiments (neutral, positive and negative). The api currently supports English and Arabic dialects.
null
ar_sentiment
Receive a new sentiment extraction job ... Parameters payload: str string object that can be read as a dictionary containing the input text and an optional threshold
200
New Example
{"text": "\u0627\u0644\u0645\u0646\u062a\u062c \u062f\u0647 \u062c\u0627\u0645\u062f", "normalized_text": "\u0645\u0646\u062a\u062c \u062f\u0647 \u062c\u0627\u0645\u062f", "negative_probability": 0.055074308067560196, "neutral_probability": 0.30793607234954834, "positive_probability": 0.6369895935058594, "predicted_sentiment": 1, "confidence": 0.6369895935058594}
{"type": "object", "properties": {"text": {"type": "string"}, "threshold": {"type": "number"}}}
69b15242-eef9-4565-9560-230c923439f7/6d2bb7fb-342c-4924-91be-83720aaa3a44/0/0
Language Tools
Various tools to analyze and modify text.
null
/language
Gives the language of the given text.
200
New Example
{"language": "English"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"language": {"type": "string"}}, "required": ["language"]}
69b15242-eef9-4565-9560-230c923439f7/80c345f7-1137-4e1b-a2a8-4054769664bf/0/0
Language Tools
Various tools to analyze and modify text.
null
/topic
Gives the topic of the given text.
200
The Beatles
{"topic": "The Beatles"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"topic": {"type": "string"}}, "required": ["topic"]}
69b15242-eef9-4565-9560-230c923439f7/9556e6dd-23c2-4606-801a-9cc015798e02/0/0
Language Tools
Various tools to analyze and modify text.
null
/similarity
Rates the similarity of two texts with a value between 0 and 1.
200
Famous Bands
{"similarity": 0.5}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"similarity": {"type": "number"}}, "required": ["similarity"]}
69b15242-eef9-4565-9560-230c923439f7/690e8042-0067-48cd-9ab5-a8bd3e0c3439/0/0
Language Tools
Various tools to analyze and modify text.
null
/sentiment
Gives the sentiment of the given text in a value between -1 and +1 and in a brief description.
200
The Beatles
{"sentiment": 0.7, "description": "The Beatles are seen as a revolutionary force in popular music and culture that changed the face of the industry and impacted generations."}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"sentiment": {"type": "number"}, "description": {"type": "string"}}, "required": ["description", "sentiment"]}
69b15242-eef9-4565-9560-230c923439f7/90d54df8-e723-4114-98ee-fba1416c9d41/0/0
Language Tools
Various tools to analyze and modify text.
null
/keywords
Lists keywords for the given text.
200
The Beatles
{"keywords": ["Beatles", "English", "Liverpool", "John Lennon", "Paul McCartney", "George Harrison", "Ringo Starr", "Skiffle", "Beat", "Rock 'n' Roll", "Classical", "Pop", "Folk", "Indian", "Psychedelia", "Hard Rock", "Recording", "Songwriting", "Artistic", "Revolutionised", "Youth", "Socio-cultural", "Movements"]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"keywords": {"type": "array", "items": {"type": "string"}}}, "required": ["keywords"]}
69b15242-eef9-4565-9560-230c923439f7/95f8d7a1-91b2-423f-82e8-c552614b0532/0/0
Language Tools
Various tools to analyze and modify text.
null
/entities
Lists the entities and their type for the given text.
200
The Beatles
{"entities": [{"entity": "John Lennon", "type": "Person"}, {"entity": "Paul McCartney", "type": "Person"}, {"entity": "George Harrison", "type": "Person"}, {"entity": "Ringo Starr", "type": "Person"}, {"entity": "The Beatles", "type": "Band"}, {"entity": "Liverpool", "type": "City"}, {"entity": "1960s", "type": "Decade"}, {"entity": "Skiffle", "type": "Music Genre"}, {"entity": "Beat", "type": "Music Genre"}, {"entity": "1950s Rock 'n' Roll", "type": "Music Genre"}, {"entity": "Classical Music", "type": "Music Genre"}, {"entity": "Traditional Pop", "type": "Music Genre"}, {"entity": "Folk", "type": "Music Genre"}, {"entity": "Indian Music", "type": "Music Genre"}, {"entity": "Psychedelia", "type": "Music Genre"}, {"entity": "Hard Rock", "type": "Music Genre"}]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"entities": {"type": "array", "items": {"type": "object", "properties": {"entity": {"type": "string"}, "type": {"type": "string"}}, "required": ["entity", "type"]}}}, "required": ["entities"]}
69b15242-eef9-4565-9560-230c923439f7/8b2d20c9-57e3-422f-97ce-ecca4bee2460/0/0
Language Tools
Various tools to analyze and modify text.
null
/brainstorm
Lists ideas for a given topic, context and within the defined constraints.
200
YouTube video about flowers
{"ideas": ["Bloomin' Amazing: A Look at Fabulous Flowers", "Flower Power: How to Make Your Garden Grow", "Sow-Worthy Spectacles: A Bouquet of Beautiful Blooms", "Roses are Red, Violets are Blue-tiful: A Garden Tour", "The Petal Pusher: An Exploration of Flowery Fun", "Pretty in Petals: An Appreciation of Floral Finds", "Blooming Marvellous: A Floral Fete", "Scent-sational Splendour: A Garden Getaway", "A Floral Flourish: An Aromatic Adventure", "Flower Fiesta: A Celebration of Blooms"]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"ideas": {"type": "array", "items": {"type": "string"}}}, "required": ["ideas"]}
69b15242-eef9-4565-9560-230c923439f7/60e7a5e7-7402-4d2a-a8cc-545935272307/0/0
Language Tools
Various tools to analyze and modify text.
null
/associations
Lists associations for the given word.
200
New Example
{"associations": ["Petal", "Bloom", "Garden", "Nature", "Spring", "Pollen", "Color", "Bouquet", "Aroma", "Vase"]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"associations": {"type": "array", "items": {"type": "string"}}}, "required": ["associations"]}
69b15242-eef9-4565-9560-230c923439f7/1cf169ab-abd9-4538-a306-204994246f05/0/0
Language Tools
Various tools to analyze and modify text.
null
/rephrase-simple
Rephrases a given text in simple language.
200
The Beatles
{"text": "The Beatles were an English rock band formed in Liverpool in 1960. They were made up of John Lennon, Paul McCartney, George Harrison and Ringo Starr. They were very famous and important in the development of popular music. They combined different kinds of music like skiffle, beat, rock 'n' roll, classical music, folk music and Indian music. They changed how people made music and were seen as leaders of the 1960s."}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"]}
69b15242-eef9-4565-9560-230c923439f7/71c15e62-64a3-48a7-b1cc-f86b0e5d376d/0/0
Language Tools
Various tools to analyze and modify text.
null
/rephrase-joke
Rephrases a given text in a fun way and even adds jokes.
200
New Example
{"text": "The Fab Four from Liverpool, John Lennon, Paul McCartney, George Harrison and Ringo Starr, known as The Beatles, were a legendary rock band that changed the face of music forever! From their roots in skiffle, beat and 50s rock 'n' roll, to their forays into classical music, traditional pop and even psychedelia, the Beatles pushed the limits of what music could be. They had a hand in ushering in the counterculture of the 60s, and they were celebrated as champions of the youth and the culture of the time. They even made art out of popular music! Talk about genius! And who could forget their iconic hairstyles? I mean, really, who looks that cool with mop-tops?"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"]}
69b15242-eef9-4565-9560-230c923439f7/5030239f-e121-4fb1-88ff-c8333aba41a6/0/0
Language Tools
Various tools to analyze and modify text.
null
/rephrase-formal
Rephrases a given text in a formal tone.
200
The Beatles
{"text": "The Beatles were an English rock band formed in Liverpool in 1960, comprised of John Lennon, Paul McCartney, George Harrison and Ringo Starr. Widely acknowledged as the most influential band of all time, they were pivotal in the progression of 1960s counterculture and popular music as an art form. Drawing from skiffle, beat and 1950s rock 'n' roll, their sound was characterized by the incorporation of classical music and traditional pop in innovative ways, in addition to exploring a range of music styles from folk and Indian music to psychedelia and hard rock. As pioneers in recording, songwriting and artistic presentation, the Beatles revolutionised various aspects of the music industry, and were often associated with being the figureheads of the youth and sociocultural movements of the era."}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"]}
69b15242-eef9-4565-9560-230c923439f7/c75302f2-ef41-4498-81e8-5c2eded42cdb/0/0
Language Tools
Various tools to analyze and modify text.
null
/rephrase-fiveyearold
Rephrases a given text as if written by a five-year-old.
200
New Example
{"text": "The Beatles were a really cool band from England that was made up of John Lennon, Paul McCartney, George Harrison and Ringo Starr. They made lots of great music that was a mix of lots of different kinds of sounds like skiffle, beat and rock 'n' roll. They were really special because they were the first to do lots of things like recording, writing songs and making cool art. They were famous for being part of the 1960s counterculture and helping to make music an art."}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"]}
69b15242-eef9-4565-9560-230c923439f7/a528d978-c9a9-4b26-973d-c81c17a8b41a/0/0
Language Tools
Various tools to analyze and modify text.
null
/rephrase-casual
Rephrases a given text in a casual tone.
200
The Beatles
{"text": "The Beatles were a legendary English rock band from Liverpool formed in 1960. They consisted of John Lennon, Paul McCartney, George Harrison and Ringo Starr and are widely considered to be the most influential band of all time. They blended elements of skiffle, beat, 1950s rock 'n' roll, classical music and traditional pop in unique ways. They also explored lots of different music styles, from folk and Indian music to psychedelia and hard rock. They revolutionised the music industry with their innovative recording, songwriting and artistic presentation, and were recognised as icons of the youth and social movements of the era."}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"]}
69b15242-eef9-4565-9560-230c923439f7/74efdb29-0f0c-468b-a532-c2eb97a7e929/0/0
Language Tools
Various tools to analyze and modify text.
null
/rephrase-neutral
Rephrases a given text.
200
The Beatles
{"text": "The Beatles were a revolutionary English rock band from Liverpool that changed the entire music industry. Formed in 1960, the band included John Lennon, Paul McCartney, George Harrison, and Ringo Starr. Drawing inspiration from skiffle, beat, and 1950s rock 'n' roll, their sound incorporated elements of classical music and traditional pop in unique ways. The Beatles also experimented with different genres of music, from folk and Indian music to psychedelia and hard rock. By being pioneers in recording, songwriting, and artistic presentation, they showed the world the power of pop music and made them the face of the 1960s counterculture and youth movements."}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"]}
69b15242-eef9-4565-9560-230c923439f7/0a0f1e2e-08e3-4a67-aa0a-695b65a180e5/0/0
Language Tools
Various tools to analyze and modify text.
null
/summary
Summarizes a given text.
200
The Beatles
{"summary": "The Beatles revolutionized the music industry and were seen as pioneers in recording, songwriting, and artistic presentation."}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"summary": {"type": "string"}}, "required": ["summary"]}
8e363693-ed0b-42a4-b325-e137f59e12f4/d06489a9-cb2a-4ebd-9e79-f492863be223/0/0
Gender From Name
Find the gender by just using a name.
8.8
getGender
Finds the gender from over 100,000 (first) names. Supports name variants as well.
200
New Example
{"gender": "F", "probability": 100}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"gender": {"type": "string"}, "probability": {"type": "integer"}}, "required": ["gender", "probability"]}
fcd11cd1-f862-440a-9b45-5179c67b2243/ddebf136-eec2-44c9-843f-cca595daff80/0/0
lenguaje
Spanish language tools
null
GET de conjugación de verbos
Devuelve la conjugación de un verbo en infinitivo
200
Response
["aprender", "aprendiendo", "aprendido", "aprendo", "aprendes", "aprende", "aprendemos", "aprend\u00e9is", "aprenden", "aprend\u00eda", "aprend\u00edas", "aprend\u00eda", "aprend\u00edamos", "aprend\u00edais", "aprend\u00edan", "aprend\u00ed", "aprendiste", "aprendi\u00f3", "aprendimos", "aprendisteis", "aprendieron", "aprender\u00e9", "aprender\u00e1s", "aprender\u00e1", "aprenderemos", "aprender\u00e9is", "aprender\u00e1n", "aprender\u00eda", "aprender\u00edas", "aprender\u00eda", "aprender\u00edamos", "aprender\u00edais", "aprender\u00edan", "aprenda", "aprendas", "aprenda", "aprendamos", "aprend\u00e1is", "aprendan", "aprendiera, aprendiese", "aprendieras, aprendieses", "aprendiera, aprendiese", "aprendi\u00e9ramos, aprendi\u00e9semos", "aprendierais, aprendieseis", "aprendieran, aprendiesen", "aprendiere", "aprendieres", "aprendiere", "aprendi\u00e9remos", "aprendiereis", "aprendieren", "(No existe)", "aprende", "aprenda", "aprendamos", "aprended", "aprendan"]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "string"}}
578e2f91-04d8-4007-8ecc-33f4a76fa470/af673b67-9a0e-4830-8f3b-65bec7af4dfe/0/0
Zephyr Beta
ChatGPT API: Highly performant, fast language model. Fully OpenAI compatible. Use existing Python or Node OpenAI libs! Compare to GPT3.
7.2
/chat/completions
openai chat completions
200
New Example
{"choices": [{"finish_reason": "length", "index": 0, "message": {"content": " Bonjour, comment allez-vous ?", "role": "assistant"}}], "created": 1700284875, "id": "chatcmpl", "model": "LLaMA_CPP", "object": "chat.completion", "truncated": false, "usage": {"completion_tokens": 50, "prompt_tokens": 69, "total_tokens": 119}}
{"type": "object", "properties": {"key1": {"type": "string"}, "key2": {"type": "string"}}}
30527e89-2e11-4a23-9eb7-e16e7a1bcbc5/d6d1a7bc-1aee-4bcb-8056-b3349c08bef4/0/0
Multi-lingual Sentiment Analysis
Multi-lingual Sentiment Analysis uses computational linguistics and text mining to automatically determine the sentiment or affective nature of the text being analyzed, with multiple language support. This API can detect the language and respond with the accurate sentiment polarity of the given text, but you can define the {lang} parameter for better and more accurate results. Supported languages (lang): 'af': 'afrikaans', 'sq': 'albanian', 'am': 'amharic', 'ar': 'arabic', 'hy': 'armenian', ...
7.9
Sentiment Analysis
Multi-lingual Sentiment Analysis: the parameter {lang} is optional; we can detect the language, but you can define it for better and more accurate results
200
Response
{"label": "positive", "language": "it", "sentiment_score": {"compound": 0.0772, "neg": 0, "neu": 0.435, "pos": 0.565}, "text": "numero uno"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"label": {"type": "string"}, "language": {"type": "string"}, "sentiment_score": {"type": "object", "properties": {"compound": {"type": "number"}, "neg": {"type": "integer"}, "neu": {"type": "number"}, "pos": {"type": "number"}}, "required": ["compound", "neg", "neu", "pos"]}, "text": {"type": "string"}}, "required": ["label", "language", "sentiment_score", "text"]}
b54aba2d-6958-404d-bd25-d485a108e038/34e06a78-eee0-4f5f-94bc-01d6312649d3/0/0
Profanity Buster
Find profanities and offensive words from any text. The JSON API that helps you filter profanity and bad words in your reviews, chat, or any text. Supported Languages: English French Spanish (coming soon) Italian (coming soon) Many more are coming...
null
/profanities
200
null
{"results": [{"segment": "idiot", "category": "insult", "severity": "soft"}]}
{"type": "object", "properties": {"results": {"type": "array", "items": {"type": "object", "properties": {"segment": {"type": "string"}, "category": {"type": "string"}, "severity": {"type": "string"}}}}}}
c64b4004-25f3-42b3-938b-bbbf22e6b943/d5c7ff81-6812-4464-8caa-ec50dcb0ea07/0/0
French Keywords Extraction
Lettria's French Keywords Extraction
null
Extract Keywords
Easily retrieve keywords within your text. This API automatically analyses your input (tokenization, pos-tagging, lemmatization) and erases stop words to help you identify main keywords within a text (such as verbs, common nouns, proper nouns, adjectives, adverbs).
200
Extract Keywords Standard Return
[[{"lemma": "etre", "lexicon": [], "meaning": [], "nlp": {"lemmatizer": [{"confidence": 0.99, "conjugate": [{"mode": "indicative", "pronom": 3, "temps": "present"}], "infinit": "etre"}], "source": "est", "tag": "V"}, "source": "est", "tag": "V"}, {"lemma": "incroyable", "lexicon": [], "meaning": [], "nlp": {"lemmatizer": {"confidence": 0.99, "gender": {"female": false, "plural": false}, "lemma": "incroyable"}, "source": "incroyable", "tag": "JJ"}, "source": "incroyable", "tag": "JJ"}], [{"lemma": "etre", "lexicon": [], "meaning": [], "nlp": {"auxiliary": "avoir", "lemmatizer": {"confidence": 0.99, "conjugate": [{"mode": "indicative", "pronom": 3, "temps": "past"}], "gender": {"female": true, "plural": true}, "infinit": "etre", "source": "ete"}, "source": "a ete", "tag": "V"}, "source": "a ete", "tag": "V"}]]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "array", "items": {"type": "object", "properties": {"lemma": {"type": "string"}, "lexicon": {"type": "array"}, "meaning": {"type": "array"}, "nlp": {"type": "object", "properties": {"lemmatizer": {"anyOf": [{"type": "array", "items": {"type": "object", "properties": {"confidence": {"type": "number"}, "conjugate": {"type": "array", "items": {"type": "object", "properties": {"mode": {"type": "string"}, "pronom": {"type": "integer"}, "temps": {"type": "string"}}, "required": ["mode", "pronom", "temps"]}}, "infinit": {"type": "string"}}, "required": ["confidence", "conjugate", "infinit"]}}, {"type": "object", "properties": {"confidence": {"type": "number"}, "gender": {"type": "object", "properties": {"female": {"type": "boolean"}, "plural": {"type": "boolean"}}, "required": ["female", "plural"]}, "lemma": {"type": "string"}, "conjugate": {"type": "array", "items": {"type": "object", "properties": {"mode": {"type": "string"}, "pronom": {"type": "integer"}, "temps": {"type": "string"}}, "required": ["mode", "pronom", "temps"]}}, "infinit": {"type": "string"}, "source": {"type": "string"}}, "required": ["confidence", "gender"]}]}, "source": {"type": "string"}, "tag": {"type": "string"}, "auxiliary": {"type": "string"}}, "required": ["lemmatizer", "source", "tag"]}, "source": {"type": "string"}, "tag": {"type": "string"}}, "required": ["lemma", "lexicon", "meaning", "nlp", "source", "tag"]}}}
2f1ec093-2eee-404f-bc59-7c50bdcc26db/854b5a42-2213-4a66-8938-ce1c359e051d/0/0
Keyword Extraction
Extract the most relevant words and expressions from text.
null
extract
Extract the most relevant words and expressions from text.
200
Response
{"illustrative examples": 4, "prior coordination": 4, "text": 1, "apis": 1, "testing": 1, "permission": 1}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"illustrative examples": {"type": "integer"}, "prior coordination": {"type": "integer"}, "text": {"type": "integer"}, "apis": {"type": "integer"}, "testing": {"type": "integer"}, "permission": {"type": "integer"}}, "required": ["apis", "illustrative examples", "permission", "prior coordination", "testing", "text"]}
6a2c6316-f5c2-4e3e-91c4-76029d671f7f/0dae9a55-153b-4933-9fd2-5b56fc171658/1/0
Gibberish Text Detection
null
null
predict_detect_gibberish_post
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
9ac8d477-a033-41c2-80d0-c2c82186d4bf/24069108-91ae-4713-9a7d-05760ce7a997/0/0
TLDR Text Analysis
TLDR (Too Long Didn't Read) is a Text Analysis API that allows you to extract summaries and ranked keywords from articles on web.
8.7
Summarize
Summarizes an article by fetching text from a specified URL or reading the input text and generating a summary for a web article.
200
Sample Article Summary
{"summary": "This is a sample summary."}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"summary": {"type": "string"}}, "required": ["summary"]}
9ac8d477-a033-41c2-80d0-c2c82186d4bf/c105f25b-b50f-4fe5-99d0-59fb0ad4a705/0/0
TLDR Text Analysis
TLDR (Too Long Didn't Read) is a Text Analysis API that allows you to extract summaries and ranked keywords from articles on web.
8.7
Sentiment Analysis
Performs sentiment analysis on a web article or text input.
200
New Example
{"sentiment": "positive", "polarity": 0.5}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"sentiment": {"type": "string"}, "polarity": {"type": "number"}}, "required": ["polarity", "sentiment"]}
9ac8d477-a033-41c2-80d0-c2c82186d4bf/f30bf0e3-831f-43a8-9d05-727db2012dc5/0/0
TLDR Text Analysis
TLDR (Too Long Didn't Read) is a Text Analysis API that allows you to extract summaries and ranked keywords from articles on web.
8.7
Extract Keywords
Extracts keywords from an article given the URL for the article and the number of keywords to search for.
200
Sample Keywords
[{"keyword": "keyword1", "score": 10}, {"keyword": "keyword2", "score": 7}]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "object", "properties": {"keyword": {"type": "string"}, "score": {"type": "integer"}}, "required": ["keyword", "score"]}}
f37694cf-6151-4917-9502-eed3ebad6b67/d4fe2c15-611f-437b-8459-890a21d50304/0/0
Suggest Keyword Generator
This API generates keyword and content idea suggestions for the keyword you've requested.
null
Gain Keyword Suggestions
This will generate keyword suggestions for the given keyword. The keywords will be based on global keywords, how to keywords, top keywords, best keywords and many more.
200
Response
{"best search engine": ["best search engine", "best search engine 2021", "best search engine reddit", "best search engine for dark web", "best search engine for privacy", "best search engine in the world", "best search engine for flights", "best search engine for android", "best search engine for iphone", "best search engine for students", "search engine marketing can also be called what", "best search engine", "best search engines that don't track you", "best search engines 2021", "best torrent search engine", "best search engine 2021", "what is the best search engine", "best flight search engine", "best search engines 2020"], "global": ["search engine optimization", "search engine", "search engine marketing", "search engine optimisation", "search engine meaning", "search engine journal", "search engines list", "search engine marketing examples", "search engine land", "search engine optimization techniques", "search engines", "search engine optimization", "search engines other than google", "search engine definition", "search engines list", "search engine journal", "search engine marketing", "search engine land", "search engine evaluator", "search engine meaning", "search engine examples", "search engine optimization definition", "search engines better than google", "search engine evaluator jobs", "searchenginereports.net", "search engine keeps changing to yahoo", "search engines that don't track", "search engine google", "search engine change", "search engines", "search engine edge", "search engine l\u00e0 g\u00ec", "search engine reports", "search engine optimization", "search engine evaluator", "search engine marketing", "search engine microsoft edge"], "how search engine": ["how search engine works", "how search engines help them", "how search engines help them brainly", "how search engines help them google", "how search engine works step by step", "how search engines help them yahoo", "how search engine optimization works", "how search engine works pdf", "how 
search engines help them bing", "how search engines help them youtube", "search engine list", "how search engines work", "how search engine optimization works", "how search engines help them", "how search engine works step by step", "how search engines interact with websites", "how search engines make money", "how search engine works ppt", "how search engine marketing works", "search engine default", "search engine settings", "search engines list besides google"], "how to * search engine": ["how to search engine optimization", "how to search engines work", "how to search engine", "how to search engine number", "how to search engine marketing", "how to search engines help them", "how to search engine optimization google", "how to search engine definition", "how to search engine web", "how to engine search on iphone"], "is search engine": ["is search engine optimization free", "is search engine optimization worth it", "is search engine", "is search engine a website", "is search engine a software", "is search engine startup legit", "is search engine an application", "is search engine and browser the same", "is search engine and web browser same", "is search engine optimization capitalized", "what is a search engine", "what is the best search engine", "what is search engine", "what is search engine optimization", "why is bing my search engine in chrome", "which is the best search engine", "why is bing my search engine", "what is search engine marketing"], "search engine 2022": ["search engine 2022", "best search engine 2022", "torrentz2 search engine 2022", "torrent search engine 2022", "browser hijacker search engine 2022", "best search engine 2022"], "top search engine": ["top search engines", "top search engine in china", "top search engines 2021", "top search engines in india", "top search engine list", "top search engines in the philippines", "top search engines uk", "top search engines besides google", "top search engine in korea", "top search engine in japan", 
"top search engines", "top search engine ranking", "top search engine placement", "top search engines 2021", "top search engine optimization", "top search engine marketing", "top search engine marketing companies", "top search engine optimization company"], "what is search engine": ["what is search engine optimization", "what is search engine marketing", "what is search engine", "what is search engine optimization in digital marketing", "what is search engine marketing and how does it work", "what is search engine in computer", "what is search engine in hindi", "what is search engine and example", "what is search engine give 5 examples", "what is search engines in research", "what is search engine optimization", "what is search engine", "what is search engine marketing", "what is search engine optimisation", "what is search engine evaluator", "what is search engine advertising", "what is search engine optimization seo", "what is search engine indexing"], "why search engine": ["why search engine marketing is important", "why search engine goes to yahoo", "why search engine is important", "why search engine optimization is important", "why search engine optimization", "why search engine", "why search engine is important to students", "why search engine marketing", "why search engine is important brainly", "why search engine is important to student like you", "why is bing my search engine in chrome", "why is bing my search engine", "why is yahoo my default search engine", "why google is the best search engine", "why is bing suddenly my search engine", "why does search engine calculate relevancy", "why yahoo search engine instead of google"]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"best search engine": {"type": "array", "items": {"type": "string"}}, "global": {"type": "array", "items": {"type": "string"}}, "how search engine": {"type": "array", "items": {"type": "string"}}, "how to * search engine": {"type": "array", "items": {"type": "string"}}, "is search engine": {"type": "array", "items": {"type": "string"}}, "search engine 2022": {"type": "array", "items": {"type": "string"}}, "top search engine": {"type": "array", "items": {"type": "string"}}, "what is search engine": {"type": "array", "items": {"type": "string"}}, "why search engine": {"type": "array", "items": {"type": "string"}}}, "required": ["best search engine", "global", "how search engine", "how to * search engine", "is search engine", "search engine 2022", "top search engine", "what is search engine", "why search engine"]}
2a7febbf-a321-43cb-8eaa-3cf333c85d0b/06e1416f-00c5-40f3-926f-3982a2ed3128/0/0
Document Parser and Extraction
Extract data from documents and scans
null
Delete
Delete an existing vocabulary by id
204
New Example
{"key1": "value", "key2": "value"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"key1": {"type": "string"}, "key2": {"type": "string"}}, "required": ["key1", "key2"]}
2a7febbf-a321-43cb-8eaa-3cf333c85d0b/0141ac92-a8c7-44d2-ac04-a7abf2c951b9/0/0
Document Parser and Extraction
Extract data from documents and scans
null
List
Get a list of existing extraction vocabularies
200
New Example
[{"id": "2cbdee00-34c8-43c9-a08f-f1bb48bedd2f", "name": "Asset_Amount", "entries": []}, {"id": "2cbdee00-34c8-43c9-a08f-f1bb48bedd2f-2", "name": "Account Number", "entries": []}]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "object", "properties": {"id": {"type": "string"}, "name": {"type": "string"}, "entries": {"type": "array"}}, "required": ["entries", "id", "name"]}}
2a7febbf-a321-43cb-8eaa-3cf333c85d0b/a2652749-a03a-4f08-95ee-c9cfe0caf27b/0/0
Document Parser and Extraction
Extract data from documents and scans
null
Create
Create one or more variations for a field name to be captured from the input documents.
201
New Example
{"id": "168afb0d-7d28-4aa6-b554-10fcb460434c", "name": "Account Number", "entries": [{"str": "account number", "category": "Account Number"}, {"str": "act id", "category": "Account Number"}, {"str": "account no", "category": "Account Number"}]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"id": {"type": "string"}, "name": {"type": "string"}, "entries": {"type": "array", "items": {"type": "object", "properties": {"str": {"type": "string"}, "category": {"type": "string"}}, "required": ["category", "str"]}}}, "required": ["entries", "id", "name"]}
072c65ba-bb82-4e81-bd3d-b48bfb6286f1/69a0379d-afb1-4f5e-8b7f-67905a658f86/0/0
Geneea Interpretor NLP
NLP, Sentiment, Named and General entity extraction, Language identification 30+, Tags
null
General API
Interpretor API is a bundle of Natural Language Processing functions, a set of text analysis tools that can extract insights from large quantities of text. The current endpoints are General NLP, Sentiment analysis, Entity extraction, Tags extraction and Language detection.
200
Example Response 1
{"docSentiment": {"label": "positive", "mean": 0.1, "negative": 0, "positive": 0.1}, "entities": [{"id": "e0", "mentions": [{"id": "m0", "mwl": "Angela Merkel", "text": "Angela Merkel", "tokenIds": ["w0", "w1"]}, {"id": "m1", "mwl": "Angela Merkel", "text": "Angela Merkel", "tokenIds": ["w5", "w6"]}], "stdForm": "Angela Merkel", "type": "person"}, {"id": "e1", "mentions": [{"id": "m2", "mwl": "New Orleans", "text": "New Orleans", "tokenIds": ["w3", "w4"]}, {"id": "m3", "mwl": "New Orleans", "text": "New Orleans", "tokenIds": ["w13", "w14"]}], "stdForm": "New Orleans", "type": "location"}, {"id": "e2", "mentions": [{"id": "m4", "mwl": "Germany", "text": "Germany", "tokenIds": ["w8"]}], "stdForm": "Germany", "type": "location"}], "itemSentiments": {"e0": {"label": "neutral", "mean": 0, "negative": 0, "positive": 0}, "e1": {"label": "neutral", "mean": 0, "negative": 0, "positive": 0}, "e2": {"label": "neutral", "mean": 0, "negative": 0, "positive": 0}}, "language": {"detected": "en"}, "paragraphs": [{"id": "p0", "sentences": [{"id": "s0", "tokens": [{"id": "w0", "off": 0, "text": "Angela"}, {"id": "w1", "off": 7, "text": "Merkel"}, {"id": "w2", "off": 14, "text": "in"}, {"id": "w3", "off": 17, "text": "New"}, {"id": "w4", "off": 21, "text": "Orleans"}]}], "text": "Angela Merkel in New Orleans", "type": "TITLE"}, {"id": "p2", "sentences": [{"id": "s1", "tokens": [{"id": "w5", "off": 0, "text": "Angela"}, {"id": "w6", "off": 7, "text": "Merkel"}, {"id": "w7", "off": 14, "text": "left"}, {"id": "w8", "off": 19, "text": "Germany"}, {"id": "w9", "off": 26, "text": "."}]}, {"id": "s2", "tokens": [{"id": "w10", "off": 28, "text": "She"}, {"id": "w11", "off": 32, "text": "moved"}, {"id": "w12", "off": 38, "text": "to"}, {"id": "w13", "off": 41, "text": "New"}, {"id": "w14", "off": 45, "text": "Orleans"}, {"id": "w15", "off": 53, "text": "to"}, {"id": "w16", "off": 56, "text": "learn"}, {"id": "w17", "off": 62, "text": "jazz"}, {"id": "w18", "off": 66, "text": "."}]}, {"id": 
"s3", "tokens": [{"id": "w19", "off": 68, "text": "That"}, {"id": "w20", "off": 72, "text": "'s"}, {"id": "w21", "off": 75, "text": "amazing"}, {"id": "w22", "off": 82, "text": "."}]}], "text": "Angela Merkel left Germany. She moved to New Orleans to learn jazz. That's amazing.", "type": "BODY"}], "usedChars": 111, "version": "3.2.1"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"docSentiment": {"type": "object", "properties": {"label": {"type": "string"}, "mean": {"type": "number"}, "negative": {"type": "integer"}, "positive": {"type": "number"}}, "required": ["label", "mean", "negative", "positive"]}, "entities": {"type": "array", "items": {"type": "object", "properties": {"id": {"type": "string"}, "mentions": {"type": "array", "items": {"type": "object", "properties": {"id": {"type": "string"}, "mwl": {"type": "string"}, "text": {"type": "string"}, "tokenIds": {"type": "array", "items": {"type": "string"}}}, "required": ["id", "mwl", "text", "tokenIds"]}}, "stdForm": {"type": "string"}, "type": {"type": "string"}}, "required": ["id", "mentions", "stdForm", "type"]}}, "itemSentiments": {"type": "object", "properties": {"e0": {"type": "object", "properties": {"label": {"type": "string"}, "mean": {"type": "integer"}, "negative": {"type": "integer"}, "positive": {"type": "integer"}}, "required": ["label", "mean", "negative", "positive"]}, "e1": {"type": "object", "properties": {"label": {"type": "string"}, "mean": {"type": "integer"}, "negative": {"type": "integer"}, "positive": {"type": "integer"}}, "required": ["label", "mean", "negative", "positive"]}, "e2": {"type": "object", "properties": {"label": {"type": "string"}, "mean": {"type": "integer"}, "negative": {"type": "integer"}, "positive": {"type": "integer"}}, "required": ["label", "mean", "negative", "positive"]}}, "required": ["e0", "e1", "e2"]}, "language": {"type": "object", "properties": {"detected": {"type": "string"}}, "required": ["detected"]}, "paragraphs": {"type": "array", "items": {"type": "object", "properties": {"id": {"type": "string"}, "sentences": {"type": "array", "items": {"type": "object", "properties": {"id": {"type": "string"}, "tokens": {"type": "array", "items": {"type": "object", "properties": {"id": {"type": "string"}, "off": {"type": "integer"}, "text": {"type": "string"}}, 
"required": ["id", "off", "text"]}}}, "required": ["id", "tokens"]}}, "text": {"type": "string"}, "type": {"type": "string"}}, "required": ["id", "sentences", "text", "type"]}}, "usedChars": {"type": "integer"}, "version": {"type": "string"}}, "required": ["docSentiment", "entities", "itemSentiments", "language", "paragraphs", "usedChars", "version"]}
388f7c3b-8f89-464c-abb6-c24987956df2/ea4d0d9b-6669-42d3-9002-e92525276d2e/0/0
Text Extract
Extracting Text, Word Count, and Metadata from a Provided URL
9.1
Extract Text
Extracts text, word count and metadata from given URL
200
New Example
{"title": "A brief history of code search at GitHub | The GitHub Blog", "author": "Pavel Avgustinov", "hostname": "github.blog", "date": "2021-12-15", "categories": "Engineering;Product", "tags": "code search", "fingerprint": "iHIM4x4kA7Yb2twQKDrFkxTZkRY=", "id": null, "license": null, "raw-text": "We recently launched a technology preview for the next-generation code search we have been building. If you haven\u2019t signed up already, go ahead and do it now! We want to share more about our work on code exploration, navigation, search, and developer productivity. Recently, we substantially improved the precision of our code navigation for Python, and open-sourced the tools we developed for this. The stack graph formalism we developed will form the basis for precise code navigation support for more languages, and will even allow us to empower language communities to build and improve support for their own languages, similarly to how we accept contributions to github/linguist to expand GitHub\u2019s syntax highlighting capabilities. This blog post is part of the same series, and tells the story of why we built a new search engine optimized for code over the past 18 months. What challenges did we set ourselves? What is the historical context, and why could we not continue to build on off-the-shelf solutions? Read on to find out. What\u2019s our goal? We set out to provide an experience that could become an integral part of every developer\u2019s workflow. This has imposed hard constraints on the features, performance, and scalability of the system we\u2019re building. In particular: Searching code is different: many standard techniques (like stemming and tokenization) are at odds with the kind of searches we want to support for source code. Identifier names and punctuation matter. We need to be able to match substrings, not just whole \u201cwords\u201d. Specialized queries can require wildcards or even regular expressions. 
In addition, scoring heuristics tuned for natural language and web pages do not work well for source code. The scale of the corpus size: GitHub hosts over 200 million repositories, with over 61 million repositories created in the past year. We aim to support global queries across all of them, now and in the foreseeable future. The rate of change: over 170 million pull requests were merged in the past year, and this does not even account for code pushed directly to a branch. We would like our index to reflect the updated state of a repository within a few minutes of a push event. Search performance and latency: developers want their tools to be blazingly fast, and if we want to become part of every developer\u2019s workflow we have to satisfy that expectation. Despite the scale of our index, we want p95 query times to be (well) under a second. Most user queries, or queries scoped to a set of repositories or organizations, should be much faster than that. Over the years, GitHub has leveraged several off-the-shelf solutions, but as the requirements evolved over time, and the scale problem became ever more daunting, we became convinced that we had to build a bespoke search engine for code to achieve our objectives. The early years In the beginning, GitHub announced support for code search, as you might expect from a website with the tagline of \u201cSocial Code Hosting.\u201d And all was well. Except\u2026 you might note the disclaimer \u201cGitHub Public Code Search.\u201d This first iteration of global search worked by indexing all public documents into a Solr instance, which determined the results you got. While this nicely side-steps visibility and authorization concerns (everything is public!), not allowing private repositories to be searched would be a major functionality gap. The solution? Image credit: Patrick Linskey on Stack Overflow The repository page showed a \u201cSearch source code\u201d field. 
For public repos, this was still backed by the Solr index, scoped to the active repository. For private repos, it shelled out to git grep . Quite soon after shipping this, the then-in-beta Google Code Search began crawling public repositories on GitHub too, thus giving developers an alternative way of searching them. (Ultimately, Google Code Search was discontinued a few years later, though Russ Cox\u2019s excellent blog post on how it worked remains a great source of inspiration for successor projects.) Unfortunately, the different search experience for public and private repositories proved pretty confusing in practice. In addition, while git grep is a widely understood gold standard for how to search the contents of a Git repository, it operates without a dedicated index and hence works by scanning each document\u2014taking time proportional to the size of the repository. This could lead to resource exhaustion on the Git hosts, and to an unresponsive web page, making it necessary to introduce timeouts. Large private repositories remained unsearchable. Scaling with Elasticsearch By 2010, the search landscape was seeing considerable upheaval. Solr joined Lucene as a subproject, and Elasticsearch sprang up as a great way of building and scaling on top of Lucene. While Elasticsearch wouldn\u2019t hit a 1.0.0 release until February 2014, GitHub started experimenting with adopting it in 2011. An initial tentative experiment that indexed gists into Elasticsearch to make them searchable showed great promise, and before long it was clear that this was the future for all search on GitHub, including code search. Indeed in early 2013, just as Google Code Search was winding down, GitHub launched a whole new code search backed by an Elasticsearch cluster, consolidating the search experience for public and private repositories and updating the design. The search index covered almost five million repositories at launch. 
The scale of operations was definitely challenging, and within days or weeks of the launch GitHub experienced its first code search outages. The postmortem blog post is quite interesting on several levels, and it gives a glimpse of the cluster size (26 storage nodes with 2 TB of SSD storage each), utilization (67% of storage used), environment (Elasticsearch 0.19.9 and 0.20.2, Java 6 and 7), and indexing complexity (several months to backfill all repository data). Several bugs in Elasticsearch were identified and fixed, allowing GitHub to resume operations on the code search service. In November 2013, Elasticsearch published a case study on GitHub\u2019s code search cluster, again including some interesting data on scale. By that point, GitHub was indexing eight million repositories and responding to 5 search requests per second on average. In general, our experience working with Elasticsearch has been truly excellent. It powers all kinds of search on GitHub.com, doing an excellent job throughout. The code search index is by far the largest cluster we operate , and it has grown in scale by another 20-40x since the case study (to 162 nodes, comprising 5184 vCPUs, 40TB of RAM, and 1.25PB of backing storage, supporting a query load of 200 requests per second on average and indexing over 53 billion source files). It is a testament to the capabilities of Elasticsearch that we have got this far with essentially an off-the-shelf search engine. My code is not a novel Elasticsearch excelled at most search workloads, but almost immediately some wrinkles and friction started cropping up in connection with code search. Perhaps the most widely observed is this comment from the code search documentation: You can\u2019t use the following wildcard characters as part of your search query: . , : ; / \\ ` ' \" = * ! ? # $ & + ^ | ~ < > ( ) { } [ ] @ . The search will simply ignore these symbols. 
Source code is not like normal text, and those \u201cpunctuation\u201d characters actually matter. So why are they ignored by GitHub\u2019s production code search? It comes down to how our ingest pipeline for Elasticsearch is configured. Click here to read the full details When documents are added to an Elasticsearch index, they are passed through a process called text analysis, which converts unstructured text into a structured format optimized for search. Commonly, text analysis is configured to normalize away details that don\u2019t matter to search (for example, case folding the document to provide case-insensitive matches, or compressing runs of whitespace into one, or stemming words so that searching for \u201cingestion\u201d also finds \u201cingest pipeline\u201d). Ultimately, it performs tokenization, splitting the normalized input document into a list of tokens whose occurrence should be indexed. Many features and defaults available to text analysis are geared towards indexing natural-language text. To create an index for source code, we defined a custom text analyzer, applying a carefully selected set of normalizations (for example, case-folding and compressing whitespace make sense, but stemming does not). Then, we configured a custom pattern tokenizer, splitting the document using the following regular expression: %q_[.,:;/\\\\\\\\`'\"=*!@?#$&+^|~<>(){}\\[\\]\\s]_ . If you look closely, you\u2019ll recognise the list of characters that are ignored in your query string! The tokens resulting from this split then undergo a final round of splitting, extracting word parts delimited in CamelCase and snake_case as additional tokens to make them searchable. To illustrate, suppose we are ingesting a document containing this declaration: pub fn pthread_getname_np(tid: ::pthread_t, name: *mut ::c_char, len: ::size_t) -> ::c_int; . 
Our text analysis phase would pass the following list of tokens to Elasticsearch to index: pub fn pthread_getname_np pthread getname np tid pthread_t pthread t name mut c_char c char len size_t size t c_int c int . The special characters simply do not figure in the index; instead, the focus is on words recovered from identifiers and keywords. Designing a text analyzer is tricky, and involves hard trade-offs between index size and performance on the one hand, and the types of queries that can be answered on the other. The approach described above was the result of careful experimentation with different strategies, and represented a good compromise that has allowed us to launch and evolve code search for almost a decade. Another consideration for source code is substring matching. Suppose that I want to find out how to get the name of a thread in Rust, and I vaguely remember the function is called something like thread_getname . Searching for thread_getname org:rust-lang will give no results on our Elasticsearch index; meanwhile, if I cloned rust-lang/libc locally and used git grep , I would instantly find pthread_getname_np . More generally, power users reach for regular expression searches almost immediately. The earliest internal discussions of this that I can find date to October 2012, more than a year before the public release of Elasticsearch-based code search. We considered various ways of refining the Elasticsearch tokenization (in fact, we turn pthread_getname_np into the tokens pthread , getname , np , and pthread_getname_np \u2014if I had searched for pthread getname rather than thread_getname , I would have found the definition of pthread_getname_np ). We also evaluated trigram tokenization as described by Russ Cox. Our conclusion was summarized by a GitHub employee as follows: The trigram tokenization strategy is very powerful. It will yield wonderful search results at the cost of search time and index size. 
This is the approach I would like to take, but there is work to be done to ensure we can scale the ElasticSearch cluster to meet the needs of this strategy. Given the initial scale of the Elasticsearch cluster mentioned above, it wasn\u2019t viable to substantially increase storage and CPU requirements at the time, and so we launched with a best-effort tokenization tuned for code identifiers. Over the years, we kept coming back to this discussion. One promising idea for supporting special characters, inspired by some conversations with Elasticsearch experts at Elasticon 2016, was to use a Lucene tokenizer pattern that split code on runs of whitespace, but also on transitions from word characters to non-word characters (crucially, using lookahead/lookbehind assertions, without consuming any characters in this case; this would create a token for each special character). This would allow a search for \u201danswer >= 42\u201d to find the source text answer >= 42 (disregarding whitespace, but including the comparison). Experiments showed this approach took 43-100% longer to index code, and produced an index that was 18-28% larger than the baseline. Query performance also suffered: at best, it was as fast as the baseline, but some queries (especially those that used special characters, or otherwise split into many tokens) were up to 4x slower. In the end, a typical query slowdown of 2.1x seemed like too high a price to pay. By 2019, we had made significant investments in scaling our Elasticsearch cluster simply to keep up with the organic growth of the underlying code corpus. This gave us some performance headroom, and at GitHub Universe 2019 we felt confident enough to announce an \u201cexact-match search\u201d beta, which essentially followed the ideas above and was available for allow-listed repositories and organizations. We projected around a 1.3x increase in Elasticsearch resource usage for this index. 
The experience from the limited beta was very illuminating, but it proved too difficult to balance the additional resource requirements with ongoing growth of the index. In addition, even after the tokenization improvements, there were still numerous unsupported use cases (like substring searches and regular expressions) that we saw no path towards. Ultimately, exact-match search was sunset in just over half a year. Project Blackbird Actually, a major factor in pausing investment in exact-match search was a very promising research prototype search engine, internally code-named Blackbird. The project had been kicked off in early 2020, with the goal of determining which technologies would enable us to offer code search features at GitHub scale, and it showed a path forward that has led to the technology preview we launched last week. Let\u2019s recall our ambitious objectives: comprehensively index all source code on GitHub, support incremental indexing and document deletion, and provide lightning-fast exact-match and regex searches (specifically, a p95 of under a second for global queries, with correspondingly lower targets for org-scoped and repo-scoped searches). Do all this without using substantially more resources than the existing Elasticsearch cluster. Integrate other sources of rich code intelligence information available on GitHub. Easy, right? We found that no off-the-shelf code indexing solution could satisfy those requirements. Russ Cox\u2019s trigram index for code search only stores document IDs rather than positions in the posting lists; while that makes it very space-efficient, performance degrades rapidly with a large corpus size. Several successor projects augment the posting lists with position information or other data; this comes at a large storage and RAM cost (Zoekt reports a typical index size of 3.5x corpus size) that makes it too expensive at our scale. 
The sharding strategy is also crucial, as it determines how evenly distributed the load is. And any significant per-repo overhead becomes prohibitive when considering scaling the index to all repositories on GitHub. In the end, Blackbird convinced us to go all-in on building a custom search engine for code. Written in Rust, it creates and incrementally maintains a code search index sharded by Git blob object ID; this gives us substantial storage savings via deduplication and guarantees a uniform load distribution across shards (something that classic approaches sharding by repo or org, like our existing Elasticsearch cluster, lack). It supports regular expression searches over document content and can capture additional metadata\u2014for example, it also maintains an index of symbol definitions. It meets our performance goals: while it\u2019s always possible to come up with a pathological search that misses the index, it\u2019s exceptionally fast for \u201creal\u201d searches. The index is also extremely compact, weighing in at about \u2154 of the (deduplicated) corpus size. One crucial realization was that if we want to index all code on GitHub into a single index, result scoring and ranking are absolutely critical; you really need to find useful documents first. Blackbird implements a number of heuristics, some code-specific (ranking up definitions and penalizing test code), and others general-purpose (ranking up complete matches and penalizing partial matches, so that when searching for thread an identifier called thread will rank above thread_id , which will rank above pthread_getname_np ). Of course, the repository in which a match occurs also influences ranking. We want to show results from popular open-source repositories before a random match in a long-forgotten repository created as a test. All of this is very much a work in progress. 
We are continuously tuning our scoring and ranking heuristics, optimizing the index and query process, and iterating on the query language. We have a long list of features to add. But we want to get what we have today into the hands of users, so that your feedback can shape our priorities. We have more to share about the work we\u2019re doing to enhance developer productivity at GitHub, so stay tuned. The shoulders of giants Modern software development is about collaboration and about leveraging the power of open source. Our new code search is no different. We wouldn\u2019t have gotten anywhere close to its current state without the excellent work of tens of thousands of open source contributors and maintainers who built the tools we use, the libraries we depend on, and whose insightful ideas we could adopt and develop. A small selection of shout-outs and thank-yous: The communities of the languages and frameworks we build on: Rust, Go, and React. Thanks for enabling us to move fast. @BurntSushi: we are inspired by Andrew\u2019s prolific output, and his work on the regex and aho-corasick crates in particular has been invaluable to us. @lemire\u2019s work on fast bit packing is integral to our design, and we drew a lot of inspiration from his optimization work more broadly (especially regarding the use of SIMD). Check out his blog for more. Enry and Tree-sitter, which power Blackbird\u2019s language detection and symbol extraction, respectively.", "source": "https://github.blog/2021-12-15-a-brief-history-of-code-search-at-github/", "source-hostname": "The GitHub Blog", "excerpt": "This blog post tells the story of why we built a new search engine optimized for code, including historical context and challenges along the way.", "text": "We recently launched a technology preview for the next-generation code search we have been building. 
If you haven\u2019t signed up already, go ahead and do it now!\nWe want to share more about our work on code exploration, navigation, search, and developer productivity. Recently, we substantially improved the precision of our code navigation for Python, and open-sourced the tools we developed for this. The stack graph formalism we developed will form the basis for precise code navigation support for more languages, and will even allow us to empower language communities to build and improve support for their own languages, similarly to how we accept contributions to github/linguist to expand GitHub\u2019s syntax highlighting capabilities.\nThis blog post is part of the same series, and tells the story of why we built a new search engine optimized for code over the past 18 months. What challenges did we set ourselves? What is the historical context, and why could we not continue to build on off-the-shelf solutions? Read on to find out.\nWhat\u2019s our goal?\nWe set out to provide an experience that could become an integral part of every developer\u2019s workflow. This has imposed hard constraints on the features, performance, and scalability of the system we\u2019re building. In particular:\n- Searching code is different: many standard techniques (like stemming and tokenization) are at odds with the kind of searches we want to support for source code. Identifier names and punctuation matter. We need to be able to match substrings, not just whole \u201cwords\u201d. Specialized queries can require wildcards or even regular expressions. In addition, scoring heuristics tuned for natural language and web pages do not work well for source code.\n- The scale of the corpus size: GitHub hosts over 200 million repositories, with over 61 million repositories created in the past year. 
We aim to support global queries across all of them, now and in the foreseeable future.\n- The rate of change: over 170 million pull requests were merged in the past year, and this does not even account for code pushed directly to a branch. We would like our index to reflect the updated state of a repository within a few minutes of a push event.\n- Search performance and latency: developers want their tools to be blazingly fast, and if we want to become part of every developer\u2019s workflow we have to satisfy that expectation. Despite the scale of our index, we want p95 query times to be (well) under a second. Most user queries, or queries scoped to a set of repositories or organizations, should be much faster than that.\nOver the years, GitHub has leveraged several off-the-shelf solutions, but as the requirements evolved over time, and the scale problem became ever more daunting, we became convinced that we had to build a bespoke search engine for code to achieve our objectives.\nThe early years\nIn the beginning, GitHub announced support for code search, as you might expect from a website with the tagline of \u201cSocial Code Hosting.\u201d And all was well.\nExcept\u2026 you might note the disclaimer \u201cGitHub Public Code Search.\u201d This first iteration of global search worked by indexing all public documents into a Solr instance, which determined the results you got. While this nicely side-steps visibility and authorization concerns (everything is public!), not allowing private repositories to be searched would be a major functionality gap. The solution?\nImage credit: Patrick Linskey on Stack Overflow\nThe repository page showed a \u201cSearch source code\u201d field. For public repos, this was still backed by the Solr index, scoped to the active repository. 
For private repos, it shelled out to\ngit grep.\nQuite soon after shipping this, the then-in-beta Google Code Search began crawling public repositories on GitHub too, thus giving developers an alternative way of searching them. (Ultimately, Google Code Search was discontinued a few years later, though Russ Cox\u2019s excellent blog post on how it worked remains a great source of inspiration for successor projects.)\nUnfortunately, the different search experience for public and private repositories proved pretty confusing in practice. In addition, while\ngit grep is a widely understood gold standard for how to search the contents of a Git repository, it operates without a dedicated index and hence works by scanning each document\u2014taking time proportional to the size of the repository. This could lead to resource exhaustion on the Git hosts, and to an unresponsive web page, making it necessary to introduce timeouts. Large private repositories remained unsearchable.\nScaling with Elasticsearch\nBy 2010, the search landscape was seeing considerable upheaval. Solr joined Lucene as a subproject, and Elasticsearch sprang up as a great way of building and scaling on top of Lucene. While Elasticsearch wouldn\u2019t hit a 1.0.0 release until February 2014, GitHub started experimenting with adopting it in 2011. An initial tentative experiment that indexed gists into Elasticsearch to make them searchable showed great promise, and before long it was clear that this was the future for all search on GitHub, including code search.\nIndeed in early 2013, just as Google Code Search was winding down, GitHub launched a whole new code search backed by an Elasticsearch cluster, consolidating the search experience for public and private repositories and updating the design. The search index covered almost five million repositories at launch.\nThe scale of operations was definitely challenging, and within days or weeks of the launch GitHub experienced its first code search outages. 
The postmortem blog post is quite interesting on several levels, and it gives a glimpse of the cluster size (26 storage nodes with 2 TB of SSD storage each), utilization (67% of storage used), environment (Elasticsearch 0.19.9 and 0.20.2, Java 6 and 7), and indexing complexity (several months to backfill all repository data). Several bugs in Elasticsearch were identified and fixed, allowing GitHub to resume operations on the code search service.\nIn November 2013, Elasticsearch published a case study on GitHub\u2019s code search cluster, again including some interesting data on scale. By that point, GitHub was indexing eight million repositories and responding to 5 search requests per second on average.\nIn general, our experience working with Elasticsearch has been truly excellent. It powers all kinds of search on GitHub.com, doing an excellent job throughout. The code search index is by far the largest cluster we operate , and it has grown in scale by another 20-40x since the case study (to 162 nodes, comprising 5184 vCPUs, 40TB of RAM, and 1.25PB of backing storage, supporting a query load of 200 requests per second on average and indexing over 53 billion source files). It is a testament to the capabilities of Elasticsearch that we have got this far with essentially an off-the-shelf search engine.\nMy code is not a novel\nElasticsearch excelled at most search workloads, but almost immediately some wrinkles and friction started cropping up in connection with code search. Perhaps the most widely observed is this comment from the code search documentation:\nYou can\u2019t use the following wildcard characters as part of your search query:\n. , : ; / \\ ` ' \" = * ! ? # $ & + ^ | ~ < > ( ) { } [ ] @. The search will simply ignore these symbols.\nSource code is not like normal text, and those \u201cpunctuation\u201d characters actually matter. So why are they ignored by GitHub\u2019s production code search? 
It comes down to how our ingest pipeline for Elasticsearch is configured.\nClick here to read the full details\nWhen documents are added to an Elasticsearch index, they are passed through a process called text analysis, which converts unstructured text into a structured format optimized for search. Commonly, text analysis is configured to normalize away details that don\u2019t matter to search (for example, case folding the document to provide case-insensitive matches, or compressing runs of whitespace into one, or stemming words so that searching for \u201cingestion\u201d also finds \u201cingest pipeline\u201d). Ultimately, it performs tokenization, splitting the normalized input document into a list of tokens whose occurrence should be indexed.\nMany features and defaults available to text analysis are geared towards indexing natural-language text. To create an index for source code, we defined a custom text analyzer, applying a carefully selected set of normalizations (for example, case-folding and compressing whitespace make sense, but stemming does not). Then, we configured a custom pattern tokenizer, splitting the document using the following regular expression:\n%q_[.,:;/\\\\\\\\`'\"=*!@?#$&+^|~<>(){}\\[\\]\\s]_. If you look closely, you\u2019ll recognise the list of characters that are ignored in your query string!\nThe tokens resulting from this split then undergo a final round of splitting, extracting word parts delimited in CamelCase and snake_case as additional tokens to make them searchable. To illustrate, suppose we are ingesting a document containing this declaration:\npub fn pthread_getname_np(tid: ::pthread_t, name: *mut ::c_char, len: ::size_t) -> ::c_int;. Our text analysis phase would pass the following list of tokens to Elasticsearch to index:\npub fn pthread_getname_np pthread getname np tid pthread_t pthread t name mut c_char c char len size_t size t c_int c int. 
The special characters simply do not figure in the index; instead, the focus is on words recovered from identifiers and keywords.\nDesigning a text analyzer is tricky, and involves hard trade-offs between index size and performance on the one hand, and the types of queries that can be answered on the other. The approach described above was the result of careful experimentation with different strategies, and represented a good compromise that has allowed us to launch and evolve code search for almost a decade.\nAnother consideration for source code is substring matching. Suppose that I want to find out how to get the name of a thread in Rust, and I vaguely remember the function is called something like\nthread_getname. Searching for\nthread_getname org:rust-lang will give no results on our Elasticsearch index; meanwhile, if I cloned rust-lang/libc locally and used\ngit grep, I would instantly find\npthread_getname_np. More generally, power users reach for regular expression searches almost immediately.\nThe earliest internal discussions of this that I can find date to October 2012, more than a year before the public release of Elasticsearch-based code search. We considered various ways of refining the Elasticsearch tokenization (in fact, we turn\npthread_getname_np into the tokens\npthread,\ngetname,\nnp, and\npthread_getname_np\u2014if I had searched for\npthread getname rather than\nthread_getname, I would have found the definition of\npthread_getname_np). We also evaluated trigram tokenization as described by Russ Cox. Our conclusion was summarized by a GitHub employee as follows:\nThe trigram tokenization strategy is very powerful. It will yield wonderful search results at the cost of search time and index size. 
This is the approach I would like to take, but there is work to be done to ensure we can scale the ElasticSearch cluster to meet the needs of this strategy.\nGiven the initial scale of the Elasticsearch cluster mentioned above, it wasn\u2019t viable to substantially increase storage and CPU requirements at the time, and so we launched with a best-effort tokenization tuned for code identifiers.\nOver the years, we kept coming back to this discussion. One promising idea for supporting special characters, inspired by some conversations with Elasticsearch experts at Elasticon 2016, was to use a Lucene tokenizer pattern that split code on runs of whitespace, but also on transitions from word characters to non-word characters (crucially, using lookahead/lookbehind assertions, without consuming any characters in this case; this would create a token for each special character). This would allow a search for\n\u201danswer >= 42\u201d to find the source text\nanswer >= 42 (disregarding whitespace, but including the comparison). Experiments showed this approach took 43-100% longer to index code, and produced an index that was 18-28% larger than the baseline. Query performance also suffered: at best, it was as fast as the baseline, but some queries (especially those that used special characters, or otherwise split into many tokens) were up to 4x slower. In the end, a typical query slowdown of 2.1x seemed like too high a price to pay.\nBy 2019, we had made significant investments in scaling our Elasticsearch cluster simply to keep up with the organic growth of the underlying code corpus. This gave us some performance headroom, and at GitHub Universe 2019 we felt confident enough to announce an \u201cexact-match search\u201d beta, which essentially followed the ideas above and was available for allow-listed repositories and organizations. We projected around a 1.3x increase in Elasticsearch resource usage for this index. 
The experience from the limited beta was very illuminating, but it proved too difficult to balance the additional resource requirements with ongoing growth of the index. In addition, even after the tokenization improvements, there were still numerous unsupported use cases (like substring searches and regular expressions) that we saw no path towards. Ultimately, exact-match search was sunset in just over half a year.\nProject Blackbird\nActually, a major factor in pausing investment in exact-match search was a very promising research prototype search engine, internally code-named Blackbird. The project had been kicked off in early 2020, with the goal of determining which technologies would enable us to offer code search features at GitHub scale, and it showed a path forward that has led to the technology preview we launched last week.\nLet\u2019s recall our ambitious objectives: comprehensively index all source code on GitHub, support incremental indexing and document deletion, and provide lightning-fast exact-match and regex searches (specifically, a p95 of under a second for global queries, with correspondingly lower targets for org-scoped and repo-scoped searches). Do all this without using substantially more resources than the existing Elasticsearch cluster. Integrate other sources of rich code intelligence information available on GitHub. Easy, right?\nWe found that no off-the-shelf code indexing solution could satisfy those requirements. Russ Cox\u2019s trigram index for code search only stores document IDs rather than positions in the posting lists; while that makes it very space-efficient, performance degrades rapidly with a large corpus size. Several successor projects augment the posting lists with position information or other data; this comes at a large storage and RAM cost (Zoekt reports a typical index size of 3.5x corpus size) that makes it too expensive at our scale. 
The sharding strategy is also crucial, as it determines how evenly distributed the load is. And any significant per-repo overhead becomes prohibitive when considering scaling the index to all repositories on GitHub.\nIn the end, Blackbird convinced us to go all-in on building a custom search engine for code. Written in Rust, it creates and incrementally maintains a code search index sharded by Git blob object ID; this gives us substantial storage savings via deduplication and guarantees a uniform load distribution across shards (something that classic approaches sharding by repo or org, like our existing Elasticsearch cluster, lack). It supports regular expression searches over document content and can capture additional metadata\u2014for example, it also maintains an index of symbol definitions. It meets our performance goals: while it\u2019s always possible to come up with a pathological search that misses the index, it\u2019s exceptionally fast for \u201creal\u201d searches. The index is also extremely compact, weighing in at about \u2154 of the (deduplicated) corpus size.\nOne crucial realization was that if we want to index all code on GitHub into a single index, result scoring and ranking are absolutely critical; you really need to find useful documents first. Blackbird implements a number of heuristics, some code-specific (ranking up definitions and penalizing test code), and others general-purpose (ranking up complete matches and penalizing partial matches, so that when searching for\nthread an identifier called\nthread will rank above\nthread_id, which will rank above\npthread_getname_np). Of course, the repository in which a match occurs also influences ranking. We want to show results from popular open-source repositories before a random match in a long-forgotten repository created as a test.\nAll of this is very much a work in progress. 
We are continuously tuning our scoring and ranking heuristics, optimizing the index and query process, and iterating on the query language. We have a long list of features to add. But we want to get what we have today into the hands of users, so that your feedback can shape our priorities.\nWe have more to share about the work we\u2019re doing to enhance developer productivity at GitHub, so stay tuned.\nThe shoulders of giants\nModern software development is about collaboration and about leveraging the power of open source. Our new code search is no different. We wouldn\u2019t have gotten anywhere close to its current state without the excellent work of tens of thousands of open source contributors and maintainers who built the tools we use, the libraries we depend on, and whose insightful ideas we could adopt and develop. A small selection of shout-outs and thank-yous:\n- The communities of the languages and frameworks we build on: Rust, Go, and React. Thanks for enabling us to move fast.\n- @BurntSushi: we are inspired by Andrew\u2019s prolific output, and his work on the\nregexand\naho-corasickcrates in particular has been invaluable to us.\n- @lemire\u2019s work on fast bit packing is integral to our design, and we drew a lot of inspiration from his optimization work more broadly (especially regarding the use of SIMD). Check out his blog for more.\n- Enry and Tree-sitter, which power Blackbird\u2019s language detection and symbol extraction, respectively.", "comments": "", "word-count": 2887}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"title": {"type": "string"}, "author": {"type": "string"}, "hostname": {"type": "string"}, "date": {"type": "string"}, "categories": {"type": "string"}, "tags": {"type": "string"}, "fingerprint": {"type": "string"}, "id": {"type": "null"}, "license": {"type": "null"}, "raw-text": {"type": "string"}, "source": {"type": "string"}, "source-hostname": {"type": "string"}, "excerpt": {"type": "string"}, "text": {"type": "string"}, "comments": {"type": "string"}, "word-count": {"type": "integer"}}, "required": ["author", "categories", "comments", "date", "excerpt", "fingerprint", "hostname", "id", "license", "raw-text", "source", "source-hostname", "tags", "text", "title", "word-count"]}
8ef1f9eb-9070-469f-81be-29b2f676fd61/58dc518c-7ca4-4ad0-8cb6-44b587da417d/0/0
Keyword Extraction
Keyword extraction API - MULTILANGUAGE provides most relevant keywords to the text which would be useful in getting insights, knowledge about the text, meaningful context etc. There are numerous languages this particular api supports. Fuzzy selection is implemented where multiple Algorithms/approaches work to select best keywords. Use proper "LANGUAGE CODE", for particular language. Various supported languages are : Arabic Azerbaijani Basque Bengali Catalan Chinese Danish Dutch Engli...
7.7
Keyword Extraction for text
Send text and no of required max keywords in the API. Give proper text as input.
200
New Example
{"key1": "value", "key2": "value"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"key1": {"type": "string"}, "key2": {"type": "string"}}, "required": ["key1", "key2"]}
ac296abd-83ae-4f9f-af4a-13b7ccf21031/303a284c-4d76-4b51-9ebc-f6837b0d6394/0/0
DeSpam Spam Filter
State of the art spam filtering apis with spam score. Despam strives to remain carbon neutral at the same time without compromising with the results of our API.
null
Spam Filter
Provide the input in json and it will return spamProabability in the response.
200
New Example
{"isSpam": true, "spamProbability": 0.61}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"isSpam": {"type": "boolean"}, "spamProbability": {"type": "number"}}, "required": ["isSpam", "spamProbability"]}
e10d65a8-427a-4f06-b1e0-d632ac021f48/11f085b7-891c-4e0a-bfa9-88155e61b71f/0/0
Skill Ranking
Skill Ranking is an API that is used to rank the skills as per the weightage.
null
Resume Skill Ranking
Resume Skill Ranking is an API that is used to rank the skills within the resume as per the weightage and return the skill list along with the weightage.
200
New Example
{"Skills": [{"Skill": "Application Programming", "Score": "35.42"}, {"Skill": "Software Services", "Score": "35.42"}, {"Skill": "Programming", "Score": "35.42"}, {"Skill": "Saas", "Score": "35.42"}, {"Skill": "Coding", "Score": "35.42"}, {"Skill": "Interpreting Medical Research", "Score": "29.08"}, {"Skill": "Market Research and Analysis", "Score": "29.08"}, {"Skill": "PowerPoint Presentations", "Score": "29.08"}, {"Skill": "Clinical Research", "Score": "29.08"}, {"Skill": "Medical Research", "Score": "29.08"}, {"Skill": "Giving Patients Injections Or Medications", "Score": "26.74"}, {"Skill": "Scheduling Patient Appointments", "Score": "26.74"}, {"Skill": "Best Practices", "Score": "26.30"}, {"Skill": "Maintenance", "Score": "26.30"}, {"Skill": "Automation", "Score": "26.30"}, {"Skill": "Contract", "Score": "26.30"}, {"Skill": "Design", "Score": "26.30"}, {"Skill": "Preparing Blood Samples For Laboratory Tests", "Score": "26.30"}, {"Skill": "Preparing Patients For Examination", "Score": "26.30"}, {"Skill": "Diagnose and Treat Human Injuries", "Score": "26.30"}, {"Skill": "Insurance", "Score": "26.30"}, {"Skill": "Billing", "Score": "26.30"}, {"Skill": "Science", "Score": "26.30"}, {"Skill": "Fax", "Score": "26.30"}, {"Skill": "Maintaining Medical Records", "Score": "23.09"}, {"Skill": "Catheterization", "Score": "23.09"}, {"Skill": "Drawing Blood", "Score": "23.09"}, {"Skill": "Dentistry", "Score": "23.09"}, {"Skill": "GOOD COMMUNICATION", "Score": "21.56"}, {"Skill": "Communication Skills", "Score": "21.56"}, {"Skill": "Good Communication", "Score": "21.56"}, {"Skill": "Communication", "Score": "21.56"}, {"Skill": "Examinations", "Score": "21.56"}, {"Skill": "Automation Testing", "Score": "20.83"}, {"Skill": "Manual Testing", "Score": "20.83"}, {"Skill": "Methodology", "Score": "17.53"}, {"Skill": "Https", "Score": "15.97"}]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"Skills": {"type": "array", "items": {"type": "object", "properties": {"Skill": {"type": "string"}, "Score": {"type": "string"}}, "required": ["Score", "Skill"]}}}, "required": ["Skills"]}
e10d65a8-427a-4f06-b1e0-d632ac021f48/7e635cc7-35ef-4fb8-969c-87698fcac3ba/0/0
Skill Ranking
Skill Ranking is an API that is used to rank the skills as per the weightage.
null
JD Skill Ranking
JD Skill Ranking is an API that is used to rank the skills within the job description as per the weightage and return the skill list along with the weightage.
200
New Example
{"Skills": [{"Skill": "SOFTWARE DEVELOPMENT", "Score": "82.00"}, {"Skill": "Software Development Life Cycle", "Score": "82.00"}, {"Skill": "Core Java", "Score": "80.00"}, {"Skill": "Multithreading", "Score": "80.00"}, {"Skill": "Spring", "Score": "80.00"}, {"Skill": "Automation Testing", "Score": "76.88"}, {"Skill": "Application Programming", "Score": "75.00"}, {"Skill": "Computer Science", "Score": "75.00"}, {"Skill": "Programming", "Score": "75.00"}, {"Skill": "Saas", "Score": "75.00"}, {"Skill": "Scripting", "Score": "75.00"}, {"Skill": "Software Services", "Score": "75.00"}, {"Skill": "Unified Functional Testing", "Score": "75.00"}, {"Skill": "Java", "Score": "75.00"}, {"Skill": "Bug Tracking", "Score": "71.88"}, {"Skill": "Functional Testing", "Score": "71.88"}, {"Skill": "Manual Testing", "Score": "71.88"}, {"Skill": "QTP", "Score": "71.88"}, {"Skill": "Regression Testing", "Score": "71.88"}, {"Skill": "Software Testing", "Score": "71.88"}, {"Skill": "Test Automation", "Score": "71.88"}, {"Skill": "Test Cases", "Score": "71.88"}, {"Skill": "Test Engineering", "Score": "71.88"}, {"Skill": "Test Plans", "Score": "71.88"}, {"Skill": "Test Reports", "Score": "71.88"}, {"Skill": "Test Requirements", "Score": "71.88"}, {"Skill": "Testing", "Score": "71.88"}, {"Skill": "Validation Test", "Score": "71.88"}, {"Skill": "Hibernate", "Score": "33.13"}, {"Skill": "Capital Market", "Score": "20.39"}, {"Skill": "Self Motivated", "Score": "16.56"}, {"Skill": "Best Practices", "Score": "15.59"}, {"Skill": "Consulting", "Score": "15.59"}, {"Skill": "Design", "Score": "15.59"}, {"Skill": "Designing", "Score": "15.59"}, {"Skill": "Maintenance", "Score": "15.39"}, {"Skill": "Automation", "Score": "15.39"}, {"Skill": "Immigration", "Score": "15.29"}, {"Skill": "Computer Forensic", "Score": "15.29"}, {"Skill": "Client Handling", "Score": "15.20"}, {"Skill": "Analysis", "Score": "11.56"}, {"Skill": "COMMUNICATION SKILL", "Score": "11.56"}, {"Skill": "Mentors", "Score": "11.56"}, 
{"Skill": "Investigator", "Score": "10.39"}, {"Skill": "Ability To Read", "Score": "10.20"}, {"Skill": "Confidence", "Score": "5.39"}]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"Skills": {"type": "array", "items": {"type": "object", "properties": {"Skill": {"type": "string"}, "Score": {"type": "string"}}, "required": ["Score", "Skill"]}}}, "required": ["Skills"]}
ac288311-037e-429c-b42f-3937c3124019/ae211be3-55e8-4793-9e2e-0672113bbd04/0/0
Paraphrase Genius
No word/character limit: ✓ Multi-language input: ✓ Multiple sentences per input: ✓ Average under 2s response times: ✓ State-of-the-art paraphrasing: ✓ Tailoring API's responses to suit your needs: ✓ Hidden Limitations: ⨉ Need to paraphrase your text and get multiple paraphrased sentences per sentence? Look no further! Our craftly built paraphrasing API can handle the most complex queries in seconds. No hidden limitations. No word limit. Best quality in class, cheap pricing, and no nonsense. ...
9.7
Paraphrase
This is the default paraphrase endpoint.
200
Multiple Results
[["Using pre-existing functional blocks, RapidAPI is a backend development platform that enables app and web developers to build the backend for their applications.", "A backend development platform called RapidAPI enables app and web developers to build the backend for their applications using pre-existing functional blocks.", "RapidAPI is a platform for backend development that enables app and web developers to build the backend for their applications using pre-existing functional blocks.", "RapidAPI is a backend development platform that enables app and web developers to build the backend for their applications using pre-existing functional blocks."], ["Iddo Gino and Mickey Haslavsky established the business in Tel Aviv in January 2015.", "In January 2015, Iddo Gino and Mickey Haslavsky established the business in Tel Aviv.", "Iddo Gino and Mickey Haslavsky founded the business on January 1, 2015, in Tel Aviv.", "Iddo Gino and Mickey Haslavsky founded the business in Tel Aviv in January 2015."], ["Since then, it has received investments from angel investors such as Marius Nacht, a co-founder of Checkpoint, and Dov Moran, the founder of M-Systems, among others.", "Since then, it has received funding from angel investors such as Dov Moran, the founder of M-Systems, and Marius Nacht, the co-founder of Checkpoint, among others.", "Since then, it has attracted the support of angel investors, including Dov Moran (founder of M-Systems) and Marius Nacht (co-founder of Checkpoint), among others.", "Since then, it has received investments from angel investors, including Dov Moran (founder of M-Systems) and Marius Nacht (co-founder of Checkpoint), among others."], ["Additionally, the 500 Startups accelerator, which is currently participating in its 16th batch in San Francisco, has invested in RapidAPI.", "Additionally, RapidAPI has received funding from the 16th San Francisco batch of the 500 Startups accelerator.", "The 500 Startups accelerator, which is currently in its 
16th batch in San Francisco, has also invested in RapidAPI.", "Additionally, the 500 Startups accelerator, which is currently in its 16th batch in San Francisco, has invested in RapidAPI."]]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "array", "items": {"type": "string"}}}
ac288311-037e-429c-b42f-3937c3124019/ae211be3-55e8-4793-9e2e-0672113bbd04/0/1
Paraphrase Genius
No word/character limit: ✓ Multi-language input: ✓ Multiple sentences per input: ✓ Average under 2s response times: ✓ State-of-the-art paraphrasing: ✓ Tailoring API's responses to suit your needs: ✓ Hidden Limitations: ⨉ Need to paraphrase your text and get multiple paraphrased sentences per sentence? Look no further! Our craftly built paraphrasing API can handle the most complex queries in seconds. No hidden limitations. No word limit. Best quality in class, cheap pricing, and no nonsense. ...
9.7
Paraphrase
This is the default paraphrase endpoint.
200
Single Result
["Using pre-existing functional blocks, RapidAPI is a backend development platform that enables app and web developers to build the backend for their applications. Iddo Gino and Mickey Haslavsky founded the business on January 1, 2015, in Tel Aviv. Since then, it has received investments from angel investors such as Marius Nacht, a co-founder of Checkpoint, and Dov Moran, the founder of M-Systems, among others. Additionally, RapidAPI has received funding from the 16th San Francisco batch of the 500 Startups accelerator."]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "string"}}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/91b0614e-c90e-46ff-b818-35d751399197/1/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
predict_predict_emotion_post
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/2706a248-626c-41ce-9c32-d3d6e18e8c69/1/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
predictMany_predict_emotion_many_post
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/ea0950cc-536a-481c-87c5-1dbca6ed9330/0/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
Predict Emotions
Predict emotions for input
422
Example_1
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/ea0950cc-536a-481c-87c5-1dbca6ed9330/1/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
Predict Emotions
Predict emotions for input
200
Example_1
{"predictions": [{"admiration": 0.05039341375231743, "amusement": 0.05932464450597763, "anger": 0.018408922478556633, "annoyance": 0.030141789466142654, "approval": 0.05154214799404144, "caring": 0.04556211084127426, "confusion": 0.011223535984754562, "curiosity": 0.1425555795431137, "desire": 0.03363674134016037, "disappointment": 0.012111042626202106, "disapproval": 0.032884787768125534, "disgust": 0.022325068712234497, "embarrassment": 0.027485257014632225, "excitement": 0.020106453448534012, "fear": 0.017465250566601753, "gratitude": 0.019867800176143646, "grief": 0.01665147766470909, "joy": 0.01269635558128357, "love": 0.009675121866166592, "nervousness": 0.03101506270468235, "optimism": 0.021099019795656204, "pride": 0.0427837148308754, "realization": 0.12767428159713745, "relief": 0.03162908926606178, "remorse": 0.02234966680407524, "sadness": 0.014348126947879791, "surprise": 0.03215537220239639, "neutral": 0.04288807511329651}]}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"predictions": {"type": "array", "items": {"type": "object", "properties": {"admiration": {"type": "number"}, "amusement": {"type": "number"}, "anger": {"type": "number"}, "annoyance": {"type": "number"}, "approval": {"type": "number"}, "caring": {"type": "number"}, "confusion": {"type": "number"}, "curiosity": {"type": "number"}, "desire": {"type": "number"}, "disappointment": {"type": "number"}, "disapproval": {"type": "number"}, "disgust": {"type": "number"}, "embarrassment": {"type": "number"}, "excitement": {"type": "number"}, "fear": {"type": "number"}, "gratitude": {"type": "number"}, "grief": {"type": "number"}, "joy": {"type": "number"}, "love": {"type": "number"}, "nervousness": {"type": "number"}, "optimism": {"type": "number"}, "pride": {"type": "number"}, "realization": {"type": "number"}, "relief": {"type": "number"}, "remorse": {"type": "number"}, "sadness": {"type": "number"}, "surprise": {"type": "number"}, "neutral": {"type": "number"}}, "required": ["admiration", "amusement", "anger", "annoyance", "approval", "caring", "confusion", "curiosity", "desire", "disappointment", "disapproval", "disgust", "embarrassment", "excitement", "fear", "gratitude", "grief", "joy", "love", "nervousness", "neutral", "optimism", "pride", "realization", "relief", "remorse", "sadness", "surprise"]}}}, "required": ["predictions"]}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/6f469d04-c297-487e-a1eb-f327dbc9a116/1/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
predict_predict_emotions___text__get
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/9ab128fe-7b51-4aa3-a03f-8941d43102a6/1/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
predictMany_predict_emotions_many___texts__get
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/ae15380a-2291-4b93-b3e8-8a21ee09c936/1/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
predictMany_predict_emotions_many_post
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/8e527129-8b7d-44ee-bdf1-d1aff43c78c3/1/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
predict_predict_emotion___text__get
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
6ae26e73-e4d4-4d34-bb78-af9a769580bc/b0f6da65-27ff-48c1-be2b-e664be5553e6/1/0
Text To Emotions
Text To Emotions API, a powerful text analysis tool designed to unveil the underlying emotions in any given text. This innovative API takes textual input and processes it to provide a comprehensive emotional profile with values ranging from 0 to 1, enabling a more nuanced understanding of the emotions conveyed.
6.4
predictMany_predict_emotion_many___texts__get
422
null
{"detail": [{"loc": [], "msg": "", "type": ""}]}
{"title": "HTTPValidationError", "type": "object", "properties": {"detail": {"title": "Detail", "type": "array", "items": {"title": "ValidationError", "required": ["loc", "msg", "type"], "type": "object", "properties": {"loc": {"title": "Location", "type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}}, "msg": {"title": "Message", "type": "string"}, "type": {"title": "Error Type", "type": "string"}}}}}}
80fdc577-ef7d-42c5-9f3a-bfa6d908e319/d05bc955-5411-4312-a191-ec0bc2c422bc/0/0
Text Similarity Checker
Comparison of similarity between two texts using popular algorithms.
6.8
check_similarity
Please provide "Text 1" and "Text 2" for the comparison.
200
New Example
{"text_as_is": {"cosine_similarity": 0.5788825817770245, "jaccard_similarity": 0.2702702702702703, "fuzzy_ratio": 60, "levenshtein_distance": 421}, "without_stopwords": {"cosine_similarity": 0.33715035698790374, "jaccard_similarity": 0.2736842105263158, "fuzzy_ratio": 59, "levenshtein_distance": 344}, "lemmatized_text_without_stopwords": {"cosine_similarity": 0.338491021811274, "jaccard_similarity": 0.2717391304347826, "fuzzy_ratio": 59, "levenshtein_distance": 319, "total_common_lemmas": 22, "common_lemmas": ["impact", "photosynthesis", "influence", "solar", "earth'\\", "technological", "life", "celestial", "display", "mass", "magnetic", "storm", "``", "corona", "eclipse", "sun", "dynamic", "insight", "captivate", "system", "flare", "cosmo"], "total_common_lemmatized_bigrams": 2, "common_lemmatized_bigrams": ["life celestial", "impact photosynthesis"]}}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"text_as_is": {"type": "object", "properties": {"cosine_similarity": {"type": "number"}, "jaccard_similarity": {"type": "number"}, "fuzzy_ratio": {"type": "integer"}, "levenshtein_distance": {"type": "integer"}}, "required": ["cosine_similarity", "fuzzy_ratio", "jaccard_similarity", "levenshtein_distance"]}, "without_stopwords": {"type": "object", "properties": {"cosine_similarity": {"type": "number"}, "jaccard_similarity": {"type": "number"}, "fuzzy_ratio": {"type": "integer"}, "levenshtein_distance": {"type": "integer"}}, "required": ["cosine_similarity", "fuzzy_ratio", "jaccard_similarity", "levenshtein_distance"]}, "lemmatized_text_without_stopwords": {"type": "object", "properties": {"cosine_similarity": {"type": "number"}, "jaccard_similarity": {"type": "number"}, "fuzzy_ratio": {"type": "integer"}, "levenshtein_distance": {"type": "integer"}, "total_common_lemmas": {"type": "integer"}, "common_lemmas": {"type": "array", "items": {"type": "string"}}, "total_common_lemmatized_bigrams": {"type": "integer"}, "common_lemmatized_bigrams": {"type": "array", "items": {"type": "string"}}}, "required": ["common_lemmas", "common_lemmatized_bigrams", "cosine_similarity", "fuzzy_ratio", "jaccard_similarity", "levenshtein_distance", "total_common_lemmas", "total_common_lemmatized_bigrams"]}}, "required": ["lemmatized_text_without_stopwords", "text_as_is", "without_stopwords"]}
67dd69f0-5727-41f8-8ab0-6c11f8ce121a/87117ea6-acb2-4ea4-a76c-d65d429247ec/0/0
Webit Text Analytics
FREE. Sentiment, Entities, Language, Similarity, Key Phrases, Text Comparisons.
8.2
Similarity
Compares two strings by using a sophisticated set of AI algorithms and returns the similarity score between the two strings.
200
Response
{"data": {"similarity": 0.668939393939394}, "message": null, "status": "success"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"data": {"type": "object", "properties": {"similarity": {"type": "number"}}, "required": ["similarity"]}, "message": {"type": "null"}, "status": {"type": "string"}}, "required": ["data", "message", "status"]}
67dd69f0-5727-41f8-8ab0-6c11f8ce121a/08c95704-4a68-4a05-ba53-5d2f15511653/0/0
Webit Text Analytics
FREE. Sentiment, Entities, Language, Similarity, Key Phrases, Text Comparisons.
8.2
Key Phrases
Key phrases extraction allows for quickly identifying the main concepts in a text.
200
Response
{"data": {"key_phrases": ["catania", "city", "eastern coast", "sicily", "foot", "mount etna", "biggest volcano", "europe", "seventh largest city", "italy", "metropolitan area reaching", "million inhabitants", "major transport hub", "economic centre", "university city", "enjoy", "busy downtown", "active nightlife", "baroque architecture", "urban design"]}, "message": null, "status": "success"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"data": {"type": "object", "properties": {"key_phrases": {"type": "array", "items": {"type": "string"}}}, "required": ["key_phrases"]}, "message": {"type": "null"}, "status": {"type": "string"}}, "required": ["data", "message", "status"]}
67dd69f0-5727-41f8-8ab0-6c11f8ce121a/0e1353f3-1d07-4bd5-a706-8219580a767a/0/0
Webit Text Analytics
FREE. Sentiment, Entities, Language, Similarity, Key Phrases, Text Comparisons.
8.2
Match
Compares a string with an array of strings and does an AI affinity comparison to find the closest matches.
200
Response
{"data": {"matches": [{"match": "pasta", "score": 1}, {"match": "hasta", "score": 0.8}, {"match": "pasties", "score": 0.57}, {"match": "pizza", "score": 0.4}]}, "message": null, "status": "success"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"data": {"type": "object", "properties": {"matches": {"type": "array", "items": {"type": "object", "properties": {"match": {"type": "string"}, "score": {"type": "number"}}, "required": ["match", "score"]}}}, "required": ["matches"]}, "message": {"type": "null"}, "status": {"type": "string"}}, "required": ["data", "message", "status"]}
67dd69f0-5727-41f8-8ab0-6c11f8ce121a/d2416c52-fba8-43e6-83b3-2a7d1f658014/0/0
Webit Text Analytics
FREE. Sentiment, Entities, Language, Similarity, Key Phrases, Text Comparisons.
8.2
Sentiment
Analyzes the sentiment of an input text and detects positivity, negativity and neutrality of such a string.
200
Response
{"data": {"score": 0.75, "sentiment": "positive"}, "message": null, "status": "success"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"data": {"type": "object", "properties": {"score": {"type": "number"}, "sentiment": {"type": "string"}}, "required": ["score", "sentiment"]}, "message": {"type": "null"}, "status": {"type": "string"}}, "required": ["data", "message", "status"]}
67dd69f0-5727-41f8-8ab0-6c11f8ce121a/028656b0-0283-46e3-b44b-70e9634cf4fd/0/0
Webit Text Analytics
FREE. Sentiment, Entities, Language, Similarity, Key Phrases, Text Comparisons.
8.2
Detect Languages
Detects the language of an input text. Supports 100+ languages.
200
Response
{"status": "success", "data": {"input_text": "\u4f60\u597d! \u4f60\u597d\u5417? \u0643\u064a\u0641 \u062d\u0627\u0644\u0643\u061f Neural recognition can detect multiple languages at the same time!", "languages": [{"language": "en", "confidence": 0.9946622848510742, "is_reliable": true, "proportion": 0.656862735748291}, {"language": "zh", "confidence": 0.9963256120681763, "is_reliable": true, "proportion": 0.1764705926179886}, {"language": "ar", "confidence": 0.949984610080719, "is_reliable": true, "proportion": 0.1666666716337204}]}, "message": null}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"status": {"type": "string"}, "data": {"type": "object", "properties": {"input_text": {"type": "string"}, "languages": {"type": "array", "items": {"type": "object", "properties": {"language": {"type": "string"}, "confidence": {"type": "number"}, "is_reliable": {"type": "boolean"}, "proportion": {"type": "number"}}, "required": ["confidence", "is_reliable", "language", "proportion"]}}}, "required": ["input_text", "languages"]}, "message": {"type": "null"}}, "required": ["data", "message", "status"]}
53aa5080e4b07e1f4ebeb407/53aa5081e4b0a798dbd1a5e9/0/0
Topic Tagging
Automatically generate topics and keywords for articles and blogs.
9.2
Generate (get)
Detect and generate human like topics to the given text.
200
Response
{"author": "twinword inc.", "email": "help@twinword.com", "keyword": {"cell": 1, "compute": 2, "computer": 4, "design": 1, "information": 2, "memory": 1, "science": 2, "structure": 2, "study": 2, "transcribe": 1}, "result_code": "200", "result_msg": "Success", "topic": {"art": 0.20782122905028, "biology": 0.18472998137803, "computer science": 0.5010800744879, "human": 0.23091247672253, "machine": 0.23091247672253, "research": 0.18472998137803, "study": 0.30018621973929, "system": 0.23091247672253, "technology": 0.18472998137803}, "version": "5.0.0"}
{"properties": {"author": {"type": "string"}, "email": {"format": "email", "type": "string"}, "keyword": {"properties": {"alternate": {"type": "integer"}, "biological": {"type": "integer"}, "cell": {"type": "integer"}, "compute": {"type": "integer"}, "computer": {"type": "integer"}, "gene": {"type": "integer"}, "information": {"type": "integer"}, "science": {"type": "integer"}, "structure": {"type": "integer"}, "study": {"type": "integer"}}, "type": "object"}, "result_code": {"format": "color", "type": "string"}, "result_msg": {"type": "string"}, "topic": {"properties": {"art": {"type": "number"}, "computer science": {"type": "number"}, "development": {"type": "number"}, "human": {"type": "number"}, "machine": {"type": "number"}, "number": {"type": "number"}, "study": {"type": "number"}, "system": {"type": "number"}, "technology": {"type": "number"}}, "type": "object"}, "version": {"type": "string"}}, "type": "object"}
53aa5080e4b07e1f4ebeb407/53aa5081e4b0a9b1348da674/0/0
Topic Tagging
Automatically generate topics and keywords for articles and blogs.
9.2
Generate (post)
Detect and generate human like topics to the given text.
200
Response
{"author": "twinword inc.", "email": "help@twinword.com", "keyword": {"cell": 1, "compute": 2, "computer": 4, "design": 1, "information": 2, "memory": 1, "science": 2, "structure": 2, "study": 2, "transcribe": 1}, "result_code": "200", "result_msg": "Success", "topic": {"art": 0.20782122905028, "biology": 0.18472998137803, "computer science": 0.5010800744879, "human": 0.23091247672253, "machine": 0.23091247672253, "research": 0.18472998137803, "study": 0.30018621973929, "system": 0.23091247672253, "technology": 0.18472998137803}, "version": "5.0.0"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"author": {"type": "string"}, "email": {"type": "string"}, "keyword": {"type": "object", "properties": {"cell": {"type": "integer"}, "compute": {"type": "integer"}, "computer": {"type": "integer"}, "design": {"type": "integer"}, "information": {"type": "integer"}, "memory": {"type": "integer"}, "science": {"type": "integer"}, "structure": {"type": "integer"}, "study": {"type": "integer"}, "transcribe": {"type": "integer"}}, "required": ["cell", "compute", "computer", "design", "information", "memory", "science", "structure", "study", "transcribe"]}, "result_code": {"type": "string"}, "result_msg": {"type": "string"}, "topic": {"type": "object", "properties": {"art": {"type": "number"}, "biology": {"type": "number"}, "computer science": {"type": "number"}, "human": {"type": "number"}, "machine": {"type": "number"}, "research": {"type": "number"}, "study": {"type": "number"}, "system": {"type": "number"}, "technology": {"type": "number"}}, "required": ["art", "biology", "computer science", "human", "machine", "research", "study", "system", "technology"]}, "version": {"type": "string"}}, "required": ["author", "email", "keyword", "result_code", "result_msg", "topic", "version"]}
2facf127-0774-4e6b-9b3e-86eaead8d2c7/8e84f9c6-44f9-4131-b213-2a421ca8164a/0/0
Fastest Spell Checker API
Should you require a reliable and efficient means of verifying the accuracy of your textual content, our Spell Checker API is a commendable option. Recently updated to ensure it's precision and currency, it presents JSON responses that can be easily integrated into your existing workflow.
7.1
Spell Check Beta
Spell Check with our beta version
200
New Example
{"corrected_text": "This ssentenceccontainsspspellinggrgrammaramistakestionoin mistkes.", "corrections": [{"correction": "This", "length": 5, "message": "Possible spelling mistake found.", "offset": 0, "word": "Thiss"}, {"correction": "sentence", "length": 8, "message": "Possible spelling mistake found.", "offset": 6, "word": "sentance"}, {"correction": "contains", "length": 9, "message": "Possible spelling mistake found.", "offset": 15, "word": "conntains"}, {"correction": "spelling", "length": 8, "message": "Possible spelling mistake found.", "offset": 25, "word": "speeling"}, {"correction": "grammar", "length": 6, "message": "Possible spelling mistake found.", "offset": 35, "word": "gramar"}, {"correction": "punctuation", "length": 11, "message": "Possible spelling mistake found.", "offset": 43, "word": "punctuatoin"}, {"correction": "mistakes", "length": 7, "message": "Possible spelling mistake found.", "offset": 43, "word": "mistkes"}], "original_text": "Thiss sentance conntains speeling, gramar, and punctuatoin mistkes.", "response_code": 200, "success": true}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"corrected_text": {"type": "string"}, "corrections": {"type": "array", "items": {"type": "object", "properties": {"correction": {"type": "string"}, "length": {"type": "integer"}, "message": {"type": "string"}, "offset": {"type": "integer"}, "word": {"type": "string"}}, "required": ["correction", "length", "message", "offset", "word"]}}, "original_text": {"type": "string"}, "response_code": {"type": "integer"}, "success": {"type": "boolean"}}, "required": ["corrected_text", "corrections", "original_text", "response_code", "success"]}
2facf127-0774-4e6b-9b3e-86eaead8d2c7/c935ce19-b8b6-4032-90e2-e80e817a7de6/0/0
Fastest Spell Checker API
Should you require a reliable and efficient means of verifying the accuracy of your textual content, our Spell Checker API is a commendable option. Recently updated to ensure it's precision and currency, it presents JSON responses that can be easily integrated into your existing workflow.
7.1
Spell Check
Spell Check
200
New Example
{"corrected_text": "This is a sample text to demonstrate the spell check API. Sheer are some spelling errors inn this text.", "input_length": 108, "issues": [{"corrected": "His", "word": "This"}, {"corrected": "sample", "word": "samplee"}, {"corrected": "text", "word": "textt"}, {"corrected": "demonstrate", "word": "demoonstrate"}, {"corrected": "spell", "word": "spel"}, {"corrected": "check", "word": "cehck"}, {"corrected": "Sheer", "word": "Theer"}, {"corrected": "some", "word": "sommme"}, {"corrected": "spelling", "word": "speeling"}, {"corrected": "errors", "word": "errros"}, {"corrected": "this", "word": "thiss"}], "original_text": "This is a samplee textt to demoonstrate the spel cehck API. Theer are sommme speeling errros inn thiss text.", "success": true}
{"type": "object", "properties": {"corrected_text": {"type": "string"}, "input_length": {"type": "integer"}, "issues": {"type": "array", "items": {"type": "object", "properties": {"corrected": {"type": "string"}, "word": {"type": "string"}}}}, "original_text": {"type": "string"}, "success": {"type": "boolean"}}}
5155f846-9c51-4427-8bb0-49a592c9916d/da0898e3-580d-44c6-9e4f-f6057ca1b1b7/0/0
Entity and Sentiment Extractor
Multilingual Sentiment, Entity extraction, and disambiguation for cheap and at a scale. Entity (Person, Organization, Address, Price, etc) extraction, disambiguation, and resolution to a wiki page.
6.7
/Entity : Non English Entity Recognition
Currently supports the following major languages: Chinese, English, French, German, Italian, Japanese, Korean, Portuguese, Russian, Spanish which correspond to the following lang_code ['zh', 'zh-Hant', 'en', 'fr', 'de', 'it', 'ja', 'ko', 'pt', 'ru', 'es']
200
Response
[{"name": "Apple", "type": "ORGANIZATION", "salience": 0.38609597086906433, "start_offset": 0, "end_offset": 5, "entity_url": "https://en.wikipedia.org/wiki/Apple_Inc."}, {"name": "\u4eba", "type": "PERSON", "salience": 0.3363187611103058, "start_offset": 9, "end_offset": 10}, {"name": "\u516c\u53f8", "type": "ORGANIZATION", "salience": 0.1359543800354004, "start_offset": 19, "end_offset": 21}, {"name": "\u4e09\u661f", "type": "ORGANIZATION", "salience": 0.10035595297813416, "start_offset": 28, "end_offset": 30, "entity_url": "https://en.wikipedia.org/wiki/Samsung_Electronics"}, {"name": "\u60f3\u6cd5", "type": "OTHER", "salience": 0.041274938732385635, "start_offset": 35, "end_offset": 37}]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "object", "properties": {"name": {"type": "string"}, "type": {"type": "string"}, "salience": {"type": "number"}, "start_offset": {"type": "integer"}, "end_offset": {"type": "integer"}, "entity_url": {"type": "string"}}, "required": ["end_offset", "name", "salience", "start_offset", "type"]}}
ea290e90-c65b-487f-9299-618080965183/d5d931f4-d1d1-4d9a-ba62-f151ee12e207/0/0
Sentiment Analysis
Detect if text is positive, negative or neutral.
null
analyze
Detect if text if positive, negative or neutral.
200
Response
{"sentiment": "positive"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"sentiment": {"type": "string"}}, "required": ["sentiment"]}
306a7109-6c6e-45e8-b72d-38b21c15136e/fcaebd70-39fe-4b1d-a818-b7421defe8f0/0/0
GerVADER
GerVADER is a German adaptation of the sentiment classification tool VADER
7.6
v1
German sentence sentiment classification.
200
New Example
{"ratings": {"compound": 0.8436, "neg": 0, "neu": 0.248, "pos": 0.752}, "sentence": "dieses produkt ist hervorragend, absolute empfehlung!!", "sentiment": "positive"}
{"type": "object", "properties": {"ratings": {"type": "object", "properties": {"compound": {"type": "number"}, "neg": {"type": "integer"}, "neu": {"type": "number"}, "pos": {"type": "number"}}}, "sentence": {"type": "string"}, "sentiment": {"type": "string"}}}
d7971cd3-5bff-4fc0-ae3a-477c1f9e113a/e3df040d-2ed7-4205-ac69-7e9e75e4b034/0/0
Words Definitions, Dictionary And Data API
Get word definitions, Dictionary, and all the data about the particular word. Get deep and more accurate data about the particular word. It is the best word data API at a very reasonable price.
6.7
Word Definations
Word Definations
200
New Example
[{"word": "going", "phonetic": "/\u02c8\u0261\u0259\u028a\u026a\u014b/", "phonetics": [{"text": "/\u02c8\u0261\u0259\u028a\u026a\u014b/", "audio": ""}, {"text": "/\u02c8\u0261o\u028a\u026a\u014b/", "audio": "https://api.dictionaryapi.dev/media/pronunciations/en/going-us.mp3", "sourceUrl": "https://commons.wikimedia.org/w/index.php?curid=1217858", "license": {"name": "BY-SA 3.0", "url": "https://creativecommons.org/licenses/by-sa/3.0"}}], "meanings": [{"partOfSpeech": "verb", "definitions": [{"definition": "To move:", "synonyms": ["cross", "draw", "drift", "fare", "move", "tread", "wend"], "antonyms": ["freeze", "halt", "remain", "stand still", "stay", "stop"]}, {"definition": "(chiefly of a machine) To work or function (properly); to move or perform (as required).", "synonyms": ["function", "operate", "work"], "antonyms": [], "example": "The engine just won't go anymore."}, {"definition": "To start; to begin (an action or process).", "synonyms": [], "antonyms": [], "example": "Get ready, get set, go!"}, {"definition": "To take a turn, especially in a game.", "synonyms": ["make one's move", "move", "take one\u2019s turn"], "antonyms": [], "example": "It\u2019s your turn; go."}, {"definition": "To attend.", "synonyms": [], "antonyms": [], "example": "I go to school at the schoolhouse."}, {"definition": "To proceed:", "synonyms": [], "antonyms": []}, {"definition": "To follow or travel along (a path):", "synonyms": [], "antonyms": []}, {"definition": "To extend (from one point in time or space to another).", "synonyms": [], "antonyms": [], "example": "This property goes all the way to the state line."}, {"definition": "To lead (to a place); to give access to.", "synonyms": [], "antonyms": [], "example": "Does this road go to Fort Smith?"}, {"definition": "To become. 
(The adjective that follows usually describes a negative state.)", "synonyms": ["become", "change into", "turn"], "antonyms": [], "example": "After failing as a criminal, he decided to go straight."}, {"definition": "To assume the obligation or function of; to be, to serve as.", "synonyms": [], "antonyms": []}, {"definition": "To continuously or habitually be in a state.", "synonyms": [], "antonyms": [], "example": "I don't want my children to go hungry."}, {"definition": "To come to (a certain condition or state).", "synonyms": [], "antonyms": [], "example": "They went into debt, she goes to sleep around 10 o'clock."}, {"definition": "To change (from one value to another) in the meaning of wend.", "synonyms": [], "antonyms": [], "example": "The traffic light went straight from green to red."}, {"definition": "To turn out, to result; to come to (a certain result).", "synonyms": [], "antonyms": [], "example": "How did your meeting with Smith go?"}, {"definition": "To tend (toward a result).", "synonyms": [], "antonyms": [], "example": "These experiences go to make us stronger."}, {"definition": "To contribute to a (specified) end product or result.", "synonyms": [], "antonyms": [], "example": "qualities that go to make a lady / lip-reader / sharpshooter"}, {"definition": "To pass, to be used up:", "synonyms": [], "antonyms": []}, {"definition": "To die.", "synonyms": [], "antonyms": []}, {"definition": "To be discarded.", "synonyms": [], "antonyms": [], "example": "This chair has got to go."}, {"definition": "To be lost or out:", "synonyms": [], "antonyms": []}, {"definition": "To break down or apart:", "synonyms": [], "antonyms": []}, {"definition": "To be sold.", "synonyms": [], "antonyms": [], "example": "Everything must go."}, {"definition": "To be given, especially to be assigned or allotted.", "synonyms": [], "antonyms": [], "example": "The award went to Steven Spielberg."}, {"definition": "To survive or get by; to last or persist for a stated length of 
time.", "synonyms": [], "antonyms": [], "example": "Can you two go twenty minutes without arguing?!"}, {"definition": "To have a certain record.", "synonyms": [], "antonyms": [], "example": "The team is going five in a row."}, {"definition": "To be authoritative, accepted, or valid:", "synonyms": [], "antonyms": []}, {"definition": "To say (something), to make a sound:", "synonyms": [], "antonyms": []}, {"definition": "To be expressed or composed (a certain way).", "synonyms": [], "antonyms": [], "example": "As the story goes, he got the idea for the song while sitting in traffic."}, {"definition": "To resort (to).", "synonyms": [], "antonyms": [], "example": "I'll go to court if I have to."}, {"definition": "To apply or subject oneself to:", "synonyms": [], "antonyms": []}, {"definition": "To fit (in a place, or together with something):", "synonyms": [], "antonyms": []}, {"definition": "To date.", "synonyms": ["date", "go out (with)", "see"], "antonyms": [], "example": "He's been going with her for two weeks."}, {"definition": "To attack:", "synonyms": [], "antonyms": []}, {"definition": "To be in general; to be usually.", "synonyms": [], "antonyms": [], "example": "As sentences go, this one is pretty boring."}, {"definition": "To take (a particular part or share); to participate in to the extent of.", "synonyms": [], "antonyms": [], "example": "Let's go halves on this."}, {"definition": "To yield or weigh.", "synonyms": [], "antonyms": [], "example": "Those babies go five tons apiece."}, {"definition": "To offer, bid or bet an amount; to pay.", "synonyms": [], "antonyms": [], "example": "I'll go a ten-spot."}, {"definition": "To enjoy. (Compare go for.)", "synonyms": [], "antonyms": [], "example": "I could go a beer right about now."}, {"definition": "To urinate or defecate.", "synonyms": [], "antonyms": [], "example": "Have you managed to go today, Mrs. 
Miggins?"}], "synonyms": ["become", "change into", "turn", "cross", "draw", "drift", "fare", "move", "tread", "wend", "date", "go out (with)", "see", "function", "operate", "work", "make one's move", "move", "take one\u2019s turn"], "antonyms": ["freeze", "halt", "remain", "stand still", "stay", "stop"]}, {"partOfSpeech": "noun", "definitions": [{"definition": "A departure.", "synonyms": [], "antonyms": [], "example": "Thy going is not lonely, with thee goes thy Husband"}, {"definition": "The suitability of ground for riding, walking etc.", "synonyms": [], "antonyms": [], "example": "The going was very difficult over the ice."}, {"definition": "Progress.", "synonyms": [], "antonyms": [], "example": "We made good going for a while, but then we came to the price."}, {"definition": "Conditions for advancing in any way.", "synonyms": [], "antonyms": [], "example": "Not only were the streets not paved with gold, but the going was difficult for an immigrant."}, {"definition": "Pregnancy; gestation; childbearing", "synonyms": [], "antonyms": []}, {"definition": "(in the plural) Course of life; behaviour; doings; ways.", "synonyms": [], "antonyms": []}, {"definition": "(in the phrase \"the going of\") The whereabouts (of something).", "synonyms": [], "antonyms": [], "example": "I can't find my sunglasses; you haven't seen the going of them, have you?"}], "synonyms": [], "antonyms": []}, {"partOfSpeech": "adjective", "definitions": [{"definition": "Likely to continue; viable.", "synonyms": [], "antonyms": [], "example": "He didn't want to make an unsecured loan to the business because it didn't look like a going concern."}, {"definition": "Current, prevailing.", "synonyms": [], "antonyms": [], "example": "The going rate for manual snow-shoveling is $25 an hour."}, {"definition": "(especially after a noun phrase with a superlative) Available.", "synonyms": [], "antonyms": [], "example": "He has the easiest job going."}], "synonyms": [], "antonyms": []}], "license": {"name": 
"CC BY-SA 3.0", "url": "https://creativecommons.org/licenses/by-sa/3.0"}, "sourceUrls": ["https://en.wiktionary.org/wiki/go", "https://en.wiktionary.org/wiki/going"]}]
{"type": "array", "items": {"type": "object", "properties": {"word": {"type": "string"}, "phonetic": {"type": "string"}, "phonetics": {"type": "array", "items": {"type": "object"}}, "meanings": {"type": "array", "items": {"type": "object", "properties": {"partOfSpeech": {"type": "string"}, "definitions": {"type": "array", "items": {"type": "object"}}, "synonyms": {"type": "array"}, "antonyms": {"type": "array"}}}}, "license": {"type": "object", "properties": {"name": {"type": "string"}, "url": {"type": "string"}}}, "sourceUrls": {"type": "array", "items": {"type": "string"}}}}}
fd1b0626-a9fd-489a-9406-24381c4affb9/18931b5e-6294-47cf-8943-e6b7d90e7740/0/0
Generate Questions from Text
This API will help you generate questions from text using AI. For now, only English is supported, other languages will be supported soon.
6.1
Generate Questions from Text
Generate Questions from Text This endpoint will help you generate questions from English text. Request Body "context": must be more than 10 characters and less than 1000 characters.
200
Response
["Quetab is a modern ai-driven platform to what?"]
{"$schema": "http://json-schema.org/schema#", "type": "array", "items": {"type": "string"}}
5ebd7d86-a817-480e-956e-3407dcc528a8/6b59bbc1-14bc-4ca9-9ab6-e573c1fa8dee/0/0
Senti Level AI
AI service to flag positive or negative sentiment from any text source such as tweets, reviews, and chat logs.
null
wrt_transformer
Returns a label saying if the sentiment is positive or negative.
200
Response
{"result": "positive"}
{"$schema": "http://json-schema.org/schema#", "type": "object", "properties": {"result": {"type": "string"}}, "required": ["result"]}