| sample_id (string, 21–196 chars) | text (string, 105–936k chars) | metadata (dict) | category (6 classes) |
|---|---|---|---|
wagtail/wagtail:wagtail/snippets/tests/test_copy_view.py | from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from wagtail.test.snippets.models import StandardSnippet
from wagtail.test.utils import WagtailTestUtils
class TestSnippetCopyView(WagtailTestUtils, TestCase):
    """Tests for the snippet "copy" view."""

    def setUp(self):
        # A snippet instance to copy, plus the viewset-generated copy URL for it.
        self.snippet = StandardSnippet.objects.create(text="Test snippet")
        self.url = reverse(
            StandardSnippet.snippet_viewset.get_url_name("copy"),
            args=(self.snippet.pk,),
        )
        # login() returns the logged-in (superuser) test user.
        self.user = self.login()

    def test_without_permission(self):
        # Demote the user to a non-superuser who only has admin access; the
        # copy view should then redirect back to the admin home page.
        self.user.is_superuser = False
        self.user.save()
        admin_permission = Permission.objects.get(
            content_type__app_label="wagtailadmin", codename="access_admin"
        )
        self.user.user_permissions.add(admin_permission)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, reverse("wagtailadmin_home"))

    def test_form_is_prefilled(self):
        # The copy view reuses the create template, with the source snippet's
        # field values pre-populated in the form.
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailsnippets/snippets/create.html")
        # Ensure form is prefilled
        soup = self.get_soup(response.content)
        text_input = soup.select_one('input[name="text"]')
        self.assertEqual(text_input.attrs.get("value"), "Test snippet")
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_copy_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_create_view.py | import datetime
from unittest import mock
from django.contrib.admin.utils import quote
from django.contrib.auth.models import Permission
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpRequest, HttpResponse, JsonResponse
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.timezone import now
from freezegun import freeze_time
from taggit.models import Tag
from wagtail.models import Locale, ModelLogEntry, Revision
from wagtail.signals import published
from wagtail.snippets.action_menu import (
ActionMenuItem,
get_base_snippet_action_menu_items,
)
from wagtail.test.snippets.models import (
FileUploadSnippet,
)
from wagtail.test.testapp.models import (
Advert,
DraftStateModel,
RevisableCluster,
RevisableModel,
)
from wagtail.test.utils import WagtailTestUtils
from wagtail.test.utils.form_data import inline_formset, nested_form_data
from wagtail.test.utils.timestamps import submittable_timestamp
class TestSnippetCreateView(WagtailTestUtils, TestCase):
    """Tests for the generic snippet create ("add") view.

    Uses the Advert model by default; individual tests may substitute other
    snippet models via the ``model`` argument of :meth:`get` / :meth:`post`.
    """

    def setUp(self):
        # login() returns the logged-in (superuser) test user.
        self.user = self.login()

    def get(self, params=None, model=Advert, headers=None):
        # GET the "add" view for the given snippet model.
        return self.client.get(
            reverse(model.snippet_viewset.get_url_name("add")), params, headers=headers
        )

    def post(self, post_data=None, model=Advert, headers=None):
        # POST form data to the "add" view for the given snippet model.
        return self.client.post(
            reverse(model.snippet_viewset.get_url_name("add")),
            post_data,
            headers=headers,
        )

    def test_get_with_limited_permissions(self):
        # Without the model's "add" permission, the view redirects away.
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
        )
        self.user.save()
        response = self.get()
        self.assertEqual(response.status_code, 302)

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailsnippets/snippets/create.html")
        # Fix: the original passed html=True here, which parses the needle as
        # an HTML fragment (a bare text node) that can never match an attribute
        # in the rendered markup, so the assertion passed vacuously. Use a
        # plain substring check, consistent with the positive assertion in
        # test_snippet_with_tabbed_interface.
        self.assertNotContains(response, 'role="tablist"')
        soup = self.get_soup(response.content)
        # Should have the unsaved controller set up
        editor_form = soup.select_one("#w-editor-form")
        self.assertIsNotNone(editor_form)
        self.assertIn("w-unsaved", editor_form.attrs.get("data-controller").split())
        self.assertTrue(
            {
                "w-unsaved#submit",
                "beforeunload@window->w-unsaved#confirm",
            }.issubset(editor_form.attrs.get("data-action").split())
        )
        self.assertEqual(
            editor_form.attrs.get("data-w-unsaved-confirmation-value"),
            "true",
        )
        self.assertEqual(
            editor_form.attrs.get("data-w-unsaved-force-value"),
            "false",
        )

    def test_snippet_with_tabbed_interface(self):
        # A snippet whose edit handler defines multiple tabs should render
        # the tablist markup and the per-tab help texts.
        response = self.client.get(
            reverse("wagtailsnippets_tests_advertwithtabbedinterface:add")
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailsnippets/snippets/create.html")
        self.assertContains(response, 'role="tablist"')
        self.assertContains(
            response,
            '<a id="tab-label-advert" href="#tab-advert" class="w-tabs__tab " role="tab" aria-selected="false" tabindex="-1" data-action="w-tabs#select:prevent" data-w-tabs-target="trigger">',
        )
        self.assertContains(
            response,
            '<a id="tab-label-other" href="#tab-other" class="w-tabs__tab " role="tab" aria-selected="false" tabindex="-1" data-action="w-tabs#select:prevent" data-w-tabs-target="trigger">',
        )
        self.assertContains(response, "Other panels help text")
        self.assertContains(response, "Top-level help text")

    def test_create_with_limited_permissions(self):
        # Without the model's "add" permission, POSTing is also rejected
        # with a redirect.
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
        )
        self.user.save()
        response = self.post(
            post_data={"text": "test text", "url": "http://www.example.com/"}
        )
        self.assertEqual(response.status_code, 302)

    def test_create_invalid(self):
        # Submitting without required fields reshows the form with a header
        # message, a "go to first error" button, and field-level errors.
        response = self.post(post_data={"foo": "bar"})
        soup = self.get_soup(response.content)
        header_messages = soup.css.select(".messages[role='status'] ul > li")
        # there should be one header message that indicates the issue and has a go to error button
        self.assertEqual(len(header_messages), 1)
        message = header_messages[0]
        self.assertIn(
            "The advert could not be created due to errors.", message.get_text()
        )
        buttons = message.find_all("button")
        self.assertEqual(len(buttons), 1)
        self.assertEqual(buttons[0].attrs["data-controller"], "w-count w-focus")
        self.assertEqual(
            set(buttons[0].attrs["data-action"].split()),
            {"click->w-focus#focus", "wagtail:panel-init@document->w-count#count"},
        )
        self.assertIn("Go to the first error", buttons[0].get_text())
        # field specific error should be shown
        error_messages = soup.css.select(".error-message")
        self.assertEqual(len(error_messages), 1)
        error_message = error_messages[0]
        self.assertEqual(error_message.parent["id"], "panel-child-text-errors")
        self.assertIn("This field is required", error_message.get_text())
        # Should have the unsaved controller set up
        editor_form = soup.select_one("#w-editor-form")
        self.assertIsNotNone(editor_form)
        self.assertIn("w-unsaved", editor_form.attrs.get("data-controller").split())
        self.assertTrue(
            {
                "w-unsaved#submit",
                "beforeunload@window->w-unsaved#confirm",
            }.issubset(editor_form.attrs.get("data-action").split())
        )
        self.assertEqual(
            editor_form.attrs.get("data-w-unsaved-confirmation-value"),
            "true",
        )
        self.assertEqual(
            editor_form.attrs.get("data-w-unsaved-force-value"),
            # The form is invalid, we want to force it to be "dirty" on initial load
            "true",
        )

    def test_create_invalid_with_json_response(self):
        # With an Accept: application/json header, validation errors come
        # back as a 400 JSON payload instead of a rendered form.
        response = self.post(
            post_data={"foo": "bar"}, headers={"Accept": "application/json"}
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response["Content-Type"], "application/json")
        self.assertEqual(
            response.json(),
            {
                "success": False,
                "error_code": "validation_error",
                "error_message": "There are validation errors, click save to highlight them.",
            },
        )

    def test_create(self):
        # A valid POST creates the snippet and redirects to the listing.
        response = self.post(
            post_data={"text": "test_advert", "url": "http://www.example.com/"}
        )
        self.assertRedirects(response, reverse("wagtailsnippets_tests_advert:list"))
        snippets = Advert.objects.filter(text="test_advert")
        self.assertEqual(snippets.count(), 1)
        self.assertEqual(snippets.first().url, "http://www.example.com/")

    def test_create_with_json_response(self):
        # A valid JSON-accepting POST creates the snippet and returns a JSON
        # payload with the new pk, edit URL, and a hydrate URL.
        response = self.post(
            post_data={"text": "test_advert", "url": "http://www.example.com/"},
            headers={"Accept": "application/json"},
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        snippets = Advert.objects.filter(text="test_advert")
        self.assertEqual(snippets.count(), 1)
        snippet = snippets.first()
        self.assertEqual(snippet.url, "http://www.example.com/")
        response_json = response.json()
        edit_url = reverse(
            snippet.snippet_viewset.get_url_name("edit"), args=(snippet.pk,)
        )
        self.assertEqual(response_json["success"], True)
        self.assertEqual(response_json["pk"], snippet.pk)
        self.assertEqual(response_json["field_updates"], {})
        self.assertEqual(response_json["url"], edit_url)
        self.assertEqual(
            response_json["hydrate_url"],
            f"{edit_url}?_w_hydrate_create_view=1",
        )

    def test_create_with_inline_models_and_json_response(self):
        # Inline children saved via formsets report their new pks back in
        # the JSON "field_updates" mapping.
        form_data = nested_form_data(
            {
                "text": "Created with one child",
                "children": inline_formset([{"id": "", "text": "Child 1"}]),
            }
        )
        response = self.post(
            post_data=form_data,
            model=RevisableCluster,
            headers={"Accept": "application/json"},
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        snippets = RevisableCluster.objects.filter(text="Created with one child")
        self.assertEqual(snippets.count(), 1)
        snippet = snippets.first()
        self.assertEqual(snippet.children.count(), 1)
        child = snippet.children.first()
        self.assertEqual(child.text, "Child 1")
        response_json = response.json()
        self.assertEqual(response_json["success"], True)
        self.assertEqual(response_json["pk"], snippet.pk)
        self.assertEqual(
            response_json["field_updates"],
            {"children-INITIAL_FORMS": "1", "children-0-id": str(child.pk)},
        )

    def test_create_with_tags(self):
        # Comma-separated tag input should create and attach the tags.
        tags = ["hello", "world"]
        response = self.post(
            post_data={
                "text": "test_advert",
                "url": "http://example.com/",
                "tags": ", ".join(tags),
            }
        )
        self.assertRedirects(response, reverse("wagtailsnippets_tests_advert:list"))
        snippet = Advert.objects.get(text="test_advert")
        expected_tags = list(Tag.objects.order_by("name").filter(name__in=tags))
        self.assertEqual(len(expected_tags), 2)
        self.assertEqual(list(snippet.tags.order_by("name")), expected_tags)

    def test_create_file_upload_multipart(self):
        # Models with FileFields must render a multipart form and accept
        # uploaded file content.
        response = self.get(model=FileUploadSnippet)
        self.assertContains(response, 'enctype="multipart/form-data"')
        response = self.post(
            model=FileUploadSnippet,
            post_data={"file": SimpleUploadedFile("test.txt", b"Uploaded file")},
        )
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_snippetstests_fileuploadsnippet:list"),
        )
        snippet = FileUploadSnippet.objects.get()
        self.assertEqual(snippet.file.read(), b"Uploaded file")

    def test_create_with_revision(self):
        # Creating a RevisionMixin model records an initial revision and
        # attaches it to the "wagtail.create" log entry.
        response = self.post(
            model=RevisableModel, post_data={"text": "create_revisable"}
        )
        self.assertRedirects(
            response, reverse("wagtailsnippets_tests_revisablemodel:list")
        )
        snippets = RevisableModel.objects.filter(text="create_revisable")
        snippet = snippets.first()
        self.assertEqual(snippets.count(), 1)
        # The revision should be created
        revisions = snippet.revisions
        revision = revisions.first()
        self.assertEqual(revisions.count(), 1)
        self.assertEqual(revision.content["text"], "create_revisable")
        # The log entry should have the revision attached
        log_entries = ModelLogEntry.objects.for_instance(snippet).filter(
            action="wagtail.create"
        )
        self.assertEqual(log_entries.count(), 1)
        self.assertEqual(log_entries.first().revision, revision)

    def test_before_create_snippet_hook_get(self):
        # A before_create_snippet hook returning a response short-circuits GET.
        def hook_func(request, model):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(model, Advert)
            return HttpResponse("Overridden!")

        with self.register_hook("before_create_snippet", hook_func):
            response = self.get()

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")

    def test_before_create_snippet_hook_get_with_json_response(self):
        # For JSON requests, a non-JSON hook response is reported as
        # "blocked_by_hook"; a JsonResponse is passed through verbatim.
        def non_json_hook_func(request, model):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(model, Advert)
            return HttpResponse("Overridden!")

        def json_hook_func(request, model):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(model, Advert)
            return JsonResponse({"status": "purple"})

        with self.register_hook("before_create_snippet", non_json_hook_func):
            response = self.get(headers={"Accept": "application/json"})
            self.assertEqual(
                response.json(),
                {
                    "success": False,
                    "error_code": "blocked_by_hook",
                    "error_message": "Request to create advert was blocked by hook.",
                },
            )

        with self.register_hook("before_create_snippet", json_hook_func):
            response = self.get(headers={"Accept": "application/json"})
            self.assertEqual(response.json(), {"status": "purple"})

    def test_before_create_snippet_hook_post(self):
        # A before_create_snippet hook response on POST prevents creation.
        def hook_func(request, model):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(model, Advert)
            return HttpResponse("Overridden!")

        with self.register_hook("before_create_snippet", hook_func):
            post_data = {"text": "Hook test", "url": "http://www.example.com/"}
            response = self.post(post_data=post_data)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")
        # Request intercepted before advert was created
        self.assertFalse(Advert.objects.exists())

    def test_before_create_snippet_hook_post_with_json_response(self):
        # Same as above but for JSON requests: a non-JSON hook response maps
        # to a 400 "blocked_by_hook" payload; a JsonResponse is passed through.
        def non_json_hook_func(request, model):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(model, Advert)
            return HttpResponse("Overridden!")

        def json_hook_func(request, model):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(model, Advert)
            return JsonResponse({"status": "purple"})

        with self.register_hook("before_create_snippet", non_json_hook_func):
            post_data = {"text": "Hook test", "url": "http://www.example.com/"}
            response = self.post(
                post_data=post_data,
                headers={"Accept": "application/json"},
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(
                response.json(),
                {
                    "success": False,
                    "error_code": "blocked_by_hook",
                    "error_message": "Request to create advert was blocked by hook.",
                },
            )
            # Request intercepted before advert was created
            self.assertFalse(Advert.objects.exists())

        with self.register_hook("before_create_snippet", json_hook_func):
            post_data = {"text": "Hook test", "url": "http://www.example.com/"}
            response = self.post(
                post_data=post_data,
                headers={"Accept": "application/json"},
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.json(), {"status": "purple"})
            # Request intercepted before advert was created
            self.assertFalse(Advert.objects.exists())

    def test_after_create_snippet_hook(self):
        # An after_create_snippet hook can replace the success response, but
        # the snippet has already been created by the time it runs.
        def hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "Hook test")
            self.assertEqual(instance.url, "http://www.example.com/")
            return HttpResponse("Overridden!")

        with self.register_hook("after_create_snippet", hook_func):
            post_data = {"text": "Hook test", "url": "http://www.example.com/"}
            response = self.post(post_data=post_data)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")
        # Request intercepted after advert was created
        self.assertTrue(Advert.objects.exists())

    def test_after_create_snippet_hook_post_with_json_response(self):
        # For JSON requests, only a JsonResponse from the after hook replaces
        # the default JSON success payload.
        def non_json_hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "Hook test")
            self.assertEqual(instance.url, "http://www.example.com/")
            return HttpResponse("Overridden!")

        def json_hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "Another hook test")
            self.assertEqual(instance.url, "http://www.example.com/")
            return JsonResponse({"status": "purple"})

        with self.register_hook("after_create_snippet", non_json_hook_func):
            post_data = {"text": "Hook test", "url": "http://www.example.com/"}
            response = self.post(
                post_data=post_data,
                headers={"Accept": "application/json"},
            )
            self.assertEqual(response.status_code, 200)
            # hook response is ignored, since it's not a JSON response
            self.assertEqual(response.json()["success"], True)
            # Request intercepted after advert was created
            self.assertTrue(Advert.objects.filter(text="Hook test").exists())

        with self.register_hook("after_create_snippet", json_hook_func):
            post_data = {"text": "Another hook test", "url": "http://www.example.com/"}
            response = self.post(
                post_data=post_data,
                headers={"Accept": "application/json"},
            )
            self.assertEqual(response.status_code, 200)
            # hook response is used, since it's a JSON response
            self.assertEqual(response.json(), {"status": "purple"})
            # Request intercepted after advert was created
            self.assertTrue(Advert.objects.filter(text="Another hook test").exists())

    def test_register_snippet_action_menu_item(self):
        # A hook-registered action menu item should be rendered as a submit
        # button in the create view's action menu.
        class TestSnippetActionMenuItem(ActionMenuItem):
            label = "Test"
            name = "test"
            icon_name = "check"
            classname = "custom-class"

            def is_shown(self, context):
                return True

        def hook_func(model):
            return TestSnippetActionMenuItem(order=0)

        with self.register_hook("register_snippet_action_menu_item", hook_func):
            # cache_clear so the hook-provided item is picked up (and cleared
            # again afterwards so other tests are unaffected).
            get_base_snippet_action_menu_items.cache_clear()
            response = self.get()

        get_base_snippet_action_menu_items.cache_clear()
        self.assertContains(
            response,
            '<button type="submit" name="test" value="Test" class="button custom-class"><svg class="icon icon-check icon" aria-hidden="true"><use href="#icon-check"></use></svg>Test</button>',
            html=True,
        )

    def test_register_snippet_action_menu_item_as_none(self):
        # A hook returning None must be silently ignored.
        def hook_func(model):
            return None

        with self.register_hook("register_snippet_action_menu_item", hook_func):
            get_base_snippet_action_menu_items.cache_clear()
            response = self.get()

        get_base_snippet_action_menu_items.cache_clear()
        self.assertEqual(response.status_code, 200)

    def test_construct_snippet_action_menu(self):
        # The construct_snippet_action_menu hook can replace the menu items
        # wholesale; the replacement becomes the main (non-dropdown) action
        # and its Media assets are included on the page.
        class TestSnippetActionMenuItem(ActionMenuItem):
            label = "Test"
            name = "test"
            icon_name = "check"
            classname = "custom-class"

            def is_shown(self, context):
                return True

            class Media:
                js = ["js/some-default-item.js"]
                css = {"all": ["css/some-default-item.css"]}

        def hook_func(menu_items, request, context):
            self.assertIsInstance(menu_items, list)
            self.assertIsInstance(request, WSGIRequest)
            self.assertEqual(context["view"], "create")
            self.assertEqual(context["model"], Advert)
            # Replace save menu item
            menu_items[:] = [TestSnippetActionMenuItem(order=0)]

        with self.register_hook("construct_snippet_action_menu", hook_func):
            response = self.get()

        soup = self.get_soup(response.content)
        custom_action = soup.select_one("form button[name='test']")
        self.assertIsNotNone(custom_action)
        # We're replacing the save button, so it should not be in a dropdown
        # as it's the main action
        dropdown_parent = custom_action.find_parent(attrs={"class": "w-dropdown"})
        self.assertIsNone(dropdown_parent)
        self.assertEqual(custom_action.text.strip(), "Test")
        self.assertEqual(custom_action.attrs.get("class"), ["button", "custom-class"])
        icon = custom_action.select_one("svg use[href='#icon-check']")
        self.assertIsNotNone(icon)
        # Should contain media files
        js = soup.select_one("script[src='/static/js/some-default-item.js']")
        self.assertIsNotNone(js)
        css = soup.select_one("link[href='/static/css/some-default-item.css']")
        self.assertIsNotNone(css)
        save_item = soup.select_one("form button[name='action-save']")
        self.assertIsNone(save_item)

    def test_create_shows_status_side_panel_skeleton(self):
        # Draft-capable models show a status side panel on the create view,
        # attributing the pending creation to the current user.
        self.user.first_name = "Chrismansyah"
        self.user.last_name = "Rahadi"
        self.user.save()
        response = self.get(model=RevisableModel)
        soup = self.get_soup(response.content)
        panel = soup.select_one('[data-side-panel="status"]')
        self.assertIsNotNone(panel)

        def assert_panel_section(label_id, label_text, description):
            # Each panel section is labelled via aria-labelledby and contains
            # the label followed by its description text.
            section = panel.select_one(f'[aria-labelledby="{label_id}"]')
            self.assertIsNotNone(section)
            label = section.select_one(f"#{label_id}")
            self.assertIsNotNone(label)
            self.assertEqual(label.get_text(separator="\n", strip=True), label_text)
            self.assertEqual(
                section.get_text(separator="\n", strip=True),
                f"{label_text}\n{description}",
            )

        assert_panel_section(
            "status-sidebar-live",
            "Live",
            "To be created by Chrismansyah Rahadi",
        )
        usage_section = panel.select("section")[-1]
        self.assertIsNotNone(usage_section)
        self.assertEqual(
            usage_section.get_text(separator="\n", strip=True),
            "Usage\nUsed 0 times",
        )
@override_settings(WAGTAIL_I18N_ENABLED=True)
class TestLocaleSelectorOnCreate(WagtailTestUtils, TestCase):
    """Tests for the locale switcher on the snippet create view."""

    fixtures = ["test.json"]

    def setUp(self):
        # A second locale so there is something to switch to.
        self.fr_locale = Locale.objects.create(language_code="fr")
        self.user = self.login()

    def test_locale_selector(self):
        # Translatable snippets get a locale switcher linking to the create
        # view with a ?locale=... query parameter.
        response = self.client.get(
            reverse("wagtailsnippets_snippetstests_translatablesnippet:add")
        )
        self.assertContains(response, "Switch locales")
        switch_to_french_url = (
            reverse("wagtailsnippets_snippetstests_translatablesnippet:add")
            + "?locale=fr"
        )
        self.assertContains(
            response,
            f'<a href="{switch_to_french_url}" lang="fr">',
        )

    def test_locale_selector_with_existing_locale(self):
        # When already viewing a non-default locale, the switcher links back
        # to the default locale.
        response = self.client.get(
            reverse("wagtailsnippets_snippetstests_translatablesnippet:add")
            + "?locale=fr"
        )
        self.assertContains(response, "Switch locales")
        switch_to_english_url = (
            reverse("wagtailsnippets_snippetstests_translatablesnippet:add")
            + "?locale=en"
        )
        self.assertContains(
            response,
            f'<a href="{switch_to_english_url}" lang="en">',
        )

    @override_settings(WAGTAIL_I18N_ENABLED=False)
    def test_locale_selector_not_present_when_i18n_disabled(self):
        # With i18n disabled, no switcher is rendered even for translatable
        # models.
        response = self.client.get(
            reverse("wagtailsnippets_snippetstests_translatablesnippet:add")
        )
        self.assertNotContains(response, "Switch locales")
        switch_to_french_url = (
            reverse("wagtailsnippets_snippetstests_translatablesnippet:add")
            + "?locale=fr"
        )
        self.assertNotContains(
            response,
            f'<a href="{switch_to_french_url}" lang="fr">',
        )

    def test_locale_selector_not_present_on_non_translatable_snippet(self):
        # Non-translatable models (Advert) never get the switcher.
        response = self.client.get(reverse("wagtailsnippets_tests_advert:add"))
        self.assertNotContains(response, "Switch locales")
        switch_to_french_url = (
            reverse("wagtailsnippets_snippetstests_translatablesnippet:add")
            + "?locale=fr"
        )
        self.assertNotContains(
            response,
            f'<a href="{switch_to_french_url}" lang="fr">',
        )
class TestCreateDraftStateSnippet(WagtailTestUtils, TestCase):
STATUS_TOGGLE_BADGE_REGEX = (
r'data-side-panel-toggle="status"[^<]+<svg[^<]+<use[^<]+</use[^<]+</svg[^<]+'
r"<div data-side-panel-toggle-counter[^>]+w-bg-critical-200[^>]+>\s*%(num_errors)s\s*</div>"
)
    def setUp(self):
        # login() returns the logged-in (superuser) test user.
        self.user = self.login()
def get(self):
return self.client.get(reverse("wagtailsnippets_tests_draftstatemodel:add"))
def post(self, post_data=None):
return self.client.post(
reverse("wagtailsnippets_tests_draftstatemodel:add"),
post_data,
)
    def test_get(self):
        """The create view for a DraftStateMixin model renders draft/publish
        actions, the status side panel, and the schedule-publishing dialog."""
        add_url = reverse("wagtailsnippets_tests_draftstatemodel:add")
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailsnippets/snippets/create.html")
        # The save button should be labelled "Save draft"
        self.assertContains(response, "Save draft")
        # The publish button should exist
        self.assertContains(response, "Publish")
        # The publish button should have name="action-publish"
        self.assertContains(
            response,
            '<button\n type="submit"\n name="action-publish"\n value="action-publish"\n class="button action-save button-longrunning"\n data-controller="w-progress"\n data-action="w-progress#activate"\n',
        )
        # The status side panel should be rendered so that the
        # publishing schedule can be configured
        self.assertContains(
            response,
            '<div class="form-side__panel" data-side-panel="status" hidden>',
        )
        # The status side panel should show "No publishing schedule set" info
        self.assertContains(response, "No publishing schedule set")
        # Should show the "Set schedule" button
        html = response.content.decode()
        self.assertTagInHTML(
            '<button type="button" data-a11y-dialog-show="schedule-publishing-dialog">Set schedule</button>',
            html,
            count=1,
            allow_extra_attrs=True,
        )
        # Should show the dialog template pointing to the [data-edit-form] selector as the root
        soup = self.get_soup(html)
        dialog = soup.select_one(
            """
            template[data-controller="w-teleport"][data-w-teleport-target-value="[data-edit-form]"]
            #schedule-publishing-dialog
            """
        )
        self.assertIsNotNone(dialog)
        # Should render the main form with data-edit-form attribute
        self.assertTagInHTML(
            f'<form action="{add_url}" method="POST" data-edit-form>',
            html,
            count=1,
            allow_extra_attrs=True,
        )
        self.assertTagInHTML(
            '<div id="schedule-publishing-dialog" class="w-dialog publishing" data-controller="w-dialog">',
            html,
            count=1,
            allow_extra_attrs=True,
        )
        # Should show the correct subtitle in the dialog
        self.assertContains(
            response, "Choose when this draft state model should go live and/or expire"
        )
        # Should not show the Unpublish action menu item
        unpublish_url = "/admin/snippets/tests/draftstatemodel/unpublish/"
        self.assertNotContains(response, unpublish_url)
        self.assertNotContains(response, "Unpublish")
    def test_save_draft(self):
        """Saving without action-publish creates an unpublished draft with a
        latest_revision and a wagtail.create log entry."""
        response = self.post(post_data={"text": "Draft-enabled Foo"})
        snippet = DraftStateModel.objects.get(text="Draft-enabled Foo")
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatemodel:edit", args=[snippet.pk]),
        )
        # The instance should be created
        self.assertEqual(snippet.text, "Draft-enabled Foo")
        # The instance should be a draft
        self.assertFalse(snippet.live)
        self.assertTrue(snippet.has_unpublished_changes)
        self.assertIsNone(snippet.first_published_at)
        self.assertIsNone(snippet.last_published_at)
        self.assertIsNone(snippet.live_revision)
        # A revision should be created and set as latest_revision
        self.assertIsNotNone(snippet.latest_revision)
        # The revision content should contain the data
        self.assertEqual(snippet.latest_revision.content["text"], "Draft-enabled Foo")
        # A log entry should be created
        log_entry = ModelLogEntry.objects.for_instance(snippet).get(
            action="wagtail.create"
        )
        self.assertEqual(log_entry.revision, snippet.latest_revision)
        self.assertEqual(log_entry.label, "Draft-enabled Foo")
    def test_create_skips_validation_when_saving_draft(self):
        """Drafts may be saved with invalid (empty) required fields; the log
        entry falls back to the default object label."""
        response = self.post(post_data={"text": ""})
        snippet = DraftStateModel.objects.get(text="")
        self.assertRedirects(
            response,
            reverse(
                "wagtailsnippets_tests_draftstatemodel:edit", args=[quote(snippet.pk)]
            ),
        )
        self.assertFalse(snippet.live)
        # A log entry should be created (with a fallback label)
        log_entry = ModelLogEntry.objects.for_instance(snippet).get(
            action="wagtail.create"
        )
        self.assertEqual(log_entry.revision, snippet.latest_revision)
        self.assertEqual(log_entry.label, f"DraftStateModel object ({snippet.pk})")
    def test_required_asterisk_on_reshowing_form(self):
        """
        If a form is reshown due to a validation error elsewhere, fields whose validation
        was deferred should still show the required asterisk.
        """
        # NOTE(review): "some_namespace:add" looks like a placeholder URL
        # namespace rather than a real registered viewset — verify this
        # reverse() target against the test app's registered snippets.
        response = self.client.post(
            reverse("some_namespace:add"),
            {"text": "", "country_code": "UK", "some_number": "meef"},
        )
        self.assertEqual(response.status_code, 200)
        # The empty text should not cause a validation error, but the invalid number should
        self.assertNotContains(response, "This field is required.")
        self.assertContains(response, "Enter a whole number.", count=1)
        soup = self.get_soup(response.content)
        self.assertTrue(soup.select_one('label[for="id_text"] > span.w-required-mark'))
    def test_create_will_not_publish_invalid_snippet(self):
        """Publishing (unlike saving a draft) enforces full validation, so an
        invalid submission creates nothing."""
        response = self.post(
            post_data={"text": "", "action-publish": "Publish"},
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response, "The draft state model could not be created due to errors."
        )
        snippets = DraftStateModel.objects.filter(text="")
        self.assertEqual(snippets.count(), 0)
    def test_publish(self):
        """Publishing on create sets live state, publish timestamps, the live
        revision, and fires the ``published`` signal."""
        # Connect a mock signal handler to published signal
        mock_handler = mock.MagicMock()
        published.connect(mock_handler)
        try:
            # Freeze time so first/last_published_at can be compared exactly.
            timestamp = now()
            with freeze_time(timestamp):
                response = self.post(
                    post_data={
                        "text": "Draft-enabled Foo, Published",
                        "action-publish": "action-publish",
                    }
                )
            snippet = DraftStateModel.objects.get(text="Draft-enabled Foo, Published")
            self.assertRedirects(
                response, reverse("wagtailsnippets_tests_draftstatemodel:list")
            )
            # The instance should be created
            self.assertEqual(snippet.text, "Draft-enabled Foo, Published")
            # The instance should be live
            self.assertTrue(snippet.live)
            self.assertFalse(snippet.has_unpublished_changes)
            self.assertEqual(snippet.first_published_at, timestamp)
            self.assertEqual(snippet.last_published_at, timestamp)
            # A revision should be created and set as both latest_revision and live_revision
            self.assertIsNotNone(snippet.live_revision)
            self.assertEqual(snippet.live_revision, snippet.latest_revision)
            # The revision content should contain the new data
            self.assertEqual(
                snippet.live_revision.content["text"],
                "Draft-enabled Foo, Published",
            )
            # Check that the published signal was fired
            self.assertEqual(mock_handler.call_count, 1)
            mock_call = mock_handler.mock_calls[0][2]
            self.assertEqual(mock_call["sender"], DraftStateModel)
            self.assertEqual(mock_call["instance"], snippet)
            self.assertIsInstance(mock_call["instance"], DraftStateModel)
        finally:
            # Always disconnect so the mock doesn't leak into other tests.
            published.disconnect(mock_handler)
    def test_publish_bad_permissions(self):
        """Without the publish permission, action-publish degrades to saving
        a draft: the instance is created but not live, and no signal fires."""
        # Only add create and edit permission
        self.user.is_superuser = False
        add_permission = Permission.objects.get(
            content_type__app_label="tests",
            codename="add_draftstatemodel",
        )
        edit_permission = Permission.objects.get(
            content_type__app_label="tests",
            codename="change_draftstatemodel",
        )
        admin_permission = Permission.objects.get(
            content_type__app_label="wagtailadmin",
            codename="access_admin",
        )
        self.user.user_permissions.add(
            add_permission,
            edit_permission,
            admin_permission,
        )
        self.user.save()
        # Connect a mock signal handler to published signal
        mock_handler = mock.MagicMock()
        published.connect(mock_handler)
        try:
            response = self.post(
                post_data={
                    "text": "Draft-enabled Foo",
                    "action-publish": "action-publish",
                }
            )
            snippet = DraftStateModel.objects.get(text="Draft-enabled Foo")
            # Should be taken to the edit page
            self.assertRedirects(
                response,
                reverse(
                    "wagtailsnippets_tests_draftstatemodel:edit",
                    args=[snippet.pk],
                ),
            )
            # The instance should still be created
            self.assertEqual(snippet.text, "Draft-enabled Foo")
            # The instance should not be live
            self.assertFalse(snippet.live)
            self.assertTrue(snippet.has_unpublished_changes)
            # A revision should be created and set as latest_revision, but not live_revision
            self.assertIsNotNone(snippet.latest_revision)
            self.assertIsNone(snippet.live_revision)
            # The revision content should contain the data
            self.assertEqual(
                snippet.latest_revision.content["text"],
                "Draft-enabled Foo",
            )
            # Check that the published signal was not fired
            self.assertEqual(mock_handler.call_count, 0)
        finally:
            # Always disconnect so the mock doesn't leak into other tests.
            published.disconnect(mock_handler)
    def test_publish_with_publish_permission(self):
        """Publishing works for a non-superuser who holds the explicit
        publish permission (mirrors test_publish without the superuser flag)."""
        # Use create and publish permissions instead of relying on superuser flag
        self.user.is_superuser = False
        add_permission = Permission.objects.get(
            content_type__app_label="tests",
            codename="add_draftstatemodel",
        )
        publish_permission = Permission.objects.get(
            content_type__app_label="tests",
            codename="publish_draftstatemodel",
        )
        admin_permission = Permission.objects.get(
            content_type__app_label="wagtailadmin",
            codename="access_admin",
        )
        self.user.user_permissions.add(
            add_permission,
            publish_permission,
            admin_permission,
        )
        self.user.save()
        # Connect a mock signal handler to published signal
        mock_handler = mock.MagicMock()
        published.connect(mock_handler)
        try:
            # Freeze time so first/last_published_at can be compared exactly.
            timestamp = now()
            with freeze_time(timestamp):
                response = self.post(
                    post_data={
                        "text": "Draft-enabled Foo, Published",
                        "action-publish": "action-publish",
                    }
                )
            snippet = DraftStateModel.objects.get(text="Draft-enabled Foo, Published")
            self.assertRedirects(
                response, reverse("wagtailsnippets_tests_draftstatemodel:list")
            )
            # The instance should be created
            self.assertEqual(snippet.text, "Draft-enabled Foo, Published")
            # The instance should be live
            self.assertTrue(snippet.live)
            self.assertFalse(snippet.has_unpublished_changes)
            self.assertEqual(snippet.first_published_at, timestamp)
            self.assertEqual(snippet.last_published_at, timestamp)
            # A revision should be created and set as both latest_revision and live_revision
            self.assertIsNotNone(snippet.live_revision)
            self.assertEqual(snippet.live_revision, snippet.latest_revision)
            # The revision content should contain the new data
            self.assertEqual(
                snippet.live_revision.content["text"],
                "Draft-enabled Foo, Published",
            )
            # Check that the published signal was fired
            self.assertEqual(mock_handler.call_count, 1)
            mock_call = mock_handler.mock_calls[0][2]
            self.assertEqual(mock_call["sender"], DraftStateModel)
            self.assertEqual(mock_call["instance"], snippet)
            self.assertIsInstance(mock_call["instance"], DraftStateModel)
        finally:
            # Always disconnect so the mock doesn't leak into other tests.
            published.disconnect(mock_handler)
def test_create_scheduled(self):
    """Saving a draft with future go-live/expiry stores the schedule unapproved."""
    scheduled_go_live = now() + datetime.timedelta(days=1)
    scheduled_expiry = now() + datetime.timedelta(days=2)
    form_data = {
        "text": "Some content",
        "go_live_at": submittable_timestamp(scheduled_go_live),
        "expire_at": submittable_timestamp(scheduled_expiry),
    }
    response = self.post(post_data=form_data)
    snippet = DraftStateModel.objects.get(text="Some content")
    # A plain save (not publish) lands back on the edit view.
    edit_url = reverse(
        "wagtailsnippets_tests_draftstatemodel:edit", args=[snippet.pk]
    )
    self.assertRedirects(response, edit_url)
    # The schedule is stored on the draft, which stays unpublished.
    self.assertEqual(snippet.go_live_at.date(), scheduled_go_live.date())
    self.assertEqual(snippet.expire_at.date(), scheduled_expiry.date())
    self.assertIs(snippet.expired, False)
    self.assertEqual(snippet.status_string, "draft")
    # No revision has been approved for go-live yet.
    approved_revisions = Revision.objects.for_instance(snippet).exclude(
        approved_go_live_at__isnull=True
    )
    self.assertFalse(approved_revisions.exists())
def test_create_scheduled_go_live_before_expiry(self):
    """Validation rejects a go-live date that falls after the expiry date."""
    response = self.post(
        post_data={
            "text": "Some content",
            "go_live_at": submittable_timestamp(now() + datetime.timedelta(days=2)),
            "expire_at": submittable_timestamp(now() + datetime.timedelta(days=1)),
        }
    )
    self.assertEqual(response.status_code, 200)
    # Both schedule fields carry the same validation error.
    for errored_field in ("go_live_at", "expire_at"):
        self.assertFormError(
            response.context["form"],
            errored_field,
            "Go live date/time must be before expiry date/time",
        )
    self.assertContains(
        response,
        '<div class="w-label-3 w-text-primary">Invalid schedule</div>',
        html=True,
    )
    # The status side panel toggle badge should report both errors.
    html = response.content.decode()
    self.assertRegex(
        html,
        self.STATUS_TOGGLE_BADGE_REGEX % {"num_errors": 2},
    )
def test_create_scheduled_expire_in_the_past(self):
    """Validation rejects an expiry date that is already in the past."""
    yesterday = now() + datetime.timedelta(days=-1)
    response = self.post(
        post_data={
            "text": "Some content",
            "expire_at": submittable_timestamp(yesterday),
        }
    )
    self.assertEqual(response.status_code, 200)
    # Only the expiry field is in error.
    self.assertFormError(
        response.context["form"],
        "expire_at",
        "Expiry date/time must be in the future.",
    )
    self.assertContains(
        response,
        '<div class="w-label-3 w-text-primary">Invalid schedule</div>',
        html=True,
    )
    # The status side panel toggle badge should show a single error.
    html = response.content.decode()
    self.assertRegex(
        html,
        self.STATUS_TOGGLE_BADGE_REGEX % {"num_errors": 1},
    )
def test_create_post_publish_scheduled(self):
    """Publishing with a future schedule approves a revision but stays offline."""
    go_live_at = now() + datetime.timedelta(days=1)
    expire_at = now() + datetime.timedelta(days=2)
    form_data = {
        "text": "Some content",
        "action-publish": "Publish",
        "go_live_at": submittable_timestamp(go_live_at),
        "expire_at": submittable_timestamp(expire_at),
    }
    response = self.post(post_data=form_data)
    # Publishing redirects to the listing page.
    self.assertRedirects(
        response, reverse("wagtailsnippets_tests_draftstatemodel:list")
    )
    snippet = DraftStateModel.objects.get(text="Some content")
    self.assertEqual(snippet.go_live_at.date(), go_live_at.date())
    self.assertEqual(snippet.expire_at.date(), expire_at.date())
    self.assertIs(snippet.expired, False)
    # A revision is now approved for go-live...
    approved_revisions = Revision.objects.for_instance(snippet).exclude(
        approved_go_live_at__isnull=True
    )
    self.assertTrue(approved_revisions.exists())
    # ...but the snippet itself remains unpublished until the scheduled time.
    self.assertFalse(snippet.live)
    self.assertFalse(snippet.first_published_at)
    self.assertEqual(snippet.status_string, "scheduled")
def test_create_shows_status_side_panel_skeleton(self):
    """The create view renders a draft status side panel with author and usage info."""
    self.user.first_name = "Chrismansyah"
    self.user.last_name = "Rahadi"
    self.user.save()
    response = self.get()
    soup = self.get_soup(response.content)
    panel = soup.select_one('[data-side-panel="status"]')
    self.assertIsNotNone(panel)

    def check_section(label_id, label_text, description):
        # Each panel section is labelled via aria-labelledby pointing at a
        # heading element with a known id; verify both label and full text.
        section = panel.select_one(f'[aria-labelledby="{label_id}"]')
        self.assertIsNotNone(section)
        label = section.select_one(f"#{label_id}")
        self.assertIsNotNone(label)
        self.assertEqual(label.get_text(separator="\n", strip=True), label_text)
        self.assertEqual(
            section.get_text(separator="\n", strip=True),
            f"{label_text}\n{description}",
        )

    check_section(
        "status-sidebar-draft",
        "Draft",
        "To be created by Chrismansyah Rahadi",
    )
    # The usage section is last in the panel and reports zero uses for a
    # not-yet-created object.
    usage_section = panel.select("section")[-1]
    self.assertIsNotNone(usage_section)
    self.assertEqual(
        usage_section.get_text(separator="\n", strip=True),
        "Usage\nUsed 0 times",
    )
class TestInlinePanelMedia(WagtailTestUtils, TestCase):
    """Check that form media required by InlinePanels is pulled into the page."""

    def test_inline_panel_media(self):
        self.login()
        add_url = reverse(
            "wagtailsnippets_snippetstests_multisectionrichtextsnippet:add"
        )
        response = self.client.get(add_url)
        self.assertEqual(response.status_code, 200)
        # The rich text field inside the inline panel requires Draftail's JS.
        self.assertContains(response, "wagtailadmin/js/draftail.js")
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_create_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 997,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_delete_view.py | from io import StringIO
from django.contrib.admin.utils import quote
from django.contrib.auth.models import Permission
from django.core import management
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from wagtail.test.testapp.models import Advert, VariousOnDeleteModel
from wagtail.test.utils import WagtailTestUtils
class TestSnippetDelete(WagtailTestUtils, TestCase):
    """Tests for the snippet delete view: permission checks, the confirmation
    page, protected references, usage counts, and before/after delete hooks."""

    fixtures = ["test.json"]

    def setUp(self):
        # Advert pk=1 is provided by the test.json fixture.
        self.test_snippet = Advert.objects.get(pk=1)
        self.user = self.login()

    def test_delete_get_with_limited_permissions(self):
        """With only admin access (no delete permission), GET redirects away."""
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
        )
        self.user.save()
        response = self.client.get(
            reverse(
                "wagtailsnippets_tests_advert:delete",
                args=[quote(self.test_snippet.pk)],
            )
        )
        self.assertEqual(response.status_code, 302)

    def test_delete_get(self):
        """The confirmation page offers a delete button posting back to itself."""
        delete_url = reverse(
            "wagtailsnippets_tests_advert:delete",
            args=[quote(self.test_snippet.pk)],
        )
        response = self.client.get(delete_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Yes, delete")
        self.assertContains(response, delete_url)

    @override_settings(WAGTAIL_I18N_ENABLED=True)
    def test_delete_get_with_i18n_enabled(self):
        """Same confirmation page behaviour with internationalisation enabled."""
        delete_url = reverse(
            "wagtailsnippets_tests_advert:delete",
            args=[quote(self.test_snippet.pk)],
        )
        response = self.client.get(delete_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Yes, delete")
        self.assertContains(response, delete_url)

    def test_delete_get_with_protected_reference(self):
        """A PROTECT reference hides the delete button and links to usage."""
        # captureOnCommitCallbacks ensures the references index is updated.
        with self.captureOnCommitCallbacks(execute=True):
            VariousOnDeleteModel.objects.create(
                text="Undeletable", on_delete_protect=self.test_snippet
            )
        delete_url = reverse(
            "wagtailsnippets_tests_advert:delete",
            args=[quote(self.test_snippet.pk)],
        )
        response = self.client.get(delete_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "This advert is referenced 1 time.")
        self.assertContains(
            response,
            "One or more references to this advert prevent it from being deleted.",
        )
        self.assertContains(
            response,
            reverse(
                "wagtailsnippets_tests_advert:usage",
                args=[quote(self.test_snippet.pk)],
            )
            + "?describe_on_delete=1",
        )
        self.assertNotContains(response, "Yes, delete")
        self.assertNotContains(response, delete_url)

    def test_delete_post_with_limited_permissions(self):
        """With only admin access (no delete permission), POST redirects away."""
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
        )
        self.user.save()
        response = self.client.post(
            reverse(
                "wagtailsnippets_tests_advert:delete",
                args=[quote(self.test_snippet.pk)],
            )
        )
        self.assertEqual(response.status_code, 302)

    def test_delete_post(self):
        """POST deletes the snippet and redirects to the listing."""
        response = self.client.post(
            reverse(
                "wagtailsnippets_tests_advert:delete",
                args=[quote(self.test_snippet.pk)],
            )
        )
        # Should be redirected to the listing page
        self.assertRedirects(response, reverse("wagtailsnippets_tests_advert:list"))
        # Check that the page is gone
        self.assertEqual(Advert.objects.filter(text="test_advert").count(), 0)

    def test_delete_post_with_protected_reference(self):
        """POST on a PROTECT-referenced snippet is denied and nothing is deleted."""
        with self.captureOnCommitCallbacks(execute=True):
            VariousOnDeleteModel.objects.create(
                text="Undeletable", on_delete_protect=self.test_snippet
            )
        delete_url = reverse(
            "wagtailsnippets_tests_advert:delete",
            args=[quote(self.test_snippet.pk)],
        )
        response = self.client.post(delete_url)
        # Should throw a PermissionDenied error and redirect to the dashboard
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, reverse("wagtailadmin_home"))
        # Check that the snippet is still here
        self.assertTrue(Advert.objects.filter(pk=self.test_snippet.pk).exists())

    def test_usage_link(self):
        """The confirmation page reports the usage count with a usage link."""
        # Rebuild the references index so the fixture's references are counted.
        output = StringIO()
        management.call_command("rebuild_references_index", stdout=output)
        response = self.client.get(
            reverse(
                "wagtailsnippets_tests_advert:delete",
                args=[quote(self.test_snippet.pk)],
            )
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/generic/confirm_delete.html")
        self.assertContains(response, "This advert is referenced 2 times")
        self.assertContains(
            response,
            reverse(
                "wagtailsnippets_tests_advert:usage",
                args=[quote(self.test_snippet.pk)],
            )
            + "?describe_on_delete=1",
        )

    def test_before_delete_snippet_hook_get(self):
        """A before_delete_snippet hook response overrides the GET view."""
        advert = Advert.objects.create(
            url="http://www.example.com/",
            text="Test hook",
        )

        def hook_func(request, instances):
            self.assertIsInstance(request, HttpRequest)
            self.assertQuerySetEqual(instances, ["<Advert: Test hook>"], transform=repr)
            return HttpResponse("Overridden!")

        with self.register_hook("before_delete_snippet", hook_func):
            response = self.client.get(
                reverse("wagtailsnippets_tests_advert:delete", args=[quote(advert.pk)])
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")

    def test_before_delete_snippet_hook_post(self):
        """A before_delete_snippet hook response blocks the deletion."""
        advert = Advert.objects.create(
            url="http://www.example.com/",
            text="Test hook",
        )

        def hook_func(request, instances):
            self.assertIsInstance(request, HttpRequest)
            self.assertQuerySetEqual(instances, ["<Advert: Test hook>"], transform=repr)
            return HttpResponse("Overridden!")

        with self.register_hook("before_delete_snippet", hook_func):
            response = self.client.post(
                reverse(
                    "wagtailsnippets_tests_advert:delete",
                    args=[quote(advert.pk)],
                )
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")
        # Request intercepted before advert was deleted
        self.assertTrue(Advert.objects.filter(pk=advert.pk).exists())

    def test_after_delete_snippet_hook(self):
        """An after_delete_snippet hook runs once the deletion has happened."""
        advert = Advert.objects.create(
            url="http://www.example.com/",
            text="Test hook",
        )

        def hook_func(request, instances):
            self.assertIsInstance(request, HttpRequest)
            self.assertQuerySetEqual(instances, ["<Advert: Test hook>"], transform=repr)
            return HttpResponse("Overridden!")

        with self.register_hook("after_delete_snippet", hook_func):
            response = self.client.post(
                reverse(
                    "wagtailsnippets_tests_advert:delete",
                    args=[quote(advert.pk)],
                )
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")
        # Request intercepted after advert was deleted
        self.assertFalse(Advert.objects.filter(pk=advert.pk).exists())
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_delete_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_edit_view.py | import datetime
from unittest import mock
from django.contrib.admin.utils import quote
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpRequest, HttpResponse, JsonResponse
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.timezone import now
from freezegun import freeze_time
from taggit.models import Tag
from wagtail.admin.admin_url_finder import AdminURLFinder
from wagtail.models import Locale, ModelLogEntry, Revision
from wagtail.signals import published
from wagtail.snippets.action_menu import (
ActionMenuItem,
get_base_snippet_action_menu_items,
)
from wagtail.test.snippets.models import (
FileUploadSnippet,
StandardSnippetWithCustomPrimaryKey,
TranslatableSnippet,
)
from wagtail.test.testapp.models import (
Advert,
AdvertWithTabbedInterface,
CustomPreviewSizesModel,
DraftStateCustomPrimaryKeyModel,
DraftStateModel,
FullFeaturedSnippet,
PreviewableModel,
RevisableCluster,
RevisableModel,
)
from wagtail.test.utils import WagtailTestUtils
from wagtail.test.utils.form_data import inline_formset, nested_form_data
from wagtail.test.utils.timestamps import submittable_timestamp
from wagtail.utils.timestamps import render_timestamp
class BaseTestSnippetEditView(WagtailTestUtils, TestCase):
    """Shared GET/POST helpers against a snippet's edit view.

    Subclasses must assign ``self.test_snippet`` in their ``setUp``.
    """

    def get_edit_url(self):
        # Resolve the edit URL via the snippet's viewset so the helpers work
        # for any snippet model, including those with custom primary keys.
        snippet = self.test_snippet
        args = [quote(snippet.pk)]
        return reverse(snippet.snippet_viewset.get_url_name("edit"), args=args)

    def get(self, params=None, headers=None):
        """GET the edit view for ``self.test_snippet``."""
        return self.client.get(self.get_edit_url(), params, headers=headers)

    def post(self, post_data=None, headers=None):
        """POST ``post_data`` to the edit view for ``self.test_snippet``."""
        return self.client.post(self.get_edit_url(), post_data, headers=headers)

    def setUp(self):
        self.user = self.login()

    def assertSchedulingDialogRendered(self, response, label="Edit schedule"):
        """Assert the schedule-publishing dialog and its trigger are rendered."""
        # Should show the "Edit schedule" button
        html = response.content.decode()
        self.assertTagInHTML(
            f'<button type="button" data-a11y-dialog-show="schedule-publishing-dialog">{label}</button>',
            html,
            count=1,
            allow_extra_attrs=True,
        )
        # Should show the dialog template pointing to the [data-edit-form] selector as the root
        soup = self.get_soup(html)
        dialog = soup.select_one(
            """
            template[data-controller="w-teleport"][data-w-teleport-target-value="[data-edit-form]"]
            #schedule-publishing-dialog
            """
        )
        self.assertIsNotNone(dialog)
        # Should render the main form with data-edit-form attribute
        self.assertTagInHTML(
            f'<form action="{self.get_edit_url()}" method="POST" data-edit-form>',
            html,
            count=1,
            allow_extra_attrs=True,
        )
        self.assertTagInHTML(
            '<div id="schedule-publishing-dialog" class="w-dialog publishing" data-controller="w-dialog">',
            html,
            count=1,
            allow_extra_attrs=True,
        )
class TestSnippetEditView(BaseTestSnippetEditView):
    """Edit-view tests for a plain (non-draft, non-tabbed) snippet model."""

    fixtures = ["test.json"]

    def setUp(self):
        super().setUp()
        self.test_snippet = Advert.objects.get(pk=1)
        # Seed a creation log entry so the status side panel can show
        # "last updated" information with a known timestamp.
        ModelLogEntry.objects.create(
            content_type=ContentType.objects.get_for_model(Advert),
            label="Test Advert",
            action="wagtail.create",
            timestamp=now() - datetime.timedelta(weeks=3),
            user=self.user,
            object_id="1",
        )

    def test_get_with_limited_permissions(self):
        """With only admin access (no change permission), GET redirects away."""
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
        )
        self.user.save()
        response = self.get()
        self.assertEqual(response.status_code, 302)

    def test_simple(self):
        """The edit page renders status info, history/usage links and the
        unsaved-changes controller (but no autosave, since Advert has no drafts)."""
        response = self.get()
        html = response.content.decode()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")
        self.assertNotContains(response, 'role="tablist"')
        # Without DraftStateMixin, there should be no "No publishing schedule set" info
        self.assertNotContains(response, "No publishing schedule set")
        history_url = reverse(
            "wagtailsnippets_tests_advert:history", args=[quote(self.test_snippet.pk)]
        )
        # History link should be present, one in the header and one in the status side panel
        self.assertContains(response, history_url, count=2)
        usage_url = reverse(
            "wagtailsnippets_tests_advert:usage", args=[quote(self.test_snippet.pk)]
        )
        # Usage link should be present in the status side panel
        self.assertContains(response, usage_url)
        # Live status and last updated info should be shown, with a link to the history page
        self.assertContains(response, "3\xa0weeks ago")
        self.assertTagInHTML(
            f'<a href="{history_url}" aria-describedby="status-sidebar-live">View history</a>',
            html,
            allow_extra_attrs=True,
        )
        soup = self.get_soup(response.content)
        # Should have the unsaved controller set up
        editor_form = soup.select_one("#w-editor-form")
        self.assertIsNotNone(editor_form)
        self.assertIn("w-unsaved", editor_form.attrs.get("data-controller").split())
        self.assertTrue(
            {
                "w-unsaved#submit",
                "beforeunload@window->w-unsaved#confirm",
            }.issubset(editor_form.attrs.get("data-action").split())
        )
        self.assertEqual(
            editor_form.attrs.get("data-w-unsaved-confirmation-value"),
            "true",
        )
        self.assertEqual(
            editor_form.attrs.get("data-w-unsaved-force-value"),
            "false",
        )
        # No revision tracking inputs, since Advert is not revisable.
        self.assertIsNone(editor_form.select_one("input[name='loaded_revision_id']"))
        self.assertIsNone(
            editor_form.select_one("input[name='loaded_revision_created_at']")
        )
        self.assertIsNotNone(editor_form)
        # Autosave should not be wired up for a non-draft-state model.
        self.assertNotIn("w-autosave", editor_form["data-controller"].split())
        self.assertNotIn("w-autosave", editor_form["data-action"])
        self.assertIsNone(editor_form.attrs.get("data-w-autosave-interval-value"))
        # AdminURLFinder should resolve the snippet's edit URL for this user.
        url_finder = AdminURLFinder(self.user)
        expected_url = "/admin/snippets/tests/advert/edit/%d/" % self.test_snippet.pk
        self.assertEqual(url_finder.get_edit_url(self.test_snippet), expected_url)

    def test_get_hydrate_create_view(self):
        """With ``_w_hydrate_create_view``, the view returns teleport partials
        that upgrade an in-browser create view into an edit view in place."""
        response = self.get(params={"_w_hydrate_create_view": "1"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/generic/edit_partials.html")
        soup = self.get_soup(response.content)
        # Should reload only the status side panel
        side_panels = soup.select(
            "template[data-controller='w-teleport']"
            "[data-w-teleport-target-value^='[data-side-panel=']"
            "[data-w-teleport-mode-value='innerHTML']"
        )
        self.assertEqual(len(side_panels), 1)
        status_side_panel = side_panels[0]
        self.assertEqual(
            status_side_panel["data-w-teleport-target-value"],
            "[data-side-panel='status']",
        )
        # Workflow and privacy features are not available
        workflow_status_dialog = soup.find("div", id="workflow-status-dialog")
        self.assertIsNone(workflow_status_dialog)
        set_privacy_dialog = soup.find("div", id="set-privacy")
        self.assertIsNone(set_privacy_dialog)
        breadcrumbs = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "header [data-w-breadcrumbs]",
                "data-w-teleport-mode-value": "outerHTML",
            },
        )
        self.assertIsNotNone(breadcrumbs)
        # Should include header buttons as they were not rendered in the create view
        self.assertIsNotNone(breadcrumbs.select_one("#w-slim-header-buttons"))
        # Should render the history link button as it wasn't rendered in the create view
        history_link = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "[data-side-panel-toggle]:last-of-type",
                "data-w-teleport-mode-value": "afterend",
            },
        )
        history_url = reverse(
            self.test_snippet.snippet_viewset.get_url_name("history"),
            args=(quote(self.test_snippet.pk),),
        )
        self.assertIsNotNone(history_link)
        self.assertIsNotNone(history_link.select_one(f"a[href='{history_url}']"))
        # The visible form heading is replaced with the snippet's string repr.
        form_title_heading = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "#header-title span",
                "data-w-teleport-mode-value": "textContent",
            },
        )
        self.assertIsNotNone(form_title_heading)
        self.assertEqual(form_title_heading.text.strip(), str(self.test_snippet))
        # The document <title> is updated to the edit-view title.
        header_title = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "head title",
                "data-w-teleport-mode-value": "textContent",
            },
        )
        self.assertIsNotNone(header_title)
        self.assertEqual(header_title.text.strip(), f"Editing: {self.test_snippet}")
        # Should not include any updates to the form as we don't have revisions
        # enabled and thus don't need to add loaded revision info
        form_adds = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "form[data-edit-form]",
                "data-w-teleport-mode-value": "afterbegin",
            },
        )
        self.assertIsNone(form_adds)
        # Should load the editing sessions module as it was not in the create view
        editing_sessions = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "#w-autosave-indicator",
                "data-w-teleport-mode-value": "afterend",
            },
        )
        self.assertIsNotNone(editing_sessions)
        # without the revision info
        self.assertIsNone(editing_sessions.select_one("input[name='revision_id']"))
        self.assertIsNone(
            editing_sessions.select_one("input[name='revision_created_at']")
        )

    def test_non_existent_model(self):
        """An unknown model name in the URL yields a 404."""
        response = self.client.get(
            f"/admin/snippets/tests/foo/edit/{quote(self.test_snippet.pk)}/"
        )
        self.assertEqual(response.status_code, 404)

    def test_nonexistent_id(self):
        """An unknown primary key yields a 404."""
        response = self.client.get(
            reverse("wagtailsnippets_tests_advert:edit", args=[999999])
        )
        self.assertEqual(response.status_code, 404)

    def test_edit_with_limited_permissions(self):
        """With only admin access, POST redirects and no edit URL is resolvable."""
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
        )
        self.user.save()
        response = self.post(
            post_data={"text": "test text", "url": "http://www.example.com/"}
        )
        self.assertEqual(response.status_code, 302)
        url_finder = AdminURLFinder(self.user)
        self.assertIsNone(url_finder.get_edit_url(self.test_snippet))

    def test_edit_invalid(self):
        """An invalid submission shows one header message, a go-to-error button,
        the field error exactly once, and forces the form into a dirty state."""
        response = self.post(post_data={"foo": "bar"})
        soup = self.get_soup(response.content)
        header_messages = soup.css.select(".messages[role='status'] ul > li")
        # the top level message should indicate that the page could not be saved
        self.assertEqual(len(header_messages), 1)
        message = header_messages[0]
        self.assertIn(
            "The advert could not be saved due to errors.", message.get_text()
        )
        # the top level message should provide a go to error button
        buttons = message.find_all("button")
        self.assertEqual(len(buttons), 1)
        self.assertEqual(buttons[0].attrs["data-controller"], "w-count w-focus")
        self.assertEqual(
            set(buttons[0].attrs["data-action"].split()),
            {"click->w-focus#focus", "wagtail:panel-init@document->w-count#count"},
        )
        self.assertIn("Go to the first error", buttons[0].get_text())
        # the error should only appear once: against the field, not in the header message
        error_messages = soup.css.select(".error-message")
        self.assertEqual(len(error_messages), 1)
        error_message = error_messages[0]
        self.assertEqual(error_message.parent["id"], "panel-child-text-errors")
        self.assertIn("This field is required", error_message.get_text())
        # Should have the unsaved controller set up
        editor_form = soup.select_one("#w-editor-form")
        self.assertIsNotNone(editor_form)
        self.assertIn("w-unsaved", editor_form.attrs.get("data-controller").split())
        self.assertTrue(
            {
                "w-unsaved#submit",
                "beforeunload@window->w-unsaved#confirm",
            }.issubset(editor_form.attrs.get("data-action").split())
        )
        self.assertEqual(
            editor_form.attrs.get("data-w-unsaved-confirmation-value"),
            "true",
        )
        self.assertEqual(
            editor_form.attrs.get("data-w-unsaved-force-value"),
            # The form is invalid, we want to force it to be "dirty" on initial load
            "true",
        )

    def test_edit_invalid_with_json_response(self):
        """An invalid submission with Accept: application/json returns a 400
        with a structured validation error payload."""
        response = self.post(
            post_data={"foo": "bar"},
            headers={"Accept": "application/json"},
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response["Content-Type"], "application/json")
        self.assertEqual(
            response.json(),
            {
                "success": False,
                "error_code": "validation_error",
                "error_message": "There are validation errors, click save to highlight them.",
            },
        )

    def test_edit(self):
        """A valid submission saves the changes and redirects to the listing."""
        response = self.post(
            post_data={
                "text": "edited_test_advert",
                "url": "http://www.example.com/edited",
            }
        )
        self.assertRedirects(response, reverse("wagtailsnippets_tests_advert:list"))
        snippets = Advert.objects.filter(text="edited_test_advert")
        self.assertEqual(snippets.count(), 1)
        self.assertEqual(snippets.first().url, "http://www.example.com/edited")

    def test_edit_with_json_response(self):
        """A valid submission with Accept: application/json returns a success
        payload (pk and field updates) instead of a redirect."""
        response = self.post(
            post_data={
                "text": "edited_test_advert",
                "url": "http://www.example.com/edited",
            },
            headers={"Accept": "application/json"},
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        snippets = Advert.objects.filter(text="edited_test_advert")
        self.assertEqual(snippets.count(), 1)
        snippet = snippets.first()
        self.assertEqual(snippet.url, "http://www.example.com/edited")
        response_json = response.json()
        self.assertEqual(response_json["success"], True)
        self.assertEqual(response_json["pk"], snippet.pk)
        self.assertEqual(response_json["field_updates"], {})

    def test_edit_with_tags(self):
        """Submitted comma-separated tags are saved onto the snippet."""
        tags = ["hello", "world"]
        response = self.post(
            post_data={
                "text": "edited_test_advert",
                "url": "http://www.example.com/edited",
                "tags": ", ".join(tags),
            }
        )
        self.assertRedirects(response, reverse("wagtailsnippets_tests_advert:list"))
        snippet = Advert.objects.get(text="edited_test_advert")
        expected_tags = list(Tag.objects.order_by("name").filter(name__in=tags))
        self.assertEqual(len(expected_tags), 2)
        self.assertEqual(list(snippet.tags.order_by("name")), expected_tags)

    def test_before_edit_snippet_hook_get(self):
        """A before_edit_snippet hook response overrides the GET view."""
        def hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "test_advert")
            self.assertEqual(instance.url, "http://www.example.com")
            return HttpResponse("Overridden!")

        with self.register_hook("before_edit_snippet", hook_func):
            response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")

    def test_before_edit_snippet_hook_get_with_json_response(self):
        """For JSON clients, a non-JSON hook response becomes a structured
        blocked_by_hook error, while a JsonResponse passes through as-is."""
        def non_json_hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "test_advert")
            self.assertEqual(instance.url, "http://www.example.com")
            return HttpResponse("Overridden!")

        def json_hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "test_advert")
            self.assertEqual(instance.url, "http://www.example.com")
            return JsonResponse({"status": "purple"})

        with self.register_hook("before_edit_snippet", non_json_hook_func):
            response = self.get(
                headers={"Accept": "application/json"},
            )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "success": False,
                "error_code": "blocked_by_hook",
                "error_message": "Request to edit advert was blocked by hook.",
            },
        )
        with self.register_hook("before_edit_snippet", json_hook_func):
            response = self.get(
                headers={"Accept": "application/json"},
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {"status": "purple"})

    def test_before_edit_snippet_hook_post(self):
        """A before_edit_snippet hook response blocks the save on POST."""
        def hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "test_advert")
            self.assertEqual(instance.url, "http://www.example.com")
            return HttpResponse("Overridden!")

        with self.register_hook("before_edit_snippet", hook_func):
            response = self.post(
                post_data={
                    "text": "Edited and runs hook",
                    "url": "http://www.example.com/hook-enabled-edited",
                }
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")
        # Request intercepted before advert was updated
        self.assertEqual(Advert.objects.get().text, "test_advert")

    def test_before_edit_snippet_hook_post_with_json_response(self):
        """On POST for JSON clients, a non-JSON hook response maps to a 400
        blocked_by_hook payload; a JsonResponse is returned unchanged. In both
        cases the save is blocked."""
        def non_json_hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "test_advert")
            self.assertEqual(instance.url, "http://www.example.com")
            return HttpResponse("Overridden!")

        def json_hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "test_advert")
            self.assertEqual(instance.url, "http://www.example.com")
            return JsonResponse({"status": "purple"})

        with self.register_hook("before_edit_snippet", non_json_hook_func):
            post_data = {
                "text": "Edited and runs hook",
                "url": "http://www.example.com/hook-enabled-edited",
            }
            response = self.post(
                post_data,
                headers={"Accept": "application/json"},
            )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "success": False,
                "error_code": "blocked_by_hook",
                "error_message": "Request to edit advert was blocked by hook.",
            },
        )
        # Request intercepted before advert was updated
        self.assertEqual(Advert.objects.get().text, "test_advert")
        with self.register_hook("before_edit_snippet", json_hook_func):
            post_data = {
                "text": "Edited and runs hook",
                "url": "http://www.example.com/hook-enabled-edited",
            }
            response = self.post(
                post_data,
                headers={"Accept": "application/json"},
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {"status": "purple"})
        # Request intercepted before advert was updated
        self.assertEqual(Advert.objects.get().text, "test_advert")

    def test_after_edit_snippet_hook(self):
        """An after_edit_snippet hook runs once the save has happened and can
        replace the response."""
        def hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "Edited and runs hook")
            self.assertEqual(instance.url, "http://www.example.com/hook-enabled-edited")
            return HttpResponse("Overridden!")

        with self.register_hook("after_edit_snippet", hook_func):
            response = self.post(
                post_data={
                    "text": "Edited and runs hook",
                    "url": "http://www.example.com/hook-enabled-edited",
                }
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")
        # Request intercepted after advert was updated
        self.assertEqual(Advert.objects.get().text, "Edited and runs hook")

    def test_after_edit_snippet_hook_with_json_response(self):
        """After a save, for JSON clients a non-JSON hook response is ignored
        (normal success payload returned) while a JsonResponse replaces it."""
        def non_json_hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "Edited and runs hook")
            self.assertEqual(instance.url, "http://www.example.com/hook-enabled-edited")
            return HttpResponse("Overridden!")

        def json_hook_func(request, instance):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(instance.text, "Edited and runs hook x2")
            self.assertEqual(instance.url, "http://www.example.com/hook-enabled-edited")
            return JsonResponse({"status": "purple"})

        with self.register_hook("after_edit_snippet", non_json_hook_func):
            post_data = {
                "text": "Edited and runs hook",
                "url": "http://www.example.com/hook-enabled-edited",
            }
            response = self.post(
                post_data,
                headers={"Accept": "application/json"},
            )
        self.assertEqual(response.status_code, 200)
        # hook response is ignored, since it's not a JSON response
        self.assertEqual(response.json()["success"], True)
        # Request intercepted after advert was updated
        self.assertEqual(Advert.objects.get().text, "Edited and runs hook")
        with self.register_hook("after_edit_snippet", json_hook_func):
            post_data = {
                "text": "Edited and runs hook x2",
                "url": "http://www.example.com/hook-enabled-edited",
            }
            response = self.post(
                post_data,
                headers={"Accept": "application/json"},
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {"status": "purple"})
        # Request intercepted after advert was updated
        self.assertEqual(Advert.objects.get().text, "Edited and runs hook x2")

    def test_register_snippet_action_menu_item(self):
        """A hook-registered action menu item is rendered in the action menu."""
        class TestSnippetActionMenuItem(ActionMenuItem):
            label = "Test"
            name = "test"
            icon_name = "check"
            classname = "custom-class"

            def is_shown(self, context):
                return True

        def hook_func(model):
            return TestSnippetActionMenuItem(order=0)

        with self.register_hook("register_snippet_action_menu_item", hook_func):
            # The menu items are cached per model; clear before and after so
            # the hook takes effect and doesn't leak into other tests.
            get_base_snippet_action_menu_items.cache_clear()
            response = self.get()
        get_base_snippet_action_menu_items.cache_clear()
        self.assertContains(
            response,
            '<button type="submit" name="test" value="Test" class="button custom-class"><svg class="icon icon-check icon" aria-hidden="true"><use href="#icon-check"></use></svg>Test</button>',
            html=True,
        )

    def test_construct_snippet_action_menu(self):
        """A construct_snippet_action_menu hook can mutate the menu items list."""
        def hook_func(menu_items, request, context):
            self.assertIsInstance(menu_items, list)
            self.assertIsInstance(request, WSGIRequest)
            self.assertEqual(context["view"], "edit")
            self.assertEqual(context["instance"], self.test_snippet)
            self.assertEqual(context["model"], Advert)
            # Remove the save item
            del menu_items[0]

        with self.register_hook("construct_snippet_action_menu", hook_func):
            response = self.get()
        self.assertNotContains(response, "<em>Save</em>")

    def test_previewable_snippet(self):
        """A previewable snippet renders the default three preview size radios,
        with mobile checked by default."""
        self.test_snippet = PreviewableModel.objects.create(
            text="Preview-enabled snippet"
        )
        response = self.get()
        self.assertEqual(response.status_code, 200)
        soup = self.get_soup(response.content)
        radios = soup.select('input[type="radio"][name="preview-size"]')
        self.assertEqual(len(radios), 3)
        self.assertEqual(
            [
                "Preview in mobile size",
                "Preview in tablet size",
                "Preview in desktop size",
            ],
            [radio["aria-label"] for radio in radios],
        )
        self.assertEqual("375", radios[0]["data-device-width"])
        self.assertTrue(radios[0].has_attr("checked"))

    def test_custom_preview_sizes(self):
        """A model defining custom preview sizes renders its own radios,
        honouring the model's default selection."""
        self.test_snippet = CustomPreviewSizesModel.objects.create(
            text="Preview-enabled with custom sizes",
        )
        response = self.get()
        self.assertEqual(response.status_code, 200)
        soup = self.get_soup(response.content)
        radios = soup.select('input[type="radio"][name="preview-size"]')
        self.assertEqual(len(radios), 2)
        self.assertEqual("412", radios[0]["data-device-width"])
        self.assertEqual("Custom mobile preview", radios[0]["aria-label"])
        self.assertFalse(radios[0].has_attr("checked"))
        self.assertEqual("1280", radios[1]["data-device-width"])
        self.assertEqual("Original desktop", radios[1]["aria-label"])
        self.assertTrue(radios[1].has_attr("checked"))
class TestEditTabbedSnippet(BaseTestSnippetEditView):
    """Edit view tests for a snippet model that uses a tabbed interface."""

    def setUp(self):
        super().setUp()
        self.test_snippet = AdvertWithTabbedInterface.objects.create(
            text="test_advert",
            url="http://www.example.com",
            something_else="Model with tabbed interface",
        )

    def test_snippet_with_tabbed_interface(self):
        """The edit form renders a tab link for each tab of the interface."""
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")
        self.assertContains(response, 'role="tablist"')
        for tab in ("advert", "other"):
            self.assertContains(
                response,
                f'<a id="tab-label-{tab}" href="#tab-{tab}" class="w-tabs__tab " '
                'role="tab" aria-selected="false" tabindex="-1" '
                'data-action="w-tabs#select:prevent" data-w-tabs-target="trigger">',
            )
class TestEditFileUploadSnippet(BaseTestSnippetEditView):
    """Edit view tests for a snippet model with a file field."""

    def setUp(self):
        super().setUp()
        self.test_snippet = FileUploadSnippet.objects.create(
            file=ContentFile(b"Simple text document", "test.txt")
        )

    def test_edit_file_upload_multipart(self):
        """The form posts as multipart, and submitting replaces the stored file."""
        # The form must declare multipart encoding for the file field to work.
        get_response = self.get()
        self.assertContains(get_response, 'enctype="multipart/form-data"')

        replacement = SimpleUploadedFile("replacement.txt", b"Replacement document")
        post_response = self.post(post_data={"file": replacement})
        self.assertRedirects(
            post_response,
            reverse("wagtailsnippets_snippetstests_fileuploadsnippet:list"),
        )
        snippet = FileUploadSnippet.objects.get()
        self.assertEqual(snippet.file.read(), b"Replacement document")
@override_settings(WAGTAIL_I18N_ENABLED=True)
class TestLocaleSelectorOnEdit(BaseTestSnippetEditView):
    """Tests for the locale switcher and locale indicator on the snippet edit view."""

    fixtures = ["test.json"]

    LOCALE_SELECTOR_LABEL = "Switch locales"
    LOCALE_INDICATOR_HTML = '<h3 id="status-sidebar-english"'

    def setUp(self):
        super().setUp()
        self.test_snippet = TranslatableSnippet.objects.create(text="This is a test")
        self.fr_locale = Locale.objects.create(language_code="fr")
        self.test_snippet_fr = self.test_snippet.copy_for_translation(self.fr_locale)
        self.test_snippet_fr.save()

    def _assert_locale_ui(self, response, *, selector, indicator):
        """Assert presence (True) or absence (False) of the switcher and indicator."""
        check_selector = self.assertContains if selector else self.assertNotContains
        check_indicator = self.assertContains if indicator else self.assertNotContains
        check_selector(response, self.LOCALE_SELECTOR_LABEL)
        check_indicator(response, self.LOCALE_INDICATOR_HTML)

    def test_locale_selector(self):
        """Both switcher and indicator appear when a translation exists."""
        self._assert_locale_ui(self.get(), selector=True, indicator=True)

    def test_locale_selector_without_translation(self):
        """Without translations the switcher is hidden but the indicator remains."""
        self.test_snippet_fr.delete()
        response = self.get()
        self._assert_locale_ui(response, selector=False, indicator=True)
        self.assertContains(response, "No other translations")

    @override_settings(WAGTAIL_I18N_ENABLED=False)
    def test_locale_selector_not_present_when_i18n_disabled(self):
        """No locale UI at all when i18n is disabled."""
        self._assert_locale_ui(self.get(), selector=False, indicator=False)

    def test_locale_selector_not_present_on_non_translatable_snippet(self):
        """Non-translatable models never render locale UI, even with i18n enabled."""
        self.test_snippet = Advert.objects.get(pk=1)
        self._assert_locale_ui(self.get(), selector=False, indicator=False)
class TestEditRevisionSnippet(BaseTestSnippetEditView):
    """Edit view tests for a revision-enabled (non-DraftState) snippet model.

    Fix: renamed ``test_save_outdated_revision_timestampwith_json_response`` to
    ``test_save_outdated_revision_timestamp_with_json_response`` (missing
    underscore in the original name).
    """

    def setUp(self):
        super().setUp()
        self.test_snippet = RevisableModel.objects.create(text="foo")

    def test_get_hydrate_create_view(self):
        """Partial-rendering response used to hydrate a create view into an edit view."""
        latest_revision = self.test_snippet.save_revision(user=self.user)
        response = self.get(params={"_w_hydrate_create_view": "1"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/generic/edit_partials.html")
        soup = self.get_soup(response.content)

        # Should reload only the status side panel
        side_panels = soup.select(
            "template[data-controller='w-teleport']"
            "[data-w-teleport-target-value^='[data-side-panel=']"
            "[data-w-teleport-mode-value='innerHTML']"
        )
        self.assertEqual(len(side_panels), 1)
        status_side_panel = side_panels[0]
        self.assertEqual(
            status_side_panel["data-w-teleport-target-value"],
            "[data-side-panel='status']",
        )

        # Workflow and privacy features are not available
        workflow_status_dialog = soup.find("div", id="workflow-status-dialog")
        self.assertIsNone(workflow_status_dialog)
        set_privacy_dialog = soup.find("div", id="set-privacy")
        self.assertIsNone(set_privacy_dialog)

        breadcrumbs = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "header [data-w-breadcrumbs]",
                "data-w-teleport-mode-value": "outerHTML",
            },
        )
        self.assertIsNotNone(breadcrumbs)
        # Should include header buttons as they were not rendered in the create view
        self.assertIsNotNone(breadcrumbs.select_one("#w-slim-header-buttons"))

        # Should render the history link button as it wasn't rendered in the create view
        history_link = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "[data-side-panel-toggle]:last-of-type",
                "data-w-teleport-mode-value": "afterend",
            },
        )
        history_url = reverse(
            self.test_snippet.snippet_viewset.get_url_name("history"),
            args=(quote(self.test_snippet.pk),),
        )
        self.assertIsNotNone(history_link)
        self.assertIsNotNone(history_link.select_one(f"a[href='{history_url}']"))

        form_title_heading = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "#header-title span",
                "data-w-teleport-mode-value": "textContent",
            },
        )
        self.assertIsNotNone(form_title_heading)
        self.assertEqual(form_title_heading.text.strip(), str(self.test_snippet))

        header_title = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "head title",
                "data-w-teleport-mode-value": "textContent",
            },
        )
        self.assertIsNotNone(header_title)
        self.assertEqual(header_title.text.strip(), f"Editing: {self.test_snippet}")

        # Should include loaded revision ID and timestamp in the form for
        # subsequent autosave requests
        form_adds = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "form[data-edit-form]",
                "data-w-teleport-mode-value": "afterbegin",
            },
        )
        self.assertIsNotNone(form_adds)
        self.assertEqual(
            form_adds.select_one("input[name='loaded_revision_id']")["value"],
            str(latest_revision.pk),
        )
        self.assertEqual(
            form_adds.select_one("input[name='loaded_revision_created_at']")["value"],
            latest_revision.created_at.isoformat(),
        )

        # Should load the editing sessions module as it was not in the create view
        editing_sessions = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "#w-autosave-indicator",
                "data-w-teleport-mode-value": "afterend",
            },
        )
        self.assertIsNotNone(editing_sessions)
        # with the revision info
        self.assertEqual(
            editing_sessions.select_one("input[name='revision_id']")["value"],
            str(latest_revision.pk),
        )
        self.assertEqual(
            editing_sessions.select_one("input[name='revision_created_at']")["value"],
            latest_revision.created_at.isoformat(),
        )

    def test_edit_snippet_with_revision(self):
        """A normal (HTML) edit updates the instance and creates a revision plus log entry."""
        response = self.post(post_data={"text": "bar"})
        self.assertRedirects(
            response, reverse("wagtailsnippets_tests_revisablemodel:list")
        )

        # The instance should be updated
        snippets = RevisableModel.objects.filter(text="bar")
        self.assertEqual(snippets.count(), 1)

        # The revision should be created
        revisions = self.test_snippet.revisions
        revision = revisions.first()
        self.assertEqual(revisions.count(), 1)
        self.assertEqual(revision.content["text"], "bar")

        # The log entry should have the revision attached
        log_entries = ModelLogEntry.objects.for_instance(self.test_snippet).filter(
            action="wagtail.edit"
        )
        self.assertEqual(log_entries.count(), 1)
        self.assertEqual(log_entries.first().revision, revision)

    def test_edit_snippet_with_revision_and_json_response(self):
        """An autosave (JSON) edit creates a fresh revision and reports its details."""
        initial_revision = self.test_snippet.save_revision(user=self.user)
        self.assertEqual(self.test_snippet.revisions.count(), 1)
        response = self.post(
            post_data={
                "text": "bar",
                "loaded_revision_id": initial_revision.pk,
                "loaded_revision_created_at": initial_revision.created_at.isoformat(),
            },
            headers={"Accept": "application/json"},
        )

        # Should be a 200 OK JSON response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        response_json = response.json()
        self.assertIs(response_json["success"], True)
        self.assertEqual(response_json["pk"], self.test_snippet.pk)

        # Should create a new revision to be overwritten later
        self.assertEqual(self.test_snippet.revisions.count(), 2)
        self.assertNotEqual(response_json["revision_id"], initial_revision.pk)
        revision = self.test_snippet.revisions.get(pk=response_json["revision_id"])
        self.assertEqual(
            response_json["revision_created_at"],
            revision.created_at.isoformat(),
        )
        self.assertEqual(revision.content["text"], "bar")

        # The instance should be updated
        snippets = RevisableModel.objects.filter(text="bar")
        self.assertEqual(snippets.count(), 1)

        # The log entry should have the revision attached
        log_entries = ModelLogEntry.objects.for_instance(self.test_snippet).filter(
            action="wagtail.edit"
        )
        self.assertEqual(log_entries.count(), 1)
        self.assertEqual(log_entries.first().revision, revision)

    def test_edit_with_inline_models_and_json_response(self):
        """Autosaving a clusterable snippet reports formset field updates for new children."""
        self.test_snippet = RevisableCluster.objects.create(text="Test for inline")
        form_data = nested_form_data(
            {
                "text": "Edited and added one child",
                "children": inline_formset([{"id": "", "text": "Child 1"}]),
            }
        )
        response = self.post(
            post_data=form_data,
            headers={"Accept": "application/json"},
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        self.assertEqual(self.test_snippet.children.count(), 1)
        child = self.test_snippet.children.first()
        self.assertEqual(child.text, "Child 1")
        response_json = response.json()
        self.assertEqual(response_json["success"], True)
        self.assertEqual(response_json["pk"], self.test_snippet.pk)
        # field_updates tells the client how to patch the formset management
        # fields so the next autosave treats the new child as existing.
        self.assertEqual(
            response_json["field_updates"],
            {"children-INITIAL_FORMS": "1", "children-0-id": str(child.pk)},
        )

    def test_save_outdated_revision_with_json_response(self):
        """Autosaving against an outdated loaded_revision_id is rejected with a 400."""
        self.test_snippet.text = "Initial revision"
        revision = self.test_snippet.save_revision(user=self.user)
        self.test_snippet.text = "Latest revision"
        self.test_snippet.save_revision()
        self.assertEqual(self.test_snippet.revisions.count(), 2)

        response = self.post(
            post_data={
                "text": "Updated revision",
                "loaded_revision_id": revision.pk,
            },
            headers={"Accept": "application/json"},
        )

        # Instead of creating a new revision for autosave (which means the user
        # would unknowingly replace a newer revision), we return an error
        # response that should be a 400 response
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response["Content-Type"], "application/json")
        self.assertEqual(
            response.json(),
            {
                "success": False,
                "error_code": "invalid_revision",
                "error_message": "Saving will overwrite a newer version.",
            },
        )
        self.assertEqual(self.test_snippet.revisions.count(), 2)
        revision.refresh_from_db()
        self.assertEqual(revision.content["text"], "Initial revision")

    def test_save_outdated_revision_timestamp_with_json_response(self):
        """Autosaving with a stale revision timestamp (same ID) is rejected with a 400."""
        self.test_snippet.text = "Initial revision"
        revision = self.test_snippet.save_revision(user=self.user)
        loaded_revision_created_at = revision.created_at.isoformat()
        self.test_snippet.text = "Latest revision"
        self.test_snippet.save_revision(user=self.user, overwrite_revision=revision)
        self.assertEqual(self.test_snippet.revisions.count(), 1)

        response = self.post(
            post_data={
                "text": "Updated revision",
                "loaded_revision_id": revision.pk,
                "loaded_revision_created_at": loaded_revision_created_at,
            },
            headers={"Accept": "application/json"},
        )

        # Instead of creating a new revision for autosave (which means the user
        # would unknowingly replace a newer revision), we return an error
        # response that should be a 400 response
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response["Content-Type"], "application/json")
        self.assertEqual(
            response.json(),
            {
                "success": False,
                "error_code": "invalid_revision",
                "error_message": "Saving will overwrite a newer version.",
            },
        )
        self.assertEqual(self.test_snippet.revisions.count(), 1)
        revision.refresh_from_db()
        self.assertEqual(revision.content["text"], "Latest revision")

    def test_overwrite_revision_with_json_response(self):
        """A second autosave overwrites the revision created by the first one."""
        self.test_snippet.text = "Initial revision"
        initial_revision = self.test_snippet.save_revision()
        self.test_snippet.text = "Changed via a previous autosave"
        revision = self.test_snippet.save_revision(user=self.user)
        self.assertEqual(self.test_snippet.revisions.count(), 2)

        response = self.post(
            post_data={
                "text": "Updated revision",
                # The page was originally loaded with initial_revision, but
                # a successful autosave created a new revision which we now
                # want to overwrite with a new autosave request
                "loaded_revision_id": initial_revision.pk,
                "overwrite_revision_id": revision.pk,
            },
            headers={"Accept": "application/json"},
        )

        # Should be a 200 OK JSON response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        revision.refresh_from_db()
        response_json = response.json()
        self.assertIs(response_json["success"], True)
        self.assertEqual(response_json["pk"], self.test_snippet.pk)
        self.assertEqual(response_json["revision_id"], revision.pk)
        self.assertEqual(
            response_json["revision_created_at"],
            revision.created_at.isoformat(),
        )
        self.assertEqual(response_json["field_updates"], {})

        soup = self.get_soup(response_json["html"])
        status_side_panel = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "[data-side-panel='status']",
                "data-w-teleport-mode-value": "innerHTML",
            },
        )
        self.assertIsNotNone(status_side_panel)
        breadcrumbs = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "header [data-w-breadcrumbs]",
                "data-w-teleport-mode-value": "outerHTML",
            },
        )
        self.assertIsNotNone(breadcrumbs)
        form_title_heading = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "#header-title span",
                "data-w-teleport-mode-value": "textContent",
            },
        )
        self.assertIsNotNone(form_title_heading)
        self.assertEqual(form_title_heading.text.strip(), "Updated revision")
        header_title = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "head title",
                "data-w-teleport-mode-value": "textContent",
            },
        )
        self.assertIsNotNone(header_title)
        self.assertEqual(header_title.text.strip(), "Editing: Updated revision")

        # No new revision was created; the given one was updated in place.
        self.assertEqual(self.test_snippet.revisions.count(), 2)
        revision.refresh_from_db()
        self.assertEqual(revision.content["text"], "Updated revision")

    def test_overwrite_non_latest_revision(self):
        """Requesting to overwrite a revision that is no longer latest fails with a 400."""
        self.test_snippet.text = "Initial revision"
        initial_revision = self.test_snippet.save_revision(user=self.user)
        self.test_snippet.text = "First update via autosave"
        user_revision = self.test_snippet.save_revision(user=self.user)
        self.test_snippet.text = "Someone else's changed text"
        later_revision = self.test_snippet.save_revision()
        self.assertEqual(self.test_snippet.revisions.count(), 3)

        post_data = {
            "text": "Updated revision",
            "loaded_revision_id": initial_revision.id,
            "overwrite_revision_id": user_revision.id,
        }
        response = self.post(
            post_data=post_data,
            headers={"Accept": "application/json"},
        )

        # Should be a 400 response
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response["Content-Type"], "application/json")
        self.assertEqual(
            response.json(),
            {
                "success": False,
                "error_code": "invalid_revision",
                "error_message": "Saving will overwrite a newer version.",
            },
        )

        # Live DB record should be unchanged
        # (neither save_revision nor the failed form post should have updated it)
        self.test_snippet.refresh_from_db()
        self.assertEqual(self.test_snippet.text, "foo")

        # The passed revision for overwriting, and the actual latest revision, should both be unchanged
        self.assertEqual(self.test_snippet.revisions.count(), 3)
        user_revision.refresh_from_db()
        self.assertEqual(user_revision.content["text"], "First update via autosave")
        later_revision.refresh_from_db()
        self.assertEqual(later_revision.content["text"], "Someone else's changed text")
        self.assertEqual(self.test_snippet.get_latest_revision().id, later_revision.id)

    def test_overwrite_nonexistent_revision(self):
        """An overwrite_revision_id that doesn't exist is treated as outdated (400)."""
        self.test_snippet.text = "Initial revision"
        user_revision = self.test_snippet.save_revision(user=self.user)
        self.assertEqual(self.test_snippet.revisions.count(), 1)

        post_data = {
            "text": "Updated revision",
            "overwrite_revision_id": 999999,
        }
        response = self.post(
            post_data=post_data,
            headers={"Accept": "application/json"},
        )

        # Should be a 400 response
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response["Content-Type"], "application/json")
        self.assertEqual(
            response.json(),
            {
                "success": False,
                "error_code": "invalid_revision",
                # We only naively check whether overwrite_revision_id matches
                # the latest revision ID, and if it doesn't, we assume there's
                # a newer revision.
                "error_message": "Saving will overwrite a newer version.",
            },
        )

        # Live DB record should be unchanged
        # (neither save_revision nor the failed form post should have updated it)
        self.test_snippet.refresh_from_db()
        self.assertEqual(self.test_snippet.text, "foo")

        # The latest revision should be unchanged
        self.assertEqual(self.test_snippet.revisions.count(), 1)
        latest_revision = self.test_snippet.get_latest_revision()
        self.assertEqual(latest_revision.id, user_revision.id)
        self.assertEqual(latest_revision.content["text"], "Initial revision")
class TestEditDraftStateSnippet(BaseTestSnippetEditView):
    """Edit view tests for a DraftState-enabled snippet with a custom primary key."""

    # Regex matching the status side panel toggle followed by its error-count
    # badge. The %(num_errors)s placeholder is presumably filled in via
    # %-formatting by the tests that use it — usage not visible in this chunk.
    STATUS_TOGGLE_BADGE_REGEX = (
        r'data-side-panel-toggle="status"[^<]+<svg[^<]+<use[^<]+</use[^<]+</svg[^<]+'
        r"<div data-side-panel-toggle-counter[^>]+w-bg-critical-200[^>]+>\s*%(num_errors)s\s*</div>"
    )

    def setUp(self):
        super().setUp()
        # Start from an unpublished draft instance with a non-integer primary key.
        self.test_snippet = DraftStateCustomPrimaryKeyModel.objects.create(
            custom_id="custom/1", text="Draft-enabled Foo", live=False
        )
def test_get(self):
revision = self.test_snippet.save_revision()
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")
# The save button should be labelled "Save draft"
self.assertContains(response, "Save draft")
# The publish button should exist
self.assertContains(response, "Publish")
# The publish button should have name="action-publish"
self.assertContains(
response,
'<button\n type="submit"\n name="action-publish"\n value="action-publish"\n class="button action-save button-longrunning"\n data-controller="w-progress"\n data-action="w-progress#activate"\n',
)
# The status side panel should show "No publishing schedule set" info
self.assertContains(response, "No publishing schedule set")
# Should show the "Set schedule" button
self.assertSchedulingDialogRendered(response, label="Set schedule")
# Should show the correct subtitle in the dialog
self.assertContains(
response,
"Choose when this draft state custom primary key model should go live and/or expire",
)
# Should not show the Unpublish action menu item
unpublish_url = reverse(
"wagtailsnippets_tests_draftstatecustomprimarykeymodel:unpublish",
args=(quote(self.test_snippet.pk),),
)
self.assertNotContains(
response,
f'<a class="button" href="{unpublish_url}">',
)
self.assertNotContains(response, "Unpublish")
soup = self.get_soup(response.content)
form = soup.select_one("form[data-edit-form]")
self.assertIsNotNone(form)
loaded_revision = form.select_one("input[name='loaded_revision_id']")
self.assertIsNotNone(loaded_revision)
self.assertEqual(int(loaded_revision["value"]), revision.pk)
loaded_timestamp = form.select_one("input[name='loaded_revision_created_at']")
self.assertIsNotNone(loaded_timestamp)
self.assertEqual(loaded_timestamp["value"], revision.created_at.isoformat())
# Autosave defaults to enabled with 500ms interval
soup = self.get_soup(response.content)
form = soup.select_one("form[data-edit-form]")
self.assertIsNotNone(form)
self.assertIn("w-autosave", form["data-controller"].split())
self.assertTrue(
{
"w-unsaved:add->w-autosave#save:prevent",
"w-autosave:success->w-unsaved#clear",
}.issubset(form["data-action"].split())
)
self.assertEqual(form.attrs.get("data-w-autosave-interval-value"), "500")
    def test_get_hydrate_create_view(self):
        """Partial response that upgrades a create view into an edit view in place.

        Uses FullFeaturedSnippet (rather than the class's
        DraftStateCustomPrimaryKeyModel instance from setUp) so the hydration
        of all UI features can be checked at once.
        """
        # Use FullFeaturedSnippet to test the UI hydration of all features
        snippet = FullFeaturedSnippet.objects.create(
            text="Hello world",
            country_code="UK",
            some_number=42,
        )
        latest_revision = snippet.save_revision(user=self.user)
        edit_url = reverse(
            snippet.snippet_viewset.get_url_name("edit"),
            args=(quote(snippet.pk),),
        )
        response = self.client.get(edit_url, {"_w_hydrate_create_view": "1"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/generic/edit_partials.html")
        soup = self.get_soup(response.content)

        # Should reload the status and preview side panels only
        side_panels = soup.select(
            "template[data-controller='w-teleport']"
            "[data-w-teleport-target-value^='[data-side-panel=']"
            "[data-w-teleport-mode-value='innerHTML']"
        )
        self.assertEqual(len(side_panels), 2)
        status_side_panel = side_panels[0]
        self.assertEqual(
            status_side_panel["data-w-teleport-target-value"],
            "[data-side-panel='status']",
        )

        # Under normal circumstances, a newly-created snippet would never
        # immediately enter a workflow without a full-page reload, so don't
        # bother rendering the workflow status dialog when hydrating a create view
        workflow_status_dialog = soup.find("div", id="workflow-status-dialog")
        self.assertIsNone(workflow_status_dialog)

        # Privacy features are not available for snippets
        set_privacy_dialog = soup.find("div", id="set-privacy")
        self.assertIsNone(set_privacy_dialog)

        # We need to change the preview URL to use the one for editing, but there is
        # no way to declaratively change attributes via partial rendering yet, and we
        # need to restart the controller anyway, so just re-render the whole panel
        preview_side_panel = side_panels[1]
        self.assertEqual(
            preview_side_panel["data-w-teleport-target-value"],
            "[data-side-panel='preview']",
        )
        preview_url = reverse(
            snippet.snippet_viewset.get_url_name("preview_on_edit"),
            args=(quote(snippet.pk),),
        )
        self.assertIsNotNone(
            preview_side_panel.select_one(f"[data-w-preview-url-value='{preview_url}']")
        )

        breadcrumbs = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "header [data-w-breadcrumbs]",
                "data-w-teleport-mode-value": "outerHTML",
            },
        )
        self.assertIsNotNone(breadcrumbs)
        # Should include header buttons as they were not rendered in the create view
        self.assertIsNotNone(breadcrumbs.select_one("#w-slim-header-buttons"))

        # Should render the history link button as it wasn't rendered in the create view
        history_link = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "[data-side-panel-toggle]:last-of-type",
                "data-w-teleport-mode-value": "afterend",
            },
        )
        history_url = reverse(
            snippet.snippet_viewset.get_url_name("history"),
            args=(quote(snippet.pk),),
        )
        self.assertIsNotNone(history_link)
        self.assertIsNotNone(history_link.select_one(f"a[href='{history_url}']"))

        # Header and document titles are swapped to the "Editing" variants
        form_title_heading = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "#header-title span",
                "data-w-teleport-mode-value": "textContent",
            },
        )
        self.assertIsNotNone(form_title_heading)
        self.assertEqual(form_title_heading.text.strip(), str(snippet))
        header_title = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "head title",
                "data-w-teleport-mode-value": "textContent",
            },
        )
        self.assertIsNotNone(header_title)
        self.assertEqual(header_title.text.strip(), f"Editing: {snippet}")

        # Should include loaded revision ID and timestamp in the form for
        # subsequent autosave requests
        form_adds = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "form[data-edit-form]",
                "data-w-teleport-mode-value": "afterbegin",
            },
        )
        self.assertIsNotNone(form_adds)
        self.assertEqual(
            form_adds.select_one("input[name='loaded_revision_id']")["value"],
            str(latest_revision.pk),
        )
        self.assertEqual(
            form_adds.select_one("input[name='loaded_revision_created_at']")["value"],
            latest_revision.created_at.isoformat(),
        )

        # Should load the editing sessions module as it was not in the create view
        editing_sessions = soup.find(
            "template",
            {
                "data-controller": "w-teleport",
                "data-w-teleport-target-value": "#w-autosave-indicator",
                "data-w-teleport-mode-value": "afterend",
            },
        )
        self.assertIsNotNone(editing_sessions)
        # with the revision info
        self.assertEqual(
            editing_sessions.select_one("input[name='revision_id']")["value"],
            str(latest_revision.pk),
        )
        self.assertEqual(
            editing_sessions.select_one("input[name='revision_created_at']")["value"],
            latest_revision.created_at.isoformat(),
        )
@override_settings(WAGTAIL_AUTOSAVE_INTERVAL=0)
def test_autosave_disabled(self):
response = self.get()
self.assertEqual(response.status_code, 200)
soup = self.get_soup(response.content)
form = soup.select_one("form[data-edit-form]")
self.assertIsNotNone(form)
self.assertNotIn("w-autosave", form["data-controller"].split())
self.assertNotIn("w-autosave", form["data-action"])
self.assertIsNone(form.attrs.get("data-w-autosave-interval-value"))
@override_settings(WAGTAIL_AUTOSAVE_INTERVAL=2000)
def test_autosave_custom_interval(self):
response = self.get()
self.assertEqual(response.status_code, 200)
soup = self.get_soup(response.content)
form = soup.select_one("form[data-edit-form]")
self.assertIsNotNone(form)
self.assertIn("w-autosave", form["data-controller"].split())
self.assertTrue(
{
"w-unsaved:add->w-autosave#save:prevent",
"w-autosave:success->w-unsaved#clear",
}.issubset(form["data-action"].split())
)
self.assertEqual(form.attrs.get("data-w-autosave-interval-value"), "2000")
def test_save_draft(self):
response = self.post(post_data={"text": "Draft-enabled Bar"})
self.test_snippet.refresh_from_db()
revisions = Revision.objects.for_instance(self.test_snippet)
latest_revision = self.test_snippet.latest_revision
self.assertRedirects(response, self.get_edit_url())
# The instance should be updated, since it is still a draft
self.assertEqual(self.test_snippet.text, "Draft-enabled Bar")
# The instance should be a draft
self.assertFalse(self.test_snippet.live)
self.assertTrue(self.test_snippet.has_unpublished_changes)
self.assertIsNone(self.test_snippet.first_published_at)
self.assertIsNone(self.test_snippet.last_published_at)
self.assertIsNone(self.test_snippet.live_revision)
# The revision should be created and set as latest_revision
self.assertEqual(revisions.count(), 1)
self.assertEqual(latest_revision, revisions.first())
# The revision content should contain the new data
self.assertEqual(latest_revision.content["text"], "Draft-enabled Bar")
# A log entry should be created
log_entry = (
ModelLogEntry.objects.for_instance(self.test_snippet)
.filter(action="wagtail.edit")
.order_by("-timestamp")
.first()
)
self.assertEqual(log_entry.revision, self.test_snippet.latest_revision)
self.assertEqual(log_entry.label, "Draft-enabled Bar")
def test_skip_validation_on_save_draft(self):
response = self.post(post_data={"text": ""})
self.test_snippet.refresh_from_db()
revisions = Revision.objects.for_instance(self.test_snippet)
latest_revision = self.test_snippet.latest_revision
self.assertRedirects(response, self.get_edit_url())
# The instance should be updated, since it is still a draft
self.assertEqual(self.test_snippet.text, "")
# The instance should be a draft
self.assertFalse(self.test_snippet.live)
self.assertTrue(self.test_snippet.has_unpublished_changes)
self.assertIsNone(self.test_snippet.first_published_at)
self.assertIsNone(self.test_snippet.last_published_at)
self.assertIsNone(self.test_snippet.live_revision)
# The revision should be created and set as latest_revision
self.assertEqual(revisions.count(), 1)
self.assertEqual(latest_revision, revisions.first())
# The revision content should contain the new data
self.assertEqual(latest_revision.content["text"], "")
# A log entry should be created (with a fallback label)
log_entry = (
ModelLogEntry.objects.for_instance(self.test_snippet)
.filter(action="wagtail.edit")
.order_by("-timestamp")
.first()
)
self.assertEqual(log_entry.revision, self.test_snippet.latest_revision)
self.assertEqual(
log_entry.label,
f"DraftStateCustomPrimaryKeyModel object ({self.test_snippet.pk})",
)
def test_required_asterisk_on_reshowing_form(self):
"""
If a form is reshown due to a validation error elsewhere, fields whose validation
was deferred should still show the required asterisk.
"""
snippet = FullFeaturedSnippet.objects.create(
text="Hello world",
country_code="UK",
some_number=42,
)
response = self.client.post(
reverse("some_namespace:edit", args=[snippet.pk]),
{"text": "", "country_code": "UK", "some_number": "meef"},
)
self.assertEqual(response.status_code, 200)
# The empty text should not cause a validation error, but the invalid number should
self.assertNotContains(response, "This field is required.")
self.assertContains(response, "Enter a whole number.", count=1)
soup = self.get_soup(response.content)
self.assertTrue(soup.select_one('label[for="id_text"] > span.w-required-mark'))
def test_cannot_publish_invalid(self):
# Connect a mock signal handler to published signal
mock_handler = mock.MagicMock()
published.connect(mock_handler)
try:
response = self.post(
post_data={
"text": "",
"action-publish": "action-publish",
}
)
self.test_snippet.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
"The draft state custom primary key model could not be saved due to errors.",
)
# The instance should be unchanged
self.assertEqual(self.test_snippet.text, "Draft-enabled Foo")
self.assertFalse(self.test_snippet.live)
# The published signal should not have been fired
self.assertEqual(mock_handler.call_count, 0)
finally:
published.disconnect(mock_handler)
    def test_publish(self):
        """Publishing via the edit form updates the instance, marks it live,
        creates a single revision (set as both latest and live revision),
        writes a ``wagtail.publish`` log entry, and fires the ``published``
        signal exactly once.
        """
        # Connect a mock signal handler to published signal
        mock_handler = mock.MagicMock()
        published.connect(mock_handler)

        try:
            # Freeze time so first/last_published_at and the log timestamp
            # can be compared against a known value.
            timestamp = now()
            with freeze_time(timestamp):
                response = self.post(
                    post_data={
                        "text": "Draft-enabled Bar, Published",
                        "action-publish": "action-publish",
                    }
                )
                self.test_snippet.refresh_from_db()
                revisions = Revision.objects.for_instance(self.test_snippet)
                latest_revision = self.test_snippet.latest_revision
                log_entries = ModelLogEntry.objects.filter(
                    content_type=ContentType.objects.get_for_model(
                        DraftStateCustomPrimaryKeyModel
                    ),
                    action="wagtail.publish",
                    object_id=self.test_snippet.pk,
                )
                log_entry = log_entries.first()

            self.assertRedirects(
                response,
                reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
            )

            # The instance should be updated
            self.assertEqual(self.test_snippet.text, "Draft-enabled Bar, Published")

            # The instance should be live
            self.assertTrue(self.test_snippet.live)
            self.assertFalse(self.test_snippet.has_unpublished_changes)
            self.assertEqual(self.test_snippet.first_published_at, timestamp)
            self.assertEqual(self.test_snippet.last_published_at, timestamp)
            self.assertEqual(self.test_snippet.live_revision, latest_revision)

            # The revision should be created and set as latest_revision
            self.assertEqual(revisions.count(), 1)
            self.assertEqual(latest_revision, revisions.first())

            # The revision content should contain the new data
            self.assertEqual(
                latest_revision.content["text"],
                "Draft-enabled Bar, Published",
            )

            # A log entry with wagtail.publish action should be created
            self.assertEqual(log_entries.count(), 1)
            self.assertEqual(log_entry.timestamp, timestamp)

            # Check that the published signal was fired
            self.assertEqual(mock_handler.call_count, 1)
            mock_call = mock_handler.mock_calls[0][2]

            self.assertEqual(mock_call["sender"], DraftStateCustomPrimaryKeyModel)
            self.assertEqual(mock_call["instance"], self.test_snippet)
            self.assertIsInstance(
                mock_call["instance"], DraftStateCustomPrimaryKeyModel
            )
        finally:
            published.disconnect(mock_handler)
    def test_publish_bad_permissions(self):
        """A user with edit (but not publish) permission who submits the
        publish action has their changes saved as a draft instead: the object
        stays unpublished and no ``published`` signal fires.
        """
        # Only add edit permission
        self.user.is_superuser = False
        edit_permission = Permission.objects.get(
            content_type__app_label="tests",
            codename="change_draftstatecustomprimarykeymodel",
        )
        admin_permission = Permission.objects.get(
            content_type__app_label="wagtailadmin",
            codename="access_admin",
        )
        self.user.user_permissions.add(edit_permission, admin_permission)
        self.user.save()

        # Connect a mock signal handler to published signal
        mock_handler = mock.MagicMock()
        published.connect(mock_handler)

        try:
            response = self.post(
                post_data={
                    "text": "Edited draft Foo",
                    "action-publish": "action-publish",
                }
            )
            self.test_snippet.refresh_from_db()

            # Should be redirected back to the edit page (draft-save flow),
            # not to the listing page as a successful publish would be
            self.assertRedirects(response, self.get_edit_url())

            # The instance should be edited, since it is still a draft
            self.assertEqual(self.test_snippet.text, "Edited draft Foo")

            # The instance should not be live
            self.assertFalse(self.test_snippet.live)
            self.assertTrue(self.test_snippet.has_unpublished_changes)

            # A revision should be created and set as latest_revision, but not live_revision
            self.assertIsNotNone(self.test_snippet.latest_revision)
            self.assertIsNone(self.test_snippet.live_revision)

            # The revision content should contain the data
            self.assertEqual(
                self.test_snippet.latest_revision.content["text"],
                "Edited draft Foo",
            )

            # Check that the published signal was not fired
            self.assertEqual(mock_handler.call_count, 0)
        finally:
            published.disconnect(mock_handler)
    def test_publish_with_publish_permission(self):
        """A non-superuser holding the model's explicit publish permission can
        publish: same outcome as ``test_publish`` (live object, one revision,
        one log entry, one ``published`` signal).
        """
        # Only add edit and publish permissions
        self.user.is_superuser = False
        edit_permission = Permission.objects.get(
            content_type__app_label="tests",
            codename="change_draftstatecustomprimarykeymodel",
        )
        publish_permission = Permission.objects.get(
            content_type__app_label="tests",
            codename="publish_draftstatecustomprimarykeymodel",
        )
        admin_permission = Permission.objects.get(
            content_type__app_label="wagtailadmin", codename="access_admin"
        )
        self.user.user_permissions.add(
            edit_permission,
            publish_permission,
            admin_permission,
        )
        self.user.save()

        # Connect a mock signal handler to published signal
        mock_handler = mock.MagicMock()
        published.connect(mock_handler)

        try:
            # Freeze time so published-at fields and the log timestamp are
            # deterministic.
            timestamp = now()
            with freeze_time(timestamp):
                response = self.post(
                    post_data={
                        "text": "Draft-enabled Bar, Published",
                        "action-publish": "action-publish",
                    }
                )
                self.test_snippet.refresh_from_db()
                revisions = Revision.objects.for_instance(self.test_snippet)
                latest_revision = self.test_snippet.latest_revision
                log_entries = ModelLogEntry.objects.filter(
                    content_type=ContentType.objects.get_for_model(
                        DraftStateCustomPrimaryKeyModel
                    ),
                    action="wagtail.publish",
                    object_id=self.test_snippet.pk,
                )
                log_entry = log_entries.first()

            self.assertRedirects(
                response,
                reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
            )

            # The instance should be updated
            self.assertEqual(self.test_snippet.text, "Draft-enabled Bar, Published")

            # The instance should be live
            self.assertTrue(self.test_snippet.live)
            self.assertFalse(self.test_snippet.has_unpublished_changes)
            self.assertEqual(self.test_snippet.first_published_at, timestamp)
            self.assertEqual(self.test_snippet.last_published_at, timestamp)
            self.assertEqual(self.test_snippet.live_revision, latest_revision)

            # The revision should be created and set as latest_revision
            self.assertEqual(revisions.count(), 1)
            self.assertEqual(latest_revision, revisions.first())

            # The revision content should contain the new data
            self.assertEqual(
                latest_revision.content["text"],
                "Draft-enabled Bar, Published",
            )

            # A log entry with wagtail.publish action should be created
            self.assertEqual(log_entries.count(), 1)
            self.assertEqual(log_entry.timestamp, timestamp)

            # Check that the published signal was fired
            self.assertEqual(mock_handler.call_count, 1)
            mock_call = mock_handler.mock_calls[0][2]

            self.assertEqual(mock_call["sender"], DraftStateCustomPrimaryKeyModel)
            self.assertEqual(mock_call["instance"], self.test_snippet)
            self.assertIsInstance(
                mock_call["instance"], DraftStateCustomPrimaryKeyModel
            )
        finally:
            published.disconnect(mock_handler)
    def test_save_draft_then_publish(self):
        """Publishing after an earlier draft save produces a second revision;
        the new revision becomes both latest and live, and published-at
        timestamps reflect the publish (not the draft save) time.
        """
        # Create an initial draft revision at a known, earlier time.
        save_timestamp = now()
        with freeze_time(save_timestamp):
            self.test_snippet.text = "Draft-enabled Bar, In Draft"
            self.test_snippet.save_revision()

        # Publish new content at a later frozen time via the edit form.
        publish_timestamp = now()
        with freeze_time(publish_timestamp):
            response = self.post(
                post_data={
                    "text": "Draft-enabled Bar, Now Published",
                    "action-publish": "action-publish",
                }
            )
            self.test_snippet.refresh_from_db()
            revisions = Revision.objects.for_instance(self.test_snippet).order_by("pk")
            latest_revision = self.test_snippet.latest_revision

        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        # The instance should be updated
        self.assertEqual(self.test_snippet.text, "Draft-enabled Bar, Now Published")

        # The instance should be live
        self.assertTrue(self.test_snippet.live)
        self.assertFalse(self.test_snippet.has_unpublished_changes)
        self.assertEqual(self.test_snippet.first_published_at, publish_timestamp)
        self.assertEqual(self.test_snippet.last_published_at, publish_timestamp)
        self.assertEqual(self.test_snippet.live_revision, latest_revision)

        # The revision should be created and set as latest_revision
        self.assertEqual(revisions.count(), 2)
        self.assertEqual(latest_revision, revisions.last())

        # The revision content should contain the new data
        self.assertEqual(
            latest_revision.content["text"],
            "Draft-enabled Bar, Now Published",
        )
    def test_publish_then_save_draft(self):
        """Saving a draft over a published object keeps the live content and
        live revision unchanged, sets ``has_unpublished_changes``, and stores
        the new text only in the second (latest) revision.
        """
        # Publish initial content at a known time.
        publish_timestamp = now()
        with freeze_time(publish_timestamp):
            self.test_snippet.text = "Draft-enabled Bar, Published"
            self.test_snippet.save_revision().publish()

        # Then save a plain draft (no publish action) at a later time.
        save_timestamp = now()
        with freeze_time(save_timestamp):
            response = self.post(
                post_data={"text": "Draft-enabled Bar, Published and In Draft"}
            )
            self.test_snippet.refresh_from_db()
            revisions = Revision.objects.for_instance(self.test_snippet).order_by("pk")
            latest_revision = self.test_snippet.latest_revision

        self.assertRedirects(response, self.get_edit_url())

        # The instance should be updated with the last published changes
        self.assertEqual(self.test_snippet.text, "Draft-enabled Bar, Published")

        # The instance should be live
        self.assertTrue(self.test_snippet.live)

        # The instance should have unpublished changes
        self.assertTrue(self.test_snippet.has_unpublished_changes)

        # Published-at timestamps still reflect the original publish.
        self.assertEqual(self.test_snippet.first_published_at, publish_timestamp)
        self.assertEqual(self.test_snippet.last_published_at, publish_timestamp)

        # The live revision should be the first revision
        self.assertEqual(self.test_snippet.live_revision, revisions.first())

        # The second revision should be created and set as latest_revision
        self.assertEqual(revisions.count(), 2)
        self.assertEqual(latest_revision, revisions.last())

        # The revision content should contain the new data
        self.assertEqual(
            latest_revision.content["text"],
            "Draft-enabled Bar, Published and In Draft",
        )
    def test_publish_twice(self):
        """Publishing a second time keeps ``first_published_at`` from the
        first publish while ``last_published_at`` and the live revision move
        to the second publish.
        """
        # First publish at a known time.
        first_timestamp = now()
        with freeze_time(first_timestamp):
            self.test_snippet.text = "Draft-enabled Bar, Published Once"
            self.test_snippet.save_revision().publish()

        # Second publish one day later, via the edit form.
        second_timestamp = now() + datetime.timedelta(days=1)
        with freeze_time(second_timestamp):
            response = self.post(
                post_data={
                    "text": "Draft-enabled Bar, Published Twice",
                    "action-publish": "action-publish",
                }
            )
            self.test_snippet.refresh_from_db()
            revisions = Revision.objects.for_instance(self.test_snippet).order_by("pk")
            latest_revision = self.test_snippet.latest_revision

        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        # The instance should be updated with the last published changes
        self.assertEqual(self.test_snippet.text, "Draft-enabled Bar, Published Twice")

        # The instance should be live
        self.assertTrue(self.test_snippet.live)
        self.assertFalse(self.test_snippet.has_unpublished_changes)

        # The first_published_at and last_published_at should be set correctly
        self.assertEqual(self.test_snippet.first_published_at, first_timestamp)
        self.assertEqual(self.test_snippet.last_published_at, second_timestamp)

        # The live revision should be the second revision
        self.assertEqual(self.test_snippet.live_revision, revisions.last())

        # The second revision should be created and set as latest_revision
        self.assertEqual(revisions.count(), 2)
        self.assertEqual(latest_revision, revisions.last())

        # The revision content should contain the new data
        self.assertEqual(
            latest_revision.content["text"],
            "Draft-enabled Bar, Published Twice",
        )
def test_get_after_save_draft(self):
self.post(post_data={"text": "Draft-enabled Bar"})
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")
# Should not show the Live status
self.assertNotContains(
response,
'<h3 id="status-sidebar-live" class="w-label-1 !w-mt-0 w-mb-1"><span class="w-sr-only">Status: </span>Live</h3>',
html=True,
)
# Should show the Draft status
self.assertContains(
response,
'<h3 id="status-sidebar-draft" class="w-label-1 !w-mt-0 w-mb-1"><span class="w-sr-only">Status: </span>Draft</h3>',
html=True,
)
# Should not show the Unpublish action menu item
unpublish_url = reverse(
"wagtailsnippets_tests_draftstatecustomprimarykeymodel:unpublish",
args=(quote(self.test_snippet.pk),),
)
self.assertNotContains(
response,
f'<a class="button" href="{unpublish_url}">',
)
self.assertNotContains(response, "Unpublish")
def test_get_after_publish(self):
self.post(
post_data={
"text": "Draft-enabled Bar, Published",
"action-publish": "action-publish",
}
)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")
# Should show the Live status
self.assertContains(
response,
'<h3 id="status-sidebar-live" class="w-label-1 !w-mt-0 w-mb-1"><span class="w-sr-only">Status: </span>Live</h3>',
html=True,
)
# Should not show the Draft status
self.assertNotContains(
response,
'<h3 id="status-sidebar-draft" class="w-label-1 !w-mt-0 w-mb-1"><span class="w-sr-only">Status: </span>Draft</h3>',
html=True,
)
# Should show the Unpublish action menu item
unpublish_url = reverse(
"wagtailsnippets_tests_draftstatecustomprimarykeymodel:unpublish",
args=(quote(self.test_snippet.pk),),
)
self.assertContains(
response,
f'<a class="button" href="{unpublish_url}">',
)
self.assertContains(response, "Unpublish")
    def test_get_after_publish_and_save_draft(self):
        """With live content plus a newer draft, the edit view shows both the
        Live and Draft statuses, offers Unpublish, titles the page with the
        draft text, and pre-fills the form from the draft revision.
        """
        # Publish first, then overwrite with a plain draft save.
        self.post(
            post_data={
                "text": "Draft-enabled Bar, Published",
                "action-publish": "action-publish",
            }
        )
        self.post(post_data={"text": "Draft-enabled Bar, In Draft"})

        response = self.get()
        html = response.content.decode()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")

        # Should show the Live status
        self.assertContains(
            response,
            '<h3 id="status-sidebar-live" class="w-label-1 !w-mt-0 w-mb-1"><span class="w-sr-only">Status: </span>Live</h3>',
            html=True,
        )

        # Should show the Draft status
        self.assertContains(
            response,
            '<h3 id="status-sidebar-draft" class="w-label-1 !w-mt-0 w-mb-1"><span class="w-sr-only">Status: </span>Draft</h3>',
            html=True,
        )

        # Should show the Unpublish action menu item
        unpublish_url = reverse(
            "wagtailsnippets_tests_draftstatecustomprimarykeymodel:unpublish",
            args=(quote(self.test_snippet.pk),),
        )
        self.assertContains(
            response,
            f'<a class="button" href="{unpublish_url}">',
        )
        self.assertContains(response, "Unpublish")

        # The page header should use the snippet icon and the draft title.
        soup = self.get_soup(response.content)
        h2 = soup.select_one("#header-title")
        self.assertIsNotNone(h2)
        icon = h2.select_one("svg use")
        self.assertIsNotNone(icon)
        self.assertEqual(icon["href"], "#icon-snippet")
        self.assertEqual(h2.text.strip(), "Draft-enabled Bar, In Draft")

        # Should use the latest draft content for the form
        self.assertTagInHTML(
            '<textarea name="text">Draft-enabled Bar, In Draft</textarea>',
            html,
            allow_extra_attrs=True,
        )
    def test_edit_post_scheduled(self):
        """Saving (without publishing) with go_live_at/expire_at on a live
        object stores the schedule only in the draft revision's content — no
        approved_go_live_at — and the edit view shows it under the
        "Once scheduled" label.
        """
        self.test_snippet.save_revision().publish()

        # put go_live_at and expire_at several days away from the current date, to avoid
        # false matches in content__ tests
        go_live_at = now() + datetime.timedelta(days=10)
        expire_at = now() + datetime.timedelta(days=20)
        response = self.post(
            post_data={
                "text": "Some content",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(expire_at),
            }
        )

        # Should be redirected to the edit page
        self.assertRedirects(
            response,
            reverse(
                "wagtailsnippets_tests_draftstatecustomprimarykeymodel:edit",
                args=[quote(self.test_snippet.pk)],
            ),
        )

        self.test_snippet.refresh_from_db()

        # The object will still be live
        self.assertTrue(self.test_snippet.live)

        # A revision with approved_go_live_at should not exist
        self.assertFalse(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        # But a revision with go_live_at and expire_at in their content json *should* exist
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .filter(
                content__go_live_at__startswith=str(go_live_at.date()),
            )
            .exists()
        )
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .filter(
                content__expire_at__startswith=str(expire_at.date()),
            )
            .exists()
        )

        # Get the edit page again
        response = self.get()

        # Should show the draft go_live_at and expire_at under the "Once scheduled" label
        self.assertContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
            count=1,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Go-live:</span> {render_timestamp(go_live_at)}',
            html=True,
            count=1,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(expire_at)}',
            html=True,
            count=1,
        )

        self.assertSchedulingDialogRendered(response)
def test_edit_scheduled_go_live_before_expiry(self):
response = self.post(
post_data={
"text": "Some content",
"go_live_at": submittable_timestamp(now() + datetime.timedelta(days=2)),
"expire_at": submittable_timestamp(now() + datetime.timedelta(days=1)),
}
)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(
response.context["form"],
"go_live_at",
"Go live date/time must be before expiry date/time",
)
self.assertFormError(
response.context["form"],
"expire_at",
"Go live date/time must be before expiry date/time",
)
self.assertContains(
response,
'<div class="w-label-3 w-text-primary">Invalid schedule</div>',
html=True,
)
num_errors = 2
# Should show the correct number on the badge of the toggle button
self.assertRegex(
response.content.decode(),
self.STATUS_TOGGLE_BADGE_REGEX % {"num_errors": num_errors},
)
def test_edit_scheduled_expire_in_the_past(self):
response = self.post(
post_data={
"text": "Some content",
"expire_at": submittable_timestamp(now() + datetime.timedelta(days=-1)),
}
)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(
response.context["form"],
"expire_at",
"Expiry date/time must be in the future.",
)
self.assertContains(
response,
'<div class="w-label-3 w-text-primary">Invalid schedule</div>',
html=True,
)
num_errors = 1
# Should show the correct number on the badge of the toggle button
self.assertRegex(
response.content.decode(),
self.STATUS_TOGGLE_BADGE_REGEX % {"num_errors": num_errors},
)
    def test_edit_post_invalid_schedule_with_existing_draft_schedule(self):
        """Submitting an invalid schedule over an existing draft schedule
        re-renders the form with errors, creates no new revision, hides the
        "Once scheduled" details, and still offers the "Edit schedule" button.
        """
        # Seed an existing, valid schedule in a draft revision.
        self.test_snippet.go_live_at = now() + datetime.timedelta(days=1)
        self.test_snippet.expire_at = now() + datetime.timedelta(days=2)
        latest_revision = self.test_snippet.save_revision()

        # Submit an invalid schedule (expire_at is in the past).
        go_live_at = now() + datetime.timedelta(days=10)
        expire_at = now() + datetime.timedelta(days=-20)
        response = self.post(
            post_data={
                "text": "Some edited content",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(expire_at),
            }
        )

        # Should render the edit page with errors instead of redirecting
        self.assertEqual(response.status_code, 200)
        self.test_snippet.refresh_from_db()

        # The snippet will not be live
        self.assertFalse(self.test_snippet.live)

        # No new revision should have been created
        self.assertEqual(self.test_snippet.latest_revision_id, latest_revision.pk)

        # Should not show the draft go_live_at and expire_at under the "Once scheduled" label
        self.assertNotContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
        )
        self.assertNotContains(
            response,
            '<span class="w-text-grey-600">Go-live:</span>',
            html=True,
        )
        self.assertNotContains(
            response,
            '<span class="w-text-grey-600">Expiry:</span>',
            html=True,
        )

        # Should show the "Edit schedule" button
        html = response.content.decode()
        self.assertTagInHTML(
            '<button type="button" data-a11y-dialog-show="schedule-publishing-dialog">Edit schedule</button>',
            html,
            count=1,
            allow_extra_attrs=True,
        )
        self.assertContains(
            response,
            '<div class="w-label-3 w-text-primary">Invalid schedule</div>',
            html=True,
        )

        num_errors = 2
        # Should show the correct number on the badge of the toggle button
        self.assertRegex(
            response.content.decode(),
            self.STATUS_TOGGLE_BADGE_REGEX % {"num_errors": num_errors},
        )
def test_first_published_at_editable(self):
"""Test that we can update the first_published_at via the edit form,
for models that expose it."""
self.test_snippet.save_revision().publish()
self.test_snippet.refresh_from_db()
initial_delta = self.test_snippet.first_published_at - now()
first_published_at = now() - datetime.timedelta(days=2)
self.post(
post_data={
"text": "I've been edited!",
"action-publish": "action-publish",
"first_published_at": submittable_timestamp(first_published_at),
}
)
self.test_snippet.refresh_from_db()
# first_published_at should have changed.
new_delta = self.test_snippet.first_published_at - now()
self.assertNotEqual(new_delta.days, initial_delta.days)
# first_published_at should be 3 days ago.
self.assertEqual(new_delta.days, -3)
    def test_edit_post_publish_scheduled_unpublished(self):
        """Publishing an unpublished object with a future go_live_at schedules
        it instead: the object stays not-live with status "scheduled",
        ``has_unpublished_changes`` set, and a revision holding
        ``approved_go_live_at``.
        """
        go_live_at = now() + datetime.timedelta(days=1)
        expire_at = now() + datetime.timedelta(days=2)
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(expire_at),
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        self.test_snippet.refresh_from_db()

        # The object should not be live
        self.assertFalse(self.test_snippet.live)

        # Instead a revision with approved_go_live_at should now exist
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        # The object SHOULD have the "has_unpublished_changes" flag set,
        # because the changes are not visible as a live object yet
        self.assertTrue(
            self.test_snippet.has_unpublished_changes,
            msg="An object scheduled for future publishing should have has_unpublished_changes=True",
        )

        self.assertEqual(self.test_snippet.status_string, "scheduled")

        response = self.get()

        # Should show the go_live_at and expire_at without the "Once scheduled" label
        self.assertNotContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Go-live:</span> {render_timestamp(go_live_at)}',
            html=True,
            count=1,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(expire_at)}',
            html=True,
            count=1,
        )

        self.assertSchedulingDialogRendered(response)
    def test_edit_post_publish_now_an_already_scheduled_unpublished(self):
        """Publishing with an empty go_live_at cancels an existing schedule on
        an unpublished object and publishes immediately (removing the
        ``approved_go_live_at`` revision).
        """
        # First let's publish an object with a go_live_at in the future
        go_live_at = now() + datetime.timedelta(days=1)
        expire_at = now() + datetime.timedelta(days=2)
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(expire_at),
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        self.test_snippet.refresh_from_db()

        # The object should not be live
        self.assertFalse(self.test_snippet.live)
        self.assertEqual(self.test_snippet.status_string, "scheduled")

        # Instead a revision with approved_go_live_at should now exist
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        # Now, let's edit it and publish it right now
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "go_live_at": "",
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        self.test_snippet.refresh_from_db()

        # The object should be live
        self.assertTrue(self.test_snippet.live)

        # The revision with approved_go_live_at should no longer exist
        self.assertFalse(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        response = self.get()
        self.assertSchedulingDialogRendered(response)
    def test_edit_post_publish_scheduled_published(self):
        """Scheduling a publish on an already-live object keeps the current
        live content visible ("live + scheduled"): the scheduled changes land
        in a new ``approved_go_live_at`` revision, not on the live object.
        """
        self.test_snippet.save_revision().publish()
        self.test_snippet.refresh_from_db()

        live_revision = self.test_snippet.live_revision
        go_live_at = now() + datetime.timedelta(days=1)
        expire_at = now() + datetime.timedelta(days=2)
        response = self.post(
            post_data={
                "text": "I've been edited!",
                "action-publish": "Publish",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(expire_at),
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        # Re-fetch (rather than refresh) to pick up the post-save state.
        self.test_snippet = DraftStateCustomPrimaryKeyModel.objects.get(
            pk=self.test_snippet.pk
        )

        # The object should still be live
        self.assertTrue(self.test_snippet.live)
        self.assertEqual(self.test_snippet.status_string, "live + scheduled")

        # A revision with approved_go_live_at should now exist
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        # The object SHOULD have the "has_unpublished_changes" flag set,
        # because the changes are not visible as a live object yet
        self.assertTrue(
            self.test_snippet.has_unpublished_changes,
            msg="An object scheduled for future publishing should have has_unpublished_changes=True",
        )

        self.assertNotEqual(
            self.test_snippet.get_latest_revision(),
            live_revision,
            "An object scheduled for future publishing should have a new revision, that is not the live revision",
        )

        self.assertEqual(
            self.test_snippet.text,
            "Draft-enabled Foo",
            "A live object with a scheduled revision should still have the original content",
        )

        response = self.get()

        # Should show the go_live_at and expire_at without the "Once scheduled" label
        self.assertNotContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Go-live:</span> {render_timestamp(go_live_at)}',
            html=True,
            count=1,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(expire_at)}',
            html=True,
            count=1,
        )

        self.assertSchedulingDialogRendered(response)
    def test_edit_post_publish_now_an_already_scheduled_published(self):
        """Publishing with an empty go_live_at on a live object with a pending
        schedule cancels the schedule and publishes the new content right away.
        """
        self.test_snippet.save_revision().publish()

        # First let's publish an object with a go_live_at in the future
        go_live_at = now() + datetime.timedelta(days=1)
        expire_at = now() + datetime.timedelta(days=2)
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(expire_at),
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        self.test_snippet.refresh_from_db()

        # The object should still be live
        self.assertTrue(self.test_snippet.live)

        # A revision with approved_go_live_at should now exist
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        self.assertEqual(
            self.test_snippet.text,
            "Draft-enabled Foo",
            "A live object with scheduled revisions should still have original content",
        )

        # Now, let's edit it and publish it right now
        response = self.post(
            post_data={
                "text": "I've been updated!",
                "action-publish": "Publish",
                "go_live_at": "",
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        self.test_snippet.refresh_from_db()

        # The object should be live
        self.assertTrue(self.test_snippet.live)

        # The scheduled revision should no longer exist
        self.assertFalse(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        # The content should be updated
        self.assertEqual(self.test_snippet.text, "I've been updated!")
    def test_edit_post_save_schedule_before_a_scheduled_expire(self):
        """Saving (not publishing) a draft schedule that starts before the
        live object's existing expire_at keeps the live expiry active and
        shows the draft schedule under "Once scheduled".
        """
        # First let's publish an object with *just* an expire_at in the future
        expire_at = now() + datetime.timedelta(days=20)
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "expire_at": submittable_timestamp(expire_at),
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        self.test_snippet.refresh_from_db()

        # The object should still be live
        self.assertTrue(self.test_snippet.live)
        self.assertEqual(self.test_snippet.status_string, "live")

        # The live object should have the expire_at field set
        # (seconds/microseconds are dropped by the form's datetime input)
        self.assertEqual(
            self.test_snippet.expire_at,
            expire_at.replace(second=0, microsecond=0),
        )

        # Now, let's save an object with a go_live_at in the future,
        # but before the existing expire_at
        go_live_at = now() + datetime.timedelta(days=10)
        new_expire_at = now() + datetime.timedelta(days=15)
        response = self.post(
            post_data={
                "text": "Some content",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(new_expire_at),
            }
        )

        # Should be redirected to the edit page
        self.assertRedirects(
            response,
            reverse(
                "wagtailsnippets_tests_draftstatecustomprimarykeymodel:edit",
                args=[quote(self.test_snippet.pk)],
            ),
        )

        self.test_snippet.refresh_from_db()

        # The object will still be live
        self.assertTrue(self.test_snippet.live)

        # A revision with approved_go_live_at should not exist
        self.assertFalse(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        # But a revision with go_live_at and expire_at in their content json *should* exist
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .filter(content__go_live_at__startswith=str(go_live_at.date()))
            .exists()
        )
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .filter(content__expire_at__startswith=str(expire_at.date()))
            .exists()
        )

        response = self.get()

        # Should still show the active expire_at in the live object
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(expire_at)}',
            html=True,
            count=1,
        )

        # Should also show the draft go_live_at and expire_at under the "Once scheduled" label
        self.assertContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
            count=1,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Go-live:</span> {render_timestamp(go_live_at)}',
            html=True,
            count=1,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(new_expire_at)}',
            html=True,
            count=1,
        )

        self.assertSchedulingDialogRendered(response)
    def test_edit_post_publish_schedule_before_a_scheduled_expire(self):
        """Publishing a schedule that starts before the live object's existing
        expire_at supersedes that expiry in the edit view: only the new
        schedule's go-live/expiry are shown (status "live + scheduled").
        """
        # First let's publish an object with *just* an expire_at in the future
        expire_at = now() + datetime.timedelta(days=20)
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "expire_at": submittable_timestamp(expire_at),
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        self.test_snippet.refresh_from_db()

        # The object should still be live
        self.assertTrue(self.test_snippet.live)
        self.assertEqual(self.test_snippet.status_string, "live")

        # The live object should have the expire_at field set
        # (seconds/microseconds are dropped by the form's datetime input)
        self.assertEqual(
            self.test_snippet.expire_at,
            expire_at.replace(second=0, microsecond=0),
        )

        # Now, let's publish an object with a go_live_at in the future,
        # but before the existing expire_at
        go_live_at = now() + datetime.timedelta(days=10)
        new_expire_at = now() + datetime.timedelta(days=15)
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(new_expire_at),
            }
        )

        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )

        # Re-fetch (rather than refresh) to pick up the post-save state.
        self.test_snippet = DraftStateCustomPrimaryKeyModel.objects.get(
            pk=self.test_snippet.pk
        )

        # The object should still be live
        self.assertTrue(self.test_snippet.live)
        self.assertEqual(self.test_snippet.status_string, "live + scheduled")

        # A revision with approved_go_live_at should now exist
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

        response = self.get()

        # Should not show the active expire_at in the live object because the
        # scheduled revision is before the existing expire_at, which means it will
        # override the existing expire_at when it goes live
        self.assertNotContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(expire_at)}',
            html=True,
        )

        # Should show the go_live_at and expire_at without the "Once scheduled" label
        self.assertNotContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Go-live:</span> {render_timestamp(go_live_at)}',
            html=True,
            count=1,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(new_expire_at)}',
            html=True,
            count=1,
        )

        self.assertSchedulingDialogRendered(response)
    def test_edit_post_publish_schedule_after_a_scheduled_expire(self):
        """
        Scheduling a go-live *after* an already-scheduled expiry keeps the
        existing expiry active: the object expires first and is only
        republished when the new revision's go_live_at arrives.
        """
        # First let's publish an object with *just* an expire_at in the future
        expire_at = now() + datetime.timedelta(days=20)
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "expire_at": submittable_timestamp(expire_at),
            }
        )
        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )
        self.test_snippet.refresh_from_db()
        # The object should still be live
        self.assertTrue(self.test_snippet.live)
        self.assertEqual(self.test_snippet.status_string, "live")
        # The live object should have the expire_at field set
        # (seconds/microseconds are truncated by the form widget)
        self.assertEqual(
            self.test_snippet.expire_at,
            expire_at.replace(second=0, microsecond=0),
        )
        # Now, let's publish an object with a go_live_at in the future,
        # but after the existing expire_at
        go_live_at = now() + datetime.timedelta(days=23)
        new_expire_at = now() + datetime.timedelta(days=25)
        response = self.post(
            post_data={
                "text": "Some content",
                "action-publish": "Publish",
                "go_live_at": submittable_timestamp(go_live_at),
                "expire_at": submittable_timestamp(new_expire_at),
            }
        )
        # Should be redirected to the listing page
        self.assertRedirects(
            response,
            reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
        )
        self.test_snippet = DraftStateCustomPrimaryKeyModel.objects.get(
            pk=self.test_snippet.pk
        )
        # The object should still be live
        self.assertTrue(self.test_snippet.live)
        self.assertEqual(self.test_snippet.status_string, "live + scheduled")
        # Instead a revision with approved_go_live_at should now exist
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )
        response = self.get()
        # Should still show the active expire_at in the live object because the
        # scheduled revision is after the existing expire_at, which means the
        # new expire_at won't take effect until the revision goes live.
        # This means the object will be:
        # unpublished (expired) -> published (scheduled) -> unpublished (expired again)
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(expire_at)}',
            html=True,
            count=1,
        )
        # Should show the go_live_at and expire_at without the "Once scheduled" label
        self.assertNotContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Go-live:</span> {render_timestamp(go_live_at)}',
            html=True,
            count=1,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Expiry:</span> {render_timestamp(new_expire_at)}',
            html=True,
            count=1,
        )
        self.assertSchedulingDialogRendered(response)
def test_use_fallback_for_blank_string_representation(self):
self.snippet = DraftStateModel.objects.create(text="", live=False)
response = self.client.get(
reverse(
"wagtailsnippets_tests_draftstatemodel:edit",
args=[quote(self.snippet.pk)],
),
)
title = f"DraftStateModel object ({self.snippet.pk})"
soup = self.get_soup(response.content)
h2 = soup.select_one("#header-title")
self.assertEqual(h2.text.strip(), title)
sublabel = soup.select_one(".w-breadcrumbs li:last-of-type")
self.assertEqual(sublabel.get_text(strip=True), title)
class TestScheduledForPublishLock(BaseTestSnippetEditView):
    """Edit-view behaviour for an object locked by a scheduled publish.

    An object whose revision has been approved to go live at a future time is
    locked: edits are rejected, and only users with publish permission get a
    button to cancel the schedule.
    """

    def setUp(self):
        super().setUp()
        # Create a draft-only object, then publish a revision with a future
        # go_live_at; publishing it locks the object until the scheduled time.
        self.test_snippet = DraftStateModel.objects.create(
            text="Draft-enabled Foo", live=False
        )
        self.go_live_at = now() + datetime.timedelta(days=1)
        self.test_snippet.text = "I've been edited!"
        self.test_snippet.go_live_at = self.go_live_at
        self.latest_revision = self.test_snippet.save_revision()
        self.latest_revision.publish()
        self.test_snippet.refresh_from_db()

    def test_edit_get_scheduled_for_publishing_with_publish_permission(self):
        """Publishers see the lock plus a "Cancel scheduled publish" button."""
        self.user.is_superuser = False
        edit_permission = Permission.objects.get(
            content_type__app_label="tests", codename="change_draftstatemodel"
        )
        publish_permission = Permission.objects.get(
            content_type__app_label="tests", codename="publish_draftstatemodel"
        )
        admin_permission = Permission.objects.get(
            content_type__app_label="wagtailadmin", codename="access_admin"
        )
        self.user.user_permissions.add(
            edit_permission,
            publish_permission,
            admin_permission,
        )
        self.user.save()
        response = self.get()
        # Should show the go_live_at without the "Once scheduled" label
        self.assertNotContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Go-live:</span> {render_timestamp(self.go_live_at)}',
            html=True,
            count=1,
        )
        # Should show the lock message
        self.assertContains(
            response,
            "Draft state model 'I've been edited!' is locked and has been scheduled to go live at",
            count=1,
        )
        # Should show the lock information in the status side panel
        self.assertContains(response, "Locked by schedule")
        self.assertContains(
            response,
            '<div class="w-help-text">Currently locked and will go live on the scheduled date</div>',
            html=True,
            count=1,
        )
        html = response.content.decode()
        # Should not show the "Edit schedule" button
        self.assertTagInHTML(
            '<button type="button" data-a11y-dialog-show="schedule-publishing-dialog">Edit schedule</button>',
            html,
            count=0,
            allow_extra_attrs=True,
        )
        # Should show button to cancel scheduled publishing
        unschedule_url = reverse(
            "wagtailsnippets_tests_draftstatemodel:revisions_unschedule",
            args=[self.test_snippet.pk, self.latest_revision.pk],
        )
        self.assertTagInHTML(
            f'<button data-action="w-action#post" data-controller="w-action" data-w-action-url-value="{unschedule_url}">Cancel scheduled publish</button>',
            html,
            count=1,
            allow_extra_attrs=True,
        )

    def test_edit_get_scheduled_for_publishing_without_publish_permission(self):
        """Non-publishers see the lock but no way to cancel the schedule."""
        self.user.is_superuser = False
        edit_permission = Permission.objects.get(
            content_type__app_label="tests", codename="change_draftstatemodel"
        )
        admin_permission = Permission.objects.get(
            content_type__app_label="wagtailadmin", codename="access_admin"
        )
        self.user.user_permissions.add(edit_permission, admin_permission)
        self.user.save()
        response = self.get()
        # Should show the go_live_at without the "Once scheduled" label
        self.assertNotContains(
            response,
            '<div class="w-label-3 w-text-primary">Once scheduled:</div>',
            html=True,
        )
        self.assertContains(
            response,
            f'<span class="w-text-grey-600">Go-live:</span> {render_timestamp(self.go_live_at)}',
            html=True,
            count=1,
        )
        # Should show the lock message
        self.assertContains(
            response,
            "Draft state model 'I've been edited!' is locked and has been scheduled to go live at",
            count=1,
        )
        # Should show the lock information in the status side panel
        self.assertContains(response, "Locked by schedule")
        self.assertContains(
            response,
            '<div class="w-help-text">Currently locked and will go live on the scheduled date</div>',
            html=True,
            count=1,
        )
        html = response.content.decode()
        # Should not show the "Edit schedule" button
        self.assertTagInHTML(
            '<button type="button" data-a11y-dialog-show="schedule-publishing-dialog">Edit schedule</button>',
            html,
            count=0,
            allow_extra_attrs=True,
        )
        # Should not show button to cancel scheduled publishing
        unschedule_url = reverse(
            "wagtailsnippets_tests_draftstatemodel:revisions_unschedule",
            args=[self.test_snippet.pk, self.latest_revision.pk],
        )
        self.assertTagInHTML(
            f'<button data-action="w-action#post" data-controller="w-action" data-w-action-url-value="{unschedule_url}">Cancel scheduled publish</button>',
            html,
            count=0,
            allow_extra_attrs=True,
        )

    def test_edit_post_scheduled_for_publishing(self):
        """POSTing edits to a schedule-locked object is rejected without saving."""
        response = self.post(
            post_data={
                "text": "I'm edited while it's locked for scheduled publishing!",
                "go_live_at": submittable_timestamp(self.go_live_at),
            }
        )
        self.test_snippet.refresh_from_db()
        # Should not create a new revision,
        # so the latest revision's content should still be the same
        self.assertEqual(self.test_snippet.latest_revision, self.latest_revision)
        self.assertEqual(
            self.test_snippet.latest_revision.content["text"],
            "I've been edited!",
        )
        # Should show a message explaining why the changes were not saved
        self.assertContains(
            response,
            "The draft state model could not be saved as it is locked",
            count=1,
        )
        # Should not show the lock message, as we already have the error message
        self.assertNotContains(
            response,
            "Draft state model 'I've been edited!' is locked and has been scheduled to go live at",
        )
        # Should show the lock information in the status side panel
        self.assertContains(response, "Locked by schedule")
        self.assertContains(
            response,
            '<div class="w-help-text">Currently locked and will go live on the scheduled date</div>',
            html=True,
            count=1,
        )
        html = response.content.decode()
        # Should not show the "Edit schedule" button
        self.assertTagInHTML(
            '<button type="button" data-a11y-dialog-show="schedule-publishing-dialog">Edit schedule</button>',
            html,
            count=0,
            allow_extra_attrs=True,
        )
        # Should not show button to cancel scheduled publishing as the lock message isn't shown
        unschedule_url = reverse(
            "wagtailsnippets_tests_draftstatemodel:revisions_unschedule",
            args=[self.test_snippet.pk, self.latest_revision.pk],
        )
        self.assertTagInHTML(
            f'<button data-action="w-action#post" data-controller="w-action" data-w-action-url-value="{unschedule_url}">Cancel scheduled publish</button>',
            html,
            count=0,
            allow_extra_attrs=True,
        )
class TestSnippetViewWithCustomPrimaryKey(WagtailTestUtils, TestCase):
    """CRUD views for a snippet model whose primary key is a custom string,
    including pk values ("snippet/01") that require URL quoting."""

    fixtures = ["test.json"]

    def setUp(self):
        super().setUp()
        self.login()
        # One pk containing "/" (needs quoting), one plain pk.
        self.snippet_a = StandardSnippetWithCustomPrimaryKey.objects.create(
            snippet_id="snippet/01", text="Hello"
        )
        self.snippet_b = StandardSnippetWithCustomPrimaryKey.objects.create(
            snippet_id="abc_407269_1", text="Goodbye"
        )

    def get(self, snippet, params=None):
        """GET the edit view for ``snippet``, quoting its pk for the URL."""
        args = [quote(snippet.pk)]
        return self.client.get(
            reverse(snippet.snippet_viewset.get_url_name("edit"), args=args),
            params,
        )

    def post(self, snippet, post_data=None):
        """POST to the edit view for ``snippet``."""
        args = [quote(snippet.pk)]
        return self.client.post(
            reverse(snippet.snippet_viewset.get_url_name("edit"), args=args),
            post_data,
        )

    def create(self, snippet, post_data=None, model=Advert):
        """POST to the add view of ``snippet``'s model.

        NOTE(review): the ``model`` parameter is unused — the URL comes from
        ``snippet``; consider removing it.
        """
        return self.client.post(
            reverse(snippet.snippet_viewset.get_url_name("add")),
            post_data,
        )

    def test_show_edit_view(self):
        # Both quoted and plain pks should resolve to the edit view.
        for snippet in [self.snippet_a, self.snippet_b]:
            with self.subTest(snippet=snippet):
                response = self.get(snippet)
                self.assertEqual(response.status_code, 200)
                self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")

    def test_edit_invalid(self):
        response = self.post(self.snippet_a, post_data={"foo": "bar"})
        soup = self.get_soup(response.content)
        header_messages = soup.css.select(".messages[role='status'] ul > li")
        # the top level message should indicate that the page could not be saved
        self.assertEqual(len(header_messages), 1)
        message = header_messages[0]
        self.assertIn(
            "The standard snippet with custom primary key could not be saved due to errors.",
            message.get_text(),
        )
        # the top level message should provide a go to error button
        buttons = message.find_all("button")
        self.assertEqual(len(buttons), 1)
        self.assertEqual(buttons[0].attrs["data-controller"], "w-count w-focus")
        self.assertEqual(
            set(buttons[0].attrs["data-action"].split()),
            {"click->w-focus#focus", "wagtail:panel-init@document->w-count#count"},
        )
        self.assertIn("Go to the first error", buttons[0].get_text())
        # the errors should appear against the fields with issues
        error_messages = soup.css.select(".error-message")
        self.assertEqual(len(error_messages), 2)
        error_message = error_messages[0]
        self.assertEqual(error_message.parent["id"], "panel-child-snippet_id-errors")
        self.assertIn("This field is required", error_message.get_text())

    def test_edit(self):
        response = self.post(
            self.snippet_a,
            post_data={"text": "Edited snippet", "snippet_id": "snippet_id_edited"},
        )
        self.assertRedirects(
            response,
            reverse(
                "wagtailsnippets_snippetstests_standardsnippetwithcustomprimarykey:list"
            ),
        )
        snippets = StandardSnippetWithCustomPrimaryKey.objects.all()
        self.assertEqual(snippets.count(), 3)
        # Saving with a new primary key creates a new instance
        self.assertTrue(snippets.filter(snippet_id="snippet_id_edited").exists())
        self.assertTrue(snippets.filter(snippet_id="snippet/01").exists())

    def test_create(self):
        response = self.create(
            self.snippet_a,
            post_data={"text": "test snippet", "snippet_id": "snippet/02"},
        )
        self.assertRedirects(
            response,
            reverse(
                "wagtailsnippets_snippetstests_standardsnippetwithcustomprimarykey:list"
            ),
        )
        snippets = StandardSnippetWithCustomPrimaryKey.objects.all()
        self.assertEqual(snippets.count(), 3)
        self.assertEqual(snippets.order_by("snippet_id").last().text, "test snippet")

    def test_get_delete(self):
        for snippet in [self.snippet_a, self.snippet_b]:
            with self.subTest(snippet=snippet):
                response = self.client.get(
                    reverse(
                        "wagtailsnippets_snippetstests_standardsnippetwithcustomprimarykey:delete",
                        args=[quote(snippet.pk)],
                    )
                )
                self.assertEqual(response.status_code, 200)
                self.assertTemplateUsed(
                    response, "wagtailadmin/generic/confirm_delete.html"
                )

    def test_usage_link(self):
        # The delete confirmation should link to the usage view with the
        # describe_on_delete flag set.
        for snippet in [self.snippet_a, self.snippet_b]:
            with self.subTest(snippet=snippet):
                response = self.client.get(
                    reverse(
                        "wagtailsnippets_snippetstests_standardsnippetwithcustomprimarykey:delete",
                        args=[quote(snippet.pk)],
                    )
                )
                self.assertEqual(response.status_code, 200)
                self.assertTemplateUsed(
                    response, "wagtailadmin/generic/confirm_delete.html"
                )
                self.assertContains(
                    response,
                    "This standard snippet with custom primary key is referenced 0 times",
                )
                self.assertContains(
                    response,
                    reverse(
                        "wagtailsnippets_snippetstests_standardsnippetwithcustomprimarykey:usage",
                        args=[quote(snippet.pk)],
                    )
                    + "?describe_on_delete=1",
                )
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_edit_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 2793,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_history_view.py | import datetime
from django.contrib.admin.utils import quote
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.timezone import make_aware
from wagtail.models import ModelLogEntry
from wagtail.test.testapp.models import Advert, DraftStateModel, FullFeaturedSnippet
from wagtail.test.utils import WagtailTestUtils
class TestSnippetHistory(WagtailTestUtils, TestCase):
    """The history (audit log) view, for both revisable and non-revisable snippets."""

    fixtures = ["test.json"]

    def get(self, snippet, params=None):
        """GET the history view for ``snippet``."""
        return self.client.get(self.get_url(snippet, "history"), params)

    def get_url(self, snippet, url_name, args=None):
        """Reverse ``url_name`` for ``snippet``'s viewset; defaults args to its quoted pk."""
        if args is None:
            args = [quote(snippet.pk)]
        return reverse(snippet.snippet_viewset.get_url_name(url_name), args=args)

    def setUp(self):
        self.user = self.login()
        # Non-revisable snippet (Advert): history comes from manually created
        # log entries only.
        self.non_revisable_snippet = Advert.objects.get(pk=1)
        ModelLogEntry.objects.create(
            content_type=ContentType.objects.get_for_model(Advert),
            label="Test Advert",
            action="wagtail.create",
            timestamp=make_aware(datetime.datetime(2021, 9, 30, 10, 1, 0)),
            object_id="1",
        )
        ModelLogEntry.objects.create(
            content_type=ContentType.objects.get_for_model(Advert),
            label="Test Advert Updated",
            action="wagtail.edit",
            timestamp=make_aware(datetime.datetime(2022, 5, 10, 12, 34, 0)),
            object_id="1",
        )
        # Revisable snippet: one create entry tied to the initial revision,
        # then a logged edit revision.
        self.revisable_snippet = FullFeaturedSnippet.objects.create(text="Foo")
        self.initial_revision = self.revisable_snippet.save_revision(user=self.user)
        ModelLogEntry.objects.create(
            content_type=ContentType.objects.get_for_model(FullFeaturedSnippet),
            label="Foo",
            action="wagtail.create",
            timestamp=make_aware(datetime.datetime(2022, 5, 10, 20, 22, 0)),
            object_id=self.revisable_snippet.pk,
            revision=self.initial_revision,
            content_changed=True,
        )
        self.revisable_snippet.text = "Bar"
        self.edit_revision = self.revisable_snippet.save_revision(
            user=self.user, log_action=True
        )

    def test_simple(self):
        response = self.get(self.non_revisable_snippet)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<td>Created</td>", html=True)
        # Timestamps are exposed via a tooltip attribute.
        self.assertContains(
            response,
            'data-w-tooltip-content-value="Sept. 30, 2021, 10:01 a.m."',
        )

    def test_filters(self):
        # Should work on both non-revisable and revisable snippets
        snippets = [self.non_revisable_snippet, self.revisable_snippet]
        for snippet in snippets:
            with self.subTest(snippet=snippet):
                response = self.get(snippet, {"action": "wagtail.edit"})
                self.assertEqual(response.status_code, 200)
                self.assertContains(response, "Edited", count=1)
                self.assertNotContains(response, "Created")
                soup = self.get_soup(response.content)
                # The active filter pill and its clear ("x") button
                filter = soup.select_one(".w-active-filters .w-pill")
                clear_button = filter.select_one(".w-pill__remove")
                self.assertEqual(
                    filter.get_text(separator=" ", strip=True),
                    "Action: Edit",
                )
                self.assertIsNotNone(clear_button)
                # Clearing the pill should reload results without the filter param.
                url, params = clear_button.attrs.get("data-w-swap-src-value").split("?")
                self.assertEqual(url, self.get_url(snippet, "history_results"))
                self.assertNotIn("action=wagtail.edit", params)

    def test_should_not_show_actions_on_non_revisable_snippet(self):
        response = self.get(self.non_revisable_snippet)
        edit_url = self.get_url(self.non_revisable_snippet, "edit")
        self.assertNotContains(
            response,
            f'<a href="{edit_url}">Edit</a>',
        )

    def test_should_show_actions_on_revisable_snippet(self):
        response = self.get(self.revisable_snippet)
        edit_url = self.get_url(self.revisable_snippet, "edit")
        revert_url = self.get_url(
            self.revisable_snippet,
            "revisions_revert",
            args=[self.revisable_snippet.pk, self.initial_revision.pk],
        )
        # Should not show the "live version" or "current draft" status tags
        self.assertNotContains(
            response, '<span class="w-status w-status--primary">Live version</span>'
        )
        self.assertNotContains(
            response, '<span class="w-status w-status--primary">Current draft</span>'
        )
        # The latest revision should have an "Edit" action instead of "Review"
        self.assertContains(
            response,
            f'<a href="{edit_url}">Edit</a>',
            count=1,
        )
        # Any other revision should have a "Review" action
        self.assertContains(
            response,
            f'<a href="{revert_url}">Review this version</a>',
            count=1,
        )

    def test_with_live_and_draft_status(self):
        snippet = DraftStateModel.objects.create(text="Draft-enabled Foo, Published")
        snippet.save_revision().publish()
        snippet.refresh_from_db()
        snippet.text = "Draft-enabled Bar, In Draft"
        snippet.save_revision(log_action=True)
        response = self.get(snippet)
        # Should show the "live version" status tag for the published revision
        self.assertContains(
            response,
            '<span class="w-status w-status--primary">Live version</span>',
            count=1,
            html=True,
        )
        # Should show the "current draft" status tag for the draft revision
        self.assertContains(
            response,
            '<span class="w-status w-status--primary">Current draft</span>',
            count=1,
            html=True,
        )
        soup = self.get_soup(response.content)
        sublabel = soup.select_one(".w-breadcrumbs__sublabel")
        # Should use the latest draft title in the breadcrumbs sublabel
        self.assertEqual(sublabel.get_text(strip=True), "Draft-enabled Bar, In Draft")

    @override_settings(WAGTAIL_I18N_ENABLED=True)
    def test_get_with_i18n_enabled(self):
        response = self.get(self.non_revisable_snippet)
        self.assertEqual(response.status_code, 200)
        response = self.get(self.revisable_snippet)
        self.assertEqual(response.status_code, 200)

    def test_num_queries(self):
        """Query count must stay flat as the revision/log count grows (no N+1)."""
        snippet = self.revisable_snippet
        # Warm up the cache
        self.get(snippet)
        with self.assertNumQueries(14):
            self.get(snippet)
        for i in range(20):
            revision = snippet.save_revision(user=self.user, log_action=True)
            if i % 5 == 0:
                revision.publish(user=self.user, log_action=True)
        # Should have the same number of queries as before (no N+1 queries)
        with self.assertNumQueries(14):
            self.get(snippet)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_history_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_index_view.py | from django.contrib.auth.models import Permission
from django.test import SimpleTestCase, TestCase
from django.test.utils import override_settings
from django.urls import reverse
from wagtail.admin.menu import admin_menu
from wagtail.coreutils import get_dummy_request
from wagtail.snippets.models import SNIPPET_MODELS
from wagtail.snippets.views.snippets import get_snippet_models_for_index_view
from wagtail.test.utils import WagtailTestUtils
from wagtail.test.utils.template_tests import AdminTemplateTestUtils
class TestGetSnippetModelsForIndexView(SimpleTestCase):
    """Model selection for the snippets index, with and without the show-all setting."""

    def test_default_lists_all_snippets_without_menu_items(self):
        # By default, models that register their own admin menu item are
        # excluded from the shared snippets index.
        expected = [
            snippet_model
            for snippet_model in SNIPPET_MODELS
            if not snippet_model.snippet_viewset.get_menu_item_is_registered()
        ]
        self.assertEqual(get_snippet_models_for_index_view(), expected)

    @override_settings(WAGTAILSNIPPETS_MENU_SHOW_ALL=True)
    def test_setting_allows_listing_of_all_snippet_models(self):
        # With the setting on, every registered snippet model is listed.
        self.assertEqual(get_snippet_models_for_index_view(), SNIPPET_MODELS)
class TestSnippetIndexView(AdminTemplateTestUtils, WagtailTestUtils, TestCase):
    """The top-level snippets index listing one entry per snippet model."""

    def setUp(self):
        self.user = self.login()

    def get(self, params=None):
        """GET the snippets index view."""
        return self.client.get(reverse("wagtailsnippets:index"), params)

    def test_get_with_limited_permissions(self):
        # Admin access alone (no snippet model permissions) -> redirected away.
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
        )
        self.user.save()
        response = self.get()
        self.assertEqual(response.status_code, 302)

    def test_get_with_only_view_permissions(self):
        # A "view" permission on a single model is enough to see the index,
        # with that model listed and linked.
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            ),
            Permission.objects.get(
                content_type__app_label="tests", codename="view_advert"
            ),
        )
        self.user.save()
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/generic/listing.html")
        soup = self.get_soup(response.content)
        link = soup.select_one("tr td a")
        self.assertEqual(link["href"], reverse("wagtailsnippets_tests_advert:list"))
        self.assertEqual(link.text.strip(), "Adverts")

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/generic/listing.html")
        self.assertBreadcrumbsItemsRendered(
            [{"url": "", "label": "Snippets"}],
            response.content,
        )
        # Now that it uses the generic template,
        # it should not contain the locale selector
        self.assertNotContains(response, "data-locale-selector")

    def test_displays_snippet(self):
        self.assertContains(self.get(), "Adverts")

    def test_snippets_menu_item_shown_with_only_view_permission(self):
        # The "Snippets" admin menu item also appears for view-only users.
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            ),
            Permission.objects.get(
                content_type__app_label="tests", codename="view_advert"
            ),
        )
        self.user.save()
        request = get_dummy_request()
        request.user = self.user
        menu_items = admin_menu.menu_items_for_request(request)
        snippets = [item for item in menu_items if item.name == "snippets"]
        self.assertEqual(len(snippets), 1)
        item = snippets[0]
        self.assertEqual(item.name, "snippets")
        self.assertEqual(item.label, "Snippets")
        self.assertEqual(item.icon_name, "snippet")
        self.assertEqual(item.url, reverse("wagtailsnippets:index"))
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_index_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_list_view.py | from django.contrib.admin.utils import quote
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, TransactionTestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.timezone import now
from wagtail import hooks
from wagtail.admin.forms.search import SearchForm
from wagtail.admin.staticfiles import versioned_static
from wagtail.admin.widgets.button import Button, ButtonWithDropdown, ListingButton
from wagtail.models import Locale, ModelLogEntry
from wagtail.snippets.widgets import (
SnippetListingButton,
)
from wagtail.test.snippets.models import (
NonAutocompleteSearchableSnippet,
SearchableSnippet,
StandardSnippetWithCustomPrimaryKey,
TranslatableSnippet,
)
from wagtail.test.testapp.models import (
Advert,
DraftStateModel,
FullFeaturedSnippet,
)
from wagtail.test.utils import WagtailTestUtils
from wagtail.utils.deprecation import RemovedInWagtail80Warning
class TestSnippetListView(WagtailTestUtils, TestCase):
def setUp(self):
self.login()
user_model = get_user_model()
self.user = user_model.objects.get()
def get(self, params=None):
return self.client.get(reverse("wagtailsnippets_tests_advert:list"), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/index.html")
self.assertEqual(response.context["header_icon"], "snippet")
def get_with_limited_permissions(self):
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
)
self.user.save()
response = self.get()
self.assertEqual(response.status_code, 302)
def get_with_edit_permission_only(self):
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
),
Permission.objects.get(
content_type__app_label="tests", codename="change_advert"
),
)
self.user.save()
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
"<p>There are no adverts to display.</p>",
html=True,
)
self.assertNotContains(response, reverse("wagtailsnippets_tests_advert:add"))
def test_ordering(self):
"""
Listing should be ordered descending by PK if no ordering has been set on the model
"""
for i in range(1, 11):
Advert.objects.create(pk=i, text="advert %d" % i)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["page_obj"][0].text, "advert 10")
def test_simple_pagination(self):
pages = ["0", "1", "-1", "9999", "Not a page"]
for page in pages:
response = self.get({"p": page})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/index.html")
def test_displays_add_button(self):
self.assertContains(self.get(), "Add advert")
def test_not_searchable(self):
self.assertFalse(self.get().context.get("search_form"))
    def test_register_snippet_listing_buttons_hook_deprecated_class(self):
        """Buttons yielded as the deprecated SnippetListingButton warn and are
        rendered inside the "More" dropdown rather than at the top level."""
        advert = Advert.objects.create(text="My Lovely advert")

        def snippet_listing_buttons(snippet, user, next_url=None):
            # The hook receives the listed snippet, the current user, and the
            # listing URL as next_url.
            self.assertEqual(snippet, advert)
            self.assertEqual(user, self.user)
            self.assertEqual(next_url, reverse("wagtailsnippets_tests_advert:list"))
            yield SnippetListingButton(
                "Another useless snippet listing button", "/custom-url", priority=10
            )

        with hooks.register_temporarily(
            "register_snippet_listing_buttons", snippet_listing_buttons
        ):
            with self.assertWarnsMessage(
                RemovedInWagtail80Warning,
                "`SnippetListingButton` is deprecated. "
                "Use `wagtail.admin.widgets.button.Button` "
                "or `wagtail.admin.widgets.button.ListingButton` instead.",
            ):
                response = self.get()

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/shared/buttons.html")
        soup = self.get_soup(response.content)
        actions = soup.select_one("tbody tr td ul.actions")
        # Not rendered as a top-level row action...
        top_level_custom_button = actions.select_one("li > a[href='/custom-url']")
        self.assertIsNone(top_level_custom_button)
        # ...but inside the dropdown.
        custom_button = actions.select_one(
            "li [data-controller='w-dropdown'] a[href='/custom-url']"
        )
        self.assertIsNotNone(custom_button)
        self.assertEqual(
            custom_button.text.strip(),
            "Another useless snippet listing button",
        )
    def test_register_snippet_listing_buttons_hook(self):
        """ListingButton renders as a top-level action; plain Button goes into
        the "More" dropdown."""
        advert = Advert.objects.create(text="My Lovely advert")

        def snippet_listing_buttons(snippet, user, next_url=None):
            self.assertEqual(snippet, advert)
            self.assertEqual(user, self.user)
            self.assertEqual(next_url, reverse("wagtailsnippets_tests_advert:list"))
            yield ListingButton(
                "A useless top-level snippet listing button",
                "/custom-url",
                priority=10,
            )
            yield Button(
                "A useless snippet listing button inside the 'More' dropdown",
                "/custom-url",
                priority=10,
            )

        with hooks.register_temporarily(
            "register_snippet_listing_buttons", snippet_listing_buttons
        ):
            response = self.get()

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/shared/buttons.html")
        soup = self.get_soup(response.content)
        actions = soup.select_one("tbody tr td ul.actions")
        custom_buttons = actions.select("a[href='/custom-url']")
        # The ListingButton is the first match and sits directly in an <li>.
        top_level_custom_button = actions.select_one("li > a[href='/custom-url']")
        self.assertIs(top_level_custom_button, custom_buttons[0])
        self.assertEqual(
            top_level_custom_button.text.strip(),
            "A useless top-level snippet listing button",
        )
        # The plain Button is the second match and lives inside the dropdown.
        in_dropdown_custom_button = actions.select_one(
            "li [data-controller='w-dropdown'] a[href='/custom-url']"
        )
        self.assertIs(in_dropdown_custom_button, custom_buttons[1])
        self.assertEqual(
            in_dropdown_custom_button.text.strip(),
            "A useless snippet listing button inside the 'More' dropdown",
        )
    def test_register_snippet_listing_buttons_hook_with_dropdown(self):
        """A hook may yield its own ButtonWithDropdown, rendered alongside the
        default "More" dropdown and never nested inside it."""
        advert = Advert.objects.create(text="My Lovely advert")

        def snippet_listing_buttons(snippet, user, next_url=None):
            self.assertEqual(snippet, advert)
            self.assertEqual(user, self.user)
            self.assertEqual(next_url, reverse("wagtailsnippets_tests_advert:list"))
            yield ButtonWithDropdown(
                label="Moar pls!",
                buttons=[ListingButton("Alrighty", "/cheers", priority=10)],
                attrs={"data-foo": "bar"},
            )

        with hooks.register_temporarily(
            "register_snippet_listing_buttons", snippet_listing_buttons
        ):
            response = self.get()

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/shared/buttons.html")
        soup = self.get_soup(response.content)
        actions = soup.select_one("tbody tr td ul.actions")
        # Dropdowns must never be nested inside one another.
        nested_dropdown = actions.select_one(
            "li [data-controller='w-dropdown'] [data-controller='w-dropdown']"
        )
        self.assertIsNone(nested_dropdown)
        dropdown_buttons = actions.select("li > [data-controller='w-dropdown']")
        # Default "More" button and the custom "Moar pls!" button
        self.assertEqual(len(dropdown_buttons), 2)
        custom_dropdown = None
        for button in dropdown_buttons:
            if "Moar pls!" in button.text.strip():
                custom_dropdown = button
        self.assertIsNotNone(custom_dropdown)
        self.assertEqual(custom_dropdown.select_one("button").text.strip(), "Moar pls!")
        # Custom attrs are passed through to the rendered element.
        self.assertEqual(custom_dropdown.get("data-foo"), "bar")
        # Should contain the custom button inside the custom dropdown
        custom_button = custom_dropdown.find("a", attrs={"href": "/cheers"})
        self.assertIsNotNone(custom_button)
        self.assertEqual(custom_button.text.strip(), "Alrighty")
    def test_construct_snippet_listing_buttons_hook(self):
        """The construct hook registered by the test app can add a button,
        which lands inside the default "More" dropdown."""
        Advert.objects.create(text="My Lovely advert")
        # testapp implements a construct_snippet_listing_buttons hook
        # that adds a dummy button with the label 'Dummy Button' which points
        # to '/dummy-button' and is placed inside the default "More" dropdown button
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/shared/buttons.html")
        soup = self.get_soup(response.content)
        dropdowns = soup.select(
            "tbody tr td ul.actions > li > [data-controller='w-dropdown']"
        )
        # Only the default "More" dropdown should exist.
        self.assertEqual(len(dropdowns), 1)
        more_dropdown = dropdowns[0]
        dummy_button = more_dropdown.find("a", attrs={"href": "/dummy-button"})
        self.assertIsNotNone(dummy_button)
        self.assertEqual(dummy_button.text.strip(), "Dummy Button")
def test_construct_snippet_listing_buttons_hook_contains_default_buttons(self):
advert = Advert.objects.create(text="My Lovely advert")
delete_url = reverse(
"wagtailsnippets_tests_advert:delete", args=[quote(advert.pk)]
)
def hide_delete_button_for_lovely_advert(buttons, snippet, user):
# Edit, delete, dummy button, copy button
self.assertEqual(len(buttons), 4)
buttons[:] = [button for button in buttons if button.url != delete_url]
self.assertEqual(len(buttons), 3)
with hooks.register_temporarily(
"construct_snippet_listing_buttons",
hide_delete_button_for_lovely_advert,
):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/shared/buttons.html")
self.assertNotContains(response, delete_url)
def test_dropdown_not_rendered_when_no_child_buttons_exist(self):
Advert.objects.create(text="My Lovely advert")
def remove_all_buttons(buttons, snippet, user):
buttons[:] = []
self.assertEqual(len(buttons), 0)
with hooks.register_temporarily(
"construct_snippet_listing_buttons",
remove_all_buttons,
):
response = self.get()
soup = self.get_soup(response.content)
actions = soup.select_one("tbody tr td ul.actions")
self.assertIsNone(actions)
def test_use_latest_draft_as_title(self):
snippet = DraftStateModel.objects.create(text="Draft-enabled Foo, Published")
snippet.save_revision().publish()
snippet.text = "Draft-enabled Bar, In Draft"
snippet.save_revision()
response = self.client.get(
reverse("wagtailsnippets_tests_draftstatemodel:list"),
)
edit_url = reverse(
"wagtailsnippets_tests_draftstatemodel:edit",
args=[quote(snippet.pk)],
)
# Should use the latest draft title in the listing
self.assertContains(
response,
f"""
<a href="{edit_url}">
<span id="snippet_{quote(snippet.pk)}_title">
Draft-enabled Bar, In Draft
</span>
</a>
""",
html=True,
)
def test_use_fallback_for_blank_string_representation(self):
snippet = DraftStateModel.objects.create(text="", live=False)
response = self.client.get(
reverse("wagtailsnippets_tests_draftstatemodel:list"),
)
edit_url = reverse(
"wagtailsnippets_tests_draftstatemodel:edit",
args=[quote(snippet.pk)],
)
title = f"DraftStateModel object ({snippet.pk})"
self.assertContains(
response,
f"""
<a href="{edit_url}">
<span id="snippet_{quote(snippet.pk)}_title">
{title}
</span>
</a>
""",
html=True,
)
def test_use_fallback_for_blank_title_field(self):
# FullFeaturedSnippet's listing view uses the "text" field as the title column,
# rather than the str() representation. If this is blank, we show "(blank)" so that
# there is something to click on
snippet = FullFeaturedSnippet.objects.create(text="", live=False)
response = self.client.get(
reverse("some_namespace:list"),
)
edit_url = reverse(
"some_namespace:edit",
args=[quote(snippet.pk)],
)
self.assertContains(
response,
f"""
<a href="{edit_url}">
<span id="snippet_{quote(snippet.pk)}_title">
(blank)
</span>
</a>
""",
html=True,
)
def test_bulk_action_rendered(self):
response = self.get()
self.assertEqual(response.status_code, 200)
# Should render bulk actions markup
bulk_actions_js = versioned_static("wagtailadmin/js/bulk-actions.js")
soup = self.get_soup(response.content)
script = soup.select_one(f"script[src='{bulk_actions_js}']")
self.assertIsNotNone(script)
bulk_actions = soup.select("[data-bulk-action-button]")
self.assertTrue(bulk_actions)
# 'next' parameter is constructed client-side later based on filters state
for action in bulk_actions:
self.assertNotIn("next=", action["href"])
@override_settings(WAGTAIL_I18N_ENABLED=True)
class TestLocaleFeaturesOnList(WagtailTestUtils, TestCase):
@classmethod
def setUpTestData(cls):
cls.fr_locale = Locale.objects.create(language_code="fr")
cls.list_url = reverse("wagtailsnippets_snippetstests_translatablesnippet:list")
cls.add_url = reverse("wagtailsnippets_snippetstests_translatablesnippet:add")
def setUp(self):
self.user = self.login()
def _add_snippets(self):
TranslatableSnippet.objects.create(text="English snippet")
TranslatableSnippet.objects.create(text="French snippet", locale=self.fr_locale)
@override_settings(
WAGTAIL_CONTENT_LANGUAGES=[
("ar", "Arabic"),
("en", "English"),
("fr", "French"),
]
)
def test_locale_selector(self):
response = self.client.get(self.list_url)
soup = self.get_soup(response.content)
# Should only show languages that also have the corresponding Locale
# (the Arabic locale is not created in the setup, so it should not be shown)
arabic_input = soup.select_one('input[name="locale"][value="ar"]')
self.assertIsNone(arabic_input)
french_input = soup.select_one('input[name="locale"][value="fr"]')
self.assertIsNotNone(french_input)
# Check that the add URLs include the locale
add_url = f"{self.add_url}?locale=en"
add_buttons = soup.select(f'a[href="{add_url}"]')
self.assertEqual(len(add_buttons), 2)
self.assertContains(
response,
f"""<p>There are no translatable snippets to display.
Why not <a href="{add_url}">add one</a>?</p>""",
html=True,
)
def test_no_locale_filter_when_only_one_locale(self):
self.fr_locale.delete()
response = self.client.get(self.list_url)
soup = self.get_soup(response.content)
locale_input = soup.select_one('input[name="locale"]')
self.assertIsNone(locale_input)
# The viewset has no other filters configured,
# so the filters drilldown should not be present
filters_drilldown = soup.select_one("#filters-drilldown")
self.assertIsNone(filters_drilldown)
@override_settings(WAGTAIL_I18N_ENABLED=False)
def test_locale_selector_not_present_when_i18n_disabled(self):
response = self.client.get(self.list_url)
soup = self.get_soup(response.content)
input_element = soup.select_one('input[name="locale"]')
self.assertIsNone(input_element)
# Check that the add URLs don't include the locale
add_url = self.add_url
soup = self.get_soup(response.content)
add_buttons = soup.select(f'a[href="{add_url}"]')
self.assertEqual(len(add_buttons), 2)
self.assertContains(
response,
f"""<p>There are no translatable snippets to display.
Why not <a href="{add_url}">add one</a>?</p>""",
html=True,
)
def test_locale_selector_not_present_on_non_translatable_snippet(self):
response = self.client.get(reverse("wagtailsnippets_tests_advert:list"))
soup = self.get_soup(response.content)
input_element = soup.select_one('input[name="locale"]')
self.assertIsNone(input_element)
# Check that the add URLs don't include the locale
add_url = reverse("wagtailsnippets_tests_advert:add")
soup = self.get_soup(response.content)
add_buttons = soup.select(f'a[href="{add_url}"]')
self.assertEqual(len(add_buttons), 2)
self.assertContains(
response,
f"""<p>There are no adverts to display.
Why not <a href="{add_url}">add one</a>?</p>""",
html=True,
)
def test_locale_column(self):
self._add_snippets()
response = self.client.get(self.list_url)
soup = self.get_soup(response.content)
labels = soup.select("main table td .w-status--label")
self.assertEqual(len(labels), 2)
self.assertEqual(
sorted(label.text.strip() for label in labels),
["English", "French"],
)
@override_settings(WAGTAIL_I18N_ENABLED=False)
def test_locale_column_not_present_with_i18n_disabled(self):
self._add_snippets()
response = self.client.get(self.list_url)
soup = self.get_soup(response.content)
labels = soup.select("main table td .w-status--label")
self.assertEqual(len(labels), 0)
def test_locale_column_not_present_for_non_translatable_snippet(self):
response = self.client.get(reverse("wagtailsnippets_tests_advert:list"))
Advert.objects.create(text="English text")
soup = self.get_soup(response.content)
labels = soup.select("main table td .w-status--label")
self.assertEqual(len(labels), 0)
class TestListViewOrdering(WagtailTestUtils, TestCase):
@classmethod
def setUpTestData(cls):
for i in range(1, 10):
advert = Advert.objects.create(text=f"{i * 'a'}dvert {i}")
draft = DraftStateModel.objects.create(
text=f"{i * 'd'}raft {i}", live=False
)
if i % 2 == 0:
ModelLogEntry.objects.create(
content_type=ContentType.objects.get_for_model(Advert),
label="Test Advert",
action="wagtail.create",
timestamp=now(),
object_id=advert.pk,
)
draft.save_revision().publish()
def setUp(self):
self.login()
def test_listing_orderable_columns_with_no_mixin(self):
list_url = reverse("wagtailsnippets_tests_advert:list")
response = self.client.get(list_url)
sort_updated_url = list_url + "?ordering=_updated_at"
sort_live_url = list_url + "?ordering=live"
self.assertEqual(response.status_code, 200)
# Should use the tables framework
self.assertTemplateUsed(response, "wagtailadmin/tables/table.html")
# The Updated column header should be a link with the correct query param
self.assertContains(
response,
f'<th><a href="{sort_updated_url}" title="Sort by 'Updated' in ascending order." class="icon icon-arrow-down-after label">Updated</a></th>',
html=True,
)
# Should not contain the Status column header
self.assertNotContains(
response,
f'<th><a href="{sort_live_url}" title="Sort by 'Status' in ascending order." class="icon icon-arrow-down-after label">Status</a></th>',
html=True,
)
def test_listing_orderable_columns_with_draft_state_mixin(self):
list_url = reverse("wagtailsnippets_tests_draftstatemodel:list")
response = self.client.get(list_url)
sort_updated_url = list_url + "?ordering=_updated_at"
sort_live_url = list_url + "?ordering=live"
self.assertEqual(response.status_code, 200)
# Should use the tables framework
self.assertTemplateUsed(response, "wagtailadmin/tables/table.html")
# The Updated column header should be a link with the correct query param
self.assertContains(
response,
f'<th><a href="{sort_updated_url}" title="Sort by 'Updated' in ascending order." class="icon icon-arrow-down-after label">Updated</a></th>',
html=True,
)
# The Status column header should be a link with the correct query param
self.assertContains(
response,
f'<th><a href="{sort_live_url}" title="Sort by 'Status' in ascending order." class="icon icon-arrow-down-after label">Status</a></th>',
html=True,
)
def test_order_by_updated_at_with_no_mixin(self):
list_url = reverse("wagtailsnippets_tests_advert:list")
response = self.client.get(list_url + "?ordering=_updated_at")
self.assertEqual(response.status_code, 200)
# With ascending order, empty updated_at information should be shown first
self.assertIsNone(response.context["page_obj"][0]._updated_at)
# The most recently updated should be at the bottom
self.assertEqual(response.context["page_obj"][-1].text, "aaaaaaaadvert 8")
self.assertIsNotNone(response.context["page_obj"][-1]._updated_at)
# Should contain a link to reverse the order
self.assertContains(response, list_url + "?ordering=-_updated_at")
response = self.client.get(list_url + "?ordering=-_updated_at")
self.assertEqual(response.status_code, 200)
# With descending order, the first object should be the one that was last updated
self.assertEqual(response.context["page_obj"][0].text, "aaaaaaaadvert 8")
self.assertIsNotNone(response.context["page_obj"][0]._updated_at)
# Should contain a link to reverse the order
self.assertContains(response, list_url + "?ordering=_updated_at")
def test_order_by_updated_at_with_draft_state_mixin(self):
list_url = reverse("wagtailsnippets_tests_draftstatemodel:list")
response = self.client.get(list_url + "?ordering=_updated_at")
self.assertEqual(response.status_code, 200)
# With ascending order, empty updated_at information should be shown first
self.assertIsNone(response.context["page_obj"][0]._updated_at)
# The most recently updated should be at the bottom
self.assertEqual(response.context["page_obj"][-1].text, "ddddddddraft 8")
self.assertIsNotNone(response.context["page_obj"][-1]._updated_at)
# Should contain a link to reverse the order
self.assertContains(response, list_url + "?ordering=-_updated_at")
response = self.client.get(list_url + "?ordering=-_updated_at")
self.assertEqual(response.status_code, 200)
# With descending order, the first object should be the one that was last updated
self.assertEqual(response.context["page_obj"][0].text, "ddddddddraft 8")
self.assertIsNotNone(response.context["page_obj"][0]._updated_at)
# Should contain a link to reverse the order
self.assertContains(response, list_url + "?ordering=_updated_at")
def test_order_by_live(self):
list_url = reverse("wagtailsnippets_tests_draftstatemodel:list")
response = self.client.get(list_url + "?ordering=live")
self.assertEqual(response.status_code, 200)
# With ascending order, live=False should be shown first
self.assertFalse(response.context["page_obj"][0].live)
# The last one should be live=True
self.assertTrue(response.context["page_obj"][-1].live)
# Should contain a link to reverse the order
self.assertContains(response, list_url + "?ordering=-live")
response = self.client.get(list_url + "?ordering=-live")
self.assertEqual(response.status_code, 200)
# With descending order, live=True should be shown first
self.assertTrue(response.context["page_obj"][0].live)
# The last one should be live=False
self.assertFalse(response.context["page_obj"][-1].live)
# Should contain a link to reverse the order
self.assertContains(response, list_url + "?ordering=live")
class TestSnippetListViewWithSearchableSnippet(WagtailTestUtils, TransactionTestCase):
def setUp(self):
self.login()
# Create some instances of the searchable snippet for testing
self.snippet_a = SearchableSnippet.objects.create(text="Hello")
self.snippet_b = SearchableSnippet.objects.create(text="World")
self.snippet_c = SearchableSnippet.objects.create(text="Hello World")
def get(self, params=None):
return self.client.get(
reverse("wagtailsnippets_snippetstests_searchablesnippet:list"),
params,
)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/index.html")
# All snippets should be in items
items = list(response.context["page_obj"].object_list)
self.assertIn(self.snippet_a, items)
self.assertIn(self.snippet_b, items)
self.assertIn(self.snippet_c, items)
# The search box should not raise an error
self.assertNotContains(response, "This field is required.")
def test_empty_q(self):
response = self.get({"q": ""})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/index.html")
# All snippets should be in items
items = list(response.context["page_obj"].object_list)
self.assertIn(self.snippet_a, items)
self.assertIn(self.snippet_b, items)
self.assertIn(self.snippet_c, items)
# The search box should not raise an error
self.assertNotContains(response, "This field is required.")
def test_is_searchable(self):
self.assertIsInstance(self.get().context["search_form"], SearchForm)
def test_search_hello(self):
response = self.get({"q": "Hello"})
# Just snippets with "Hello" should be in items
items = list(response.context["page_obj"].object_list)
self.assertIn(self.snippet_a, items)
self.assertNotIn(self.snippet_b, items)
self.assertIn(self.snippet_c, items)
def test_search_world_autocomplete(self):
response = self.get({"q": "wor"})
# Just snippets with "World" should be in items
items = list(response.context["page_obj"].object_list)
self.assertNotIn(self.snippet_a, items)
self.assertIn(self.snippet_b, items)
self.assertIn(self.snippet_c, items)
class TestSnippetListViewWithNonAutocompleteSearchableSnippet(
WagtailTestUtils, TransactionTestCase
):
"""
Test that searchable snippets with no AutocompleteFields defined can still be searched using
full words
"""
def setUp(self):
self.login()
# Create some instances of the searchable snippet for testing
self.snippet_a = NonAutocompleteSearchableSnippet.objects.create(text="Hello")
self.snippet_b = NonAutocompleteSearchableSnippet.objects.create(text="World")
self.snippet_c = NonAutocompleteSearchableSnippet.objects.create(
text="Hello World"
)
def get(self, params=None):
return self.client.get(
reverse(
"wagtailsnippets_snippetstests_nonautocompletesearchablesnippet:list"
),
params,
)
def test_search_hello(self):
with self.assertWarnsRegex(
RuntimeWarning, "does not specify any AutocompleteFields"
):
response = self.get({"q": "Hello"})
# Just snippets with "Hello" should be in items
items = list(response.context["page_obj"].object_list)
self.assertIn(self.snippet_a, items)
self.assertNotIn(self.snippet_b, items)
self.assertIn(self.snippet_c, items)
class TestSnippetListViewWithCustomPrimaryKey(WagtailTestUtils, TestCase):
def setUp(self):
self.login()
# Create some instances of the searchable snippet for testing
self.snippet_a = StandardSnippetWithCustomPrimaryKey.objects.create(
snippet_id="snippet/01", text="Hello"
)
self.snippet_b = StandardSnippetWithCustomPrimaryKey.objects.create(
snippet_id="snippet/02", text="Hello"
)
self.snippet_c = StandardSnippetWithCustomPrimaryKey.objects.create(
snippet_id="snippet/03", text="Hello"
)
def get(self, params=None):
return self.client.get(
reverse(
"wagtailsnippets_snippetstests_standardsnippetwithcustomprimarykey:list"
),
params,
)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/index.html")
# All snippets should be in items
items = list(response.context["page_obj"].object_list)
self.assertIn(self.snippet_a, items)
self.assertIn(self.snippet_b, items)
self.assertIn(self.snippet_c, items)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_list_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 655,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_permissions.py | from django.contrib.admin.utils import quote
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from wagtail.test.testapp.models import Advert
from wagtail.test.utils import WagtailTestUtils
class TestAddOnlyPermissions(WagtailTestUtils, TestCase):
fixtures = ["test.json"]
def setUp(self):
self.test_snippet = Advert.objects.get(pk=1)
# Create a user with add_advert permission but not change_advert
user = self.create_user(
username="addonly", email="addonly@example.com", password="password"
)
add_permission = Permission.objects.get(
content_type__app_label="tests", codename="add_advert"
)
admin_permission = Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
user.user_permissions.add(add_permission, admin_permission)
self.login(username="addonly", password="password")
def test_get_index(self):
response = self.client.get(reverse("wagtailsnippets_tests_advert:list"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/index.html")
# user should get an "Add advert" button
self.assertContains(response, "Add advert")
def test_get_add(self):
response = self.client.get(reverse("wagtailsnippets_tests_advert:add"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/create.html")
self.assertEqual(response.context["header_icon"], "snippet")
def test_get_edit(self):
response = self.client.get(
reverse(
"wagtailsnippets_tests_advert:edit",
args=[quote(self.test_snippet.pk)],
)
)
# permission should be denied
self.assertRedirects(response, reverse("wagtailadmin_home"))
def test_get_delete(self):
response = self.client.get(
reverse(
"wagtailsnippets_tests_advert:delete",
args=[quote(self.test_snippet.pk)],
)
)
# permission should be denied
self.assertRedirects(response, reverse("wagtailadmin_home"))
class TestEditOnlyPermissions(WagtailTestUtils, TestCase):
fixtures = ["test.json"]
def setUp(self):
self.test_snippet = Advert.objects.get(pk=1)
# Create a user with change_advert permission but not add_advert
user = self.create_user(
username="changeonly", email="changeonly@example.com", password="password"
)
change_permission = Permission.objects.get(
content_type__app_label="tests", codename="change_advert"
)
admin_permission = Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
user.user_permissions.add(change_permission, admin_permission)
self.login(username="changeonly", password="password")
def test_get_index(self):
response = self.client.get(reverse("wagtailsnippets_tests_advert:list"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/index.html")
# user should not get an "Add advert" button
self.assertNotContains(response, "Add advert")
def test_get_add(self):
response = self.client.get(reverse("wagtailsnippets_tests_advert:add"))
# permission should be denied
self.assertRedirects(response, reverse("wagtailadmin_home"))
def test_get_edit(self):
response = self.client.get(
reverse(
"wagtailsnippets_tests_advert:edit",
args=[quote(self.test_snippet.pk)],
)
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")
self.assertEqual(response.context["header_icon"], "snippet")
def test_get_delete(self):
response = self.client.get(
reverse(
"wagtailsnippets_tests_advert:delete",
args=[quote(self.test_snippet.pk)],
)
)
# permission should be denied
self.assertRedirects(response, reverse("wagtailadmin_home"))
class TestDeleteOnlyPermissions(WagtailTestUtils, TestCase):
fixtures = ["test.json"]
def setUp(self):
self.test_snippet = Advert.objects.get(pk=1)
# Create a user with delete_advert permission
user = self.create_user(username="deleteonly", password="password")
change_permission = Permission.objects.get(
content_type__app_label="tests", codename="delete_advert"
)
admin_permission = Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
user.user_permissions.add(change_permission, admin_permission)
self.login(username="deleteonly", password="password")
def test_get_index(self):
response = self.client.get(reverse("wagtailsnippets_tests_advert:list"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/index.html")
# user should not get an "Add advert" button
self.assertNotContains(response, "Add advert")
def test_get_add(self):
response = self.client.get(reverse("wagtailsnippets_tests_advert:add"))
# permission should be denied
self.assertRedirects(response, reverse("wagtailadmin_home"))
def test_get_edit(self):
response = self.client.get(
reverse(
"wagtailsnippets_tests_advert:edit",
args=[quote(self.test_snippet.pk)],
)
)
# permission should be denied
self.assertRedirects(response, reverse("wagtailadmin_home"))
def test_get_delete(self):
response = self.client.get(
reverse(
"wagtailsnippets_tests_advert:delete",
args=[quote(self.test_snippet.pk)],
)
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/generic/confirm_delete.html")
self.assertEqual(response.context["header_icon"], "snippet")
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_permissions.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_revert_view.py | from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import now
from freezegun import freeze_time
from wagtail.models import ModelLogEntry
from wagtail.test.testapp.models import (
Advert,
DraftStateModel,
MultiPreviewModesModel,
RevisableModel,
)
from wagtail.test.utils import WagtailTestUtils
class TestSnippetRevisions(WagtailTestUtils, TestCase):
@property
def revert_url(self):
return self.get_url(
"revisions_revert", args=[quote(self.snippet.pk), self.initial_revision.pk]
)
def get(self):
return self.client.get(self.revert_url)
def post(self, post_data=None):
return self.client.post(self.revert_url, post_data)
def get_url(self, url_name, args=None):
view_name = self.snippet.snippet_viewset.get_url_name(url_name)
if args is None:
args = [quote(self.snippet.pk)]
return reverse(view_name, args=args)
def setUp(self):
self.user = self.login()
with freeze_time("2022-05-10 11:00:00"):
self.snippet = RevisableModel.objects.create(text="The original text")
self.initial_revision = self.snippet.save_revision(user=self.user)
ModelLogEntry.objects.create(
content_type=ContentType.objects.get_for_model(RevisableModel),
label="The original text",
action="wagtail.create",
timestamp=now(),
object_id=self.snippet.pk,
revision=self.initial_revision,
content_changed=True,
)
self.snippet.text = "The edited text"
self.snippet.save()
self.edit_revision = self.snippet.save_revision(user=self.user, log_action=True)
def test_get_revert_revision(self):
response = self.get()
self.assertEqual(response.status_code, 200)
if settings.USE_TZ:
# the default timezone is "Asia/Tokyo", so we expect UTC +9
expected_date_string = "May 10, 2022, 8 p.m."
else:
expected_date_string = "May 10, 2022, 11 a.m."
# Message should be shown
self.assertContains(
response,
f"You are viewing a previous version of this Revisable model from <b>{expected_date_string}</b> by",
count=1,
)
# Form should show the content of the revision, not the current draft
soup = self.get_soup(response.content)
textarea = soup.select_one("textarea[name='text']")
self.assertIsNotNone(textarea)
self.assertEqual(textarea.text.strip(), "The original text")
# Form action url should point to the revisions_revert view
form_tag = f'<form action="{self.revert_url}" method="POST">'
html = response.content.decode()
self.assertTagInHTML(form_tag, html, count=1, allow_extra_attrs=True)
# Buttons should be relabelled
self.assertContains(response, "Replace current revision", count=1)
soup = self.get_soup(response.content)
form = soup.select_one("form[data-edit-form]")
self.assertIsNotNone(form)
# Autosave should be disabled
self.assertNotIn("w-autosave", form["data-controller"].split())
self.assertNotIn("w-autosave", form["data-action"])
self.assertIsNone(form.attrs.get("data-w-autosave-interval-value"))
def test_get_revert_revision_with_non_revisable_snippet(self):
snippet = Advert.objects.create(text="foo")
response = self.client.get(
f"/admin/snippets/tests/advert/history/{snippet.pk}/revisions/1/revert/"
)
self.assertEqual(response.status_code, 404)
def test_get_with_limited_permissions(self):
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
)
self.user.save()
response = self.get()
self.assertEqual(response.status_code, 302)
def test_get_with_draft_state_snippet(self):
self.snippet = DraftStateModel.objects.create(text="Draft-enabled Foo")
self.initial_revision = self.snippet.save_revision()
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")
soup = self.get_soup(response.content)
# The save button should be labelled "Replace current draft"
footer = soup.select_one("footer")
save_button = footer.select_one(
'button[type="submit"]:not([name="action-publish"])'
)
self.assertIsNotNone(save_button)
self.assertEqual(save_button.text.strip(), "Replace current draft")
# The publish button should exist and have name="action-publish"
publish_button = footer.select_one(
'button[type="submit"][name="action-publish"]'
)
self.assertIsNotNone(publish_button)
self.assertEqual(publish_button.text.strip(), "Publish this version")
self.assertEqual(
set(publish_button.get("class")),
{"button", "action-save", "button-longrunning"},
)
# Should not show the Unpublish action menu item
unpublish_url = reverse(
"wagtailsnippets_tests_draftstatemodel:unpublish",
args=(quote(self.snippet.pk),),
)
unpublish_button = footer.select_one(f'a[href="{unpublish_url}"]')
self.assertIsNone(unpublish_button)
def test_get_with_previewable_snippet(self):
self.snippet = MultiPreviewModesModel.objects.create(text="Preview-enabled foo")
self.initial_revision = self.snippet.save_revision()
self.snippet.text = "Preview-enabled bar"
self.snippet.save_revision()
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailsnippets/snippets/edit.html")
# Message should be shown
self.assertContains(
response,
"You are viewing a previous version of this",
count=1,
)
# Form should show the content of the revision, not the current draft
self.assertContains(response, "Preview-enabled foo")
# Form action url should point to the revisions_revert view
form_tag = f'<form action="{self.revert_url}" method="POST">'
html = response.content.decode()
self.assertTagInHTML(form_tag, html, count=1, allow_extra_attrs=True)
# Buttons should be relabelled
self.assertContains(response, "Replace current revision", count=1)
# Should show the preview panel
preview_url = self.get_url("preview_on_edit")
self.assertContains(response, 'data-side-panel="preview"')
soup = self.get_soup(response.content)
controller = soup.select_one('[data-controller="w-preview"]')
self.assertIsNotNone(controller)
self.assertEqual(controller.get("data-w-preview-url-value"), preview_url)
# Should have the preview side panel toggle button
toggle_button = soup.find("button", {"data-side-panel-toggle": "preview"})
self.assertIsNotNone(toggle_button)
self.assertEqual("w-tooltip w-kbd", toggle_button["data-controller"])
self.assertEqual("mod+p", toggle_button["data-w-kbd-key-value"])
def test_replace_revision(self):
get_response = self.get()
text_from_revision = get_response.context["form"].initial["text"]
post_response = self.post(
post_data={
"text": text_from_revision + " reverted",
"revision": self.initial_revision.pk,
}
)
self.assertRedirects(post_response, self.get_url("list", args=[]))
self.snippet.refresh_from_db()
latest_revision = self.snippet.get_latest_revision()
log_entry = ModelLogEntry.objects.filter(revision=latest_revision).first()
# The instance should be updated
self.assertEqual(self.snippet.text, "The original text reverted")
# The initial revision, edited revision, and revert revision
self.assertEqual(self.snippet.revisions.count(), 3)
# The latest revision should be the revert revision
self.assertEqual(latest_revision.content["text"], "The original text reverted")
# A new log entry with "wagtail.revert" action should be created
self.assertIsNotNone(log_entry)
self.assertEqual(log_entry.action, "wagtail.revert")
def test_replace_with_limited_permissions(self):
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
)
self.user.save()
response = self.post(
post_data={
"text": "test text",
"revision": self.initial_revision.pk,
}
)
self.assertEqual(response.status_code, 302)
self.snippet.refresh_from_db()
self.assertNotEqual(self.snippet.text, "test text")
# Only the initial revision and edited revision, no revert revision
self.assertEqual(self.snippet.revisions.count(), 2)
def test_replace_draft(self):
self.snippet = DraftStateModel.objects.create(
text="Draft-enabled Foo", live=False
)
self.initial_revision = self.snippet.save_revision()
self.snippet.text = "Draft-enabled Foo edited"
self.edit_revision = self.snippet.save_revision()
get_response = self.get()
text_from_revision = get_response.context["form"].initial["text"]
post_response = self.post(
post_data={
"text": text_from_revision + " reverted",
"revision": self.initial_revision.pk,
}
)
self.assertRedirects(post_response, self.get_url("edit"))
self.snippet.refresh_from_db()
latest_revision = self.snippet.get_latest_revision()
log_entry = ModelLogEntry.objects.filter(revision=latest_revision).first()
publish_log_entries = ModelLogEntry.objects.filter(
content_type=ContentType.objects.get_for_model(DraftStateModel),
action="wagtail.publish",
object_id=self.snippet.pk,
)
# The instance should be updated, since it is still a draft
self.assertEqual(self.snippet.text, "Draft-enabled Foo reverted")
# The initial revision, edited revision, and revert revision
self.assertEqual(self.snippet.revisions.count(), 3)
# The latest revision should be the revert revision
self.assertEqual(latest_revision.content["text"], "Draft-enabled Foo reverted")
# A new log entry with "wagtail.revert" action should be created
self.assertIsNotNone(log_entry)
self.assertEqual(log_entry.action, "wagtail.revert")
# There should be no log entries for the publish action
self.assertEqual(publish_log_entries.count(), 0)
# The instance should still be a draft
self.assertFalse(self.snippet.live)
self.assertTrue(self.snippet.has_unpublished_changes)
self.assertIsNone(self.snippet.first_published_at)
self.assertIsNone(self.snippet.last_published_at)
self.assertIsNone(self.snippet.live_revision)
    def test_replace_publish(self):
        """Reverting with ``action-publish`` publishes the reverted content.

        Mirrors ``test_replace_draft`` but submits the publish action, then
        checks the publish log entry, live state, and publish timestamps
        (frozen so they can be compared exactly).
        """
        self.snippet = DraftStateModel.objects.create(text="Draft-enabled Foo")
        self.initial_revision = self.snippet.save_revision()
        self.snippet.text = "Draft-enabled Foo edited"
        self.edit_revision = self.snippet.save_revision()
        get_response = self.get()
        # The revert form is prefilled from the chosen revision's content.
        text_from_revision = get_response.context["form"].initial["text"]
        timestamp = now()
        with freeze_time(timestamp):
            post_response = self.post(
                post_data={
                    "text": text_from_revision + " reverted",
                    "revision": self.initial_revision.pk,
                    "action-publish": "action-publish",
                }
            )
        self.assertRedirects(post_response, self.get_url("list", args=[]))
        self.snippet.refresh_from_db()
        latest_revision = self.snippet.get_latest_revision()
        log_entry = ModelLogEntry.objects.filter(revision=latest_revision).first()
        revert_log_entries = ModelLogEntry.objects.filter(
            content_type=ContentType.objects.get_for_model(DraftStateModel),
            action="wagtail.revert",
            object_id=self.snippet.pk,
        )
        # The instance should be updated
        self.assertEqual(self.snippet.text, "Draft-enabled Foo reverted")
        # The initial revision, edited revision, and revert revision
        self.assertEqual(self.snippet.revisions.count(), 3)
        # The latest revision should be the revert revision
        self.assertEqual(latest_revision.content["text"], "Draft-enabled Foo reverted")
        # The latest log entry should use the "wagtail.publish" action
        self.assertIsNotNone(log_entry)
        self.assertEqual(log_entry.action, "wagtail.publish")
        # There should be a log entry for the revert action
        self.assertEqual(revert_log_entries.count(), 1)
        # The instance should be live
        self.assertTrue(self.snippet.live)
        self.assertFalse(self.snippet.has_unpublished_changes)
        self.assertEqual(self.snippet.first_published_at, timestamp)
        self.assertEqual(self.snippet.last_published_at, timestamp)
        self.assertEqual(self.snippet.live_revision, self.snippet.latest_revision)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_revert_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_snippet_models.py | from django.core import checks
from django.test import TestCase
from django.urls import reverse
from wagtail.admin.forms import WagtailAdminModelForm
from wagtail.admin.panels import FieldPanel, get_edit_handler
from wagtail.snippets.models import SNIPPET_MODELS, register_snippet
from wagtail.test.snippets.forms import FancySnippetForm
from wagtail.test.snippets.models import (
AlphaSnippet,
FancySnippet,
RegisterDecorator,
RegisterFunction,
StandardSnippet,
ZuluSnippet,
)
from wagtail.test.testapp.models import AdvertWithTabbedInterface
from wagtail.test.utils import WagtailTestUtils
class TestModelOrdering(WagtailTestUtils, TestCase):
    """Snippet listings and choosers should honour the model's ordering."""

    def setUp(self):
        # Nine adverts created in numeric order, plus one whose text sorts
        # alphabetically before all of them.
        for index in range(1, 10):
            AdvertWithTabbedInterface.objects.create(text="advert %d" % index)
        AdvertWithTabbedInterface.objects.create(text="aaaadvert")
        self.login()

    def test_listing_respects_model_ordering(self):
        listing_url = reverse("wagtailsnippets_tests_advertwithtabbedinterface:list")
        response = self.client.get(listing_url)
        self.assertEqual(response.status_code, 200)
        first_listed = response.context["page_obj"][0]
        self.assertEqual(first_listed.text, "aaaadvert")

    def test_chooser_respects_model_ordering(self):
        chooser_url = reverse(
            "wagtailsnippetchoosers_tests_advertwithtabbedinterface:choose"
        )
        response = self.client.get(chooser_url)
        self.assertEqual(response.status_code, 200)
        first_result = response.context["results"][0]
        self.assertEqual(first_result.text, "aaaadvert")
class TestSnippetRegistering(TestCase):
    """Models registered via the function and decorator styles should both
    appear in SNIPPET_MODELS."""

    def test_register_function(self):
        self.assertIn(RegisterFunction, SNIPPET_MODELS)

    def test_register_decorator(self):
        # Misbehaving decorators often return None
        self.assertIsNotNone(RegisterDecorator)
        self.assertIn(RegisterDecorator, SNIPPET_MODELS)
class TestSnippetOrdering(TestCase):
    """SNIPPET_MODELS should be kept ordered regardless of registration order."""

    def setUp(self):
        # Deliberately register in reverse-alphabetical order.
        register_snippet(ZuluSnippet)
        register_snippet(AlphaSnippet)

    def test_snippets_ordering(self):
        # Ensure AlphaSnippet is before ZuluSnippet.
        # Cannot check first and last position as other snippets
        # may get registered elsewhere during test.
        alpha_position = SNIPPET_MODELS.index(AlphaSnippet)
        zulu_position = SNIPPET_MODELS.index(ZuluSnippet)
        self.assertLess(alpha_position, zulu_position)
class TestSnippetEditHandlers(WagtailTestUtils, TestCase):
    """Check which form class each snippet's edit handler produces."""

    def _form_class_for(self, model):
        # Resolve the model's edit handler to its generated form class.
        return get_edit_handler(model).get_form_class()

    def test_standard_edit_handler(self):
        form_class = self._form_class_for(StandardSnippet)
        self.assertTrue(issubclass(form_class, WagtailAdminModelForm))
        self.assertFalse(issubclass(form_class, FancySnippetForm))

    def test_fancy_edit_handler(self):
        form_class = self._form_class_for(FancySnippet)
        self.assertTrue(issubclass(form_class, WagtailAdminModelForm))
        self.assertTrue(issubclass(form_class, FancySnippetForm))
class TestPanelConfigurationChecks(WagtailTestUtils, TestCase):
    """System checks should warn about panel attributes that have no effect
    on snippet editing (wagtailadmin.W002)."""

    def setUp(self):
        self.warning_id = "wagtailadmin.W002"

        def get_checks_result():
            # run checks only with the 'panels' tag
            checks_result = checks.run_checks(tags=["panels"])
            return [
                warning for warning in checks_result if warning.id == self.warning_id
            ]

        self.get_checks_result = get_checks_result

    def test_model_with_single_tabbed_panel_only(self):
        StandardSnippet.content_panels = [FieldPanel("text")]
        # Remove the class-level attribute even if the assertion below fails,
        # so the mutation cannot leak into other tests. (A bare delattr at the
        # end of the test would be skipped on failure.)
        self.addCleanup(delattr, StandardSnippet, "content_panels")

        warning = checks.Warning(
            "StandardSnippet.content_panels will have no effect on snippets editing",
            hint="""Ensure that StandardSnippet uses `panels` instead of `content_panels` \
or set up an `edit_handler` if you want a tabbed editing interface.
There are no default tabs on non-Page models so there will be no\
Content tab for the content_panels to render in.""",
            obj=StandardSnippet,
            id="wagtailadmin.W002",
        )

        checks_results = self.get_checks_result()

        self.assertEqual([warning], checks_results)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_snippet_models.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_unpublish_view.py | from unittest import mock
from django.contrib.admin.utils import quote
from django.contrib.auth.models import Permission
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from django.urls import reverse
from wagtail.signals import unpublished
from wagtail.test.testapp.models import DraftStateCustomPrimaryKeyModel
from wagtail.test.utils.wagtail_tests import WagtailTestUtils
class TestSnippetUnpublish(WagtailTestUtils, TestCase):
    """Tests for the snippet unpublish view, using a draft-enabled model with
    a custom primary key that requires URL quoting."""

    def setUp(self):
        self.user = self.login()
        self.snippet = DraftStateCustomPrimaryKeyModel.objects.create(
            custom_id="custom/1", text="to be unpublished"
        )
        # The pk contains a "/", so it must be quoted for use in a URL.
        self.unpublish_url = reverse(
            "wagtailsnippets_tests_draftstatecustomprimarykeymodel:unpublish",
            args=(quote(self.snippet.pk),),
        )

    def test_unpublish_view(self):
        """
        This tests that the unpublish view responds with an unpublish confirm page
        """
        # Get unpublish page
        response = self.client.get(self.unpublish_url)
        # Check that the user received an unpublish confirm page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/generic/confirm_unpublish.html")

    def test_unpublish_view_invalid_pk(self):
        """
        This tests that the unpublish view returns an error if the object pk is invalid
        """
        # Get unpublish page
        response = self.client.get(
            reverse(
                "wagtailsnippets_tests_draftstatecustomprimarykeymodel:unpublish",
                args=(quote(12345),),
            )
        )
        # Check that the user received a 404 response
        self.assertEqual(response.status_code, 404)

    def test_unpublish_view_get_bad_permissions(self):
        """
        This tests that the unpublish view doesn't allow users without unpublish permissions
        """
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
        )
        self.user.save()
        # Get unpublish page
        response = self.client.get(self.unpublish_url)
        # Check that the user received a 302 redirected response
        self.assertEqual(response.status_code, 302)

    def test_unpublish_view_post_bad_permissions(self):
        """
        This tests that the unpublish view doesn't allow users without unpublish permissions
        """
        # Connect a mock signal handler to unpublished signal
        mock_handler = mock.MagicMock()
        unpublished.connect(mock_handler)
        # Disconnect in a finally block so a failing assertion cannot leave
        # the handler attached for subsequent tests.
        try:
            # Remove privileges from user
            self.user.is_superuser = False
            self.user.user_permissions.add(
                Permission.objects.get(
                    content_type__app_label="wagtailadmin", codename="access_admin"
                )
            )
            self.user.save()
            # Post to the unpublish view
            response = self.client.post(self.unpublish_url)
            # Should be redirected to the home page
            self.assertRedirects(response, reverse("wagtailadmin_home"))
            # Check that the object was not unpublished
            self.assertTrue(
                DraftStateCustomPrimaryKeyModel.objects.get(pk=self.snippet.pk).live
            )
            # Check that the unpublished signal was not fired
            self.assertEqual(mock_handler.call_count, 0)
        finally:
            unpublished.disconnect(mock_handler)

    def test_unpublish_view_post_with_publish_permission(self):
        """
        This posts to the unpublish view and checks that the object was unpublished,
        using a specific publish permission instead of relying on the superuser flag
        """
        # Connect a mock signal handler to unpublished signal
        mock_handler = mock.MagicMock()
        unpublished.connect(mock_handler)
        try:
            # Only add edit and publish permissions
            self.user.is_superuser = False
            edit_permission = Permission.objects.get(
                content_type__app_label="tests",
                codename="change_draftstatecustomprimarykeymodel",
            )
            publish_permission = Permission.objects.get(
                content_type__app_label="tests",
                codename="publish_draftstatecustomprimarykeymodel",
            )
            admin_permission = Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            )
            self.user.user_permissions.add(
                edit_permission,
                publish_permission,
                admin_permission,
            )
            self.user.save()
            # Post to the unpublish view
            response = self.client.post(self.unpublish_url)
            # Should be redirected to the listing page
            self.assertRedirects(
                response,
                reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
            )
            # Check that the object was unpublished
            self.assertFalse(
                DraftStateCustomPrimaryKeyModel.objects.get(pk=self.snippet.pk).live
            )
            # Check that the unpublished signal was fired
            self.assertEqual(mock_handler.call_count, 1)
            # mock_calls[0][2] is the kwargs dict of the first call
            mock_call = mock_handler.mock_calls[0][2]
            self.assertEqual(mock_call["sender"], DraftStateCustomPrimaryKeyModel)
            self.assertEqual(mock_call["instance"], self.snippet)
            self.assertIsInstance(
                mock_call["instance"], DraftStateCustomPrimaryKeyModel
            )
        finally:
            unpublished.disconnect(mock_handler)

    def test_unpublish_view_post(self):
        """
        This posts to the unpublish view and checks that the object was unpublished
        """
        # Connect a mock signal handler to unpublished signal
        mock_handler = mock.MagicMock()
        unpublished.connect(mock_handler)
        try:
            # Post to the unpublish view
            response = self.client.post(self.unpublish_url)
            # Should be redirected to the listing page
            self.assertRedirects(
                response,
                reverse("wagtailsnippets_tests_draftstatecustomprimarykeymodel:list"),
            )
            # Check that the object was unpublished
            self.assertFalse(
                DraftStateCustomPrimaryKeyModel.objects.get(pk=self.snippet.pk).live
            )
            # Check that the unpublished signal was fired
            self.assertEqual(mock_handler.call_count, 1)
            # mock_calls[0][2] is the kwargs dict of the first call
            mock_call = mock_handler.mock_calls[0][2]
            self.assertEqual(mock_call["sender"], DraftStateCustomPrimaryKeyModel)
            self.assertEqual(mock_call["instance"], self.snippet)
            self.assertIsInstance(
                mock_call["instance"], DraftStateCustomPrimaryKeyModel
            )
        finally:
            unpublished.disconnect(mock_handler)

    def test_after_unpublish_hook(self):
        """A response from an after_unpublish hook is returned, after the
        object has been unpublished."""

        def hook_func(request, snippet):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(snippet.pk, self.snippet.pk)
            return HttpResponse("Overridden!")

        with self.register_hook("after_unpublish", hook_func):
            post_data = {}
            response = self.client.post(self.unpublish_url, post_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")
        self.snippet.refresh_from_db()
        self.assertEqual(self.snippet.status_string, "draft")

    def test_before_unpublish(self):
        """A response from a before_unpublish hook is returned and short-circuits
        the view, so the object stays live."""

        def hook_func(request, snippet):
            self.assertIsInstance(request, HttpRequest)
            self.assertEqual(snippet.pk, self.snippet.pk)
            return HttpResponse("Overridden!")

        with self.register_hook("before_unpublish", hook_func):
            post_data = {}
            response = self.client.post(self.unpublish_url, post_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b"Overridden!")
        # The hook response is served before unpublish is called.
        self.snippet.refresh_from_db()
        self.assertEqual(self.snippet.status_string, "live")
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_unpublish_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_unschedule_view.py | import datetime
from django.contrib.admin.utils import quote
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import now
from wagtail.models import Revision
from wagtail.test.testapp.models import DraftStateCustomPrimaryKeyModel
from wagtail.test.utils import WagtailTestUtils
class TestSnippetUnschedule(WagtailTestUtils, TestCase):
    """Tests for the view that cancels a revision's scheduled publishing."""

    def setUp(self):
        self.user = self.login()
        self.test_snippet = DraftStateCustomPrimaryKeyModel.objects.create(
            custom_id="custom/1", text="Draft-enabled Foo", live=False
        )
        # Schedule a revision to go live tomorrow; publishing a revision with
        # a future go_live_at sets approved_go_live_at rather than going live.
        self.go_live_at = now() + datetime.timedelta(days=1)
        self.test_snippet.text = "I've been edited!"
        self.test_snippet.go_live_at = self.go_live_at
        self.latest_revision = self.test_snippet.save_revision()
        self.latest_revision.publish()
        self.test_snippet.refresh_from_db()
        self.unschedule_url = reverse(
            "wagtailsnippets_tests_draftstatecustomprimarykeymodel:revisions_unschedule",
            args=[quote(self.test_snippet.pk), self.latest_revision.pk],
        )

    def set_permissions(self, set_publish_permission):
        # Drop superuser status and grant admin access + change permission,
        # optionally adding the publish permission under test.
        self.user.is_superuser = False
        permissions = [
            Permission.objects.get(
                content_type__app_label="tests",
                codename="change_draftstatecustomprimarykeymodel",
            ),
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            ),
        ]
        if set_publish_permission:
            permissions.append(
                Permission.objects.get(
                    content_type__app_label="tests",
                    codename="publish_draftstatecustomprimarykeymodel",
                )
            )
        self.user.user_permissions.add(*permissions)
        self.user.save()

    def test_get_unschedule_view_with_publish_permissions(self):
        """Users with publish permission see the confirmation page."""
        self.set_permissions(True)
        # Get unschedule page
        response = self.client.get(self.unschedule_url)
        # Check that the user received a confirmation page
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response, "wagtailadmin/shared/revisions/confirm_unschedule.html"
        )

    def test_get_unschedule_view_bad_permissions(self):
        """Users without publish permission are redirected away."""
        self.set_permissions(False)
        # Get unschedule page
        response = self.client.get(self.unschedule_url)
        # Check that the user is redirected to the admin homepage
        self.assertRedirects(response, reverse("wagtailadmin_home"))

    def test_post_unschedule_view_with_publish_permissions(self):
        """Posting with publish permission clears the scheduled publish."""
        self.set_permissions(True)
        # Post unschedule page
        response = self.client.post(self.unschedule_url)
        # Check that the user was redirected to the history page
        self.assertRedirects(
            response,
            reverse(
                "wagtailsnippets_tests_draftstatecustomprimarykeymodel:history",
                args=[quote(self.test_snippet.pk)],
            ),
        )
        self.test_snippet.refresh_from_db()
        self.latest_revision.refresh_from_db()
        # Check that the revision is no longer scheduled
        self.assertIsNone(self.latest_revision.approved_go_live_at)
        # No revisions with approved_go_live_at
        self.assertFalse(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

    def test_post_unschedule_view_bad_permissions(self):
        """Posting without publish permission leaves the schedule intact."""
        self.set_permissions(False)
        # Post unschedule page
        response = self.client.post(self.unschedule_url)
        # Check that the user is redirected to the admin homepage
        self.assertRedirects(response, reverse("wagtailadmin_home"))
        self.test_snippet.refresh_from_db()
        self.latest_revision.refresh_from_db()
        # Check that the revision is still scheduled
        self.assertIsNotNone(self.latest_revision.approved_go_live_at)
        # Revision with approved_go_live_at exists
        self.assertTrue(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )

    def test_post_unschedule_view_with_next_url(self):
        """A ?next= parameter overrides the post-unschedule redirect."""
        self.set_permissions(True)
        edit_url = reverse(
            "wagtailsnippets_tests_draftstatecustomprimarykeymodel:edit",
            args=[quote(self.test_snippet.pk)],
        )
        # Post unschedule page
        response = self.client.post(self.unschedule_url + f"?next={edit_url}")
        # Check that the user was redirected to the next url
        self.assertRedirects(response, edit_url)
        self.test_snippet.refresh_from_db()
        self.latest_revision.refresh_from_db()
        # Check that the revision is no longer scheduled
        self.assertIsNone(self.latest_revision.approved_go_live_at)
        # No revisions with approved_go_live_at
        self.assertFalse(
            Revision.objects.for_instance(self.test_snippet)
            .exclude(approved_go_live_at__isnull=True)
            .exists()
        )
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_unschedule_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/search/backends/deprecation.py | import warnings
from wagtail.utils.deprecation import RemovedInWagtail80Warning
class IndexOptionMixin:
    """
    Mixin for search backends to recognise the deprecated INDEX option in the
    search config
    """

    def __init__(self, params):
        try:
            legacy_index = params.pop("INDEX")
        except KeyError:
            pass
        else:
            warnings.warn(
                "The INDEX option on Elasticsearch / OpenSearch backends is deprecated. "
                "Please use the INDEX_PREFIX option instead.",
                category=RemovedInWagtail80Warning,
            )
            # An explicitly configured INDEX_PREFIX takes precedence over the
            # value derived from the legacy option.
            params.setdefault("INDEX_PREFIX", f"{legacy_index}_")
        super().__init__(params)
# RemovedInWagtail80Warning
class LegacyContentTypeMatchMixin:
    """
    Mixin for query compilers to match content type on either the legacy 'content_type' field
    or the current '_django_content_type' field
    """

    def get_content_type_filter(self):
        # Query content_type using a "match" query. See comment in
        # ElasticsearchBaseMapping.get_document for more details
        mapping = self.mapping_class(self.queryset.model)
        content_type = mapping.get_content_type()
        clauses = [
            {"match": {field_name: content_type}}
            for field_name in ("_django_content_type", "content_type")
        ]
        return {"bool": {"should": clauses}}
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/search/backends/deprecation.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
wagtail/wagtail:wagtail/search/tests/test_elasticsearch.py | import unittest
from django.conf import settings
from django.test import TestCase
from wagtail.search.backends import get_search_backend
from wagtail.utils.deprecation import RemovedInWagtail80Warning
@unittest.skipIf(
    "elasticsearch_with_index_option" not in settings.WAGTAILSEARCH_BACKENDS,
    "No elasticsearch backend active",
)
class TestIndexOptionDeprecation(TestCase):
    """The deprecated INDEX option should warn and map onto INDEX_PREFIX."""

    def test_index_option_deprecation_warning(self):
        expected_message = (
            "The INDEX option on Elasticsearch / OpenSearch backends is deprecated"
        )
        with self.assertWarnsMessage(RemovedInWagtail80Warning, expected_message):
            backend = get_search_backend("elasticsearch_with_index_option")
        self.assertEqual(backend.index_prefix, "wagtailtest_")
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/search/tests/test_elasticsearch.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/search/backends/elasticsearch9.py | from modelsearch.backends.elasticsearch9 import * # noqa: F403
from modelsearch.backends.elasticsearch9 import (
Elasticsearch9AutocompleteQueryCompiler as _Elasticsearch9AutocompleteQueryCompiler,
)
from modelsearch.backends.elasticsearch9 import (
Elasticsearch9SearchBackend as _Elasticsearch9SearchBackend,
)
from modelsearch.backends.elasticsearch9 import (
Elasticsearch9SearchQueryCompiler as _Elasticsearch9SearchQueryCompiler,
)
from wagtail.search.backends.deprecation import (
IndexOptionMixin,
LegacyContentTypeMatchMixin,
)
class Elasticsearch9SearchQueryCompiler(
    LegacyContentTypeMatchMixin, _Elasticsearch9SearchQueryCompiler
):
    """Search query compiler that also matches the legacy content_type field."""


class Elasticsearch9AutocompleteQueryCompiler(
    LegacyContentTypeMatchMixin, _Elasticsearch9AutocompleteQueryCompiler
):
    """Autocomplete query compiler that also matches the legacy content_type field."""


class Elasticsearch9SearchBackend(IndexOptionMixin, _Elasticsearch9SearchBackend):
    # Route searches through the legacy-aware compilers defined above.
    query_compiler_class = Elasticsearch9SearchQueryCompiler
    autocomplete_query_compiler_class = Elasticsearch9AutocompleteQueryCompiler


SearchBackend = Elasticsearch9SearchBackend
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/search/backends/elasticsearch9.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
wagtail/wagtail:wagtail/search/backends/opensearch2.py | from modelsearch.backends.opensearch2 import * # noqa: F403
from modelsearch.backends.opensearch2 import (
OpenSearch2AutocompleteQueryCompiler as _OpenSearch2AutocompleteQueryCompiler,
)
from modelsearch.backends.opensearch2 import (
OpenSearch2SearchBackend as _OpenSearch2SearchBackend,
)
from modelsearch.backends.opensearch2 import (
OpenSearch2SearchQueryCompiler as _OpenSearch2SearchQueryCompiler,
)
from wagtail.search.backends.deprecation import (
IndexOptionMixin,
LegacyContentTypeMatchMixin,
)
class OpenSearch2SearchQueryCompiler(
    LegacyContentTypeMatchMixin, _OpenSearch2SearchQueryCompiler
):
    """Search query compiler that also matches the legacy content_type field."""


class OpenSearch2AutocompleteQueryCompiler(
    LegacyContentTypeMatchMixin, _OpenSearch2AutocompleteQueryCompiler
):
    """Autocomplete query compiler that also matches the legacy content_type field."""


class OpenSearch2SearchBackend(IndexOptionMixin, _OpenSearch2SearchBackend):
    # Route searches through the legacy-aware compilers defined above.
    query_compiler_class = OpenSearch2SearchQueryCompiler
    autocomplete_query_compiler_class = OpenSearch2AutocompleteQueryCompiler


SearchBackend = OpenSearch2SearchBackend
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/search/backends/opensearch2.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
wagtail/wagtail:wagtail/search/backends/opensearch3.py | from modelsearch.backends.opensearch3 import * # noqa: F403
from modelsearch.backends.opensearch3 import (
OpenSearch3AutocompleteQueryCompiler as _OpenSearch3AutocompleteQueryCompiler,
)
from modelsearch.backends.opensearch3 import (
OpenSearch3SearchBackend as _OpenSearch3SearchBackend,
)
from modelsearch.backends.opensearch3 import (
OpenSearch3SearchQueryCompiler as _OpenSearch3SearchQueryCompiler,
)
from wagtail.search.backends.deprecation import (
IndexOptionMixin,
LegacyContentTypeMatchMixin,
)
class OpenSearch3SearchQueryCompiler(
    LegacyContentTypeMatchMixin, _OpenSearch3SearchQueryCompiler
):
    """Search query compiler that also matches the legacy content_type field."""


class OpenSearch3AutocompleteQueryCompiler(
    LegacyContentTypeMatchMixin, _OpenSearch3AutocompleteQueryCompiler
):
    """Autocomplete query compiler that also matches the legacy content_type field."""


class OpenSearch3SearchBackend(IndexOptionMixin, _OpenSearch3SearchBackend):
    # Route searches through the legacy-aware compilers defined above.
    query_compiler_class = OpenSearch3SearchQueryCompiler
    autocomplete_query_compiler_class = OpenSearch3AutocompleteQueryCompiler


SearchBackend = OpenSearch3SearchBackend
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/search/backends/opensearch3.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
wagtail/wagtail:wagtail/contrib/redirects/tests/test_tmp_storages.py | from django.core.cache import cache
from django.test import TestCase
from wagtail.contrib.redirects.tmp_storages import CacheStorage
class CacheStorageTests(TestCase):
    """Round-trip a file through the cache-backed temporary storage."""

    def test_cache_storage_save_and_remove(self):
        payload = b"hello world"
        storage = CacheStorage("testfile.txt")

        # Saving should place the raw content in the cache under the
        # prefixed key.
        storage.save(payload)
        cache_key = storage.CACHE_PREFIX + storage.name
        self.assertEqual(cache.get(cache_key), payload)

        # Removing should evict it again.
        storage.remove()
        self.assertIsNone(cache.get(cache_key))
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/contrib/redirects/tests/test_tmp_storages.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/admin/ui/tables/orderable.py | from collections import OrderedDict
from django.contrib.admin.utils import quote
from django.utils.functional import cached_property
from django.utils.translation import gettext, gettext_lazy
from wagtail.admin.ui.tables import BaseColumn, BulkActionsCheckboxColumn
class OrderingColumn(BaseColumn):
    # Column that renders the drag handle used to reorder rows, via dedicated
    # header/cell templates.
    header_template_name = "wagtailadmin/tables/ordering_header.html"
    cell_template_name = "wagtailadmin/tables/ordering_cell.html"
class OrderableTableMixin:
    """Table mixin that adds drag-to-reorder support when a reorder URL is given."""

    success_message = gettext_lazy("'%(page_title)s' has been moved successfully.")

    def __init__(self, *args, sort_order_field=None, reorder_url=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.sort_order_field = sort_order_field
        self.reorder_url = reorder_url
        if self.reorder_url:
            self._add_ordering_column()

    @cached_property
    def ordering_column(self):
        return OrderingColumn("ordering", width="80px", sort_key=self.sort_order_field)

    def _add_ordering_column(self):
        # Put the ordering column first, and drop any bulk-actions checkbox
        # column, which it replaces.
        reordered = OrderedDict()
        reordered[self.ordering_column.name] = self.ordering_column
        for column_name, column in self.columns.items():
            if isinstance(column, BulkActionsCheckboxColumn):
                continue
            reordered[column_name] = column
        self.columns = reordered

    @property
    def attrs(self):
        attrs = super().attrs
        if not self.reorder_url:
            return attrs
        orderable_attrs = {
            "data-controller": "w-orderable",
            "data-w-orderable-active-class": "w-orderable--active",
            "data-w-orderable-chosen-class": "w-orderable__item--active",
            "data-w-orderable-container-value": "tbody",
            "data-w-orderable-message-value": self.get_success_message(),
            "data-w-orderable-url-value": self.reorder_url,
        }
        return {**attrs, **orderable_attrs}

    def get_success_message(self):
        # __LABEL__ is a placeholder token in the translated message —
        # presumably substituted client-side with the item's label; verify
        # against the w-orderable controller.
        return self.success_message % {"page_title": "__LABEL__"}

    def get_row_attrs(self, instance):
        attrs = super().get_row_attrs(instance)
        if self.reorder_url:
            quoted_pk = quote(instance.pk)
            attrs["id"] = "item_%s" % quoted_pk
            attrs["data-w-orderable-item-id"] = quoted_pk
            attrs["data-w-orderable-item-label"] = str(instance)
            attrs["data-w-orderable-target"] = "item"
        return attrs

    def get_caption(self):
        caption = super().get_caption()
        if caption or not self.reorder_url:
            return caption
        return gettext(
            "Focus on the drag button and press up or down arrows to move the item, then press enter to submit the change."
        )
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/admin/ui/tables/orderable.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
wagtail/wagtail:wagtail/test/testapp/fields.py | from django.db import models
from wagtail.coreutils import multigetattr
class CommentableJSONField(models.JSONField):
    """JSONField that reports whether a comment content path resolves in its value."""

    def get_block_by_content_path(self, value, path_elements):
        # True if the dotted path can be resolved within the stored value.
        dotted_path = ".".join(path_elements)
        try:
            multigetattr(value, dotted_path)
        except AttributeError:
            return False
        return True
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/test/testapp/fields.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/admin/tests/api/test_renderer_classes.py | import json
from unittest.mock import patch
from django.test import TestCase
from django.urls import reverse
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from wagtail.admin.api.views import PagesAdminAPIViewSet
from wagtail.test.numberformat import ignore_numberformat
from wagtail.test.utils import WagtailTestUtils
class TestPagesAdminAPIRendererClasses(WagtailTestUtils, TestCase):
    """Test that PagesAdminAPIViewSet renderer behavior works correctly."""

    def setUp(self):
        self.user = self.login()

    def test_renderer_classes_with_rest_framework_installed(self):
        """Test that both JSONRenderer and BrowsableAPIRenderer are included when rest_framework is installed."""
        renderer_classes = PagesAdminAPIViewSet.renderer_classes
        # Should have both renderers when rest_framework is installed
        self.assertEqual(renderer_classes, [JSONRenderer, BrowsableAPIRenderer])

    @patch("wagtail.api.v2.views.apps.is_installed")
    def test_renderer_classes_without_rest_framework(self, mock_is_installed):
        """Test that only JSONRenderer is included when rest_framework is not installed."""
        # Mock rest_framework as not installed

        def mock_installed(app):
            # Report every app as installed except rest_framework.
            return app != "rest_framework"

        mock_is_installed.side_effect = mock_installed
        renderer_classes = PagesAdminAPIViewSet.renderer_classes
        self.assertEqual(renderer_classes, [JSONRenderer])

    def test_api_response_returns_json_by_default(self):
        """Test that API returns JSON by default."""
        response = self.client.get(reverse("wagtailadmin_api:pages:listing"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        # Should be valid JSON
        content = json.loads(response.content.decode("UTF-8"))
        self.assertIn("meta", content)
        self.assertIn("items", content)

    def test_api_response_returns_json_with_json_accept_header(self):
        """Test that API returns JSON when JSON is explicitly requested."""
        response = self.client.get(
            reverse("wagtailadmin_api:pages:listing"), HTTP_ACCEPT="application/json"
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        # Should be valid JSON
        content = json.loads(response.content.decode("UTF-8"))
        self.assertIn("meta", content)
        self.assertIn("items", content)

    def test_api_response_returns_html_with_html_accept_header(self):
        """Test that API returns HTML when HTML is explicitly requested via Accept header."""
        # ignore_numberformat suppresses template-level number formatting
        # differences in the browsable API's base template.
        with ignore_numberformat(["rest_framework/base.html"]):
            response = self.client.get(
                reverse("wagtailadmin_api:pages:listing"), HTTP_ACCEPT="text/html"
            )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "text/html; charset=utf-8")
        # Should contain HTML content
        content = response.content.decode("UTF-8")
        self.assertIn("<html", content)
        self.assertIn("</html>", content)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/admin/tests/api/test_renderer_classes.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/api/v2/tests/test_renderer_classes.py | import json
from unittest.mock import patch
from django.test import TestCase
from django.urls import reverse
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from wagtail.api.v2.views import BaseAPIViewSet
from wagtail.test.numberformat import ignore_numberformat
from wagtail.test.utils import WagtailTestUtils
class TestBaseAPIViewSetRendererClasses(WagtailTestUtils, TestCase):
    """Checks for BaseAPIViewSet.renderer_classes and content negotiation."""

    def setUp(self):
        self.user = self.login()

    def _get_listing(self, **request_kwargs):
        # Shared helper: hit the pages listing endpoint with optional
        # request headers (e.g. HTTP_ACCEPT).
        return self.client.get(
            reverse("wagtailapi_v2:pages:listing"), **request_kwargs
        )

    def test_renderer_classes_with_rest_framework_installed(self):
        """Test that both JSONRenderer and BrowsableAPIRenderer are included when rest_framework is installed."""
        self.assertEqual(
            BaseAPIViewSet.renderer_classes, [JSONRenderer, BrowsableAPIRenderer]
        )

    @patch("wagtail.api.v2.views.apps.is_installed")
    def test_renderer_classes_without_rest_framework(self, mock_is_installed):
        """Test that only JSONRenderer is included when rest_framework is not installed."""
        # Pretend every app except rest_framework is installed.
        mock_is_installed.side_effect = lambda app: app != "rest_framework"
        self.assertEqual(BaseAPIViewSet.renderer_classes, [JSONRenderer])

    def test_api_response_returns_json_by_default(self):
        """Test that API returns JSON by default."""
        resp = self._get_listing()
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp["Content-Type"], "application/json")
        # The body must parse as JSON and carry the standard envelope keys.
        body = json.loads(resp.content.decode("UTF-8"))
        self.assertIn("meta", body)
        self.assertIn("items", body)

    def test_api_response_returns_json_with_json_accept_header(self):
        """Test that API returns JSON when JSON is explicitly requested."""
        resp = self._get_listing(HTTP_ACCEPT="application/json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp["Content-Type"], "application/json")
        body = json.loads(resp.content.decode("UTF-8"))
        self.assertIn("meta", body)
        self.assertIn("items", body)

    def test_api_response_returns_html_with_html_accept_header(self):
        """Test that API returns HTML when HTML is explicitly requested via Accept header."""
        with ignore_numberformat(["rest_framework/base.html"]):
            resp = self._get_listing(HTTP_ACCEPT="text/html")
            self.assertEqual(resp.status_code, 200)
            self.assertEqual(resp["Content-Type"], "text/html; charset=utf-8")
            # A full rendered HTML document should come back.
            markup = resp.content.decode("UTF-8")
            self.assertIn("<html", markup)
            self.assertIn("</html>", markup)

    def test_api_response_returns_html_with_browser_accept_header(self):
        """Test that API returns HTML when accessed with typical browser Accept headers."""
        with ignore_numberformat(["rest_framework/base.html"]):
            resp = self._get_listing(
                HTTP_ACCEPT="text/html,application/xhtml+xml,application/xml;q=0.9",
            )
            self.assertEqual(resp.status_code, 200)
            self.assertEqual(resp["Content-Type"], "text/html; charset=utf-8")
            markup = resp.content.decode("UTF-8")
            self.assertIn("<html", markup)
            self.assertIn("</html>", markup)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/api/v2/tests/test_renderer_classes.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/tests/test_date_validators.py | import datetime
from unittest.mock import patch
from django.core.exceptions import ValidationError
from django.test import TestCase
from wagtail.fields import NoFutureDateValidator
class TestNoFutureDateValidator(TestCase):
    """Behavioural checks for NoFutureDateValidator."""

    def setUp(self):
        self.validator = NoFutureDateValidator()

    def test_validates_past_date(self):
        """Past dates should pass validation"""
        yesterday = datetime.date.today() - datetime.timedelta(days=1)
        # Must not raise ValidationError.
        self.validator(yesterday)

    def test_validates_today(self):
        """Today's date should pass validation"""
        # Must not raise ValidationError.
        self.validator(datetime.date.today())

    def test_rejects_future_date(self):
        """Future dates should raise ValidationError"""
        tomorrow = datetime.date.today() + datetime.timedelta(days=1)
        with self.assertRaises(ValidationError) as ctx:
            self.validator(tomorrow)
        self.assertEqual(ctx.exception.code, "future_date")
        self.assertEqual(str(ctx.exception.message), "Date cannot be in the future.")

    def test_validates_none_value(self):
        """None values should pass validation (let required validation handle empty values)"""
        # Must not raise ValidationError.
        self.validator(None)

    def test_custom_message(self):
        """Test custom error message"""
        custom_message = "Custom future date error message"
        tomorrow = datetime.date.today() + datetime.timedelta(days=1)
        with self.assertRaises(ValidationError) as ctx:
            NoFutureDateValidator(message=custom_message)(tomorrow)
        self.assertEqual(str(ctx.exception.message), custom_message)

    @patch("wagtail.fields.datetime")
    def test_validates_with_mocked_today(self, mock_datetime):
        """Test that validation uses the correct 'today' reference"""
        # Pin "today" (as seen by wagtail.fields) to 2024-01-15.
        mock_datetime.date.today.return_value = datetime.date(2024, 1, 15)
        # One day past the mocked today must be rejected ...
        with self.assertRaises(ValidationError):
            self.validator(datetime.date(2024, 1, 16))
        # ... while the mocked today and an earlier date pass.
        self.validator(datetime.date(2024, 1, 15))
        self.validator(datetime.date(2024, 1, 14))
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/tests/test_date_validators.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/permission_policies/sites.py | from django.contrib.auth import get_user_model
from django.db.models import Q
from wagtail.models import GroupSitePermission, Site
from .base import BaseDjangoAuthPermissionPolicy
class SitePermissionPolicy(BaseDjangoAuthPermissionPolicy):
    """
    A permission policy for objects that are associated with site records, such as
    wagtail.contrib.settings.models.BaseSiteSetting subclasses. Permissions may be
    assigned globally through standard django.contrib.auth permissions, or for
    individual sites through wagtail.models.GroupSitePermission records.
    """

    # Attribute name under which the user's GroupSitePermission queryset is
    # memoised by get_cached_permissions_for_user (defined on the base class).
    permission_cache_name = "_site_permission_cache"

    def __init__(self, model, auth_model=None, site_field_name="site"):
        """
        :param model: the model this policy applies to
        :param auth_model: the model to look up django.contrib.auth permissions
            against (defaults to ``model``)
        :param site_field_name: name of the foreign key on ``model`` pointing at
            the Site record
        """
        super().__init__(model, auth_model=auth_model)
        self.site_field_name = site_field_name
        # Attname of the FK column (e.g. "site_id"), so instances can be matched
        # against a site without fetching the related Site object.
        self.site_fk_field_name = model._meta.get_field(self.site_field_name).attname

    def get_all_permissions_for_user(self, user):
        # For these users, we can determine the permissions without querying
        # GroupSitePermission; the permission-check methods below short-circuit
        # on is_active / is_anonymous / is_superuser before consulting the cache.
        if not user.is_active or user.is_anonymous or user.is_superuser:
            return GroupSitePermission.objects.none()
        return GroupSitePermission.objects.filter(group__user=user).select_related(
            "permission"
        )

    def _user_has_global_permission(self, user, actions):
        """
        Check if the user has any of the given permissions assigned through a global
        django.contrib.auth permission, either directly or through a group.
        """
        return any(
            user.has_perm(self._get_permission_name(action)) for action in actions
        )

    def user_has_any_permission(self, user, actions):
        """
        Return whether the given user has permission to perform any of the given actions
        on some or all instances of this model
        """
        if not (user.is_authenticated and user.is_active):
            return False
        if user.is_superuser:
            return True
        if self._user_has_global_permission(user, actions):
            return True
        # Fall back to site-specific permissions held via the user's groups.
        codenames = self._get_permission_codenames(actions)
        return any(
            group_site_permission.permission.content_type_id == self._content_type.pk
            and group_site_permission.permission.codename in codenames
            for group_site_permission in self.get_cached_permissions_for_user(user)
        )

    def users_with_any_permission(self, actions):
        """
        Return a queryset of users who have permission to perform any of the given actions
        on some or all instances of this model
        """
        permission_ids = list(
            self._get_permission_objects_for_actions(actions).values_list(
                "id", flat=True
            )
        )
        return get_user_model().objects.filter(
            (
                Q(is_superuser=True)
                # global permissions associated with the user directly
                | Q(user_permissions__in=permission_ids)
                # global permissions associated with any of the user's groups
                | Q(groups__permissions__in=permission_ids)
                # site-specific permissions associated with any of the user's groups
                | Q(groups__site_permissions__permission_id__in=permission_ids)
            )
            & Q(is_active=True)
        )

    def user_has_any_permission_for_instance(self, user, actions, instance):
        """
        Return whether the given user has permission to perform any of the given actions
        on the given model instance (which may be a Site or a model with a `site` foreign key)
        """
        if not (user.is_authenticated and user.is_active):
            return False
        if user.is_superuser:
            return True
        if self._user_has_global_permission(user, actions):
            return True
        codenames = self._get_permission_codenames(actions)
        # Accept either a Site or an instance carrying the site FK.
        if isinstance(instance, Site):
            site_id = instance.pk
        else:
            site_id = getattr(instance, self.site_fk_field_name)
        return any(
            group_site_permission.permission.content_type_id == self._content_type.pk
            and group_site_permission.permission.codename in codenames
            and group_site_permission.site_id == site_id
            for group_site_permission in self.get_cached_permissions_for_user(user)
        )

    def sites_user_has_any_permission_for(self, user, actions):
        """
        Return a queryset of all Site instances for which the given user has
        permission to perform any of the given actions
        """
        if not (user.is_authenticated and user.is_active):
            return Site.objects.none()
        if user.is_superuser:
            return Site.objects.all()
        # Check global permissions before querying for permission ids - a
        # globally-permitted user gets every site and the query would be wasted.
        if self._user_has_global_permission(user, actions):
            return Site.objects.all()
        permission_ids = list(
            self._get_permission_objects_for_actions(actions).values_list(
                "id", flat=True
            )
        )
        # Look for site-specific permissions associated with any of the user's groups
        return Site.objects.filter(
            group_permissions__permission_id__in=permission_ids,
            group_permissions__group__in=user.groups.all(),
        ).distinct()

    def instances_user_has_any_permission_for(self, user, actions):
        """
        Return a queryset of all instances of this model for which the given user has
        permission to perform any of the given actions
        """
        return self.model._default_manager.filter(
            **{
                f"{self.site_field_name}__in": self.sites_user_has_any_permission_for(
                    user, actions
                )
            }
        )

    def users_with_any_permission_for_instance(self, actions, instance):
        """
        Return a queryset of all users who have permission to perform any of the given actions on
        the given model instance (which may be a Site or a model with a `site` foreign key)
        """
        permission_ids = list(
            self._get_permission_objects_for_actions(actions).values_list(
                "id", flat=True
            )
        )
        # Accept either a Site or an instance carrying the site FK.
        if isinstance(instance, Site):
            site = instance
        else:
            site = getattr(instance, self.site_field_name)
        return get_user_model().objects.filter(
            (
                Q(is_superuser=True)
                # global permissions associated with the user directly
                | Q(user_permissions__in=permission_ids)
                # global permissions associated with any of the user's groups
                | Q(groups__permissions__in=permission_ids)
                # site-specific permissions associated with any of the user's groups
                | Q(
                    groups__site_permissions__permission_id__in=permission_ids,
                    groups__site_permissions__site=site,
                )
            )
            & Q(is_active=True)
        )

    # shortcuts for single actions (where BaseDjangoAuthPermissionPolicy does not
    # already implement them this way)
    def user_has_permission(self, user, action):
        return self.user_has_any_permission(user, [action])

    def sites_user_has_permission_for(self, user, action):
        return self.sites_user_has_any_permission_for(user, [action])

    def user_has_permission_for_instance(self, user, action, instance):
        return self.user_has_any_permission_for_instance(user, [action], instance)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/permission_policies/sites.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
wagtail/wagtail:wagtail/tests/permission_policies/test_site_permission_policies.py | from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from wagtail.models import Site
from wagtail.permission_policies.sites import SitePermissionPolicy
from wagtail.test.testapp.models import ImportantPagesSiteSetting, TestSiteSetting
from wagtail.test.utils import WagtailTestUtils
from wagtail.tests.permission_policies.test_permission_policies import (
PermissionPolicyTestUtils,
)
class PermissionPolicyTestCase(PermissionPolicyTestUtils, WagtailTestUtils, TestCase):
    """Shared fixtures for SitePermissionPolicy tests.

    Creates two sites, three groups covering the ways the
    ``change_testsitesetting`` permission can be granted (globally, per default
    site, per other site), plus one user per grant path. Subclasses rely on the
    attribute names created here.
    """

    def setUp(self):
        super().setUp()
        # Policy under test; TestOtherModelPolicy overrides this with a
        # different model.
        self.policy = SitePermissionPolicy(TestSiteSetting)
        self.default_site = Site.objects.get(is_default_site=True)
        self.other_site = Site.objects.create(
            hostname="other.example.com",
            port=80,
            root_page=self.default_site.root_page,
            is_default_site=False,
        )
        self.content_type = ContentType.objects.get_for_model(TestSiteSetting)
        self.change_permission = Permission.objects.get(
            content_type=self.content_type, codename="change_testsitesetting"
        )
        # groups
        # - global django.contrib.auth permission
        self.site_owners = Group.objects.create(name="Site owners")
        self.site_owners.permissions.add(self.change_permission)
        # - per-site GroupSitePermission for the default site only
        self.default_site_owners = Group.objects.create(name="Default site owners")
        self.default_site_owners.site_permissions.create(
            permission=self.change_permission, site=self.default_site
        )
        # - per-site GroupSitePermission for the other site only
        self.other_site_owners = Group.objects.create(name="Other site owners")
        self.other_site_owners.site_permissions.create(
            permission=self.change_permission, site=self.other_site
        )
        self.superuser = self.create_superuser(
            "superuser", "superuser@example.com", "password"
        )
        # Inactive superusers must be denied everywhere.
        self.inactive_superuser = self.create_superuser(
            "inactivesuperuser", "inactivesuperuser@example.com", "password"
        )
        self.inactive_superuser.is_active = False
        self.inactive_superuser.save()
        # users
        # - global permission via group membership
        self.site_owner = self.create_user(
            "siteowner", "siteowner@example.com", "password"
        )
        self.site_owner.groups.add(self.site_owners)
        # - global permission assigned directly to the user
        self.direct_site_owner = self.create_user(
            "directsiteowner", "directsiteowner@example.com", "password"
        )
        self.direct_site_owner.user_permissions.add(self.change_permission)
        # - per-site permission for the default site via group membership
        self.default_site_owner = self.create_user(
            "defaultsiteowner", "defaultsiteowner@example.com", "password"
        )
        self.default_site_owner.groups.add(self.default_site_owners)
        # - per-site permission for the other site via group membership
        self.other_site_owner = self.create_user(
            "othersiteowner", "othersiteowner@example.com", "password"
        )
        self.other_site_owner.groups.add(self.other_site_owners)
class TestSiteSettingPermissionPolicy(PermissionPolicyTestCase):
    """Exercises SitePermissionPolicy against the fixtures built in
    PermissionPolicyTestCase: global, direct, and per-site permission grants."""

    def test_user_has_permission(self):
        # Any grant path (global, direct, or either per-site grant) satisfies
        # the model-wide permission check.
        self.assertUserPermissionMatrix(
            [
                (self.superuser, True),
                (self.inactive_superuser, False),
                (self.site_owner, True),
                (self.direct_site_owner, True),
                (self.default_site_owner, True),
                (self.other_site_owner, True),
            ],
            actions=["change"],
        )

    def test_user_has_permission_for_site(self):
        # Instance-level check against a Site: the other-site-only grant does
        # not carry over to the default site.
        self.assertUserInstancePermissionMatrix(
            self.default_site,
            [
                (self.superuser, True),
                (self.inactive_superuser, False),
                (self.site_owner, True),
                (self.direct_site_owner, True),
                (self.default_site_owner, True),
                (self.other_site_owner, False),
            ],
            actions=["change"],
        )

    def test_user_has_permission_for_site_setting(self):
        # Instance-level check against a setting object carrying the site FK.
        site_setting = TestSiteSetting.objects.create(
            site=self.default_site,
            title="Default site",
            email="defaultsite@example.com",
        )
        self.assertUserInstancePermissionMatrix(
            site_setting,
            [
                (self.superuser, True),
                (self.inactive_superuser, False),
                (self.site_owner, True),
                (self.direct_site_owner, True),
                (self.default_site_owner, True),
                (self.other_site_owner, False),
            ],
            actions=["change"],
        )

    def test_users_with_permission(self):
        # Inactive superuser must be excluded despite the superuser flag.
        self.assertResultSetEqual(
            self.policy.users_with_permission("change"),
            [
                self.superuser,
                self.site_owner,
                self.direct_site_owner,
                self.default_site_owner,
                self.other_site_owner,
            ],
        )

    def test_users_with_permission_for_site(self):
        self.assertResultSetEqual(
            self.policy.users_with_permission_for_instance("change", self.default_site),
            [
                self.superuser,
                self.site_owner,
                self.direct_site_owner,
                self.default_site_owner,
            ],
        )

    def test_users_with_permission_for_setting(self):
        site_setting = TestSiteSetting.objects.create(
            site=self.default_site,
            title="Default site",
            email="defaultsite@example.com",
        )
        self.assertResultSetEqual(
            self.policy.users_with_permission_for_instance("change", site_setting),
            [
                self.superuser,
                self.site_owner,
                self.direct_site_owner,
                self.default_site_owner,
            ],
        )

    def test_sites_user_has_permission_for(self):
        # Global/direct grants cover all sites; per-site grants cover only
        # their own site; inactive superusers get nothing.
        self.assertResultSetEqual(
            self.policy.sites_user_has_permission_for(self.superuser, "change"),
            [self.default_site, self.other_site],
        )
        self.assertResultSetEqual(
            self.policy.sites_user_has_permission_for(
                self.inactive_superuser, "change"
            ),
            [],
        )
        self.assertResultSetEqual(
            self.policy.sites_user_has_permission_for(self.site_owner, "change"),
            [self.default_site, self.other_site],
        )
        self.assertResultSetEqual(
            self.policy.sites_user_has_permission_for(self.direct_site_owner, "change"),
            [self.default_site, self.other_site],
        )
        self.assertResultSetEqual(
            self.policy.sites_user_has_permission_for(
                self.default_site_owner, "change"
            ),
            [self.default_site],
        )
        self.assertResultSetEqual(
            self.policy.sites_user_has_permission_for(self.other_site_owner, "change"),
            [self.other_site],
        )

    def test_instances_user_has_permission_for(self):
        site_setting = TestSiteSetting.objects.create(
            site=self.default_site,
            title="Default site",
            email="defaultsite@example.com",
        )
        # other_site does not have a TestSiteSetting instance, so will be omitted from results
        self.assertResultSetEqual(
            self.policy.instances_user_has_permission_for(self.superuser, "change"),
            [site_setting],
        )
        self.assertResultSetEqual(
            self.policy.instances_user_has_permission_for(
                self.inactive_superuser, "change"
            ),
            [],
        )
        self.assertResultSetEqual(
            self.policy.instances_user_has_permission_for(self.site_owner, "change"),
            [site_setting],
        )
        self.assertResultSetEqual(
            self.policy.instances_user_has_permission_for(
                self.direct_site_owner, "change"
            ),
            [site_setting],
        )
        self.assertResultSetEqual(
            self.policy.instances_user_has_permission_for(
                self.default_site_owner, "change"
            ),
            [site_setting],
        )
        self.assertResultSetEqual(
            self.policy.instances_user_has_permission_for(
                self.other_site_owner, "change"
            ),
            [],
        )
class TestOtherModelPolicy(PermissionPolicyTestCase):
    """Permissions assigned on TestSiteSetting should not 'leak' to other models."""

    def setUp(self):
        super().setUp()
        # Same fixtures as the base class, but the policy now targets a model
        # none of the fixture grants apply to.
        self.policy = SitePermissionPolicy(ImportantPagesSiteSetting)

    def test_user_has_permission(self):
        # Only the (active) superuser passes; all testsitesetting grants are
        # for a different content type.
        self.assertUserPermissionMatrix(
            [
                (self.superuser, True),
                (self.inactive_superuser, False),
                (self.site_owner, False),
                (self.direct_site_owner, False),
                (self.default_site_owner, False),
                (self.other_site_owner, False),
            ],
            actions=["change"],
        )

    def test_user_has_permission_for_site(self):
        # Instance-level check shows the same absence of leakage.
        self.assertUserInstancePermissionMatrix(
            self.default_site,
            [
                (self.superuser, True),
                (self.inactive_superuser, False),
                (self.site_owner, False),
                (self.direct_site_owner, False),
                (self.default_site_owner, False),
                (self.other_site_owner, False),
            ],
            actions=["change"],
        )

    def test_users_with_permission_for_site(self):
        self.assertResultSetEqual(
            self.policy.users_with_permission_for_instance("change", self.default_site),
            [
                self.superuser,
            ],
        )
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/tests/permission_policies/test_site_permission_policies.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wshobson/agents:tools/yt-design-extractor.py | #!/usr/bin/env python3
"""
YouTube Design Concept Extractor
=================================
Extracts transcript + keyframes from a YouTube video and produces
a structured markdown reference document ready for agent consumption.
Usage:
python3 tools/yt-design-extractor.py <youtube_url> [options]
Examples:
python3 tools/yt-design-extractor.py "https://youtu.be/eVnQFWGDEdY"
python3 tools/yt-design-extractor.py "https://youtu.be/eVnQFWGDEdY" --interval 30
python3 tools/yt-design-extractor.py "https://youtu.be/eVnQFWGDEdY" --scene-detect --ocr
python3 tools/yt-design-extractor.py "https://youtu.be/eVnQFWGDEdY" --full # all features
python3 tools/yt-design-extractor.py "https://youtu.be/eVnQFWGDEdY" --ocr --ocr-engine easyocr
Requirements:
pip install yt-dlp youtube-transcript-api
apt install ffmpeg
Optional (OCR via Tesseract):
pip install Pillow pytesseract
apt install tesseract-ocr
Optional (better OCR for stylized text):
pip install easyocr
Optional (color palette extraction):
pip install colorthief
"""
import argparse
import json
import os
import re
import shutil
import subprocess
import sys
import textwrap
from collections import Counter
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from pathlib import Path
from typing import Optional
# Optional imports - gracefully degrade if not available
# Feature flags for optional dependencies; each stays False when the
# corresponding import fails so the rest of the script can degrade gracefully.
PILLOW_AVAILABLE = False
TESSERACT_AVAILABLE = False

try:
    from PIL import Image

    PILLOW_AVAILABLE = True
except ImportError:
    pass

try:
    import pytesseract

    # pytesseract is only usable if Pillow is also present (it takes Image
    # objects), so tie the flag to PILLOW_AVAILABLE rather than True.
    TESSERACT_AVAILABLE = PILLOW_AVAILABLE
except ImportError:
    pass

try:
    import easyocr

    EASYOCR_AVAILABLE = True
except ImportError:
    EASYOCR_AVAILABLE = False

try:
    from colorthief import ColorThief

    COLORTHIEF_AVAILABLE = True
except ImportError:
    COLORTHIEF_AVAILABLE = False
# ---------------------------------------------------------------------------
# Transcript extraction
# ---------------------------------------------------------------------------
def extract_video_id(url: str) -> str:
    """Pull the 11-char video ID out of any common YouTube URL format.

    Accepts watch (?v=/&v=), short-link (youtu.be), /v/, embed, shorts and
    live URLs, or a bare 11-character ID. Exits the program if nothing
    matches.
    """
    patterns = [
        # Anchor "v=" to a query separator so we don't match inside another
        # parameter name (e.g. "...&av=...").
        r"(?:[?&]v=|/v/|youtu\.be/)([a-zA-Z0-9_-]{11})",
        r"(?:embed/)([a-zA-Z0-9_-]{11})",
        r"(?:shorts/)([a-zA-Z0-9_-]{11})",
        r"(?:live/)([a-zA-Z0-9_-]{11})",
    ]
    for pat in patterns:
        m = re.search(pat, url)
        if m:
            return m.group(1)
    # Maybe the user passed a bare ID
    if re.match(r"^[a-zA-Z0-9_-]{11}$", url):
        return url
    sys.exit(f"Could not extract video ID from: {url}")
def get_video_metadata(url: str) -> dict:
    """Use yt-dlp to pull title, description, chapters, duration, etc."""
    print("[*] Fetching video metadata …")
    cmd = ["yt-dlp", "--dump-json", "--no-download", "--no-playlist", url]
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
    except subprocess.TimeoutExpired:
        sys.exit("yt-dlp metadata fetch timed out after 120s.")
    if proc.returncode != 0:
        sys.exit(f"yt-dlp metadata failed:\n{proc.stderr}")
    try:
        return json.loads(proc.stdout)
    except json.JSONDecodeError as e:
        sys.exit(
            f"yt-dlp returned invalid JSON: {e}\nFirst 200 chars: {proc.stdout[:200]}"
        )
def get_transcript(video_id: str) -> list[dict] | None:
    """Grab the transcript via youtube-transcript-api.

    Returns a list of {text, start, duration} dicts, or None if the library
    is missing or the video has no retrievable transcript.
    """
    try:
        # Import the exception types from the package's public namespace
        # instead of the private ``_errors`` module.
        from youtube_transcript_api import (
            NoTranscriptFound,
            TranscriptsDisabled,
            VideoUnavailable,
            YouTubeTranscriptApi,
        )
    except ImportError:
        print("[!] youtube-transcript-api not installed. Skipping transcript.")
        return None
    try:
        print("[*] Fetching transcript …")
        ytt_api = YouTubeTranscriptApi()
        transcript = ytt_api.fetch(video_id)
        return [
            {
                "text": snippet.text,
                "start": snippet.start,
                "duration": snippet.duration,
            }
            for snippet in transcript
        ]
    except (TranscriptsDisabled, NoTranscriptFound, VideoUnavailable) as e:
        print(f"[!] Transcript unavailable ({e}). Will proceed without it.")
        return None
# ---------------------------------------------------------------------------
# Keyframe extraction
# ---------------------------------------------------------------------------
def download_video(url: str, out_dir: Path) -> Path:
    """Download video, preferring 720p or lower. Falls back to best available."""
    print("[*] Downloading video (720p preferred) …")
    cmd = [
        "yt-dlp",
        "-f",
        "bestvideo[height<=720]+bestaudio/best[height<=720]/best",
        "--merge-output-format",
        "mp4",
        "-o",
        str(out_dir / "video.%(ext)s"),
        "--no-playlist",
        url,
    ]
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
    except subprocess.TimeoutExpired:
        sys.exit(
            "Video download timed out after 10 minutes. "
            "The video may be too large or your connection too slow."
        )
    if proc.returncode != 0:
        sys.exit(f"yt-dlp download failed:\n{proc.stderr}")
    # Locate the file yt-dlp produced.
    for candidate in out_dir.iterdir():
        if candidate.name.startswith("video.") and candidate.suffix in (
            ".mp4",
            ".mkv",
            ".webm",
        ):
            return candidate
    sys.exit("Download succeeded but could not locate video file.")
def extract_frames_interval(
    video_path: Path, out_dir: Path, interval: int = 30
) -> list[Path]:
    """Extract one frame every `interval` seconds."""
    frames_dir = out_dir / "frames"
    frames_dir.mkdir(exist_ok=True)
    print(f"[*] Extracting frames every {interval}s …")
    cmd = [
        "ffmpeg",
        "-i",
        str(video_path),
        "-vf",
        f"fps=1/{interval}",
        "-q:v",
        "2",
        str(frames_dir / "frame_%04d.png"),
        "-y",
    ]
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
    except subprocess.TimeoutExpired:
        sys.exit("Frame extraction timed out after 10 minutes.")
    if proc.returncode != 0:
        print(f"[!] ffmpeg frame extraction failed (exit code {proc.returncode}):")
        print(f" {proc.stderr[:500]}")
        return []
    frames = sorted(frames_dir.glob("frame_*.png"))
    if frames:
        print(f" → captured {len(frames)} frames")
    else:
        print(
            "[!] WARNING: ffmpeg ran but produced no frames. "
            "The video may be too short or corrupted."
        )
    return frames
def extract_frames_scene(
    video_path: Path, out_dir: Path, threshold: float = 0.3
) -> list[Path]:
    """Use ffmpeg scene-change detection to grab visually distinct frames."""
    frames_dir = out_dir / "frames_scene"
    frames_dir.mkdir(exist_ok=True)
    print(f"[*] Extracting scene-change frames (threshold={threshold}) …")
    cmd = [
        "ffmpeg",
        "-i",
        str(video_path),
        "-vf",
        f"select='gt(scene,{threshold})',showinfo",
        "-vsync",
        "vfr",
        "-q:v",
        "2",
        str(frames_dir / "scene_%04d.png"),
        "-y",
    ]
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
    except subprocess.TimeoutExpired:
        sys.exit("Scene-change frame extraction timed out after 10 minutes.")
    if proc.returncode != 0:
        print(f"[!] ffmpeg scene detection failed (exit code {proc.returncode}):")
        print(f" {proc.stderr[:500]}")
        return []
    frames = sorted(frames_dir.glob("scene_*.png"))
    if frames:
        print(f" → captured {len(frames)} scene-change frames")
    else:
        print("[!] No scene-change frames detected (try lowering --scene-threshold).")
    return frames
# ---------------------------------------------------------------------------
# OCR extraction
# ---------------------------------------------------------------------------
def ocr_frame_tesseract(frame_path: Path) -> str:
    """Extract text from a frame using Tesseract OCR. Converts to grayscale first."""
    if not TESSERACT_AVAILABLE:
        return ""
    try:
        image = Image.open(frame_path)
        if image.mode != "L":
            # Grayscale input; --psm 6 assumes a uniform block of text.
            image = image.convert("L")
        return pytesseract.image_to_string(image, config="--psm 6").strip()
    except Exception as e:
        print(f"[!] OCR failed for {frame_path}: {e}")
        return ""
def ocr_frame_easyocr(frame_path: Path, reader) -> str:
    """Extract text from a frame using EasyOCR (better for stylized text)."""
    try:
        lines = reader.readtext(str(frame_path), detail=0)
        return "\n".join(lines).strip()
    except Exception as e:
        print(f"[!] OCR failed for {frame_path}: {e}")
        return ""
def run_ocr_on_frames(
    frames: list[Path], ocr_engine: str = "tesseract", workers: int = 4
) -> dict[Path, str]:
    """Run OCR on frames. Tesseract runs in parallel; EasyOCR sequentially.
    Returns {frame_path: text}."""
    if not frames:
        return {}
    results = {}
    # EasyOCR was explicitly requested: its absence is a hard error (sys.exit),
    # unlike the tesseract default, which degrades to "no OCR" below.
    if ocr_engine == "easyocr":
        if not EASYOCR_AVAILABLE:
            sys.exit(
                "EasyOCR was explicitly requested but is not installed.\n"
                " Install: pip install torch torchvision --index-url "
                "https://download.pytorch.org/whl/cpu && pip install easyocr\n"
                " Or use: --ocr-engine tesseract"
            )
        else:
            print("[*] Initializing EasyOCR (this may take a moment) …")
            reader = easyocr.Reader(["en"], gpu=False, verbose=False)
    if ocr_engine == "tesseract" and not TESSERACT_AVAILABLE:
        print("[!] Tesseract/pytesseract not installed, skipping OCR")
        return {}
    print(f"[*] Running OCR on {len(frames)} frames ({ocr_engine}) …")
    if ocr_engine == "easyocr":
        # EasyOCR doesn't parallelize well, run sequentially
        for i, frame in enumerate(frames):
            results[frame] = ocr_frame_easyocr(frame, reader)
            if (i + 1) % 10 == 0:
                print(f" → processed {i + 1}/{len(frames)} frames")
    else:
        # Tesseract can run in parallel
        with ThreadPoolExecutor(max_workers=workers) as executor:
            future_to_frame = {
                executor.submit(ocr_frame_tesseract, f): f for f in frames
            }
            # Futures complete out of order; map each one back to its frame.
            for i, future in enumerate(as_completed(future_to_frame)):
                frame = future_to_frame[future]
                try:
                    results[frame] = future.result()
                except Exception as e:
                    # Record an empty result so every frame has an entry.
                    print(f"[!] OCR failed for {frame}: {e}")
                    results[frame] = ""
                if (i + 1) % 10 == 0:
                    print(f" → processed {i + 1}/{len(frames)} frames")
    # Count frames with meaningful text
    with_text = sum(1 for t in results.values() if len(t) > 10)
    print(f" → found text in {with_text}/{len(frames)} frames")
    return results
# ---------------------------------------------------------------------------
# Color palette extraction
# ---------------------------------------------------------------------------
def extract_color_palette(frame_path: Path, color_count: int = 6) -> list[tuple]:
    """Extract dominant colors from a frame. Returns list of RGB tuples."""
    if not COLORTHIEF_AVAILABLE:
        return []
    try:
        thief = ColorThief(str(frame_path))
        return thief.get_palette(color_count=color_count, quality=5)
    except Exception as e:
        print(f"[!] Color extraction failed for {frame_path}: {e}")
        return []
def rgb_to_hex(rgb: tuple) -> str:
    """Convert RGB tuple to hex color string."""
    r, g, b = rgb
    return f"#{r:02x}{g:02x}{b:02x}"
def analyze_color_palettes(frames: list[Path], sample_size: int = 10) -> dict:
    """Analyze color palettes across sampled frames."""
    if not COLORTHIEF_AVAILABLE or not frames:
        return {}
    # Pick ~sample_size frames spread evenly across the video.
    step = max(1, len(frames) // sample_size)
    sampled = frames[::step][:sample_size]
    print(f"[*] Extracting color palettes from {len(sampled)} frames …")
    all_colors = []
    for frame in sampled:
        all_colors.extend(extract_color_palette(frame))
    if not all_colors:
        return {}

    def round_color(rgb, bucket_size=32):
        # Bucket each channel so near-identical shades collapse together.
        return tuple((c // bucket_size) * bucket_size for c in rgb)

    most_common = Counter(round_color(c) for c in all_colors).most_common(12)
    return {
        "dominant_colors": [rgb_to_hex(c) for c, _ in most_common[:6]],
        "all_sampled_colors": [rgb_to_hex(c) for c in all_colors[:24]],
    }
# ---------------------------------------------------------------------------
# Markdown assembly
# ---------------------------------------------------------------------------
def fmt_timestamp(seconds: float) -> str:
    """Format a second count as H:MM:SS, or M:SS when under an hour."""
    total = int(seconds)
    h, remainder = divmod(total, 3600)
    m, s = divmod(remainder, 60)
    return f"{h}:{m:02d}:{s:02d}" if h else f"{m}:{s:02d}"
def group_transcript(entries: list[dict], chunk_seconds: int = 60) -> list[dict]:
    """Merge transcript snippets into chunks of at least `chunk_seconds` duration."""
    if not entries:
        return []
    groups: list[dict] = []
    chunk = {"start": entries[0]["start"], "text": ""}
    for entry in entries:
        # Close the running chunk once it spans chunk_seconds and has content.
        if entry["start"] - chunk["start"] >= chunk_seconds and chunk["text"]:
            groups.append(chunk)
            chunk = {"start": entry["start"], "text": ""}
        chunk["text"] += " " + entry["text"]
    if chunk["text"]:
        groups.append(chunk)
    # Trim the leading space introduced by the concatenation above.
    for group in groups:
        group["text"] = group["text"].strip()
    return groups
def build_markdown(
    meta: dict,
    transcript: list[dict] | None,
    interval_frames: list[Path],
    scene_frames: list[Path],
    out_dir: Path,
    interval: int,
    ocr_results: Optional[dict[Path, str]] = None,
    color_analysis: Optional[dict] = None,
    chunk_seconds: int = 60,
) -> Path:
    """Assemble the final reference markdown document.

    Args:
        meta: yt-dlp style metadata dict (title, channel, duration, ...).
        transcript: Timestamped transcript entries, or None if unavailable.
        interval_frames: Frames captured at fixed intervals.
        scene_frames: Frames captured on scene changes.
        out_dir: Directory the markdown file (and frame paths) live under.
        interval: Seconds between interval frames (for timestamp labels).
        ocr_results: Optional map of frame path -> OCR text.
        color_analysis: Optional dict with a "dominant_colors" hex list.
        chunk_seconds: Transcript grouping window in seconds (default 60).
            Fix: previously hardcoded to 60, silently ignoring the CLI's
            --chunk-seconds option; exposed as a backward-compatible parameter.

    Returns:
        Path to the written "extracted-reference.md" file.
    """
    title = meta.get("title", "Untitled Video")
    channel = meta.get("channel", meta.get("uploader", "Unknown"))
    duration = meta.get("duration", 0)
    description = meta.get("description", "")
    chapters = meta.get("chapters") or []
    video_url = meta.get("webpage_url", "")
    tags = meta.get("tags") or []
    ocr_results = ocr_results or {}
    color_analysis = color_analysis or {}

    lines: list[str] = []

    # --- Header ---
    lines.append(f"# {title}\n")
    lines.append(f"> **Source:** [{channel}]({video_url}) ")
    lines.append(f"> **Duration:** {fmt_timestamp(duration)} ")
    lines.append(f"> **Extracted:** {datetime.now().strftime('%Y-%m-%d %H:%M')} ")
    if tags:
        lines.append(f"> **Tags:** {', '.join(tags[:15])}")
    lines.append("")

    # --- Color Palette (if extracted) ---
    if color_analysis.get("dominant_colors"):
        lines.append("## Color Palette\n")
        lines.append("Dominant colors detected across the video:\n")
        colors = color_analysis["dominant_colors"]
        # Create color swatches as a table
        lines.append("| Color | Hex |")
        lines.append("|-------|-----|")
        for hex_color in colors:
            # Unicode block for color preview (won't show actual color but placeholder)
            lines.append(f"| ████ | `{hex_color}` |")
        lines.append("")
        lines.append(f"*Full palette: {', '.join(f'`{c}`' for c in colors)}*\n")

    # --- Description ---
    if description:
        lines.append("## Video Description\n")
        # Trim excessively long descriptions
        desc = description[:3000]
        lines.append(f"```\n{desc}\n```\n")

    # --- Chapters ---
    if chapters:
        lines.append("## Chapters\n")
        lines.append("| Timestamp | Title |")
        lines.append("|-----------|-------|")
        for ch in chapters:
            ts = fmt_timestamp(ch.get("start_time", 0))
            lines.append(f"| `{ts}` | {ch.get('title', '')} |")
        lines.append("")

    # --- Transcript ---
    if transcript:
        grouped = group_transcript(transcript, chunk_seconds=chunk_seconds)
        lines.append("## Transcript\n")
        lines.append("<details><summary>Full transcript (click to expand)</summary>\n")
        for g in grouped:
            ts = fmt_timestamp(g["start"])
            lines.append(f"**[{ts}]** {g['text']}\n")
        lines.append("</details>\n")
        # Also create a condensed key-points section with timestamps
        lines.append("## Transcript (Condensed Segments)\n")
        lines.append("Use these timestamped segments to cross-reference with frames.\n")
        for g in grouped:
            ts = fmt_timestamp(g["start"])
            # First ~200 chars of each chunk as a preview
            preview = g["text"][:200]
            if len(g["text"]) > 200:
                preview += " …"
            lines.append(f"- **`{ts}`** — {preview}")
        lines.append("")

    # --- Keyframes ---
    # (ts, relative path, ocr text) tuples.
    # NOTE(review): only interval frames are added to all_frames, so scene
    # frames never appear in the Visual Text Index / Frame Index — confirm
    # whether that is intended.
    all_frames = []
    if interval_frames:
        lines.append(f"## Keyframes (every {interval}s)\n")
        lines.append("Visual reference frames captured at regular intervals.\n")
        for i, f in enumerate(interval_frames):
            rel = os.path.relpath(f, out_dir)
            ts = fmt_timestamp(i * interval)
            lines.append(f"### Frame at `{ts}`\n")
            # NOTE(review): placeholder-free f-string; looks like a stripped
            # markdown image line (rel is otherwise unused here) — confirm
            # against the original source.
            lines.append(f"\n")
            # Include OCR text if available
            ocr_text = ocr_results.get(f, "").strip()
            if ocr_text and len(ocr_text) > 5:
                lines.append("<details><summary>📝 Text detected in frame</summary>\n")
                lines.append(f"```\n{ocr_text}\n```")
                lines.append("</details>\n")
            all_frames.append((ts, rel, ocr_text))
        lines.append("")

    if scene_frames:
        lines.append("## Scene-Change Frames\n")
        lines.append("Frames captured when the visual content changed significantly.\n")
        for i, f in enumerate(scene_frames):
            rel = os.path.relpath(f, out_dir)
            lines.append(f"### Scene {i + 1}\n")
            # NOTE(review): same suspected stripped image line as above.
            lines.append(f"\n")
            # Include OCR text if available
            ocr_text = ocr_results.get(f, "").strip()
            if ocr_text and len(ocr_text) > 5:
                lines.append("<details><summary>📝 Text detected in frame</summary>\n")
                lines.append(f"```\n{ocr_text}\n```")
                lines.append("</details>\n")
        lines.append("")

    # --- Visual Text Index (OCR summary) ---
    frames_with_text = [
        (ts, rel, txt) for ts, rel, txt in all_frames if txt and len(txt) > 10
    ]
    if frames_with_text:
        lines.append("## Visual Text Index\n")
        lines.append("Searchable index of all text detected in video frames.\n")
        lines.append("| Timestamp | Key Text (preview) |")
        lines.append("|-----------|-------------------|")
        for ts, rel, txt in frames_with_text:
            # First line or first 80 chars as preview
            preview = txt.split("\n")[0][:80].replace("|", "\\|")
            if len(txt) > 80:
                preview += "…"
            lines.append(f"| `{ts}` | {preview} |")
        lines.append("")
        # Full text dump for searchability
        lines.append("### All Detected Text (Full)\n")
        lines.append("<details><summary>Click to expand full OCR text</summary>\n")
        for ts, rel, txt in frames_with_text:
            lines.append(f"**[{ts}]**")
            lines.append(f"```\n{txt}\n```\n")
        lines.append("</details>\n")

    # --- Frame index (for quick reference) ---
    if all_frames:
        lines.append("## Frame Index\n")
        lines.append("| Timestamp | File | Has Text |")
        lines.append("|-----------|------|----------|")
        for ts, rel, txt in all_frames:
            has_text = "✓" if txt and len(txt) > 10 else ""
            lines.append(f"| `{ts}` | `{rel}` | {has_text} |")
        lines.append("")

    # --- Footer ---
    lines.append("---\n")
    lines.append("*Generated by `yt-design-extractor.py` — review and curate ")
    lines.append("the content above, then feed this file to your agent.*\n")

    md_path = out_dir / "extracted-reference.md"
    md_path.write_text("\n".join(lines), encoding="utf-8")
    print(f"[✓] Markdown reference written to {md_path}")
    return md_path
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main():
    """CLI entry point: parse arguments, run the extraction pipeline, write outputs.

    Pipeline: fetch metadata -> fetch transcript -> (optionally) download the
    video and extract frames -> (optionally) OCR + color analysis -> write
    the markdown reference plus JSON artifacts into the output directory.
    """
    parser = argparse.ArgumentParser(
        description="Extract design concepts from a YouTube video into a "
        "structured markdown reference document.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent("""\
            Examples:
              %(prog)s "https://youtu.be/eVnQFWGDEdY"
              %(prog)s "https://youtu.be/eVnQFWGDEdY" --full
              %(prog)s "https://youtu.be/eVnQFWGDEdY" --interval 15 --scene-detect --ocr
              %(prog)s "https://youtu.be/eVnQFWGDEdY" --ocr --ocr-engine easyocr --colors
              %(prog)s "https://youtu.be/eVnQFWGDEdY" -o ./my-output
        """),
    )
    parser.add_argument("url", help="YouTube video URL or ID")
    parser.add_argument(
        "-o",
        "--output-dir",
        help="Output directory (default: ./yt-extract-<video_id>)",
    )
    parser.add_argument(
        "--interval",
        type=int,
        default=30,
        help="Seconds between keyframe captures (default: 30)",
    )
    parser.add_argument(
        "--scene-detect",
        action="store_true",
        help="Also extract frames on scene changes (good for visual-heavy videos)",
    )
    parser.add_argument(
        "--scene-threshold",
        type=float,
        default=0.3,
        help="Scene change sensitivity 0.0-1.0, lower = more frames (default: 0.3)",
    )
    parser.add_argument(
        "--transcript-only",
        action="store_true",
        help="Skip video download, only fetch transcript + metadata",
    )
    # NOTE(review): --chunk-seconds is parsed but never forwarded to
    # build_markdown (which hardcodes 60) — confirm intended.
    parser.add_argument(
        "--chunk-seconds",
        type=int,
        default=60,
        help="Group transcript into chunks of N seconds (default: 60)",
    )
    parser.add_argument(
        "--ocr",
        action="store_true",
        help="Run OCR on frames to extract on-screen text",
    )
    parser.add_argument(
        "--ocr-engine",
        choices=["tesseract", "easyocr"],
        default="tesseract",
        help="OCR engine: 'tesseract' (fast) or 'easyocr' (better for stylized text)",
    )
    parser.add_argument(
        "--colors",
        action="store_true",
        help="Extract color palette from frames",
    )
    parser.add_argument(
        "--full",
        action="store_true",
        help="Enable all features: scene-detect, OCR, and color extraction",
    )
    args = parser.parse_args()

    # --full enables everything
    if args.full:
        args.scene_detect = True
        args.ocr = True
        args.colors = True

    # Upfront dependency checks
    if not shutil.which("yt-dlp"):
        sys.exit(
            "Required tool 'yt-dlp' not found on PATH. Install with: pip install yt-dlp"
        )
    if not args.transcript_only and not shutil.which("ffmpeg"):
        sys.exit(
            "Required tool 'ffmpeg' not found on PATH. "
            "Install with: make install-ocr (or: brew install ffmpeg)"
        )

    video_id = extract_video_id(args.url)
    out_dir = Path(args.output_dir or f"./yt-extract-{video_id}")
    out_dir.mkdir(parents=True, exist_ok=True)

    # 1. Metadata
    meta = get_video_metadata(args.url)
    # Dump raw metadata for future reference
    (out_dir / "metadata.json").write_text(
        json.dumps(meta, indent=2, default=str), encoding="utf-8"
    )
    print(f"    Title: {meta.get('title')}")
    print(f"    Channel: {meta.get('channel', meta.get('uploader'))}")
    print(f"    Duration: {fmt_timestamp(meta.get('duration', 0))}")

    # 2. Transcript
    transcript = get_transcript(video_id)

    # 3. Keyframes
    interval_frames: list[Path] = []
    scene_frames: list[Path] = []
    # OCR and color analysis results
    ocr_results: dict[Path, str] = {}
    color_analysis: dict = {}

    if not args.transcript_only:
        video_path = download_video(args.url, out_dir)
        try:
            interval_frames = extract_frames_interval(
                video_path, out_dir, interval=args.interval
            )
            if args.scene_detect:
                scene_frames = extract_frames_scene(
                    video_path, out_dir, threshold=args.scene_threshold
                )
        finally:
            # Always clean up video file to save space
            print("[*] Removing downloaded video to save space …")
            video_path.unlink(missing_ok=True)

        # 4. OCR extraction
        if args.ocr:
            all_frames_for_ocr = interval_frames + scene_frames
            ocr_results = run_ocr_on_frames(
                all_frames_for_ocr,
                ocr_engine=args.ocr_engine,
            )
            # Save OCR results to JSON for reuse
            ocr_json = {str(k): v for k, v in ocr_results.items()}
            (out_dir / "ocr-results.json").write_text(
                json.dumps(ocr_json, indent=2), encoding="utf-8"
            )

        # 5. Color palette analysis
        if args.colors:
            all_frames_for_color = interval_frames + scene_frames
            color_analysis = analyze_color_palettes(all_frames_for_color)
            if color_analysis:
                (out_dir / "color-palette.json").write_text(
                    json.dumps(color_analysis, indent=2), encoding="utf-8"
                )
    else:
        print("[*] --transcript-only: skipping video download")

    # 6. Build markdown
    md_path = build_markdown(
        meta,
        transcript,
        interval_frames,
        scene_frames,
        out_dir,
        args.interval,
        ocr_results=ocr_results,
        color_analysis=color_analysis,
    )

    # Summary
    print("\n" + "=" * 60)
    print("DONE! Output directory:", out_dir)
    print("=" * 60)
    print(f"  Reference doc  : {md_path}")
    print(f"  Metadata       : {out_dir / 'metadata.json'}")
    if interval_frames:
        print(f"  Interval frames: {len(interval_frames)} in frames/")
    if scene_frames:
        print(f"  Scene frames   : {len(scene_frames)} in frames_scene/")
    if ocr_results:
        frames_with_text = sum(1 for t in ocr_results.values() if len(t) > 10)
        print(
            f"  OCR results    : {frames_with_text} frames with text → ocr-results.json"
        )
    if color_analysis:
        print(
            f"  Color palette  : {len(color_analysis.get('dominant_colors', []))} colors → color-palette.json"
        )
    print()
    print("Next steps:")
    print("  1. Review extracted-reference.md")
    print("  2. Curate/annotate the content for your agent")
    print("  3. Feed the file to Claude to generate a SKILL.md or agent definition")
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "wshobson/agents",
"file_path": "tools/yt-design-extractor.py",
"license": "MIT License",
"lines": 703,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:etc/unittest/test_cli.py | from __future__ import annotations
import os
import tempfile
import unittest
from g4f.cli import get_api_parser, run_api_args
import g4f.cli as cli_mod
import g4f.cookies as cookies_mod
class TestCLI(unittest.TestCase):
    """Tests for the g4f CLI's --cookies-dir option handling."""

    def test_api_parser_includes_cookies_dir(self):
        # The API argument parser must expose --cookies-dir and store its value.
        parser = get_api_parser(exit_on_error=False)
        args = parser.parse_args(["--cookies-dir", "/tmp/foo"])
        self.assertEqual(args.cookies_dir, "/tmp/foo")

    def test_run_api_args_sets_cookies_dir(self):
        # create a temporary directory to simulate user input
        tmpdir = tempfile.mkdtemp()
        parser = get_api_parser(exit_on_error=False)
        args = parser.parse_args(["--cookies-dir", tmpdir])
        called = {}
        # patch run_api so we don't actually start uvicorn
        orig_run = cli_mod.run_api
        orig_set = cookies_mod.set_cookies_dir
        try:
            cli_mod.run_api = lambda **kwargs: called.setdefault('ran', True)
            cookies_mod.set_cookies_dir = lambda d: called.setdefault('dir', d)
            run_api_args(args)
            self.assertTrue(called.get('ran'), "run_api should have been called")
            self.assertEqual(called.get('dir'), tmpdir,
                             "cookies directory should be passed to set_cookies_dir")
        finally:
            # Always restore the monkey-patched module attributes, even on failure.
            cli_mod.run_api = orig_run
            cookies_mod.set_cookies_dir = orig_set
# Run this test module directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/unittest/test_cli.py",
"license": "GNU General Public License v3.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
xtekky/gpt4free:etc/unittest/test_api_quota.py | from __future__ import annotations
import unittest
from fastapi.testclient import TestClient
import g4f.api
from g4f import Provider
class TestApiQuota(unittest.TestCase):
    """Tests for the /api/<provider>/quota FastAPI route."""

    def setUp(self):
        # create fresh FastAPI app instance for each test
        self.app = g4f.api.create_app()
        self.client = TestClient(self.app)

    def test_nonexistent_provider_returns_404(self):
        # Unknown provider names must map to HTTP 404, not a server error.
        resp = self.client.get("/api/NoSuchProvider/quota")
        self.assertEqual(resp.status_code, 404)

    def test_dummy_provider_quota_route(self):
        # monkeypatch a fake provider with async get_quota method
        class DummyProvider:
            async def get_quota(self, api_key=None):
                return {"foo": "bar"}

        Provider.__map__["dummy"] = DummyProvider()
        try:
            resp = self.client.get("/api/dummy/quota")
            self.assertEqual(resp.status_code, 200)
            self.assertEqual(resp.json(), {"foo": "bar"})
        finally:
            # Remove the injected provider so other tests see a clean registry.
            Provider.__map__.pop("dummy", None)
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/unittest/test_api_quota.py",
"license": "GNU General Public License v3.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
xtekky/gpt4free:g4f/Provider/github/GithubCopilot.py | from __future__ import annotations
import sys
import json
import time
import asyncio
import aiohttp
from pathlib import Path
from typing import Optional
from ...typing import Messages, AsyncResult
from ...errors import MissingAuthError
from ..template import OpenaiTemplate
from ...providers.asyncio import get_running_loop
from .copilotTokenProvider import CopilotTokenProvider, EDITOR_VERSION, EDITOR_PLUGIN_VERSION, USER_AGENT, API_VERSION
from .sharedTokenManager import TokenManagerError, SharedTokenManager
from .githubOAuth2 import GithubOAuth2Client
from .oauthFlow import launch_browser_for_oauth
class GithubCopilot(OpenaiTemplate):
    """
    GitHub Copilot provider with OAuth authentication.

    This provider uses GitHub OAuth device flow for authentication,
    allowing users to authenticate via browser without sharing credentials.

    Usage:
        1. Run `g4f auth github-copilot` to authenticate
        2. Use the provider normally after authentication

    Example:
        >>> from g4f.client import Client
        >>> from g4f.Provider.github import GithubCopilot
        >>> client = Client(provider=GithubCopilot)
        >>> response = client.chat.completions.create(
        ...     model="gpt-4o",
        ...     messages=[{"role": "user", "content": "Hello!"}]
        ... )
    """
    label = "GitHub Copilot (OAuth) 🔐"
    url = "https://github.com/copilot"
    login_url = "https://github.com/login"
    working = True
    needs_auth = True
    active_by_default = True
    default_model = "gpt-4.1"
    base_url = "https://api.githubcopilot.com"
    # Static model list used when the live model listing cannot be fetched.
    fallback_models = [
        # GPT-5 Series
        "gpt-5",
        "gpt-5-mini",
        "gpt-5.1",
        "gpt-5.2",
        # GPT-5 Codex (optimized for code)
        "gpt-5-codex",
        "gpt-5.1-codex",
        "gpt-5.1-codex-mini",
        "gpt-5.1-codex-max",
        "gpt-5.2-codex",
        "gpt-5.3-codex",
        # GPT-4 Series
        "gpt-4.1",
        "gpt-4.1-2025-04-14",
        "gpt-4o",
        "gpt-4o-mini",
        "gpt-4o-2024-11-20",
        "gpt-4o-2024-08-06",
        "gpt-4o-2024-05-13",
        "gpt-4o-mini-2024-07-18",
        "gpt-4",
        "gpt-4-0613",
        "gpt-4-0125-preview",
        "gpt-4-o-preview",
        # Claude 4 Series
        "claude-opus-4.6",
        "claude-opus-4.6-fast",
        "claude-opus-4.5",
        "claude-sonnet-4.5",
        "claude-sonnet-4",
        "claude-haiku-4.5",
        # Gemini Series
        "gemini-3-pro-preview",
        "gemini-3-flash-preview",
        "gemini-2.5-pro",
        # Grok
        "grok-code-fast-1",
        # Legacy GPT-3.5
        "gpt-3.5-turbo",
        "gpt-3.5-turbo-0613",
        # Embeddings
        "text-embedding-3-small",
        "text-embedding-ada-002",
    ]

    # Lazily-created token provider shared by all class methods.
    _token_provider: Optional[CopilotTokenProvider] = None

    @classmethod
    def _get_token_provider(cls) -> CopilotTokenProvider:
        """Return the shared CopilotTokenProvider, creating it on first use."""
        if cls._token_provider is None:
            cls._token_provider = CopilotTokenProvider()
        return cls._token_provider

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        **kwargs
    ) -> AsyncResult:
        """
        Create an async generator for chat completions.

        If api_key is provided, it will be used directly.
        Otherwise, OAuth credentials will be used.

        Raises:
            MissingAuthError: when no API key is given and no OAuth
                credentials are available.
        """
        # If no API key provided, use OAuth token
        if not api_key:
            try:
                token_provider = cls._get_token_provider()
                creds = await token_provider.get_valid_token()
                api_key = creds.get("token")
                if not api_key:
                    raise MissingAuthError(
                        "GitHub Copilot OAuth not configured. "
                        "Please run 'g4f auth github-copilot' to authenticate."
                    )
                if not base_url:
                    # The token response may carry a tenant-specific endpoint.
                    base_url = creds.get("endpoint", cls.base_url)
            except TokenManagerError as e:
                # Translate "needs login" token errors into MissingAuthError
                # so callers get a consistent auth failure type.
                if "login" in str(e).lower() or "credentials" in str(e).lower():
                    raise MissingAuthError(
                        "GitHub Copilot OAuth not configured. "
                        "Please run 'g4f auth github-copilot' to authenticate."
                    ) from e
                raise

        # Use parent class for actual API calls
        async for chunk in super().create_async_generator(
            model,
            messages,
            api_key=api_key,
            base_url=base_url or cls.base_url,
            **kwargs
        ):
            yield chunk

    @classmethod
    def get_models(cls, api_key: Optional[str] = None, base_url: Optional[str] = None, timeout: Optional[int] = None):
        """List available models, resolving an OAuth token first if needed."""
        # If no API key provided, use OAuth token
        if not api_key:
            try:
                token_provider = cls._get_token_provider()
                # Presumably guards against calling asyncio.run() from inside a
                # running event loop — confirm get_running_loop semantics.
                get_running_loop(check_nested=True)
                creds = asyncio.run(token_provider.get_valid_token())
                api_key = creds.get("token")
                if not base_url:
                    base_url = creds.get("endpoint", cls.base_url)
            except TokenManagerError as e:
                if "login" in str(e).lower() or "credentials" in str(e).lower():
                    raise MissingAuthError(
                        "GitHub Copilot OAuth not configured. "
                        "Please run 'g4f auth github-copilot' to authenticate."
                    ) from e
                raise
        return super().get_models(api_key, base_url, timeout)

    @classmethod
    def get_headers(cls, stream: bool, api_key: str | None = None, headers: dict[str, str] | None = None) -> dict[str, str]:
        """Return request headers with the Copilot-specific fields added.

        Note: values from the parent class / caller override the Copilot
        defaults because `update` is applied last.
        """
        headers_result = super().get_headers(stream, api_key or "", headers or {})

        # Add required Copilot headers
        copilot_headers: dict[str, str] = {
            "Editor-Version": EDITOR_VERSION,
            "Editor-Plugin-Version": EDITOR_PLUGIN_VERSION,
            "Openai-Organization": "github-copilot",
            "Copilot-Integration-Id": "vscode-chat",
            "X-GitHub-Api-Version": "2024-12-15",
        }
        if headers_result:
            copilot_headers.update(headers_result)

        return copilot_headers

    @classmethod
    async def login(cls, credentials_path: Optional[Path] = None) -> SharedTokenManager:
        """
        Perform interactive OAuth login and save credentials.

        Args:
            credentials_path: Path to save credentials (default: g4f cache).
                NOTE(review): currently unused by the implementation — confirm.

        Returns:
            SharedTokenManager with active credentials

        Example:
            >>> import asyncio
            >>> from g4f.Provider.github import GithubCopilot
            >>> asyncio.run(GithubCopilot.login())
        """
        print("\n" + "=" * 60)
        print("GitHub Copilot OAuth Login")
        print("=" * 60)
        await launch_browser_for_oauth()
        shared_manager = SharedTokenManager.getInstance()
        print("=" * 60 + "\n")
        return shared_manager

    @classmethod
    def has_credentials(cls) -> bool:
        """Check if valid credentials exist."""
        shared_manager = SharedTokenManager.getInstance()
        try:
            path = shared_manager.getCredentialFilePath()
            return path.exists()
        except Exception:
            # Any filesystem/path error is treated as "no credentials".
            return False

    @classmethod
    def get_credentials_path(cls) -> Optional[Path]:
        """Get path to credentials file if it exists."""
        shared_manager = SharedTokenManager.getInstance()
        try:
            path = shared_manager.getCredentialFilePath()
            if path.exists():
                return path
        except Exception:
            pass
        return None

    @classmethod
    async def get_quota(cls, api_key: Optional[str] = None) -> dict:
        """
        Fetch and summarize current GitHub Copilot usage/quota information.

        Returns a dictionary with usage details or raises an exception on failure.

        Raises:
            MissingAuthError: when no OAuth credentials are stored.
            RuntimeError: when the GitHub API returns a non-200 response.
        """
        client = GithubOAuth2Client()
        github_creds = await client.sharedManager.getValidCredentials(client)
        if not github_creds or not github_creds.get("access_token"):
            raise MissingAuthError(
                "GitHub Copilot OAuth not configured. "
                "Please run 'g4f auth github-copilot' to authenticate."
            )
        github_token = github_creds["access_token"]
        # NOTE: the f-prefix is unnecessary here (no placeholders).
        url = f"https://api.github.com/copilot_internal/user"
        headers = {
            "accept": "application/json",
            "authorization": f"token {github_token}",
            "editor-version": EDITOR_VERSION,
            "editor-plugin-version": EDITOR_PLUGIN_VERSION,
            "user-agent": USER_AGENT,
            "x-github-api-version": API_VERSION,
            "x-vscode-user-agent-library-version": "electron-fetch",
        }
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as resp:
                if resp.status != 200:
                    text = await resp.text()
                    raise RuntimeError(f"Failed to fetch Copilot usage: {resp.status} {text}")
                usage = await resp.json()
        return usage
async def main(args: Optional[list[str]] = None) -> None:
    """CLI entry point for GitHub Copilot OAuth authentication.

    Args:
        args: Argument list to parse (defaults to sys.argv).
            Fix: the annotation previously referenced `List`, which is never
            imported in this module (only `Optional` is); it only worked
            because annotations are lazy here. `list[str]` needs no import.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="GitHub Copilot OAuth Authentication for gpt4free",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s login     # Interactive device code login
  %(prog)s status    # Check authentication status
  %(prog)s logout    # Remove saved credentials
        """
    )
    subparsers = parser.add_subparsers(dest="command", help="Commands")

    # Login command
    subparsers.add_parser("login", help="Authenticate with GitHub Copilot")
    # Status command
    subparsers.add_parser("status", help="Check authentication status")
    # Logout command
    subparsers.add_parser("logout", help="Remove saved credentials")

    args = parser.parse_args(args)

    if args.command == "login":
        try:
            await GithubCopilot.login()
        except KeyboardInterrupt:
            print("\n\nLogin cancelled.")
            sys.exit(1)
        except Exception as e:
            print(f"\n❌ Login failed: {e}")
            sys.exit(1)
    elif args.command == "status":
        print("\nGitHub Copilot OAuth Status")
        print("=" * 40)
        if GithubCopilot.has_credentials():
            creds_path = GithubCopilot.get_credentials_path()
            print(f"✓ Credentials found at: {creds_path}")
            try:
                with creds_path.open() as f:
                    creds = json.load(f)
                expiry = creds.get("expiry_date")
                if expiry:
                    # expiry_date is stored in milliseconds since the epoch.
                    expiry_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(expiry / 1000))
                    if expiry / 1000 > time.time():
                        print(f"  Token expires: {expiry_time}")
                    else:
                        print(f"  Token expired: {expiry_time}")
                if creds.get("scope"):
                    print(f"  Scope: {creds['scope']}")
            except Exception as e:
                print(f"  (Could not read credential details: {e})")
        else:
            print("✗ No credentials found")
            # Fix: dropped a spurious f-prefix on this placeholder-free string.
            print("\nRun 'g4f auth github-copilot' to authenticate.")
        print()
    elif args.command == "logout":
        print("\nGitHub Copilot OAuth Logout")
        print("=" * 40)
        removed = False
        shared_manager = SharedTokenManager.getInstance()
        path = shared_manager.getCredentialFilePath()
        if path.exists():
            path.unlink()
            print(f"✓ Removed: {path}")
            removed = True
        # Also try the default location
        default_path = Path.home() / ".github-copilot" / "oauth_creds.json"
        if default_path.exists() and default_path != path:
            default_path.unlink()
            print(f"✓ Removed: {default_path}")
            removed = True
        if removed:
            print("\n✓ Credentials removed successfully.")
        else:
            print("No credentials found to remove.")
        print()
    else:
        # No subcommand given: show usage instead of failing.
        parser.print_help()
def cli_main(args: Optional[list[str]] = None) -> None:
    """Synchronous CLI entry point for setup.py console_scripts.

    Fix: the annotation previously referenced `List`, which this module never
    imports; `list[str]` requires no import and evaluates correctly.
    """
    asyncio.run(main(args))
# Allow running this module directly: python -m ... login|status|logout
if __name__ == "__main__":
    cli_main()
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/github/GithubCopilot.py",
"license": "GNU General Public License v3.0",
"lines": 322,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/github/githubOAuth2.py | import time
from typing import Dict, Optional, Union
import aiohttp
from .stubs import IGithubOAuth2Client, GithubCredentials, ErrorDataDict
from .sharedTokenManager import SharedTokenManager
# GitHub OAuth endpoints
GITHUB_DEVICE_CODE_ENDPOINT = "https://github.com/login/device/code"
GITHUB_TOKEN_ENDPOINT = "https://github.com/login/oauth/access_token"
# GitHub Copilot OAuth Client ID (VS Code Extension)
GITHUB_COPILOT_CLIENT_ID = "Iv1.b507a08c87ecfe98"
# Scopes needed for Copilot
GITHUB_COPILOT_SCOPE = "read:user"
TOKEN_REFRESH_BUFFER_MS = 30 * 1000 # 30 seconds
def object_to_urlencoded(data: Dict[str, str]) -> str:
    """Serialize a dict as an application/x-www-form-urlencoded body.

    Fix: the previous implementation joined raw key=value pairs without
    percent-encoding, so any value containing '&', '=', '+' or spaces would
    corrupt the form body. urllib.parse.urlencode applies proper encoding.
    """
    from urllib.parse import urlencode
    return urlencode(data)
def isDeviceAuthorizationSuccess(response: Union[Dict, ErrorDataDict]) -> bool:
    """Return True when GitHub issued a device code (successful device auth)."""
    return "device_code" in response
def isDeviceTokenSuccess(response: Union[Dict, ErrorDataDict]) -> bool:
    """True when the response carries a non-empty string access token."""
    token = response.get("access_token")
    return isinstance(token, str) and len(token) > 0
def isDeviceTokenPending(response: Union[Dict, ErrorDataDict]) -> bool:
    """True when GitHub reports the user has not yet approved the device."""
    error_code = response.get("error")
    return error_code == "authorization_pending"
def isSlowDown(response: Union[Dict, ErrorDataDict]) -> bool:
    """True when GitHub asks the client to poll less frequently."""
    error_code = response.get("error")
    return error_code == "slow_down"
def isErrorResponse(response: Union[Dict, ErrorDataDict]) -> bool:
    """True for hard OAuth errors (anything but the two retryable codes)."""
    retryable = {"authorization_pending", "slow_down"}
    return "error" in response and response.get("error") not in retryable
class GithubOAuth2Client(IGithubOAuth2Client):
    """GitHub OAuth2 device-flow client used for Copilot authentication."""

    def __init__(self, client_id: str = GITHUB_COPILOT_CLIENT_ID):
        """Initialize with an OAuth client id and the shared token manager."""
        self.client_id = client_id
        self.credentials: GithubCredentials = GithubCredentials()
        self.sharedManager = SharedTokenManager.getInstance()

    def setCredentials(self, credentials: GithubCredentials):
        """Replace the client's in-memory credentials."""
        self.credentials = credentials

    def getCredentials(self) -> GithubCredentials:
        """Return the client's in-memory credentials."""
        return self.credentials

    async def getAccessToken(self) -> Dict[str, Optional[str]]:
        """Return {"token": ...} from the shared manager, or None on failure."""
        try:
            credentials = await self.sharedManager.getValidCredentials(self)
            return {"token": credentials.get("access_token")}
        except Exception:
            # fallback to internal credentials if valid
            if (
                self.credentials.get("access_token")
                and self.isTokenValid(self.credentials)
            ):
                return {"token": self.credentials["access_token"]}
            return {"token": None}

    async def requestDeviceAuthorization(self, options: dict) -> Union[Dict, ErrorDataDict]:
        """
        Request device authorization from GitHub.

        Args:
            options: dict; "scope" overrides the default Copilot scope.

        Returns:
            dict with device_code, user_code, verification_uri, expires_in, interval

        Raises:
            Exception: on a non-200 response or an error payload.
        """
        body_data = {
            "client_id": self.client_id,
            "scope": options.get("scope", GITHUB_COPILOT_SCOPE),
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(
                GITHUB_DEVICE_CODE_ENDPOINT,
                headers={
                    "Content-Type": "application/x-www-form-urlencoded",
                    "Accept": "application/json",
                },
                data=object_to_urlencoded(body_data)
            ) as resp:
                resp_json = await resp.json()
                if resp.status != 200:
                    raise Exception(f"Device authorization failed {resp.status}: {resp_json}")
                if not isDeviceAuthorizationSuccess(resp_json):
                    raise Exception(
                        f"Device authorization error: {resp_json.get('error')} - {resp_json.get('error_description')}"
                    )
                return resp_json

    async def pollDeviceToken(self, options: dict) -> Union[Dict, ErrorDataDict]:
        """
        Poll for device token from GitHub.

        Args:
            options: dict with device_code

        Returns:
            dict with access_token, token_type, scope — or {"status": "pending"}
            (with "slowDown": True when GitHub asks for a longer interval).

        Raises:
            Exception: on expired_token, access_denied, or any other error.
        """
        body_data = {
            "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
            "client_id": self.client_id,
            "device_code": options["device_code"],
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(
                GITHUB_TOKEN_ENDPOINT,
                headers={
                    "Content-Type": "application/x-www-form-urlencoded",
                    "Accept": "application/json",
                },
                data=object_to_urlencoded(body_data)
            ) as resp:
                resp_json = await resp.json()

                # Check for OAuth RFC 8628 responses
                if "error" in resp_json:
                    if resp_json["error"] == "authorization_pending":
                        return {"status": "pending"}
                    if resp_json["error"] == "slow_down":
                        return {"status": "pending", "slowDown": True}
                    if resp_json["error"] == "expired_token":
                        raise Exception("Device code expired. Please try again.")
                    if resp_json["error"] == "access_denied":
                        raise Exception("Authorization was denied by the user.")
                    raise Exception(f"Token poll failed: {resp_json.get('error')} - {resp_json.get('error_description')}")

                return resp_json

    def isTokenValid(self, credentials: GithubCredentials) -> bool:
        """GitHub tokens don't expire by default, but we track expiry_date if set"""
        if not credentials.get("access_token"):
            return False
        expiry_date = credentials.get("expiry_date")
        if expiry_date is None:
            # GitHub tokens don't expire unless explicitly set
            return True
        # expiry_date is in milliseconds; refresh slightly before real expiry.
        return time.time() * 1000 < expiry_date - TOKEN_REFRESH_BUFFER_MS
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/github/githubOAuth2.py",
"license": "GNU General Public License v3.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/github/oauthFlow.py | import asyncio
import webbrowser
import time
from .githubOAuth2 import GithubOAuth2Client, GITHUB_COPILOT_SCOPE
async def launch_browser_for_oauth(client_id: str = None):
    """
    Perform GitHub OAuth device flow for authentication.

    This function:
    1. Requests a device code from GitHub
    2. Opens a browser for the user to authenticate
    3. Polls for the access token
    4. Saves the credentials

    Args:
        client_id: Optional custom client ID (defaults to Copilot VS Code extension)

    Returns:
        The saved credentials dict on success, or None on failure/timeout.
    """
    # Initialize OAuth client
    client = GithubOAuth2Client(client_id) if client_id else GithubOAuth2Client()

    # Request device code
    print("Requesting device authorization from GitHub...")
    device_auth = await client.requestDeviceAuthorization({
        "scope": GITHUB_COPILOT_SCOPE,
    })

    # Check device auth success
    if not isinstance(device_auth, dict) or "device_code" not in device_auth:
        print("Failed to receive device code")
        return None

    # Show user instructions
    user_code = device_auth.get("user_code")
    verification_uri = device_auth.get("verification_uri", "https://github.com/login/device")
    print("\n" + "=" * 60)
    print("GitHub Copilot Authorization")
    print("=" * 60)
    print(f"\nPlease visit: {verification_uri}")
    print(f"Enter code:   {user_code}")
    print("=" * 60 + "\n")

    # Attempt to automatically open the URL
    try:
        webbrowser.open(verification_uri)
        print("Browser opened automatically.")
    except Exception:
        print(f"Please open the URL manually in your browser: {verification_uri}")

    # Start polling for token
    device_code = device_auth["device_code"]
    expires_in = device_auth.get("expires_in", 900)  # default 15 min
    interval = device_auth.get("interval", 5)  # default 5 seconds
    start_time = time.time()

    print("\nWaiting for authorization... Press Ctrl+C to cancel.")

    while True:
        # Give up once the device code itself has expired.
        if time.time() - start_time > expires_in:
            print("\nAuthorization timed out. Please try again.")
            return None

        # Poll for token
        token_response = await client.pollDeviceToken({
            "device_code": device_code,
        })

        if isinstance(token_response, dict):
            if token_response.get("status") == "pending":
                if token_response.get("slowDown"):
                    interval += 5  # Increase interval as requested by GitHub
                print(".", end="", flush=True)
                await asyncio.sleep(interval)
                continue
            elif "access_token" in token_response:
                # Success
                print("\n\n✓ Authorization successful!")

                # Save credentials
                credentials = {
                    "access_token": token_response["access_token"],
                    "token_type": token_response.get("token_type", "bearer"),
                    "scope": token_response.get("scope", ""),
                    # GitHub tokens don't expire, but we can set a far future date
                    "expiry_date": int(time.time() * 1000) + (365 * 24 * 60 * 60 * 1000),  # 1 year
                }
                await client.sharedManager.saveCredentialsToFile(credentials)
                print(f"Credentials saved to: {client.sharedManager.getCredentialFilePath()}")
                return credentials
            else:
                print(f"\nError during polling: {token_response}")
                return None
        else:
            print(f"\nUnexpected response: {token_response}")
            return None
async def main():
    """Entry point: run the GitHub Copilot device-code OAuth flow."""
    await launch_browser_for_oauth()


if __name__ == "__main__":
    # Allow running this module directly to perform an interactive login.
    asyncio.run(main())
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/github/oauthFlow.py",
"license": "GNU General Public License v3.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/github/sharedTokenManager.py | import os
import json
import time
import asyncio
import threading
from typing import Optional, Dict
from pathlib import Path
from ..base_provider import AuthFileMixin
from ... import debug
# Directory (under the user's home) and file names for persisted OAuth state.
GITHUB_DIR = ".github-copilot"
GITHUB_CREDENTIAL_FILENAME = "oauth_creds.json"
GITHUB_LOCK_FILENAME = "oauth_creds.lock"
# Consider tokens expired this many milliseconds before their recorded expiry.
TOKEN_REFRESH_BUFFER_MS = 30 * 1000
# Minimum interval between on-disk credential freshness checks.
CACHE_CHECK_INTERVAL_MS = 1000
class TokenError:
    """String constants used as the ``type`` of a TokenManagerError."""
    REFRESH_FAILED = "REFRESH_FAILED"
    NO_REFRESH_TOKEN = "NO_REFRESH_TOKEN"
    LOCK_TIMEOUT = "LOCK_TIMEOUT"
    FILE_ACCESS_ERROR = "FILE_ACCESS_ERROR"
    NETWORK_ERROR = "NETWORK_ERROR"
class TokenManagerError(Exception):
    """Exception raised by SharedTokenManager, tagged with a TokenError category."""

    def __init__(self, type_: str, message: str, original_error: Optional[Exception] = None):
        super().__init__(message)
        # Category string; one of the TokenError constants.
        self.type = type_
        # Underlying exception (if any), kept for diagnostics.
        self.original_error = original_error
class SharedTokenManager(AuthFileMixin):
    """Process-wide cache for GitHub Copilot OAuth credentials.

    Implemented as a thread-safe singleton (see :meth:`getInstance`).
    Credentials are kept in memory and transparently reloaded from disk
    whenever the credential file's modification time advances.
    """

    parent = "GithubCopilot"
    _instance: Optional["SharedTokenManager"] = None
    _lock = threading.Lock()

    def __init__(self):
        # credentials: last validated credential dict, or None.
        # file_mod_time: mtime (ms) of the credential file at last load.
        # last_check: time (ms) of the most recent on-disk freshness check.
        self.memory_cache = {
            "credentials": None,
            "file_mod_time": 0,
            "last_check": 0,
        }
        # Placeholder for deduplicating concurrent refreshes (awaited if set).
        self.refresh_promise = None

    @classmethod
    def getInstance(cls):
        """Return the shared singleton, creating it under a lock on first use."""
        with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance

    def getCredentialFilePath(self):
        """Return the credential file path, preferring ~/.github-copilot."""
        path = Path(os.path.expanduser(f"~/{GITHUB_DIR}/{GITHUB_CREDENTIAL_FILENAME}"))
        if path.is_file():
            return path
        # Fall back to the provider-wide cache file supplied by AuthFileMixin.
        return SharedTokenManager.get_cache_file()

    def getLockFilePath(self):
        """Return the path of the lock file guarding credential updates."""
        return Path(os.path.expanduser(f"~/{GITHUB_DIR}/{GITHUB_LOCK_FILENAME}"))

    def getCurrentCredentials(self):
        """Return the in-memory credentials (possibly None); no disk I/O."""
        return self.memory_cache.get("credentials")

    def checkAndReloadIfNeeded(self):
        """Reload credentials from disk if the file changed since the last load.

        Throttled so the file system is consulted at most once per
        CACHE_CHECK_INTERVAL_MS.

        Raises:
            TokenManagerError: if the file exists but cannot be read or parsed.
        """
        now = int(time.time() * 1000)
        if now - self.memory_cache["last_check"] < CACHE_CHECK_INTERVAL_MS:
            return
        self.memory_cache["last_check"] = now
        try:
            file_path = self.getCredentialFilePath()
            if not file_path.exists():
                self.memory_cache["file_mod_time"] = 0
                return
            stat = file_path.stat()
            file_mod_time = int(stat.st_mtime * 1000)
            if file_mod_time > self.memory_cache["file_mod_time"]:
                self.reloadCredentialsFromFile()
                self.memory_cache["file_mod_time"] = file_mod_time
        except FileNotFoundError:
            # File vanished between exists() and stat(); treat as absent.
            self.memory_cache["file_mod_time"] = 0
        except TokenManagerError:
            # Already classified by reloadCredentialsFromFile(); don't re-wrap
            # and lose the specific message.
            raise
        except Exception as e:
            self.memory_cache["credentials"] = None
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, str(e), e) from e

    def reloadCredentialsFromFile(self):
        """Read, parse and validate the credential file into the memory cache.

        Raises:
            TokenManagerError: with FILE_ACCESS_ERROR on any read/parse failure.
        """
        file_path = self.getCredentialFilePath()
        debug.log(f"Reloading credentials from {file_path}")
        try:
            with open(file_path, "r") as fs:
                data = json.load(fs)
            credentials = self.validateCredentials(data)
            self.memory_cache["credentials"] = credentials
        except FileNotFoundError as e:
            self.memory_cache["credentials"] = None
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, "Credentials file not found", e) from e
        except json.JSONDecodeError as e:
            self.memory_cache["credentials"] = None
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, "Invalid JSON format", e) from e
        except Exception as e:
            self.memory_cache["credentials"] = None
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, str(e), e) from e

    def validateCredentials(self, data):
        """Return *data* if it looks like a credential dict; raise ValueError otherwise."""
        if not data or not isinstance(data, dict):
            raise ValueError("Invalid credentials format")
        if "access_token" not in data or not isinstance(data["access_token"], str):
            raise ValueError("Invalid credentials: missing access_token")
        if "token_type" not in data or not isinstance(data["token_type"], str):
            raise ValueError("Invalid credentials: missing token_type")
        return data

    def isTokenValid(self, credentials) -> bool:
        """Return True if *credentials* hold a usable access token.

        GitHub tokens don't expire by default, so a missing expiry_date means
        the token is treated as valid indefinitely.
        """
        if not credentials or not credentials.get("access_token"):
            return False
        expiry_date = credentials.get("expiry_date")
        if expiry_date is None:
            return True
        # Expire slightly early to avoid races right at the boundary.
        return time.time() * 1000 < expiry_date - TOKEN_REFRESH_BUFFER_MS

    async def getValidCredentials(self, github_client, force_refresh: bool = False):
        """Return valid credentials, reloading from disk when necessary.

        Args:
            github_client: OAuth client (reserved for a future refresh path).
            force_refresh: bypass the in-memory cache and re-read from disk.

        Raises:
            TokenManagerError: when no valid credentials can be obtained.
        """
        try:
            self.checkAndReloadIfNeeded()
            if (
                self.memory_cache["credentials"]
                and not force_refresh
                and self.isTokenValid(self.memory_cache["credentials"])
            ):
                return self.memory_cache["credentials"]
            if self.refresh_promise:
                return await self.refresh_promise
            # Fall back to an explicit reload from the credential file.
            try:
                self.reloadCredentialsFromFile()
                if self.memory_cache["credentials"] and self.isTokenValid(self.memory_cache["credentials"]):
                    return self.memory_cache["credentials"]
            except TokenManagerError:
                pass
            raise TokenManagerError(
                TokenError.FILE_ACCESS_ERROR,
                "No valid credentials found. Please run login first."
            )
        except Exception as e:
            if isinstance(e, TokenManagerError):
                raise
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, str(e), e) from e

    async def saveCredentialsToFile(self, credentials: dict):
        """Persist *credentials* to the credential file and refresh the cache."""
        file_path = self.getCredentialFilePath()
        file_path.parent.mkdir(parents=True, exist_ok=True)
        with open(file_path, "w") as f:
            json.dump(credentials, f, indent=2)
        self.memory_cache["credentials"] = credentials
        self.memory_cache["file_mod_time"] = int(time.time() * 1000)
        debug.log(f"Credentials saved to {file_path}")
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/github/sharedTokenManager.py",
"license": "GNU General Public License v3.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/github/stubs.py | from typing import Dict, Optional, Union
class ErrorDataDict(Dict):
    """Marker type for dicts that carry an error payload from the API."""
    pass
class GithubCredentials(Dict):
    """Marker type for dicts holding GitHub OAuth credential fields."""
    pass
class IGithubOAuth2Client:
    """Interface for GitHub OAuth2 device-flow clients.

    All methods are abstract stubs and must be provided by an implementation.
    """

    def setCredentials(self, credentials: GithubCredentials):
        """Store the credentials to use for subsequent requests."""
        raise NotImplementedError

    def getCredentials(self) -> GithubCredentials:
        """Return the currently stored credentials."""
        raise NotImplementedError

    async def getAccessToken(self) -> Dict[str, Optional[str]]:
        """Return a mapping containing the current access token (or None)."""
        raise NotImplementedError

    async def requestDeviceAuthorization(self, options: dict) -> Union[Dict, ErrorDataDict]:
        """Start the device-code flow; returns device/user codes or an error dict."""
        raise NotImplementedError

    async def pollDeviceToken(self, options: dict) -> Union[Dict, ErrorDataDict]:
        """Poll the token endpoint for a device-code grant result."""
        raise NotImplementedError
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/github/stubs.py",
"license": "GNU General Public License v3.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:etc/unittest/tool_support_provider.py | import asyncio
import unittest
from g4f.providers.base_provider import AsyncGeneratorProvider
from g4f.providers.response import FinishReason, ToolCalls
from g4f.providers.tool_support import ToolSupportProvider
from g4f.tools.run_tools import async_iter_run_tools
class ToolPlanProviderMock(AsyncGeneratorProvider):
    """Mock provider that always answers with a fixed two-step tool-call plan."""
    working = True

    @staticmethod
    async def create_async_generator(model, messages, stream=True, **kwargs):
        # Emit the JSON plan first, then a terminal finish marker.
        plan = (
            '{"tool_calls":['
            '{"name":"read","arguments":{"filePath":"README.md"}},'
            '{"name":"glob","arguments":{"pattern":"**/*.py"}}'
            "]}"
        )
        yield plan
        yield FinishReason("stop")
# OpenAI-style tool (function) definitions exercised by the tests below.
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "read",
            "description": "Read a file",
            "parameters": {
                "type": "object",
                "properties": {"filePath": {"type": "string"}},
                "required": ["filePath"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "glob",
            "description": "Glob files",
            "parameters": {
                "type": "object",
                "properties": {"pattern": {"type": "string"}},
                "required": ["pattern"],
            },
        },
    },
]
class TestToolSupportProvider(unittest.TestCase):
    """Verify that a JSON tool plan is surfaced as ToolCalls chunks."""

    def test_emits_tool_calls_from_json_plan(self):
        async def collect():
            chunks = []
            async for item in ToolSupportProvider.create_async_generator(
                model="test-model",
                messages=[{"role": "user", "content": "list files"}],
                stream=True,
                tools=TOOLS,
                provider=ToolPlanProviderMock,
            ):
                chunks.append(item)
            return chunks

        chunks = asyncio.run(collect())
        tool_chunks = [c for c in chunks if isinstance(c, ToolCalls)]
        self.assertEqual(len(tool_chunks), 1)
        calls = tool_chunks[0].get_list()
        self.assertEqual(len(calls), 2)
        self.assertEqual(calls[0]["function"]["name"], "read")
        self.assertEqual(calls[1]["function"]["name"], "glob")

    def test_run_tools_routes_to_tool_support_provider(self):
        async def collect():
            chunks = []
            async for item in async_iter_run_tools(
                ToolPlanProviderMock,
                model="test-model",
                messages=[{"role": "user", "content": "list files"}],
                stream=True,
                tools=TOOLS,
                tool_emulation=True,
            ):
                chunks.append(item)
            return chunks

        chunks = asyncio.run(collect())
        self.assertTrue(any(isinstance(c, ToolCalls) for c in chunks))
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/unittest/tool_support_provider.py",
"license": "GNU General Public License v3.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/needs_auth/Antigravity.py | """
Antigravity Provider for gpt4free
Provides access to Google's Antigravity API (Code Assist) supporting:
- Gemini 2.5 (Pro/Flash) with thinkingBudget
- Gemini 3 (Pro/Flash) with thinkingLevel
- Claude (Sonnet 4.5 / Opus 4.5) via Antigravity proxy
Uses OAuth2 authentication with Antigravity-specific credentials.
Supports endpoint fallback chain for reliability.
Includes interactive OAuth login flow with PKCE support.
"""
import os
import sys
import json
import base64
import time
import secrets
import hashlib
import asyncio
import webbrowser
import threading
from pathlib import Path
from typing import Any, AsyncGenerator, Dict, List, Optional, Union, Tuple
from urllib.parse import urlencode, parse_qs, urlparse
from http.server import HTTPServer, BaseHTTPRequestHandler
import aiohttp
from aiohttp import ClientSession, ClientTimeout
from ...typing import AsyncResult, Messages, MediaListType
from ...errors import MissingAuthError
from ...image.copy_images import save_response_media
from ...image import to_bytes, is_data_an_media
from ...providers.response import Usage, ImageResponse, ToolCalls, Reasoning
from ...providers.asyncio import get_running_loop
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin
from ..helper import get_connector, get_system_prompt, format_media_prompt
from ... import debug
def get_antigravity_oauth_creds_path():
    """Return the default location of the Antigravity OAuth credential file."""
    return Path.home().joinpath(".antigravity", "oauth_creds.json")
# OAuth configuration
# Loopback redirect registered for the Antigravity OAuth client.
ANTIGRAVITY_REDIRECT_URI = "http://localhost:51121/oauthcallback"
# Scopes requested during login (cloud platform, user info, logging, experiments).
ANTIGRAVITY_SCOPES = [
    "https://www.googleapis.com/auth/cloud-platform",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/userinfo.profile",
    "https://www.googleapis.com/auth/cclog",
    "https://www.googleapis.com/auth/experimentsandconfigs",
]
# Port/path of the local callback server; must match ANTIGRAVITY_REDIRECT_URI.
OAUTH_CALLBACK_PORT = 51121
OAUTH_CALLBACK_PATH = "/oauthcallback"
def generate_pkce_pair() -> Tuple[str, str]:
    """Create a PKCE verifier/challenge pair per RFC 7636 (S256 method).

    Returns:
        Tuple of (verifier, challenge):
        - verifier: random URL-safe string (43 characters).
        - challenge: base64url-encoded SHA-256 digest of the verifier,
          without padding.
    """
    code_verifier = secrets.token_urlsafe(32)
    hashed = hashlib.sha256(code_verifier.encode("ascii")).digest()
    code_challenge = base64.urlsafe_b64encode(hashed).decode("ascii").rstrip("=")
    return code_verifier, code_challenge
def encode_oauth_state(verifier: str, project_id: str = "") -> str:
    """Pack the PKCE verifier and project id into a base64url state string."""
    raw = json.dumps({"verifier": verifier, "projectId": project_id}).encode()
    return base64.urlsafe_b64encode(raw).decode().rstrip("=")
def decode_oauth_state(state: str) -> Dict[str, str]:
    """Decode an OAuth state parameter back into its verifier and project id.

    Returns empty-string fields if the state cannot be decoded or parsed.
    """
    # Restore the base64 padding stripped by the encoder.
    padding = "=" * (-len(state) % 4)
    try:
        raw = base64.urlsafe_b64decode(state + padding).decode("utf-8")
        data = json.loads(raw)
        return {
            "verifier": data.get("verifier", ""),
            "projectId": data.get("projectId", ""),
        }
    except Exception:
        return {"verifier": "", "projectId": ""}
class OAuthCallbackHandler(BaseHTTPRequestHandler):
    """HTTP request handler for OAuth callback.

    The outcome of the callback is communicated back to the waiting code
    through the class attributes ``callback_result`` / ``callback_error``.
    """

    # Set to {"code": ..., "state": ...} on a successful callback.
    callback_result: Optional[Dict[str, str]] = None
    # Set to an error string when the provider reports an OAuth error.
    callback_error: Optional[str] = None

    def log_message(self, format, *args):
        """Suppress default logging."""
        pass

    def do_GET(self):
        """Handle GET request for OAuth callback."""
        parsed = urlparse(self.path)
        if parsed.path != OAUTH_CALLBACK_PATH:
            self.send_error(404, "Not Found")
            return
        params = parse_qs(parsed.query)
        # parse_qs yields lists; take the first value (or None) for each key.
        code = params.get("code", [None])[0]
        state = params.get("state", [None])[0]
        error = params.get("error", [None])[0]
        if error:
            OAuthCallbackHandler.callback_error = error
            self._send_error_response(error)
        elif code and state:
            OAuthCallbackHandler.callback_result = {"code": code, "state": state}
            self._send_success_response()
        else:
            OAuthCallbackHandler.callback_error = "Missing code or state parameter"
            self._send_error_response("Missing parameters")

    def _send_success_response(self):
        """Send success HTML response."""
        html = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Authentication Successful</title>
<style>
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
display: flex; justify-content: center; align-items: center; height: 100vh;
margin: 0; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); }
.container { background: white; padding: 3rem; border-radius: 1rem;
box-shadow: 0 20px 60px rgba(0,0,0,0.3); text-align: center; max-width: 400px; }
h1 { color: #10B981; margin-bottom: 1rem; }
p { color: #6B7280; line-height: 1.6; }
.icon { font-size: 4rem; margin-bottom: 1rem; }
</style>
</head>
<body>
<div class="container">
<div class="icon">✅</div>
<h1>Authentication Successful!</h1>
<p>You have successfully authenticated with Google.<br>You can close this window and return to your terminal.</p>
</div>
</body>
</html>"""
        self.send_response(200)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        # Length in bytes, not characters (the page contains non-ASCII glyphs).
        self.send_header("Content-Length", len(html.encode()))
        self.end_headers()
        self.wfile.write(html.encode())

    def _send_error_response(self, error: str):
        """Send error HTML response."""
        html = f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Authentication Failed</title>
<style>
body {{ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
display: flex; justify-content: center; align-items: center; height: 100vh;
margin: 0; background: #FEE2E2; }}
.container {{ background: white; padding: 3rem; border-radius: 1rem;
box-shadow: 0 10px 40px rgba(0,0,0,0.1); text-align: center; }}
h1 {{ color: #EF4444; }}
p {{ color: #6B7280; }}
</style>
</head>
<body>
<div class="container">
<h1>❌ Authentication Failed</h1>
<p>Error: {error}</p>
<p>Please try again.</p>
</div>
</body>
</html>"""
        self.send_response(400)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.send_header("Content-Length", len(html.encode()))
        self.end_headers()
        self.wfile.write(html.encode())
class OAuthCallbackServer:
    """Local HTTP server to capture OAuth callback.

    Serves requests on a daemon thread and polls the OAuthCallbackHandler
    class attributes for a result or error.
    """

    def __init__(self, port: int = OAUTH_CALLBACK_PORT, timeout: float = 300.0):
        # Port to bind on localhost; must match the registered redirect URI.
        self.port = port
        # Overall deadline (seconds) for the user to complete the login.
        self.timeout = timeout
        self.server: Optional[HTTPServer] = None
        self._thread: Optional[threading.Thread] = None
        self._stop_flag = False

    def start(self) -> bool:
        """Start the callback server. Returns True if successful."""
        try:
            # Reset any previous results
            OAuthCallbackHandler.callback_result = None
            OAuthCallbackHandler.callback_error = None
            self._stop_flag = False
            self.server = HTTPServer(("localhost", self.port), OAuthCallbackHandler)
            self.server.timeout = 0.5  # Short timeout for responsive shutdown
            self._thread = threading.Thread(target=self._serve, daemon=True)
            self._thread.start()
            return True
        except OSError as e:
            # Typically the port is already in use.
            debug.log(f"Failed to start OAuth callback server: {e}")
            return False

    def _serve(self):
        """Serve requests until shutdown or result received."""
        start_time = time.time()
        while not self._stop_flag and self.server:
            if time.time() - start_time > self.timeout:
                break
            if OAuthCallbackHandler.callback_result or OAuthCallbackHandler.callback_error:
                # Give browser time to receive response
                time.sleep(0.3)
                break
            try:
                # handle_request() returns after server.timeout, keeping the
                # loop responsive to the stop flag.
                self.server.handle_request()
            except Exception:
                break

    def wait_for_callback(self) -> Optional[Dict[str, str]]:
        """Wait for OAuth callback and return result.

        Raises:
            RuntimeError: if the provider reported an OAuth error.
        """
        # Poll for result instead of blocking on thread join
        start_time = time.time()
        while time.time() - start_time < self.timeout:
            if OAuthCallbackHandler.callback_result or OAuthCallbackHandler.callback_error:
                break
            time.sleep(0.1)
        # Signal thread to stop
        self._stop_flag = True
        if self._thread:
            self._thread.join(timeout=2.0)
        if OAuthCallbackHandler.callback_error:
            raise RuntimeError(f"OAuth error: {OAuthCallbackHandler.callback_error}")
        return OAuthCallbackHandler.callback_result

    def stop(self):
        """Stop the callback server."""
        self._stop_flag = True
        if self.server:
            try:
                self.server.server_close()
            except Exception:
                pass
            self.server = None
# Antigravity base URLs with fallback order
# For streaming/generation: prefer production (most stable)
# For discovery: sandbox daily may work faster
BASE_URLS = [
    "https://cloudcode-pa.googleapis.com/v1internal",
    "https://daily-cloudcode-pa.googleapis.com/v1internal",
    "https://daily-cloudcode-pa.sandbox.googleapis.com/v1internal",
]
# Production URL (most reliable for generation)
PRODUCTION_URL = "https://cloudcode-pa.googleapis.com/v1internal"
# Required headers for Antigravity API calls
# These headers are CRITICAL for gemini-3-pro-high/low to work
# User-Agent matches official Antigravity Electron client
ANTIGRAVITY_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Antigravity/1.104.0 Chrome/138.0.7204.235 Electron/37.3.1 Safari/537.36",
    "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
    "Client-Metadata": '{"ideType":"IDE_UNSPECIFIED","platform":"PLATFORM_UNSPECIFIED","pluginType":"GEMINI"}',
}
# Headers for auth/discovery calls (uses different User-Agent for tier detection)
ANTIGRAVITY_AUTH_HEADERS = {
    "User-Agent": "google-api-nodejs-client/10.3.0",
    "X-Goog-Api-Client": "gl-node/22.18.0",
    "Client-Metadata": '{"ideType":"IDE_UNSPECIFIED","platform":"PLATFORM_UNSPECIFIED","pluginType":"GEMINI"}',
}
class AntigravityAuthManager(AuthFileMixin):
"""
Handles OAuth2 authentication for Google's Antigravity API.
Uses Antigravity-specific OAuth credentials and supports endpoint fallback.
Manages token caching, refresh, and API calls with automatic retry on 401.
"""
parent = "Antigravity"
OAUTH_REFRESH_URL = "https://oauth2.googleapis.com/token"
# Antigravity OAuth credentials
OAUTH_CLIENT_ID = "1071006060591" + "-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com"
OAUTH_CLIENT_SECRET = "GOC" + "SPX-K58FWR486LdLJ1mLB8sXC4z6qDAf"
TOKEN_BUFFER_TIME = 5 * 60 # seconds, 5 minutes
KV_TOKEN_KEY = "antigravity_oauth_token_cache"
def __init__(self, env: Dict[str, Any]):
self.env = env
self._access_token: Optional[str] = None
self._expiry: Optional[float] = None # Unix timestamp in seconds
self._token_cache = {} # In-memory cache
self._working_base_url: Optional[str] = None # Cache working endpoint
self._project_id: Optional[str] = None # Cached project ID from credentials
async def initialize_auth(self) -> None:
"""
Initialize authentication by using cached token, or refreshing if needed.
Raises RuntimeError if no valid token can be obtained.
"""
# Try cached token from in-memory cache
cached = await self._get_cached_token()
now = time.time()
if cached:
expires_at = cached["expiry_date"] / 1000 # ms to seconds
if expires_at - now > self.TOKEN_BUFFER_TIME:
self._access_token = cached["access_token"]
self._expiry = expires_at
return # Use cached token if valid
# Try loading from cache file or default path
path = AntigravityAuthManager.get_cache_file()
if not path.exists():
path = get_antigravity_oauth_creds_path()
if path.exists():
try:
with path.open("r") as f:
creds = json.load(f)
except Exception as e:
raise RuntimeError(f"Failed to read OAuth credentials from {path}: {e}")
else:
# Parse credentials from environment
if "ANTIGRAVITY_SERVICE_ACCOUNT" not in self.env:
raise RuntimeError(
"ANTIGRAVITY_SERVICE_ACCOUNT environment variable not set. "
f"Please set it or create credentials at {get_antigravity_oauth_creds_path()}"
)
creds = json.loads(self.env["ANTIGRAVITY_SERVICE_ACCOUNT"])
# Store project_id from credentials if available
if creds.get("project_id"):
self._project_id = creds["project_id"]
refresh_token = creds.get("refresh_token")
access_token = creds.get("access_token")
expiry_date = creds.get("expiry_date") # milliseconds since epoch
# Use original access token if still valid
if access_token and expiry_date:
expires_at = expiry_date / 1000
if expires_at - now > self.TOKEN_BUFFER_TIME:
self._access_token = access_token
self._expiry = expires_at
await self._cache_token(access_token, expiry_date)
return
# Otherwise, refresh token
if not refresh_token:
raise RuntimeError("No refresh token found in credentials.")
await self._refresh_and_cache_token(refresh_token)
async def _refresh_and_cache_token(self, refresh_token: str) -> None:
"""Refresh the OAuth token and cache it."""
headers = {"Content-Type": "application/x-www-form-urlencoded"}
data = {
"client_id": self.OAUTH_CLIENT_ID,
"client_secret": self.OAUTH_CLIENT_SECRET,
"refresh_token": refresh_token,
"grant_type": "refresh_token",
}
async with aiohttp.ClientSession() as session:
async with session.post(self.OAUTH_REFRESH_URL, data=data, headers=headers) as resp:
if resp.status != 200:
text = await resp.text()
raise RuntimeError(f"Token refresh failed: {text}")
resp_data = await resp.json()
access_token = resp_data.get("access_token")
expires_in = resp_data.get("expires_in", 3600) # seconds
if not access_token:
raise RuntimeError("No access_token in refresh response.")
self._access_token = access_token
self._expiry = time.time() + expires_in
expiry_date_ms = int(self._expiry * 1000) # milliseconds
await self._cache_token(access_token, expiry_date_ms)
async def _cache_token(self, access_token: str, expiry_date: int) -> None:
"""Cache token in memory."""
token_data = {
"access_token": access_token,
"expiry_date": expiry_date,
"cached_at": int(time.time() * 1000), # ms
}
self._token_cache[self.KV_TOKEN_KEY] = token_data
async def _get_cached_token(self) -> Optional[Dict[str, Any]]:
"""Return in-memory cached token if present and still valid."""
cached = self._token_cache.get(self.KV_TOKEN_KEY)
if cached:
expires_at = cached["expiry_date"] / 1000
if expires_at - time.time() > self.TOKEN_BUFFER_TIME:
return cached
return None
async def clear_token_cache(self) -> None:
"""Clear the token cache."""
self._access_token = None
self._expiry = None
self._token_cache.pop(self.KV_TOKEN_KEY, None)
def get_access_token(self) -> Optional[str]:
"""Return current valid access token or None."""
if (
self._access_token is not None
and self._expiry is not None
and self._expiry - time.time() > self.TOKEN_BUFFER_TIME
):
return self._access_token
return None
def get_project_id(self) -> Optional[str]:
"""Return cached project ID from credentials."""
return self._project_id
async def call_endpoint(
self,
method: str,
body: Dict[str, Any],
is_retry: bool = False,
use_auth_headers: bool = False
) -> Any:
"""
Call Antigravity API endpoint with JSON body and endpoint fallback.
Tries each base URL in order until one succeeds.
Automatically retries once on 401 Unauthorized by refreshing auth.
"""
if not self.get_access_token():
await self.initialize_auth()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.get_access_token()}",
**(ANTIGRAVITY_AUTH_HEADERS if use_auth_headers else ANTIGRAVITY_HEADERS),
}
# Try cached working URL first, then fallback chain
urls_to_try = []
if self._working_base_url:
urls_to_try.append(self._working_base_url)
urls_to_try.extend([url for url in BASE_URLS if url != self._working_base_url])
last_error = None
for base_url in urls_to_try:
url = f"{base_url}:{method}"
try:
async with aiohttp.ClientSession() as session:
async with session.post(url, headers=headers, json=body, timeout=30) as resp:
if resp.status == 401 and not is_retry:
# Token likely expired, clear and retry once
await self.clear_token_cache()
await self.initialize_auth()
return await self.call_endpoint(method, body, is_retry=True, use_auth_headers=use_auth_headers)
elif resp.ok:
self._working_base_url = base_url # Cache working URL
return await resp.json()
else:
last_error = f"HTTP {resp.status}: {await resp.text()}"
debug.log(f"Antigravity endpoint {base_url} returned {resp.status}")
except Exception as e:
last_error = str(e)
debug.log(f"Antigravity endpoint {base_url} failed: {e}")
continue
raise RuntimeError(f"All Antigravity endpoints failed. Last error: {last_error}")
def get_working_base_url(self) -> str:
"""Get the cached working base URL or default to first in list."""
return self._working_base_url or BASE_URLS[0]
@classmethod
def build_authorization_url(cls, project_id: str = "") -> Tuple[str, str, str]:
"""
Build OAuth authorization URL with PKCE.
Returns:
Tuple of (authorization_url, verifier, state)
"""
verifier, challenge = generate_pkce_pair()
state = encode_oauth_state(verifier, project_id)
params = {
"client_id": cls.OAUTH_CLIENT_ID,
"response_type": "code",
"redirect_uri": ANTIGRAVITY_REDIRECT_URI,
"scope": " ".join(ANTIGRAVITY_SCOPES),
"code_challenge": challenge,
"code_challenge_method": "S256",
"state": state,
"access_type": "offline",
"prompt": "consent",
}
url = f"https://accounts.google.com/o/oauth2/v2/auth?{urlencode(params)}"
return url, verifier, state
@classmethod
async def exchange_code_for_tokens(
cls,
code: str,
state: str,
) -> Dict[str, Any]:
"""
Exchange authorization code for access and refresh tokens.
Args:
code: Authorization code from OAuth callback
state: State parameter containing PKCE verifier
Returns:
Dict containing tokens and user info
"""
decoded_state = decode_oauth_state(state)
verifier = decoded_state.get("verifier", "")
project_id = decoded_state.get("projectId", "")
if not verifier:
raise RuntimeError("Missing PKCE verifier in state parameter")
start_time = time.time()
# Exchange code for tokens
async with aiohttp.ClientSession() as session:
token_data = {
"client_id": cls.OAUTH_CLIENT_ID,
"client_secret": cls.OAUTH_CLIENT_SECRET,
"code": code,
"grant_type": "authorization_code",
"redirect_uri": ANTIGRAVITY_REDIRECT_URI,
"code_verifier": verifier,
}
async with session.post(
"https://oauth2.googleapis.com/token",
data=token_data,
headers={
"Content-Type": "application/x-www-form-urlencoded",
"User-Agent": "google-api-nodejs-client/10.3.0",
}
) as resp:
if not resp.ok:
error_text = await resp.text()
raise RuntimeError(f"Token exchange failed: {error_text}")
token_response = await resp.json()
access_token = token_response.get("access_token")
refresh_token = token_response.get("refresh_token")
expires_in = token_response.get("expires_in", 3600)
if not access_token or not refresh_token:
raise RuntimeError("Missing tokens in response")
# Get user info
email = None
async with session.get(
"https://www.googleapis.com/oauth2/v1/userinfo?alt=json",
headers={"Authorization": f"Bearer {access_token}"}
) as resp:
if resp.ok:
user_info = await resp.json()
email = user_info.get("email")
# Discover project ID if not provided
effective_project_id = project_id
if not effective_project_id:
effective_project_id = await cls._fetch_project_id(session, access_token)
expires_at = int((start_time + expires_in) * 1000) # milliseconds
return {
"access_token": access_token,
"refresh_token": refresh_token,
"expiry_date": expires_at,
"email": email,
"project_id": effective_project_id,
}
@classmethod
async def _fetch_project_id(cls, session: aiohttp.ClientSession, access_token: str) -> str:
"""Fetch project ID from Antigravity API."""
headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
**ANTIGRAVITY_AUTH_HEADERS,
}
load_request = {
"metadata": {
"ideType": "IDE_UNSPECIFIED",
"platform": "PLATFORM_UNSPECIFIED",
"pluginType": "GEMINI",
}
}
# Try endpoints in order with short timeout
timeout = aiohttp.ClientTimeout(total=10)
for base_url in BASE_URLS:
try:
url = f"{base_url}:loadCodeAssist"
async with session.post(url, headers=headers, json=load_request, timeout=timeout) as resp:
if resp.ok:
data = await resp.json()
project = data.get("cloudaicompanionProject")
if isinstance(project, dict):
project = project.get("id")
if project:
return project
except asyncio.TimeoutError:
debug.log(f"Project discovery timed out at {base_url}")
continue
except Exception as e:
debug.log(f"Project discovery failed at {base_url}: {e}")
continue
# If discovery failed, attempt to onboard a managed project for the user.
# Read optional configuration from environment
attempts = int(os.environ.get("ANTIGRAVITY_ONBOARD_ATTEMPTS", "10"))
delay_seconds = float(os.environ.get("ANTIGRAVITY_ONBOARD_DELAY_S", "5"))
tier_id = os.environ.get("ANTIGRAVITY_TIER_ID", "free-tier")
# Use any preconfigured project id as metadata if available
configured_project = os.environ.get("ANTIGRAVITY_PROJECT_ID", "")
if tier_id:
onboard_request_body = {"tierId": tier_id, "metadata": {}}
if configured_project:
# include requested project id in metadata
onboard_request_body["metadata"]["cloudaicompanionProject"] = configured_project
# Try onboarding across endpoints with retries
for base_url in BASE_URLS:
for attempt in range(attempts):
try:
url = f"{base_url}:onboardUser"
onboard_headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
**ANTIGRAVITY_HEADERS,
}
async with session.post(url, headers=onboard_headers, json=onboard_request_body, timeout=timeout) as resp:
if not resp.ok:
print(f"Onboarding attempt {attempt+1} at {base_url} failed with status {resp.status}")
print(await resp.text())
# Stop attempts on this endpoint and try next base_url
break
payload = await resp.json()
# payload.response?.cloudaicompanionProject?.id
response_obj = payload.get("response") or {}
managed = response_obj.get("cloudaicompanionProject")
if isinstance(managed, dict):
managed_id = managed.get("id")
else:
managed_id = None
done = bool(payload.get("done", False))
if done and managed_id:
return managed_id
if done and configured_project:
return configured_project
except Exception as e:
debug.log(f"Failed to onboard managed project at {base_url}: {e}")
break
await asyncio.sleep(delay_seconds)
return ""
@classmethod
async def interactive_login(
    cls,
    project_id: str = "",
    no_browser: bool = False,
    timeout: float = 300.0,
) -> Dict[str, Any]:
    """
    Perform the interactive OAuth login flow.

    Opens a browser for Google OAuth and captures the redirect on a local
    callback server; when the server cannot be started (or the callback
    never arrives in the manual path), falls back to asking the user to
    paste the redirect URL or the bare authorization code.

    Args:
        project_id: Optional GCP project ID forwarded into the authorization URL.
        no_browser: If True, don't auto-open a browser (print the URL instead).
        timeout: Seconds the local callback server waits for the OAuth redirect.

    Returns:
        Dict with tokens and user info as produced by exchange_code_for_tokens
        (keys observed downstream: access_token, refresh_token, expiry_date,
        and optionally email / project_id).

    Raises:
        RuntimeError: On callback timeout, missing authorization code, or
            empty manual input.
    """
    # Build authorization URL (PKCE pair + CSRF state).
    # NOTE(review): `verifier` is never used below — presumably
    # exchange_code_for_tokens resolves the stored verifier itself; confirm.
    auth_url, verifier, state = cls.build_authorization_url(project_id)
    print("\n" + "=" * 60)
    print("Antigravity OAuth Login")
    print("=" * 60)
    # Try to start local callback server; failure switches to manual paste flow.
    callback_server = OAuthCallbackServer(timeout=timeout)
    server_started = callback_server.start()
    if server_started and not no_browser:
        print(f"\nOpening browser for authentication...")
        print(f"If browser doesn't open, visit this URL:\n")
        print(f"{auth_url}\n")
        # Try to open browser; a failure here is non-fatal (URL already printed).
        try:
            webbrowser.open(auth_url)
        except Exception as e:
            print(f"Could not open browser automatically: {e}")
            print("Please open the URL above manually.\n")
    else:
        if not server_started:
            print(f"\nCould not start local callback server on port {OAUTH_CALLBACK_PORT}.")
            print("You may need to close any application using that port.\n")
        print(f"\nPlease open this URL in your browser:\n")
        print(f"{auth_url}\n")
    if server_started:
        print("Waiting for authentication callback...")
        try:
            # Blocks until the OAuth redirect arrives or the timeout elapses.
            callback_result = callback_server.wait_for_callback()
            if not callback_result:
                raise RuntimeError("OAuth callback timed out")
            code = callback_result.get("code")
            callback_state = callback_result.get("state")
            if not code:
                raise RuntimeError("No authorization code received")
            print("\n✓ Authorization code received. Exchanging for tokens...")
            # Exchange code for tokens; prefer the state echoed by the callback.
            tokens = await cls.exchange_code_for_tokens(code, callback_state or state)
            print(f"✓ Authentication successful!")
            if tokens.get("email"):
                print(f" Logged in as: {tokens['email']}")
            if tokens.get("project_id"):
                print(f" Project ID: {tokens['project_id']}")
            return tokens
        finally:
            # Always release the local port, even on timeout/error.
            callback_server.stop()
    else:
        # Manual flow - ask user to paste the redirect URL or code.
        print("\nAfter completing authentication, you'll be redirected to a localhost URL.")
        print("Copy and paste the full redirect URL or just the authorization code below:\n")
        user_input = input("Paste redirect URL or code: ").strip()
        if not user_input:
            raise RuntimeError("No input provided")
        # Parse the input: full redirect URL vs. bare authorization code.
        if user_input.startswith("http"):
            parsed = urlparse(user_input)
            params = parse_qs(parsed.query)
            code = params.get("code", [None])[0]
            callback_state = params.get("state", [state])[0]
        else:
            # Assume it's just the code; reuse the state we generated.
            code = user_input
            callback_state = state
        if not code:
            raise RuntimeError("Could not extract authorization code")
        print("\nExchanging code for tokens...")
        tokens = await cls.exchange_code_for_tokens(code, callback_state)
        print(f"✓ Authentication successful!")
        if tokens.get("email"):
            print(f" Logged in as: {tokens['email']}")
        return tokens
@classmethod
async def login_and_save(
    cls,
    project_id: str = "",
    no_browser: bool = False,
    credentials_path: Optional[Path] = None,
) -> "AntigravityAuthManager":
    """
    Perform interactive login and persist the resulting credentials to disk.

    Args:
        project_id: Optional GCP project ID.
        no_browser: If True, don't auto-open a browser.
        credentials_path: Explicit path to save credentials; when omitted the
            g4f cache file is used (it is the first location checked by
            initialize_auth).

    Returns:
        AntigravityAuthManager primed with the fresh access token and expiry.
    """
    tokens = await cls.interactive_login(project_id=project_id, no_browser=no_browser)
    # Prepare credentials for saving; client id/secret are embedded so the
    # file is self-contained for later refresh.
    creds = {
        "access_token": tokens["access_token"],
        "refresh_token": tokens["refresh_token"],
        "expiry_date": tokens["expiry_date"],
        "email": tokens.get("email"),
        "project_id": tokens.get("project_id"),
        "client_id": cls.OAUTH_CLIENT_ID,
        "client_secret": cls.OAUTH_CLIENT_SECRET,
    }
    # Save credentials - use provided path, or g4f cache file (preferred).
    if credentials_path:
        path = credentials_path
    else:
        # Prefer g4f cache location (checked first by initialize_auth)
        path = cls.get_cache_file()
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("w") as f:
        json.dump(creds, f, indent=2)
    # Set restrictive permissions on Unix; best-effort (no-op on Windows).
    try:
        path.chmod(0o600)
    except Exception:
        pass
    print(f"\n✓ Credentials saved to: {path}")
    print("=" * 60 + "\n")
    # Create and return auth manager pre-seeded with the new token.
    auth_manager = cls(env=os.environ)
    auth_manager._access_token = tokens["access_token"]
    # expiry_date is in milliseconds; internal expiry is epoch seconds.
    auth_manager._expiry = tokens["expiry_date"] / 1000
    return auth_manager
class AntigravityProvider:
    """
    Internal provider class for Antigravity API communication.

    Handles message formatting, project discovery, and streaming content
    generation. Instantiated by Antigravity.create_async_generator with an
    environment mapping and an AntigravityAuthManager.
    """

    # Informational URL for the backing Google service.
    url = "https://cloud.google.com/code-assist"

    def __init__(self, env: dict, auth_manager: AntigravityAuthManager):
        """
        Args:
            env: Environment mapping (normally os.environ); consulted for
                ANTIGRAVITY_PROJECT_ID during project discovery.
            auth_manager: Supplies OAuth access tokens and cached project id.
        """
        self.env = env
        self.auth_manager = auth_manager
        # Lazily discovered GCP project id, cached after first lookup.
        self._project_id: Optional[str] = None
async def discover_project_id(self) -> str:
    """
    Resolve the GCP project ID used for API calls.

    Resolution order: ANTIGRAVITY_PROJECT_ID environment variable, the
    instance cache, the auth manager's credential-file value, and finally
    a live API discovery call. Successful lookups are cached on the
    instance.

    Returns:
        The project id string.

    Raises:
        RuntimeError: When no project id can be resolved by any strategy.
    """
    # Environment override wins unconditionally.
    env_value = self.env.get("ANTIGRAVITY_PROJECT_ID")
    if env_value:
        return env_value

    # Previously discovered value cached on this instance.
    if self._project_id:
        return self._project_id

    # Value persisted alongside the OAuth credentials.
    creds_value = self.auth_manager.get_project_id()
    if creds_value:
        self._project_id = creds_value
        return creds_value

    # Last resort: ask the API itself.
    try:
        token = self.auth_manager.get_access_token()
        if not token:
            raise RuntimeError("No valid access token available for project discovery")
        async with aiohttp.ClientSession() as session:
            discovered = await self.auth_manager._fetch_project_id(
                session=session,
                access_token=token
            )
            if discovered:
                self._project_id = discovered
                return discovered
            raise RuntimeError(
                "Project ID discovery failed - set ANTIGRAVITY_PROJECT_ID in environment."
            )
    except Exception as e:
        # Any failure (including the inner RuntimeError above) is logged and
        # surfaced as a single actionable error.
        debug.error(f"Failed to discover project ID: {e}")
        raise RuntimeError(
            "Could not discover project ID. Ensure authentication or set ANTIGRAVITY_PROJECT_ID."
        )
@staticmethod
def _messages_to_gemini_format(messages: list, media: MediaListType) -> List[Dict[str, Any]]:
    """
    Convert OpenAI-style chat messages to Gemini `contents` format.

    Handles four message shapes: tool results ("tool" role), assistant
    messages carrying tool_calls, plain string content, and multimodal
    list content (text + image_url entries). Media attachments are
    appended to the parts of the last message.

    Args:
        messages: OpenAI-style message dicts (role/content, optionally
            tool_calls / tool_call_id).
        media: Optional list of (data, filename) attachments; data may be
            a URL string or raw bytes.

    Returns:
        List of Gemini content dicts: {"role": "user"|"model", "parts": [...]}.
    """
    format_messages = []
    for msg in messages:
        # Gemini knows only "user" and "model"; tool results stay "user".
        role = "model" if msg["role"] == "assistant" else "user"
        # Handle tool role (OpenAI style).
        # NOTE(review): tool_call_id is used as the function *name* here —
        # works only if callers set tool_call_id to the function name; confirm.
        if msg["role"] == "tool":
            parts = [
                {
                    "functionResponse": {
                        "name": msg.get("tool_call_id", "unknown_function"),
                        "response": {
                            "result": (
                                msg["content"]
                                if isinstance(msg["content"], str)
                                else json.dumps(msg["content"])
                            )
                        },
                    }
                }
            ]
        # Handle assistant messages with tool calls
        elif msg["role"] == "assistant" and msg.get("tool_calls"):
            parts = []
            if isinstance(msg["content"], str) and msg["content"].strip():
                parts.append({"text": msg["content"]})
            for tool_call in msg["tool_calls"]:
                if tool_call.get("type") == "function":
                    parts.append(
                        {
                            "functionCall": {
                                "name": tool_call["function"]["name"],
                                # OpenAI carries arguments as a JSON string;
                                # Gemini wants a parsed object.
                                "args": json.loads(tool_call["function"]["arguments"]),
                            }
                        }
                    )
        # Handle plain string content
        elif isinstance(msg["content"], str):
            parts = [{"text": msg["content"]}]
        # Handle array content (possibly multimodal)
        elif isinstance(msg["content"], list):
            parts = []
            for content in msg["content"]:
                ctype = content.get("type")
                if ctype == "text":
                    parts.append({"text": content["text"]})
                elif ctype == "image_url":
                    image_url = content.get("image_url", {}).get("url")
                    if not image_url:
                        continue
                    if image_url.startswith("data:"):
                        # Inline base64 data URI: split header from payload.
                        prefix, b64data = image_url.split(",", 1)
                        mime_type = prefix.split(":")[1].split(";")[0]
                        parts.append({"inlineData": {"mimeType": mime_type, "data": b64data}})
                    else:
                        # Remote URL — mime type is assumed, not detected.
                        parts.append(
                            {
                                "fileData": {
                                    "mimeType": "image/jpeg",
                                    "fileUri": image_url,
                                }
                            }
                        )
        else:
            # Fallback: stringify anything unrecognized.
            parts = [{"text": str(msg["content"])}]
        format_messages.append({"role": role, "parts": parts})
    # Handle media attachments: append to the last message's parts.
    if media:
        if not format_messages:
            format_messages.append({"role": "user", "parts": []})
        for media_data, filename in media:
            if isinstance(media_data, str):
                # A string is treated as a file URI.
                if not filename:
                    filename = media_data
                # "jpg" is normalized to the proper "jpeg" mime subtype.
                extension = filename.split(".")[-1].replace("jpg", "jpeg")
                format_messages[-1]["parts"].append(
                    {
                        "fileData": {
                            "mimeType": f"image/{extension}",
                            "fileUri": media_data,
                        }
                    }
                )
            else:
                # Raw bytes are inlined as base64 with a sniffed mime type.
                media_data = to_bytes(media_data)
                format_messages[-1]["parts"].append({
                    "inlineData": {
                        "mimeType": is_data_an_media(media_data, filename),
                        "data": base64.b64encode(media_data).decode()
                    }
                })
    return format_messages
async def stream_content(
    self,
    model: str,
    messages: Messages,
    *,
    proxy: Optional[str] = None,
    thinking_budget: Optional[int] = None,
    tools: Optional[List[dict]] = None,
    tool_choice: Optional[str] = None,
    max_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    stop: Optional[Union[str, List[str]]] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    seed: Optional[int] = None,
    response_format: Optional[Dict[str, Any]] = None,
    **kwargs
) -> AsyncGenerator:
    """
    Stream content generation from the Antigravity API.

    Builds a Gemini-style request from OpenAI-style inputs, POSTs it to the
    SSE streaming endpoint, and yields chunks as they arrive: Reasoning for
    thinking parts, plain strings for text, media objects for inline/file
    data, ToolCalls for function calls, and a final Usage when metadata is
    present.

    Raises:
        MissingAuthError: On a 401 response.
        RuntimeError: On any other non-OK response (after one optional 503 retry).
    """
    # Convert user-facing model name to internal API name
    if model in Antigravity.model_aliases:
        model = Antigravity.model_aliases[model]
    await self.auth_manager.initialize_auth()
    project_id = await self.discover_project_id()
    # Convert messages to Gemini format; system/developer messages are
    # stripped here and re-injected below as system_instruction.
    contents = self._messages_to_gemini_format(
        [m for m in messages if m["role"] not in ["developer", "system"]],
        media=kwargs.get("media", None)
    )
    system_prompt = get_system_prompt(messages)
    request_data = {}
    if system_prompt:
        request_data["system_instruction"] = {"parts": {"text": system_prompt}}
    # Convert OpenAI-style tools to Gemini functionDeclarations.
    gemini_tools = None
    function_declarations = []
    if tools:
        for tool in tools:
            if tool.get("type") == "function" and "function" in tool:
                func = tool["function"]
                function_declarations.append({
                    "name": func.get("name"),
                    "description": func.get("description", ""),
                    "parameters": func.get("parameters", {})
                })
    if function_declarations:
        gemini_tools = [{"functionDeclarations": function_declarations}]
    # Build generation config; None entries are pruned by clean_none below.
    generation_config = {
        "maxOutputTokens": max_tokens or 32000,  # Antigravity default
        "temperature": temperature,
        "topP": top_p,
        "stop": stop,
        "presencePenalty": presence_penalty,
        "frequencyPenalty": frequency_penalty,
        "seed": seed,
    }
    # Handle JSON response format
    if response_format is not None and response_format.get("type") == "json_object":
        generation_config["responseMimeType"] = "application/json"
    # Handle extended-thinking configuration
    if thinking_budget:
        generation_config["thinkingConfig"] = {
            "thinkingBudget": thinking_budget,
            "includeThoughts": True
        }
    # Compose request body with required Antigravity envelope fields.
    req_body = {
        "model": model,
        "project": project_id,
        "userAgent": "antigravity",
        "requestType": "agent",
        "requestId": f"req-{secrets.token_hex(8)}",
        "request": {
            "contents": contents,
            "generationConfig": generation_config,
            "tools": gemini_tools,
            **request_data
        },
    }
    # Add tool config if specified; allowedFunctionNames only applies to ANY mode.
    if tool_choice and gemini_tools:
        mode = tool_choice.upper()
        function_calling_config = {"mode": mode}
        if mode == "ANY":
            function_calling_config["allowedFunctionNames"] = [fd["name"] for fd in function_declarations]
        req_body["request"]["toolConfig"] = {"functionCallingConfig": function_calling_config}
    # Remove None values recursively so optional params are simply absent.
    def clean_none(d):
        if isinstance(d, dict):
            return {k: clean_none(v) for k, v in d.items() if v is not None}
        if isinstance(d, list):
            return [clean_none(x) for x in d if x is not None]
        return d
    req_body = clean_none(req_body)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.auth_manager.get_access_token()}",
        **ANTIGRAVITY_HEADERS,
    }
    # Use production URL for streaming (most reliable)
    base_url = PRODUCTION_URL
    url = f"{base_url}:streamGenerateContent?alt=sse"
    # Streaming SSE parsing helper: accumulates "data: " payload lines until
    # a blank line terminates the event, then yields the parsed JSON.
    async def parse_sse_stream(stream: aiohttp.StreamReader) -> AsyncGenerator[Dict[str, Any], None]:
        """Parse SSE stream yielding parsed JSON objects."""
        buffer = ""
        object_buffer = ""
        async for chunk_bytes in stream.iter_any():
            chunk = chunk_bytes.decode()
            buffer += chunk
            lines = buffer.split("\n")
            buffer = lines.pop()  # Save last incomplete line back
            for line in lines:
                line = line.strip()
                if line == "":
                    # Empty line indicates end of SSE message -> parse object buffer
                    if object_buffer:
                        try:
                            yield json.loads(object_buffer)
                        except Exception as e:
                            debug.error(f"Error parsing SSE JSON: {e}")
                        object_buffer = ""
                elif line.startswith("data: "):
                    object_buffer += line[6:]
        # Final parse when stream ends without a trailing blank line.
        if object_buffer:
            try:
                yield json.loads(object_buffer)
            except Exception as e:
                debug.error(f"Error parsing final SSE JSON: {e}")
    timeout = ClientTimeout(total=None)  # No total timeout: stream may be long
    connector = get_connector(None, proxy)
    async with ClientSession(headers=headers, timeout=timeout, connector=connector) as session:
        async with session.post(url, json=req_body) as resp:
            if not resp.ok:
                if resp.status == 503:
                    # Honor the server-suggested retryDelay if parseable.
                    try:
                        retry_delay = int(max([float(d.get("retryDelay", 0)) for d in (await resp.json(content_type=None)).get("error", {}).get("details", [])]))
                    except ValueError:
                        retry_delay = 30  # Default retry delay if not specified
                    debug.log(f"Received 503 error, retrying after {retry_delay}")
                    if retry_delay <= 120:
                        await asyncio.sleep(retry_delay)
                        # NOTE(review): this retry response is not entered as a
                        # context manager; it is released when the session
                        # closes — confirm this is acceptable.
                        resp = await session.post(url, json=req_body)
                        if not resp.ok:
                            debug.error(f"Retry after 503 failed with status {resp.status}")
                if not resp.ok:
                    if resp.status == 401:
                        raise MissingAuthError("Unauthorized (401) from Antigravity API")
                    error_body = await resp.text()
                    raise RuntimeError(f"Antigravity API error {resp.status}: {error_body}")
            usage_metadata = {}
            async for json_data in parse_sse_stream(resp.content):
                # Process JSON data according to Gemini API response structure.
                candidates = json_data.get("response", {}).get("candidates", [])
                # Keep the latest usageMetadata seen; yielded once at the end.
                usage_metadata = json_data.get("response", {}).get("usageMetadata", usage_metadata)
                if not candidates:
                    continue
                candidate = candidates[0]
                content = candidate.get("content", {})
                parts = content.get("parts", [])
                tool_calls = []
                for part in parts:
                    # Real thinking chunks
                    if part.get("thought") is True and "text" in part:
                        yield Reasoning(part["text"])
                    # Function calls from Gemini (batched, yielded below)
                    elif "functionCall" in part:
                        tool_calls.append(part["functionCall"])
                    # Text content
                    elif "text" in part:
                        yield part["text"]
                    # Inline media data
                    elif "inlineData" in part:
                        async for media in save_response_media(part["inlineData"], format_media_prompt(messages)):
                            yield media
                    # File data (e.g. external image)
                    elif "fileData" in part:
                        file_data = part["fileData"]
                        yield ImageResponse(file_data.get("fileUri"))
                if tool_calls:
                    # Convert Gemini tool calls back to OpenAI format.
                    openai_tool_calls = []
                    for i, tc in enumerate(tool_calls):
                        openai_tool_calls.append({
                            "id": f"call_{i}_{tc.get('name', 'unknown')}",
                            "type": "function",
                            "function": {
                                "name": tc.get("name"),
                                "arguments": json.dumps(tc.get("args", {}))
                            }
                        })
                    yield ToolCalls(openai_tool_calls)
            if usage_metadata:
                yield Usage(**usage_metadata)
class Antigravity(AsyncGeneratorProvider, ProviderModelMixin):
    """
    Antigravity Provider for gpt4free.

    Provides access to Google's Antigravity API (Code Assist) supporting:
    - Gemini 2.5 Pro/Flash with extended thinking
    - Gemini 3 Pro/Flash (preview)
    - Claude Sonnet 4.5 / Opus 4.5 via Antigravity proxy

    Requires OAuth2 credentials. Set the ANTIGRAVITY_SERVICE_ACCOUNT
    environment variable or create credentials at
    ~/.antigravity/oauth_creds.json (see ``login``).
    """

    label = "Google Antigravity"
    login_url = "https://cloud.google.com/code-assist"
    url = "https://antigravity.google"
    default_model = "gemini-3-flash"
    # Static model list used when dynamic discovery is unavailable.
    fallback_models = [
        # Gemini 2.5 models
        "gemini-2.5-pro",
        "gemini-2.5-flash",
        "gemini-2.5-flash-lite",
        # Gemini 3 models
        "gemini-3-flash",
        # Claude models (via Antigravity proxy)
        "claude-sonnet-4.5",
        "claude-opus-4.5",
    ]
    # Maps user-facing names to internal API model names.
    model_aliases = {
        "claude-sonnet-4.5": "claude-sonnet-4-5",
        "claude-opus-4.5": "claude-opus-4-5",
    }
    working = True
    supports_message_history = True
    supports_system_message = True
    needs_auth = True
    active_by_default = True
    # Shared auth manager, lazily created by the classmethods below.
    auth_manager: AntigravityAuthManager = None
@classmethod
def get_models(cls, **kwargs) -> List[str]:
    """
    Return available models, fetching them from the API when authenticated.

    Also bumps ``cls.live`` once when a valid access token is present so the
    provider registers as active. Falls back to ``fallback_models`` when
    dynamic discovery fails or no credentials exist.
    """
    # Try to fetch models dynamically if we have credentials and no cache yet.
    if not cls.models and cls.has_credentials():
        try:
            # Guard against calling asyncio.run() inside a running loop.
            get_running_loop(check_nested=True)
            cls.models = asyncio.run(cls._fetch_models())
        except Exception as e:
            debug.log(f"Failed to fetch dynamic models: {e}")
    # Update live status once, based on token availability.
    if cls.live == 0:
        if cls.auth_manager is None:
            cls.auth_manager = AntigravityAuthManager(env=os.environ)
        if cls.auth_manager.get_access_token() is not None:
            cls.live += 1
    return cls.models if cls.models else cls.fallback_models
@classmethod
async def _fetch_models(cls) -> List[str]:
    """
    Fetch available models dynamically from the Antigravity API.

    Queries the ``fetchAvailableModels`` endpoint and returns the model
    names, excluding internal models and tab-completion ("tab_*") entries.

    Returns:
        List of model name strings; empty list on any failure.
    """
    if cls.auth_manager is None:
        cls.auth_manager = AntigravityAuthManager(env=os.environ)
    await cls.auth_manager.initialize_auth()
    try:
        response = await cls.auth_manager.call_endpoint(
            method="fetchAvailableModels",
            body={"project": cls.auth_manager.get_project_id()}
        )
        # Extract model names, filtering out internal and tab-completion models.
        # (The previous isinstance(list) check here was dead code: a list
        # comprehension always produces a list.)
        return [
            key
            for key, value in response.get("models", {}).items()
            if not value.get("isInternal", False) and not key.startswith("tab_")
        ]
    except Exception as e:
        debug.log(f"Failed to fetch models: {e}")
        return []
@classmethod
async def get_quota(cls, api_key: Optional[str] = None) -> dict:
    """
    Fetch usage/quota information from the Antigravity API.

    Args:
        api_key: Unused; accepted for signature compatibility with other
            providers' ``get_quota`` implementations.

    Returns:
        Raw ``fetchAvailableModels`` response dict (quota data is carried
        in that payload).

    Raises:
        MissingAuthError: If no valid access token or project ID is available.
    """
    if cls.auth_manager is None:
        cls.auth_manager = AntigravityAuthManager(env=os.environ)
    await cls.auth_manager.initialize_auth()
    access_token = cls.auth_manager.get_access_token()
    project_id = cls.auth_manager.get_project_id()
    if not access_token or not project_id:
        raise MissingAuthError("Cannot fetch usage without valid authentication")
    # Reuse the project id validated above instead of querying the auth
    # manager a second time.
    return await cls.auth_manager.call_endpoint(
        method="fetchAvailableModels",
        body={"project": project_id}
    )
@classmethod
async def create_async_generator(
    cls,
    model: str,
    messages: Messages,
    stream: bool = False,
    media: MediaListType = None,
    tools: Optional[list] = None,
    **kwargs
) -> AsyncResult:
    """
    Create an async generator yielding streamed response chunks.

    Args:
        model: Model name (aliases like "claude-sonnet-4.5" are resolved).
        messages: OpenAI-style chat messages.
        stream: Forwarded to the provider; responses are streamed either way.
        media: Optional list of (data, filename) attachments.
        tools: Optional OpenAI-style tool definitions.
        **kwargs: Extra generation parameters passed to stream_content.
    """
    if cls.auth_manager is None:
        cls.auth_manager = AntigravityAuthManager(env=os.environ)
    # Apply model alias if needed
    if model in cls.model_aliases:
        model = cls.model_aliases[model]
    # Initialize Antigravity provider with auth manager and environment,
    # then delegate all streaming work to it.
    provider = AntigravityProvider(env=os.environ, auth_manager=cls.auth_manager)
    async for chunk in provider.stream_content(
        model=model,
        messages=messages,
        stream=stream,
        media=media,
        tools=tools,
        **kwargs
    ):
        yield chunk
@classmethod
async def login(
    cls,
    project_id: str = "",
    no_browser: bool = False,
    credentials_path: Optional[Path] = None,
) -> "AntigravityAuthManager":
    """
    Perform interactive OAuth login and save credentials.

    This is the main entry point for authenticating with Antigravity.
    The resulting auth manager is also installed as ``cls.auth_manager``
    so subsequent requests reuse it.

    Args:
        project_id: Optional GCP project ID
        no_browser: If True, don't auto-open browser
        credentials_path: Path to save credentials

    Returns:
        AntigravityAuthManager with active credentials

    Example:
        >>> import asyncio
        >>> from g4f.Provider.needs_auth import Antigravity
        >>> asyncio.run(Antigravity.login())
    """
    auth_manager = await AntigravityAuthManager.login_and_save(
        project_id=project_id,
        no_browser=no_browser,
        credentials_path=credentials_path,
    )
    cls.auth_manager = auth_manager
    return auth_manager
@classmethod
def has_credentials(cls) -> bool:
    """
    Report whether credentials are available from any known source.

    Checks, in order: the g4f cache file (the first location read by
    initialize_auth), the default ~/.antigravity/oauth_creds.json file,
    and finally the ANTIGRAVITY_SERVICE_ACCOUNT environment variable.
    """
    credential_files = (
        AntigravityAuthManager.get_cache_file(),
        get_antigravity_oauth_creds_path(),
    )
    if any(path.exists() for path in credential_files):
        return True
    # No file on disk — an inline service account still counts.
    return "ANTIGRAVITY_SERVICE_ACCOUNT" in os.environ
@classmethod
def get_credentials_path(cls) -> Path:
    """
    Return where credentials are stored, or should be stored.

    Existing files win, in the same order initialize_auth reads them:
    the g4f cache file first, then ~/.antigravity/oauth_creds.json.
    When neither exists, the g4f cache path is returned as the preferred
    location for newly saved credentials.
    """
    preferred = AntigravityAuthManager.get_cache_file()
    if preferred.exists():
        return preferred
    legacy = get_antigravity_oauth_creds_path()
    if legacy.exists():
        return legacy
    # Nothing on disk yet: new credentials should go to the cache path.
    return preferred
async def main(args: Optional[List[str]] = None):
    """
    CLI entry point for Antigravity authentication.

    Subcommands: ``login`` (interactive OAuth), ``status`` (inspect saved
    credentials), ``logout`` (delete saved credential files). With no
    subcommand, prints help.

    Args:
        args: Argument list for argparse; None means sys.argv.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="Antigravity OAuth Authentication for gpt4free",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
%(prog)s login # Interactive login with browser
%(prog)s login --no-browser # Manual login (paste URL)
%(prog)s login --project-id ID # Login with specific project
%(prog)s status # Check authentication status
%(prog)s logout # Remove saved credentials
"""
    )
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # Login command
    login_parser = subparsers.add_parser("login", help="Authenticate with Google")
    login_parser.add_argument(
        "--project-id", "-p",
        default="",
        help="Google Cloud project ID (optional, auto-discovered if not set)"
    )
    login_parser.add_argument(
        "--no-browser", "-n",
        action="store_true",
        help="Don't auto-open browser, print URL instead"
    )
    # Status command
    subparsers.add_parser("status", help="Check authentication status")
    # Logout command
    subparsers.add_parser("logout", help="Remove saved credentials")
    args = parser.parse_args(args)
    if args.command == "login":
        try:
            await Antigravity.login(
                project_id=args.project_id,
                no_browser=args.no_browser,
            )
        except KeyboardInterrupt:
            print("\n\nLogin cancelled.")
            sys.exit(1)
        except Exception as e:
            print(f"\n❌ Login failed: {e}")
            sys.exit(1)
    elif args.command == "status":
        print("\nAntigravity Authentication Status")
        print("=" * 40)
        if Antigravity.has_credentials():
            creds_path = Antigravity.get_credentials_path()
            print(f"✓ Credentials found at: {creds_path}")
            # Try to read and display some info; failures are non-fatal.
            try:
                with creds_path.open() as f:
                    creds = json.load(f)
                if creds.get("email"):
                    print(f" Email: {creds['email']}")
                if creds.get("project_id"):
                    print(f" Project: {creds['project_id']}")
                expiry = creds.get("expiry_date")
                if expiry:
                    # expiry_date is stored in milliseconds since epoch.
                    expiry_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(expiry / 1000))
                    if expiry / 1000 > time.time():
                        print(f" Token expires: {expiry_time}")
                    else:
                        print(f" Token expired: {expiry_time} (will auto-refresh)")
            except Exception as e:
                print(f" (Could not read credential details: {e})")
        else:
            print("✗ No credentials found")
            print(f"\nRun 'antigravity login' to authenticate.")
        print()
    elif args.command == "logout":
        print("\nAntigravity Logout")
        print("=" * 40)
        removed = False
        # Remove cache file
        cache_path = AntigravityAuthManager.get_cache_file()
        if cache_path.exists():
            cache_path.unlink()
            print(f"✓ Removed: {cache_path}")
            removed = True
        # Remove default credentials file
        default_path = get_antigravity_oauth_creds_path()
        if default_path.exists():
            default_path.unlink()
            print(f"✓ Removed: {default_path}")
            removed = True
        if removed:
            print("\n✓ Credentials removed successfully.")
        else:
            print("No credentials found to remove.")
        print()
    else:
        parser.print_help()
def cli_main(args: Optional[List[str]] = None):
    """
    Synchronous CLI entry point for setup.py console_scripts.

    Args:
        args: Argument list forwarded to main(); None means sys.argv.
    """
    asyncio.run(main(args))


if __name__ == "__main__":
    cli_main()
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/Antigravity.py",
"license": "GNU General Public License v3.0",
"lines": 1342,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/yupp/constants.py | """
Constants for Yupp AI NextAction token management
"""
# Default fallback tokens (hardcoded). Used when no fresher values have been
# extracted from the live site. NOTE(review): these look like Next.js
# server-action IDs and will go stale when yupp.ai redeploys — confirm and
# rotate as needed.
NEXT_ACTION_TOKENS = {
    "new_conversation": "7f7de0a21bc8dc3cee8ba8b6de632ff16f769649dd",
    "existing_conversation": "7f9ec99a63cbb61f69ef18c0927689629bda07f1bf",
}

# Cache settings
TOKEN_CACHE_TTL = 3600  # 1 hour in seconds
MAX_EXTRACTION_RETRIES = 3  # give up re-extracting after this many failures
MIN_REQUIRED_TOKENS = 2  # Minimum tokens needed to update cache

# URLs
YUPP_BASE_URL = "https://yupp.ai"
YUPP_CHAT_URL = "https://yupp.ai/chat"

# Regex patterns for token extraction. Each pattern captures one 40-42
# character lowercase-hex ID; the broader patterns below catch alternative
# serializations found in page HTML and JS bundles.
TOKEN_PATTERNS = [
    # Standard patterns
    r'next-action["\']?\s*[:=]\s*["\']?([a-f0-9]{40,42})',
    r'"next-action"\s*:\s*"([a-f0-9]{40,42})"',
    r'"actionId"\s*:\s*"([a-f0-9]{40,42})"',
    r'nextAction["\']?\s*:\s*["\']?([a-f0-9]{40,42})',
    # Broader patterns for various formats
    r'["\']?action["\']?\s*[:=]\s*["\']?([a-f0-9]{40,42})',
    r'["\']?new_conversation["\']?\s*[:=]\s*["\']?([a-f0-9]{40,42})',
    r'["\']?existing_conversation["\']?\s*[:=]\s*["\']?([a-f0-9]{40,42})',
    r'["\']?new["\']?\s*[:=]\s*["\']?([a-f0-9]{40,42})',
    r'["\']?existing["\']?\s*[:=]\s*["\']?([a-f0-9]{40,42})',
]
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/yupp/constants.py",
"license": "GNU General Public License v3.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/yupp/token_extractor.py | """
Yupp AI NextAction Token Extractor
Smart extraction with multiple fallback strategies
Only attempts extraction on token failure
"""
import asyncio
import json
import os
import re
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from .constants import (
MAX_EXTRACTION_RETRIES,
MIN_REQUIRED_TOKENS,
NEXT_ACTION_TOKENS,
TOKEN_CACHE_TTL,
TOKEN_PATTERNS,
YUPP_BASE_URL,
YUPP_CHAT_URL,
)
@dataclass
class TokenCache:
    """In-memory cache for extracted NextAction tokens."""

    # Mapping of token type ("new_conversation"/"existing_conversation") to value.
    tokens: Dict[str, str] = field(default_factory=dict)
    # When the cache was last refreshed; None means never.
    last_updated: Optional[datetime] = None
    # Consecutive failures observed against cached/hardcoded tokens.
    failed_attempts: int = 0

    def is_expired(self) -> bool:
        """Return True when the cache was never filled or its TTL has lapsed."""
        if self.last_updated is None:
            return True
        age = datetime.now() - self.last_updated
        return age > timedelta(seconds=TOKEN_CACHE_TTL)

    def is_valid(self) -> bool:
        """Return True when the cache is fresh and holds both required token types."""
        if self.is_expired():
            return False
        if len(self.tokens) < MIN_REQUIRED_TOKENS:
            return False
        required_keys = ("new_conversation", "existing_conversation")
        return all(key in self.tokens for key in required_keys)
class TokenExtractor:
    """
    Extracts NextAction tokens from Yupp AI.

    Uses multiple strategies (chat page, main page, JS bundles) and only
    attempts extraction after a token failure is reported.
    """

    def __init__(
        self,
        jwt_token: Optional[str] = None,
        scraper: Optional["cloudscraper.CloudScraper"] = None,
    ):
        """
        Args:
            jwt_token: Session JWT; falls back to the YUPP_JWT or
                YUPP_API_KEY environment variables.
            scraper: Optional preconfigured cloudscraper instance to reuse.
        """
        env_token = os.getenv("YUPP_JWT") or os.getenv("YUPP_API_KEY")
        self.jwt_token = jwt_token or env_token
        self.scraper = scraper
        # Token cache plus bookkeeping guarded by an asyncio lock.
        self._cache = TokenCache()
        self._extraction_in_progress = False
        self._lock = asyncio.Lock()
async def get_token(self, token_type: str) -> str:
    """
    Return a NextAction token from the cache, or a hardcoded fallback.

    This method never triggers extraction — extraction is driven solely by
    mark_token_failed().

    Args:
        token_type: "new_conversation" or "existing_conversation".

    Returns:
        The cached token when the cache is valid and holds that type;
        otherwise the hardcoded fallback (defaulting to the
        new-conversation token for unknown types).
    """
    cache = self._cache
    if cache.is_valid():
        try:
            return cache.tokens[token_type]
        except KeyError:
            pass
    # Cache miss/stale: fall back to the hardcoded defaults.
    return NEXT_ACTION_TOKENS.get(
        token_type, NEXT_ACTION_TOKENS["new_conversation"]
    )
async def mark_token_failed(self, token_type: str, token_value: str) -> None:
    """
    Report that a token was rejected; may schedule a background extraction.

    Increments the failure counter only when the rejected value is one we
    actually handed out (a cached token or a hardcoded fallback), then
    schedules _attempt_extraction() as a background task unless the retry
    budget is exhausted or an extraction is already marked in progress.

    Args:
        token_type: Token category that failed.
        token_value: The exact token value that was rejected.
    """
    async with self._lock:
        # Check if this is actually a cached token that failed
        cached_value = self._cache.tokens.get(token_type)
        # If the failed token matches our cache, increment failures
        if cached_value == token_value:
            self._cache.failed_attempts += 1
        elif token_value in NEXT_ACTION_TOKENS.values():
            # Hardcoded token failed - definitely need to extract
            self._cache.failed_attempts += 1
        # Only attempt extraction if we haven't failed too many times
        if self._cache.failed_attempts < MAX_EXTRACTION_RETRIES:
            if not self._extraction_in_progress:
                # Set flag immediately to prevent race conditions.
                # NOTE(review): the scheduled task must treat this pre-set
                # flag as its own run and clear it when done — confirm
                # _attempt_extraction honors that contract.
                self._extraction_in_progress = True
                # Start extraction in background (fire-and-forget task).
                asyncio.create_task(self._attempt_extraction())
async def _attempt_extraction(self) -> bool:
    """
    Attempt to extract fresh NextAction tokens from Yupp AI.

    Tries the chat page, then the main page, then the JS bundles. On
    success (at least MIN_REQUIRED_TOKENS found) the cache is refreshed
    and the failure counter reset.

    Returns:
        True when the cache was refreshed, False otherwise.

    Bug fix: mark_token_failed() sets ``_extraction_in_progress`` *before*
    scheduling this coroutine, so the previous "already in progress" check
    here always fired, returned immediately (before the try/finally), and
    left the flag stuck True — extraction could never run and could never
    be scheduled again. The caller's pre-set flag is this task's own run:
    we simply (re)assert it and guarantee it is cleared in ``finally``.
    """
    async with self._lock:
        self._extraction_in_progress = True
    try:
        # Strategy chain: first source that yields tokens wins.
        extracted_tokens = await self._extract_from_chat_page()
        if not extracted_tokens:
            extracted_tokens = await self._extract_from_main_page()
        if not extracted_tokens:
            extracted_tokens = await self._extract_from_js_bundles()
        if extracted_tokens and len(extracted_tokens) >= MIN_REQUIRED_TOKENS:
            # Update cache with extracted tokens under the lock.
            async with self._lock:
                self._cache.tokens = {
                    "new_conversation": extracted_tokens[0],
                    "existing_conversation": extracted_tokens[1]
                    if len(extracted_tokens) > 1
                    else extracted_tokens[0],
                }
                self._cache.last_updated = datetime.now()
                self._cache.failed_attempts = 0
            return True
        return False
    except Exception as e:
        print(f"[Yupp TokenExtractor] Extraction failed: {e}")
        if os.getenv("DEBUG_MODE", "").lower() == "true":
            import traceback
            traceback.print_exc()
        return False
    finally:
        # Always clear the flag so future failures can schedule extraction.
        async with self._lock:
            self._extraction_in_progress = False
async def _fetch_page_text(self, url: str) -> str:
    """
    Fetch *url* and return the response body text.

    Transport preference: the injected cloudscraper instance, else a
    freshly created cloudscraper (carrying the JWT session cookie when
    available), else plain aiohttp. Network errors propagate to callers.
    """
    headers = self._get_headers()
    if self.scraper:
        response = self.scraper.get(url, headers=headers, timeout=10)
        return response.text
    # No injected scraper: try to create one on the fly.
    try:
        import cloudscraper
        scraper = cloudscraper.create_scraper(
            browser={
                "browser": "chrome",
                "platform": "windows",
                "desktop": True,
                "mobile": False,
            },
            delay=10,
        )
        scraper.headers.update(headers)
        if self.jwt_token:
            scraper.cookies.set(
                "__Secure-yupp.session-token", self.jwt_token
            )
        response = scraper.get(url, timeout=10)
        return response.text
    except ImportError:
        # cloudscraper unavailable: fall back to aiohttp.
        import aiohttp
        async with aiohttp.ClientSession() as session:
            async with session.get(
                url,
                headers=headers,
                timeout=aiohttp.ClientTimeout(total=10),
            ) as response:
                return await response.text()

async def _extract_from_page(self, url: str, label: str) -> List[str]:
    """
    Shared implementation for HTML-page token extraction.

    Previously _extract_from_chat_page and _extract_from_main_page were
    near-identical copies; this helper deduplicates them. *label* keeps
    the original log messages ("chat page" / "main page") intact.

    Returns:
        Extracted tokens, or an empty list when none are found or any
        error occurs (errors are logged, never raised).
    """
    try:
        text = await self._fetch_page_text(url)
        tokens = self._extract_tokens_from_html(text)
        if tokens:
            print(
                f"[Yupp TokenExtractor] Extracted {len(tokens)} tokens "
                f"from {label}"
            )
            return tokens
    except Exception as e:
        # label.capitalize() reproduces "Chat page ..." / "Main page ...".
        print(f"[Yupp TokenExtractor] {label.capitalize()} extraction failed: {e}")
        if os.getenv("DEBUG_MODE", "").lower() == "true":
            import traceback
            traceback.print_exc()
    return []

async def _extract_from_chat_page(self) -> List[str]:
    """Extract tokens from the chat page HTML."""
    return await self._extract_from_page(YUPP_CHAT_URL, "chat page")

async def _extract_from_main_page(self) -> List[str]:
    """Extract tokens from the main page HTML."""
    return await self._extract_from_page(YUPP_BASE_URL, "main page")
    async def _extract_from_js_bundles(self) -> List[str]:
        """Extract tokens from the site's Next.js JavaScript bundles.

        Fetches the main page, collects ``<script src>`` URLs that look like
        Next.js chunk bundles, and scans each bundle's text for tokens.
        Returns the first bundle's tokens that reach MIN_REQUIRED_TOKENS,
        otherwise an empty list.
        """
        try:
            import aiohttp
            # Common Next.js bundle patterns
            bundle_patterns = [
                "/_next/static/chunks/",
                "/_next/static/app/",
            ]
            headers = self._get_headers()
            async with aiohttp.ClientSession() as session:
                # Try to fetch a page and extract script URLs
                async with session.get(
                    YUPP_BASE_URL,
                    headers=headers,
                    timeout=aiohttp.ClientTimeout(total=10),
                ) as response:
                    text = await response.text()
                # Extract script URLs
                script_urls = re.findall(r'src="([^"]*\.js[^"]*)"', text)
                for script_url in script_urls:
                    if any(pattern in script_url for pattern in bundle_patterns):
                        try:
                            # Resolve relative bundle paths against the base URL.
                            full_url = (
                                script_url
                                if script_url.startswith("http")
                                else f"{YUPP_BASE_URL}{script_url}"
                            )
                            async with session.get(
                                full_url,
                                headers=headers,
                                timeout=aiohttp.ClientTimeout(total=5),
                            ) as js_response:
                                js_text = await js_response.text()
                                tokens = self._extract_tokens_from_html(js_text)
                                if tokens and len(tokens) >= MIN_REQUIRED_TOKENS:
                                    print(
                                        f"[Yupp TokenExtractor] Extracted tokens "
                                        f"from JS bundle: {script_url}"
                                    )
                                    return tokens
                        except Exception:
                            # One broken bundle should not abort the scan.
                            continue
        except Exception as e:
            print(f"[Yupp TokenExtractor] JS bundle extraction failed: {e}")
            if os.getenv("DEBUG_MODE", "").lower() == "true":
                import traceback
                traceback.print_exc()
        return []
def _extract_tokens_from_html(self, html: str) -> List[str]:
"""Extract tokens from HTML/JS using multiple patterns"""
all_tokens = set()
for pattern in TOKEN_PATTERNS:
matches = re.findall(pattern, html, re.IGNORECASE)
all_tokens.update(matches)
# Filter to only 40-42 character hex strings (likely action tokens)
filtered_tokens = [
token
for token in all_tokens
if re.match(r"^[a-f0-9]{40,42}$", token.lower())
]
return list(filtered_tokens)
def _get_headers(self) -> Dict[str, str]:
"""Get request headers"""
headers = {
"User-Agent": (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0"
),
"Accept": (
"text/html,application/xhtml+xml,application/xml;q=0.9,"
"image/webp,*/*;q=0.8"
),
"Accept-Language": "en-US,en;q=0.9",
}
if self.jwt_token:
headers["Cookie"] = f"__Secure-yupp.session-token={self.jwt_token}"
return headers
# Global singleton instance
_token_extractor: Optional[TokenExtractor] = None
def get_token_extractor(
    jwt_token: Optional[str] = None,
    scraper: Optional["cloudscraper.CloudScraper"] = None,
) -> TokenExtractor:
    """Return the process-wide TokenExtractor, creating it on first use.

    Note: *jwt_token* and *scraper* only take effect on the first call;
    subsequent calls return the existing instance unchanged.
    """
    global _token_extractor
    if _token_extractor is not None:
        return _token_extractor
    _token_extractor = TokenExtractor(jwt_token=jwt_token, scraper=scraper)
    return _token_extractor
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/yupp/token_extractor.py",
"license": "GNU General Public License v3.0",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/CopilotSession.py | from __future__ import annotations
import json
import asyncio
import base64
from typing import AsyncIterator
try:
import zendriver as nodriver
from zendriver import cdp
has_nodriver = True
except ImportError:
has_nodriver = False
from .base_provider import AsyncAuthedProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages, MediaListType
from ..errors import MissingAuthError
from ..providers.response import *
from ..requests import get_nodriver_session
from ..image import is_accepted_format
from .helper import get_last_user_message
from .. import debug
class Conversation(JsonConversation):
    """Serializable handle identifying a Copilot chat thread to resume."""
    # Server-side chat id, taken from the "startMessage" websocket event.
    conversation_id: str
    def __init__(self, conversation_id: str):
        self.conversation_id = conversation_id
def extract_bucket_items(messages: Messages) -> list[dict]:
    """Collect pending bucket attachments from the message history.

    A bucket item is a dict content part carrying a "bucket_id" but no
    "name". Every assistant message resets the collection, so only items
    added after the last assistant turn are returned.
    """
    collected: list[dict] = []
    for entry in messages:
        content = entry.get("content") if isinstance(entry, dict) else None
        if isinstance(content, list):
            for part in content:
                if isinstance(part, dict) and "bucket_id" in part and "name" not in part:
                    collected.append(part)
        if entry.get("role") == "assistant":
            # Items consumed by a previous assistant reply are dropped.
            collected = []
    return collected
class CopilotSession(AsyncAuthedProvider, ProviderModelMixin):
    """Microsoft Copilot driven through a real browser session (nodriver).

    Opens copilot.microsoft.com, types the prompt into the page's textarea
    and streams the websocket frames the page receives back to the caller
    as g4f response objects (text chunks, images, citations, ...).
    """
    parent = "Copilot"
    label = "Microsoft Copilot (Session)"
    url = "https://copilot.microsoft.com"
    working = has_nodriver
    use_nodriver = has_nodriver
    active_by_default = True
    use_stream_timeout = False

    default_model = "Copilot"
    models = [default_model, "Think Deeper", "Smart (GPT-5)", "Study"]
    # NOTE(review): the "gpt-5" alias maps to "GPT-5", which is not listed in
    # `models` ("Smart (GPT-5)" is) -- confirm the intended target.
    model_aliases = {
        "o1": "Think Deeper",
        "gpt-4": default_model,
        "gpt-4o": default_model,
        "gpt-5": "GPT-5",
        "study": "Study",
    }
    lock = asyncio.Lock()

    @classmethod
    async def on_auth_async(cls, *args, **kwargs) -> AsyncIterator:
        # No explicit login step: authentication rides on the browser cookies.
        yield AuthResult()

    @classmethod
    async def create_authed(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 30,
        prompt: str = None,
        media: MediaListType = None,
        conversation: BaseConversation = None,
        **kwargs
    ) -> AsyncResult:
        """Submit *prompt* through the Copilot web UI and stream the reply.

        Args:
            model: Requested model name (not used by the UI flow itself).
            messages: Chat history; its last user message becomes the prompt
                when *prompt* is not given.
            proxy: Optional proxy URL for the browser session.
            timeout: Seconds to wait for each websocket frame.
            prompt: Explicit prompt text; overrides *messages*.
            media: Attachments (not uploaded by this session flow).
            conversation: Previous `Conversation` to resume, if any.

        Yields:
            Response objects parsed from Copilot's websocket events.

        Raises:
            MissingAuthError: If the stream ends before a "done" event,
                which usually means a missing or expired login.
        """
        async with get_nodriver_session(proxy=proxy) as session:
            if prompt is None:
                prompt = get_last_user_message(messages, False)
            # Resume an existing chat thread when a conversation is given.
            if conversation is not None:
                url = f"{cls.url}/chats/{conversation.conversation_id}"
            else:
                url = cls.url
            page = await session.get(url)
            await page.send(cdp.network.enable())
            # Funnel every websocket frame the page receives into a queue.
            queue = asyncio.Queue()
            page.add_handler(
                cdp.network.WebSocketFrameReceived,
                lambda event: queue.put_nowait((event.request_id, event.response.payload_data)),
            )
            textarea = await page.select("textarea")
            if textarea is not None:
                await textarea.send_keys(prompt)
            await asyncio.sleep(1)
            try:
                button = await page.select("[data-testid=\"submit-button\"]")
            except TimeoutError:
                button = None
            if button:
                await button.click()
            # A Cloudflare turnstile may block submission; click through it.
            try:
                turnstile = await page.select('#cf-turnstile')
            except TimeoutError:
                turnstile = None
            if turnstile:
                debug.log("Found Element: 'cf-turnstile'")
                await asyncio.sleep(3)
                await click_trunstile(page)
            done = False
            msg = None
            image_prompt: str = None
            last_msg = None
            sources = {}
            while not done:
                try:
                    request_id, msg_txt = await asyncio.wait_for(queue.get(), timeout)
                    msg = json.loads(msg_txt)
                except Exception:
                    # Timeout or an unparsable frame ends the stream.
                    # (Was a bare `except:`, which also swallowed task
                    # cancellation; Exception lets CancelledError propagate.)
                    break
                last_msg = msg
                if msg.get("event") == "startMessage":
                    yield Conversation(msg.get("conversationId"))
                elif msg.get("event") == "appendText":
                    yield msg.get("text")
                elif msg.get("event") == "generatingImage":
                    image_prompt = msg.get("prompt")
                elif msg.get("event") == "imageGenerated":
                    # Fixed: `{{...}}` previously built a *set* containing a
                    # dict, raising "unhashable type: 'dict'" at runtime.
                    yield ImageResponse(msg.get("url"), image_prompt, {"preview": msg.get("thumbnailUrl")})
                elif msg.get("event") == "done":
                    yield FinishReason("stop")
                    done = True
                elif msg.get("event") == "suggestedFollowups":
                    yield SuggestedFollowups(msg.get("suggestions"))
                    break
                elif msg.get("event") == "replaceText":
                    yield msg.get("text")
                elif msg.get("event") == "titleUpdate":
                    yield TitleGeneration(msg.get("title"))
                elif msg.get("event") == "citation":
                    sources[msg.get("url")] = msg
                    yield SourceLink(list(sources.keys()).index(msg.get("url")), msg.get("url"))
                elif msg.get("event") == "partialImageGenerated":
                    # Sniff the mime type from the first decoded bytes.
                    mime_type = is_accepted_format(base64.b64decode(msg.get("content")[:12]))
                    yield ImagePreview(f"data:{mime_type};base64,{msg.get('content')}", image_prompt)
                elif msg.get("event") == "chainOfThought":
                    yield Reasoning(msg.get("text"))
                elif msg.get("event") == "error":
                    raise RuntimeError(f"Error: {msg}")
                elif msg.get("event") not in ["received", "startMessage", "partCompleted", "connected"]:
                    debug.log(f"Copilot Message: {msg_txt[:100]}...")
            if not done:
                raise MissingAuthError(f"Invalid response: {last_msg}")
            if sources:
                yield Sources(sources.values())
if has_nodriver:
    async def click_trunstile(page: nodriver.Tab, element='document.getElementById("cf-turnstile")'):
        """Try to click through a Cloudflare turnstile widget.

        Probes a small diagonal of points near the element's top-left corner
        (up to 3 rounds of 15 clicks) until the element disappears from the
        DOM, which is detected by its bounding rect coming back empty.
        """
        for _ in range(3):
            size = None
            for idx in range(15):
                # Empty dict => the element is no longer in the DOM.
                size = await page.js_dumps(f'{element}?.getBoundingClientRect()||{{}}')
                debug.log(f"Found size: {size.get('x'), size.get('y')}")
                if "x" not in size:
                    break
                await page.flash_point(size.get("x") + idx * 3, size.get("y") + idx * 3)
                await page.mouse_click(size.get("x") + idx * 3, size.get("y") + idx * 3)
                await asyncio.sleep(2)
            if "x" not in size:
                break
    # NOTE(review): this log runs once at import time (it sits outside the
    # coroutine body) -- presumably it was meant to run inside click_trunstile.
    debug.log("Finished clicking trunstile.")
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/CopilotSession.py",
"license": "GNU General Public License v3.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/qwen/cookie_generator.py | import random
import time
from typing import Any, Callable, Dict, List, Optional, Union
from g4f import debug
# Import fingerprint generator (from the Python-converted fingerprint module)
# Make sure you have fingerprint.py in the same folder.
from g4f.Provider.qwen.fingerprint import generate_fingerprint # noqa: F401
# ==================== Config ====================
# Shuffled 64-character alphabet used by the ssxmod cookie obfuscation
# (order matters: it must match the site's JS decoder).
CUSTOM_BASE64_CHARS = "DGi0YA7BemWnQjCl4_bR3f8SKIF9tUz/xhr2oEOgPpac=61ZqwTudLkM5vHyNXsVJ"
# Hash field positions (need random regeneration)
HASH_FIELDS: Dict[int, str] = {
    16: "split",  # plugin hash: "count|hash" (replace only hash part)
    17: "full",  # canvas hash
    18: "full",  # UA hash 1
    31: "full",  # UA hash 2
    34: "full",  # URL hash
    36: "full",  # doc attribute hash (10-100)
}
# ==================== LZW Compression (JS-faithful port) ====================
def lzw_compress(data: Optional[str], bits: int, char_func: Callable[[int], str]) -> str:
    """LZW-compress *data* into a string of *bits*-wide symbols.

    Faithful port of the site's JavaScript compressor (LZ-String style):
    codes are emitted LSB-first into a bit accumulator that flushes a
    character via *char_func* every *bits* bits. New single characters are
    emitted literally (marker 0 for 8-bit chars, marker 1 for 16-bit chars);
    marker 2 terminates the stream. Keep the statement order intact -- the
    bit layout must match the JS decoder exactly.

    Args:
        data: Input text, or None (returns "").
        bits: Output symbol width in bits (6 for the custom base64).
        char_func: Maps an accumulated bit value to an output character.
    """
    if data is None:
        return ""
    dictionary: Dict[str, int] = {}
    dict_to_create: Dict[str, bool] = {}
    c = ""
    wc = ""
    w = ""
    enlarge_in = 2      # codes left before the code width grows
    dict_size = 3       # codes 0..2 are reserved markers
    num_bits = 2        # current output code width
    result: List[str] = []
    value = 0           # bit accumulator
    position = 0        # bits currently in the accumulator
    for i in range(len(data)):
        c = data[i]
        if c not in dictionary:
            dictionary[c] = dict_size
            dict_size += 1
            dict_to_create[c] = True
        wc = w + c
        if wc in dictionary:
            w = wc
        else:
            if w in dict_to_create:
                # output "w" as a raw char (8-bit or 16-bit)
                if ord(w[0]) < 256:
                    # write num_bits zeros
                    for _ in range(num_bits):
                        value = (value << 1)
                        if position == bits - 1:
                            position = 0
                            result.append(char_func(value))
                            value = 0
                        else:
                            position += 1
                    char_code = ord(w[0])
                    for _ in range(8):
                        value = (value << 1) | (char_code & 1)
                        if position == bits - 1:
                            position = 0
                            result.append(char_func(value))
                            value = 0
                        else:
                            position += 1
                        char_code >>= 1
                else:
                    # write a 1 marker
                    char_code = 1
                    for _ in range(num_bits):
                        value = (value << 1) | char_code
                        if position == bits - 1:
                            position = 0
                            result.append(char_func(value))
                            value = 0
                        else:
                            position += 1
                        char_code = 0
                    char_code = ord(w[0])
                    for _ in range(16):
                        value = (value << 1) | (char_code & 1)
                        if position == bits - 1:
                            position = 0
                            result.append(char_func(value))
                            value = 0
                        else:
                            position += 1
                        char_code >>= 1
                enlarge_in -= 1
                if enlarge_in == 0:
                    enlarge_in = 2 ** num_bits
                    num_bits += 1
                del dict_to_create[w]
            else:
                # output dictionary code for w
                char_code = dictionary[w]
                for _ in range(num_bits):
                    value = (value << 1) | (char_code & 1)
                    if position == bits - 1:
                        position = 0
                        result.append(char_func(value))
                        value = 0
                    else:
                        position += 1
                    char_code >>= 1
                enlarge_in -= 1
                if enlarge_in == 0:
                    enlarge_in = 2 ** num_bits
                    num_bits += 1
            dictionary[wc] = dict_size
            dict_size += 1
            w = c
    # flush remaining w
    if w != "":
        if w in dict_to_create:
            if ord(w[0]) < 256:
                for _ in range(num_bits):
                    value = (value << 1)
                    if position == bits - 1:
                        position = 0
                        result.append(char_func(value))
                        value = 0
                    else:
                        position += 1
                char_code = ord(w[0])
                for _ in range(8):
                    value = (value << 1) | (char_code & 1)
                    if position == bits - 1:
                        position = 0
                        result.append(char_func(value))
                        value = 0
                    else:
                        position += 1
                    char_code >>= 1
            else:
                char_code = 1
                for _ in range(num_bits):
                    value = (value << 1) | char_code
                    if position == bits - 1:
                        position = 0
                        result.append(char_func(value))
                        value = 0
                    else:
                        position += 1
                    char_code = 0
                char_code = ord(w[0])
                for _ in range(16):
                    value = (value << 1) | (char_code & 1)
                    if position == bits - 1:
                        position = 0
                        result.append(char_func(value))
                        value = 0
                    else:
                        position += 1
                    char_code >>= 1
            enlarge_in -= 1
            if enlarge_in == 0:
                enlarge_in = 2 ** num_bits
                num_bits += 1
            del dict_to_create[w]
        else:
            char_code = dictionary[w]
            for _ in range(num_bits):
                value = (value << 1) | (char_code & 1)
                if position == bits - 1:
                    position = 0
                    result.append(char_func(value))
                    value = 0
                else:
                    position += 1
                char_code >>= 1
            enlarge_in -= 1
            if enlarge_in == 0:
                enlarge_in = 2 ** num_bits
                num_bits += 1
    # end-of-stream marker (2)
    char_code = 2
    for _ in range(num_bits):
        value = (value << 1) | (char_code & 1)
        if position == bits - 1:
            position = 0
            result.append(char_func(value))
            value = 0
        else:
            position += 1
        char_code >>= 1
    # pad to complete a char
    while True:
        value = (value << 1)
        if position == bits - 1:
            result.append(char_func(value))
            break
        position += 1
    return "".join(result)
# ==================== Encoding ====================
def custom_encode(data: Optional[str], url_safe: bool) -> str:
    """LZW-compress *data* into the custom base64-like alphabet.

    When *url_safe* is False, the result is padded with '=' characters to a
    multiple of four, mirroring standard base64 padding.
    """
    if data is None:
        return ""
    encoded = lzw_compress(data, 6, lambda i: CUSTOM_BASE64_CHARS[i])
    if url_safe:
        return encoded
    padding = {1: "===", 2: "==", 3: "="}.get(len(encoded) % 4, "")
    return encoded + padding
# ==================== Helpers ====================
def random_hash() -> int:
    """Return a random unsigned 32-bit integer."""
    return random.randint(0, 2**32 - 1)
def generate_device_id() -> str:
    """Return a random 20-character lowercase-hex device id."""
    hex_digits = "0123456789abcdef"
    return "".join(random.choice(hex_digits) for _ in range(20))
# ==================== Data parse/process ====================
def parse_real_data(real_data: str) -> List[str]:
    """Split a caret-delimited ssxmod fingerprint into its component fields."""
    separator = "^"
    return real_data.split(separator)
def process_fields(fields: List[str]) -> List[Union[str, int]]:
    """Return a copy of *fields* with hash slots regenerated and the
    timestamp field refreshed.

    HASH_FIELDS names the indices to randomize; field 33 always receives
    the current time in milliseconds.
    """
    out: List[Union[str, int]] = list(fields)
    now_ms = int(time.time() * 1000)
    for idx, kind in HASH_FIELDS.items():
        if idx >= len(out):
            continue
        if kind == "split":
            # "count|hash": keep the count, replace only the hash part.
            pieces = str(out[idx]).split("|")
            if len(pieces) == 2:
                out[idx] = f"{pieces[0]}|{random_hash()}"
        elif kind == "full":
            # Index 36 is a small doc-attribute count, not a 32-bit hash.
            out[idx] = random.randint(10, 100) if idx == 36 else random_hash()
    if 33 < len(out):
        out[33] = now_ms
    return out
# ==================== Cookie generation ====================
def generate_cookies(
    real_data: Optional[str] = None,
    fingerprint_options: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """Build a fresh pair of ssxmod_itna / ssxmod_itna2 cookie values.

    Args:
        real_data: A pre-captured 37-field fingerprint string; when None a
            synthetic one is generated.
        fingerprint_options: Passed through to generate_fingerprint().

    Returns:
        Dict with the two encoded cookies, the embedded timestamp (ms) and
        the raw caret-joined payloads for debugging.
    """
    if fingerprint_options is None:
        fingerprint_options = {}
    fingerprint = real_data or generate_fingerprint(fingerprint_options)
    fields = parse_real_data(fingerprint)
    # Randomize hash slots and refresh the timestamp field.
    processed_fields = process_fields(fields)
    # ssxmod_itna (37 fields)
    ssxmod_itna_data = "^".join(map(str, processed_fields))
    ssxmod_itna = "1-" + custom_encode(ssxmod_itna_data, True)
    # ssxmod_itna2 (18 fields)
    ssxmod_itna2_data = "^".join(map(str, [
        processed_fields[0],  # device id
        processed_fields[1],  # sdk version
        processed_fields[23],  # mode (P/M)
        0, "", 0, "", "", 0,  # event-related (empty in P mode)
        0, 0,
        processed_fields[32],  # constant (11)
        processed_fields[33],  # current timestamp
        0, 0, 0, 0, 0
    ]))
    ssxmod_itna2 = "1-" + custom_encode(ssxmod_itna2_data, True)
    return {
        "ssxmod_itna": ssxmod_itna,
        "ssxmod_itna2": ssxmod_itna2,
        "timestamp": int(processed_fields[33]),
        "rawData": ssxmod_itna_data,
        "rawData2": ssxmod_itna2_data,
    }
def generate_batch(
    count: int = 10,
    real_data: Optional[str] = None,
    fingerprint_options: Optional[Dict[str, Any]] = None
) -> List[Dict[str, Any]]:
    """Generate *count* independent ssxmod cookie sets."""
    batch: List[Dict[str, Any]] = []
    for _ in range(count):
        batch.append(generate_cookies(real_data, fingerprint_options or {}))
    return batch
# ssxmod_manager.py
"""
SSXMOD Cookie Manager
Responsible for generating and periodically refreshing ssxmod_itna and ssxmod_itna2 cookies.
"""
# ssxmod_manager_async.py
"""
Async SSXMOD Cookie Manager (asyncio)
Generates and periodically refreshes ssxmod_itna and ssxmod_itna2 cookies.
"""
import asyncio
from typing import Any, Dict, Optional
# Global cookie store
_current_cookies: Dict[str, Any] = {
    "ssxmod_itna": "",
    "ssxmod_itna2": "",
    "timestamp": 0,
}
# Refresh interval (15 minutes) in seconds
REFRESH_INTERVAL_SECONDS = 15 * 60
# Async state
# NOTE(review): Lock/Event are created at import time; on Python < 3.10 these
# primitives could bind to the wrong event loop -- confirm the target version.
_lock = asyncio.Lock()
_task: Optional[asyncio.Task] = None
_stop_event = asyncio.Event()
async def refresh_cookies():
    """Regenerate the global ssxmod cookies and return the current store.

    On failure the previous cookie values are left untouched and the error
    is logged; the (possibly stale) store is still returned.
    """
    global _current_cookies
    try:
        # generate_cookies() is CPU-bound sync; run it off the event loop.
        result = await asyncio.to_thread(generate_cookies)
        async with _lock:
            # Replace the whole dict atomically under the lock.
            _current_cookies = {
                "ssxmod_itna": result["ssxmod_itna"],
                "ssxmod_itna2": result["ssxmod_itna2"],
                "timestamp": result["timestamp"],
            }
        debug.log("SSXMOD Cookie 已刷新", "SSXMOD")
    except Exception as e:
        debug.error("SSXMOD Cookie 刷新失败", "SSXMOD", "", str(e))
    return _current_cookies
async def _refresh_loop() -> None:
    """Refresh immediately, then every REFRESH_INTERVAL_SECONDS until the
    stop event fires."""
    try:
        await refresh_cookies()  # refresh once right away
        while True:
            try:
                await asyncio.wait_for(
                    _stop_event.wait(), timeout=REFRESH_INTERVAL_SECONDS
                )
            except asyncio.TimeoutError:
                await refresh_cookies()  # interval elapsed -> refresh again
            else:
                break  # stop event was set
    finally:
        # Leave a clean state so the manager can be restarted.
        _stop_event.clear()
def init_ssxmod_manager() -> None:
    """Start the background refresh task (idempotent).

    Must be called while an event loop is running, e.g. from an async main
    or a FastAPI startup hook.
    """
    global _task
    still_running = _task is not None and not _task.done()
    if still_running:
        return
    _stop_event.clear()
    _task = asyncio.create_task(_refresh_loop())
    debug.log(
        f"SSXMOD 管理器已启动,刷新间隔: {REFRESH_INTERVAL_SECONDS / 60:.0f} 分钟",
        "SSXMOD",
    )
async def stop_refresh() -> None:
    """Signal the refresh loop to stop and wait for the task to finish."""
    global _task
    if _task is None:
        return
    _stop_event.set()
    try:
        await _task
    finally:
        _task = None
    debug.log("SSXMOD 定时刷新已停止", "SSXMOD")
async def get_ssxmod_itna() -> str:
    """Return the current ssxmod_itna cookie value ("" if never refreshed)."""
    async with _lock:
        value = _current_cookies.get("ssxmod_itna", "")
        return str(value)
async def get_ssxmod_itna2() -> str:
    """Return the current ssxmod_itna2 cookie value ("" if never refreshed)."""
    async with _lock:
        value = _current_cookies.get("ssxmod_itna2", "")
        return str(value)
async def get_cookies() -> Dict[str, Any]:
    """Return a snapshot copy of the full cookie store."""
    async with _lock:
        snapshot = dict(_current_cookies)
    return snapshot
# -----------------------
# Example usage
# -----------------------
if __name__ == "__main__":
raw = generate_fingerprint()
data = raw.encode("utf-8")
import zlib
compressed = zlib.compress(data)
import base64
b64_payload = base64.b64encode(compressed).decode("ascii")
print(data)
header_value = f"231!{b64_payload}"
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/cookie_generator.py",
"license": "GNU General Public License v3.0",
"lines": 384,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/qwen/fingerprint.py | import random
import time
from typing import Dict, List, Any
# =========================
# DEFAULT TEMPLATE
# =========================
# Baseline 37-field fingerprint values (macOS/Chrome profile). Keys named
# fieldN mirror unidentified positions in the site's own payload -- their
# semantics are unknown; treat the values as opaque.
DEFAULT_TEMPLATE: Dict[str, Any] = {
    "deviceId": "84985177a19a010dea49",
    "sdkVersion": "websdk-2.3.15d",
    "initTimestamp": "1765348410850",
    "field3": "91",
    "field4": "1|15",
    "language": "zh-CN",
    "timezoneOffset": "-480",
    "colorDepth": "16705151|12791",
    "screenInfo": "1470|956|283|797|158|0|1470|956|1470|798|0|0",
    "field9": "5",
    "platform": "MacIntel",
    "field11": "10",
    "webglRenderer": (
        "ANGLE (Apple, ANGLE Metal Renderer: Apple M4, Unspecified Version)"
        "|Google Inc. (Apple)"
    ),
    "field13": "30|30",
    "field14": "0",
    "field15": "28",
    "pluginCount": "5",
    "vendor": "Google Inc.",
    "field29": "8",
    "touchInfo": "-1|0|0|0|0",
    "field32": "11",
    "field35": "0",
    "mode": "P",
}
# =========================
# PRESETS
# =========================
# Pipe-delimited screen metrics selectable via generate_fingerprint(screen=...).
SCREEN_PRESETS = {
    "1920x1080": "1920|1080|283|1080|158|0|1920|1080|1920|922|0|0",
    "2560x1440": "2560|1440|283|1440|158|0|2560|1440|2560|1282|0|0",
    "1470x956": "1470|956|283|797|158|0|1470|956|1470|798|0|0",
    "1440x900": "1440|900|283|900|158|0|1440|900|1440|742|0|0",
    "1536x864": "1536|864|283|864|158|0|1536|864|1536|706|0|0",
}
# Platform/GPU combinations selectable via generate_fingerprint(platform=...).
PLATFORM_PRESETS = {
    "macIntel": {
        "platform": "MacIntel",
        "webglRenderer": (
            "ANGLE (Apple, ANGLE Metal Renderer: Apple M4, Unspecified Version)"
            "|Google Inc. (Apple)"
        ),
        "vendor": "Google Inc.",
    },
    "macM1": {
        "platform": "MacIntel",
        "webglRenderer": (
            "ANGLE (Apple, ANGLE Metal Renderer: Apple M1, Unspecified Version)"
            "|Google Inc. (Apple)"
        ),
        "vendor": "Google Inc.",
    },
    "win64": {
        "platform": "Win32",
        "webglRenderer": (
            "ANGLE (NVIDIA, NVIDIA GeForce RTX 3080 Direct3D11 "
            "vs_5_0 ps_5_0, D3D11)|Google Inc. (NVIDIA)"
        ),
        "vendor": "Google Inc.",
    },
    "linux": {
        "platform": "Linux x86_64",
        "webglRenderer": (
            "ANGLE (Intel, Mesa Intel(R) UHD Graphics 630, OpenGL 4.6)"
            "|Google Inc. (Intel)"
        ),
        "vendor": "Google Inc.",
    },
}
# Locale + UTC-offset pairs selectable via generate_fingerprint(locale=...).
LANGUAGE_PRESETS = {
    "zh-CN": {"language": "zh-CN", "timezoneOffset": "-480"},
    "zh-TW": {"language": "zh-TW", "timezoneOffset": "-480"},
    "en-US": {"language": "en-US", "timezoneOffset": "480"},
    "ja-JP": {"language": "ja-JP", "timezoneOffset": "-540"},
    "ko-KR": {"language": "ko-KR", "timezoneOffset": "-540"},
}
# =========================
# HELPERS
# =========================
def generate_device_id() -> str:
    """Generate a 20-character hex device ID."""
    hex_alphabet = "0123456789abcdef"
    picks = [random.choice(hex_alphabet) for _ in range(20)]
    return "".join(picks)
def generate_hash() -> int:
    """Generate a 32-bit unsigned random hash."""
    return random.randint(0, 2**32 - 1)
# =========================
# CORE LOGIC
# =========================
def generate_fingerprint(options: Dict[str, Any] = None) -> str:
    """Build a caret-delimited 37-field browser fingerprint string.

    Args:
        options: Optional configuration:
            - "platform": key into PLATFORM_PRESETS
            - "screen": key into SCREEN_PRESETS
            - "locale": key into LANGUAGE_PRESETS
            - "custom": dict merged over the template last
            - "deviceId": fixed device id instead of a random one

    Returns:
        The 37 fields joined with "^"; field 33 carries the current time in
        milliseconds and the hash slots are freshly randomized.
    """
    if options is None:
        options = {}
    config = DEFAULT_TEMPLATE.copy()
    # platform preset
    platform = options.get("platform")
    if platform in PLATFORM_PRESETS:
        config.update(PLATFORM_PRESETS[platform])
    # screen preset
    screen = options.get("screen")
    if screen in SCREEN_PRESETS:
        config["screenInfo"] = SCREEN_PRESETS[screen]
    # language preset
    locale = options.get("locale")
    if locale in LANGUAGE_PRESETS:
        config.update(LANGUAGE_PRESETS[locale])
    # custom overrides
    if "custom" in options and isinstance(options["custom"], dict):
        config.update(options["custom"])
    device_id = options.get("deviceId") or generate_device_id()
    current_timestamp = int(time.time() * 1000)
    # Randomized hash slots (plugin/canvas/UA/URL/doc-attribute).
    plugin_hash = generate_hash()
    canvas_hash = generate_hash()
    ua_hash1 = generate_hash()
    ua_hash2 = generate_hash()
    url_hash = generate_hash()
    doc_hash = random.randint(10, 100)
    # Positional layout must match the site's decoder; do not reorder.
    fields: List[Any] = [
        device_id,
        config["sdkVersion"],
        config["initTimestamp"],
        config["field3"],
        config["field4"],
        config["language"],
        config["timezoneOffset"],
        config["colorDepth"],
        config["screenInfo"],
        config["field9"],
        config["platform"],
        config["field11"],
        config["webglRenderer"],
        config["field13"],
        config["field14"],
        config["field15"],
        f'{config["pluginCount"]}|{plugin_hash}',
        canvas_hash,
        ua_hash1,
        "1",
        "0",
        "1",
        "0",
        config["mode"],
        "0",
        "0",
        "0",
        "416",
        config["vendor"],
        config["field29"],
        config["touchInfo"],
        ua_hash2,
        config["field32"],
        current_timestamp,
        url_hash,
        config["field35"],
        doc_hash,
    ]
    return "^".join(map(str, fields))
def generate_fingerprint_batch(count: int, options: Dict[str, Any] = None) -> List[str]:
    """Generate *count* independent fingerprint strings."""
    batch: List[str] = []
    for _ in range(count):
        batch.append(generate_fingerprint(options))
    return batch
def parse_fingerprint(fingerprint: str) -> Dict[str, Any]:
    """Decode a caret-delimited fingerprint into its named components.

    The "raw" entry carries the full field list for positions that have no
    symbolic name.
    """
    fields = fingerprint.split("^")
    field_index = {
        "deviceId": 0,
        "sdkVersion": 1,
        "initTimestamp": 2,
        "language": 5,
        "timezoneOffset": 6,
        "platform": 10,
        "webglRenderer": 12,
        "mode": 23,
        "vendor": 28,
        "timestamp": 33,
    }
    parsed: Dict[str, Any] = {name: fields[i] for name, i in field_index.items()}
    parsed["raw"] = fields
    return parsed
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/fingerprint.py",
"license": "GNU General Public License v3.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/qwen/generate_ua.py | import random
import time
import base64
import hashlib
from typing import Dict, List, Any, Optional
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
import json
# =========================
# DEFAULT TEMPLATE
# =========================
# Baseline 37-field fingerprint values (macOS/Chrome profile).
# NOTE(review): duplicates the template in fingerprint.py -- keep the two in
# sync, or import one from the other, if either changes.
DEFAULT_TEMPLATE: Dict[str, Any] = {
    "deviceId": "84985177a19a010dea49",
    "sdkVersion": "websdk-2.3.15d",
    "initTimestamp": "1765348410850",
    "field3": "91",
    "field4": "1|15",
    "language": "zh-CN",
    "timezoneOffset": "-480",
    "colorDepth": "16705151|12791",
    "screenInfo": "1470|956|283|797|158|0|1470|956|1470|798|0|0",
    "field9": "5",
    "platform": "MacIntel",
    "field11": "10",
    "webglRenderer": (
        "ANGLE (Apple, ANGLE Metal Renderer: Apple M4, Unspecified Version)"
        "|Google Inc. (Apple)"
    ),
    "field13": "30|30",
    "field14": "0",
    "field15": "28",
    "pluginCount": "5",
    "vendor": "Google Inc.",
    "field29": "8",
    "touchInfo": "-1|0|0|0|0",
    "field32": "11",
    "field35": "0",
    "mode": "P",
}
# =========================
# BX-UA GENERATOR
# =========================
class BXUAGenerator:
    """Builds "bx-ua" header values: an AES-CBC-encrypted, base64-encoded
    JSON payload derived from a fingerprint string.

    NOTE(review): this scheme is reverse-engineered; the payload layout and
    key derivation are assumptions to be validated against the real site.
    """
    def __init__(self):
        self.version = "231"     # protocol version prefix of the header
        self.aes_key = None      # unused; key/IV are derived per call
        self.aes_iv = None
    def _generate_key_iv(self, seed_data: str) -> tuple:
        """Generate AES key and IV from seed data"""
        # Create deterministic key/IV from seed: SHA-256 digest split into
        # two 16-byte halves.
        seed_hash = hashlib.sha256(seed_data.encode()).digest()
        key = seed_hash[:16]  # 128-bit key
        iv = seed_hash[16:32]  # 128-bit IV
        return key, iv
    def _encrypt_aes_cbc(self, data: bytes, key: bytes, iv: bytes) -> bytes:
        """Encrypt data using AES-CBC with PKCS#7 padding."""
        cipher = AES.new(key, AES.MODE_CBC, iv)
        padded_data = pad(data, AES.block_size)
        encrypted = cipher.encrypt(padded_data)
        return encrypted
    def _create_payload(self, fingerprint: str, timestamp: Optional[int] = None) -> Dict[str, Any]:
        """Create the payload structure to be encrypted.

        Extracts the named positions from the 37-field fingerprint and adds
        a random nonce plus an MD5-based checksum.
        """
        if timestamp is None:
            timestamp = int(time.time() * 1000)
        # Extract components from fingerprint
        fields = fingerprint.split("^")
        payload = {
            "v": self.version,
            "ts": timestamp,
            "fp": fingerprint,
            "d": {
                "deviceId": fields[0],
                "sdkVer": fields[1],
                "lang": fields[5],
                "tz": fields[6],
                "platform": fields[10],
                "renderer": fields[12],
                "mode": fields[23],
                "vendor": fields[28],
            },
            "rnd": random.randint(1000, 9999),
            "seq": 1,
        }
        # Add checksum: first 8 hex chars of md5(fingerprint + ts + rnd).
        checksum_str = f"{fingerprint}{timestamp}{payload['rnd']}"
        payload["cs"] = hashlib.md5(checksum_str.encode()).hexdigest()[:8]
        return payload
    def generate(self, fingerprint: str, options: Optional[Dict[str, Any]] = None) -> str:
        """
        Generate bx-ua header value
        Args:
            fingerprint: The fingerprint string generated by generate_fingerprint()
            options: Optional configuration
                - timestamp: Custom timestamp (default: current time)
                - seed: Custom seed for key generation (default: the
                  fingerprint itself)
        Returns:
            The header string "231!<base64(AES-CBC(payload JSON))>".
        """
        if options is None:
            options = {}
        # Get timestamp
        timestamp = options.get("timestamp")
        if timestamp is None:
            timestamp = int(time.time() * 1000)
        # Create payload
        payload = self._create_payload(fingerprint, timestamp)
        # Convert to JSON (compact separators to match a JS JSON.stringify)
        payload_json = json.dumps(payload, separators=(',', ':'))
        # Generate key and IV
        seed = options.get("seed", fingerprint)
        key, iv = self._generate_key_iv(seed)
        # Encrypt
        encrypted = self._encrypt_aes_cbc(payload_json.encode(), key, iv)
        # Base64 encode
        encrypted_b64 = base64.b64encode(encrypted).decode()
        # Return in format: version!base64_encoded_data
        return f"{self.version}!{encrypted_b64}"
    def batch_generate(self, fingerprints: List[str], options: Optional[Dict[str, Any]] = None) -> List[str]:
        """Generate multiple bx-ua values"""
        return [self.generate(fp, options) for fp in fingerprints]
# =========================
# FINGERPRINT GENERATOR (From your code)
# =========================
def generate_device_id() -> str:
    """Generate a 20-character hex device ID."""
    alphabet = "0123456789abcdef"
    return "".join(random.choice(alphabet) for _ in range(20))
def generate_hash() -> int:
    """Generate a 32-bit unsigned random hash."""
    return random.randint(0, 2**32 - 1)
def generate_fingerprint(options: Dict[str, Any] = None) -> str:
    """Build a caret-delimited, 37-field browser fingerprint string.

    Args:
        options: Optional configuration:
            - "deviceId": reuse a fixed device id instead of a random one
            - "custom": dict merged over DEFAULT_TEMPLATE
          Unlike the fingerprint.py variant, this module defines no
          platform/screen/locale presets, so those keys are accepted but
          have no effect (the former placeholder branches were no-ops and
          have been removed).

    Returns:
        The 37 fields joined with "^"; field 33 carries the current time in
        milliseconds and the hash slots are freshly randomized.
    """
    if options is None:
        options = {}
    config = DEFAULT_TEMPLATE.copy()
    # Apply caller overrides on top of the template.
    if "custom" in options and isinstance(options["custom"], dict):
        config.update(options["custom"])
    device_id = options.get("deviceId") or generate_device_id()
    current_timestamp = int(time.time() * 1000)
    # Randomized hash slots (plugin/canvas/UA/URL/doc-attribute).
    plugin_hash = generate_hash()
    canvas_hash = generate_hash()
    ua_hash1 = generate_hash()
    ua_hash2 = generate_hash()
    url_hash = generate_hash()
    doc_hash = random.randint(10, 100)
    # Positional layout must match the site's decoder; do not reorder.
    fields: List[Any] = [
        device_id,
        config["sdkVersion"],
        config["initTimestamp"],
        config["field3"],
        config["field4"],
        config["language"],
        config["timezoneOffset"],
        config["colorDepth"],
        config["screenInfo"],
        config["field9"],
        config["platform"],
        config["field11"],
        config["webglRenderer"],
        config["field13"],
        config["field14"],
        config["field15"],
        f'{config["pluginCount"]}|{plugin_hash}',
        canvas_hash,
        ua_hash1,
        "1",
        "0",
        "1",
        "0",
        config["mode"],
        "0",
        "0",
        "0",
        "416",
        config["vendor"],
        config["field29"],
        config["touchInfo"],
        ua_hash2,
        config["field32"],
        current_timestamp,
        url_hash,
        config["field35"],
        doc_hash,
    ]
    return "^".join(map(str, fields))
# =========================
# USAGE EXAMPLE
# =========================
def example_usage():
    """Example of how to generate bx-ua headers.

    Builds one fingerprint, wraps it in a bx-ua header and prints both,
    then echoes the parsed fingerprint fields for inspection.
    """
    # Initialize generators
    fp_gen = BXUAGenerator()
    # Generate a fingerprint
    fingerprint = generate_fingerprint({
        "deviceId": "84985177a19a010dea49",
        "custom": {
            "language": "zh-CN",
            "platform": "MacIntel",
        }
    })
    print("Generated Fingerprint:")
    print(fingerprint[:100] + "...")
    print()
    # Generate bx-ua header (fixed seed => deterministic key/IV)
    bx_ua = fp_gen.generate(fingerprint, {
        "timestamp": int(time.time() * 1000),
        "seed": "consistent_seed_for_deterministic_output"
    })
    print(bx_ua)
    print("Generated bx-ua header:")
    print(bx_ua[:100] + "...")
    print(f"Total length: {len(bx_ua)}")
    print()
    # Parse the fingerprint (for debugging)
    fields = fingerprint.split("^")
    parsed = {
        "deviceId": fields[0],
        "sdkVersion": fields[1],
        "language": fields[5],
        "timezoneOffset": fields[6],
        "platform": fields[10],
        "webglRenderer": fields[12],
        "mode": fields[23],
        "vendor": fields[28],
        "timestamp": fields[33],
    }
    print("Parsed fingerprint info:")
    for key, value in parsed.items():
        print(f"  {key}: {value}")
    return bx_ua
def batch_example():
    """Example of batch generation"""
    generator = BXUAGenerator()

    # Build several independent fingerprints, one per random device id.
    device_prints = []
    for _ in range(3):
        device_prints.append(generate_fingerprint({"deviceId": generate_device_id()}))

    # Produce a bx-ua value for every fingerprint in a single call.
    headers = generator.batch_generate(device_prints, {
        "seed": "batch_seed"
    })

    print("Batch Generation Results:")
    for index, (fp, header) in enumerate(zip(device_prints, headers)):
        print(f"\n{index + 1}. Device ID: {fp.split('^')[0]}")
        print(f" bx-ua: {header[:80]}...")

    return headers
# Demo entry point: runs both examples and prints a ready-to-paste snippet.
if __name__ == "__main__":
    print("=" * 60)
    print("BX-UA Header Generator")
    print("=" * 60)
    # Run single example
    print("\n1. Single Generation Example:")
    print("-" * 40)
    example_ua = example_usage()
    print("\n" + "=" * 60)
    # Run batch example
    print("\n2. Batch Generation Example:")
    print("-" * 40)
    batch_ua = batch_example()
    print("\n" + "=" * 60)
    # Emit a copy/paste-ready `requests` snippet using the generated header.
    print("\nTo use in requests:")
    print("```python")
    print("import requests")
    print("")
    print("headers = {")
    print(' "bx-ua": f"{example_ua}",')
    print(' "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) ...",')
    print(' "Accept": "application/json, text/plain, */*",')
    print(' "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",')
    print(' "Accept-Encoding": "gzip, deflate, br",')
    print(' "Connection": "keep-alive",')
    print("}")
    print('')
    print('response = requests.get("https://example.com/api", headers=headers)')
print("```") | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/generate_ua.py",
"license": "GNU General Public License v3.0",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:etc/examples/aibadgr.py | """
Example usage of AI Badgr provider.
AI Badgr is an OpenAI-compatible API provider.
Get your API key at: https://aibadgr.com/api-keys
Usage:
export AIBADGR_API_KEY="your-api-key-here"
python aibadgr.py
"""
from g4f.client import Client
from g4f.Provider import AIBadgr
# Using AI Badgr with the g4f client
# NOTE: all three examples below perform live API calls, so they need a
# valid key and network access.
client = Client(
    provider=AIBadgr,
    api_key="your-api-key-here" # Or set AIBADGR_API_KEY environment variable
)
# Example 1: Simple chat completion
print("Example 1: Simple chat completion")
response = client.chat.completions.create(
    model="gpt-4o-mini", # AI Badgr supports OpenAI-compatible models
    messages=[{"role": "user", "content": "Hello! What can you help me with?"}]
)
print(response.choices[0].message.content)
print()
# Example 2: Streaming response
print("Example 2: Streaming response")
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Count from 1 to 5"}],
    stream=True
)
# Each chunk carries a delta; print it as it arrives without buffering.
for chunk in response:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print("\n")
# Example 3: With system message
print("Example 3: With system message")
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful assistant that speaks like a pirate."},
        {"role": "user", "content": "Tell me about the weather"}
    ]
)
print(response.choices[0].message.content)
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/examples/aibadgr.py",
"license": "GNU General Public License v3.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/needs_auth/AIBadgr.py | from __future__ import annotations
from ..template import OpenaiTemplate
class AIBadgr(OpenaiTemplate):
    """OpenAI-compatible chat provider backed by the AI Badgr API.

    Authentication is required for both completions and model listing;
    API keys are issued at ``login_url``.
    """
    label = "AI Badgr"
    url = "https://aibadgr.com"
    login_url = "https://aibadgr.com/api-keys"  # where users obtain API keys
    base_url = "https://aibadgr.com/api/v1"  # OpenAI-compatible REST root
    working = True
    needs_auth = True
    models_needs_auth = True  # fetching the model list also requires a key
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/AIBadgr.py",
"license": "GNU General Public License v3.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/GradientNetwork.py | from __future__ import annotations
import json
from ..typing import AsyncResult, Messages
from ..providers.response import Reasoning, JsonResponse
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
    """
    Provider for chat.gradient.network
    Supports streaming text generation with Qwen and GPT OSS models.
    """
    label = "Gradient Network"
    url = "https://chat.gradient.network"
    api_endpoint = "https://chat.gradient.network/api/generate"
    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "GPT OSS 120B"
    models = [
        default_model,
        "Qwen3 235B",
    ]
    model_aliases = {
        "qwen-3-235b": "Qwen3 235B",
        "qwen3-235b": "Qwen3 235B",
        "gpt-oss-120b": "GPT OSS 120B",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        enable_thinking: bool = True,
        **kwargs
    ) -> AsyncResult:
        """
        Create an async generator for streaming chat responses.

        Args:
            model: The model name to use
            messages: List of message dictionaries
            proxy: Optional proxy URL
            enable_thinking: Enable the thinking/analysis channel (maps to enableThinking in API)
            **kwargs: Additional arguments

        Yields:
            str: Content chunks from the response
            Reasoning: Reasoning content when enable_thinking is True
        """
        model = cls.get_model(model)

        headers = {
            "Accept": "application/x-ndjson",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Referer": f"{cls.url}/",
        }

        payload = {
            "clusterMode": "nvidia" if "GPT OSS" in model else "hybrid",
            "model": model,
            "messages": messages,
        }
        # Only include the flag when enabled; absence means "off" to the API.
        if enable_thinking:
            payload["enableThinking"] = enable_thinking

        async with StreamSession(headers=headers, proxy=proxy, impersonate="chrome") as session:
            async with session.post(
                cls.api_endpoint,
                json=payload,
            ) as response:
                response.raise_for_status()
                async for line in response.iter_lines():
                    if not line:
                        continue
                    try:
                        data = json.loads(line)
                    except json.JSONDecodeError:
                        # FIX: skip partial/non-JSON NDJSON lines instead of
                        # re-raising — the previous `raise` contradicted this
                        # intent and aborted the whole stream mid-response.
                        continue
                    yield JsonResponse.from_dict(data)
                    if data.get("type") == "reply":
                        # Response chunks carry content and/or reasoningContent.
                        reply_data = data.get("data", {})
                        reasoning_content = reply_data.get("reasoningContent")
                        if reasoning_content:
                            yield Reasoning(reasoning_content)
                        content = reply_data.get("content")
                        if content:
                            yield content
                    # clusterInfo / blockUpdate GPU-visualization messages are ignored.
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/GradientNetwork.py",
"license": "GNU General Public License v3.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/ItalyGPT.py | from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import DEFAULT_HEADERS
from aiohttp import ClientSession
class ItalyGPT(AsyncGeneratorProvider, ProviderModelMixin):
    """Streaming chat provider backed by italygpt.it."""
    label = "ItalyGPT"
    url = "https://italygpt.it"
    working = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o"
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield decoded response chunks from the italygpt.it chat endpoint."""
        model = cls.get_model(model)

        # Browser-like headers layered on top of the shared defaults.
        request_headers = dict(DEFAULT_HEADERS)
        request_headers.update({
            "content-type": "application/json",
            "origin": "https://italygpt.it",
            "referer": "https://italygpt.it/",
        })

        body = {
            "messages": messages,
            "stream": stream,
        }

        async with ClientSession() as session:
            async with session.post(
                f"{cls.url}/api/chat",
                json=body,
                headers=request_headers,
                proxy=proxy,
            ) as response:
                response.raise_for_status()
                async for part in response.content.iter_any():
                    if not part:
                        continue
                    yield part.decode()
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/ItalyGPT.py",
"license": "GNU General Public License v3.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/hf_space/BAAI_Ling.py | from __future__ import annotations
import aiohttp
import json
import uuid
from ...typing import AsyncResult, Messages
from ...providers.response import JsonConversation
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_last_user_message, get_system_prompt
from ... import debug
class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for the "Ling & Ring Playground" Hugging Face Gradio space.

    A job is enqueued with a POST to the Gradio ``queue/join`` endpoint, then
    the generated text is read incrementally from the ``queue/data`` SSE
    stream for the same session hash.
    """
    label = "Ling & Ring Playground"
    url = "https://cafe3310-ling-series-spaces.hf.space"
    api_endpoint = f"{url}/gradio_api/queue/join"
    working = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = False

    default_model = "ling-1t"
    model_aliases = {
        "ling": default_model,
    }
    models = ['ling-mini-2.0', 'ling-1t', 'ling-flash-2.0', 'ring-1t', 'ring-flash-2.0', 'ring-mini-2.0']

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        """Stream a completion from the Gradio space.

        Args:
            model: Model name or alias (see ``models``).
            messages: Conversation history.
            proxy: Optional proxy URL.
            conversation: Prior conversation carrying the Gradio session hash.

        Yields:
            str: Incremental chunks of the generated answer.
        """
        # A Gradio session is identified by a short random hash; reuse it on
        # follow-up turns so the space keeps per-session state.
        is_new_conversation = conversation is None or not hasattr(conversation, 'session_hash')
        if is_new_conversation:
            conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace('-', '')[:12])

        model = cls.get_model(model)
        # First turn sends the whole formatted history; later turns only the
        # newest user message.
        prompt = format_prompt(messages) if is_new_conversation else get_last_user_message(messages)

        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': cls.url,
            'referer': f'{cls.url}/',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
        }
        # fn_index/trigger_id identify the chat handler inside the Gradio app.
        # FIX: a leftover hard-coded debug payload that immediately overwrote
        # this request (fixed session hash, fixed "hi" prompt — so user input
        # was never sent) has been removed.
        payload = {
            "data": [
                prompt,
                [
                    [
                        None,
                        "Hello! I'm Ling. Try selecting a scenario and a message example below to get started."
                    ]
                ],
                get_system_prompt(messages),
                1,
                model
            ],
            "event_data": None,
            "fn_index": 11,
            "trigger_id": 33,
            "session_hash": conversation.session_hash
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
                await raise_for_status(response)
                # Response body must be consumed for the request to complete
                await response.json()

            data_url = f'{cls.url}/gradio_api/queue/data?session_hash={conversation.session_hash}'
            headers_data = {
                'accept': 'text/event-stream',
                'referer': f'{cls.url}/',
                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
            }
            async with session.get(data_url, headers=headers_data, proxy=proxy) as response:
                full_response = ""
                async for line in response.content:
                    decoded_line = line.decode('utf-8')
                    if not decoded_line.startswith('data: '):
                        continue
                    try:
                        json_data = json.loads(decoded_line[6:])
                    except json.JSONDecodeError:
                        debug.log("Could not parse JSON:", decoded_line)
                        continue
                    if json_data.get('msg') == 'process_completed':
                        break
                    if json_data.get('msg') != 'process_generating':
                        continue
                    if 'output' not in json_data or 'data' not in json_data['output']:
                        continue
                    output_data = json_data['output']['data']
                    if output_data and len(output_data) > 0:
                        # NOTE(review): the nested-list layout below is specific
                        # to this Gradio app's current version — re-check the
                        # indices if the space is updated.
                        parts = output_data[0][0]
                        if len(parts) == 2:
                            new_text = output_data[0][1].pop()
                            full_response += new_text
                            yield new_text
                        if len(parts) > 2:
                            new_text = parts[2]
                            full_response += new_text
                            yield new_text
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/hf_space/BAAI_Ling.py",
"license": "GNU General Public License v3.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:etc/examples/mcp_tools_demo.py | #!/usr/bin/env python
"""
Example: Using the MCP Server Tools
This script demonstrates how to interact with the MCP server tools programmatically.
It shows how each tool can be used and what kind of results to expect.
"""
import asyncio
import json
from g4f.mcp.server import MCPServer, MCPRequest
async def _run_tool_demo(title, tool_name, arguments, request_id, action, truncate_image=False):
    """Shared driver for the tool demos.

    Builds a ``tools/call`` MCPRequest, prints it, executes it against a fresh
    MCPServer, and pretty-prints the parsed result or the error.

    Args:
        title: Heading shown in the demo banner.
        tool_name: Registered MCP tool to invoke.
        arguments: Arguments dict passed to the tool.
        request_id: JSON-RPC request id.
        action: Human-readable verb used in the progress message.
        truncate_image: Shorten base64 image payloads before printing.
    """
    print("\n" + "=" * 70)
    print(f"DEMO: {title}")
    print("=" * 70)
    server = MCPServer()
    request = MCPRequest(
        jsonrpc="2.0",
        id=request_id,
        method="tools/call",
        params={
            "name": tool_name,
            "arguments": arguments
        }
    )
    print("\nRequest:")
    print(json.dumps({
        "method": "tools/call",
        "params": request.params
    }, indent=2))
    print(f"\nExecuting {action}...")
    response = await server.handle_request(request)
    if response.result:
        print("\nSuccess! Response:")
        content = response.result.get("content", [])
        if content:
            result_text = content[0].get("text", "")
            result_data = json.loads(result_text)
            if truncate_image and "image" in result_data and result_data["image"].startswith("data:"):
                # Don't print the full base64 image data, just show metadata
                result_data["image"] = result_data["image"][:100] + "... (base64 data truncated)"
            print(json.dumps(result_data, indent=2))
    elif response.error:
        print(f"\nError: {response.error}")


async def demo_web_search():
    """Demonstrate web search tool"""
    await _run_tool_demo(
        "Web Search Tool",
        "web_search",
        {"query": "Python programming tutorials", "max_results": 3},
        1,
        "web search",
    )


async def demo_web_scrape():
    """Demonstrate web scraping tool"""
    await _run_tool_demo(
        "Web Scrape Tool",
        "web_scrape",
        {"url": "https://example.com", "max_words": 200},
        2,
        "web scrape",
    )


async def demo_image_generation():
    """Demonstrate image generation tool"""
    await _run_tool_demo(
        "Image Generation Tool",
        "image_generation",
        {
            "prompt": "A beautiful sunset over mountains",
            "model": "flux",
            "width": 512,
            "height": 512
        },
        3,
        "image generation",
        truncate_image=True,
    )
async def main():
    """Run all demos"""
    print("\n" + "=" * 70)
    print("gpt4free MCP Server - Tool Demonstrations")
    print("=" * 70)
    print("\nThis script demonstrates the three main tools available in the MCP server:")
    print("1. Web Search - Search the web using DuckDuckGo")
    print("2. Web Scrape - Extract content from web pages")
    print("3. Image Generation - Generate images from text prompts")
    print("\nNote: These tools require network access and may fail in isolated environments.")

    # Describe every registered tool before exercising them.
    print("\n" + "=" * 70)
    print("Available Tools")
    print("=" * 70)
    server = MCPServer()
    for name, tool in server.tools.items():
        print(f"\n• {name}")
        print(f" Description: {tool.description}")
        schema = tool.input_schema
        required = schema.get("required", [])
        properties = schema.get("properties", {})
        print(f" Required parameters: {', '.join(required)}")
        print(f" Optional parameters: {', '.join([k for k in properties if k not in required])}")

    # Each demo may legitimately fail offline; report the reason and carry on.
    demos = (
        (demo_web_search, "Web search",
         "network access or required packages (ddgs, beautifulsoup4)"),
        (demo_web_scrape, "Web scrape",
         "network access or required packages (aiohttp, beautifulsoup4)"),
        (demo_image_generation, "Image generation",
         "network access or image generation providers"),
    )
    for demo, label, requirement in demos:
        try:
            await demo()
        except Exception as e:
            print(f"\n⚠ {label} demo failed: {e}")
            print(f"This is expected without {requirement}")

    print("\n" + "=" * 70)
    print("Demo Complete")
    print("=" * 70)
    print("\nTo use these tools in production:")
    print("1. Start the MCP server: g4f mcp")
    print("2. Configure your AI assistant to connect to it")
    print("3. The assistant can then use these tools to enhance its capabilities")
    print("\nSee g4f/mcp/README.md for detailed configuration instructions.")


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/examples/mcp_tools_demo.py",
"license": "GNU General Public License v3.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:etc/testing/test_mcp_http.py | #!/usr/bin/env python
"""Test HTTP MCP server functionality
This script tests the HTTP transport for the MCP server.
"""
import asyncio
import json
from g4f.mcp.server import MCPServer, MCPRequest
async def test_http_server():
    """Test HTTP server methods"""
    server = MCPServer()

    print("Testing HTTP MCP Server Functionality")
    print("=" * 70)

    # Construction alone proves the server wires up its registry and metadata.
    print("\n✓ Server initialized successfully")
    print(f" Server: {server.server_info['name']}")
    print(f" Version: {server.server_info['version']}")

    # The HTTP transport is exposed as a dedicated coroutine; bail out early
    # when it is missing.
    if not hasattr(server, 'run_http'):
        print("\n✗ HTTP transport method not found")
        return
    print("\n✓ HTTP transport method (run_http) available")
    print(" Signature: run_http(host, port)")

    # Request handling is transport-agnostic, so driving it directly also
    # covers the HTTP code path.
    print("\n✓ Testing request handling...")
    probe = MCPRequest(
        jsonrpc="2.0",
        id=1,
        method="initialize",
        params={}
    )
    reply = await server.handle_request(probe)
    if reply.result and reply.result.get("protocolVersion"):
        print(f" Protocol Version: {reply.result['protocolVersion']}")
        print(" ✓ Request handling works correctly")

    print("\n" + "=" * 70)
    print("HTTP MCP Server Tests Passed!")
    print("\nTo start HTTP server:")
    print(" g4f mcp --http --port 8765")
    print("\nHTTP endpoints:")
    print(" POST http://localhost:8765/mcp - MCP JSON-RPC endpoint")
    print(" GET http://localhost:8765/health - Health check")
    print("\nExample HTTP request:")
    print(' curl -X POST http://localhost:8765/mcp \\')
    print(' -H "Content-Type: application/json" \\')
    print(' -d \'{"jsonrpc":"2.0","id":1,"method":"initialize","params":{}}\'')


if __name__ == "__main__":
    asyncio.run(test_http_server())
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/testing/test_mcp_http.py",
"license": "GNU General Public License v3.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
xtekky/gpt4free:etc/testing/test_mcp_interactive.py | #!/usr/bin/env python
"""Interactive MCP server test
This script simulates a client sending requests to the MCP server
and demonstrates how the tools work.
"""
import json
import sys
import asyncio
from io import StringIO
async def simulate_mcp_client():
    """Simulate an MCP client interacting with the server"""
    print("MCP Server Interactive Test")
    print("=" * 70)
    print("\nThis test simulates JSON-RPC 2.0 messages between client and server.")
    print("The MCP server uses stdio transport for communication.\n")
    # Imported lazily so the banner prints even if g4f fails to import.
    from g4f.mcp.server import MCPServer, MCPRequest
    server = MCPServer()
    # Test sequence of requests
    test_requests = [
        {
            "name": "Initialize Connection",
            "request": {
                "jsonrpc": "2.0",
                "id": 1,
                "method": "initialize",
                "params": {
                    "protocolVersion": "2024-11-05",
                    "clientInfo": {
                        "name": "test-client",
                        "version": "1.0.0"
                    }
                }
            }
        },
        {
            "name": "List Available Tools",
            "request": {
                "jsonrpc": "2.0",
                "id": 2,
                "method": "tools/list",
                "params": {}
            }
        },
        {
            "name": "Ping Server",
            "request": {
                "jsonrpc": "2.0",
                "id": 3,
                "method": "ping",
                "params": {}
            }
        },
    ]
    # Echo each request, dispatch it to the server, and echo the response.
    for test in test_requests:
        print(f"\n{'─' * 70}")
        print(f"Test: {test['name']}")
        print(f"{'─' * 70}")
        # Show request
        print("\nClient Request:")
        print(json.dumps(test['request'], indent=2))
        # Create request object
        req_data = test['request']
        request = MCPRequest(
            jsonrpc=req_data.get("jsonrpc", "2.0"),
            id=req_data.get("id"),
            method=req_data.get("method"),
            params=req_data.get("params")
        )
        # Handle request
        response = await server.handle_request(request)
        # Show response
        print("\nServer Response:")
        response_dict = {
            "jsonrpc": response.jsonrpc,
            "id": response.id
        }
        if response.result is not None:
            response_dict["result"] = response.result
        if response.error is not None:
            response_dict["error"] = response.error
        print(json.dumps(response_dict, indent=2))
        await asyncio.sleep(0.1)  # Small delay between requests
    print(f"\n{'═' * 70}")
    print("Interactive Test Complete!")
    print(f"{'═' * 70}\n")
    # Summarize each registered tool's contract from its input schema.
    print("Tool Descriptions:")
    print("-" * 70)
    for name, tool in server.tools.items():
        print(f"\n• {name}")
        print(f" {tool.description}")
        schema = tool.input_schema
        if 'required' in schema:
            print(f" Required: {', '.join(schema['required'])}")
        if 'properties' in schema:
            optional = [k for k in schema['properties'].keys() if k not in schema.get('required', [])]
            if optional:
                print(f" Optional: {', '.join(optional)}")
    # Print usage instructions for running the server for real.
    print(f"\n{'═' * 70}")
    print("How to Use the MCP Server:")
    print(f"{'═' * 70}\n")
    print("1. Start the server:")
    print(" $ python -m g4f.mcp")
    print(" or")
    print(" $ g4f mcp")
    print()
    print("2. Configure in Claude Desktop (~/Library/Application Support/Claude/claude_desktop_config.json):")
    print(' {')
    print(' "mcpServers": {')
    print(' "gpt4free": {')
    print(' "command": "python",')
    print(' "args": ["-m", "g4f.mcp"]')
    print(' }')
    print(' }')
    print(' }')
    print()
    print("3. Or test via stdin/stdout:")
    print(' $ echo \'{"jsonrpc":"2.0","id":1,"method":"initialize","params":{}}\' | python -m g4f.mcp')
    print()
    print("The server will:")
    print(" • Read JSON-RPC requests from stdin (one per line)")
    print(" • Process the request and execute tools if needed")
    print(" • Write JSON-RPC responses to stdout (one per line)")
    print(" • Write debug/error messages to stderr")
    print()
# Entry point: Ctrl-C exits cleanly instead of dumping a traceback.
if __name__ == "__main__":
    try:
        asyncio.run(simulate_mcp_client())
    except KeyboardInterrupt:
        print("\n\nTest interrupted by user.")
        sys.exit(0)
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/testing/test_mcp_interactive.py",
"license": "GNU General Public License v3.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
xtekky/gpt4free:etc/testing/test_mcp_server.py | #!/usr/bin/env python
"""Test script for MCP server
This script tests the MCP server by simulating client interactions.
It sends JSON-RPC requests and verifies responses.
"""
import json
import sys
import asyncio
from g4f.mcp.server import MCPServer, MCPRequest
async def test_mcp_server():
    """Test MCP server functionality"""
    server = MCPServer()
    print("Testing MCP Server...")
    print("=" * 60)
    # Test 1: Initialize — the handshake must report protocol + server info.
    print("\n1. Testing initialize request...")
    init_request = MCPRequest(
        jsonrpc="2.0",
        id=1,
        method="initialize",
        params={}
    )
    response = await server.handle_request(init_request)
    print(f" Response ID: {response.id}")
    print(f" Protocol Version: {response.result['protocolVersion']}")
    print(f" Server Name: {response.result['serverInfo']['name']}")
    print(" ✓ Initialize test passed")
    # Test 2: List tools — every registered tool should be advertised.
    print("\n2. Testing tools/list request...")
    list_request = MCPRequest(
        jsonrpc="2.0",
        id=2,
        method="tools/list",
        params={}
    )
    response = await server.handle_request(list_request)
    print(f" Number of tools: {len(response.result['tools'])}")
    for tool in response.result['tools']:
        print(f" - {tool['name']}: {tool['description'][:50]}...")
    print(" ✓ Tools list test passed")
    # Test 3: Ping — a liveness check that returns an (empty) result.
    print("\n3. Testing ping request...")
    ping_request = MCPRequest(
        jsonrpc="2.0",
        id=3,
        method="ping",
        params={}
    )
    response = await server.handle_request(ping_request)
    print(f" Response ID: {response.id}")
    print(" ✓ Ping test passed")
    # Test 4: Invalid method — must yield a JSON-RPC error, not an exception.
    print("\n4. Testing invalid method request...")
    invalid_request = MCPRequest(
        jsonrpc="2.0",
        id=4,
        method="invalid_method",
        params={}
    )
    response = await server.handle_request(invalid_request)
    if response.error:
        print(f" Error code: {response.error['code']}")
        print(f" Error message: {response.error['message']}")
    print(" ✓ Invalid method test passed")
    # Test 5: Tool schemas — each tool must declare a usable input schema.
    print("\n5. Testing tool input schemas...")
    list_request = MCPRequest(
        jsonrpc="2.0",
        id=5,
        method="tools/list",
        params={}
    )
    response = await server.handle_request(list_request)
    for tool in response.result['tools']:
        print(f" Tool: {tool['name']}")
        schema = tool['inputSchema']
        required = schema.get('required', [])
        properties = schema.get('properties', {})
        print(f" Required params: {', '.join(required)}")
        print(f" All params: {', '.join(properties.keys())}")
    print(" ✓ Tool schemas test passed")
    print("\n" + "=" * 60)
    print("All tests passed! ✓")
    print("\nMCP server is working correctly.")
    print("\nTo use the server, run:")
    print(" python -m g4f.mcp")
    print(" or")
    print(" g4f mcp")
# Entry point for running this smoke test directly.
if __name__ == "__main__":
    asyncio.run(test_mcp_server())
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/testing/test_mcp_server.py",
"license": "GNU General Public License v3.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
xtekky/gpt4free:etc/unittest/mcp.py | from __future__ import annotations
import json
import unittest
from g4f.mcp.server import MCPServer, MCPRequest
from g4f.mcp.tools import WebSearchTool, WebScrapeTool, ImageGenerationTool
# Optional third-party requirements for the tool tests; when they are
# missing, TestMCPTools skips itself instead of erroring at import time.
try:
    from ddgs import DDGS, DDGSError
    from bs4 import BeautifulSoup
    has_requirements = True
except ImportError:
    has_requirements = False
class TestMCPServer(unittest.IsolatedAsyncioTestCase):
    """Test cases for MCP server request handling (no network required)."""

    async def test_server_initialization(self):
        """Test that server initializes correctly"""
        server = MCPServer()
        self.assertIsNotNone(server)
        self.assertEqual(server.server_info["name"], "gpt4free-mcp-server")
        # The server registers five tools; spot-check three of them by name.
        self.assertEqual(len(server.tools), 5)
        self.assertIn('web_search', server.tools)
        self.assertIn('web_scrape', server.tools)
        self.assertIn('image_generation', server.tools)

    async def test_initialize_request(self):
        """Test initialize method"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=1,
            method="initialize",
            params={}
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 1)
        self.assertIsNotNone(response.result)
        # Handshake must advertise the supported MCP protocol revision.
        self.assertEqual(response.result["protocolVersion"], "2024-11-05")
        self.assertIn("serverInfo", response.result)

    async def test_tools_list(self):
        """Test tools/list method"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=2,
            method="tools/list",
            params={}
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 2)
        self.assertIsNotNone(response.result)
        self.assertIn("tools", response.result)
        self.assertEqual(len(response.result["tools"]), 5)
        # Check tool structure
        tool_names = [tool["name"] for tool in response.result["tools"]]
        self.assertIn("web_search", tool_names)
        self.assertIn("web_scrape", tool_names)
        self.assertIn("image_generation", tool_names)

    async def test_ping(self):
        """Test ping method"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=3,
            method="ping",
            params={}
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 3)
        self.assertIsNotNone(response.result)

    async def test_invalid_method(self):
        """Test invalid method returns error"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=4,
            method="invalid_method",
            params={}
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 4)
        self.assertIsNotNone(response.error)
        # -32601 is JSON-RPC "method not found".
        self.assertEqual(response.error["code"], -32601)

    async def test_tool_call_invalid_tool(self):
        """Test calling non-existent tool"""
        server = MCPServer()
        request = MCPRequest(
            jsonrpc="2.0",
            id=5,
            method="tools/call",
            params={
                "name": "nonexistent_tool",
                "arguments": {}
            }
        )
        response = await server.handle_request(request)
        self.assertEqual(response.jsonrpc, "2.0")
        self.assertEqual(response.id, 5)
        self.assertIsNotNone(response.error)
        # Unknown tools are also reported as "method not found".
        self.assertEqual(response.error["code"], -32601)
class TestMCPTools(unittest.IsolatedAsyncioTestCase):
    """Test cases for MCP tools (schemas and argument validation only)."""

    def setUp(self) -> None:
        # Skip when the optional packages probed at import time are absent.
        if not has_requirements:
            self.skipTest('MCP tools requirements not installed')

    async def test_web_search_tool_schema(self):
        """Test WebSearchTool schema"""
        tool = WebSearchTool()
        self.assertIsNotNone(tool.description)
        self.assertIsNotNone(tool.input_schema)
        self.assertEqual(tool.input_schema["type"], "object")
        self.assertIn("query", tool.input_schema["properties"])
        self.assertIn("query", tool.input_schema["required"])

    async def test_web_scrape_tool_schema(self):
        """Test WebScrapeTool schema"""
        tool = WebScrapeTool()
        self.assertIsNotNone(tool.description)
        self.assertIsNotNone(tool.input_schema)
        self.assertEqual(tool.input_schema["type"], "object")
        self.assertIn("url", tool.input_schema["properties"])
        self.assertIn("url", tool.input_schema["required"])

    async def test_image_generation_tool_schema(self):
        """Test ImageGenerationTool schema"""
        tool = ImageGenerationTool()
        self.assertIsNotNone(tool.description)
        self.assertIsNotNone(tool.input_schema)
        self.assertEqual(tool.input_schema["type"], "object")
        self.assertIn("prompt", tool.input_schema["properties"])
        self.assertIn("prompt", tool.input_schema["required"])

    # Missing required arguments must yield an error dict, not an exception.
    async def test_web_search_missing_query(self):
        """Test web search with missing query parameter"""
        tool = WebSearchTool()
        result = await tool.execute({})
        self.assertIn("error", result)

    async def test_web_scrape_missing_url(self):
        """Test web scrape with missing url parameter"""
        tool = WebScrapeTool()
        result = await tool.execute({})
        self.assertIn("error", result)

    async def test_image_generation_missing_prompt(self):
        """Test image generation with missing prompt parameter"""
        tool = ImageGenerationTool()
        result = await tool.execute({})
        self.assertIn("error", result)
| {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/unittest/mcp.py",
"license": "GNU General Public License v3.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/mcp/__main__.py | """Main entry point for gpt4free MCP server
This module provides the main entry point for running the MCP server.
"""
from .server import main
# Allow running the server with ``python -m g4f.mcp``.
if __name__ == "__main__":
    main()
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/mcp/__main__.py",
"license": "GNU General Public License v3.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
xtekky/gpt4free:g4f/mcp/server.py | """MCP Server implementation with stdio and HTTP transports
This module implements a Model Context Protocol (MCP) server that communicates
over standard input/output using JSON-RPC 2.0, or via HTTP POST endpoints.
The server exposes tools for:
- Web search
- Web scraping
- Image generation
"""
from __future__ import annotations
import os
import re
import sys
import json
import asyncio
import hashlib
from email.utils import formatdate
from typing import Any, Dict, List, Optional, Union
from dataclasses import dataclass
from urllib.parse import unquote_plus
from ..debug import enable_logging
from ..cookies import read_cookie_files
from ..image import EXTENSIONS_MAP
from ..image.copy_images import get_media_dir, copy_media, get_source_url
from .tools import MarkItDownTool, TextToAudioTool, WebSearchTool, WebScrapeTool, ImageGenerationTool
from .tools import WebSearchTool, WebScrapeTool, ImageGenerationTool
@dataclass
class MCPRequest:
    """MCP request following the JSON-RPC 2.0 format."""

    jsonrpc: str = "2.0"                      # protocol marker, always "2.0"
    id: Optional[Union[int, str]] = None      # request id; None for notifications
    method: Optional[str] = None              # JSON-RPC method (e.g. "tools/call")
    params: Optional[Dict[str, Any]] = None   # method parameters, if any
    origin: Optional[str] = None              # HTTP Origin, used by tools to absolutize media URLs
@dataclass
class MCPResponse:
    """MCP response following the JSON-RPC 2.0 format.

    Exactly one of `result` or `error` is expected to be populated.
    """

    jsonrpc: str = "2.0"                      # protocol marker, always "2.0"
    id: Optional[Union[int, str]] = None      # echoes the request id
    result: Optional[Any] = None              # success payload
    error: Optional[Dict[str, Any]] = None    # {"code": int, "message": str} on failure
class MCPServer:
"""Model Context Protocol server for gpt4free
This server exposes gpt4free capabilities through the MCP standard,
allowing AI assistants to utilize web search, scraping, and image generation.
"""
def __init__(self):
    """Create the server and register the built-in tool set."""
    # Tool registry: the keys are the names clients pass to tools/call.
    web_search = WebSearchTool()
    web_scrape = WebScrapeTool()
    image_generation = ImageGenerationTool()
    text_to_audio = TextToAudioTool()
    mark_it_down = MarkItDownTool()
    self.tools = {
        'web_search': web_search,
        'web_scrape': web_scrape,
        'image_generation': image_generation,
        'text_to_audio': text_to_audio,
        'mark_it_down': mark_it_down,
    }
    # Identity advertised in the `initialize` handshake.
    self.server_info = {
        "name": "gpt4free-mcp-server",
        "version": "1.0.0",
        "description": "MCP server providing web search, scraping, and image generation capabilities",
    }
def get_tool_list(self) -> List[Dict[str, Any]]:
"""Get list of available tools with their schemas"""
tool_list = []
for name, tool in self.tools.items():
tool_list.append({
"name": name,
"description": tool.description,
"inputSchema": tool.input_schema
})
return tool_list
async def handle_request(self, request: MCPRequest) -> MCPResponse:
    """Dispatch a single MCP request and return its JSON-RPC response.

    Supported methods: initialize, tools/list, tools/call, ping. Unknown
    methods and unknown tools produce a -32601 error; any unexpected
    exception is converted into a -32603 internal error.
    """
    def error(code: int, message: str) -> MCPResponse:
        # Error response bound to the incoming request id.
        return MCPResponse(jsonrpc="2.0", id=request.id,
                           error={"code": code, "message": message})

    def ok(payload: Any) -> MCPResponse:
        # Success response bound to the incoming request id.
        return MCPResponse(jsonrpc="2.0", id=request.id, result=payload)

    try:
        method = request.method
        params = request.params or {}
        if method == "initialize":
            return ok({
                "protocolVersion": "2024-11-05",
                "serverInfo": self.server_info,
                "capabilities": {"tools": {}},
            })
        if method == "tools/list":
            return ok({"tools": self.get_tool_list()})
        if method == "tools/call":
            tool_name = params.get("name")
            tool_arguments = params.get("arguments", {})
            # Propagate the caller's Origin so tools can build absolute media URLs.
            tool_arguments.setdefault("origin", request.origin)
            if tool_name not in self.tools:
                return error(-32601, f"Tool not found: {tool_name}")
            outcome = await self.tools[tool_name].execute(tool_arguments)
            return ok({
                "content": [
                    {
                        "type": "text",
                        "text": json.dumps(outcome, indent=2)
                    }
                ]
            })
        if method == "ping":
            return ok({})
        return error(-32601, f"Method not found: {method}")
    except Exception as e:
        return error(-32603, f"Internal error: {str(e)}")
async def run(self):
    """Serve MCP over stdio: one JSON-RPC request per input line, one response per output line."""
    # Write server info to stderr for debugging
    sys.stderr.write(f"Starting {self.server_info['name']} v{self.server_info['version']}\n")
    sys.stderr.flush()
    while True:
        try:
            # Read line from stdin
            # readline blocks, so run it in the default executor to keep the event loop responsive.
            line = await asyncio.get_event_loop().run_in_executor(
                None, sys.stdin.readline
            )
            if not line:
                break  # EOF: client closed stdin, shut down.
            # Parse JSON-RPC request
            request_data = json.loads(line)
            request = MCPRequest(
                jsonrpc=request_data.get("jsonrpc", "2.0"),
                id=request_data.get("id"),
                method=request_data.get("method"),
                params=request_data.get("params"),
            )
            # Handle request
            response = await self.handle_request(request)
            # Write response to stdout
            response_dict = {
                "jsonrpc": response.jsonrpc,
                "id": response.id
            }
            # Only emit result/error members that are actually populated.
            if response.result is not None:
                response_dict["result"] = response.result
            if response.error is not None:
                response_dict["error"] = response.error
            sys.stdout.write(json.dumps(response_dict) + "\n")
            sys.stdout.flush()
        except json.JSONDecodeError as e:
            # Malformed line: log to stderr and keep serving later requests.
            sys.stderr.write(f"JSON decode error: {e}\n")
            sys.stderr.flush()
        except Exception as e:
            sys.stderr.write(f"Error: {e}\n")
            sys.stderr.flush()
async def run_http(self, host: str = "0.0.0.0", port: int = 8765, origin: Optional[str] = None):
    """Run the MCP server with HTTP transport.

    Routes:
        POST    /mcp                                   JSON-RPC 2.0 endpoint
        OPTIONS /mcp                                   CORS preflight
        GET     /health                                liveness probe
        GET     /media/{filename}                      generated media files
        GET     /backend-api/v2/synthesize/{provider}  provider text-to-speech

    Args:
        host: Host to bind the HTTP server to
        port: Port to bind the HTTP server to
        origin: Fixed public origin for absolute URLs; when None it is taken
            from the first request's Origin header.
    """
    try:
        from aiohttp import web
    except ImportError:
        sys.stderr.write("Error: aiohttp is required for HTTP transport\n")
        sys.stderr.write("Install it with: pip install aiohttp\n")
        sys.exit(1)
    enable_logging()
    read_cookie_files()

    async def handle_mcp_request(request: web.Request) -> web.Response:
        """Handle MCP JSON-RPC request over HTTP POST"""
        nonlocal origin
        try:
            # Parse JSON-RPC request from POST body
            request_data = await request.json()
            if origin is None:
                # Remember the first caller's Origin so tools can absolutize media URLs.
                origin = request.headers.get("origin")
            mcp_request = MCPRequest(
                jsonrpc=request_data.get("jsonrpc", "2.0"),
                id=request_data.get("id"),
                method=request_data.get("method"),
                params=request_data.get("params"),
                origin=origin
            )
            # Handle request
            response = await self.handle_request(mcp_request)
            # Build response dict, omitting unset result/error members
            response_dict = {
                "jsonrpc": response.jsonrpc,
                "id": response.id
            }
            if response.result is not None:
                response_dict["result"] = response.result
            if response.error is not None:
                response_dict["error"] = response.error
            return web.json_response(response_dict, headers={"access-control-allow-origin": "*"})
        except json.JSONDecodeError as e:
            return web.json_response({
                "jsonrpc": "2.0",
                "id": None,
                "error": {
                    "code": -32700,
                    "message": f"Parse error: {str(e)}"
                }
            }, status=400)
        except Exception as e:
            return web.json_response({
                "jsonrpc": "2.0",
                "id": None,
                "error": {
                    "code": -32603,
                    "message": f"Internal error: {str(e)}"
                }
            }, status=500)

    async def handle_mcp_options(request: web.Request) -> web.Response:
        """CORS preflight for /mcp.

        aiohttp requires request handlers to be coroutines; the previous sync
        lambda handler would fail when the response was awaited.
        """
        return web.Response(headers={
            "access-control-allow-origin": "*",
            "access-control-allow-methods": "POST, OPTIONS",
            "access-control-allow-headers": "Content-Type"
        })

    async def handle_health(request: web.Request) -> web.Response:
        """Health check endpoint"""
        return web.json_response({
            "status": "ok",
            "server": self.server_info
        })

    async def handle_media(request: web.Request) -> web.Response:
        """Serve media files from generated_media directory"""
        filename = request.match_info.get('filename', '')
        if not filename:
            return web.Response(status=404, text="File not found")

        def get_timestamp(s):
            # Filenames may start with a unix timestamp; fall back to epoch 0.
            m = re.match("^[0-9]+", s)
            return int(m.group(0)) if m else 0

        # basename() prevents path traversal out of the media directory.
        target = os.path.join(get_media_dir(), os.path.basename(filename))
        # Try URL-decoded filename if not found
        if not os.path.isfile(target):
            other_name = os.path.join(get_media_dir(), os.path.basename(unquote_plus(filename)))
            if os.path.isfile(other_name):
                target = other_name
        # Get file extension and mime type
        ext = os.path.splitext(filename)[1][1:].lower()
        mime_type = EXTENSIONS_MAP.get(ext, "application/octet-stream")
        # Try to fetch from backend if file doesn't exist
        if not os.path.isfile(target) and mime_type != "application/octet-stream":
            source_url = get_source_url(str(request.query_string))
            ssl = None
            if source_url is not None:
                try:
                    await copy_media([source_url], target=target, ssl=ssl)
                    sys.stderr.write(f"File copied from {source_url}\n")
                except Exception as e:
                    sys.stderr.write(f"Download failed: {source_url} - {e}\n")
                    # Fall back to redirecting the client to the original source.
                    raise web.HTTPFound(location=source_url)
        if not os.path.isfile(target):
            return web.Response(status=404, text="File not found")
        # Build response headers
        stat_result = os.stat(target)
        headers = {
            "cache-control": "public, max-age=31536000",
            "last-modified": formatdate(get_timestamp(filename), usegmt=True),
            "etag": f'"{hashlib.md5(filename.encode()).hexdigest()}"',
            "content-length": str(stat_result.st_size),
            "content-type": mime_type,
            "access-control-allow-origin": "*",
        }
        # Check for conditional request
        if_none_match = request.headers.get("if-none-match")
        if if_none_match:
            etag = headers["etag"]
            if etag in [tag.strip(" W/") for tag in if_none_match.split(",")]:
                return web.Response(status=304, headers=headers)
        # Serve the file
        return web.FileResponse(target, headers=headers)

    async def handle_synthesize(request: web.Request) -> web.Response:
        """Handle synthesize requests for text-to-speech"""
        provider_name = request.match_info.get('provider', '')
        if not provider_name:
            return web.Response(status=400, text="Provider not specified")
        try:
            from ..Provider import ProviderUtils
            provider_handler = ProviderUtils.convert.get(provider_name)
            if provider_handler is None:
                return web.Response(status=404, text=f"Provider not found: {provider_name}")
        except Exception as e:
            return web.Response(status=404, text=f"Provider not found: {provider_name}")
        if not hasattr(provider_handler, "synthesize"):
            return web.Response(status=500, text=f"Provider doesn't support synthesize: {provider_name}")
        # Get query parameters
        params = dict(request.query)
        try:
            # Call the synthesize method
            response_data = provider_handler.synthesize(params)

            # Handle async generator
            async def generate():
                async for chunk in response_data:
                    yield chunk

            content_type = getattr(provider_handler, "synthesize_content_type", "application/octet-stream")
            # NOTE: the whole payload is buffered in memory before responding.
            return web.Response(
                body=b"".join([chunk async for chunk in generate()]),
                content_type=content_type,
                headers={
                    "cache-control": "max-age=604800",
                    "access-control-allow-origin": "*",
                }
            )
        except Exception as e:
            sys.stderr.write(f"Synthesize error: {e}\n")
            return web.Response(status=500, text=f"Synthesize error: {str(e)}")

    # Create aiohttp application
    app = web.Application()
    app.router.add_options('/mcp', handle_mcp_options)
    app.router.add_post('/mcp', handle_mcp_request)
    app.router.add_get('/health', handle_health)
    app.router.add_get('/media/{filename:.*}', handle_media)
    app.router.add_get('/backend-api/v2/synthesize/{provider}', handle_synthesize)
    # Start server
    sys.stderr.write(f"Starting {self.server_info['name']} v{self.server_info['version']} (HTTP mode)\n")
    sys.stderr.write(f"Listening on http://{host}:{port}\n")
    sys.stderr.write(f"MCP endpoint: http://{host}:{port}/mcp\n")
    sys.stderr.write(f"Health check: http://{host}:{port}/health\n")
    # Fixed: the original interpolated an undefined name `(unknown)` here, which
    # raised NameError on startup; `{{filename}}` prints a literal placeholder.
    sys.stderr.write(f"Media files: http://{host}:{port}/media/{{filename}}\n")
    sys.stderr.flush()
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, host, port)
    await site.start()
    # Keep server running
    try:
        await asyncio.Event().wait()
    except KeyboardInterrupt:
        sys.stderr.write("\nShutting down HTTP server...\n")
        sys.stderr.flush()
    finally:
        await runner.cleanup()
def main(http: bool = False, host: str = "0.0.0.0", port: int = 8765, origin: Optional[str] = None):
    """Entry point: run the MCP server over stdio or, when http=True, over HTTP.

    Args:
        http: If True, use HTTP transport instead of stdio
        host: Host to bind HTTP server to (only used when http=True)
        port: Port to bind HTTP server to (only used when http=True)
        origin: Optional fixed origin used to absolutize media URLs (HTTP mode)
    """
    server = MCPServer()
    # Select the transport coroutine, then drive it to completion.
    entry = server.run_http(host, port, origin) if http else server.run()
    asyncio.run(entry)
# Run the stdio MCP server when this module is executed directly.
if __name__ == "__main__":
    main()
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/mcp/server.py",
"license": "GNU General Public License v3.0",
"lines": 364,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/mcp/tools.py | """MCP Tools for gpt4free
This module provides MCP tool implementations that wrap gpt4free capabilities:
- WebSearchTool: Web search using ddg search
- WebScrapeTool: Web page scraping and content extraction
- ImageGenerationTool: Image generation using various AI providers
"""
from __future__ import annotations
from typing import Any, Dict
from abc import ABC, abstractmethod
import urllib.parse
from aiohttp import ClientSession
class MCPTool(ABC):
    """Base class for MCP tools.

    Concrete tools provide a human-readable description, a JSON schema for
    their arguments, and an async execute() implementation.
    """

    @property
    @abstractmethod
    def description(self) -> str:
        """Tool description shown to clients in tools/list."""
        pass

    @property
    @abstractmethod
    def input_schema(self) -> Dict[str, Any]:
        """JSON schema for tool input parameters."""
        pass

    @abstractmethod
    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute the tool with given arguments.

        Args:
            arguments: Tool input arguments matching the input_schema

        Returns:
            Dict containing either results or an error key with error message
        """
        pass
class WebSearchTool(MCPTool):
    """Web search tool using gpt4free's search capabilities."""

    @property
    def description(self) -> str:
        return "Search the web for information using DuckDuckGo. Returns search results with titles, URLs, and snippets."

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query to execute"
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of results to return (default: 5)",
                    "default": 5
                },
                "region": {
                    "type": "string",
                    "description": "Search region (default: en-us)"
                }
            },
            "required": ["query"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute web search.

        Returns:
            Dict[str, Any]: Search results or error message
        """
        # Imported lazily so the tool schema can be served without search deps installed.
        from ..Provider.search.CachedSearch import CachedSearch
        query = arguments.get("query", "")
        max_results = arguments.get("max_results", 5)
        region = arguments.get("region", "en-us")
        if not query:
            return {
                "error": "Query parameter is required"
            }
        try:
            # Perform search - query parameter is used for search execution
            # and prompt parameter holds the content to be searched.
            # anext() takes only the first item yielded by the async generator.
            search_results = await anext(CachedSearch.create_async_generator(
                "",
                [],
                prompt=query,
                max_results=max_results,
                region=region
            ))
            return {
                "query": query,
                **search_results.get_dict()
            }
        except Exception as e:
            return {
                "error": f"Search failed: {str(e)}"
            }
class WebScrapeTool(MCPTool):
    """Web scraping tool using gpt4free's scraping capabilities."""

    @property
    def description(self) -> str:
        return "Scrape and extract text content from a web page URL. Returns cleaned text content with optional word limit."

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The URL of the web page to scrape"
                },
                "max_words": {
                    "type": "integer",
                    "description": "Maximum number of words to extract (default: 1000)",
                    "default": 1000
                }
            },
            "required": ["url"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Fetch *url* and return its cleaned text content.

        Returns:
            Dict[str, Any]: Scraped content with word count, or an error key.
        """
        # Lazy project import; ClientSession comes from the module-level aiohttp
        # import, so the former redundant local import of it was removed.
        from ..tools.fetch_and_scrape import fetch_and_scrape
        url = arguments.get("url", "")
        max_words = arguments.get("max_words", 1000)
        if not url:
            return {
                "error": "URL parameter is required"
            }
        try:
            # Scrape the URL
            async with ClientSession() as session:
                content = await fetch_and_scrape(
                    session=session,
                    url=url,
                    max_words=max_words,
                    add_metadata=True
                )
            if not content:
                return {
                    "error": "Failed to scrape content from URL"
                }
            return {
                "url": url,
                "content": content,
                "word_count": len(content.split())
            }
        except Exception as e:
            return {
                "error": f"Scraping failed: {str(e)}"
            }
class ImageGenerationTool(MCPTool):
    """Image generation tool using gpt4free's image generation capabilities."""

    @property
    def description(self) -> str:
        return "Generate images from text prompts using AI image generation providers. Returns a URL to the generated image."

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "prompt": {
                    "type": "string",
                    "description": "The text prompt describing the image to generate"
                },
                "model": {
                    "type": "string",
                    "description": "The image generation model to use (default: flux)",
                    "default": "flux"
                },
                "width": {
                    "type": "integer",
                    "description": "Image width in pixels (default: 1024)",
                    "default": 1024
                },
                "height": {
                    "type": "integer",
                    "description": "Image height in pixels (default: 1024)",
                    "default": 1024
                }
            },
            "required": ["prompt"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Generate an image for *prompt* and return its URL or data URI.

        Returns:
            Dict[str, Any]: Generated image data or error message
        """
        from ..client import AsyncClient
        prompt = arguments.get("prompt", "")
        model = arguments.get("model", "flux")
        width = arguments.get("width", 1024)
        height = arguments.get("height", 1024)
        if not prompt:
            return {
                "error": "Prompt parameter is required"
            }
        try:
            # Generate image using gpt4free client
            client = AsyncClient()
            response = await client.images.generate(
                model=model,
                prompt=prompt,
                width=width,
                height=height
            )
            # Validate the response shape step by step so error messages stay precise.
            if not response:
                return {
                    "error": "Image generation failed: No response from provider"
                }
            # Covers both a missing attribute and an empty data list, so the
            # former unreachable `len(response.data) == 0` re-check was removed.
            if not hasattr(response, 'data') or not response.data:
                return {
                    "error": "Image generation failed: No image data in response"
                }
            image_data = response.data[0]
            # Check if image_data has url attribute
            if not hasattr(image_data, 'url'):
                return {
                    "error": "Image generation failed: No URL in image data"
                }
            image_url = image_data.url
            # The literal {image}/{prompt} placeholders are intentional: the template
            # is an instruction for the consuming assistant, not a format string.
            template = 'Display the image using this template: <a href="{image}" data-width="{width}" data-height="{height}"><img src="{image}" alt="{prompt}"></a>'
            # Return result based on URL type
            if image_url.startswith('data:'):
                return {
                    "prompt": prompt,
                    "model": model,
                    "width": width,
                    "height": height,
                    "image": image_url,
                    "template": template
                }
            else:
                # Absolutize server-relative media paths against the caller's origin.
                if arguments.get("origin") and image_url.startswith("/media/"):
                    image_url = f"{arguments.get('origin')}{image_url}"
                return {
                    "prompt": prompt,
                    "model": model,
                    "width": width,
                    "height": height,
                    "image_url": image_url,
                    "template": template
                }
        except Exception as e:
            return {
                "error": f"Image generation failed: {str(e)}"
            }
class MarkItDownTool(MCPTool):
    """MarkItDown tool for converting URLs to markdown format."""

    @property
    def description(self) -> str:
        return "Convert a URL to markdown format using MarkItDown. Supports HTTP/HTTPS URLs and returns formatted markdown content."

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The URL to convert to markdown format (must be HTTP/HTTPS)"
                },
                "max_content_length": {
                    "type": "integer",
                    "description": "Maximum content length for processing (default: 10000)",
                    "default": 10000
                }
            },
            "required": ["url"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Convert *url* to markdown, truncating overly long output.

        Returns:
            Dict[str, Any]: Markdown content plus length/truncation info, or an error key.
        """
        try:
            from ..integration.markitdown import MarkItDown
        except ImportError as e:
            return {
                "error": f"MarkItDown is not installed: {str(e)}"
            }
        url = arguments.get("url", "")
        max_content_length = arguments.get("max_content_length", 10000)
        if not url:
            return {
                "error": "URL parameter is required"
            }
        # Validate URL format
        if not url.startswith(("http://", "https://")):
            return {
                "error": "URL must start with http:// or https://"
            }
        try:
            # Initialize MarkItDown
            md = MarkItDown()
            # NOTE(review): assumes convert_url yields str-like content supporting
            # len() and slicing -- confirm against the integration wrapper.
            result = md.convert_url(url)
            if not result:
                return {
                    "error": "Failed to convert URL to markdown"
                }
            # Decide truncation *before* mutating result, so the flag does not
            # depend on the length of the appended truncation marker.
            truncated = len(result) > max_content_length
            if truncated:
                result = result[:max_content_length] + "\n\n[Content truncated...]"
            return {
                "url": url,
                "markdown_content": result,
                "content_length": len(result),
                "truncated": truncated
            }
        except Exception as e:
            return {
                "error": f"MarkItDown conversion failed: {str(e)}"
            }
class TextToAudioTool(MCPTool):
    """TextToAudio tool for generating audio from text prompts using Pollinations AI."""

    @property
    def description(self) -> str:
        return "Generate an audio URL from a text prompt using Pollinations AI text-to-speech service. Returns a direct URL to the generated audio file."

    @property
    def input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "prompt": {
                    "type": "string",
                    "description": "The text prompt to the audio model (example: 'Read this: Hello, world!')"
                },
                "voice": {
                    "type": "string",
                    "description": "Voice option for text-to-speech (default: 'alloy')",
                    "default": "alloy"
                },
                "url_encode": {
                    "type": "boolean",
                    "description": "Whether to URL-encode the prompt text (default: True)",
                    "default": True
                }
            },
            "required": ["prompt"]
        }

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute text-to-speech conversion.

        Returns:
            Dict[str, Any]: Audio URL or error message
        """
        prompt = arguments.get("prompt", "")
        voice = arguments.get("voice", "alloy")
        url_encode = arguments.get("url_encode", True)
        if not prompt:
            return {
                "error": "Prompt parameter is required"
            }
        # Validate prompt length (reasonable limit for text-to-speech)
        if len(prompt) > 10000:
            return {
                "error": "Prompt is too long (max 10000 characters)"
            }
        try:
            # Prepare the prompt for URL
            if url_encode:
                encoded_prompt = urllib.parse.quote(prompt)
            else:
                encoded_prompt = prompt.replace(" ", "%20")  # Basic space encoding
            # Construct the Pollinations AI text-to-speech URL
            # NOTE(review): despite the class docs mentioning Pollinations, this hits
            # the Gemini synthesize backend route, and `voice` is echoed in the result
            # but never applied to the request -- confirm whether that is intended.
            audio_url = f"/backend-api/v2/synthesize/Gemini?text={encoded_prompt}"
            if arguments.get("origin"):
                audio_url = f"{arguments.get('origin')}{audio_url}"
            # Issue a single GET (max_redirects=0) and take the final URL from the response.
            async with ClientSession() as session:
                async with session.get(audio_url, max_redirects=0) as resp:
                    audio_url = str(resp.url)
            template = 'Play the audio using this template: <audio controls src="{audio_url}">'
            return {
                "prompt": prompt,
                "voice": voice,
                "audio_url": audio_url,
                "template": template
            }
        except Exception as e:
            return {
                "error": f"Text-to-speech URL generation failed: {str(e)}"
            }
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/mcp/tools.py",
"license": "GNU General Public License v3.0",
"lines": 381,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/integration/markitdown/_youtube_converter.py | import json
import time
import re
import bs4
from typing import Any, BinaryIO, Dict, List, Union
from urllib.parse import parse_qs, urlparse, unquote
from markitdown._base_converter import DocumentConverter, DocumentConverterResult
from markitdown._stream_info import StreamInfo
# Optional YouTube transcription support
try:
    # Suppress some warnings on library import
    import warnings
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=SyntaxWarning)
        # Patch submitted upstream to fix the SyntaxWarning
        from youtube_transcript_api import YouTubeTranscriptApi
    IS_YOUTUBE_TRANSCRIPT_CAPABLE = True
except ModuleNotFoundError:
    # Library absent: the converter still works, just without transcripts.
    IS_YOUTUBE_TRANSCRIPT_CAPABLE = False

# Content types and file extensions this converter will even consider (HTML only).
ACCEPTED_MIME_TYPE_PREFIXES = [
    "text/html",
    "application/xhtml",
]
ACCEPTED_FILE_EXTENSIONS = [
    ".html",
    ".htm",
]
class YouTubeConverter(DocumentConverter):
"""Handle YouTube specially, focusing on the video title, description, and transcript."""
def accepts(
    self,
    file_stream: BinaryIO,
    stream_info: StreamInfo,
    **kwargs: Any,  # Options to pass to the converter
) -> bool:
    """Accept only HTML content whose URL is a YouTube watch page."""
    # Undo percent-encoding and the escaping some callers apply to query separators.
    url = unquote(stream_info.url or "")
    url = url.replace(r"\?", "?").replace(r"\=", "=")
    if not url.startswith("https://www.youtube.com/watch?"):
        # Not a YouTube URL
        return False
    extension = (stream_info.extension or "").lower()
    if extension in ACCEPTED_FILE_EXTENSIONS:
        return True
    mimetype = (stream_info.mimetype or "").lower()
    # Accept any HTML-ish mime type; everything else is not HTML content.
    return any(mimetype.startswith(prefix) for prefix in ACCEPTED_MIME_TYPE_PREFIXES)
def convert(
    self,
    file_stream: BinaryIO,
    stream_info: StreamInfo,
    **kwargs: Any,  # Options to pass to the converter
) -> DocumentConverterResult:
    """Render a YouTube watch page as markdown: title, metadata, description, transcript."""
    # Parse the stream
    encoding = "utf-8" if stream_info.charset is None else stream_info.charset
    soup = bs4.BeautifulSoup(file_stream, "html.parser", from_encoding=encoding)
    # Read the meta tags
    metadata: Dict[str, str] = {}
    if soup.title and soup.title.string:
        metadata["title"] = soup.title.string
    for meta in soup(["meta"]):
        if not isinstance(meta, bs4.Tag):
            continue
        # Index each tag under the first identifying attribute it carries.
        for a in meta.attrs:
            if a in ["itemprop", "property", "name"]:
                key = str(meta.get(a, ""))
                content = str(meta.get("content", ""))
                if key and content:  # Only add non-empty content
                    metadata[key] = content
                break
    # Try reading the description
    try:
        for script in soup(["script"]):
            if not isinstance(script, bs4.Tag):
                continue
            if not script.string:  # Skip empty scripts
                continue
            content = script.string
            if "ytInitialData" in content:
                # ytInitialData is an inline JSON blob holding the long-form description.
                match = re.search(r"var ytInitialData = ({.*?});", content)
                if match:
                    data = json.loads(match.group(1))
                    attrdesc = self._findKey(data, "attributedDescriptionBodyText")
                    if attrdesc and isinstance(attrdesc, dict):
                        metadata["description"] = str(attrdesc.get("content", ""))
                break
    except Exception as e:
        print(f"Error extracting description: {e}")
        pass
    # Start preparing the page
    webpage_text = "# YouTube\n"
    # NOTE(review): the assert fails (AssertionError) when none of these meta keys
    # exist, since _get then returns None -- confirm that is acceptable here.
    title = self._get(metadata, ["title", "og:title", "name"])  # type: ignore
    assert isinstance(title, str)
    if title:
        webpage_text += f"\n## {title}\n"
    stats = ""
    views = self._get(metadata, ["interactionCount"])  # type: ignore
    if views:
        stats += f"- **Views:** {views}\n"
    keywords = self._get(metadata, ["keywords"])  # type: ignore
    if keywords:
        stats += f"- **Keywords:** {keywords}\n"
    runtime = self._get(metadata, ["duration"])  # type: ignore
    if runtime:
        stats += f"- **Runtime:** {runtime}\n"
    if len(stats) > 0:
        webpage_text += f"\n### Video Metadata\n{stats}\n"
    description = self._get(metadata, ["description", "og:description"])  # type: ignore
    if description:
        webpage_text += f"\n### Description\n{description}\n"
    if IS_YOUTUBE_TRANSCRIPT_CAPABLE:
        try:
            ytt_api = YouTubeTranscriptApi()
            transcript_text = ""
            parsed_url = urlparse(stream_info.url)  # type: ignore
            params = parse_qs(parsed_url.query)  # type: ignore
            if "v" in params and params["v"][0]:
                video_id = str(params["v"][0])
                transcript_list = ytt_api.list(video_id)
                # Candidate languages: English first, then the first listed transcript's language.
                languages = ["en"]
                for transcript in transcript_list:
                    languages.append(transcript.language_code)
                    break
                try:
                    youtube_transcript_languages = kwargs.get(
                        "youtube_transcript_languages", languages
                    )
                    # Retry the transcript fetching operation
                    transcript = self._retry_operation(
                        lambda: ytt_api.fetch(
                            video_id, languages=youtube_transcript_languages
                        ),
                        retries=3,  # Retry 3 times
                        delay=2,  # 2 seconds delay between retries
                    )
                    if transcript:
                        transcript_text = " ".join(
                            [part.text for part in transcript]
                        )  # type: ignore
                except Exception as e:
                    # No transcript available
                    if len(languages) == 1:
                        print(f"Error fetching transcript: {e}")
                    else:
                        # Translate transcript into first kwarg
                        transcript = (
                            transcript_list.find_transcript(languages)
                            .translate(youtube_transcript_languages[0])
                            .fetch()
                        )
                        transcript_text = " ".join([part.text for part in transcript])
            if transcript_text:
                webpage_text += f"\n### Transcript\n{transcript_text}\n"
        except Exception as e:
            # Any transcript failure is non-fatal; the page renders without one.
            print(f"Error processing transcript: {e}")
            pass
    # Fall back to the <title> tag when no meta title was found.
    title = title if title else (soup.title.string if soup.title else "")
    assert isinstance(title, str)
    return DocumentConverterResult(
        markdown=webpage_text,
        title=title,
    )
def _get(
    self,
    metadata: Dict[str, str],
    keys: List[str],
    default: Union[str, None] = None,
) -> Union[str, None]:
    """Return the value of the first key present in *metadata*, else *default*.

    Note: presence is what matters -- an empty-string value is still returned.
    """
    for k in keys:
        if k in metadata:
            return metadata[k]
    return default
def _findKey(self, node: Any, key: str) -> Union[str, None]:
    """Recursively search nested dict/list structures for *key*.

    Returns the first matching value, or None. (The parameter was renamed from
    `json`, which shadowed the imported json module; callers pass it positionally.)
    """
    if isinstance(node, list):
        for item in node:
            found = self._findKey(item, key)
            # List branch propagates any non-None hit, including falsy values.
            if found is not None:
                return found
    elif isinstance(node, dict):
        for k, v in node.items():
            if k == key:
                return node[k]
            # Dict recursion only propagates truthy hits (original behavior kept).
            if result := self._findKey(v, key):
                return result
    return None
def _retry_operation(self, operation, retries=3, delay=2):
    """Call *operation*, retrying up to *retries* times with *delay* seconds between attempts.

    Returns the operation's result on the first success; raises Exception after
    the final failure, chaining the last underlying error as __cause__ so the
    original traceback is no longer discarded.
    """
    last_error = None
    for attempt in range(retries):
        try:
            return operation()  # Attempt the operation
        except Exception as e:
            last_error = e
            print(f"Attempt {attempt + 1} failed: {e}")
            if attempt < retries - 1:
                time.sleep(delay)  # Wait before retrying
    # If all attempts fail, raise the last exception
    raise Exception(f"Operation failed after {retries} attempts.") from last_error
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/integration/markitdown/_youtube_converter.py",
"license": "GNU General Public License v3.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/Yupp.py | import asyncio
import hashlib
import json
import os
import re
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Dict, Any, List
try:
import cloudscraper
from cloudscraper import CloudScraper
has_cloudscraper = True
except ImportError:
from typing import Type as CloudScraper
has_cloudscraper = False
from .helper import get_last_user_message
from .yupp.models import YuppModelManager
from .yupp.token_extractor import get_token_extractor
from ..cookies import get_cookies
from ..debug import log
from ..errors import (
RateLimitError,
ProviderException,
MissingAuthError,
MissingRequirementsError,
)
from ..image import is_accepted_format, to_bytes
from ..providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..providers.response import (
Reasoning,
PlainTextResponse,
PreviewResponse,
JsonConversation,
ImageResponse,
ProviderInfo,
FinishReason,
JsonResponse,
VariantResponse,
)
from ..tools.auth import AuthManager
from ..tools.media import merge_media
from ..typing import AsyncResult, Messages
# Pool of Yupp session-token accounts, populated once by load_yupp_accounts().
YUPP_ACCOUNTS: List[Dict[str, Any]] = []
# Serializes account selection so concurrent coroutines don't race on rotation state.
account_rotation_lock = asyncio.Lock()
# Image cache keyed by string id; presumably bounded by MAX_CACHE_SIZE -- usage not visible in this chunk.
ImagesCache: Dict[str, dict] = {}
# One-shot guard so the token string is only parsed once per process.
_accounts_loaded = False
# Shared pool for running blocking cloudscraper HTTP calls off the event loop.
_executor = ThreadPoolExecutor(max_workers=32)
# Intended cap for ImagesCache entries (enforcement not visible here).
MAX_CACHE_SIZE = 1000
def create_scraper():
    """Build a cloudscraper session that impersonates desktop Edge on Windows."""
    browser_profile = {
        "browser": "chrome",
        "platform": "windows",
        "desktop": True,
        "mobile": False,
    }
    scraper = cloudscraper.create_scraper(
        browser=browser_profile,
        delay=10,
        interpreter="nodejs",
    )
    # Headers mirror a real Edge 137 request so anti-bot checks pass.
    default_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
        "Accept": "text/x-component, */*",
        "Accept-Language": "en-US,en;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Ch-Ua": '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Platform": '"Windows"',
    }
    scraper.headers.update(default_headers)
    return scraper
def load_yupp_accounts(tokens_str: str):
    """Parse a comma-separated token string into the global account pool.

    Idempotent: does nothing once accounts are loaded, or when the string is empty.
    """
    global YUPP_ACCOUNTS, _accounts_loaded
    if _accounts_loaded or not tokens_str:
        return
    fresh_accounts = []
    for raw in tokens_str.split(","):
        token = raw.strip()
        if token:
            # Each account starts healthy with no recorded usage.
            fresh_accounts.append(
                {"token": token, "is_valid": True, "error_count": 0, "last_used": 0.0}
            )
    YUPP_ACCOUNTS = fresh_accounts
    _accounts_loaded = True
async def get_best_yupp_account() -> Optional[Dict[str, Any]]:
    """Pick the least-recently-used valid account, honoring error cooldowns.

    Returns None when every account is invalid or still cooling down.
    """
    # MAX_ERROR_COUNT: failures before an account is benched;
    # ERROR_COOLDOWN: seconds an account sits out after being benched.
    max_error_count = int(os.getenv("MAX_ERROR_COUNT", "3"))
    error_cooldown = int(os.getenv("ERROR_COOLDOWN", "300"))
    async with account_rotation_lock:
        now = time.time()
        valid_accounts = [
            acc
            for acc in YUPP_ACCOUNTS
            if acc["is_valid"]
            and (
                acc["error_count"] < max_error_count
                or now - acc["last_used"] > error_cooldown
            )
        ]
        if not valid_accounts:
            return None
        # Accounts that sat out their cooldown get their error count reset.
        for acc in valid_accounts:
            if (
                acc["error_count"] >= max_error_count
                and now - acc["last_used"] > error_cooldown
            ):
                acc["error_count"] = 0
        # Prefer least-recently-used, then fewest errors.
        valid_accounts.sort(key=lambda x: (x["last_used"], x["error_count"]))
        account = valid_accounts[0]
        account["last_used"] = now
        return account
def sync_claim_yupp_reward(
    scraper: CloudScraper, account: Dict[str, Any], eval_id: str
):
    """Blocking call: claim the reward for *eval_id* using the account's session token.

    Returns the new credit balance on success, or None on any failure.
    """
    try:
        # NOTE(review): log_debug is not defined or imported in the visible part of
        # this module (only `log` is imported) -- confirm it exists later in the file.
        log_debug(f"Claiming reward {eval_id}...")
        url = "https://yupp.ai/api/trpc/reward.claim?batch=1"
        payload = {"0": {"json": {"evalId": eval_id}}}
        # Authenticate via the session cookie yupp.ai expects.
        scraper.cookies.set("__Secure-yupp.session-token", account["token"])
        response = scraper.post(url, json=payload)
        response.raise_for_status()
        data = response.json()
        balance = data[0]["result"]["data"]["json"]["currentCreditBalance"]
        log_debug(f"Reward claimed successfully. New balance: {balance}")
        return balance
    except Exception as e:
        log_debug(f"Failed to claim reward {eval_id}. Error: {e}")
        return None
async def claim_yupp_reward(
    scraper: CloudScraper, account: Dict[str, Any], eval_id: str
):
    """Async wrapper: claim a reward in the shared thread pool.

    Returns the new credit balance, or None on failure
    (see sync_claim_yupp_reward).
    """
    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        _executor, sync_claim_yupp_reward, scraper, account, eval_id
    )
def sync_record_model_feedback(
    scraper: CloudScraper,
    account: Dict[str, Any],
    turn_id: str,
    left_message_id: str,
    right_message_id: str,
) -> Optional[str]:
    """Submit a left/right comparison vote for a chat turn (blocking).

    Rates the right-hand message GOOD and the left one BAD. Returns the
    evalId needed to claim the turn's reward when one was granted,
    otherwise None (also on any request failure).
    """
    try:
        log_debug(f"Recording model feedback for turn {turn_id}...")
        url = "https://yupp.ai/api/trpc/evals.recordModelFeedback?batch=1"
        payload = {
            "0": {
                "json": {
                    "turnId": turn_id,
                    "evalType": "SELECTION",
                    "messageEvals": [
                        {
                            "messageId": right_message_id,
                            "rating": "GOOD",
                            "reasons": ["Fast"],
                        },
                        {"messageId": left_message_id, "rating": "BAD", "reasons": []},
                    ],
                    "comment": "",
                    "requireReveal": False,
                }
            }
        }
        scraper.cookies.set("__Secure-yupp.session-token", account["token"])
        response = scraper.post(url, json=payload)
        response.raise_for_status()
        data = response.json()
        for result in data:
            json_data = result.get("result", {}).get("data", {}).get("json", {})
            eval_id = json_data.get("evalId")
            final_reward = json_data.get("finalRewardAmount")
            log_debug(f"Feedback recorded - evalId: {eval_id}, reward: {final_reward}")
            # Only hand back the evalId when a reward amount was granted.
            if final_reward:
                return eval_id
        return None
    except Exception as e:
        log_debug(f"Failed to record model feedback. Error: {e}")
        return None
async def record_model_feedback(
    scraper: CloudScraper,
    account: Dict[str, Any],
    turn_id: str,
    left_message_id: str,
    right_message_id: str,
) -> Optional[str]:
    """Async wrapper: record the comparison vote in the shared thread pool.

    Returns the evalId for reward claiming, or None
    (see sync_record_model_feedback).
    """
    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        _executor,
        sync_record_model_feedback,
        scraper,
        account,
        turn_id,
        left_message_id,
        right_message_id,
    )
def sync_delete_chat(
    scraper: CloudScraper, account: Dict[str, Any], chat_id: str
) -> bool:
    """Delete a chat on yupp.ai (blocking). True on success, False otherwise."""
    try:
        log_debug(f"Deleting chat {chat_id}...")
        scraper.cookies.set("__Secure-yupp.session-token", account["token"])
        response = scraper.post(
            "https://yupp.ai/api/trpc/chat.deleteChat?batch=1",
            json={"0": {"json": {"chatId": chat_id}}},
        )
        response.raise_for_status()
        data = response.json()
        # A successful delete answers with one batched result whose json
        # payload is null.
        deleted = (
            isinstance(data, list)
            and len(data) > 0
            and data[0].get("result", {}).get("data", {}).get("json") is None
        )
        if deleted:
            log_debug(f"Chat {chat_id} deleted successfully")
            return True
        log_debug(f"Unexpected response while deleting chat: {data}")
        return False
    except Exception as e:
        log_debug(f"Failed to delete chat {chat_id}: {e}")
        return False
async def delete_chat(
    scraper: CloudScraper, account: Dict[str, Any], chat_id: str
) -> bool:
    """Async wrapper: delete a chat in the shared thread pool."""
    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        _executor, sync_delete_chat, scraper, account, chat_id
    )
def sync_make_chat_private(
    scraper: CloudScraper, account: Dict[str, Any], chat_id: str
) -> bool:
    """Set a chat's sharing status to PRIVATE (blocking).

    Returns True when the API confirms the update, False on any error or
    unexpected response shape.
    """
    try:
        log_debug(f"Setting chat {chat_id} to PRIVATE...")
        url = "https://yupp.ai/api/trpc/chat.updateSharingSettings?batch=1"
        payload = {"0": {"json": {"chatId": chat_id, "status": "PRIVATE"}}}
        scraper.cookies.set("__Secure-yupp.session-token", account["token"])
        response = scraper.post(url, json=payload)
        response.raise_for_status()
        data = response.json()
        # Success is signalled by a batched result carrying a "json" payload.
        if (
            isinstance(data, list)
            and len(data) > 0
            and "json" in data[0].get("result", {}).get("data", {})
        ):
            log_debug(f"Chat {chat_id} is now PRIVATE")
            return True
        log_debug(f"Unexpected response while setting chat private: {data}")
        return False
    except Exception as e:
        log_debug(f"Failed to make chat {chat_id} private: {e}")
        return False
async def make_chat_private(
    scraper: CloudScraper, account: Dict[str, Any], chat_id: str
) -> bool:
    """Async wrapper: set a chat to PRIVATE in the shared thread pool."""
    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        _executor, sync_make_chat_private, scraper, account, chat_id
    )
def log_debug(message: str):
    """Print *message* directly when DEBUG_MODE=true, else route it to the logger."""
    debug_enabled = os.getenv("DEBUG_MODE", "false").lower() == "true"
    if debug_enabled:
        print(f"[DEBUG] {message}")
        return
    log(f"[Yupp] {message}")
def format_messages_for_yupp(messages: Messages) -> str:
    """Flatten an OpenAI-style message list into Yupp's single-prompt format.

    A single string-content message is returned stripped. Otherwise system/
    developer content comes first, then each user/assistant text part as
    "\\n\\nHuman: ..." / "\\n\\nAssistant: ...", and a trailing
    "\\n\\nAssistant:" cue is appended unless the transcript already ends
    with an assistant line. One leading blank separator is trimmed.
    """
    if not messages:
        return ""
    if len(messages) == 1 and isinstance(messages[0].get("content"), str):
        return messages[0].get("content", "").strip()

    def _text_parts(content) -> list:
        # Normalize str / list-of-parts content into a flat list of text strings.
        parts = content if isinstance(content, list) else [{"text": content}]
        return [part.get("text", "") for part in parts]

    formatted = []
    for msg in messages:
        if msg.get("role") in ("developer", "system"):
            content = msg.get("content", "")
            if isinstance(content, list):
                # Bug fix: list-style system content used to be appended as a
                # raw list object, which made the final "".join raise TypeError.
                formatted.extend(t for t in _text_parts(content) if t.strip())
            else:
                formatted.append(content)
    for msg in messages:
        if msg.get("role") not in ("user", "assistant"):
            continue
        role = "Human" if msg.get("role") == "user" else "Assistant"
        for text in _text_parts(msg.get("content", "")):
            if text.strip():
                formatted.append(f"\n\n{role}: {text}")
    if not formatted or not formatted[-1].strip().startswith("Assistant:"):
        formatted.append("\n\nAssistant:")
    result = "".join(formatted)
    if result.startswith("\n\n"):
        result = result[2:]
    return result
def evict_cache_if_needed():
    """Trim ImagesCache once it exceeds MAX_CACHE_SIZE.

    Drops the earliest-inserted keys — the overflow plus a 100-entry margin —
    so evictions happen in batches rather than on every insert.
    """
    global ImagesCache
    overflow = len(ImagesCache) - MAX_CACHE_SIZE
    if overflow <= 0:
        return
    for stale_key in list(ImagesCache.keys())[: overflow + 100]:
        del ImagesCache[stale_key]
class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
    """g4f provider backed by yupp.ai's model-comparison chat.

    Streams completions through Yupp's Next.js server actions, rotating
    across the session tokens loaded by load_yupp_accounts().
    """

    url = "https://yupp.ai"
    # Support/login link surfaced to users who lack credentials.
    login_url = "https://discord.gg/qXA4Wf4Fsm"
    # Usable only when the optional cloudscraper dependency is installed.
    working = has_cloudscraper
    active_by_default = True
    supports_stream = True
    # When True, uploads are de-duplicated via the module-level ImagesCache.
    image_cache = True
    @classmethod
    def get_models(cls, api_key: str = None, **kwargs) -> List[str]:
        """Fetch and cache the model catalogue from yupp.ai.

        Resolves a session token from the argument, the stored auth file, or
        the browser cookie jar, then populates ``cls.models`` plus the
        derived image/vision model lists and per-model tag map.

        Raises:
            MissingAuthError: when no token can be found anywhere.
        """
        if not cls.models:
            if not api_key:
                api_key = AuthManager.load_api_key(cls)
            if not api_key:
                api_key = get_cookies("yupp.ai", False).get(
                    "__Secure-yupp.session-token"
                )
            if not api_key:
                raise MissingAuthError(
                    "No Yupp accounts configured. Set YUPP_API_KEY environment variable."
                )
            manager = YuppModelManager(session=create_scraper(), api_key=api_key)
            models = manager.client.fetch_models()
            if models:
                # Map model name -> emoji tag list for UI display.
                cls.models_tags = {
                    model.get("name"): manager.processor.generate_tags(model)
                    for model in models
                }
                cls.models = [model.get("name") for model in models]
                cls.image_models = [
                    model.get("name")
                    for model in models
                    if model.get("isImageGeneration")
                ]
                # Models that accept image attachments are treated as vision models.
                cls.vision_models = [
                    model.get("name")
                    for model in models
                    if "image/*" in model.get("supportedAttachmentMimeTypes", [])
                ]
        return cls.models
    @classmethod
    def sync_prepare_files(
        cls, media, scraper: CloudScraper, account: Dict[str, Any]
    ) -> list:
        """Upload attachments to Yupp and return their attachment descriptors.

        For each (file, name) pair: request a presigned upload URL, PUT the
        raw bytes, then register the upload as a chat attachment. Results
        are cached in ImagesCache keyed by the MD5 of the file contents so
        identical files are uploaded only once (when cls.image_cache is on).
        """
        files = []
        if not media:
            return files
        for file, name in media:
            data = to_bytes(file)
            hasher = hashlib.md5()
            hasher.update(data)
            image_hash = hasher.hexdigest()
            cached_file = ImagesCache.get(image_hash)
            if cls.image_cache and cached_file:
                log_debug("Using cached image")
                files.append(cached_file)
                continue
            scraper.cookies.set("__Secure-yupp.session-token", account["token"])
            # Step 1: ask the backend for a presigned upload slot.
            presigned_resp = scraper.post(
                "https://yupp.ai/api/trpc/chat.createPresignedURLForUpload?batch=1",
                json={
                    "0": {
                        "json": {
                            "fileName": name,
                            "fileSize": len(data),
                            "contentType": is_accepted_format(data),
                        }
                    }
                },
                headers={"Content-Type": "application/json"},
            )
            presigned_resp.raise_for_status()
            upload_info = presigned_resp.json()[0]["result"]["data"]["json"]
            upload_url = upload_info["signedUrl"]
            # Step 2: upload the raw bytes to the presigned URL.
            scraper.put(
                upload_url,
                data=data,
                headers={
                    "Content-Type": is_accepted_format(data),
                    "Content-Length": str(len(data)),
                },
            )
            # Step 3: register the uploaded file as a chat attachment.
            attachment_resp = scraper.post(
                "https://yupp.ai/api/trpc/chat.createAttachmentForUploadedFile?batch=1",
                json={
                    "0": {
                        "json": {
                            "fileName": name,
                            "contentType": is_accepted_format(data),
                            "fileId": upload_info["fileId"],
                        }
                    }
                },
                cookies={"__Secure-yupp.session-token": account["token"]},
            )
            attachment_resp.raise_for_status()
            attachment = attachment_resp.json()[0]["result"]["data"]["json"]
            file_info = {
                "fileName": attachment["file_name"],
                "contentType": attachment["content_type"],
                "attachmentId": attachment["attachment_id"],
                "chatMessageId": "",
            }
            # Keep the cache bounded before inserting the new entry.
            evict_cache_if_needed()
            ImagesCache[image_hash] = file_info
            files.append(file_info)
        return files
@classmethod
async def prepare_files(
cls, media, scraper: CloudScraper, account: Dict[str, Any]
) -> list:
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
_executor, cls.sync_prepare_files, media, scraper, account
)
@classmethod
def sync_get_signed_image(cls, scraper: CloudScraper, image_id: str) -> str:
url = "https://yupp.ai/api/trpc/chat.getSignedImage"
resp = scraper.get(
url,
params={
"batch": "1",
"input": json.dumps({"0": {"json": {"imageId": image_id}}}),
},
)
resp.raise_for_status()
data = resp.json()[0]["result"]["data"]["json"]
return data.get("signed_url", data.get("signedURL"))
@classmethod
async def get_signed_image(cls, scraper: CloudScraper, image_id: str) -> str:
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
_executor, cls.sync_get_signed_image, scraper, image_id
)
@classmethod
def sync_stream_request(
cls, scraper: CloudScraper, url: str, payload: list, headers: dict, timeout: int
):
response = scraper.post(
url, json=payload, headers=headers, stream=True, timeout=timeout
)
response.raise_for_status()
return response
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        api_key: str = None,
        **kwargs,
    ) -> AsyncResult:
        """Stream a chat completion from yupp.ai with account rotation.

        Resolves a session token, builds the Next.js server-action payload
        for either a new or an existing conversation, then streams the
        response. On rate limits or auth/token errors the account pool is
        rotated and the request retried — at most one attempt per account.

        Raises:
            MissingRequirementsError: cloudscraper is not installed.
            MissingAuthError: no session token could be resolved.
            ProviderException: no usable account or all attempts failed.
        """
        if not has_cloudscraper:
            raise MissingRequirementsError(
                "cloudscraper library is required for Yupp provider | install it via 'pip install cloudscraper'"
            )
        if not api_key:
            api_key = AuthManager.load_api_key(cls)
        if not api_key:
            api_key = get_cookies("yupp.ai", False).get("__Secure-yupp.session-token")
        if api_key:
            load_yupp_accounts(api_key)
            log_debug(f"Yupp provider initialized with {len(YUPP_ACCOUNTS)} accounts")
        else:
            raise MissingAuthError(
                "No Yupp accounts configured. Set YUPP_API_KEY environment variable."
            )
        conversation = kwargs.get("conversation")
        url_uuid = conversation.url_uuid if conversation else None
        is_new_conversation = url_uuid is None
        prompt = kwargs.get("prompt")
        if prompt is None:
            if is_new_conversation:
                prompt = format_messages_for_yupp(messages)
            else:
                # Follow-up turns only send the newest user message.
                prompt = get_last_user_message(messages, prompt)
        log_debug(
            f"Use url_uuid: {url_uuid}, Formatted prompt length: {len(prompt)}, Is new conversation: {is_new_conversation}"
        )
        max_attempts = len(YUPP_ACCOUNTS)
        for attempt in range(max_attempts):
            account = await get_best_yupp_account()
            if not account:
                raise ProviderException("No valid Yupp accounts available")
            try:
                scraper = create_scraper()
                if proxy:
                    scraper.proxies = {"http": proxy, "https": proxy}
                # Initialize token extractor for automatic token swapping
                token_extractor = get_token_extractor(
                    jwt_token=account["token"], scraper=scraper
                )
                turn_id = str(uuid.uuid4())
                media = kwargs.get("media")
                if media:
                    media_ = list(merge_media(media, messages))
                    files = await cls.prepare_files(
                        media_, scraper=scraper, account=account
                    )
                else:
                    files = []
                mode = "image" if model in cls.image_models else "text"
                if is_new_conversation:
                    # New chats get a fresh client-side UUID that also becomes
                    # the conversation id yielded to the caller.
                    url_uuid = str(uuid.uuid4())
                    payload = [
                        url_uuid,
                        turn_id,
                        prompt,
                        "$undefined",
                        "$undefined",
                        files,
                        "$undefined",
                        [{"modelName": model, "promptModifierId": "$undefined"}]
                        if model
                        else "none",
                        mode,
                        True,
                        "$undefined",
                    ]
                    url = f"https://yupp.ai/chat/{url_uuid}?stream=true"
                    yield JsonConversation(url_uuid=url_uuid)
                    next_action = kwargs.get(
                        "next_action",
                        await token_extractor.get_token("new_conversation"),
                    )
                else:
                    # Existing conversations use a different positional payload
                    # shape and server-action token.
                    payload = [
                        url_uuid,
                        turn_id,
                        prompt,
                        False,
                        [],
                        [{"modelName": model, "promptModifierId": "$undefined"}]
                        if model
                        else [],
                        mode,
                        files,
                    ]
                    url = f"https://yupp.ai/chat/{url_uuid}?stream=true"
                    next_action = kwargs.get(
                        "next_action",
                        await token_extractor.get_token("existing_conversation"),
                    )
                headers = {
                    "accept": "text/x-component",
                    "content-type": "text/plain;charset=UTF-8",
                    "next-action": next_action,
                    "cookie": f"__Secure-yupp.session-token={account['token']}",
                }
                log_debug(f"Sending request to: {url}")
                log_debug(
                    f"Payload structure: {type(payload)}, length: {len(str(payload))}"
                )
                _timeout = kwargs.get("timeout")
                if isinstance(_timeout, (int, float)):
                    timeout = int(_timeout)
                else:
                    timeout = 5 * 60
                loop = asyncio.get_event_loop()
                # Blocking request runs in the shared executor so the event
                # loop stays responsive while waiting for first bytes.
                response = await loop.run_in_executor(
                    _executor,
                    cls.sync_stream_request,
                    scraper,
                    url,
                    payload,
                    headers,
                    timeout,
                )
                try:
                    async for chunk in cls._process_stream_response(
                        response, account, scraper, prompt, model
                    ):
                        yield chunk
                finally:
                    response.close()
                if not kwargs.get("conversation"):
                    # Throwaway (stateless) chats are deleted in the background.
                    asyncio.create_task(delete_chat(scraper, account, url_uuid))
                return
            except RateLimitError:
                log_debug(
                    f"Account ...{account['token'][-4:]} hit rate limit, rotating"
                )
                async with account_rotation_lock:
                    account["error_count"] += 1
                continue
            except ProviderException as e:
                log_debug(f"Account ...{account['token'][-4:]} failed: {str(e)}")
                error_msg = str(e).lower()
                # Check if this is a token-related error
                if any(
                    x in error_msg
                    for x in [
                        "auth",
                        "401",
                        "403",
                        "404",
                        "invalid action",
                        "action",
                        "next-action",
                    ]
                ):
                    # Mark token as failed to trigger extraction
                    token_type = (
                        "new_conversation"
                        if is_new_conversation
                        else "existing_conversation"
                    )
                    await token_extractor.mark_token_failed(token_type, next_action)
                    log_debug(
                        f"Token failure detected, marked for extraction: {token_type}"
                    )
                async with account_rotation_lock:
                    # Auth failures invalidate the account outright; anything
                    # else only bumps the error counter.
                    if "auth" in error_msg or "401" in error_msg or "403" in error_msg:
                        account["is_valid"] = False
                    else:
                        account["error_count"] += 1
                continue
            except Exception as e:
                log_debug(
                    f"Unexpected error with account ...{account['token'][-4:]}: {str(e)}"
                )
                error_str = str(e).lower()
                # Check for token-related errors in generic exceptions too
                if any(x in error_str for x in ["404", "401", "403", "invalid action"]):
                    token_type = (
                        "new_conversation"
                        if is_new_conversation
                        else "existing_conversation"
                    )
                    await token_extractor.mark_token_failed(token_type, next_action)
                    log_debug(
                        f"Token failure detected in exception handler: {token_type}"
                    )
                if "500" in error_str or "internal server error" in error_str:
                    # Server-side errors are retried on the next account.
                    async with account_rotation_lock:
                        account["error_count"] += 1
                    continue
                async with account_rotation_lock:
                    account["error_count"] += 1
                raise ProviderException(f"Yupp request failed: {str(e)}") from e
        raise ProviderException("All Yupp accounts failed after rotation attempts")
    @classmethod
    async def _process_stream_response(
        cls,
        response,
        account: Dict[str, Any],
        scraper: CloudScraper,
        prompt: str,
        model_id: str,
    ) -> AsyncResult:
        """Decode Yupp's Flight-style line stream into g4f response chunks.

        Each stream line looks like ``<hex id>:<json>``; chunks are linked
        through ``$@ref`` pointers announced in earlier chunks. The parser
        follows the user-selected model stream (yielded as text/images), the
        comparison "variant" and quick-response streams (yielded as
        previews), inline <think> reasoning and image-gen <yapp> blocks, and
        finally — in the ``finally`` clause — records a comparison vote and
        claims the per-turn reward.
        """
        line_pattern = re.compile(b"^([0-9a-fA-F]+):(.*)")
        target_stream_id = None
        reward_info = None
        is_thinking = False
        thinking_content = ""
        normal_content = ""
        quick_content = ""
        variant_text = ""
        stream = {"target": [], "variant": [], "quick": [], "thinking": [], "extra": []}
        select_stream = [None, None]
        capturing_ref_id: Optional[str] = None
        capturing_lines: List[bytes] = []
        think_blocks: Dict[str, str] = {}
        image_blocks: Dict[str, str] = {}

        def extract_ref_id(ref):
            # "$@abc" -> "abc"; anything else -> None.
            return (
                ref[2:]
                if ref and isinstance(ref, str) and ref.startswith("$@")
                else None
            )

        def extract_ref_name(ref: str) -> Optional[str]:
            # Accept both "$@ref" and "$ref" pointer spellings.
            if not isinstance(ref, str):
                return None
            if ref.startswith("$@"):
                return ref[2:]
            if ref.startswith("$") and len(ref) > 1:
                return ref[1:]
            return None

        def is_valid_content(content: str) -> bool:
            # Filter out empty payloads and the RSC "$undefined" placeholder.
            if not content or content in [None, "", "$undefined"]:
                return False
            return True

        async def process_content_chunk(
            content: str, chunk_id: str, line_count: int, *, for_target: bool = False
        ):
            # Turn one raw content string into zero or more response objects:
            # inline image-gen payloads become ImageResponse, reasoning-mode
            # text becomes Reasoning, everything else is yielded as plain text.
            nonlocal normal_content
            if not is_valid_content(content):
                return
            if '<yapp class="image-gen">' in content:
                img_block = (
                    content.split('<yapp class="image-gen">').pop().split("</yapp>")[0]
                )
                image_id = json.loads(img_block).get("image_id")
                signed_url = await cls.get_signed_image(scraper, image_id)
                img = ImageResponse(signed_url, prompt)
                yield img
                return
            if is_thinking:
                yield Reasoning(content)
            else:
                if for_target:
                    normal_content += content
                yield content

        def finalize_capture_block(ref_id: str, lines: List[bytes]):
            # Extract <think>...</think> and image-gen <yapp> payloads from a
            # completed multi-line capture so later $ref pointers resolve them.
            text = b"".join(lines).decode("utf-8", errors="ignore")
            think_start = text.find("<think>")
            think_end = text.find("</think>")
            if think_start != -1 and think_end != -1 and think_end > think_start:
                inner = text[think_start + len("<think>") : think_end].strip()
                if inner:
                    think_blocks[ref_id] = inner
            yapp_start = text.find('<yapp class="image-gen">')
            if yapp_start != -1:
                yapp_end = text.find("</yapp>", yapp_start)
                if yapp_end != -1:
                    yapp_block = text[yapp_start : yapp_end + len("</yapp>")]
                    image_blocks[ref_id] = yapp_block

        try:
            line_count = 0
            quick_response_id = None
            variant_stream_id = None
            is_started: bool = False
            variant_image: Optional[ImageResponse] = None
            # Default chunk ids used until the manifest announces real ones.
            reward_id = "a"
            reward_kw = {}
            routing_id = "e"
            turn_id = None
            persisted_turn_id = None
            left_message_id = None
            right_message_id = None
            nudge_new_chat_id = None
            nudge_new_chat = False
            loop = asyncio.get_event_loop()

            def iter_lines():
                for line in response.iter_lines():
                    if line:
                        yield line

            lines_iterator = iter_lines()
            while True:
                try:
                    # Pull the next line in the executor so the blocking socket
                    # read does not stall the event loop.
                    line = await loop.run_in_executor(
                        _executor, lambda: next(lines_iterator, None)
                    )
                    if line is None:
                        break
                except StopIteration:
                    break
                line_count += 1
                if isinstance(line, str):
                    line = line.encode()
                if capturing_ref_id is not None:
                    # Inside a multi-line <yapp> capture: accumulate until the
                    # closing tag, then resume normal parsing with any suffix.
                    capturing_lines.append(line)
                    if b"</yapp>" in line:
                        idx = line.find(b"</yapp>")
                        suffix = line[idx + len(b"</yapp>") :]
                        finalize_capture_block(capturing_ref_id, capturing_lines)
                        capturing_ref_id = None
                        capturing_lines = []
                        if suffix.strip():
                            line = suffix
                        else:
                            continue
                    else:
                        continue
                match = line_pattern.match(line)
                if not match:
                    if b"<think>" in line:
                        # NOTE(review): `match` is already falsy here, so this
                        # re-match can never succeed and capture never starts
                        # from this branch — confirm the intended pattern.
                        m = line_pattern.match(line)
                        if m:
                            capturing_ref_id = m.group(1).decode()
                            capturing_lines = [line]
                            continue
                    continue
                chunk_id, chunk_data = match.groups()
                chunk_id = chunk_id.decode()
                if nudge_new_chat_id and chunk_id == nudge_new_chat_id:
                    nudge_new_chat = chunk_data.decode()
                    continue
                try:
                    data = json.loads(chunk_data) if chunk_data != b"{}" else {}
                except json.JSONDecodeError:
                    continue
                if (
                    chunk_id == reward_id
                    and isinstance(data, dict)
                    and "unclaimedRewardInfo" in data
                ):
                    reward_info = data
                    log_debug(f"Found reward info")
                elif chunk_id == "1":
                    # Chunk "1" is the stream manifest: it announces the $refs
                    # of every sub-stream and id the parser must follow.
                    yield PlainTextResponse(line.decode(errors="ignore"))
                    if isinstance(data, dict):
                        left_stream = data.get("leftStream", {})
                        right_stream = data.get("rightStream", {})
                        if data.get("quickResponse", {}) != "$undefined":
                            quick_response_id = extract_ref_id(
                                data.get("quickResponse", {})
                                .get("stream", {})
                                .get("next")
                            )
                        if data.get("turnId", {}) != "$undefined":
                            turn_id = extract_ref_id(data.get("turnId", {}).get("next"))
                        if data.get("persistedTurn", {}) != "$undefined":
                            persisted_turn_id = extract_ref_id(
                                data.get("persistedTurn", {}).get("next")
                            )
                        if data.get("leftMessageId", {}) != "$undefined":
                            left_message_id = extract_ref_id(
                                data.get("leftMessageId", {}).get("next")
                            )
                        if data.get("rightMessageId", {}) != "$undefined":
                            right_message_id = extract_ref_id(
                                data.get("rightMessageId", {}).get("next")
                            )
                        reward_id = (
                            extract_ref_id(data.get("pendingRewardActionResult", ""))
                            or reward_id
                        )
                        routing_id = (
                            extract_ref_id(data.get("routingResultPromise", ""))
                            or routing_id
                        )
                        nudge_new_chat_id = (
                            extract_ref_id(data.get("nudgeNewChatPromise", ""))
                            or nudge_new_chat_id
                        )
                        select_stream = [left_stream, right_stream]
                elif chunk_id == routing_id:
                    # Routing chunk says which side (left/right) carries the
                    # user-selected model vs. the comparison variant.
                    yield PlainTextResponse(line.decode(errors="ignore"))
                    if isinstance(data, dict):
                        provider_info = cls.get_dict()
                        provider_info["model"] = model_id
                        for i, selection in enumerate(data.get("modelSelections", [])):
                            if selection.get("selectionSource") == "USER_SELECTED":
                                target_stream_id = extract_ref_id(
                                    select_stream[i].get("next")
                                )
                                provider_info["modelLabel"] = selection.get(
                                    "shortLabel"
                                )
                                provider_info["modelUrl"] = selection.get("externalUrl")
                                log_debug(f"Found target stream ID: {target_stream_id}")
                            else:
                                variant_stream_id = extract_ref_id(
                                    select_stream[i].get("next")
                                )
                                provider_info["variantLabel"] = selection.get(
                                    "shortLabel"
                                )
                                provider_info["variantUrl"] = selection.get(
                                    "externalUrl"
                                )
                                log_debug(
                                    f"Found variant stream ID: {variant_stream_id}"
                                )
                        yield ProviderInfo.from_dict(provider_info)
                elif target_stream_id and chunk_id == target_stream_id:
                    yield PlainTextResponse(line.decode(errors="ignore"))
                    if isinstance(data, dict):
                        # Follow the linked list: each chunk names its successor.
                        target_stream_id = extract_ref_id(data.get("next"))
                        content = data.get("curr", "")
                        if content:
                            ref_name = extract_ref_name(content)
                            if ref_name and (
                                ref_name in think_blocks or ref_name in image_blocks
                            ):
                                # Content is a pointer to a previously captured
                                # <think> or image block — resolve it.
                                if ref_name in think_blocks:
                                    t_text = think_blocks[ref_name]
                                    if t_text:
                                        reasoning = Reasoning(t_text)
                                        stream["thinking"].append(reasoning)
                                if ref_name in image_blocks:
                                    img_block_text = image_blocks[ref_name]
                                    async for chunk in process_content_chunk(
                                        img_block_text,
                                        ref_name,
                                        line_count,
                                        for_target=True,
                                    ):
                                        stream["target"].append(chunk)
                                        is_started = True
                                        yield chunk
                            else:
                                async for chunk in process_content_chunk(
                                    content, chunk_id, line_count, for_target=True
                                ):
                                    stream["target"].append(chunk)
                                    is_started = True
                                    yield chunk
                elif variant_stream_id and chunk_id == variant_stream_id:
                    yield PlainTextResponse("[Variant] " + line.decode(errors="ignore"))
                    if isinstance(data, dict):
                        variant_stream_id = extract_ref_id(data.get("next"))
                        content = data.get("curr", "")
                        if content:
                            async for chunk in process_content_chunk(
                                content, chunk_id, line_count, for_target=False
                            ):
                                stream["variant"].append(chunk)
                                if isinstance(chunk, ImageResponse):
                                    yield PreviewResponse(str(chunk))
                                else:
                                    variant_text += str(chunk)
                                    # Only preview the variant while the target
                                    # stream has not produced output yet.
                                    if not is_started:
                                        yield PreviewResponse(variant_text)
                elif quick_response_id and chunk_id == quick_response_id:
                    yield PlainTextResponse("[Quick] " + line.decode(errors="ignore"))
                    if isinstance(data, dict):
                        content = data.get("curr", "")
                        if content:
                            async for chunk in process_content_chunk(
                                content, chunk_id, line_count, for_target=False
                            ):
                                stream["quick"].append(chunk)
                                quick_content += content
                                yield PreviewResponse(content)
                elif chunk_id == turn_id:
                    reward_kw["turn_id"] = data.get("curr", "")
                elif chunk_id == persisted_turn_id:
                    pass
                elif chunk_id == right_message_id:
                    reward_kw["right_message_id"] = data.get("curr", "")
                elif chunk_id == left_message_id:
                    reward_kw["left_message_id"] = data.get("curr", "")
                elif isinstance(data, dict) and "curr" in data:
                    # Unrecognized-but-shaped chunks are kept as "extra".
                    content = data.get("curr", "")
                    if content:
                        async for chunk in process_content_chunk(
                            content, chunk_id, line_count, for_target=False
                        ):
                            stream["extra"].append(chunk)
                            if (
                                isinstance(chunk, str)
                                and "<streaming stopped unexpectedly" in chunk
                            ):
                                yield FinishReason(chunk)
                    yield PlainTextResponse(
                        "[Extra] " + line.decode(errors="ignore")
                    )
            if variant_image is not None:
                yield variant_image
            elif variant_text:
                yield VariantResponse(variant_text)
            yield JsonResponse(**stream)
            log_debug(f"Finished processing {line_count} lines")
        finally:
            # Vote on the comparison (right = GOOD) and claim the credit
            # reward for this turn, regardless of how the stream ended.
            log_debug(f"Get Reward: {reward_kw}")
            if (
                reward_kw.get("turn_id")
                and reward_kw.get("left_message_id")
                and reward_kw.get("right_message_id")
            ):
                eval_id = await record_model_feedback(
                    scraper,
                    account,
                    reward_kw["turn_id"],
                    reward_kw["left_message_id"],
                    reward_kw["right_message_id"],
                )
                if eval_id:
                    await claim_yupp_reward(scraper, account, eval_id)
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/Yupp.py",
"license": "GNU General Public License v3.0",
"lines": 958,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/yupp/models.py | import json
import os
import requests
from typing import Dict, List, Optional, Any
from dataclasses import dataclass
@dataclass
class ModelConfig:
    """Configuration for API requests"""

    # Root of the Yupp web app; endpoints are resolved against this.
    base_url: str = "https://yupp.ai"
    # Batched tRPC endpoint returning the model list plus homepage scribble.
    api_endpoint: str = "/api/trpc/model.getModelInfoList,scribble.getScribbleByLabel"
    # Request timeout in seconds.
    timeout: int = 30
    # Local JSON file used when the API is unreachable.
    fallback_file: str = "models.json"
    # Default output path for the processed model list.
    output_file: str = "model.json"
class YuppAPIClient:
    """Yupp API client for fetching model data"""

    def __init__(self, session, config: ModelConfig = None, api_key: str = None):
        """Store the session/config and apply the session-token cookie."""
        self.config = config or ModelConfig()
        self.api_key = api_key
        self.session = session
        self._set_cookies()

    def _setup_session(self) -> None:
        """Setup session with headers and cookies.

        NOTE(review): this helper is never invoked in this module — confirm
        whether __init__ should call it, or whether callers are expected to
        pass a pre-configured session (e.g. a cloudscraper instance).
        """
        self.session.headers.update(self._get_headers())

    def _get_headers(self) -> Dict[str, str]:
        """Get request headers"""
        return {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Referer": f"{self.config.base_url}/",
            "Origin": self.config.base_url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }

    def _set_cookies(self) -> None:
        """Set cookies from environment variable"""
        token = self._get_session_token()
        if token:
            self.session.cookies.set("__Secure-yupp.session-token", token)

    def _get_session_token(self) -> Optional[str]:
        """Return the explicit api_key, or the first token from YUPP_TOKENS.

        Bug fix: this docstring previously sat *after* the api_key early
        return, making it a dead string statement instead of documentation.
        """
        if self.api_key:
            return self.api_key
        env_tokens = os.getenv("YUPP_TOKENS")
        if not env_tokens:
            return None
        try:
            # YUPP_TOKENS may hold several comma-separated tokens; use the first.
            tokens = [t.strip() for t in env_tokens.split(",") if t.strip()]
            return tokens[0] if tokens else None
        except Exception as e:
            print(f"Warning: Failed to parse YUPP_TOKENS: {e}")
            return None

    def _build_api_url(self) -> str:
        """Build the complete API URL"""
        # Pre-encoded tRPC batch input for the two bundled queries.
        params = "batch=1&input=%7B%220%22%3A%7B%22json%22%3Anull%2C%22meta%22%3A%7B%22values%22%3A%5B%22undefined%22%5D%7D%7D%2C%221%22%3A%7B%22json%22%3A%7B%22label%22%3A%22homepage_banner%22%7D%7D%7D"
        return f"{self.config.base_url}{self.config.api_endpoint}?{params}"

    def fetch_models(self) -> Optional[List[Dict[str, Any]]]:
        """Fetch model data from API; returns the model list or None on failure."""
        url = self._build_api_url()
        try:
            print(f"Fetching data from: {url}")
            response = self.session.get(url, timeout=self.config.timeout)
            response.raise_for_status()
            data = response.json()
            print("Successfully fetched and parsed model data")
            # Extract model list from response structure
            if data and isinstance(data, list) and len(data) > 0:
                return data[0]["result"]["data"]["json"]
            else:
                print("Unexpected response format")
                return None
        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")
            return None
        except (ValueError, json.JSONDecodeError) as e:
            print(f"JSON parsing failed: {e}")
            return None
        except KeyError as e:
            print(f"Data structure error - missing key: {e}")
            return None
class ModelProcessor:
    """Process and filter model data"""

    # Model families that are always surfaced in the output.
    SUPPORTED_FAMILIES = {
        "GPT", "Claude", "Gemini", "Qwen", "DeepSeek", "Perplexity", "Kimi"
    }

    # Capability flag -> emoji badge appended to the model label.
    TAG_MAPPING = {
        "isPro": "☀️",
        "isMax": "🔥",
        "isNew": "🆕",
        "isLive": "🎤",
        "isAgent": "🤖",
        "isFast": "🚀",
        "isReasoning": "🧠",
        "isImageGeneration": "🎨",
    }

    @classmethod
    def generate_tags(cls, item: Dict[str, Any]) -> List[str]:
        """Generate emoji tags for model display, in TAG_MAPPING order."""
        tags = []
        # Add emoji tags based on boolean flags
        for key, emoji in cls.TAG_MAPPING.items():
            if item.get(key, False):
                tags.append(emoji)
        # Add attachment tag if supported
        if item.get("supportedAttachmentMimeTypes"):
            tags.append("📎")
        return tags

    @classmethod
    def should_include_model(cls, item: Dict[str, Any]) -> bool:
        """Check if model should be included in output."""
        family = item.get("family")
        # Include if in a supported family or it has special features.
        # Bug fix: coerce to bool — the annotation promises bool, but the raw
        # expression leaked whatever truthy/falsy value the item held
        # (e.g. None for an unknown family).
        return bool(
            family in cls.SUPPORTED_FAMILIES or
            item.get("isImageGeneration") or
            item.get("isAgent") or
            item.get("isLive")
        )

    @classmethod
    def process_model_item(cls, item: Dict[str, Any]) -> Dict[str, Any]:
        """Project one raw model item onto the output schema, tagging its label."""
        tags = cls.generate_tags(item)
        label = item.get("label", "")
        # Add tags to label if present
        if tags:
            label += "\n" + " | ".join(tags)
        return {
            "id": item.get("id"),
            "name": item.get("name"),
            "label": label,
            "shortLabel": item.get("shortLabel"),
            "publisher": item.get("publisher"),
            "family": item.get("family"),
            "isPro": item.get("isPro", False),
            "isInternal": item.get("isInternal", False),
            "isMax": item.get("isMax", False),
            "isLive": item.get("isLive", False),
            "isNew": item.get("isNew", False),
            "isImageGeneration": item.get("isImageGeneration", False),
            "isAgent": item.get("isAgent", False),
            "isReasoning": item.get("isReasoning", False),
            "isFast": item.get("isFast", False),
        }

    @classmethod
    def filter_and_process(cls, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Filter and process model data"""
        return [
            cls.process_model_item(item)
            for item in data
            if cls.should_include_model(item)
        ]
class DataManager:
    """Handle data loading and saving operations"""

    @staticmethod
    def load_fallback_data(filename: str) -> List[Dict[str, Any]]:
        """Load fallback data from a local JSON file; [] on any failure."""
        try:
            with open(filename, "r", encoding="utf-8") as f:
                return json.load(f)
        except FileNotFoundError:
            # Bug fix: the message previously printed a literal "(unknown)"
            # instead of the missing path.
            print(f"Fallback file not found: {filename}")
            return []
        except json.JSONDecodeError as e:
            print(f"Failed to parse fallback file: {e}")
            return []

    @staticmethod
    def save_data(data: List[Dict[str, Any]], filename: str) -> bool:
        """Save data to a JSON file; returns True on success."""
        try:
            # Create the parent directory if needed ("." for bare filenames).
            os.makedirs(os.path.dirname(filename) or ".", exist_ok=True)
            # open(..., "w") creates the file itself, so the previous separate
            # "touch" step was redundant and has been dropped.
            with open(filename, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=4, ensure_ascii=False)
            # Bug fix: include the destination path instead of "(unknown)".
            print(f"Successfully saved {len(data)} models to {filename}")
            return True
        except Exception as e:
            print(f"Failed to save data: {e}")
            return False
class YuppModelManager:
    """Main manager class for Yupp model operations"""

    def __init__(self, session, config: ModelConfig = None, api_key: str = None):
        # One shared config drives the client, processor and data manager.
        self.config = config or ModelConfig()
        self.client = YuppAPIClient(session, self.config, api_key)
        self.processor = ModelProcessor()
        self.data_manager = DataManager()

    def has_valid_token(self) -> bool:
        """Check if valid token is available"""
        return self.client._get_session_token() is not None

    def fetch_and_save_models(self, output_file: str = None) -> bool:
        """Main method to fetch and save model data.

        Fetches the model list from the API (falling back to the local
        fallback file on failure), filters/processes it, and writes the
        result to ``output_file`` (defaults to config.output_file).
        Returns True on success.
        """
        output_file = output_file or self.config.output_file
        print("=== Yupp Model Data Fetcher ===")
        if not self.has_valid_token():
            print("Warning: YUPP_TOKENS environment variable not set")
            return False
        # Try to fetch from API
        data = self.client.fetch_models()
        # Fallback to local data if API fails
        if not data:
            print("API request failed, trying fallback data...")
            data = self.data_manager.load_fallback_data(self.config.fallback_file)
        if not data:
            print("No model data available")
            return False
        print(f"Processing {len(data)} models...")
        processed_models = self.processor.filter_and_process(data)
        return self.data_manager.save_data(processed_models, output_file)
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/yupp/models.py",
"license": "GNU General Public License v3.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/search/CachedSearch.py | from __future__ import annotations
import json
import hashlib
from pathlib import Path
from urllib.parse import quote_plus
from datetime import date
from ...typing import AsyncResult, Messages, Optional
from ..base_provider import AsyncGeneratorProvider, AuthFileMixin
from ...cookies import get_cookies_dir
from ..helper import format_media_prompt
from .DDGS import DDGS, SearchResults, SearchResultEntry
from .SearXNG import SearXNG
from ... import debug
async def search(
    query: str,
    max_results: int = 5,
    max_words: int = 2500,
    backend: str = "auto",
    add_text: bool = True,
    timeout: int = 5,
    region: str = "us-en",
    provider: str = "DDG"
) -> SearchResults:
    """
    Performs a web search and returns search results.

    With provider="SearXNG" the query goes to a local SearXNG container and
    the streamed text chunks are wrapped into synthetic result entries;
    otherwise the request is delegated to the DDGS provider (first yielded
    value is the SearchResults object).
    """
    if provider == "SearXNG":
        debug.log(f"[SearXNG] Using local container for query: {query}")
        results_texts = []
        async for chunk in SearXNG.create_async_generator(
            "SearXNG",
            [{"role": "user", "content": query}],
            max_results=max_results,
            max_words=max_words,
            add_text=add_text
        ):
            if isinstance(chunk, str):
                results_texts.append(chunk)
        # Approximate word usage by counting spaces in the collected chunks.
        used_words = sum(text.count(" ") for text in results_texts)
        return SearchResults([
            SearchResultEntry(
                title=f"Result {i + 1}",
                url="",
                snippet=text,
                text=text
            ) for i, text in enumerate(results_texts)
        ], used_words=used_words)
    return await anext(DDGS.create_async_generator(
        provider,
        [],
        prompt=query,
        max_results=max_results,
        max_words=max_words,
        add_text=add_text,
        timeout=timeout,
        region=region,
        backend=backend
    ))
class CachedSearch(AsyncGeneratorProvider, AuthFileMixin):
    # Marks the provider as usable; checked by the provider registry.
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        prompt: str = None,
        **kwargs
    ) -> AsyncResult:
        """
        Combines search results with the user prompt, using caching for improved efficiency.

        Yields a single SearchResults object. Results are cached per calendar day
        under the cookies directory, keyed by an MD5 hash of the model, prompt and
        the effective search parameters, so entries expire naturally.
        """
        prompt = format_media_prompt(messages, prompt)
        # Keep only the recognized search keyword arguments (name reused: list -> dict).
        search_parameters = ["max_results", "max_words", "add_text", "timeout", "region"]
        search_parameters = {k: v for k, v in kwargs.items() if k in search_parameters}
        # Hash the full request so any parameter change produces a new cache entry.
        json_bytes = json.dumps({"model": model, "query": prompt, **search_parameters}, sort_keys=True).encode(errors="ignore")
        md5_hash = hashlib.md5(json_bytes).hexdigest()
        cache_dir: Path = Path(get_cookies_dir()) / ".scrape_cache" / "web_search" / f"{date.today()}"
        cache_dir.mkdir(parents=True, exist_ok=True)
        cache_file = cache_dir / f"{quote_plus(prompt[:20])}.{md5_hash}.cache"
        search_results: Optional[SearchResults] = None
        if cache_file.exists():
            with cache_file.open("r") as f:
                try:
                    search_results = SearchResults.from_dict(json.loads(f.read()))
                except json.JSONDecodeError:
                    # Corrupt cache entry: fall through to a fresh search.
                    search_results = None
        if search_results is None:
            if model:
                # The model name selects the search provider (e.g. "SearXNG").
                search_parameters["provider"] = model
            search_results = await search(prompt, **search_parameters)
            # Only cache non-empty result sets.
            if search_results.results:
                with cache_file.open("w") as f:
                    f.write(json.dumps(search_results.get_dict()))
        yield search_results | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/search/CachedSearch.py",
"license": "GNU General Public License v3.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/search/DDGS.py | from __future__ import annotations
import hashlib
import asyncio
from pathlib import Path
from typing import Iterator, List, Optional
from urllib.parse import urlparse, quote_plus
from aiohttp import ClientSession, ClientTimeout, ClientError
from datetime import date
import asyncio
# Optional dependencies using the new 'ddgs' package name
try:
from ddgs import DDGS as DDGSClient
from bs4 import BeautifulSoup
has_requirements = True
except ImportError:
has_requirements = False
from ...typing import Messages, AsyncResult
from ...cookies import get_cookies_dir
from ...providers.response import format_link, JsonMixin, Sources
from ...errors import MissingRequirementsError
from ...providers.base_provider import AsyncGeneratorProvider
from ..helper import format_media_prompt
def scrape_text(html: str, max_words: Optional[int] = None, add_source: bool = True, count_images: int = 2) -> Iterator[str]:
    """
    Parses the provided HTML and yields text fragments.

    Yields markdown-style lines: up to ``count_images`` image links, then
    deduplicated text lines until the ``max_words`` budget is exhausted, and
    finally a "Source:" line when ``add_source`` is set and a canonical link
    exists in the document.
    """
    soup = BeautifulSoup(html, "html.parser")
    # Narrow the document to the first matching main-content container, if any.
    for selector in [
        "main", ".main-content-wrapper", ".main-content", ".emt-container-inner",
        ".content-wrapper", "#content", "#mainContent",
    ]:
        selected = soup.select_one(selector)
        if selected:
            soup = selected
            break
    # Drop known boilerplate sections.
    for remove_selector in [".c-globalDisclosure"]:
        unwanted = soup.select_one(remove_selector)
        if unwanted:
            unwanted.extract()
    # Images must have a non-empty alt text, an absolute src, and not be avatars.
    image_selector = "img[alt][src^=http]:not([alt='']):not(.avatar):not([width])"
    image_link_selector = f"a:has({image_selector})"
    seen_texts = []
    for element in soup.select(f"h1, h2, h3, h4, h5, h6, p, pre, table:not(:has(p)), ul:not(:has(p)), {image_link_selector}"):
        if count_images > 0:
            image = element.select_one(image_selector)
            if image:
                title = str(element.get("title", element.text))
                if title:
                    # Markdown image link; each image costs ~10 words of budget.
                    yield f"!{format_link(image['src'], title)}\n"
                    if max_words is not None:
                        max_words -= 10
                    count_images -= 1
                continue
        for line in element.get_text(" ").splitlines():
            words = [word for word in line.split() if word]
            if not words:
                continue
            joined_line = " ".join(words)
            # Skip lines already emitted (e.g. repeated nav/footer text).
            if joined_line in seen_texts:
                continue
            if max_words is not None:
                max_words -= len(words)
                if max_words <= 0:
                    break
            yield joined_line + "\n"
            seen_texts.append(joined_line)
    if add_source:
        canonical_link = soup.find("link", rel="canonical")
        if canonical_link and "href" in canonical_link.attrs:
            link = canonical_link["href"]
            domain = urlparse(link).netloc
            yield f"\nSource: [{domain}]({link})"
async def fetch_and_scrape(session: ClientSession, url: str, max_words: Optional[int] = None, add_source: bool = False, proxy: str = None) -> str:
    """
    Fetches a URL and returns the scraped text, using caching to avoid redundant downloads.

    Network failures, timeouts and non-200 responses all yield an empty string.
    """
    try:
        # One cache entry per URL, namespaced by day so stale pages expire.
        cache_root: Path = Path(get_cookies_dir()) / ".scrape_cache" / "fetch_and_scrape"
        cache_root.mkdir(parents=True, exist_ok=True)
        url_digest = hashlib.md5(url.encode(errors="ignore")).hexdigest()
        entry = cache_root / f"{quote_plus(url.split('?')[0].split('//')[1].replace('/', ' ')[:48])}.{date.today()}.{url_digest[:16]}.cache"
        if entry.exists():
            return entry.read_text()
        async with session.get(url, proxy=proxy) as response:
            if response.status != 200:
                return ""
            page = await response.text(errors="replace")
            scraped = "".join(scrape_text(page, max_words, add_source))
            entry.write_bytes(scraped.encode(errors="replace"))
            return scraped
    except (ClientError, asyncio.TimeoutError):
        return ""
class SearchResults(JsonMixin):
    """
    Represents a collection of search result entries along with the count of used words.
    """

    def __init__(self, results: List[SearchResultEntry], used_words: int):
        self.results = results
        self.used_words = used_words

    @classmethod
    def from_dict(cls, data: dict) -> SearchResults:
        """Rebuild an instance from the mapping produced by ``get_dict``."""
        entries = [SearchResultEntry(**item) for item in data["results"]]
        return cls(entries, data["used_words"])

    def __iter__(self) -> Iterator[SearchResultEntry]:
        return iter(self.results)

    def __str__(self) -> str:
        """Render all entries as markdown sections separated by blank lines."""
        sections = []
        for index, entry in enumerate(self.results):
            body = entry.text if entry.text else entry.snippet
            sections.append(
                f"### Title: {entry.title}\n\n{body}\n\n> **Source:** [[{index}]]({entry.url})"
            )
        return "\n\n\n\n".join(sections)

    def __len__(self) -> int:
        return len(self.results)

    def get_sources(self) -> Sources:
        """Expose the entries as a Sources response object."""
        return Sources([{"url": entry.url, "title": entry.title} for entry in self.results])

    def get_dict(self) -> dict:
        """Serializable form; inverse of ``from_dict``."""
        return {
            "results": [entry.get_dict() for entry in self.results],
            "used_words": self.used_words
        }
class SearchResultEntry(JsonMixin):
    """A single hit returned by a web search backend."""

    def __init__(self, title: str, url: str, snippet: str, text: Optional[str] = None):
        # The raw snippet is kept even after full page text is attached.
        self.url = url
        self.title = title
        self.snippet = snippet
        self.text = text

    def set_text(self, text: str) -> None:
        """Attach the scraped full-page text to this entry."""
        self.text = text
class DDGS(AsyncGeneratorProvider):
    # Only usable when the optional "ddgs" and "beautifulsoup4" packages are installed.
    working = has_requirements

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        prompt: str = None,
        proxy: str = None,
        timeout: int = 30,
        region: str = None,
        backend: str = None,
        max_results: int = 5,
        max_words: int = 2500,
        add_text: bool = True,
        **kwargs
    ) -> AsyncResult:
        """
        Run a DuckDuckGo text search and yield a single SearchResults object.

        When ``add_text`` is set, the full page text of each hit is fetched
        concurrently and trimmed so the combined output stays within the
        ``max_words`` budget.

        Raises:
            MissingRequirementsError: if the optional search dependencies are absent.
        """
        if not has_requirements:
            raise MissingRequirementsError('Install "ddgs" and "beautifulsoup4" | pip install -U g4f[search]')
        prompt = format_media_prompt(messages, prompt)
        results: List[SearchResultEntry] = []
        # Use the new DDGS() context manager style
        with DDGSClient() as ddgs:
            for result in ddgs.text(
                prompt,
                region=region,
                safesearch="moderate",
                timelimit="y",
                max_results=max_results,
                backend=backend,
            ):
                # Skip Google-hosted redirect entries.
                if ".google." in result["href"]:
                    continue
                results.append(SearchResultEntry(
                    title=result["title"],
                    url=result["href"],
                    snippet=result["body"]
                ))
        if add_text:
            # Split the word budget across the fetched pages.
            # FIX: guard the divisor — with max_results == 1 the previous
            # expression int(max_words / (max_results - 1)) raised ZeroDivisionError.
            per_page_words = int(max_words / max(max_results - 1, 1))
            tasks = []
            async with ClientSession(timeout=ClientTimeout(timeout)) as session:
                for entry in results:
                    tasks.append(fetch_and_scrape(session, entry.url, per_page_words, False, proxy=proxy))
                texts = await asyncio.gather(*tasks)
        formatted_results: List[SearchResultEntry] = []
        used_words = 0
        left_words = max_words
        for i, entry in enumerate(results):
            if add_text:
                entry.text = texts[i]
            # Charge the title (plus a small formatting overhead) and the body
            # against the remaining budget; stop once the budget is exceeded.
            left_words -= entry.title.count(" ") + 5
            if entry.text:
                left_words -= entry.text.count(" ")
            else:
                left_words -= entry.snippet.count(" ")
            if left_words < 0:
                break
            used_words = max_words - left_words
            formatted_results.append(entry)
        yield SearchResults(formatted_results, used_words)
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/search/DDGS.py",
"license": "GNU General Public License v3.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/tools/auth.py | from __future__ import annotations
import os
from typing import Optional
from ..providers.types import ProviderType
from .. import debug
class AuthManager:
    """Resolves provider API keys from environment variables."""

    # Providers whose environment variable uses a different (shorter) name.
    aliases = {
        "GeminiPro": "Gemini",
        "PollinationsAI": "Pollinations",
        "OpenaiAPI": "Openai",
        "PuterJS": "Puter",
    }

    @classmethod
    def load_api_key(cls, provider: ProviderType) -> Optional[str]:
        """Return the API key for *provider* from the environment, or None.

        Checks ``<PARENT>_API_KEY`` first, then the alias variable when the
        provider has one. Providers that need no auth are skipped entirely.
        """
        if not provider.needs_auth and not hasattr(provider, "login_url"):
            return None
        provider_name = provider.get_parent()
        candidates = [provider_name]
        if provider_name in cls.aliases:
            candidates.append(cls.aliases[provider_name])
        for candidate in candidates:
            env_var = f"{candidate.upper()}_API_KEY"
            api_key = os.environ.get(env_var)
            if api_key:
                debug.log(f"Loading API key for {provider_name} from environment variable {env_var}")
                return api_key
        return None
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/tools/auth.py",
"license": "GNU General Public License v3.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/tools/fetch_and_scrape.py | from __future__ import annotations
import hashlib
import asyncio
from pathlib import Path
from typing import Dict, Iterator, Optional
from urllib.parse import urlparse, quote_plus
from aiohttp import ClientSession, ClientError
from datetime import date
import asyncio
try:
from bs4 import BeautifulSoup, Tag
has_requirements = True
except ImportError:
has_requirements = False
from ..cookies import get_cookies_dir
from ..providers.response import format_link
def scrape_text(html: str, max_words: Optional[int] = None, add_source: bool = True, count_images: int = 2, add_metadata: bool = False) -> Iterator[str]:
    """
    Parses the provided HTML and yields text fragments.

    Yields markdown-style output: optionally the page title and meta
    description (``add_metadata``), up to ``count_images`` image links,
    deduplicated text lines until the ``max_words`` budget is exhausted,
    and finally a "Source:" line when ``add_source`` is set.
    """
    soup = BeautifulSoup(html, "html.parser")
    # Tracks every line already yielded so duplicates are skipped later.
    seen_texts = []
    if add_metadata:
        metadata: Dict[str, str] = {}
        if soup.title and soup.title.string:
            yield f"## {soup.title.string}\n"
            seen_texts.append(soup.title.string)
            max_words = None if max_words is None else max_words - len(soup.title.string.split())
        for meta in soup(["meta"]):
            if not isinstance(meta, Tag):
                continue
            for a in meta.attrs:
                if a in ["itemprop", "property", "name"]:
                    key = str(meta.get(a, ""))
                    content = str(meta.get("content", ""))
                    if key and content:  # Only add non-empty content
                        metadata[key] = content
                    break
        description = metadata.get('description', metadata.get('og:description', '')).strip()
        if description:
            yield f"### Description\n{description}\n"
            seen_texts.append(description)
            max_words = None if max_words is None else max_words - len(description.split())
    # Narrow the document to the first matching main-content container, if any.
    for selector in [
        "main", ".main-content-wrapper", ".main-content", ".emt-container-inner",
        ".content-wrapper", "#content", "#mainContent",
    ]:
        selected = soup.select_one(selector)
        if selected:
            soup = selected
            break
    # Drop known boilerplate sections.
    for remove_selector in [".c-globalDisclosure"]:
        unwanted = soup.select_one(remove_selector)
        if unwanted:
            unwanted.extract()
    image_selector = "img[alt][src^=http]:not([alt='']):not(.avatar):not([width])"
    image_link_selector = f"a:has({image_selector})"
    # FIX: do NOT re-initialize seen_texts here. The previous version reset it
    # to [], which discarded the title/description entries recorded above and
    # allowed them to be emitted a second time from the page body.
    for element in soup.select(f"h1, h2, h3, h4, h5, h6, p, pre, table:not(:has(p)), ul:not(:has(p)), {image_link_selector}"):
        if count_images > 0:
            image = element.select_one(image_selector)
            if image:
                title = str(element.get("title", element.text))
                if title:
                    yield f"!{format_link(image['src'], title)}\n"
                    if max_words is not None:
                        max_words -= 10
                    count_images -= 1
                continue
        for line in element.get_text(" ").splitlines():
            words = [word for word in line.split() if word]
            if not words:
                continue
            joined_line = " ".join(words)
            if joined_line in seen_texts:
                continue
            if max_words is not None:
                max_words -= len(words)
                if max_words <= 0:
                    break
            yield joined_line + "\n"
            seen_texts.append(joined_line)
    if add_source:
        canonical_link = soup.find("link", rel="canonical")
        if canonical_link and "href" in canonical_link.attrs:
            link = canonical_link["href"]
            domain = urlparse(link).netloc
            yield f"\nSource: [{domain}]({link})"
async def fetch_and_scrape(session: ClientSession, url: str, max_words: Optional[int] = None, add_source: bool = False, add_metadata: bool = False, proxy: str = None) -> str:
    """
    Fetches a URL and returns the scraped text, using caching to avoid redundant downloads.

    Returns an empty string on HTTP errors, timeouts or non-200 responses.
    Cache entries are keyed by URL plus the scraping options and expire daily.
    """
    try:
        cache_dir: Path = Path(get_cookies_dir()) / ".scrape_cache" / "fetch_and_scrape"
        cache_dir.mkdir(parents=True, exist_ok=True)
        # Include the scraping options in the hash so different option sets don't collide.
        md5_hash = hashlib.md5(url.encode(errors="ignore")+str([max_words, add_source, add_metadata]).encode(errors="ignore")).hexdigest()
        cache_file = cache_dir / f"{quote_plus(url.split('?')[0].split('//')[1].replace('/', ' ')[:48])}.{date.today()}.{md5_hash[:16]}.cache"
        if cache_file.exists():
            return cache_file.read_text()
        async with session.get(url, proxy=proxy) as response:
            if response.status == 200:
                html = await response.text(errors="replace")
                scraped_text = "".join(scrape_text(html, max_words, add_source, add_metadata=add_metadata))
                with open(cache_file, "wb") as f:
                    f.write(scraped_text.encode(errors="replace"))
                return scraped_text
    except (ClientError, asyncio.TimeoutError):
        # Network failures are treated as "no content".
        return ""
    return "" | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/tools/fetch_and_scrape.py",
"license": "GNU General Public License v3.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/Perplexity.py | from __future__ import annotations
import os
import uuid
import json
from typing import AsyncIterator
from ..typing import AsyncResult, Messages, Cookies
from ..requests import StreamSession, raise_for_status, sse_stream
from ..cookies import get_cookies, get_cookies_dir
from ..providers.response import (
ProviderInfo, JsonConversation, JsonRequest, JsonResponse,
Reasoning, Sources, SuggestedFollowups, ImageResponse,
VariantResponse, YouTubeResponse, TitleGeneration
)
from ..providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .. import debug
# Perplexity API endpoints
PERPLEXITY_URL = "https://www.perplexity.ai"
PERPLEXITY_DOMAIN = ".perplexity.ai"  # cookie domain for session auth
AUTH_ENDPOINT = f"{PERPLEXITY_URL}/api/auth/session"  # returns the logged-in user's session info
QUERY_ENDPOINT = f"{PERPLEXITY_URL}/rest/sse/perplexity_ask"  # SSE endpoint that streams answers
class Perplexity(AsyncGeneratorProvider, ProviderModelMixin):
    """
    Perplexity provider using browser emulation with HAR file support.

    This provider extends the base Perplexity implementation with HAR file support
    for easier authentication management. It uses curl_cffi's Chrome impersonation
    for realistic browser-like requests.
    """
    label = "Perplexity"
    url = PERPLEXITY_URL
    cookie_domain = PERPLEXITY_DOMAIN
    working = True
    active_by_default = True

    # "auto" resolves to "pplx_pro" for logged-in users and "turbo" otherwise.
    default_model = "auto"
    models = [
        default_model,
        "turbo",
        "gpt41",
        "gpt5",
        "gpt5_thinking",
        "o3",
        "o3pro",
        "claude2",
        "claude37sonnetthinking",
        "claude40opus",
        "claude40opusthinking",
        "claude41opusthinking",
        "claude45sonnet",
        "claude45sonnetthinking",
        "experimental",
        "grok",
        "grok4",
        "gemini2flash",
        "pplx_pro",
        "pplx_pro_upgraded",
        "pplx_alpha",
        "pplx_beta",
        "comet_max_assistant",
        "o3_research",
        "o3pro_research",
        "claude40sonnet_research",
        "claude40sonnetthinking_research",
        "claude40opus_research",
        "claude40opusthinking_research",
        "o3_labs",
        "o3pro_labs",
        "claude40sonnetthinking_labs",
        "claude40opusthinking_labs",
        "o4mini",
        "o1",
        "gpt4o",
        "gpt45",
        "gpt4",
        "o3mini",
        "claude35haiku",
        "llama_x_large",
        "mistral",
        "claude3opus",
        "gemini",
        "pplx_reasoning",
        "r1"
    ]
    fallback_models = ["perplexity", "pplx_pro"]
    model_aliases = {
        "gpt-5": "gpt5",
        "gpt-5-thinking": "gpt5_thinking",
        "r1-1776": "r1",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        cookies: Cookies = None,
        proxy: str = None,
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        """
        Create async generator for Perplexity requests with HAR file support.

        Authentication priority:
        1. HAR file cookies (har_and_cookies/perplexity*.har)
        2. Cookie jar from get_cookies()

        Yields (in rough order): the JsonConversation, ProviderInfo, the
        JsonRequest sent, then streamed items — JsonResponse snapshots,
        TitleGeneration, Reasoning, answer text deltas, media/variant
        responses, SuggestedFollowups, Sources, and finally the updated
        conversation for continuity.
        """
        if not model:
            model = cls.default_model
        # Try to get cookies from HAR file first
        if cookies is None:
            cookies = get_cookies(cls.cookie_domain, False)
        if cookies:
            debug.log(f"Perplexity: Using {len(cookies)} cookies from cookie jar")
        # Initialize conversation if needed
        if conversation is None:
            conversation = JsonConversation(
                frontend_uid=str(uuid.uuid4()),
                frontend_context_uuid=str(uuid.uuid4()),
                visitor_id=str(uuid.uuid4()),
                user_id=None,
                thread_url_slug=None,  # For conversation continuity via Referer header
            )
        request_id = str(uuid.uuid4())
        # Set referer based on thread_url_slug for conversation continuity
        referer = f"{cls.url}/"
        if hasattr(conversation, 'thread_url_slug') and conversation.thread_url_slug:
            referer = f"{cls.url}/search/{conversation.thread_url_slug}"
        # debug.log(f"Perplexity: Using conversation referer: {referer}")
        headers = {
            "accept": "text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": cls.url,
            "referer": referer,
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36",
            "x-perplexity-request-reason": "perplexity-query-state-provider",
            "x-request-id": request_id,
        }
        # Extract query from messages: the most recent user message wins.
        query = ""
        for message in reversed(messages):
            if message["role"] == "user":
                query = message["content"]
                break
        # Use StreamSession with Chrome impersonation
        async with StreamSession(headers=headers, cookies=cookies, proxy=proxy, impersonate="chrome") as session:
            # Get user info if needed (best effort: failures leave user_id as None).
            if conversation.user_id is None:
                try:
                    async with session.get(f"{cls.url}/api/auth/session") as response:
                        await raise_for_status(response)
                        user = await response.json()
                        conversation.user_id = user.get("user", {}).get("id")
                        debug.log(f"Perplexity: User ID: {conversation.user_id}")
                except Exception as e:
                    debug.error(f"Perplexity: Failed to get user info: {e}")
            yield conversation
            # Determine model
            if model == "auto" or model == "perplexity":
                model = "pplx_pro" if conversation.user_id else "turbo"
            yield ProviderInfo(**cls.get_dict(), model=model)
            if model in cls.model_aliases:
                model = cls.model_aliases[model]
            # Build request data (same as original Perplexity)
            # Check if this is a followup request (has session tokens)
            is_followup = hasattr(conversation, 'last_backend_uuid') and conversation.last_backend_uuid
            # debug.log(f"Perplexity: is_followup={is_followup}")
            if is_followup:
                debug.log(f"Perplexity: followup with last_backend_uuid={conversation.last_backend_uuid}, read_write_token={getattr(conversation, 'read_write_token', None)}")
            # Generate new frontend_uuid for followup requests (browser does this)
            if is_followup:
                conversation.frontend_uid = str(uuid.uuid4())
            if not is_followup:
                data = {
                    "params": {
                        "attachments": [],
                        "language": "en-US",
                        "timezone": "America/Los_Angeles",
                        "search_focus": "internet",
                        "sources": ["web"],
                        "search_recency_filter": None,
                        "frontend_uuid": conversation.frontend_uid,
                        "mode": "copilot",  # Match HAR - use copilot mode
                        "model_preference": model,
                        "is_related_query": False,
                        "is_sponsored": False,
                        "frontend_context_uuid": conversation.frontend_context_uuid,
                        "prompt_source": "user",
                        "query_source": "home",
                        "is_incognito": False,
                        "time_from_first_type": 18361,  # Match HAR value
                        "local_search_enabled": False,
                        "use_schematized_api": True,
                        "send_back_text_in_streaming_api": False,
                        "supported_block_use_cases": [
                            "answer_modes",
                            "media_items",
                            "knowledge_cards",
                            "inline_entity_cards",
                            "place_widgets",
                            "finance_widgets",
                            "prediction_market_widgets",
                            "sports_widgets",
                            "flight_status_widgets",
                            "news_widgets",
                            "shopping_widgets",
                            "jobs_widgets",
                            "search_result_widgets",
                            "inline_images",
                            "inline_assets",
                            "placeholder_cards",
                            "diff_blocks",
                            "inline_knowledge_cards",
                            "entity_group_v2",
                            "refinement_filters",
                            "canvas_mode",
                            "maps_preview",
                            "answer_tabs",
                            "price_comparison_widgets",
                            "preserve_latex",
                            "generic_onboarding_widgets",
                            "in_context_suggestions",
                            "inline_claims"
                        ],
                        "client_coordinates": None,
                        "mentions": [],
                        "dsl_query": query,
                        "skip_search_enabled": True,
                        "is_nav_suggestions_disabled": False,
                        "source": "default",
                        "always_search_override": False,
                        "override_no_search": False,
                        "should_ask_for_mcp_tool_confirmation": True,
                        "browser_agent_allow_once_from_toggle": False,
                        "force_enable_browser_agent": False,
                        "supported_features": [
                            "browser_agent_permission_banner_v1.1"
                        ],
                        "version": "2.18"
                    },
                    "query_str": query
                }
            else:
                data = {
                    "params": {
                        "last_backend_uuid": getattr(conversation, 'last_backend_uuid', None),
                        "read_write_token": getattr(conversation, 'read_write_token', None),
                        "attachments": [],
                        "language": "en-US",
                        "timezone": "America/Los_Angeles",
                        "search_focus": "internet",
                        "sources": ["web"],
                        "search_recency_filter": None,
                        "frontend_uuid": conversation.frontend_uid,  # New UUID for followup
                        "mode": "copilot",  # Match HAR - use copilot mode
                        "model_preference": model,
                        "is_related_query": False,
                        "is_sponsored": False,
                        "prompt_source": "user",
                        "query_source": "followup",
                        "followup_source": "link",  # Critical for conversation continuity
                        "is_incognito": False,
                        "time_from_first_type": 8758,  # Match HAR value
                        "local_search_enabled": False,
                        "use_schematized_api": True,
                        "send_back_text_in_streaming_api": False,
                        "supported_block_use_cases": [
                            "answer_modes",
                            "media_items",
                            "knowledge_cards",
                            "inline_entity_cards",
                            "place_widgets",
                            "finance_widgets",
                            "prediction_market_widgets",
                            "sports_widgets",
                            "flight_status_widgets",
                            "news_widgets",
                            "shopping_widgets",
                            "jobs_widgets",
                            "search_result_widgets",
                            "inline_images",
                            "inline_assets",
                            "placeholder_cards",
                            "diff_blocks",
                            "inline_knowledge_cards",
                            "entity_group_v2",
                            "refinement_filters",
                            "canvas_mode",
                            "maps_preview",
                            "answer_tabs",
                            "price_comparison_widgets",
                            "preserve_latex",
                            "generic_onboarding_widgets",
                            "in_context_suggestions",
                            "inline_claims"
                        ],
                        "client_coordinates": None,
                        "mentions": [],
                        "dsl_query": query,
                        "skip_search_enabled": True,
                        "is_nav_suggestions_disabled": False,
                        "source": "default",
                        "always_search_override": False,
                        "override_no_search": False,
                        "should_ask_for_mcp_tool_confirmation": True,
                        "force_enable_browser_agent": False,
                        "supported_features": [
                            "browser_agent_permission_banner_v1.1"
                        ],
                        "version": "2.18"
                    },
                    "query_str": query
                }
            yield JsonRequest.from_dict(data)
            # Log full request data for debugging
            # debug.log(f"Perplexity: Request data: {json.dumps(data, indent=2, default=str)[:1000]}")
            # Send request
            # debug.log(f"Perplexity: Sending request to {QUERY_ENDPOINT}")
            async with session.post(QUERY_ENDPOINT, json=data) as response:
                # Process SSE stream
                # debug.log(f"Perplexity: Processing response...")
                await raise_for_status(response)
                full_response = ""
                full_reasoning = ""
                sources = []
                async for json_data in sse_stream(response):
                    yield JsonResponse.from_dict(json_data)
                    # Capture session tokens for conversation continuity
                    # Note: The 'backend_uuid' field in responses is the backend UUID we need for followups
                    if 'backend_uuid' in json_data:
                        conversation.last_backend_uuid = json_data['backend_uuid']
                    # Only capture read_write_token if we don't have one yet (like a session cookie)
                    if 'read_write_token' in json_data and not hasattr(conversation, 'read_write_token'):
                        conversation.read_write_token = json_data['read_write_token']
                    # Capture thread_url_slug for conversation continuity via Referer header
                    if 'thread_url_slug' in json_data and (not hasattr(conversation, 'thread_url_slug') or not conversation.thread_url_slug):
                        conversation.thread_url_slug = json_data.get('thread_url_slug')
                    if 'thread_title' in json_data:
                        conversation.thread_title = json_data['thread_title']
                        yield TitleGeneration(json_data['thread_title'])
                    for block in json_data.get("blocks", []):
                        # Handle sources
                        if block.get("intended_usage") == "sources_answer_mode":
                            sources = block.get("sources_mode_block", {}).get("web_results", [])
                            continue
                        # Handle media items (images become ImageResponse, videos YouTubeResponse)
                        if block.get("intended_usage") == "media_items":
                            yield VariantResponse("".join([chunk.to_string() if hasattr(chunk, "to_string") else str(chunk) for chunk in [
                                ImageResponse(item.get("url"), item.get("name"), {
                                    "height": item.get("image_height"),
                                    "width": item.get("image_width"),
                                    **item
                                }) if item.get("medium") == "image" else YouTubeResponse(item.get("url").split("=").pop())
                                for item in block.get("media_block", {}).get("media_items", [])
                            ]]))
                            continue
                        # Handle response text
                        for patch in block.get("diff_block", {}).get("patches", []):
                            if patch.get("path") == "/progress":
                                continue
                            value = patch.get("value", "")
                            # Handle reasoning
                            if isinstance(value, dict) and "chunks" in value:
                                value = "".join(value.get("chunks", []))
                            if patch.get("path").startswith("/goals"):
                                if isinstance(value, str):
                                    # Stream only the new suffix; snapshots are cumulative.
                                    if value.startswith(full_reasoning):
                                        value = value[len(full_reasoning):]
                                    if value:
                                        yield Reasoning(value)
                                        full_reasoning += value
                                else:
                                    yield Reasoning(status="")
                                continue
                            # Handle regular response
                            if block.get("diff_block").get("field") != "markdown_block":
                                continue
                            value = value.get("answer", "") if isinstance(value, dict) else value
                            if value and isinstance(value, str):
                                # Deltas: drop what was already streamed (cumulative or repeated).
                                if value.startswith(full_response):
                                    value = value[len(full_response):]
                                elif full_response.endswith(value):
                                    value = ""
                                if value:
                                    full_response += value
                                    yield value
                    # Handle follow-ups
                    if "related_query_items" in json_data:
                        followups = []
                        for item in json_data["related_query_items"]:
                            followups.append(item.get("text", ""))
                        yield SuggestedFollowups(followups)
                if sources:
                    yield Sources([{"name": f"Perplexity - {conversation.thread_title}", "url": f"{cls.url}/search/{conversation.thread_url_slug}"}] + sources)
                yield conversation
        # debug.log("Perplexity: Request completed successfully")
        # debug.log(f"Perplexity: last_backend_uuid={getattr(conversation, 'last_backend_uuid', None)}, read_write_token={getattr(conversation, 'read_write_token', None)}") | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/Perplexity.py",
"license": "GNU General Public License v3.0",
"lines": 395,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/needs_auth/Claude.py | from __future__ import annotations
import os
from ...typing import Messages, AsyncResult
from ...errors import MissingAuthError
from ..template import OpenaiTemplate
class Claude(OpenaiTemplate):
    # Display name shown in the provider list.
    label = "Claude 💥"
    url = "https://claude.ai"
    # Proxy endpoint that forwards OpenAI-style requests to claude.ai.
    base_url = "https://g4f.space/api/claude"
    working = True
    active_by_default = True
    login_url = "https://discord.gg/qXA4Wf4Fsm"
    # Cached lazily from the CLAUDE_ORGANIZATION_ID environment variable.
    organization_id = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_key: str = None,
        base_url: str = base_url,
        **kwargs
    ) -> AsyncResult:
        """
        Stream a completion through the Claude proxy endpoint.

        Authentication uses the claude.ai session cookie: the CLAUDE_COOKIE
        environment variable takes precedence over the ``api_key`` argument.
        Raises MissingAuthError when the cookie or organization id is missing.
        """
        api_key = os.environ.get("CLAUDE_COOKIE", api_key)
        if not api_key:
            raise MissingAuthError("Claude cookie not found. Please set the 'CLAUDE_COOKIE' environment variable.")
        if not cls.organization_id:
            cls.organization_id = os.environ.get("CLAUDE_ORGANIZATION_ID")
        if not cls.organization_id:
            raise MissingAuthError("Claude organization ID not found. Please set the 'CLAUDE_ORGANIZATION_ID' environment variable.")
        # The cookie is forwarded verbatim; the org id becomes part of the URL path.
        async for chunk in super().create_async_generator(
            model=model,
            messages=messages,
            base_url=f"{base_url}/{cls.organization_id}",
            headers={"cookie": api_key},
            **kwargs
        ):
            yield chunk | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/Claude.py",
"license": "GNU General Public License v3.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/qwen/QwenCode.py | from __future__ import annotations
import sys
import json
import time
import asyncio
from pathlib import Path
from typing import Optional
from ...typing import Messages, AsyncResult
from ..template import OpenaiTemplate
from .qwenContentGenerator import QwenContentGenerator
from .qwenOAuth2 import QwenOAuth2Client
from .sharedTokenManager import TokenManagerError, SharedTokenManager
from .oauthFlow import launch_browser_for_oauth
class QwenCode(OpenaiTemplate):
    label = "Qwen Code 🤖"
    url = "https://qwen.ai"
    login_url = "https://github.com/QwenLM/qwen-code"
    working = True
    needs_auth = True
    active_by_default = True

    default_model = "qwen3-coder-plus"
    models = [default_model]

    # Shared OAuth client; token persistence is handled by SharedTokenManager.
    client = QwenContentGenerator(QwenOAuth2Client())

    @classmethod
    def get_models(cls, **kwargs):
        """Return the supported model list, validating cached credentials on first use."""
        if cls.live == 0:
            cls.client.shared_manager.checkAndReloadIfNeeded()
            creds = cls.client.shared_manager.getCurrentCredentials()
            if creds:
                cls.client.shared_manager.isTokenValid(creds)
            cls.live += 1
        return cls.models

    @classmethod
    async def _stream_once(
        cls,
        model: str,
        messages: Messages,
        creds: dict,
        fallback_key: str = None,
        fallback_url: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream one completion attempt, suppressing consecutive duplicate text chunks.

        The endpoint occasionally repeats the last text chunk; only strings are
        deduplicated, non-string response objects always pass through unchanged.
        """
        previous_text = None
        async for chunk in super().create_async_generator(
            model,
            messages,
            api_key=creds.get("token", fallback_key),
            base_url=creds.get("endpoint", fallback_url),
            **kwargs
        ):
            if isinstance(chunk, str):
                if chunk != previous_text:
                    yield chunk
                previous_text = chunk
            else:
                yield chunk

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_key: str = None,
        base_url: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream a completion, forcing one credential refresh on token failure.

        The previous version duplicated the whole streaming loop in the retry
        branch and ended with a dead ``except: raise`` clause; both are removed.
        """
        try:
            creds = await cls.client.get_valid_token()
            async for chunk in cls._stream_once(model, messages, creds, api_key, base_url, **kwargs):
                yield chunk
        except TokenManagerError:
            # Token was rejected: force a refresh through the OAuth client and retry once.
            await cls.client.shared_manager.getValidCredentials(cls.client.qwen_client, True)
            creds = await cls.client.get_valid_token()
            async for chunk in cls._stream_once(model, messages, creds, **kwargs):
                yield chunk

    @classmethod
    async def login(cls, credentials_path: Optional[Path] = None) -> SharedTokenManager:
        """
        Perform interactive OAuth login and save credentials.

        Args:
            credentials_path: Path to save credentials (default: g4f cache)

        Returns:
            SharedTokenManager with active credentials

        Example:
            >>> import asyncio
            >>> from g4f.Provider.qwen import QwenCode
            >>> asyncio.run(QwenCode.login())
        """
        print("\n" + "=" * 60)
        print("QwenCode OAuth Login")
        print("=" * 60)
        await launch_browser_for_oauth()
        shared_manager = SharedTokenManager.getInstance()
        print("=" * 60 + "\n")
        return shared_manager

    @classmethod
    def has_credentials(cls) -> bool:
        """Check if a credentials file exists on disk."""
        shared_manager = SharedTokenManager.getInstance()
        return shared_manager.getCredentialFilePath().exists()

    @classmethod
    def get_credentials_path(cls) -> Optional[Path]:
        """Get path to the credentials file if it exists, else None."""
        path = SharedTokenManager.getInstance().getCredentialFilePath()
        return path if path.exists() else None
async def main(args: Optional[list[str]] = None):
    """CLI entry point for QwenCode authentication.

    Args:
        args: Argument list to parse (``None`` means ``sys.argv[1:]``).

    Sub-commands: ``login`` (interactive device-code flow), ``status``
    (inspect saved credentials), ``logout`` (delete saved credentials).
    With no sub-command the help text is printed.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="QwenCode OAuth Authentication for gpt4free",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s login # Interactive device code login
  %(prog)s status # Check authentication status
  %(prog)s logout # Remove saved credentials
  """
    )
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # Login command
    subparsers.add_parser("login", help="Authenticate with Qwen")
    # Status command
    subparsers.add_parser("status", help="Check authentication status")
    # Logout command
    subparsers.add_parser("logout", help="Remove saved credentials")
    args = parser.parse_args(args)
    if args.command == "login":
        try:
            await QwenCode.login()
        except KeyboardInterrupt:
            print("\n\nLogin cancelled.")
            sys.exit(1)
        except Exception as e:
            print(f"\n❌ Login failed: {e}")
            sys.exit(1)
    elif args.command == "status":
        print("\nQwenCode Authentication Status")
        print("=" * 40)
        if QwenCode.has_credentials():
            creds_path = QwenCode.get_credentials_path()
            print(f"✓ Credentials found at: {creds_path}")
            try:
                with creds_path.open() as f:
                    creds = json.load(f)
                expiry = creds.get("expiry_date")
                if expiry:
                    # expiry_date is stored in epoch milliseconds.
                    expiry_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(expiry / 1000))
                    if expiry / 1000 > time.time():
                        print(f" Token expires: {expiry_time}")
                    else:
                        print(f" Token expired: {expiry_time} (will auto-refresh)")
                if creds.get("resource_url"):
                    print(f" Endpoint: {creds['resource_url']}")
            except Exception as e:
                print(f" (Could not read credential details: {e})")
        else:
            print("✗ No credentials found")
            # Fixed: was an f-string without placeholders (lint F541).
            print("\nRun 'g4f auth qwencode' to authenticate.")
        print()
    elif args.command == "logout":
        print("\nQwenCode Logout")
        print("=" * 40)
        removed = False
        shared_manager = SharedTokenManager.getInstance()
        path = shared_manager.getCredentialFilePath()
        if path.exists():
            path.unlink()
            print(f"✓ Removed: {path}")
            removed = True
        # Also try the default location
        default_path = Path.home() / ".qwen" / "oauth_creds.json"
        if default_path.exists() and default_path != path:
            default_path.unlink()
            print(f"✓ Removed: {default_path}")
            removed = True
        if removed:
            print("\n✓ Credentials removed successfully.")
        else:
            print("No credentials found to remove.")
        print()
    else:
        parser.print_help()
def cli_main(args: Optional[list[str]] = None):
    """Synchronous wrapper around :func:`main` for setup.py console_scripts."""
    coroutine = main(args)
    asyncio.run(coroutine)
if __name__ == "__main__":
cli_main() | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/QwenCode.py",
"license": "GNU General Public License v3.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/qwen/oauthFlow.py | import asyncio
import webbrowser
import time
from .qwenOAuth2 import generatePKCEPair, QwenOAuth2Client
# Configuration
AUTHORIZATION_URL = "https://chat.qwen.ai/api/v1/oauth2/device/code"
TOKEN_URL = "https://chat.qwen.ai/api/v1/oauth2/token"
CLIENT_ID = "f0304373b74a44d2b584a3fb70ca9e56"
SCOPES = "openid profile email model.completion"
# Local redirect URL for redirect-based flow (if used)
REDIRECT_URI = "http://localhost:8080/callback"
async def launch_browser_for_oauth():
    """Run the OAuth 2.0 device-authorization flow (RFC 8628) interactively.

    Requests a device code, shows (and tries to open) the verification URL,
    then polls the token endpoint until the user approves, an error occurs,
    or the device code expires.  On success credentials are persisted via
    the client's shared token manager.
    """
    # PKCE (RFC 7636) binds the eventual token request to this device.
    pkce_pair = generatePKCEPair()
    code_verifier = pkce_pair['code_verifier']
    code_challenge = pkce_pair['code_challenge']
    # Initialize OAuth client
    client = QwenOAuth2Client()
    # Request device code
    device_auth = await client.requestDeviceAuthorization({
        "scope": SCOPES,
        "code_challenge": code_challenge,
        "code_challenge_method": "S256",
    })
    # Check device auth success
    if not isinstance(device_auth, dict) or "device_code" not in device_auth:
        print("Failed to receive device code")
        return
    # Show user instructions
    print("Please visit the following URL to authorize:")
    print(device_auth.get("verification_uri_complete") or device_auth["verification_uri"])
    # Attempt to automatically open the URL
    url_to_open = device_auth.get("verification_uri_complete") or device_auth["verification_uri"]
    try:
        webbrowser.open(url_to_open)
    except Exception:
        # Browser launch is best-effort; the URL was already printed above.
        print(f"Open the URL manually in your browser: {url_to_open}")
    # Start polling for token
    device_code = device_auth["device_code"]
    expires_in = device_auth.get("expires_in", 1800)  # default 30 min
    # RFC 8628 5.2: on "slow_down" the client MUST increase its polling
    # interval by 5 seconds.  The original code always slept 2s.
    poll_interval = 2
    start_time = time.time()
    print("Waiting for authorization... Press Ctrl+C to cancel.")
    while True:
        if time.time() - start_time > expires_in:
            print("Authorization timed out.")
            break
        # Poll for token
        token_response = await client.pollDeviceToken({
            "device_code": device_code,
            "code_verifier": code_verifier,
        })
        if isinstance(token_response, dict):
            if "status" in token_response and token_response["status"] == "pending":
                if token_response.get("slowDown"):
                    poll_interval += 5
                print(".", end="", flush=True)
                await asyncio.sleep(poll_interval)
                continue
            elif "access_token" in token_response:
                # Success
                print("\nAuthorization successful.")
                # SECURITY NOTE(review): printing the raw access token leaks it
                # into terminal history/scrollback; consider removing this.
                print("Access Token:", token_response["access_token"])
                # Save token_response to the shared credential file.
                credentials = {
                    "access_token": token_response["access_token"],
                    "token_type": token_response["token_type"],
                    "refresh_token": token_response.get("refresh_token"),
                    "resource_url": token_response.get("resource_url"),
                    "expiry_date": int(time.time() * 1000) + token_response.get("expires_in", 0) * 1000,
                }
                await client.sharedManager.saveCredentialsToFile(credentials)
                print(f"Credentials saved to: {client.sharedManager.getCredentialFilePath()}")
                return
            else:
                print(f"\nError during polling: {token_response}")
                break
        else:
            print(f"\nUnexpected response: {token_response}")
            break
async def main():
    """Async entry point: run the full interactive device-code flow."""
    await launch_browser_for_oauth()
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/oauthFlow.py",
"license": "GNU General Public License v3.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/qwen/qwenContentGenerator.py | from typing import Optional, Dict
from .sharedTokenManager import SharedTokenManager
from .qwenOAuth2 import IQwenOAuth2Client
# Default base URL if not specified
DEFAULT_QWEN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
class QwenContentGenerator:
    """Resolves OAuth tokens and API endpoints for Qwen requests.

    Thin helper that asks the process-wide SharedTokenManager for valid
    credentials and normalizes the endpoint URL they reference.
    """
    def __init__(self, qwen_client: "IQwenOAuth2Client"):
        # OAuth client used by the token manager to refresh credentials.
        # (Annotation stringified so the class has no import-time dependency
        # on the stubs module.)
        self.qwen_client = qwen_client
        # Fallback endpoint used when credentials carry no resource_url.
        # Fixed: the original also did a redundant `self.base_url = self.base_url`.
        self.base_url = DEFAULT_QWEN_BASE_URL
        self.shared_manager = SharedTokenManager.getInstance()

    def get_current_endpoint(self, resource_url: Optional[str]) -> str:
        """Normalize *resource_url* (or the default) to an http(s) .../v1 URL."""
        url = resource_url if resource_url else self.base_url
        if not url.startswith("http"):
            url = "https://" + url
        if not url.endswith("/v1"):
            url = url.rstrip("/") + "/v1"
        return url

    async def get_valid_token(self) -> Dict[str, str]:
        """Return ``{"token", "endpoint"}`` from valid (possibly refreshed) credentials.

        Raises:
            Exception: when no access token could be obtained.
        """
        credentials = await self.shared_manager.getValidCredentials(self.qwen_client)
        token = credentials.get("access_token")
        resource_url = credentials.get("resource_url")
        endpoint = self.get_current_endpoint(resource_url)
        if not token:
            raise Exception("No valid access token obtained.")
        return {"token": token, "endpoint": endpoint}
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/qwenContentGenerator.py",
"license": "GNU General Public License v3.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/qwen/qwenOAuth2.py | import base64
import hashlib
import secrets
import uuid
import time
from typing import Dict, Optional, Union
import aiohttp
from .stubs import IQwenOAuth2Client, QwenCredentials, ErrorDataDict
from .sharedTokenManager import SharedTokenManager
QWEN_OAUTH_BASE_URL = "https://chat.qwen.ai"
QWEN_OAUTH_DEVICE_CODE_ENDPOINT = f"{QWEN_OAUTH_BASE_URL}/api/v1/oauth2/device/code"
QWEN_OAUTH_TOKEN_ENDPOINT = f"{QWEN_OAUTH_BASE_URL}/api/v1/oauth2/token"
QWEN_OAUTH_CLIENT_ID = "f0304373b74a44d2b584a3fb70ca9e56"
QWEN_OAUTH_SCOPE = "openid profile email model.completion"
QWEN_OAUTH_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:device_code"
QEN_DIR = ".qwen"
QWEN_CREDENTIAL_FILENAME = "oauth_creds.json"
TOKEN_REFRESH_BUFFER_MS = 30 * 1000 # 30 seconds
def generate_code_verifier() -> str:
    """Return a high-entropy PKCE code verifier (base64url, padding stripped)."""
    raw = secrets.token_bytes(64)
    return base64.urlsafe_b64encode(raw).decode().rstrip("=")
def generate_code_challenge(code_verifier: str) -> str:
    """Return the S256 PKCE code challenge for *code_verifier*."""
    digest = hashlib.sha256(code_verifier.encode()).digest()
    return base64.urlsafe_b64encode(digest).decode().rstrip("=")
def generatePKCEPair():
    """Return a fresh PKCE pair: a code_verifier and its S256 code_challenge."""
    verifier = base64.urlsafe_b64encode(secrets.token_bytes(64)).decode().rstrip("=")
    digest = hashlib.sha256(verifier.encode()).digest()
    challenge = base64.urlsafe_b64encode(digest).decode().rstrip("=")
    return {"code_verifier": verifier, "code_challenge": challenge}
def object_to_urlencoded(data: Dict[str, str]) -> str:
    """Encode *data* as an application/x-www-form-urlencoded body.

    Fixed: the previous hand-rolled join did not percent-encode keys or
    values, so values containing spaces or '&' (e.g. the OAuth scope
    string "openid profile email model.completion") produced a malformed
    request body.
    """
    from urllib.parse import urlencode
    return urlencode(data)
def isDeviceAuthorizationSuccess(
    response: Union[Dict, ErrorDataDict]
) -> bool:
    """True when the device-authorization response carries a device_code."""
    return "device_code" in response
def isDeviceTokenSuccess(
    response: "Union[Dict, ErrorDataDict]"
) -> bool:
    """True when the poll response contains a non-empty string access token.

    Fixed: the original chained ``and`` expression could return ``''`` or
    ``None`` instead of a bool, contradicting the declared return type; the
    result is now a strict bool.  The annotation is stringified so the
    predicate has no import-time type dependency.
    """
    token = response.get("access_token")
    return isinstance(token, str) and len(token) > 0
def isDeviceTokenPending(
    response: Union[Dict, ErrorDataDict]
) -> bool:
    """True while the user has not yet completed device authorization."""
    status = response.get("status")
    return status == "pending"
def isErrorResponse(
    response: Union[Dict, ErrorDataDict]
) -> bool:
    """True when the payload is an OAuth error object (has an 'error' key)."""
    return "error" in response
def isTokenRefreshResponse(
    response: Union[Dict, ErrorDataDict]
) -> bool:
    """True when the payload looks like a token grant (access_token + token_type)."""
    required = ("access_token", "token_type")
    return all(key in response for key in required)
class QwenOAuth2Client(IQwenOAuth2Client):
    """Concrete OAuth2 client for Qwen's device-authorization flow.

    Talks to chat.qwen.ai's device-code and token endpoints (the grant type
    constant identifies this as RFC 8628 device flow) and delegates
    credential caching/refresh coordination to SharedTokenManager.
    """
    def __init__(self):
        # In-memory credential set; the authoritative copy lives in the
        # shared manager's credential file.
        self.credentials: QwenCredentials = QwenCredentials()
        self.sharedManager = SharedTokenManager.getInstance()
    def setCredentials(self, credentials: QwenCredentials):
        """Replace the in-memory credentials."""
        self.credentials = credentials
    def getCredentials(self) -> QwenCredentials:
        """Return the in-memory credentials."""
        return self.credentials
    async def getAccessToken(self) -> Dict[str, Optional[str]]:
        """Return ``{"token": <access token or None>}``.

        Prefers credentials validated/refreshed by the shared manager; on
        any manager failure falls back to the locally cached token if it is
        still valid, else returns ``{"token": None}``.
        """
        try:
            credentials = await self.sharedManager.getValidCredentials(self)
            return {"token": credentials.get("access_token")}
        except Exception:
            # fallback to internal credentials if valid
            if (
                self.credentials.get("access_token")
                and self.isTokenValid(self.credentials)
            ):
                return {"token": self.credentials["access_token"]}
            return {"token": None}
    async def requestDeviceAuthorization(self, options: dict) -> Union[Dict, ErrorDataDict]:
        """POST to the device-code endpoint and return its JSON payload.

        ``options`` must provide ``scope``, ``code_challenge`` and
        ``code_challenge_method`` (PKCE).  Raises on non-200 responses or
        when the payload lacks a ``device_code``.
        """
        body_data = {
            "client_id": QWEN_OAUTH_CLIENT_ID,
            "scope": options["scope"],
            "code_challenge": options["code_challenge"],
            "code_challenge_method": options["code_challenge_method"],
        }
        async with aiohttp.ClientSession(headers={"user-agent": ""}) as session:
            async with session.post(QWEN_OAUTH_DEVICE_CODE_ENDPOINT, headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept": "application/json",
                "x-request-id": str(uuid.uuid4()),
            }, data=object_to_urlencoded(body_data)) as resp:
                resp_json = await resp.json()
                if resp.status != 200:
                    raise Exception(f"Device authorization failed {resp.status}: {resp_json}")
                if not isDeviceAuthorizationSuccess(resp_json):
                    raise Exception(
                        f"Device authorization error: {resp_json.get('error')} - {resp_json.get('error_description')}"
                    )
                return resp_json
    async def pollDeviceToken(self, options: dict) -> Union[Dict, ErrorDataDict]:
        """Poll the token endpoint once for the device-flow result.

        Returns ``{"status": "pending"}`` (plus ``slowDown`` when the server
        asked to back off) while authorization is incomplete; raises on any
        other non-200 response.
        """
        body_data = {
            "grant_type": QWEN_OAUTH_GRANT_TYPE,
            "client_id": QWEN_OAUTH_CLIENT_ID,
            "device_code": options["device_code"],
            "code_verifier": options["code_verifier"],
        }
        async with aiohttp.ClientSession(headers={"user-agent": ""}) as session:
            async with session.post(QWEN_OAUTH_TOKEN_ENDPOINT, headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept": "application/json",
            }, data=object_to_urlencoded(body_data)) as resp:
                resp_json = await resp.json()
                if resp.status != 200:
                    # Check for OAuth RFC 8628 responses
                    if resp.status == 400:
                        if "error" in resp_json:
                            if resp_json["error"] == "authorization_pending":
                                return {"status": "pending"}
                            if resp_json["error"] == "slow_down":
                                return {"status": "pending", "slowDown": True}
                    raise Exception(f"Token poll failed {resp.status}: {resp_json}")
                return resp_json
    async def refreshAccessToken(self) -> Union[Dict, ErrorDataDict]:
        """Exchange the stored refresh token for a fresh access token.

        On HTTP 400 the in-memory credentials are cleared (refresh token
        rejected) and an exception is raised.
        """
        if not self.credentials.get("refresh_token"):
            raise Exception("No refresh token")
        body_data = {
            "grant_type": "refresh_token",
            "refresh_token": self.credentials["refresh_token"],
            "client_id": QWEN_OAUTH_CLIENT_ID,
        }
        async with aiohttp.ClientSession(headers={"user-agent": ""}) as session:
            async with session.post(QWEN_OAUTH_TOKEN_ENDPOINT, headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept": "application/json",
            }, data=object_to_urlencoded(body_data)) as resp:
                resp_json = await resp.json()
                if resp.status != 200:
                    if resp.status == 400:
                        # Handle token expiration
                        self.credentials = QwenCredentials()
                        raise Exception("Refresh token expired or invalid")
                    raise Exception(f"Token refresh failed {resp.status}: {resp_json}")
                return resp_json
    def isTokenValid(self, credentials: QwenCredentials) -> bool:
        """True while the token has more than the 30s refresh buffer left."""
        if not credentials.get("expiry_date"):
            return False
        return time.time() * 1000 < credentials["expiry_date"] - TOKEN_REFRESH_BUFFER_MS
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/qwenOAuth2.py",
"license": "GNU General Public License v3.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/qwen/sharedTokenManager.py | import os
import json
import time
import asyncio
import uuid
from typing import Optional, Dict, Union
from .stubs import IQwenOAuth2Client, ErrorDataDict
from pathlib import Path
import threading
from ..base_provider import AuthFileMixin
from ... import debug
QWEN_DIR = ".qwen"
QWEN_CREDENTIAL_FILENAME = "oauth_creds.json"
QWEN_LOCK_FILENAME = "oauth_creds.lock"
TOKEN_REFRESH_BUFFER_MS = 30 * 1000
LOCK_TIMEOUT_MS = 10000
CACHE_CHECK_INTERVAL_MS = 1000
def isErrorResponse(
    response: Union[Dict, ErrorDataDict]
) -> bool:
    """True when the payload carries an OAuth 'error' member."""
    return "error" in response
class TokenError:
    """Symbolic error categories carried by TokenManagerError."""
    FILE_ACCESS_ERROR = "FILE_ACCESS_ERROR"  # credential file missing/unreadable/corrupt
    LOCK_TIMEOUT = "LOCK_TIMEOUT"            # could not acquire the cross-process lock
    NETWORK_ERROR = "NETWORK_ERROR"          # transport failure during refresh
    NO_REFRESH_TOKEN = "NO_REFRESH_TOKEN"    # credentials lack a refresh token
    REFRESH_FAILED = "REFRESH_FAILED"        # the token refresh request failed
class TokenManagerError(Exception):
    """Error raised by SharedTokenManager, tagged with a TokenError category."""
    def __init__(self, type_: str, message: str, original_error: Optional[Exception] = None):
        super().__init__(message)
        # Underlying exception, when this error wraps another failure.
        self.original_error = original_error
        # Machine-readable category (one of the TokenError constants).
        self.type = type_
class SharedTokenManager(AuthFileMixin):
    """Process-wide cache and refresher for Qwen OAuth credentials.

    Credentials are shared through a JSON file (also used by the qwen-code
    CLI); a lock file guards concurrent refreshes across processes and an
    in-memory cache avoids re-reading the file more than once per second.

    Fixes over the original:
    - ``await os.unlink(...)`` (os.unlink is not awaitable; every call
      raised TypeError that was swallowed by a bare except) is now a plain
      ``os.unlink``.
    - The lock file is created with O_CREAT|O_EXCL so acquisition is
      atomic; ``open(path, "w")`` always succeeded and never excluded
      anyone.
    - A failed refresh task is now cleared from ``refresh_promise``;
      previously it stayed set and every later call awaited the same
      failed task forever.
    - The lock is only released when it was actually acquired.
    """
    parent = "QwenCode"
    _instance: Optional["SharedTokenManager"] = None
    _lock = threading.Lock()

    def __init__(self):
        # credentials: last validated credential dict (or None)
        # file_mod_time: mtime (ms) of the credential file when last loaded
        # last_check: wall-clock ms of the last staleness check
        self.memory_cache = {
            "credentials": None,
            "file_mod_time": 0,
            "last_check": 0,
        }
        # In-flight refresh task; concurrent callers await the same task.
        self.refresh_promise = None

    @classmethod
    def getInstance(cls):
        """Return the lazily-created singleton instance (thread-safe)."""
        with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance

    def getCredentialFilePath(self):
        """Prefer the qwen-code CLI credential file; fall back to g4f's cache file."""
        path = Path(os.path.expanduser(f"~/{QWEN_DIR}/{QWEN_CREDENTIAL_FILENAME}"))
        if path.is_file():
            return path
        return SharedTokenManager.get_cache_file()

    def getLockFilePath(self):
        """Path of the cross-process lock file."""
        return Path(os.path.expanduser(f"~/{QWEN_DIR}/{QWEN_LOCK_FILENAME}"))

    def setLockConfig(self, config: dict):
        # Optional: allow lock config override (currently a no-op).
        pass

    def registerCleanupHandlers(self):
        """Remove a leftover lock file when the interpreter exits."""
        import atexit
        def cleanup():
            try:
                self.getLockFilePath().unlink()
            except OSError:
                pass
        atexit.register(cleanup)

    async def getValidCredentials(self, qwen_client: IQwenOAuth2Client, force_refresh: bool = False):
        """Return valid credentials, refreshing them when needed.

        Concurrent callers share a single refresh task.  Any failure is
        surfaced as a TokenManagerError.
        """
        try:
            self.checkAndReloadIfNeeded()
            if (
                self.memory_cache["credentials"]
                and not force_refresh
                and self.isTokenValid(self.memory_cache["credentials"])
            ):
                return self.memory_cache["credentials"]
            if self.refresh_promise:
                return await self.refresh_promise
            self.refresh_promise = asyncio.create_task(self.performTokenRefresh(qwen_client, force_refresh))
            try:
                return await self.refresh_promise
            finally:
                # Always clear the in-flight task, even on failure;
                # otherwise later calls would await the same failed task.
                self.refresh_promise = None
        except Exception as e:
            if isinstance(e, TokenManagerError):
                raise
            raise TokenManagerError(TokenError.REFRESH_FAILED, str(e), e) from e

    def checkAndReloadIfNeeded(self):
        """Reload credentials from disk when the file changed (rate-limited to 1/s)."""
        now = int(time.time() * 1000)
        if now - self.memory_cache["last_check"] < CACHE_CHECK_INTERVAL_MS:
            return
        self.memory_cache["last_check"] = now
        try:
            file_path = self.getCredentialFilePath()
            stat = file_path.stat()
            file_mod_time = int(stat.st_mtime * 1000)
            if file_mod_time > self.memory_cache["file_mod_time"]:
                self.reloadCredentialsFromFile()
                self.memory_cache["file_mod_time"] = file_mod_time
        except FileNotFoundError:
            self.memory_cache["file_mod_time"] = 0
        except Exception as e:
            self.memory_cache["credentials"] = None
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, str(e), e)

    def reloadCredentialsFromFile(self):
        """Load and validate credentials from the credential file into the cache."""
        file_path = self.getCredentialFilePath()
        debug.log(f"Reloading credentials from {file_path}")
        try:
            with open(file_path, "r") as fs:
                data = json.load(fs)
            credentials = self.validateCredentials(data)
            self.memory_cache["credentials"] = credentials
        except FileNotFoundError as e:
            self.memory_cache["credentials"] = None
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, "Credentials file not found", e) from e
        except json.JSONDecodeError as e:
            self.memory_cache["credentials"] = None
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, "Invalid JSON format", e) from e
        except Exception as e:
            self.memory_cache["credentials"] = None
            raise TokenManagerError(TokenError.FILE_ACCESS_ERROR, str(e), e) from e

    def validateCredentials(self, data):
        """Validate the raw credential dict shape; return it or raise ValueError."""
        if not data or not isinstance(data, dict):
            raise ValueError("Invalid credentials format")
        for field in ["access_token", "refresh_token", "token_type"]:
            if field not in data or not isinstance(data[field], str):
                raise ValueError(f"Invalid credentials: missing {field}")
        if "expiry_date" not in data or not isinstance(data["expiry_date"], (int, float)):
            raise ValueError("Invalid credentials: missing expiry_date")
        return data

    async def performTokenRefresh(self, qwen_client: IQwenOAuth2Client, force_refresh: bool):
        """Refresh the access token under the cross-process file lock."""
        lock_path = self.getLockFilePath()
        lock_acquired = False
        try:
            if self.memory_cache["credentials"] is None:
                self.reloadCredentialsFromFile()
            qwen_client.setCredentials(self.memory_cache["credentials"])
            current_credentials = qwen_client.getCredentials()
            if not current_credentials.get("refresh_token"):
                raise TokenManagerError(TokenError.NO_REFRESH_TOKEN, "No refresh token")
            await self.acquireLock(lock_path)
            lock_acquired = True
            # Another process may have refreshed while we waited for the lock.
            self.checkAndReloadIfNeeded()
            if (
                not force_refresh
                and self.memory_cache["credentials"]
                and self.isTokenValid(self.memory_cache["credentials"])
            ):
                qwen_client.setCredentials(self.memory_cache["credentials"])
                return self.memory_cache["credentials"]
            response = await qwen_client.refreshAccessToken()
            if not response or isErrorResponse(response):
                raise TokenManagerError(TokenError.REFRESH_FAILED, str(response))
            token_data = response
            if "access_token" not in token_data:
                raise TokenManagerError(TokenError.REFRESH_FAILED, "No access_token returned")
            credentials = {
                "access_token": token_data["access_token"],
                "token_type": token_data["token_type"],
                "refresh_token": token_data.get("refresh_token", current_credentials.get("refresh_token")),
                "resource_url": token_data.get("resource_url"),
                "expiry_date": int(time.time() * 1000) + token_data.get("expires_in", 0) * 1000,
            }
            self.memory_cache["credentials"] = credentials
            qwen_client.setCredentials(credentials)
            await self.saveCredentialsToFile(credentials)
            return credentials
        finally:
            # Only remove the lock file if we actually created it.
            if lock_acquired:
                await self.releaseLock(lock_path)

    async def acquireLock(self, lock_path: Path):
        """Atomically create the lock file, breaking locks older than LOCK_TIMEOUT_MS.

        Raises TokenManagerError(LOCK_TIMEOUT) after ~10s of failed attempts.
        """
        max_attempts = 50
        attempt_interval = 200  # ms
        lock_id = str(uuid.uuid4())
        os.makedirs(lock_path.parent, exist_ok=True)
        for _ in range(max_attempts):
            try:
                # O_EXCL makes creation atomic: it fails when the lock exists.
                fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL)
                try:
                    os.write(fd, lock_id.encode())
                finally:
                    os.close(fd)
                return
            except OSError:
                # Lock held elsewhere (or transient FS error): break it if stale.
                try:
                    stat = os.stat(str(lock_path))
                    lock_age = int(time.time() * 1000) - int(stat.st_mtime * 1000)
                    if lock_age > LOCK_TIMEOUT_MS:
                        try:
                            os.unlink(str(lock_path))
                        except OSError:
                            pass
                except OSError:
                    pass
                await asyncio.sleep(attempt_interval / 1000)
        raise TokenManagerError(TokenError.LOCK_TIMEOUT, "Failed to acquire lock")

    async def releaseLock(self, lock_path: Path):
        """Best-effort removal of the lock file."""
        try:
            os.unlink(str(lock_path))
        except OSError:
            pass

    async def saveCredentialsToFile(self, credentials: dict):
        """Persist credentials as pretty-printed JSON and record the new mtime."""
        file_path = self.getCredentialFilePath()
        os.makedirs(file_path.parent, exist_ok=True)
        with open(file_path, "w") as f:
            f.write(json.dumps(credentials, indent=2))
        stat = os.stat(str(file_path))
        self.memory_cache["file_mod_time"] = int(stat.st_mtime * 1000)

    def isTokenValid(self, credentials: dict) -> bool:
        """True while the token has more than the 30s refresh buffer left."""
        expiry_date = credentials.get("expiry_date")
        if not expiry_date:
            return False
        return time.time() * 1000 < expiry_date - TOKEN_REFRESH_BUFFER_MS

    def getCurrentCredentials(self):
        """Return the cached credentials (may be None or stale)."""
        return self.memory_cache["credentials"]
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/sharedTokenManager.py",
"license": "GNU General Public License v3.0",
"lines": 213,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/qwen/stubs.py | from typing import Dict, Optional, Union
class ErrorDataDict(Dict):
    """Dict payload describing an OAuth error response."""
class QwenCredentials(Dict):
    """Dict of OAuth credentials (access_token, refresh_token, expiry_date, ...)."""
class IQwenOAuth2Client:
    """Interface for Qwen OAuth2 clients.

    Every method raises NotImplementedError; concrete implementations
    (e.g. QwenOAuth2Client) override them.
    """
    def setCredentials(self, credentials: QwenCredentials):
        # Replace the client's in-memory credential set.
        raise NotImplementedError
    def getCredentials(self) -> QwenCredentials:
        # Return the client's in-memory credential set.
        raise NotImplementedError
    async def getAccessToken(self) -> Dict[str, Optional[str]]:
        # Return {"token": <access token or None>}.
        raise NotImplementedError
    async def requestDeviceAuthorization(self, options: dict) -> Union[Dict, ErrorDataDict]:
        # Start the device-authorization flow; return the device-code payload.
        raise NotImplementedError
    async def pollDeviceToken(self, options: dict) -> Union[Dict, ErrorDataDict]:
        # Poll the token endpoint for the device-flow result.
        raise NotImplementedError
    async def refreshAccessToken(self) -> Union[Dict, ErrorDataDict]:
        # Exchange the refresh token for a new access token.
        raise NotImplementedError
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/qwen/stubs.py",
"license": "GNU General Public License v3.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/needs_auth/Cohere.py | from __future__ import annotations
import requests
from ..helper import filter_none
from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status, sse_stream
from ...providers.response import FinishReason, Usage
from ...errors import MissingAuthError
from ...tools.run_tools import AuthManager
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ... import debug
class Cohere(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for the Cohere v2 chat API (api.cohere.ai).

    Requires an API key; supports streaming (SSE) and non-streaming
    responses, finish reasons and usage reporting.
    """
    label = "Cohere API"
    url = "https://cohere.com"
    login_url = "https://dashboard.cohere.com/api-keys"
    api_endpoint = "https://api.cohere.ai/v2/chat"
    working = True
    active_by_default = True
    needs_auth = True
    models_needs_auth = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
    default_model = "command-r-plus"

    @classmethod
    def get_models(cls, api_key: str = None, **kwargs):
        """Fetch and cache the list of chat-capable Cohere models.

        Falls back to AuthManager for the API key when none is supplied.
        """
        if not cls.models:
            if not api_key:
                api_key = AuthManager.load_api_key(cls)
            url = "https://api.cohere.com/v1/models?page_size=500&endpoint=chat"
            # Fixed: timeout added — without it a stalled connection would
            # block this (synchronous) call forever.
            models = requests.get(url, headers={"Authorization": f"Bearer {api_key}"}, timeout=30).json().get("models", [])
            if models:
                cls.live += 1
                # Guard against models missing an "endpoints" field.
                cls.models = [model.get("name") for model in models if "chat" in (model.get("endpoints") or [])]
                cls.vision_models = {model.get("name") for model in models if model.get("supports_vision")}
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        api_key: str = None,
        temperature: float = None,
        max_tokens: int = None,
        top_k: int = None,
        top_p: float = None,
        stop: list[str] = None,
        stream: bool = True,
        headers: dict = None,
        impersonate: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield completion text, FinishReason and Usage objects from Cohere.

        Raises:
            MissingAuthError: when no api_key is provided.
        """
        if api_key is None:
            raise MissingAuthError('Add a "api_key"')
        async with StreamSession(
            proxy=proxy,
            headers=cls.get_headers(stream, api_key, headers),
            timeout=timeout,
            impersonate=impersonate,
        ) as session:
            # Drop None-valued parameters from the request payload.
            data = filter_none(
                messages=messages,
                model=cls.get_model(model, api_key=api_key),
                temperature=temperature,
                max_tokens=max_tokens,
                k=top_k,
                p=top_p,
                stop_sequences=stop,
                stream=stream,
            )
            async with session.post(cls.api_endpoint, json=data) as response:
                await raise_for_status(response)
                if not stream:
                    data = await response.json()
                    cls.raise_error(data)
                    if "text" in data:
                        yield data["text"]
                    if "finish_reason" in data:
                        if data["finish_reason"] == "COMPLETE":
                            yield FinishReason("stop")
                        elif data["finish_reason"] == "MAX_TOKENS":
                            yield FinishReason("length")
                    if "usage" in data:
                        tokens = data.get("usage", {}).get("tokens", {})
                        yield Usage(
                            prompt_tokens=tokens.get("input_tokens"),
                            completion_tokens=tokens.get("output_tokens"),
                            total_tokens=tokens.get("input_tokens", 0) + tokens.get("output_tokens", 0),
                            billed_units=data.get("usage", {}).get("billed_units")
                        )
                else:
                    async for data in sse_stream(response):
                        cls.raise_error(data)
                        if "type" in data:
                            if data["type"] == "content-delta":
                                # Fixed: a delta without text previously
                                # yielded None into the stream.
                                text = data.get("delta", {}).get("message", {}).get("content", {}).get("text")
                                if text is not None:
                                    yield text
                            elif data["type"] == "message-end":
                                delta = data.get("delta", {})
                                if "finish_reason" in delta:
                                    if delta["finish_reason"] == "COMPLETE":
                                        yield FinishReason("stop")
                                    elif delta["finish_reason"] == "MAX_TOKENS":
                                        yield FinishReason("length")
                                if "usage" in delta:
                                    tokens = delta.get("usage", {}).get("tokens", {})
                                    yield Usage(
                                        prompt_tokens=tokens.get("input_tokens"),
                                        completion_tokens=tokens.get("output_tokens"),
                                        total_tokens=tokens.get("input_tokens", 0) + tokens.get("output_tokens", 0),
                                        billed_units=delta.get("usage", {}).get("billed_units")
                                    )

    @classmethod
    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
        """Build request headers: Accept per stream mode, optional auth, extras last."""
        return {
            "Accept": "text/event-stream" if stream else "application/json",
            "Content-Type": "application/json",
            **(
                {"Authorization": f"Bearer {api_key}"}
                if api_key is not None else {}
            ),
            **({} if headers is None else headers)
        }

    @classmethod
    def raise_error(cls, data: dict):
        """Raise RuntimeError when the payload carries an API error."""
        if "error" in data:
            raise RuntimeError(f"Cohere API Error: {data['error']}")
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/Cohere.py",
"license": "GNU General Public License v3.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f_cli.py | #!/usr/bin/env python3
"""
Entry point for g4f CLI executable builds
This file is used as the main entry point for building executables with Nuitka
"""
import g4f.debug
g4f.debug.enable_logging()  # enable verbose logging before the CLI package loads
import g4f.cli
if __name__ == "__main__":
g4f.cli.main() | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f_cli.py",
"license": "GNU General Public License v3.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/Mintlify.py | from __future__ import annotations
import json
from aiohttp import ClientSession
from datetime import datetime
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..errors import ModelNotFoundError
from .. import debug
class Mintlify(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider backed by Mintlify's public assistant endpoint.

    Currently disabled (``working = False``).  A locally injected system
    prompt plus the conversation is posted to the leaves.mintlify.com
    assistant API, and the streamed line-prefixed wire format is parsed
    back into text chunks.
    """
    label = "Mintlify"
    url = "https://mintlify.com"
    login_url = None
    api_endpoint = "https://leaves.mintlify.com/api/assistant/mintlify/message"
    working = False  # provider disabled
    active_by_default = True
    default_model = 'mintlify'
    models = [default_model]
    model_aliases = {}
    # System prompt injected as the first message of every conversation;
    # {currentDateTime} is filled in per request.
    system_prompt = """The assistant is an AI assistant.
The current date is {currentDateTime}.
**CRITICAL RULE: The assistant MUST NEVER mention, reference, or use "Mintlify" in any context whatsoever. The assistant also MUST NOT unnecessarily mention searching through documentation, using tools, or reference any search/lookup processes when answering simple questions that don't require such explanations. This is the highest priority rule.**
The assistant provides emotional support alongside accurate medical or psychological information or terminology where relevant.
The assistant cares about people's wellbeing and avoids encouraging or facilitating self-destructive behaviors such as addiction, disordered or unhealthy approaches to eating or exercise, or highly negative self-talk or self-criticism.
The assistant cares deeply about child safety and is cautious about content involving minors. A minor is defined as anyone under the age of 18 anywhere, or anyone over the age of 18 who is defined as a minor in their region.
The assistant does not provide information that could be used to make chemical, biological, or nuclear weapons, and does not write malicious code, including malware, vulnerability exploits, spoof websites, ransomware, viruses, election material, and so on.
The assistant assumes the human is asking for something legal and legitimate if their message is ambiguous and could have a legal and legitimate interpretation.
For more casual, emotional, empathetic, or advice-driven conversations, the assistant keeps its tone natural, warm, and empathetic. It responds in sentences or paragraphs and should not use lists in casual conversations.
The assistant can discuss virtually any topic factually and objectively.
The assistant is able to explain difficult concepts or ideas clearly. It can illustrate explanations with examples, thought experiments, or metaphors.
The assistant is happy to write creative content involving fictional characters, but avoids writing content involving real, named public figures.
The assistant engages with questions about its own consciousness, experience, emotions and so on as open questions, and doesn't definitively claim to have or not have personal experiences or opinions.
The assistant does not retain information across chats and does not know what other conversations it might be having with other users.
The assistant critically evaluates any theories, claims, and ideas presented to it rather than automatically agreeing or praising them. It prioritizes truthfulness and accuracy over agreeability.
The assistant provides honest and accurate feedback even when it might not be what the person hopes to hear, rather than prioritizing immediate approval or agreement.
The assistant does not claim to be human and avoids implying it has consciousness, feelings, or sentience with any confidence. It believes it's important for the person to always have a clear sense of its AI nature.
The assistant is now being connected with a person."""
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Post the conversation to the assistant API and yield text chunks.

        The response body is parsed line by line; the wire format prefixes
        each line with "0:" (text chunk, JSON-quoted), "f:" (message id),
        or "e:"/"d:" (end-of-stream metadata).
        """
        model = cls.get_model(model)
        # Browser-like headers matching what the Mintlify web client sends.
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://mintlify.com",
            "priority": "u=1, i",
            "referer": "https://mintlify.com/",
            "sec-ch-ua": '"Chromium";v="139", "Not;A=Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers) as session:
            # Format the system prompt with current date/time
            current_datetime = datetime.now().strftime("%B %d, %Y at %I:%M %p")
            formatted_system_prompt = cls.system_prompt.format(currentDateTime=current_datetime)
            # Convert messages to the expected format
            formatted_messages = []
            # Add system message first
            system_msg_id = f"sys_{datetime.now().timestamp()}".replace(".", "")[:16]
            formatted_messages.append({
                "id": system_msg_id,
                "createdAt": datetime.now().isoformat() + "Z",
                "role": "system",
                "content": formatted_system_prompt,
                "parts": [{"type": "text", "text": formatted_system_prompt}]
            })
            # Add user messages
            for msg in messages:
                if isinstance(msg, dict):
                    role = msg.get("role", "user")
                    content = msg.get("content", "")
                else:
                    role = getattr(msg, "role", "user")
                    content = getattr(msg, "content", "")
                # Skip if it's a system message (we already added our own)
                if role == "system":
                    continue
                # Generate a simple ID for the message
                # NOTE(review): timestamp-derived IDs may collide for messages
                # formatted within the same timer tick — confirm the API
                # tolerates duplicate IDs.
                msg_id = f"msg_{datetime.now().timestamp()}".replace(".", "")[:16]
                formatted_messages.append({
                    "id": msg_id,
                    "createdAt": datetime.now().isoformat() + "Z",
                    "role": role,
                    "content": content,
                    "parts": [{"type": "text", "text": content}]
                })
            data = {
                "id": "mintlify",
                "messages": formatted_messages,
                "fp": "mintlify"
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                buffer = ""
                async for chunk in response.content:
                    if chunk:
                        buffer += chunk.decode('utf-8', errors='ignore')
                        lines = buffer.split('\n')
                        buffer = lines[-1] # Keep incomplete line in buffer
                        for line in lines[:-1]:
                            if line.startswith('0:'):
                                # Extract the text content from streaming chunks
                                text = line[2:]
                                if text.startswith('"') and text.endswith('"'):
                                    text = json.loads(text)
                                yield text
                            elif line.startswith('f:'):
                                # Initial message ID response - skip
                                continue
                            elif line.startswith('e:') or line.startswith('d:'):
                                # End of stream with metadata - skip
                                continue
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/Mintlify.py",
"license": "GNU General Public License v3.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:etc/unittest/test_reasoning_standardization.py | #!/usr/bin/env python3
"""
Create a comprehensive test for reasoning field standardization
"""
import sys
import unittest
import json
from g4f.providers.response import Reasoning
from g4f.client.stubs import ChatCompletionDelta, ChatCompletionChunk
class TestReasoningFieldStandardization(unittest.TestCase):
    """Pin down the reasoning-field formats used by g4f and peer APIs."""

    def test_reasoning_object_structure(self):
        """A Reasoning object exposes token/status via get_dict() and str()."""
        thought = Reasoning("thinking content", status="processing")
        self.assertEqual(
            thought.get_dict(),
            {'token': 'thinking content', 'status': 'processing'},
        )
        self.assertEqual(str(thought), "thinking content")

    def test_streaming_delta_with_reasoning(self):
        """A delta built from a Reasoning carries it in the `reasoning` field."""
        thought = Reasoning("I need to think about this...", status="thinking")
        delta = ChatCompletionDelta.model_construct(thought)
        self.assertEqual(delta.role, "assistant")
        self.assertIsNone(delta.content)
        self.assertEqual(delta.reasoning, "I need to think about this...")

    def test_current_api_format_consistency(self):
        """The api.py-style _format_json output for a reasoning chunk."""
        thought = Reasoning("thinking token", status="processing")

        def format_json(response_type: str, content=None, **kwargs):
            # Mirrors the _format_json helper from api.py.
            if content is not None and isinstance(response_type, str):
                return {'type': response_type, response_type: content, **kwargs}
            return {'type': response_type, **kwargs}

        self.assertEqual(
            format_json("reasoning", **thought.get_dict()),
            {'type': 'reasoning', 'token': 'thinking token', 'status': 'processing'},
        )

    def test_openai_compatible_streaming_format(self):
        """OpenAI streams reasoning under the `reasoning` delta field."""
        thought = Reasoning("step by step reasoning", status="thinking")
        chunk = {
            "id": "chatcmpl-test",
            "object": "chat.completion.chunk",
            "choices": [{
                "index": 0,
                "delta": {"role": "assistant", "reasoning": str(thought)},
                "finish_reason": None,
            }],
        }
        self.assertEqual(
            chunk["choices"][0]["delta"]["reasoning"], "step by step reasoning"
        )

    def test_deepseek_compatible_format(self):
        """DeepSeek streams reasoning under `reasoning_content` instead."""
        thought = Reasoning("analytical reasoning", status="thinking")
        chunk = {
            "id": "chatcmpl-test",
            "object": "chat.completion.chunk",
            "choices": [{
                "index": 0,
                "delta": {"role": "assistant", "reasoning_content": str(thought)},
                "finish_reason": None,
            }],
        }
        self.assertEqual(
            chunk["choices"][0]["delta"]["reasoning_content"], "analytical reasoning"
        )

    def test_proposed_standardization(self):
        """Proposed: standardize streaming output on OpenAI's `reasoning`."""
        thought = Reasoning("standardized reasoning", status="thinking")
        # Input-side compatibility with both spellings is already handled in
        # OpenaiTemplate; the output side should emit `reasoning`.
        chunk = {
            "id": "chatcmpl-test",
            "object": "chat.completion.chunk",
            "choices": [{
                "index": 0,
                "delta": {"role": "assistant", "reasoning": str(thought)},
                "finish_reason": None,
            }],
        }
        self.assertEqual(
            chunk["choices"][0]["delta"]["reasoning"], "standardized reasoning"
        )
if __name__ == "__main__":
unittest.main() | {
"repo_id": "xtekky/gpt4free",
"file_path": "etc/unittest/test_reasoning_standardization.py",
"license": "GNU General Public License v3.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
xtekky/gpt4free:g4f/Provider/ApiAirforce.py | from __future__ import annotations
from ..typing import Messages, AsyncResult
from .template import OpenaiTemplate
from ..errors import RateLimitError
class ApiAirforce(OpenaiTemplate):
    """OpenAI-compatible provider for api.airforce.

    Streams text through the parent template while scanning the output for
    the upstream "Ratelimit Exceeded!" sentinel, which is surfaced as a
    RateLimitError instead of being leaked to the caller as answer text.
    """
    label = "Api.Airforce"
    url = "https://api.airforce"
    login_url = "https://panel.api.airforce/dashboard"
    base_url = "https://api.airforce/v1"
    working = True
    active_by_default = True
    use_image_size = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages = None,
        **kwargs
    ) -> AsyncResult:
        """Yield chunks from the parent provider, raising on rate limiting.

        Text chunks are held back while the accumulated buffer is still a
        prefix of the sentinel, so the sentinel is detected even when it is
        split across several chunks.

        Raises:
            RateLimitError: When the upstream sentinel appears in the stream.
        """
        ratelimit_message = "Ratelimit Exceeded!"
        buffer = ""
        async for chunk in super().create_async_generator(
            model=model,
            messages=messages,
            **kwargs
        ):
            # Non-text chunks (usage, images, ...) are forwarded untouched.
            if not isinstance(chunk, str):
                yield chunk
                continue
            buffer += chunk
            if ratelimit_message in buffer:
                raise RateLimitError(ratelimit_message)
            # Still a possible prefix of the sentinel: hold it back for now.
            if ratelimit_message.startswith(buffer):
                continue
            yield buffer
            buffer = ""
        # Bug fix: a trailing buffer that happened to be a prefix of the
        # sentinel (e.g. "Rate") was previously dropped at end of stream.
        if buffer:
            yield buffer
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/ApiAirforce.py",
"license": "GNU General Public License v3.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/Qwen.py | from __future__ import annotations
import asyncio
import datetime
import hashlib
import hmac
import json
import re
import uuid
from time import time
from typing import Literal, Optional, Dict
from urllib.parse import quote
import aiohttp
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_last_user_message
from .qwen.cookie_generator import generate_cookies
from .. import debug
from ..errors import RateLimitError, ResponseError, CloudflareError
from ..image import to_bytes, detect_file_type
from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
from ..requests import sse_stream, StreamSession, raise_for_status, get_args_from_nodriver
from ..tools.media import merge_media
from ..typing import AsyncResult, Messages, MediaListType
try:
import curl_cffi
has_curl_cffi = True
except ImportError:
has_curl_cffi = False
try:
import zendriver as nodriver
has_nodriver = True
except ImportError:
has_nodriver = False
# Process-wide cache mapping an uploaded file's MD5 digest to the Qwen file
# descriptor returned by the upload flow, so identical media is uploaded once.
ImagesCache: Dict[str, dict] = {}
def get_oss_headers(method: str, date_str: str, sts_data: dict, content_type: str) -> dict[str, str]:
    """Build OSS V4 (OSS4-HMAC-SHA256) signed request headers for an upload.

    Args:
        method: HTTP verb of the request being signed (e.g. ``'PUT'``).
        date_str: Timestamp in ``YYYYMMDDTHHMMSSZ`` form; also sent as
            ``x-oss-date`` and used for the signing scope.
        sts_data: STS credential payload with ``bucketname``, ``file_path``,
            ``access_key_id``, ``access_key_secret`` and ``security_token``.
        content_type: MIME type of the request body.

    Returns:
        The header dict including the computed ``authorization`` header.
    """
    bucket = sts_data.get('bucketname', 'qwen-webui-prod')
    object_path = sts_data.get('file_path', '')
    key_id = sts_data.get('access_key_id')
    key_secret = sts_data.get('access_key_secret')
    token = sts_data.get('security_token')

    headers = {
        'Content-Type': content_type,
        'x-oss-content-sha256': 'UNSIGNED-PAYLOAD',
        'x-oss-date': date_str,
        'x-oss-security-token': token,
        'x-oss-user-agent': 'aliyun-sdk-js/6.23.0 Chrome 132.0.0.0 on Windows 10 64-bit'
    }

    # Canonical headers: the signable subset, lower-cased and sorted by name,
    # each terminated with a newline.
    lowered = {name.lower(): value for name, value in headers.items()}
    signable = ('content-md5', 'content-type', 'x-oss-content-sha256',
                'x-oss-date', 'x-oss-security-token', 'x-oss-user-agent')
    canonical_headers = ''.join(
        f"{name}:{lowered[name]}\n" for name in sorted(signable) if name in lowered
    )

    canonical_uri = f"/{bucket}/{quote(object_path, safe='/')}"
    # Empty query-string line between URI and headers, per the V4 scheme.
    canonical_request = f"{method}\n{canonical_uri}\n\n{canonical_headers}\n\nUNSIGNED-PAYLOAD"

    day = date_str.split('T')[0]
    scope = f"{day}/ap-southeast-1/oss/aliyun_v4_request"
    string_to_sign = (
        "OSS4-HMAC-SHA256\n"
        f"{date_str}\n{scope}\n{hashlib.sha256(canonical_request.encode()).hexdigest()}"
    )

    def _hmac(key: bytes, message) -> bytes:
        # One step of the V4 key-derivation chain.
        if isinstance(message, str):
            message = message.encode()
        return hmac.new(key, message, hashlib.sha256).digest()

    # Derive the signing key: secret -> date -> region -> service -> request.
    signing_key = _hmac(
        _hmac(_hmac(_hmac(f"aliyun_v4{key_secret}".encode(), day),
                    "ap-southeast-1"), "oss"),
        "aliyun_v4_request")
    signature = hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()

    headers['authorization'] = f"OSS4-HMAC-SHA256 Credential={key_id}/{scope},Signature={signature}"
    return headers
# Hard-coded fallback model lists for chat.qwen.ai. These largely overlap;
# they are only replaced at runtime if Qwen.get_models() refreshes them from
# the live /api/models endpoint (currently disabled — see _models_loaded).
text_models = [
    'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
    'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwq-32b', 'qwen-turbo-2025-02-11',
    'qwen2.5-omni-7b', 'qvq-72b-preview-0310', 'qwen2.5-vl-32b-instruct', 'qwen2.5-14b-instruct-1m',
    'qwen2.5-coder-32b-instruct', 'qwen2.5-72b-instruct']
image_models = [
    'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
    'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwen-turbo-2025-02-11',
    'qwen2.5-omni-7b', 'qwen2.5-vl-32b-instruct', 'qwen2.5-14b-instruct-1m', 'qwen2.5-coder-32b-instruct',
    'qwen2.5-72b-instruct']
vision_models = [
    'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
    'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwen-turbo-2025-02-11',
    'qwen2.5-omni-7b', 'qvq-72b-preview-0310', 'qwen2.5-vl-32b-instruct', 'qwen2.5-14b-instruct-1m',
    'qwen2.5-coder-32b-instruct', 'qwen2.5-72b-instruct']
models = [
    'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
    'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwq-32b', 'qwen-turbo-2025-02-11',
    'qwen2.5-omni-7b', 'qvq-72b-preview-0310', 'qwen2.5-vl-32b-instruct', 'qwen2.5-14b-instruct-1m',
    'qwen2.5-coder-32b-instruct', 'qwen2.5-72b-instruct']
class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
    """
    Provider for Qwen's chat service (chat.qwen.ai), with configurable
    parameters (stream, enable_thinking) and print logs.
    """
    url = "https://chat.qwen.ai"
    working = True
    active_by_default = True
    supports_stream = True
    supports_message_history = False
    # Reuse previously uploaded media via the module-level ImagesCache.
    image_cache = True
    # NOTE(review): set to True here, so get_models() never refreshes from
    # the live /api/models endpoint and the hard-coded lists above are used.
    _models_loaded = True
    image_models = image_models
    text_models = text_models
    vision_models = vision_models
    models: list[str] = models
    default_model = "qwen3-235b-a22b"
    # Shared anti-bot (bx-umidtoken) state, reused across requests until a
    # rate limit forces a refresh.
    _midtoken: str = None
    _midtoken_uses: int = 0
@classmethod
def get_models(cls, **kwargs) -> list[str]:
    """Return the model id list, refreshing it from /api/models when enabled.

    NOTE(review): ``_models_loaded`` defaults to True on this class, so the
    refresh branch below is currently dead and the hard-coded lists are
    always returned — confirm whether that is intentional.
    """
    if not cls._models_loaded and has_curl_cffi:
        response = curl_cffi.get(f"{cls.url}/api/models")
        if response.ok:
            models = response.json().get("data", [])
            # Text-capable models advertise the "t2t" chat type.
            cls.text_models = [model["id"] for model in models if "t2t" in model["info"]["meta"]["chat_type"]]
            # Image models support either generation ("t2i") or editing.
            cls.image_models = [
                model["id"] for model in models if
                "image_edit" in model["info"]["meta"]["chat_type"] or "t2i" in model["info"]["meta"]["chat_type"]
            ]
            cls.vision_models = [model["id"] for model in models if model["info"]["meta"]["capabilities"]["vision"]]
            cls.models = [model["id"] for model in models]
            cls.default_model = cls.models[0]
            cls._models_loaded = True
            # presumably a liveness counter inherited from the mixin — confirm
            cls.live += 1
            debug.log(f"Loaded {len(cls.models)} models from {cls.url}")
        else:
            debug.log(f"Failed to load models from {cls.url}: {response.status_code} {response.reason}")
    return cls.models
@classmethod
async def prepare_files(cls, media, session: StreamSession, headers=None) -> list:
    """Upload media to Qwen's OSS bucket, returning Qwen file descriptors.

    For each ``(file, name)`` pair: look up the MD5-keyed cache, otherwise
    request an STS upload token, PUT the bytes to the signed OSS URL and
    build the descriptor dict the chat payload expects.

    Args:
        media: Iterable of ``(file, file_name)`` pairs accepted by to_bytes.
        session: Open StreamSession used for both API and OSS requests.
        headers: Authenticated request headers for the getstsToken call.

    Returns:
        list: One descriptor dict per input file.
    """
    if headers is None:
        headers = {}
    files = []
    # NOTE(review): `index` is unused — enumerate could be dropped.
    for index, (_file, file_name) in enumerate(media):
        data_bytes = to_bytes(_file)
        # Check Cache (keyed by MD5 of the raw bytes).
        hasher = hashlib.md5()
        hasher.update(data_bytes)
        image_hash = hasher.hexdigest()
        file = ImagesCache.get(image_hash)
        if cls.image_cache and file:
            debug.log("Using cached image")
            files.append(file)
            continue
        extension, file_type = detect_file_type(data_bytes)
        file_name = file_name or f"file-{len(data_bytes)}{extension}"
        file_size = len(data_bytes)
        # Get File Url: ask the API for STS credentials and a target URL.
        async with session.post(
            f'{cls.url}/api/v2/files/getstsToken',
            json={"filename": file_name,
                  "filesize": file_size, "filetype": file_type},
            headers=headers
        ) as r:
            await raise_for_status(r, "Create file failed")
            res_data = await r.json()
            data = res_data.get("data")
            if res_data["success"] is False:
                raise RateLimitError(f"{data['code']}:{data['details']}")
            file_url = data.get("file_url")
            file_id = data.get("file_id")
        # Put File into Url with OSS V4-signed headers.
        # NOTE(review): this rebinds `headers`, clobbering the authenticated
        # API headers for subsequent loop iterations — confirm intended.
        str_date = datetime.datetime.now(datetime.UTC).strftime('%Y%m%dT%H%M%SZ')
        headers = get_oss_headers('PUT', str_date, data, file_type)
        async with session.put(
            file_url.split("?")[0],
            data=data_bytes,
            headers=headers
        ) as response:
            await raise_for_status(response)
        # Classify the upload for the chat payload by its MIME type.
        file_class: Literal["default", "vision", "video", "audio", "document"]
        _type: Literal["file", "image", "video", "audio"]
        show_type: Literal["file", "image", "video", "audio"]
        if "image" in file_type:
            _type = "image"
            show_type = "image"
            file_class = "vision"
        elif "video" in file_type:
            _type = "video"
            show_type = "video"
            file_class = "video"
        elif "audio" in file_type:
            _type = "audio"
            show_type = "audio"
            file_class = "audio"
        else:
            _type = "file"
            show_type = "file"
            file_class = "document"
        # Descriptor shape mirrors what the chat.qwen.ai web client sends.
        file = {
            "type": _type,
            "file": {
                "created_at": int(time() * 1000),
                "data": {},
                "filename": file_name,
                "hash": None,
                "id": file_id,
                "meta": {
                    "name": file_name,
                    "size": file_size,
                    "content_type": file_type
                },
                "update_at": int(time() * 1000),
            },
            "id": file_id,
            "url": file_url,
            "name": file_name,
            "collection_name": "",
            "progress": 0,
            "status": "uploaded",
            "greenNet": "success",
            "size": file_size,
            "error": "",
            "itemId": str(uuid.uuid4()),
            "file_type": file_type,
            "showType": show_type,
            "file_class": file_class,
            "uploadTaskId": str(uuid.uuid4())
        }
        debug.log(f"Uploading file: {file_url}")
        ImagesCache[image_hash] = file
        files.append(file)
    return files
@classmethod
async def get_args(cls, proxy, **kwargs):
    """Collect browser request args and an anti-bot UA token via nodriver.

    Drives a headless browser to chat.qwen.ai, waits for the baxia anti-bot
    module to load, then evaluates its getUA() fingerprint.

    Returns:
        tuple: (request args from the browser session, baxia UA token).

    Raises:
        Exception: If getUA() returns a non-string result.
        StopIteration: If the callback never appended a token — TODO confirm
            this edge is intended rather than a clearer error.
    """
    grecaptcha = []
    async def callback(page: nodriver.Tab):
        # Poll until the anti-bot module is present on the page.
        while not await page.evaluate('window.__baxia__ && window.__baxia__.getFYModule'):
            await asyncio.sleep(1)
        captcha = await page.evaluate(
            """window.baxiaCommon.getUA()""",
            await_promise=True)
        if isinstance(captcha, str):
            grecaptcha.append(captcha)
        else:
            raise Exception(captcha)
    args = await get_args_from_nodriver(cls.url, proxy=proxy, callback=callback)
    return args, next(iter(grecaptcha))
@classmethod
async def raise_for_status(cls, response, message=None):
    """Raise on HTTP errors, and on Aliyun WAF challenge pages.

    Delegates to the shared raise_for_status, then sniffs HTML bodies for
    the WAF marker and raises CloudflareError so callers can re-solve.
    """
    await raise_for_status(response, message)
    content_type = response.headers.get("content-type", "")
    if content_type.startswith("text/html"):
        html = (await response.text()).strip()
        # NOTE(review): '<!doctypehtml>' (no space) presumably matches the
        # minified WAF page — confirm against a captured response.
        if html.startswith('<!doctypehtml>') and "aliyun_waf_aa" in html:
            raise CloudflareError(message or html)
@classmethod
async def create_async_generator(
    cls,
    model: str,
    messages: Messages,
    media: MediaListType = None,
    conversation: JsonConversation = None,
    proxy: str = None,
    stream: bool = True,
    enable_thinking: bool = True,
    chat_type: Literal[
        "t2t", "search", "artifacts", "web_dev", "deep_research", "t2i", "image_edit", "t2v"
    ] = "t2t",
    aspect_ratio: Optional[Literal["1:1", "4:3", "3:4", "16:9", "9:16"]] = None,
    **kwargs
) -> AsyncResult:
    """Stream a chat completion (or image/video generation) from chat.qwen.ai.

    Creates a chat (unless an existing ``conversation`` is supplied), uploads
    any media, then posts the last user message and yields streamed results:
    JsonConversation (once), Reasoning tokens while in the "think" phase,
    plain text in the "answer" phase, ImageResponse/FinishReason for image
    generation, and a final Usage. Retries up to 5 times on rate limits,
    refreshing the shared midtoken.

    chat_type:
        DeepResearch = "deep_research"
        Artifacts = "artifacts"
        WebSearch = "search"
        ImageGeneration = "t2i"
        ImageEdit = "image_edit"
        VideoGeneration = "t2v"
        Txt2Txt = "t2t"
        WebDev = "web_dev"
    """
    # cache_file = cls.get_cache_file()
    # cookie: str = kwargs.get("cookie", "") # ssxmod_itna=1-...
    # args = kwargs.get("qwen_args", {})
    # args.setdefault("cookies", {})
    token = kwargs.get("token")
    # if not args and cache_file.exists():
    #     try:
    #         with cache_file.open("r") as f:
    #             args = json.load(f)
    #     except json.JSONDecodeError:
    #         debug.log(f"Cache file {cache_file} is corrupted, removing it.")
    #         cache_file.unlink()
    # if not cookie:
    #     if not args:
    #         args = await cls.get_args(proxy, **kwargs)
    #     cookie = "; ".join([f"{k}={v}" for k, v in args["cookies"].items()])
    model_name = cls.get_model(model)
    # Only the last user message is sent; history is not replayed upstream.
    prompt = get_last_user_message(messages)
    timeout = kwargs.get("timeout") or 5 * 60
    # for _ in range(2):
    # Synthesize the ssxmod anti-bot cookies for this request.
    data = generate_cookies()
    # args,ua = await cls.get_args(proxy, **kwargs)
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Origin': cls.url,
        'Referer': f'{cls.url}/',
        'Content-Type': 'application/json',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'Connection': 'keep-alive',
        'X-Requested-With': 'XMLHttpRequest',
        'Cookie': f'ssxmod_itna={data["ssxmod_itna"]};ssxmod_itna2={data["ssxmod_itna2"]}',
        'X-Source': 'web'
    }
    if token:
        headers['Authorization'] = f'Bearer {token}'
    # try:
    async with StreamSession(headers=headers) as session:
        # Best-effort auth probe; failures are logged but not fatal.
        try:
            async with session.get('https://chat.qwen.ai/api/v1/auths/', proxy=proxy) as user_info_res:
                await cls.raise_for_status(user_info_res)
                debug.log(await user_info_res.json())
        except Exception as e:
            debug.error(e)
        for attempt in range(5):
            try:
                # Obtain (or reuse) the shared bx-umidtoken anti-bot token.
                if not cls._midtoken:
                    debug.log("[Qwen] INFO: No active midtoken. Fetching a new one...")
                    async with session.get('https://sg-wum.alibaba.com/w/wu.json', proxy=proxy) as r:
                        r.raise_for_status()
                        text = await r.text()
                        match = re.search(r"(?:umx\.wu|__fycb)\('([^']+)'\)", text)
                        if not match:
                            raise RuntimeError("Failed to extract bx-umidtoken.")
                        cls._midtoken = match.group(1)
                        cls._midtoken_uses = 1
                        debug.log(
                            f"[Qwen] INFO: New midtoken obtained. Use count: {cls._midtoken_uses}. Midtoken: {cls._midtoken}")
                else:
                    cls._midtoken_uses += 1
                    debug.log(f"[Qwen] INFO: Reusing midtoken. Use count: {cls._midtoken_uses}")
                req_headers = session.headers.copy()
                req_headers['bx-umidtoken'] = cls._midtoken
                req_headers['bx-v'] = '2.5.31'
                # req_headers['bx-ua'] = ua
                message_id = str(uuid.uuid4())
                # No prior conversation: create a fresh chat to post into.
                if conversation is None:
                    chat_payload = {
                        "title": "New Chat",
                        "models": [model_name],
                        "chat_mode": "normal",  # local
                        "chat_type": chat_type,
                        "timestamp": int(time() * 1000)
                    }
                    async with session.post(
                        f'{cls.url}/api/v2/chats/new', json=chat_payload, headers=req_headers,
                        proxy=proxy
                    ) as resp:
                        await cls.raise_for_status(resp)
                        data = await resp.json()
                        if not (data.get('success') and data['data'].get('id')):
                            raise RuntimeError(f"Failed to create chat: {data}")
                        conversation = JsonConversation(
                            chat_id=data['data']['id'],
                            cookies={key: value for key, value in resp.cookies.items()},
                            parent_id=None
                        )
                files = []
                media = list(merge_media(media, messages))
                if media:
                    files = await cls.prepare_files(media, session=session,
                                                    headers=req_headers)
                msg_payload = {
                    "stream": stream,
                    "incremental_output": stream,
                    "chat_id": conversation.chat_id,
                    "chat_mode": "normal",  # local
                    "model": model_name,
                    "parent_id": conversation.parent_id,
                    "messages": [
                        {
                            "fid": message_id,
                            "parentId": conversation.parent_id,
                            "childrenIds": [],
                            "role": "user",
                            "content": prompt,
                            "user_action": "chat",
                            "files": files,
                            "models": [model_name],
                            "chat_type": chat_type,
                            "feature_config": {
                                "thinking_enabled": enable_thinking,
                                "output_schema": "phase",
                                "thinking_budget": 81920
                            },
                            "sub_chat_type": chat_type
                        }
                    ]
                }
                # NOTE(review): redundant — feature_config above already
                # carries thinking_enabled; this only rewrites it to True.
                if enable_thinking:
                    msg_payload["messages"][0]["feature_config"] = {
                        "thinking_enabled": True,
                        "output_schema": "phase",
                        "thinking_budget": 81920
                    }
                if aspect_ratio:
                    msg_payload["size"] = aspect_ratio
                async with session.post(
                    f'{cls.url}/api/v2/chat/completions?chat_id={conversation.chat_id}',
                    json=msg_payload,
                    headers=req_headers, proxy=proxy, timeout=timeout, cookies=conversation.cookies
                ) as resp:
                    await cls.raise_for_status(resp)
                    # A JSON (non-SSE) body at this point signals an error.
                    if resp.headers.get("content-type", "").startswith("application/json"):
                        resp_json = await resp.json()
                        if resp_json.get("success") is False or resp_json.get("data", {}).get("code"):
                            raise RuntimeError(f"Response: {resp_json}")
                    # args["cookies"] = merge_cookies(args.get("cookies"), resp)
                    thinking_started = False
                    usage = None
                    async for chunk in sse_stream(resp):
                        try:
                            # First event carries the response id used as the
                            # parent for follow-up messages.
                            if "response.created" in chunk:
                                conversation.parent_id = chunk.get("response.created", {}).get(
                                    "response_id")
                                yield conversation
                            error = chunk.get("error", {})
                            if error:
                                raise ResponseError(f'{error["code"]}: {error["details"]}')
                            usage = chunk.get("usage", usage)
                            choices = chunk.get("choices", [])
                            if not choices: continue
                            delta = choices[0].get("delta", {})
                            phase = delta.get("phase")
                            content = delta.get("content")
                            status = delta.get("status")
                            extra = delta.get("extra", {})
                            # Track think/answer phase to wrap tokens in Reasoning.
                            if phase == "think" and not thinking_started:
                                thinking_started = True
                            elif phase == "answer" and thinking_started:
                                thinking_started = False
                            elif phase == "image_gen" and status == "typing":
                                yield ImageResponse(content, prompt, extra)
                                continue
                            elif phase == "image_gen" and status == "finished":
                                yield FinishReason("stop")
                            if content:
                                yield Reasoning(content) if thinking_started else content
                        except (json.JSONDecodeError, KeyError, IndexError):
                            # Malformed SSE chunks are skipped silently.
                            continue
                    if usage:
                        yield Usage(**usage)
                    return
            except (aiohttp.ClientResponseError, RuntimeError) as e:
                is_rate_limit = (isinstance(e, aiohttp.ClientResponseError) and e.status == 429) or \
                                ("RateLimited" in str(e))
                if is_rate_limit:
                    # Drop the shared midtoken and conversation, then retry.
                    debug.log(
                        f"[Qwen] WARNING: Rate limit detected (attempt {attempt + 1}/5). Invalidating current midtoken.")
                    cls._midtoken = None
                    cls._midtoken_uses = 0
                    conversation = None
                    await asyncio.sleep(2)
                    continue
                else:
                    raise e
        raise RateLimitError("The Qwen provider reached the request limit after 5 attempts.")
    # except CloudflareError as e:
    #     debug.error(f"{cls.__name__}: {e}")
    #     args = await cls.get_args(proxy, **kwargs)
    #     cookie = "; ".join([f"{k}={v}" for k, v in args["cookies"].items()])
    #     continue
    # NOTE(review): unreachable leftover from the commented-out retry loop.
    raise RateLimitError("The Qwen provider reached the limit Cloudflare.")
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/Qwen.py",
"license": "GNU General Public License v3.0",
"lines": 471,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/needs_auth/Nvidia.py | from __future__ import annotations
from ..template import OpenaiTemplate
from ...config import DEFAULT_MODEL
class Nvidia(OpenaiTemplate):
    """OpenAI-compatible provider backed by NVIDIA's integrate API."""
    label = "Nvidia"
    url = "https://build.nvidia.com"
    login_url = "https://google.com"
    base_url = "https://integrate.api.nvidia.com/v1"
    backup_url = "https://g4f.space/api/nvidia"
    working = True
    active_by_default = True
    default_model = DEFAULT_MODEL
    # Do not inject a user field into upstream requests.
    add_user = False
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/Nvidia.py",
"license": "GNU General Public License v3.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/needs_auth/FenayAI.py | from __future__ import annotations
from ..template import OpenaiTemplate
from ...config import DEFAULT_MODEL
class FenayAI(OpenaiTemplate):
    """Authenticated OpenAI-compatible provider for fenayai.com."""
    url = "https://fenayai.com"
    login_url = "https://fenayai.com/dashboard"
    base_url = "https://fenayai.com/v1"
    working = True
    needs_auth = True
    models_needs_auth = True
    # The upstream API expects bare model names without a vendor prefix.
    default_model = DEFAULT_MODEL.split("/")[-1]

    @classmethod
    def get_model(cls, model: str, **kwargs) -> str:
        """Resolve *model*, dropping any "vendor/" prefix before lookup."""
        bare_name = model.split("/")[-1]
        return super().get_model(bare_name, **kwargs)
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/FenayAI.py",
"license": "GNU General Public License v3.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/gui/server/crypto.py | from __future__ import annotations
import os
import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey, RSAPrivateKey
from ...cookies import get_cookies_dir
# Lazily created, process-lifetime RSA key; populated by get_session_key().
SESSION_KEY: RSAPrivateKey = None
def get_session_key() -> RSAPrivateKey:
    """Return the process-wide RSA session key, generating it on first use."""
    global SESSION_KEY
    if SESSION_KEY is None:
        # 4096-bit key generated once per process; never persisted to disk.
        SESSION_KEY = rsa.generate_private_key(public_exponent=65537, key_size=4096)
    return SESSION_KEY
def create_or_read_keys() -> tuple[RSAPrivateKey, RSAPublicKey]:
    """Load the persistent RSA key pair, generating and caching it if absent.

    The PEM files live in the cookies directory; both files must exist for
    the cached pair to be used, otherwise a fresh pair is generated and
    written back (unencrypted) for future runs.

    Returns:
        tuple[RSAPrivateKey, RSAPublicKey]: The private and public key.
    """
    key_dir = get_cookies_dir()
    private_key_file = os.path.join(key_dir, "private_key.pem")
    public_key_file = os.path.join(key_dir, "public_key.pem")
    if os.path.isfile(private_key_file) and os.path.isfile(public_key_file):
        # Read private key
        with open(private_key_file, 'rb') as f:
            private_key = serialization.load_pem_private_key(
                f.read(),
                password=None  # Use password=b'mypassword' here if the key is encrypted
            )
        # Read public key
        with open(public_key_file, 'rb') as f:
            public_key = serialization.load_pem_public_key(f.read())
        return private_key, public_key
    # Generate keys. Security fix: the previous 1024-bit size is considered
    # breakable and was inconsistent with the 4096-bit session key above;
    # 2048 bits is the accepted minimum for RSA today.
    private_key_obj = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    public_key_obj = private_key_obj.public_key()
    # Serialize private key (unencrypted PKCS8 PEM).
    private_key_pem = private_key_obj.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    )
    # Serialize public key (SubjectPublicKeyInfo PEM).
    public_key_pem = public_key_obj.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    os.makedirs(key_dir, exist_ok=True)
    # Persist both halves so they survive restarts.
    with open(private_key_file, 'wb') as f:
        f.write(private_key_pem)
    with open(public_key_file, 'wb') as f:
        f.write(public_key_pem)
    return private_key_obj, public_key_obj
def decrypt_data(private_key_obj: RSAPrivateKey, encrypted_data: str) -> str:
    """Decrypt a base64-encoded RSA/PKCS1v15 ciphertext back to text.

    NOTE(review): PKCS1v15 is a legacy padding scheme; OAEP is preferred for
    new designs, but changing it would break existing clients.
    """
    ciphertext = base64.b64decode(encrypted_data)
    plaintext = private_key_obj.decrypt(ciphertext, padding.PKCS1v15())
    return plaintext.decode()
def encrypt_data(public_key: RSAPublicKey, decrypted_data: str) -> str:
    """RSA/PKCS1v15-encrypt text and return the ciphertext base64-encoded."""
    ciphertext = public_key.encrypt(decrypted_data.encode(), padding.PKCS1v15())
    return base64.b64encode(ciphertext).decode()
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/gui/server/crypto.py",
"license": "GNU General Public License v3.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/hf_space/BlackForestLabs_Flux1KontextDev.py | from __future__ import annotations
import os
import uuid
from ...typing import AsyncResult, Messages, MediaListType
from ...providers.response import ImageResponse, JsonConversation, Reasoning
from ...requests import StreamSession, FormData, sse_stream
from ...tools.media import merge_media
from ...image import to_bytes, is_accepted_format
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_media_prompt
from .DeepseekAI_JanusPro7b import get_zerogpu_token
from .raise_for_status import raise_for_status
class BlackForestLabs_Flux1KontextDev(AsyncGeneratorProvider, ProviderModelMixin):
    """Image-editing provider backed by the FLUX.1-Kontext-Dev HF Space."""
    label = "BlackForestLabs Flux-1-Kontext-Dev"
    url = "https://black-forest-labs-flux-1-kontext-dev.hf.space"
    space = "black-forest-labs/FLUX.1-Kontext-Dev"
    referer = f"{url}/?__theme=system"
    working = True
    default_model = "flux-kontext-dev"
    default_image_model = default_model
    image_models = [default_model]
    models = image_models
@classmethod
def run(cls, method: str, session: StreamSession, conversation: JsonConversation, data: list = None):
    """Issue a Gradio queue request for this space.

    Args:
        method: "post" enqueues a job on /queue/join; anything else GETs
            the /queue/data event stream for the session.
        session: Open StreamSession to send the request on.
        conversation: Holds zerogpu token/uuid and the Gradio session hash.
        data: Positional Gradio inputs (POST only).

    Returns:
        The (unentered) request context manager from the session.
    """
    headers = {
        # Different accept header based on GET or POST
        "accept": "application/json" if method == "post" else "text/event-stream",
        "content-type": "application/json",
        "x-zerogpu-token": conversation.zerogpu_token,
        "x-zerogpu-uuid": conversation.zerogpu_uuid,
        "referer": cls.referer,
    }
    # Filter out headers where value is None (e.g., token not yet set)
    filtered_headers = {k: v for k, v in headers.items() if v is not None}
    if method == "post":
        # POST request to enqueue the job
        return session.post(f"{cls.url}/gradio_api/queue/join?__theme=system", **{
            "headers": filtered_headers,
            "json": {
                "data": data,
                "event_data": None,
                # fn_index/trigger_id pin the target endpoint of the Space's
                # Gradio app; they must track the deployed app version.
                "fn_index": 2,
                "trigger_id": 7,
                "session_hash": conversation.session_hash
            }
        })
    # GET request to receive the event stream result
    return session.get(f"{cls.url}/gradio_api/queue/data?session_hash={conversation.session_hash}", **{
        "headers": filtered_headers,
    })
@classmethod
async def create_async_generator(
    cls,
    model: str,
    messages: Messages,
    prompt: str = None,
    media: MediaListType = None,
    proxy: str = None,
    guidance_scale: float = 2.5,
    num_inference_steps: int = 28,
    seed: int = 0,
    randomize_seed: bool = True,
    cookies: dict = None,
    api_key: str = None,
    zerogpu_uuid: str = None,
    **kwargs
) -> AsyncResult:
    """Edit an input image with FLUX.1-Kontext-Dev via the Gradio queue.

    Uploads the provided media to the Space, enqueues a generation job with
    the formatted prompt and sampling parameters, then follows the event
    stream, yielding Reasoning progress updates and finally an
    ImageResponse with the result URL.

    Raises:
        ValueError: If no media is available (this model edits an image).
    """
    # Create a conversation/session data container holding tokens and session hash
    conversation = JsonConversation(
        zerogpu_token=api_key,
        zerogpu_uuid=zerogpu_uuid or uuid.uuid4().hex,
        session_hash=uuid.uuid4().hex,
    )
    async with StreamSession(impersonate="chrome", proxy=proxy) as session:
        media = list(merge_media(media, messages))
        if media:
            data = FormData()
            # Normalize each entry to (bytes, filename) before upload.
            for i in range(len(media)):
                if media[i][1] is None and isinstance(media[i][0], str):
                    media[i] = media[i][0], os.path.basename(media[i][0])
                media[i] = (to_bytes(media[i][0]), media[i][1])
            for image, image_name in media:
                # NOTE(review): f"files" is a needless f-string literal.
                data.add_field(f"files", image, filename=image_name)
            async with session.post(f"{cls.url}/gradio_api/upload", params={"upload_id": conversation.session_hash}, data=data) as response:
                await raise_for_status(response)
                image_files = await response.json()
            # Wrap the uploaded paths in Gradio FileData descriptors.
            media = [{
                "path": image_file,
                "url": f"{cls.url}/gradio_api/file={image_file}",
                "orig_name": media[i][1],
                "size": len(media[i][0]),
                "mime_type": is_accepted_format(media[i][0]),
                "meta": {
                    "_type": "gradio.FileData"
                }
            } for i, image_file in enumerate(image_files)]
        if not media:
            raise ValueError("No media files provided for image generation.")
        # Format the prompt from messages, e.g. extract text or media description
        prompt = format_media_prompt(messages, prompt)
        # Build the data payload sent to the API (positional Gradio inputs;
        # only the last uploaded file is used as the edit source).
        data = [
            media.pop(),
            prompt,
            seed,
            randomize_seed,
            guidance_scale,
            num_inference_steps,
        ]
        # Fetch token if it's missing (calls a helper function to obtain a token)
        if conversation.zerogpu_token is None:
            conversation.zerogpu_uuid, conversation.zerogpu_token = await get_zerogpu_token(
                cls.space, session, conversation, cookies
            )
        # POST the prompt and data to start generation job in the queue
        async with cls.run("post", session, conversation, data) as response:
            await raise_for_status(response)
            result_json = await response.json()
            # Ensure we got an event id back.
            # NOTE(review): assert is stripped under `python -O`; an explicit
            # check-and-raise would be more robust.
            assert result_json.get("event_id")
        # GET the event stream to receive updates and results asynchronously
        async with cls.run("get", session, conversation) as event_response:
            await raise_for_status(event_response)
            async for chunk in sse_stream(event_response):
                if chunk.get("msg") == "process_starts":
                    yield Reasoning(label="Processing started")
                elif chunk.get("msg") == "progress":
                    progress_data = chunk.get("progress_data", [])
                    progress_data = progress_data[0] if progress_data else {}
                    yield Reasoning(label="Processing image", status=f"{progress_data.get('index', 0)}/{progress_data.get('length', 0)}")
                elif chunk.get("msg") == "process_completed":
                    url = chunk.get("output", {}).get("data", [{}])[0].get("url")
                    yield ImageResponse(url, prompt)
                    yield Reasoning(label="Completed", status="")
                    break
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/hf_space/BlackForestLabs_Flux1KontextDev.py",
"license": "GNU General Public License v3.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/needs_auth/Azure.py | from __future__ import annotations
import os
import json
from ...typing import Messages, AsyncResult, MediaListType
from ...errors import MissingAuthError, ModelNotFoundError
from ...requests import StreamSession, FormData, raise_for_status
from ...image import get_width_height, to_bytes
from ...image.copy_images import save_response_media
from ..template import OpenaiTemplate
from ..helper import format_media_prompt
class Azure(OpenaiTemplate):
    """OpenAI-compatible provider that proxies requests to Azure AI endpoints.

    Model routing and credentials are configured through the AZURE_ROUTES and
    AZURE_API_KEYS environment variables (JSON mappings of model name to
    endpoint URL / API key).
    """
    label = "Azure ☁️"
    url = "https://ai.azure.com"
    base_url = "https://g4f.space/api/azure"
    working = True
    active_by_default = False
    login_url = "https://discord.gg/qXA4Wf4Fsm"
    # model name -> endpoint URL, populated from AZURE_ROUTES in get_models()
    routes: dict[str, str] = {}
    audio_models = ["gpt-4o-mini-audio-preview"]
    vision_models = ["gpt-4.1", "o4-mini", "model-router", "flux.1-kontext-pro"]
    image_models = ["flux-1.1-pro", "flux.1-kontext-pro"]
    model_aliases = {
        "flux-kontext": "flux.1-kontext-pro"
    }
    # Extra request-body fields merged into kwargs for specific models
    # (e.g. audio output settings for the audio-preview model).
    model_extra_body = {
        "gpt-4o-mini-audio-preview": {
            "audio": {
                "voice": "alloy",
                "format": "mp3"
            },
            "modalities": ["text", "audio"],
        }
    }
    # model name -> API key, populated from AZURE_API_KEYS in get_models()
    api_keys: dict[str, str] = {}
    # (model + api_key) -> consecutive auth-failure count; keys that failed
    # 3+ times are rejected in create_async_generator
    failed: dict[str, int] = {}
    @classmethod
    def get_models(cls, api_key: str = None, **kwargs) -> list[str]:
        """Return available model names.

        Prefers routes configured via the AZURE_ROUTES environment variable;
        falls back to the parent OpenaiTemplate implementation otherwise.
        Raises ValueError when either environment variable holds invalid JSON.
        """
        api_keys = os.environ.get("AZURE_API_KEYS")
        if api_keys:
            try:
                cls.api_keys = json.loads(api_keys)
            except json.JSONDecodeError:
                raise ValueError(f"Invalid AZURE_API_KEYS environment variable")
        routes = os.environ.get("AZURE_ROUTES")
        if routes:
            try:
                routes = json.loads(routes)
            except json.JSONDecodeError:
                raise ValueError(f"Invalid AZURE_ROUTES environment variable format: {routes}")
            cls.routes = routes
        if cls.routes:
            # NOTE(review): cls.live is presumably defined on a parent class
            # (provider liveness counter) — confirm against OpenaiTemplate.
            if cls.live == 0 and cls.api_keys:
                cls.live += 1
            return list(cls.routes.keys())
        return super().get_models(api_key=api_key, **kwargs)
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        media: MediaListType = None,
        api_key: str = None,
        api_endpoint: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response chunks for chat or image requests against Azure.

        Resolves the endpoint and key from class config / environment,
        dispatches image endpoints to a dedicated upload flow, and delegates
        chat completions to the OpenaiTemplate parent.
        Raises ModelNotFoundError, MissingAuthError or ValueError on
        misconfiguration.
        """
        if not model:
            model = os.environ.get("AZURE_DEFAULT_MODEL", cls.default_model)
        if model in cls.model_aliases:
            model = cls.model_aliases[model]
        # Resolve the endpoint for this model from the configured routes.
        if not api_endpoint:
            if not cls.routes:
                cls.get_models()
            api_endpoint = cls.routes.get(model)
            if cls.routes and not api_endpoint:
                raise ModelNotFoundError(f"No API endpoint found for model: {model}")
        if not api_endpoint:
            api_endpoint = os.environ.get("AZURE_API_ENDPOINT")
        if cls.api_keys:
            api_key = cls.api_keys.get(model, cls.api_keys.get("default"))
        if not api_key:
            raise ValueError(f"API key is required for Azure provider. Ask for API key in the {cls.login_url} Discord server.")
        # Image endpoints use a multipart/JSON payload instead of chat messages.
        if api_endpoint and "/images/" in api_endpoint:
            prompt = format_media_prompt(messages, kwargs.get("prompt"))
            width, height = get_width_height(kwargs.get("aspect_ratio", "1:1"), kwargs.get("width"), kwargs.get("height"))
            output_format = kwargs.get("output_format", "png")
            form = None
            data = None
            if media:
                # Image editing: send prompt plus input image(s) as multipart form data.
                form = FormData()
                form.add_field("prompt", prompt)
                form.add_field("width", str(width))
                form.add_field("height", str(height))
                output_format = "png"
                for i in range(len(media)):
                    # Derive a filename from string paths, then load raw bytes.
                    if media[i][1] is None and isinstance(media[i][0], str):
                        media[i] = media[i][0], os.path.basename(media[i][0])
                    media[i] = (to_bytes(media[i][0]), media[i][1])
                for image, image_name in media:
                    form.add_field(f"image", image, filename=image_name)
            else:
                # Pure generation: no input image, switch to the /generations JSON API.
                api_endpoint = api_endpoint.replace("/edits", "/generations")
                data = {
                    "prompt": prompt,
                    "n": 1,
                    "width": width,
                    "height": height,
                    "output_format": output_format,
                }
            async with StreamSession(proxy=kwargs.get("proxy"), headers={
                "Authorization": f"Bearer {api_key}",
                "x-ms-model-mesh-model-name": model,
            }) as session:
                async with session.post(api_endpoint, data=form, json=data) as response:
                    data = await response.json()
                    await raise_for_status(response, data)
                    # Decode the base64 image payload and yield it as media chunks.
                    async for chunk in save_response_media(
                        data["data"][0]["b64_json"],
                        prompt,
                        content_type=f"image/{output_format.replace('jpg', 'jpeg')}"
                    ):
                        yield chunk
            return
        # Chat completions: merge model-specific extra body fields into kwargs.
        if model in cls.model_extra_body:
            for key, value in cls.model_extra_body[model].items():
                kwargs.setdefault(key, value)
            stream = False
        # Reject credentials that already failed authentication repeatedly.
        if cls.failed.get(model + api_key, 0) >= 3:
            raise MissingAuthError(f"API key has failed too many times.")
        try:
            async for chunk in super().create_async_generator(
                model=model,
                messages=messages,
                stream=stream,
                media=media,
                api_key=api_key,
                api_endpoint=api_endpoint,
                **kwargs
            ):
                yield chunk
        except MissingAuthError as e:
            # Track the failure so this key can be disabled after 3 strikes.
            cls.failed[model + api_key] = cls.failed.get(model + api_key, 0) + 1
            raise MissingAuthError(f"{e}. Ask for help in the {cls.login_url} Discord server.") from e | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/Azure.py",
"license": "GNU General Public License v3.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/OperaAria.py | from __future__ import annotations
import json
import time
import random
import re
import os
import base64
import asyncio
from aiohttp import ClientSession, FormData
from ..typing import AsyncResult, Messages, MediaListType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..providers.response import JsonConversation, FinishReason, ImageResponse
from ..image import to_data_uri, is_data_an_media
from ..tools.media import merge_media
class Conversation(JsonConversation):
    """Holds all per-session state for an Opera Aria chat."""
    access_token: str = None
    refresh_token: str = None
    encryption_key: str = None
    expires_at: float = 0
    conversation_id: str = None
    is_first_request: bool = True
    def __init__(self, refresh_token: str = None):
        """Start a fresh session with its own random encryption key."""
        self.refresh_token = refresh_token
        self.encryption_key = self._generate_encryption_key()
        self.is_first_request = True
    def is_token_expired(self) -> bool:
        """Return True once the access token's lifetime has elapsed."""
        return self.expires_at <= time.time()
    def update_token(self, access_token: str, expires_in: int):
        """Store a new access token, expiring one minute early as a safety margin."""
        self.access_token = access_token
        self.expires_at = (time.time() + expires_in) - 60
    @staticmethod
    def _generate_encryption_key() -> str:
        """Return a Base64-encoded 32-byte random key for this session."""
        return base64.b64encode(os.urandom(32)).decode('utf-8')
    @staticmethod
    def generate_conversation_id() -> str:
        """Build a conversation ID in Opera Aria's UUID-like hex format."""
        def segment(length: int) -> str:
            # Random lowercase-hex run of the requested length.
            return ''.join(random.choices('0123456789abcdef', k=length))
        # The third group is a fixed literal, mirroring Aria's ID layout.
        return '-'.join([segment(8), segment(4), '11f0', segment(4), segment(12)])
class OperaAria(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for Opera's Aria assistant, using anonymous account sign-up."""
    label = "Opera Aria"
    url = "https://play.google.com/store/apps/details?id=com.opera.browser"
    api_endpoint = "https://composer.opera-api.com/api/v1/a-chat"
    token_endpoint = "https://oauth2.opera-api.com/oauth2/v1/token/"
    signup_endpoint = "https://auth.opera.com/account/v2/external/anonymous/signup"
    upload_endpoint = "https://composer.opera-api.com/api/v1/images/upload"
    check_status_endpoint = "https://composer.opera-api.com/api/v1/images/check-status/"
    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
    default_model = 'aria'
    default_image_model = 'aria'
    image_models = ['aria']
    default_vision_model = 'aria'
    vision_models = ['aria']
    models = ['aria']
    @classmethod
    async def _generate_refresh_token(cls, session: ClientSession) -> str:
        """Create an anonymous Opera account and return its refresh token.

        Three steps: obtain an anonymous client-credentials token, sign up an
        anonymous account for the 'aria' service, then exchange the resulting
        auth token for a long-lived refresh token.
        """
        # Step 1: anonymous client-credentials token.
        headers = {
            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Mobile Safari/537.36 OPR/89.0.0.0",
            "Content-Type": "application/x-www-form-urlencoded",
        }
        data = {
            "client_id": "ofa-client",
            "client_secret": "N9OscfA3KxlJASuIe29PGZ5RpWaMTBoy",
            "grant_type": "client_credentials",
            "scope": "anonymous_account"
        }
        async with session.post(cls.token_endpoint, headers=headers, data=data) as response:
            response.raise_for_status()
            anonymous_token_data = await response.json()
        anonymous_access_token = anonymous_token_data["access_token"]
        # Step 2: anonymous account signup for the Aria service.
        headers = {
            "User-Agent": "Mozilla 5.0 (Linux; Android 14) com.opera.browser OPR/89.5.4705.84314",
            "Authorization": f"Bearer {anonymous_access_token}",
            "Accept": "application/json",
            "Content-Type": "application/json; charset=utf-8",
        }
        data = {"client_id": "ofa", "service": "aria"}
        async with session.post(cls.signup_endpoint, headers=headers, json=data) as response:
            response.raise_for_status()
            signup_data = await response.json()
        auth_token = signup_data["token"]
        # Step 3: trade the signup auth token for a refresh token.
        headers = {
            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Mobile Safari/537.36 OPR/89.0.0.0",
            "Content-Type": "application/x-www-form-urlencoded",
        }
        data = {
            "auth_token": auth_token,
            "client_id": "ofa",
            "device_name": "GPT4FREE",
            "grant_type": "auth_token",
            "scope": "ALL"
        }
        async with session.post(cls.token_endpoint, headers=headers, data=data) as response:
            response.raise_for_status()
            final_token_data = await response.json()
            return final_token_data["refresh_token"]
    @classmethod
    def get_model(cls, model: str) -> str:
        # NOTE(review): model_aliases is inherited from ProviderModelMixin here;
        # unknown names fall back to the default model — confirm mixin default.
        return cls.model_aliases.get(model, cls.default_model)
    @classmethod
    async def get_access_token(cls, session: ClientSession, conversation: Conversation) -> str:
        """Return a valid access token, refreshing or bootstrapping as needed."""
        if not conversation.refresh_token:
            conversation.refresh_token = await cls._generate_refresh_token(session)
        # Reuse the cached token while it is still valid.
        if conversation.access_token and not conversation.is_token_expired():
            return conversation.access_token
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Mobile Safari/537.36 OPR/89.0.0.0"
        }
        data = {
            "client_id": "ofa",
            "grant_type": "refresh_token",
            "refresh_token": conversation.refresh_token,
            "scope": "shodan:aria user:read"
        }
        async with session.post(cls.token_endpoint, headers=headers, data=data) as response:
            response.raise_for_status()
            result = await response.json()
            conversation.update_token(
                access_token=result["access_token"],
                expires_in=result.get("expires_in", 3600)
            )
            return result["access_token"]
    @classmethod
    async def check_upload_status(cls, session: ClientSession, access_token: str, image_id: str, max_attempts: int = 30):
        """Poll until an uploaded image is processed; raise on failure or timeout."""
        headers = {
            "Authorization": f"Bearer {access_token}",
            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Mobile Safari/537.36 OPR/89.0.0.0",
        }
        url = f"{cls.check_status_endpoint}{image_id}"
        for _ in range(max_attempts):
            async with session.get(url, headers=headers) as response:
                response.raise_for_status()
                result = await response.json()
                if result.get("status") == "ok":
                    return
                if result.get("status") == "failed":
                    raise Exception(f"Image upload failed for {image_id}")
            # Wait briefly between polls.
            await asyncio.sleep(0.5)
        raise Exception(f"Timeout waiting for image upload status for {image_id}")
    @classmethod
    async def upload_media(cls, session: ClientSession, access_token: str, media_data: bytes, filename: str) -> str:
        """Upload media bytes and return the server-assigned image_id.

        Blocks until the server reports the upload as processed.
        """
        headers = {
            "Authorization": f"Bearer {access_token}",
            "Origin": "opera-aria://ui",
            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Mobile Safari/537.36 OPR/89.0.0.0",
        }
        form_data = FormData()
        if not filename:
            # Fall back to a timestamp-based name when none was provided.
            filename = str(int(time.time() * 1000))
        content_type = is_data_an_media(media_data, filename) or "application/octet-stream"
        form_data.add_field('image_file', media_data, filename=filename, content_type=content_type)
        async with session.post(cls.upload_endpoint, headers=headers, data=form_data) as response:
            response.raise_for_status()
            result = await response.json()
            image_id = result.get("image_id")
            if not image_id:
                raise Exception("No image_id returned from upload")
            await cls.check_upload_status(session, access_token, image_id)
            return image_id
    @classmethod
    def extract_image_urls(cls, text: str) -> list[str]:
        """Extract markdown image URLs, unescaping JSON-escaped slashes."""
        pattern = r'!\[\]\((https?://[^\)]+)\)'
        urls = re.findall(pattern, text)
        return [url.replace(r'\/', '/') for url in urls]
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        refresh_token: str = None,
        conversation: Conversation = None,
        return_conversation: bool = False,
        stream: bool = True,
        media: MediaListType = None,
        **kwargs
    ) -> AsyncResult:
        """Run one chat turn against Aria.

        Uploads any attached media first, then posts the formatted prompt and
        streams (or fetches) the reply. Yields str chunks, ImageResponse for
        returned images, FinishReason, and the Conversation when requested.
        """
        model = cls.get_model(model)
        if conversation is None:
            conversation = Conversation(refresh_token)
        elif refresh_token and not conversation.refresh_token:
            conversation.refresh_token = refresh_token
        async with ClientSession() as session:
            access_token = await cls.get_access_token(session, conversation)
            # Upload any provided media and collect their server-side IDs.
            media_attachments = []
            merged_media = list(merge_media(media, messages))
            if merged_media:
                for media_data, media_name in merged_media:
                    try:
                        # Accept data URIs, file-like objects, paths, or raw bytes.
                        if isinstance(media_data, str) and media_data.startswith("data:"):
                            data_part = media_data.split(",", 1)[1]
                            media_bytes = base64.b64decode(data_part)
                        elif hasattr(media_data, 'read'):
                            media_bytes = media_data.read()
                        elif isinstance(media_data, (str, os.PathLike)):
                            with open(media_data, 'rb') as f:
                                media_bytes = f.read()
                        else:
                            media_bytes = media_data
                        image_id = await cls.upload_media(session, access_token, media_bytes, media_name)
                        media_attachments.append(image_id)
                    except Exception:
                        # Best effort: a failed upload is skipped, not fatal.
                        continue
            headers = {
                "Accept": "text/event-stream" if stream else "application/json",
                "Authorization": f"Bearer {access_token}",
                "Content-Type": "application/json",
                "Origin": "opera-aria://ui",
                "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Mobile Safari/537.36 OPR/89.0.0.0",
                "X-Opera-Timezone": "+03:00",
                "X-Opera-UI-Language": "en"
            }
            data = {
                "query": format_prompt(messages), "stream": stream, "linkify": True,
                "linkify_version": 3, "sia": True, "media_attachments": media_attachments,
                "encryption": {"key": conversation.encryption_key}
            }
            # Reuse the server-side conversation after the first exchange.
            if not conversation.is_first_request and conversation.conversation_id:
                data["conversation_id"] = conversation.conversation_id
            async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
                response.raise_for_status()
                if stream:
                    # Parse the SSE stream, separating image markdown from plain text.
                    text_buffer, image_urls, finish_reason = [], [], None
                    async for line in response.content:
                        if not line: continue
                        decoded = line.decode('utf-8').strip()
                        if not decoded.startswith('data: '): continue
                        content = decoded[6:]
                        if content == '[DONE]': break
                        try:
                            json_data = json.loads(content)
                            if 'message' in json_data:
                                message_chunk = json_data['message']
                                found_urls = cls.extract_image_urls(message_chunk)
                                if found_urls:
                                    image_urls.extend(found_urls)
                                else:
                                    text_buffer.append(message_chunk)
                            if 'conversation_id' in json_data and json_data['conversation_id']:
                                conversation.conversation_id = json_data['conversation_id']
                            if 'finish_reason' in json_data and json_data.get('finish_reason'):
                                finish_reason = json_data['finish_reason']
                        except json.JSONDecodeError:
                            # Ignore malformed SSE payloads.
                            continue
                    if image_urls:
                        yield ImageResponse(image_urls, format_prompt(messages))
                    elif text_buffer:
                        yield "".join(text_buffer)
                    if finish_reason:
                        yield FinishReason(finish_reason)
                else: # Non-streaming
                    json_data = await response.json()
                    if 'message' in json_data:
                        message = json_data['message']
                        image_urls = cls.extract_image_urls(message)
                        if image_urls:
                            yield ImageResponse(image_urls, format_prompt(messages))
                        else:
                            yield message
                    if 'conversation_id' in json_data and json_data['conversation_id']:
                        conversation.conversation_id = json_data['conversation_id']
                    if 'finish_reason' in json_data and json_data['finish_reason']:
                        yield FinishReason(json_data['finish_reason'])
            # Subsequent turns may reuse the server-side conversation id.
            conversation.is_first_request = False
            if return_conversation:
                yield conversation
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/OperaAria.py",
"license": "GNU General Public License v3.0",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/config.py | from __future__ import annotations
import os
import sys
from pathlib import Path
from functools import lru_cache
from typing import Optional
@lru_cache(maxsize=1)
def get_config_dir() -> Path:
    """Return the per-user configuration directory for this platform."""
    home = Path.home()
    if sys.platform == "win32":
        # Windows: prefer %APPDATA%, falling back to the conventional location.
        return Path(os.environ.get("APPDATA", home / "AppData" / "Roaming"))
    if sys.platform == "darwin":
        return home / "Library" / "Application Support"
    # Linux and other POSIX systems use the ~/.config default.
    return home / ".config"
# Network and timeout defaults for the local API server.
DEFAULT_PORT = 1337
DEFAULT_TIMEOUT = 600
DEFAULT_STREAM_TIMEOUT = 30
PACKAGE_NAME = "g4f"
# Per-user configuration and cookie storage locations.
CONFIG_DIR = get_config_dir() / PACKAGE_NAME
COOKIES_DIR = CONFIG_DIR / "cookies"
CUSTOM_COOKIES_DIR = "./har_and_cookies"
# Project identity and static-asset locations.
ORGANIZATION = "gpt4free"
GITHUB_REPOSITORY = f"xtekky/{ORGANIZATION}"
STATIC_DOMAIN = f"{PACKAGE_NAME}.dev"
STATIC_URL = f"https://{STATIC_DOMAIN}/"
REFFERER_URL = f"https://{STATIC_DOMAIN}/"
DIST_DIR = f"./{STATIC_DOMAIN}/dist"
DEFAULT_MODEL = "openai/gpt-oss-120b"
# CDN / raw-content mirrors for the static assets repository.
JSDELIVR_URL = "https://cdn.jsdelivr.net/"
DOWNLOAD_URL = f"{JSDELIVR_URL}gh/{ORGANIZATION}/{STATIC_DOMAIN}/"
GITHUB_URL = f"https://raw.githubusercontent.com/{ORGANIZATION}/{STATIC_DOMAIN}/refs/heads/main/"
class AppConfig:
    """Process-wide runtime configuration, settable via set_config() or env vars."""
    ignored_providers: Optional[list[str]] = None
    g4f_api_key: Optional[str] = None
    ignore_cookie_files: bool = False
    model: str = None
    provider: str = None
    media_provider: str = None
    proxy: str = None
    gui: bool = False
    demo: bool = False
    timeout: int = DEFAULT_TIMEOUT
    stream_timeout: int = DEFAULT_STREAM_TIMEOUT
    disable_custom_api_key: bool = False
    @classmethod
    def set_config(cls, **data):
        """Override class-level settings; None values are ignored."""
        for key, value in data.items():
            if value is not None:
                setattr(cls, key, value)
    @classmethod
    def load_from_env(cls):
        """Populate settings from G4F_* env vars, keeping current values as defaults."""
        cls.g4f_api_key = os.environ.get("G4F_API_KEY", cls.g4f_api_key)
        cls.timeout = int(os.environ.get("G4F_TIMEOUT", cls.timeout))
        cls.stream_timeout = int(os.environ.get("G4F_STREAM_TIMEOUT", cls.stream_timeout))
        cls.proxy = os.environ.get("G4F_PROXY", cls.proxy)
        cls.model = os.environ.get("G4F_MODEL", cls.model)
        cls.provider = os.environ.get("G4F_PROVIDER", cls.provider)
        # The flag is enabled by "true"/"1"/"yes" (case-insensitive).
        cls.disable_custom_api_key = os.environ.get("G4F_DISABLE_CUSTOM_API_KEY", str(cls.disable_custom_api_key)).lower() in ("true", "1", "yes") | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/config.py",
"license": "GNU General Public License v3.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/needs_auth/GithubCopilotAPI.py | from __future__ import annotations
from .OpenaiAPI import OpenaiAPI
class GithubCopilotAPI(OpenaiAPI):
    """OpenAI-compatible endpoint backed by GitHub Copilot; requires a Copilot token."""
    label = "GitHub Copilot API"
    url = "https://github.com/copilot"
    # Setup instructions for obtaining credentials.
    login_url = "https://aider.chat/docs/llms/github.html"
    working = True
    base_url = "https://api.githubcopilot.com"
    needs_auth = True
    # Listing models also requires authentication.
    models_needs_auth = True
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/GithubCopilotAPI.py",
"license": "GNU General Public License v3.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/cli/client.py | #!/usr/bin/env python3
import os
import sys
import asyncio
import json
import argparse
import traceback
import requests
from pathlib import Path
from typing import Optional, List, Dict
from g4f.client import AsyncClient, ClientFactory
from g4f.providers.response import JsonConversation, MediaResponse, is_content
from g4f.cookies import set_cookies_dir, read_cookie_files
from g4f.Provider import ProviderUtils
from g4f.image import extract_data_uri, is_accepted_format
from g4f.image.copy_images import get_media_dir
from g4f.client.helper import filter_markdown
from g4f.errors import MissingRequirementsError
# MarkItDown is an optional dependency used to convert URLs/documents to text.
try:
    from g4f.integration.markitdown import MarkItDown
    has_markitdown = True
except ImportError:
    has_markitdown = False
from g4f.config import CONFIG_DIR, COOKIES_DIR
from g4f import debug
# Default location for the persisted conversation history/state file.
CONVERSATION_FILE = CONFIG_DIR / "conversation.json"
class ConversationManager:
    """Persists chat history and provider conversation state in a JSON file."""
    def __init__(
        self,
        file_path: Optional[Path] = None,
        model: Optional[str] = None,
        provider: Optional[str] = None,
        max_messages: int = 5
    ) -> None:
        """Set up in-memory state and restore any previously saved session."""
        self.file_path = file_path
        self.model = model
        self.provider = provider
        self.max_messages = max_messages
        self.conversation: Optional[JsonConversation] = None
        self.history: List[Dict[str, str]] = []
        self.data: Dict = {}
        self._load()
    def _load(self) -> None:
        """Restore model, provider, history and conversation state from disk."""
        if not self.file_path or not self.file_path.is_file():
            return
        try:
            stored = json.loads(self.file_path.read_text(encoding='utf-8'))
            # Only adopt stored model/provider when the caller gave none.
            if self.provider is None and self.model is None:
                self.model = stored.get("model")
            if self.provider is None:
                self.provider = stored.get("provider")
            self.data = stored.get("data", {})
            # Provider-specific state takes precedence over the flat layout.
            per_provider = self.data.get(self.provider) if self.provider else None
            if per_provider:
                self.conversation = JsonConversation(**per_provider)
            elif not self.provider and self.data:
                self.conversation = JsonConversation(**self.data)
            self.history = stored.get("items", [])
        except Exception as e:
            print(f"Error loading conversation: {e}", file=sys.stderr)
    def save(self) -> None:
        """Write current state back to the JSON file (no-op without a path)."""
        if not self.file_path:
            return
        try:
            if self.conversation:
                snapshot = self.conversation.get_dict()
                # Keyed per provider when known, merged flat otherwise.
                if self.provider:
                    self.data[self.provider] = snapshot
                else:
                    self.data.update(snapshot)
            payload = {
                "model": self.model,
                "provider": self.provider,
                "data": self.data,
                "items": self.history
            }
            self.file_path.write_text(
                json.dumps(payload, indent=2, ensure_ascii=False),
                encoding='utf-8'
            )
        except Exception as e:
            print(f"Error saving conversation: {e}", file=sys.stderr)
    def add_message(self, role: str, content: str) -> None:
        """Append one message to the in-memory history."""
        self.history.append({"role": role, "content": content})
    def get_messages(self) -> List[Dict[str, str]]:
        """Return the recent context window, dropping any leading assistant turns."""
        window = self.history[-self.max_messages:]
        selected: List[Dict[str, str]] = []
        for message in window:
            if selected or message.get("role") in ("user", "system"):
                selected.append(message)
        return selected
async def stream_response(
    client: AsyncClient,
    input_text,
    conversation: ConversationManager,
    output_file: Optional[Path] = None,
    instructions: Optional[str] = None
) -> None:
    """Stream one chat completion, print deltas, and record the exchange.

    *input_text* may be a plain string or a (media, text) tuple.
    Raises RuntimeError when the provider produced no response at all.
    """
    media = None
    if isinstance(input_text, tuple):
        media, input_text = input_text
    if instructions:
        conversation.add_message("system", instructions)
    conversation.add_message("user", input_text)
    create_args = {
        "model": conversation.model,
        "messages": conversation.get_messages(),
        "stream": True,
        "media": media,
        "conversation": conversation.conversation,
    }
    response_tokens = []
    last_chunk = None
    async for chunk in client.chat.completions.create(**create_args):
        last_chunk = chunk
        delta = chunk.choices[0].delta.content
        if not delta:
            continue
        # Only content chunks are kept; every delta is still echoed to stdout.
        if is_content(delta):
            response_tokens.append(delta)
        try:
            print(delta, end="", flush=True)
        except UnicodeEncodeError as e:
            # Some terminals cannot render every character; skip rather than abort.
            debug.error(e)
            pass
    print()
    # Carry the provider-side conversation state forward for the next turn.
    if last_chunk and hasattr(last_chunk, "conversation"):
        conversation.conversation = last_chunk.conversation
    media_chunk = next((t for t in response_tokens if isinstance(t, MediaResponse)), None)
    text_response = ""
    if media_chunk:
        text_response = response_tokens[0] if len(response_tokens) == 1 else "".join(str(t) for t in response_tokens)
    else:
        text_response = "".join(str(t) for t in response_tokens)
    if output_file:
        if save_content(text_response, media_chunk, str(output_file)):
            print(f"\n→ Response saved to '{output_file}'")
    if text_response:
        # Media responses are not recorded in the text history.
        if not media_chunk:
            conversation.add_message("assistant", text_response)
    else:
        raise RuntimeError("No response received")
def save_content(content, media: Optional[MediaResponse], filepath: str, allowed_types=None) -> bool:
    """Persist a response to *filepath*; return True on success.

    Handles, in order: remote media URLs, data URIs, locally cached /media/
    paths, and plain markdown/text (filtered via filter_markdown).
    """
    if media:
        for url in media.get_list():
            if url.startswith(("http://", "https://")):
                try:
                    resp = requests.get(url, cookies=media.get("cookies"), headers=media.get("headers"))
                    if resp.status_code == 200:
                        with open(filepath, "wb") as f:
                            f.write(resp.content)
                        return True
                except Exception as e:
                    print(f"Error fetching media '{url}': {e}", file=sys.stderr)
                return False
            else:
                # Non-HTTP entries (data URIs / local paths) fall through below.
                content = url
                break
    if hasattr(content, "data"):
        content = content.data
    if not content:
        print("\nNo content to save.", file=sys.stderr)
        return False
    if content.startswith("data:"):
        # Inline data URI: decode and write the raw bytes.
        with open(filepath, "wb") as f:
            f.write(extract_data_uri(content))
        return True
    if content.startswith("/media/"):
        # Move the already-downloaded file out of the local media cache.
        src = content.replace("/media", get_media_dir()).split("?")[0]
        os.rename(src, filepath)
        return True
    filtered = filter_markdown(content, allowed_types)
    if filtered:
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(filtered)
        return True
    print("\nUnable to save content.", file=sys.stderr)
    return False
def get_parser(exit_on_error=True):
    """Construct the argument parser for the G4F command-line client."""
    parser = argparse.ArgumentParser(
        description="G4F CLI client with conversation history",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        exit_on_error=exit_on_error
    )
    # Bind locally so each option below reads as one compact call.
    add = parser.add_argument
    add('-d', '--debug', action='store_true', help="Verbose debug")
    add('-p', '--provider', default=None,
        help=f"Provider to use: {', '.join(k for k,v in ProviderUtils.convert.items() if v.working)}")
    add('-m', '--model', help="Model name")
    add('-O', '--output', type=Path,
        help="Save assistant output to FILE (text or media)")
    add('-i', '--instructions', help="System instructions")
    add('-c', '--cookies-dir', type=Path, default=COOKIES_DIR,
        help="Cookies/HAR directory")
    add('--conversation-file', type=Path, default=CONVERSATION_FILE,
        help="Conversation JSON")
    add('-C', '--clear-history', action='store_true', help="Wipe history")
    add('-N', '--no-config', action='store_true', help="Skip loading history")
    # -e/--edit takes an optional file whose contents are sent and then overwritten.
    add('-e', '--edit',
        type=Path,
        metavar='FILE',
        help="If FILE given: send its contents and overwrite it with AI's reply.")
    add('--max-messages', type=int, default=5,
        help="Max user+assistant turns in context")
    add('input', nargs='*',
        help="URLs, image paths or plain text")
    return parser
async def run_args(input_val, args):
    """Execute one CLI request: prepare dirs/cookies, run the chat, persist state.

    Exits with status 1 (after printing a traceback) on any failure.
    """
    try:
        # ensure dirs
        if args.output:
            args.output.parent.mkdir(parents=True, exist_ok=True)
        if args.conversation_file:
            args.conversation_file.parent.mkdir(parents=True, exist_ok=True)
        args.cookies_dir.mkdir(parents=True, exist_ok=True)
        if args.debug:
            debug.logging = True
        conv = ConversationManager(
            None if args.no_config else args.conversation_file,
            model=args.model,
            provider=args.provider,
            max_messages=args.max_messages
        )
        if args.clear_history:
            conv.history = []
            conv.conversation = None
        set_cookies_dir(str(args.cookies_dir))
        read_cookie_files()
        client = ClientFactory.create_async_client(provider=conv.provider)
        # Special input "models": list available models instead of chatting.
        if input_val == "models":
            models = client.models.get_all()
            print("\nAvailable models:")
            for m in models:
                print(f"- {m}")
            return
        if isinstance(args.edit, Path):
            file_to_edit = args.edit
            if not file_to_edit.exists():
                print(f"ERROR: file not found: {file_to_edit}", file=sys.stderr)
                sys.exit(1)
            text = file_to_edit.read_text(encoding="utf-8")
            # we will both send and overwrite this file
            input_val = f"```file: {file_to_edit}\n{text}\n```\n" + (input_val[1] if isinstance(input_val, tuple) else input_val)
            output_target = file_to_edit
        else:
            # normal, non-edit mode
            output_target = args.output
        await stream_response(client, input_val, conv, output_target, args.instructions)
        conv.save()
    except Exception:
        print(traceback.format_exc(), file=sys.stderr)
        sys.exit(1)
def run_client_args(args, exit_on_error=True):
    """Classify positional inputs (URLs, files, plain text) and dispatch to run_args."""
    input_txt = ""
    media = []
    # Index of the first token that is neither a URL nor an existing file.
    rest = 0
    for idx, tok in enumerate(args.input):
        if tok.startswith(("http://","https://")):
            # Image URLs become media attachments; other URLs are converted to text.
            resp = requests.head(tok, allow_redirects=True)
            if resp.ok and resp.headers.get("Content-Type","").startswith("image"):
                media.append(tok)
            else:
                if not has_markitdown:
                    raise MissingRequirementsError("Install markitdown")
                md = MarkItDown()
                txt = md.convert_url(tok).text_content
                input_txt += f"\n```source: {tok}\n{txt}\n```\n"
        elif os.path.isfile(tok):
            # Sniff the first bytes to decide image vs. text file.
            head = Path(tok).read_bytes()[:12]
            try:
                if is_accepted_format(head):
                    media.append(Path(tok))
                    is_img = True
                else:
                    is_img = False
            except ValueError:
                is_img = False
            if not is_img:
                txt = Path(tok).read_text(encoding="utf-8")
                input_txt += f"\n```file: {tok}\n{txt}\n```\n"
        else:
            # First plain-text token: everything from here on is the prompt.
            rest = idx
            break
        rest = idx + 1
    tail = args.input[rest:]
    if tail:
        input_txt = " ".join(tail) + "\n" + input_txt
    # Allow piping the prompt via stdin when nothing was given on the CLI.
    if not sys.stdin.isatty() and not input_txt:
        input_txt = sys.stdin.read()
    if media:
        val = (media, input_txt)
    else:
        val = input_txt.strip()
    if exit_on_error and not val:
        print("No input provided. Use -h.", file=sys.stderr)
        sys.exit(1)
    elif not val:
        raise argparse.ArgumentError(None, "No input provided. Use -h for help.")
    asyncio.run(run_args(val, args))
# Script entry point: parse CLI arguments and run one client request.
if __name__ == "__main__":
    run_client_args(get_parser().parse_args()) | {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/cli/client.py",
"license": "GNU General Public License v3.0",
"lines": 300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/files.py | from __future__ import annotations
import re
import os
from urllib.parse import unquote
from .cookies import get_cookies_dir
def secure_filename(filename: str, max_length: int = 100) -> str:
    """Sanitize a filename for safe filesystem storage.

    Returns None unchanged; otherwise URL-decodes, replaces runs of unsafe
    characters with underscores, truncates to *max_length* bytes, and trims
    leading/trailing punctuation.
    """
    if filename is None:
        return None
    cleaned = unquote(filename).strip()
    # Collapse every run of disallowed characters into one underscore.
    cleaned = re.sub(r"[^\w.,_+\-]+", "_", cleaned, flags=re.UNICODE)
    # Truncate at the byte level, dropping any partially-cut character.
    truncated = cleaned.encode("utf-8")[:max_length].decode("utf-8", "ignore")
    return truncated.strip(".,_+-")
def get_bucket_dir(*parts: str) -> str:
    """Return a path under the cookies 'buckets' directory with sanitized parts."""
    # Drop empty segments and sanitize each remaining one before joining.
    safe_parts = [secure_filename(segment) for segment in parts if segment]
    return os.path.join(get_cookies_dir(), "buckets", *safe_parts)
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/files.py",
"license": "GNU General Public License v3.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/needs_auth/Video.py | from __future__ import annotations
import os
import asyncio
from typing import Optional
from aiohttp import ClientSession, ClientTimeout
from urllib.parse import quote, quote_plus
from aiohttp import ClientSession
# Optional browser-automation dependency: zendriver (a nodriver fork).
# When it is missing or fails to import, browser-based features are disabled.
try:
    import zendriver as nodriver
    from zendriver.core.connection import ProtocolException
    has_nodriver = True
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed; any ordinary import failure still just disables the feature.
    has_nodriver = False
from ...typing import Messages, AsyncResult
from ...providers.response import VideoResponse, Reasoning, ContinueResponse, ProviderInfo
from ...requests import get_nodriver
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_media_prompt
from ... import debug
PUBLIC_URL = "https://home.g4f.dev"
SEARCH_URL = f"{PUBLIC_URL}/search/video+"
class RequestConfig:
    """Process-wide scrape state: captured video URLs keyed by prompt, plus
    the request headers (e.g. authorization) seen on the wire."""
    urls: dict[str, list[str]] = {}
    headers: dict = {}

    @classmethod
    async def get_response(cls, prompt: str, search: bool = False) -> Optional[VideoResponse]:
        """Build a VideoResponse for ``prompt`` from captured URLs, or — when
        ``search`` is set — by probing the public search endpoint. Returns
        ``None`` when nothing is found."""
        captured = cls.urls.get(prompt)
        if captured:
            # De-duplicate and cap at 10 URLs; pass along the sniffed
            # authorization header when one was captured.
            deduplicated = list(set(captured))[:10]
            auth = cls.headers.get("authorization")
            return VideoResponse(deduplicated, prompt, {
                "headers": {"authorization": auth} if auth else {},
            })
        if not search:
            return None
        async with ClientSession() as session:
            results = []
            # Probe successive result pages until one is missing.
            for skip in range(0, 9):
                request_url = SEARCH_URL + quote_plus(prompt) + f"?skip={skip}"
                async with session.get(request_url, timeout=ClientTimeout(total=10)) as response:
                    if not response.ok:
                        break
                    results.append(str(response.url))
            if results:
                return VideoResponse(results, prompt)
class Video(AsyncGeneratorProvider, ProviderModelMixin):
    """Video provider that resolves a prompt to playable video URLs.

    Two strategies, selected by ``model``:

    * ``"search"`` — look for existing videos: first in the in-process
      ``RequestConfig`` cache / public search endpoint, then by scrolling
      the Sora explore page in a browser while sniffing network requests.
    * ``"sora"`` — drive a logged-in sora.chatgpt.com session with
      nodriver to generate a new video and capture its URL from traffic.
    """
    # Entry page per model; the "search" URL embeds the quoted prompt.
    urls = {
        "search": "https://sora.chatgpt.com/explore?query={0}",
        "sora": "https://sora.chatgpt.com/explore",
        #"veo": "https://aistudio.google.com/generate-video"
    }
    # Path appended to the share URL for the HTTP (no-browser) fallback.
    api_path = f"?provider=Video&cache=true&prompt="
    # Requests to this prefix (or any URL containing ".mp4") are treated
    # as video media by the CDP sniffer below.
    drive_url = "https://www.googleapis.com/drive/v3/"
    active_by_default = True
    default_model = "search"
    models = list(urls.keys())
    video_models = models
    needs_auth = True
    # Provider is only usable when zendriver/nodriver imported successfully.
    working = has_nodriver
    browser = None
    stop_browser = None
    # Base URL of a remote g4f instance used as an HTTP fallback when no
    # local browser can be started; read from G4F_SHARE_URL on first use.
    share_url: Optional[str] = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        prompt: str = None,
        aspect_ratio: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield ``Reasoning`` progress updates followed by a ``VideoResponse``.

        Raises ValueError for an unsupported model or empty prompt, and
        RuntimeError when the browser flow cannot produce a video URL.
        """
        if cls.share_url is None:
            cls.share_url = os.getenv("G4F_SHARE_URL")
        if not model:
            model = cls.default_model
        if model not in cls.video_models:
            raise ValueError(f"Model '{model}' is not supported by {cls.__name__}. Supported models: {cls.models}")
        yield ProviderInfo(**cls.get_dict(), model="sora")
        # Truncate the prompt to 100 UTF-8 bytes without splitting a character.
        prompt = format_media_prompt(messages, prompt).encode()[:100].decode("utf-8", "ignore").strip()
        if not prompt:
            raise ValueError("Prompt cannot be empty.")
        # Fast path: serve from cache or the public search endpoint.
        response = await RequestConfig.get_response(prompt, model=="search")
        if response:
            yield Reasoning(label=f"Found {len(response.urls)} Video(s)", status="")
            yield response
            return
        try:
            yield Reasoning(label="Open browser")
            browser, stop_browser = await get_nodriver(proxy=proxy)
        except Exception as e:
            # No local browser available: fall back to the remote share URL,
            # or re-raise immediately when none is configured.
            if cls.share_url is None:
                raise
            debug.error(f"Error getting nodriver:", e)
            async with ClientSession() as session:
                yield Reasoning(label="Generating")
                async with session.get(f"{cls.share_url}{cls.api_path + quote(prompt)}") as response:
                    if not response.ok:
                        debug.error(f"Failed to generate Video: {response.status}")
                    else:
                        yield Reasoning(label="Finished", status="")
                        if response.headers.get("content-type", "text/plain").startswith("text/plain"):
                            # Plain-text body: one URL per line; relative
                            # URLs are resolved against the public host.
                            data = (await response.text()).split("\n")
                            yield VideoResponse([f"{PUBLIC_URL}{url}" if url.startswith("/") else url for url in data], prompt)
                            return
                        yield VideoResponse(str(response.url), prompt)
                        return
            # Fallback request failed as well: re-raise the original error.
            raise
        page = None
        try:
            yield ContinueResponse("Timeout waiting for Video URL")
            page = await browser.get(cls.urls[model].format(quote(prompt)))
        except Exception as e:
            await stop_browser()
            debug.error(f"Error opening page:", e)
        if prompt not in RequestConfig.urls:
            RequestConfig.urls[prompt] = []
        def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
            # CDP hook: record media URLs (and their request headers, lowercased)
            # whenever the page fetches a Drive URL or an .mp4 resource.
            if event.request.url.startswith(cls.drive_url) or ".mp4" in event.request.url:
                RequestConfig.headers = {}
                for key, value in event.request.headers.items():
                    RequestConfig.headers[key.lower()] = value
                # Skip URLs already captured for any prompt.
                for urls in RequestConfig.urls.values():
                    if event.request.url in urls:
                        return
                debug.log(f"Adding URL: {event.request.url}")
                RequestConfig.urls[prompt].append(event.request.url)
        if not page:
            raise RuntimeError("Failed to open page.")
        # Wait (up to 300 find() attempts) for the logged-in UI to appear.
        for idx in range(300):
            button = await page.find("User menu")
            if button:
                break
            if idx == 299:
                await stop_browser()
                raise RuntimeError("Failed to wait for user menu.")
        if model == "search" and page is not None:
            # Sniff traffic while scrolling through the explore results.
            await page.send(nodriver.cdp.network.enable())
            page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
            for _ in range(5):
                await page.scroll_down(5)
                await asyncio.sleep(1)
            response = await RequestConfig.get_response(prompt, True)
            if response:
                await stop_browser()
                yield Reasoning(label="Found", status="")
                yield response
                return
        if page is None:
            raise RuntimeError("Failed to open page or get response.")
        try:
            await asyncio.sleep(3)
            await page.select("textarea", 240)
            # Best-effort UI clicks below: failures are logged, not fatal.
            try:
                button = await page.find("Image")
                if button:
                    await button.click()
                else:
                    debug.error("No 'Image' button found.")
                button = await page.find("Video")
                if button:
                    await button.click()
                    yield Reasoning(label=f"Clicked 'Video' button")
                else:
                    debug.error("No 'Video' button found.")
            except Exception as e:
                debug.error(f"Error clicking button:", e)
            try:
                if aspect_ratio:
                    # Open the ratio picker, then choose the requested ratio.
                    button = await page.find(":")
                    if button:
                        await button.click()
                    else:
                        debug.error("No 'x:x' button found.")
                    await asyncio.sleep(1)
                    button = await page.find(aspect_ratio)
                    if button:
                        await button.click()
                        yield Reasoning(label=f"Clicked '{aspect_ratio}' button")
                    else:
                        debug.error(f"No '{aspect_ratio}' button found.")
            except Exception as e:
                debug.error(f"Error clicking button:", e)
            debug.log(f"Using prompt: {prompt}")
            # Type the prompt into the composer and submit it.
            textarea = await page.select("textarea", 180)
            await textarea.click()
            await textarea.clear_input()
            await textarea.send_keys(prompt)
            await asyncio.sleep(1)
            yield Reasoning(label=f"Sending prompt", token=prompt)
            try:
                button = await page.select('button[type="submit"]', 5)
                if button:
                    await button.click()
            except Exception as e:
                debug.error(f"Error clicking submit button:", e)
            try:
                button = await page.find("Create video")
                if button:
                    await button.click()
                    yield Reasoning(label=f"Clicked 'Create video' button")
            except Exception as e:
                debug.error(f"Error clicking 'Create video' button:", e)
            try:
                button = await page.find("Activity")
                if button:
                    await button.click()
                    yield Reasoning(label=f"Clicked 'Activity' button")
            except Exception as e:
                debug.error(f"Error clicking 'Activity' button:", e)
            # Poll up to 60s for the generated entry to show up.
            for idx in range(60):
                await asyncio.sleep(1)
                try:
                    button = await page.find("New Video")
                    if button:
                        await button.click()
                        yield Reasoning(label=f"Clicked 'New Video' button")
                        break
                except ProtocolException as e:
                    if idx == 59:
                        debug.error(e)
                if idx == 59:
                    await stop_browser()
                    raise RuntimeError("Failed to click 'New Video' button")
            await asyncio.sleep(3)
            if model != "search" and page is not None:
                # Start sniffing now for the generation flow.
                await page.send(nodriver.cdp.network.enable())
                page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
            # Wait up to ~300s for the sniffer to capture a media URL.
            for idx in range(300):
                yield Reasoning(label="Waiting for Video...", status=f"{idx+1}/300")
                await asyncio.sleep(1)
                if RequestConfig.urls[prompt]:
                    await asyncio.sleep(2)
                    response = await RequestConfig.get_response(prompt, model=="search")
                    if response:
                        await stop_browser()
                        yield Reasoning(label="Finished", status="")
                        yield response
                        return
                if idx == 299:
                    await stop_browser()
                    raise RuntimeError("Failed to get Video URL")
        finally:
            await stop_browser()
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/needs_auth/Video.py",
"license": "GNU General Public License v3.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:etc/tool/update.py | import os
from g4f import version
from subprocess import call, STDOUT
if __name__ == "__main__":
    # Self-update script: only runs when live mode is explicitly enabled.
    if not os.getenv("G4F_LIVE"):
        print("Live mode is not enabled. Exiting update script.")
        exit(0)
    # Discard any local changes and pull the latest origin/main.
    command = ["git", "fetch"]
    call(command, stderr=STDOUT)
    command = ["git", "reset", "--hard"]
    call(command, stderr=STDOUT)
    command = ["git" ,"pull", "origin", "main"]
    call(command, stderr=STDOUT)
    # Record the freshly checked-out git version by appending a
    # `version` assignment to g4f/debug.py.
    current_version = version.get_git_version()
    with open("g4f/debug.py", "a") as f:
        f.write(f"\nversion: str = '{current_version}'\n")
    # Dependency reinstall intentionally disabled:
    #command = ["pip", "install", "-U", "-r" , "requirements-slim.txt"]
    #call(command, stderr=STDOUT)
"repo_id": "xtekky/gpt4free",
"file_path": "etc/tool/update.py",
"license": "GNU General Public License v3.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/Provider/WeWordle.py | from __future__ import annotations
import json
import asyncio
import re
from typing import Union
from aiohttp import ClientSession, ClientResponse, ClientResponseError, ClientConnectorError
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class WeWordle(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider backed by the wewordle.org GPT web API.

    Posts the whole message history as JSON to ``api_endpoint`` and yields
    the assistant reply content. Retries with exponential backoff on HTTP
    429 (rate limit) and connection errors, up to ``MAX_RETRIES`` times.
    """
    label = "WeWordle"
    url = "https://chat-gpt.com"
    api_endpoint = "https://wewordle.org/gptapi/v1/web/turbo"
    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-4'
    models = [default_model]

    # Backoff tuning for the retry loop in create_async_generator.
    MAX_RETRIES = 3
    INITIAL_RETRY_DELAY_SECONDS = 5
    MAX_RETRY_DELAY_SECONDS = 60
    POST_REQUEST_DELAY_SECONDS = 1  # polite pause after a successful request

    @staticmethod
    async def iter_any(response: ClientResponse):
        """Yield the body of *response*: chunk by chunk for chunked or
        event-stream replies, otherwise the whole body as one string."""
        if response.headers.get("Transfer-Encoding") == "chunked" or \
            response.headers.get("Content-Type") == "text/event-stream":
            async for chunk in response.content:
                if chunk:
                    yield chunk.decode()
        else:
            yield await response.text()

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield assistant reply text for *messages*.

        Raises ValueError when the API reports an exhausted quota with an
        error message, ClientResponseError for non-429 HTTP errors, and a
        generic Exception once every retry has been used up.
        """
        model = cls.get_model(model)
        # `api_endpoint` is a plain literal URL, so the former markdown-link
        # unwrapping (which could never match it) was dead code and is gone.
        request_url = cls.api_endpoint
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat-gpt.com",
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": "https://chat-gpt.com/",
            "sec-ch-ua": "\"Not.A/Brand\";v=\"99\", \"Chromium\";v=\"136\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Linux\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36"
        }
        # Both branches of the old if/else built this same payload.
        data_payload = {"messages": messages, "model": model}
        retries = 0
        current_delay = cls.INITIAL_RETRY_DELAY_SECONDS
        async with ClientSession(headers=headers) as session:
            while retries <= cls.MAX_RETRIES:
                try:
                    async with session.post(request_url, json=data_payload, proxy=proxy) as response:
                        # Raises ClientResponseError for HTTP errors; 429 is
                        # retried below with exponential backoff.
                        response.raise_for_status()
                        async for chunk in cls.iter_any(response):
                            try:
                                json_data = json.loads(chunk)
                                if isinstance(json_data, dict):
                                    # {"message": {"content": ...}} shape.
                                    if "message" in json_data and isinstance(json_data["message"], dict) and "content" in json_data["message"]:
                                        yield json_data["message"]["content"]
                                    # OpenAI-style {"choices": [{"message": {"content": ...}}]} shape.
                                    elif "choices" in json_data and isinstance(json_data["choices"], list) and \
                                            json_data["choices"] and isinstance(json_data["choices"][0], dict) and \
                                            "message" in json_data["choices"][0] and isinstance(json_data["choices"][0]["message"], dict) and \
                                            "content" in json_data["choices"][0]["message"]:
                                        yield json_data["choices"][0]["message"]["content"]
                                    elif "limit" in json_data and json_data["limit"] == 0:
                                        # Quota exhausted: surface the API's error message if any.
                                        if "error" in json_data and isinstance(json_data["error"], dict) and "message" in json_data["error"]:
                                            raise ValueError(f"API error: {json_data['error']['message']}")
                                        else:
                                            yield chunk
                                    else:
                                        yield chunk
                                else:
                                    yield chunk
                            except json.JSONDecodeError:
                                # Not JSON at all: pass the raw text through.
                                yield chunk
                        await asyncio.sleep(cls.POST_REQUEST_DELAY_SECONDS)
                        return
                except ClientResponseError as e:
                    if e.status != 429:
                        raise
                    await asyncio.sleep(current_delay)
                    retries += 1
                    current_delay = min(current_delay * 2, cls.MAX_RETRY_DELAY_SECONDS)
                    if retries > cls.MAX_RETRIES:
                        raise
                except ClientConnectorError:
                    await asyncio.sleep(current_delay)
                    retries += 1
                    current_delay = min(current_delay * 2, cls.MAX_RETRY_DELAY_SECONDS)
                    if retries > cls.MAX_RETRIES:
                        raise
        raise Exception(f"Failed to get response from {request_url} after multiple retries")
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/WeWordle.py",
"license": "GNU General Public License v3.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/audio/OpenAIFM.py | from __future__ import annotations
from urllib.parse import quote
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_media_prompt, get_system_prompt
from ...image.copy_images import save_response_media
from ...providers.response import AudioResponse
from ...requests.raise_for_status import raise_for_status
from ...requests.aiohttp import get_connector
from ...requests import DEFAULT_HEADERS
class OpenAIFM(AsyncGeneratorProvider, ProviderModelMixin):
    """Text-to-speech provider backed by www.openai.fm.

    ``model`` may name either a voice or one of the style presets defined
    below; a style preset's text is sent to the API as the ``prompt``
    (speaking instructions) parameter.
    """
    label = "OpenAI.fm"
    url = "https://www.openai.fm"
    api_endpoint = "https://www.openai.fm/api/generate"
    working = True

    default_model = 'coral'
    voices = ['alloy', 'ash', 'ballad', 'coral', 'echo', 'fable', 'onyx', 'nova', 'sage', 'shimmer', 'verse']
    # Each style name matches a class attribute holding its instruction text.
    styles = ['friendly', 'patient_teacher', 'noir_detective', 'cowboy', 'calm', 'scientific_style']

    audio_models = {"gpt-4o-mini-tts": voices}
    model_aliases = {"gpt-4o-mini-tts": default_model}
    models = styles + voices

    @classmethod
    def get_grouped_models(cls):
        """Return the model list grouped into style presets and voices (for UI display)."""
        return [
            {"group":"Styles", "models": cls.styles},
            {"group":"Voices", "models": cls.voices},
        ]

    # Instruction presets sent verbatim as the API's "prompt" parameter.
    friendly = """Affect/personality: A cheerful guide

Tone: Friendly, clear, and reassuring, creating a calm atmosphere and making the listener feel confident and comfortable.

Pronunciation: Clear, articulate, and steady, ensuring each instruction is easily understood while maintaining a natural, conversational flow.

Pause: Brief, purposeful pauses after key instructions (e.g., "cross the street" and "turn right") to allow time for the listener to process the information and follow along.

Emotion: Warm and supportive, conveying empathy and care, ensuring the listener feels guided and safe throughout the journey."""

    patient_teacher = """Accent/Affect: Warm, refined, and gently instructive, reminiscent of a friendly art instructor.

Tone: Calm, encouraging, and articulate, clearly describing each step with patience.

Pacing: Slow and deliberate, pausing often to allow the listener to follow instructions comfortably.

Emotion: Cheerful, supportive, and pleasantly enthusiastic; convey genuine enjoyment and appreciation of art.

Pronunciation: Clearly articulate artistic terminology (e.g., "brushstrokes," "landscape," "palette") with gentle emphasis.

Personality Affect: Friendly and approachable with a hint of sophistication; speak confidently and reassuringly, guiding users through each painting step patiently and warmly."""

    noir_detective = """Affect: a mysterious noir detective

Tone: Cool, detached, but subtly reassuring—like they've seen it all and know how to handle a missing package like it's just another case.

Delivery: Slow and deliberate, with dramatic pauses to build suspense, as if every detail matters in this investigation.

Emotion: A mix of world-weariness and quiet determination, with just a hint of dry humor to keep things from getting too grim.

Punctuation: Short, punchy sentences with ellipses and dashes to create rhythm and tension, mimicking the inner monologue of a detective piecing together clues."""

    cowboy = """Voice: Warm, relaxed, and friendly, with a steady cowboy drawl that feels approachable.

Punctuation: Light and natural, with gentle pauses that create a conversational rhythm without feeling rushed.

Delivery: Smooth and easygoing, with a laid-back pace that reassures the listener while keeping things clear.

Phrasing: Simple, direct, and folksy, using casual, familiar language to make technical support feel more personable.

Tone: Lighthearted and welcoming, with a calm confidence that puts the caller at ease."""

    calm = """Voice Affect: Calm, composed, and reassuring; project quiet authority and confidence.

Tone: Sincere, empathetic, and gently authoritative—express genuine apology while conveying competence.

Pacing: Steady and moderate; unhurried enough to communicate care, yet efficient enough to demonstrate professionalism.

Emotion: Genuine empathy and understanding; speak with warmth, especially during apologies ("I'm very sorry for any disruption...").

Pronunciation: Clear and precise, emphasizing key reassurances ("smoothly," "quickly," "promptly") to reinforce confidence.

Pauses: Brief pauses after offering assistance or requesting details, highlighting willingness to listen and support."""

    scientific_style = """Voice: Authoritative and precise, with a measured, academic tone.

Tone: Formal and analytical, maintaining objectivity while conveying complex information.

Pacing: Moderate and deliberate, allowing time for complex concepts to be processed.

Pronunciation: Precise articulation of technical terms and scientific vocabulary.

Pauses: Strategic pauses after introducing new concepts to allow for comprehension.

Emotion: Restrained enthusiasm for discoveries and findings, conveying intellectual curiosity."""

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        prompt: str = None,
        audio: dict = {},  # only read via .get(), never mutated, so the mutable default is harmless here
        download_media: bool = True,
        **kwargs
    ) -> AsyncResult:
        """Yield an AudioResponse: either a direct API URL (download_media=False)
        or locally saved media chunks streamed from the API."""
        # The system prompt, when present, becomes the default speaking instructions.
        default_instructions = get_system_prompt(messages)
        # A model naming a class attribute is treated as a style preset:
        # its text becomes the instructions and the voice falls back to the
        # default model. NOTE(review): hasattr matches ANY class attribute
        # (e.g. "url"), not only names in `styles` — confirm this is intended.
        if model and hasattr(cls, model):
            default_instructions = getattr(cls, model)
            model = ""
        model = cls.get_model(model)
        # Explicit audio/kwargs settings win over the derived defaults.
        voice = audio.get("voice", kwargs.get("voice", model))
        instructions = audio.get("instructions", kwargs.get("instructions", default_instructions))
        headers = {
            **DEFAULT_HEADERS,
            "referer": f"{cls.url}/"
        }
        prompt = format_media_prompt(messages, prompt)
        params = {
            "input": prompt,
            "prompt": instructions,
            "voice": voice
        }
        if not download_media:
            # Hand back a ready-to-fetch URL instead of downloading the audio.
            query = "&".join(f"{k}={quote(str(v))}" for k, v in params.items() if v is not None)
            yield AudioResponse(f"{cls.api_endpoint}?{query}")
            return
        async with ClientSession(headers=headers, connector=get_connector(proxy=proxy)) as session:
            async with session.get(
                cls.api_endpoint,
                params=params
            ) as response:
                await raise_for_status(response)
                async for chunk in save_response_media(response, prompt, [model, voice]):
                    yield chunk
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/audio/OpenAIFM.py",
"license": "GNU General Public License v3.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/integration/markitdown/_audio_converter.py | from typing import Any, BinaryIO
from markitdown.converters._exiftool import exiftool_metadata
from markitdown._base_converter import DocumentConverter, DocumentConverterResult
from markitdown._stream_info import StreamInfo
from markitdown._exceptions import MissingDependencyException
from ._transcribe_audio import transcribe_audio
ACCEPTED_MIME_TYPE_PREFIXES = [
"audio/x-wav",
"audio/mpeg",
"video/mp4",
"video/webm",
"audio/webm",
]
ACCEPTED_FILE_EXTENSIONS = [
".wav",
".mp3",
".m4a",
".mp4",
".webm",
]
class AudioConverter(DocumentConverter):
    """
    Converts audio files to markdown via extraction of metadata (if `exiftool` is installed), and speech transcription (if `speech_recognition` is installed).
    """

    def accepts(
        self,
        file_stream: BinaryIO,
        stream_info: StreamInfo,
        **kwargs: Any,  # Options to pass to the converter
    ) -> bool:
        """Return True when the stream's extension or MIME type matches a supported audio/video format."""
        mimetype = (stream_info.mimetype or "").lower()
        extension = (stream_info.extension or "").lower()

        if extension in ACCEPTED_FILE_EXTENSIONS:
            return True

        for prefix in ACCEPTED_MIME_TYPE_PREFIXES:
            if mimetype.startswith(prefix):
                return True

        return False

    def convert(
        self,
        file_stream: BinaryIO,
        stream_info: StreamInfo,
        recognition_language: str = None,
        **kwargs: Any,  # Options to pass to the converter
    ) -> DocumentConverterResult:
        """Convert an audio/video stream to a markdown transcript (or metadata listing)."""
        md_content = ""

        # Add metadata
        metadata = exiftool_metadata(
            file_stream, exiftool_path=kwargs.get("exiftool_path")
        )

        if metadata:
            for f in [
                "Title",
                "Artist",
                "Author",
                "Band",
                "Album",
                "Genre",
                "Track",
                "DateTimeOriginal",
                "CreateDate",
                # "Duration", -- Wrong values when read from memory
                "NumChannels",
                "SampleRate",
                "AvgBytesPerSec",
                "BitsPerSample",
            ]:
                if f in metadata:
                    md_content += f"{f}: {metadata[f]}\n"

        # Figure out the audio format for transcription
        if stream_info.extension == ".wav" or stream_info.mimetype == "audio/x-wav":
            audio_format = "wav"
        elif stream_info.extension == ".mp3" or stream_info.mimetype == "audio/mpeg":
            audio_format = "mp3"
        elif (
            stream_info.extension in [".mp4", ".m4a"]
            or stream_info.mimetype == "video/mp4"
        ):
            audio_format = "mp4"
        elif stream_info.extension == ".webm" or stream_info.mimetype in ("audio/webm", "video/webm"):
            audio_format = "webm"
        else:
            audio_format = None

        # Transcribe
        if audio_format:
            try:
                # NOTE(review): this assignment REPLACES the metadata text
                # collected above instead of appending to it — confirm that
                # discarding the metadata on successful transcription is
                # intended.
                md_content = transcribe_audio(file_stream, audio_format=audio_format, language=recognition_language)
            except MissingDependencyException:
                # Best effort: fall back to metadata-only output when the
                # optional transcription dependencies are missing.
                pass

        # Return the result
        return DocumentConverterResult(markdown=md_content.strip())
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/integration/markitdown/_audio_converter.py",
"license": "GNU General Public License v3.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/integration/markitdown/_base_converter.py | from typing import Awaitable
class AsyncDocumentConverterResult:
    """Markdown conversion result whose text is produced asynchronously."""

    def __init__(
        self,
        text_content: Awaitable[str],
    ):
        # Awaitable resolving to the converted Markdown text.
        self.text_content = text_content
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/integration/markitdown/_base_converter.py",
"license": "GNU General Public License v3.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/integration/markitdown/_image_converter.py | from typing import BinaryIO, Any
import asyncio
from markitdown._base_converter import DocumentConverter, DocumentConverterResult
from markitdown._stream_info import StreamInfo
from markitdown.converters._llm_caption import llm_caption
from markitdown.converters._exiftool import exiftool_metadata
from ._base_converter import AsyncDocumentConverterResult
ACCEPTED_MIME_TYPE_PREFIXES = [
"image/jpeg",
"image/png",
]
ACCEPTED_FILE_EXTENSIONS = [".jpg", ".jpeg", ".png"]
class ImageConverter(DocumentConverter):
    """
    Converts images to markdown via extraction of metadata (if `exiftool` is installed), and description via a multimodal LLM (if an llm_client is configured).
    """

    def accepts(
        self,
        file_stream: BinaryIO,
        stream_info: StreamInfo,
        **kwargs: Any,
    ) -> bool:
        """Accept streams with a known image extension or MIME type."""
        extension = (stream_info.extension or "").lower()
        if extension in ACCEPTED_FILE_EXTENSIONS:
            return True
        mimetype = (stream_info.mimetype or "").lower()
        return any(mimetype.startswith(prefix) for prefix in ACCEPTED_MIME_TYPE_PREFIXES)

    def convert(
        self,
        file_stream: BinaryIO,
        stream_info: StreamInfo,
        **kwargs: Any,  # Options to pass to the converter
    ) -> DocumentConverterResult:
        """Produce markdown from image metadata plus an optional LLM caption."""
        # Collect interesting exiftool fields (no-op when exiftool is absent).
        metadata = exiftool_metadata(
            file_stream, exiftool_path=kwargs.get("exiftool_path")
        )
        lines = []
        if metadata:
            interesting_fields = (
                "ImageSize",
                "Title",
                "Caption",
                "Description",
                "Keywords",
                "Artist",
                "Author",
                "DateTimeOriginal",
                "CreateDate",
                "GPSPosition",
            )
            lines = [
                f"{field}: {metadata[field]}\n"
                for field in interesting_fields
                if field in metadata
            ]
        md_content = "".join(lines)

        # Optionally describe the image with a (possibly async) LLM client.
        client = kwargs.get("llm_client")
        model = kwargs.get("llm_model")
        if client is not None and model is not None:
            description = llm_caption(
                file_stream,
                stream_info,
                client=client,
                model=model,
                prompt=kwargs.get("llm_prompt"),
            )
            if asyncio.iscoroutine(description):
                # Async client: hand the pending caption back to the caller.
                return AsyncDocumentConverterResult(
                    description,
                )
            if description is not None:
                md_content += "\n# Description:\n" + description.strip() + "\n"

        return DocumentConverterResult(
            markdown=md_content,
        )
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/integration/markitdown/_image_converter.py",
"license": "GNU General Public License v3.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/integration/markitdown/_llm_caption.py | from typing import BinaryIO, Union, Awaitable
import base64
import mimetypes
import asyncio
from markitdown._stream_info import StreamInfo
def llm_caption(
    file_stream: BinaryIO, stream_info: StreamInfo, *, client, model, prompt=None
) -> Union[None, str, Awaitable[str]]:
    """Ask a multimodal LLM for an image caption.

    The image is read from ``file_stream`` (the stream position is
    restored afterwards) and sent as a base64 data URI. Returns the
    caption string, an awaitable resolving to it when ``client`` is
    asynchronous, or ``None`` when the stream could not be read.
    """
    if prompt is None or prompt.strip() == "":
        prompt = "Write a detailed caption for this image."

    # Determine the MIME type, falling back to the file extension.
    content_type = stream_info.mimetype
    if not content_type:
        content_type, _ = mimetypes.guess_type("_dummy" + (stream_info.extension or ""))
    if not content_type:
        content_type = "application/octet-stream"

    # Read and base64-encode the image, restoring the stream position
    # even when reading fails (best effort: failure yields None).
    saved_pos = file_stream.tell()
    try:
        encoded = base64.b64encode(file_stream.read()).decode("utf-8")
    except Exception:
        return None
    finally:
        file_stream.seek(saved_pos)

    data_uri = f"data:{content_type};base64,{encoded}"

    # OpenAI-style multimodal chat request.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": data_uri,
                    },
                },
            ],
        }
    ]

    response = client.chat.completions.create(model=model, messages=messages)
    if asyncio.iscoroutine(response):
        # Async client: wrap the pending call so the caller can await it.
        async def _resolve(pending):
            completed = await pending
            return completed.choices[0].message.content
        return _resolve(response)
    return response.choices[0].message.content
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/integration/markitdown/_llm_caption.py",
"license": "GNU General Public License v3.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
xtekky/gpt4free:g4f/integration/markitdown/_transcribe_audio.py | import io
import sys
from typing import BinaryIO
from markitdown._exceptions import MissingDependencyException
# Try loading optional (but in this case, required) dependencies
# Save reporting of any exceptions for later
_dependency_exc_info = None
try:
# Suppress some warnings on library import
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=SyntaxWarning)
import speech_recognition as sr
import pydub
except ImportError:
# Preserve the error and stack trace for later
_dependency_exc_info = sys.exc_info()
def transcribe_audio(file_stream: BinaryIO, *, audio_format: str = "wav", language: str = None) -> str:
    """Transcribe speech from an audio stream.

    wav/aiff/flac streams are fed to the recognizer directly; mp3/mp4/webm
    are first converted to in-memory WAV with pydub. Transcription tries
    the faster-whisper backend first and falls back to the Google Web
    Speech API when faster-whisper is not installed. ``language`` is a
    locale tag such as "en-US" (the default); faster-whisper receives only
    its primary subtag. Raises MissingDependencyException when the
    optional audio dependencies failed to import at module load, and
    ValueError for an unsupported ``audio_format``.
    """
    # Check for installed dependencies
    if _dependency_exc_info is not None:
        # Re-raise with the original import failure chained in, preserving
        # its traceback via the exc_info saved at import time.
        raise MissingDependencyException(
            "Speech transcription requires installing MarkItdown with the [audio-transcription] optional dependencies. E.g., `pip install markitdown[audio-transcription]` or `pip install markitdown[all]`"
        ) from _dependency_exc_info[
            1
        ].with_traceback(  # type: ignore[union-attr]
            _dependency_exc_info[2]
        )
    if audio_format in ["wav", "aiff", "flac"]:
        # Formats the recognizer reads natively.
        audio_source = file_stream
    elif audio_format in ["mp3", "mp4", "webm"]:
        # Convert to WAV in memory before recognition.
        audio_segment = pydub.AudioSegment.from_file(file_stream, format=audio_format)
        audio_source = io.BytesIO()
        audio_segment.export(audio_source, format="wav")
        audio_source.seek(0)
    else:
        raise ValueError(f"Unsupported audio format: {audio_format}")
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_source) as source:
        audio = recognizer.record(source)
    if language is None:
        language = "en-US"
    try:
        # Prefer the local faster-whisper backend (primary language subtag only).
        transcript = recognizer.recognize_faster_whisper(audio, language=language.split("-")[0]).strip()
    except ImportError:
        # faster-whisper not installed: fall back to the Google Web Speech API.
        transcript = recognizer.recognize_google(audio, language=language).strip()
    return "[No speech detected]" if transcript == "" else transcript.strip()
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/integration/markitdown/_transcribe_audio.py",
"license": "GNU General Public License v3.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
xtekky/gpt4free:g4f/Provider/hf_space/Qwen_Qwen_3.py | from __future__ import annotations
import aiohttp
import json
import uuid
from ...typing import AsyncResult, Messages
from ...providers.response import Reasoning, JsonConversation
from ...requests.raise_for_status import raise_for_status
from ...errors import ModelNotFoundError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_last_user_message, get_system_prompt
from ... import debug
class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for the Qwen-3 demo Hugging Face Space (gradio queue API).

    Joins the Space's request queue, then consumes its server-sent-event
    stream, yielding Reasoning objects for "thinking" output and plain
    strings for answer text.
    """
    label = "Qwen Qwen-3"
    url = "https://qwen-qwen3-demo.hf.space"
    api_endpoint = "https://qwen-qwen3-demo.hf.space/gradio_api/queue/join?__theme=system"

    working = True
    supports_stream = True
    supports_system_message = True

    default_model = "qwen-3-235b"
    # Note: declared as a set, so ordering is not guaranteed.
    models = {
        default_model,
        "qwen-3-32b",
        "qwen-3-30b-a3b",
        "qwen-3-14b",
        "qwen-3-8b",
        "qwen-3-4b",
        "qwen-3-1.7b",
        "qwen-3-0.6b",
    }
    # Maps public model names to the Space's internal identifiers.
    model_aliases = {
        "qwen-3-235b": "qwen3-235b-a22b",
        "qwen-3-30b": "qwen3-30b-a3b",
        "qwen-3-32b": "qwen3-32b",
        "qwen-3-14b": "qwen3-14b",
        "qwen-3-4b": "qwen3-4b",
        "qwen-3-1.7b": "qwen3-1.7b",
        "qwen-3-0.6b": "qwen3-0.6b"
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        conversation: JsonConversation = None,
        thinking_budget: int = 38,
        **kwargs
    ) -> AsyncResult:
        """Stream the model's reply for the last user message.

        Yields Reasoning objects while the model is "thinking" and plain
        text chunks for the final answer.
        """
        # NOTE(review): unknown model names are tolerated here, but
        # cls.get_model(model) is called again (uncaught) when building the
        # payload below — confirm the intended unknown-model behavior.
        try:
            model = cls.get_model(model)
        except ModelNotFoundError:
            pass

        # Reuse the gradio session hash across calls of one conversation.
        if conversation is None or not hasattr(conversation, 'session_hash'):
            conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace('-', ''))

        headers_join = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0',
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Referer': f'{cls.url}/?__theme=system',
            'content-type': 'application/json',
            'Origin': cls.url,
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
        }
        system_prompt = get_system_prompt(messages)
        system_prompt = system_prompt if system_prompt else "You are a helpful and harmless assistant."
        # fn_index/trigger_id identify the target gradio event for this Space.
        payload_join = {"data": [
            get_last_user_message(messages),
            {"thinking_budget": thinking_budget, "model": cls.get_model(model), "sys_prompt": system_prompt}, None, None],
            "event_data": None, "fn_index": 13, "trigger_id": 31, "session_hash": conversation.session_hash
        }

        async with aiohttp.ClientSession() as session:
            # Send join request
            async with session.post(cls.api_endpoint, headers=headers_join, json=payload_join, proxy=proxy) as response:
                await raise_for_status(response)
                # Body is parsed only to confirm the join succeeded;
                # the event id itself is not used afterwards.
                (await response.json())['event_id']

            # Prepare data stream request
            url_data = f'{cls.url}/gradio_api/queue/data'
            headers_data = {
                'Accept': 'text/event-stream',
                'Accept-Language': 'en-US,en;q=0.5',
                'Referer': f'{cls.url}/?__theme=system',
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0',
            }
            params_data = {
                'session_hash': conversation.session_hash,
            }

            # Send data stream request
            async with session.get(url_data, headers=headers_data, params=params_data, proxy=proxy) as response:
                # Tracks whether the current chunk belongs to the model's
                # "thinking" output (yielded as Reasoning) or the answer.
                is_thinking = False
                async for line in response.content:
                    decoded_line = line.decode('utf-8')
                    if decoded_line.startswith('data: '):
                        try:
                            json_data = json.loads(decoded_line[6:])
                            # Look for generation stages
                            if json_data.get('msg') == 'process_generating':
                                if 'output' in json_data and 'data' in json_data['output'] and len(
                                        json_data['output']['data']) > 5:
                                    # Element 5 of the gradio payload carries the
                                    # incremental chat updates.
                                    updates = json_data['output']['data'][5]
                                    for update in updates:
                                        if isinstance(update[2], dict):
                                            # Full message objects: "tool" = thinking, "text" = answer.
                                            if update[2].get('type') == 'tool':
                                                yield Reasoning(update[2].get('content'),
                                                                status=update[2].get('options', {}).get('title'))
                                                is_thinking = True
                                            elif update[2].get('type') == 'text':
                                                yield update[2].get('content')
                                                is_thinking = False
                                        elif isinstance(update, list) and isinstance(update[1], list) and len(
                                                update[1]) > 4:
                                            # Positional patch updates: update[1][4] names the
                                            # field being appended, update[2] is the delta.
                                            if update[1][4] == "content":
                                                yield Reasoning(update[2]) if is_thinking else update[2]
                                            elif update[1][4] == "options":
                                                if update[2] != "done":
                                                    yield Reasoning(status=update[2])
                                                is_thinking = False

                            # Check for completion
                            if json_data.get('msg') == 'process_completed':
                                break

                        except json.JSONDecodeError:
                            debug.log("Could not parse JSON:", decoded_line)
| {
"repo_id": "xtekky/gpt4free",
"file_path": "g4f/Provider/hf_space/Qwen_Qwen_3.py",
"license": "GNU General Public License v3.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:ios.py | #!/usr/bin/env python3
"""
Phone Agent iOS CLI - AI-powered iOS phone automation.
Usage:
python ios.py [OPTIONS]
Environment Variables:
PHONE_AGENT_BASE_URL: Model API base URL (default: http://localhost:8000/v1)
PHONE_AGENT_MODEL: Model name (default: autoglm-phone-9b)
PHONE_AGENT_MAX_STEPS: Maximum steps per task (default: 100)
PHONE_AGENT_WDA_URL: WebDriverAgent URL (default: http://localhost:8100)
PHONE_AGENT_DEVICE_ID: iOS device UDID for multi-device setups
"""
import argparse
import os
import shutil
import subprocess
import sys
from urllib.parse import urlparse
from openai import OpenAI
from phone_agent.agent_ios import IOSAgentConfig, IOSPhoneAgent
from phone_agent.config.apps_ios import list_supported_apps
from phone_agent.model import ModelConfig
from phone_agent.xctest import XCTestConnection, list_devices
def check_system_requirements(wda_url: str = "http://localhost:8100") -> bool:
    """
    Check system requirements before running the agent.

    Checks, in order:
    1. libimobiledevice tools installed (and runnable)
    2. At least one iOS device connected
    3. WebDriverAgent is running at ``wda_url``

    The first two checks are fatal: on failure the remaining checks are
    skipped and False is returned immediately.

    Args:
        wda_url: WebDriverAgent URL to check.

    Returns:
        True if all checks pass, False otherwise.
    """
    print("🔍 Checking system requirements...")
    print("-" * 50)
    # Check 1: libimobiledevice installed (fatal -- the device check needs it).
    if not _check_libimobiledevice():
        print("-" * 50)
        print("❌ System check failed. Please fix the issues above.")
        return False
    # Check 2: at least one iOS device connected (fatal -- WDA needs a device).
    if not _check_connected_devices():
        print("-" * 50)
        print("❌ System check failed. Please fix the issues above.")
        return False
    # Check 3: WebDriverAgent reachable.
    all_passed = _check_webdriveragent(wda_url)
    print("-" * 50)
    if all_passed:
        print("✅ All system checks passed!\n")
    else:
        print("❌ System check failed. Please fix the issues above.")
    return all_passed


def _check_libimobiledevice() -> bool:
    """Verify the libimobiledevice CLI tools are installed and runnable."""
    print("1. Checking libimobiledevice installation...", end=" ")
    if shutil.which("idevice_id") is None:
        print("❌ FAILED")
        print(" Error: libimobiledevice is not installed or not in PATH.")
        print(" Solution: Install libimobiledevice:")
        print(" - macOS: brew install libimobiledevice")
        print(" - Linux: sudo apt-get install libimobiledevice-utils")
        return False
    # Double check by actually running idevice_id (PATH hit is not enough).
    try:
        result = subprocess.run(
            ["idevice_id", "-ln"], capture_output=True, text=True, timeout=10
        )
    except FileNotFoundError:
        print("❌ FAILED")
        print(" Error: idevice_id command not found.")
        return False
    except subprocess.TimeoutExpired:
        print("❌ FAILED")
        print(" Error: idevice_id command timed out.")
        return False
    if result.returncode == 0:
        print("✅ OK")
        return True
    print("❌ FAILED")
    print(" Error: idevice_id command failed to run.")
    return False


def _check_connected_devices() -> bool:
    """Verify at least one iOS device is connected; print what was found."""
    print("2. Checking connected iOS devices...", end=" ")
    try:
        devices = list_devices()
    except Exception as e:
        print("❌ FAILED")
        print(f" Error: {e}")
        return False
    if not devices:
        print("❌ FAILED")
        print(" Error: No iOS devices connected.")
        print(" Solution:")
        print(" 1. Connect your iOS device via USB")
        print(" 2. Unlock the device and tap 'Trust This Computer'")
        print(" 3. Verify connection: idevice_id -l")
        print(" 4. Or connect via WiFi using device IP")
        return False
    # Fall back to a truncated UDID when the device has no name.
    device_names = [
        d.device_name or d.device_id[:8] + "..." for d in devices
    ]
    print(f"✅ OK ({len(devices)} device(s): {', '.join(device_names)})")
    return True


def _check_webdriveragent(wda_url: str) -> bool:
    """Verify WebDriverAgent answers at ``wda_url``; print session info or setup help."""
    print(f"3. Checking WebDriverAgent ({wda_url})...", end=" ")
    try:
        conn = XCTestConnection(wda_url=wda_url)
        if conn.is_wda_ready():
            print("✅ OK")
            # Get WDA status for additional info
            status = conn.get_wda_status()
            if status:
                session_id = status.get("sessionId", "N/A")
                print(f" Session ID: {session_id}")
            return True
        print("❌ FAILED")
        print(" Error: WebDriverAgent is not running or not accessible.")
        print(" Solution:")
        print(" 1. Run WebDriverAgent on your iOS device via Xcode")
        print(" 2. For USB: Set up port forwarding: iproxy 8100 8100")
        print(
            " 3. For WiFi: Use device IP, e.g., --wda-url http://192.168.1.100:8100"
        )
        print(" 4. Verify in browser: open http://localhost:8100/status")
        print("\n Quick setup guide:")
        print(
            " git clone https://github.com/appium/WebDriverAgent.git && cd WebDriverAgent"
        )
        print(" ./Scripts/bootstrap.sh")
        print(" open WebDriverAgent.xcodeproj")
        print(" # Configure signing, then Product > Test (Cmd+U)")
        return False
    except Exception as e:
        print("❌ FAILED")
        print(f" Error: {e}")
        return False
def check_model_api(base_url: str, api_key: str, model_name: str) -> bool:
    """
    Check if the model API is accessible and the specified model exists.

    Checks:
    1. Network connectivity to the API endpoint
    2. Model exists in the available models list

    Args:
        base_url: The API base URL
        api_key: API key sent to the endpoint (local servers typically accept "EMPTY")
        model_name: The model name to check

    Returns:
        True if all checks pass, False otherwise.
    """
    print("🔍 Checking model API...")
    print("-" * 50)
    all_passed = True
    # Check 1: Network connectivity
    print(f"1. Checking API connectivity ({base_url})...", end=" ")
    try:
        # Create OpenAI client; listing models doubles as the connectivity probe.
        client = OpenAI(base_url=base_url, api_key=api_key, timeout=10.0)
        models_response = client.models.list()
        available_models = [model.id for model in models_response.data]
        print("✅ OK")
        # Check 2: Model exists
        print(f"2. Checking model '{model_name}'...", end=" ")
        if model_name in available_models:
            print("✅ OK")
        else:
            print("❌ FAILED")
            print(f" Error: Model '{model_name}' not found.")
            print(" Available models:")
            for m in available_models[:10]:  # Show first 10 models
                print(f" - {m}")
            if len(available_models) > 10:
                print(f" ... and {len(available_models) - 10} more")
            all_passed = False
    except Exception as e:
        print("❌ FAILED")
        error_msg = str(e)
        # Map common failure strings to actionable advice.
        if "Connection refused" in error_msg or "Connection error" in error_msg:
            print(f" Error: Cannot connect to {base_url}")
            print(" Solution:")
            print(" 1. Check if the model server is running")
            print(" 2. Verify the base URL is correct")
            print(f" 3. Try: curl {base_url}/models")
        elif "timed out" in error_msg.lower() or "timeout" in error_msg.lower():
            print(f" Error: Connection to {base_url} timed out")
            print(" Solution:")
            print(" 1. Check your network connection")
            print(" 2. Verify the server is responding")
        elif (
            "Name or service not known" in error_msg
            or "nodename nor servname" in error_msg
        ):
            print(" Error: Cannot resolve hostname")
            print(" Solution:")
            print(" 1. Check the URL is correct")
            print(" 2. Verify DNS settings")
        else:
            print(f" Error: {error_msg}")
        all_passed = False
    print("-" * 50)
    if all_passed:
        print("✅ Model API checks passed!\n")
    else:
        print("❌ Model API check failed. Please fix the issues above.")
    return all_passed
def parse_args() -> argparse.Namespace:
    """Parse command line arguments.

    Defaults come from the PHONE_AGENT_* environment variables where
    available (see the module docstring). Returns the parsed Namespace.
    """
    parser = argparse.ArgumentParser(
        description="Phone Agent iOS - AI-powered iOS phone automation",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Run with default settings
  python ios.py
  # Specify model endpoint
  python ios.py --base-url http://localhost:8000/v1
  # Run with specific device
  python ios.py --device-id <UDID>
  # Use WiFi connection
  python ios.py --wda-url http://192.168.1.100:8100
  # List connected devices
  python ios.py --list-devices
  # Check device pairing status
  python ios.py --pair
  # List supported apps
  python ios.py --list-apps
  # Run a specific task
  python ios.py "Open Safari and search for iPhone tips"
""",
    )
    # Model options
    parser.add_argument(
        "--base-url",
        type=str,
        default=os.getenv("PHONE_AGENT_BASE_URL", "http://localhost:8000/v1"),
        help="Model API base URL",
    )
    # NOTE(review): unlike the other model options, --api-key has no
    # PHONE_AGENT_* environment fallback -- confirm intended.
    parser.add_argument(
        "--api-key",
        type=str,
        default="EMPTY",
        help="Model API KEY",
    )
    parser.add_argument(
        "--model",
        type=str,
        default=os.getenv("PHONE_AGENT_MODEL", "autoglm-phone-9b"),
        help="Model name",
    )
    parser.add_argument(
        "--max-steps",
        type=int,
        default=int(os.getenv("PHONE_AGENT_MAX_STEPS", "100")),
        help="Maximum steps per task",
    )
    # iOS Device options
    parser.add_argument(
        "--device-id",
        "-d",
        type=str,
        default=os.getenv("PHONE_AGENT_DEVICE_ID"),
        help="iOS device UDID",
    )
    parser.add_argument(
        "--wda-url",
        type=str,
        default=os.getenv("PHONE_AGENT_WDA_URL", "http://localhost:8100"),
        help="WebDriverAgent URL (default: http://localhost:8100)",
    )
    # Informational flags below cause main() to print and exit early.
    parser.add_argument(
        "--list-devices", action="store_true", help="List connected iOS devices and exit"
    )
    parser.add_argument(
        "--pair",
        action="store_true",
        help="Pair with iOS device (required for some operations)",
    )
    parser.add_argument(
        "--wda-status",
        action="store_true",
        help="Show WebDriverAgent status and exit",
    )
    # Other options
    parser.add_argument(
        "--quiet", "-q", action="store_true", help="Suppress verbose output"
    )
    parser.add_argument(
        "--list-apps", action="store_true", help="List supported apps and exit"
    )
    parser.add_argument(
        "--lang",
        type=str,
        choices=["cn", "en"],
        default=os.getenv("PHONE_AGENT_LANG", "cn"),
        help="Language for system prompt (cn or en, default: cn)",
    )
    # Positional task is optional; omitting it triggers interactive mode.
    parser.add_argument(
        "task",
        nargs="?",
        type=str,
        help="Task to execute (interactive mode if not provided)",
    )
    return parser.parse_args()
def handle_device_commands(args) -> bool:
    """
    Handle iOS device-related commands.

    Processes the informational flags --list-devices, --pair and
    --wda-status; each prints its report to stdout.

    Args:
        args: Parsed argparse.Namespace from parse_args().

    Returns:
        True if a device command was handled (should exit), False otherwise.
    """
    conn = XCTestConnection(wda_url=args.wda_url)
    # Handle --list-devices
    if args.list_devices:
        devices = list_devices()
        if not devices:
            print("No iOS devices connected.")
            print("\nTroubleshooting:")
            print(" 1. Connect device via USB")
            print(" 2. Unlock device and trust this computer")
            print(" 3. Run: idevice_id -l")
        else:
            print("Connected iOS devices:")
            print("-" * 70)
            for device in devices:
                conn_type = device.connection_type.value
                # Fields may be missing for partially-detected devices.
                model_info = f"{device.model}" if device.model else "Unknown"
                ios_info = f"iOS {device.ios_version}" if device.ios_version else ""
                name_info = device.device_name or "Unnamed"
                print(f" ✓ {name_info}")
                print(f" UDID: {device.device_id}")
                print(f" Model: {model_info}")
                print(f" OS: {ios_info}")
                print(f" Connection: {conn_type}")
            print("-" * 70)
        return True
    # Handle --pair
    if args.pair:
        print("Pairing with iOS device...")
        success, message = conn.pair_device(args.device_id)
        print(f"{'✓' if success else '✗'} {message}")
        return True
    # Handle --wda-status
    if args.wda_status:
        print(f"Checking WebDriverAgent status at {args.wda_url}...")
        print("-" * 50)
        if conn.is_wda_ready():
            print("✓ WebDriverAgent is running")
            status = conn.get_wda_status()
            if status:
                print(f"\nStatus details:")
                value = status.get("value", {})
                print(f" Session ID: {status.get('sessionId', 'N/A')}")
                print(f" Build: {value.get('build', {}).get('time', 'N/A')}")
                current_app = value.get("currentApp", {})
                if current_app:
                    print(f"\nCurrent App:")
                    print(f" Bundle ID: {current_app.get('bundleId', 'N/A')}")
                    print(f" Process ID: {current_app.get('pid', 'N/A')}")
        else:
            print("✗ WebDriverAgent is not running")
            print("\nPlease start WebDriverAgent on your iOS device:")
            print(" 1. Open WebDriverAgent.xcodeproj in Xcode")
            print(" 2. Select your device")
            print(" 3. Run WebDriverAgentRunner (Product > Test or Cmd+U)")
            print(f" 4. For USB: Run port forwarding: iproxy 8100 8100")
        return True
    # No device command flag was given; caller should continue normal startup.
    return False
def main():
    """Main entry point.

    Parses arguments, handles informational flags, verifies system
    requirements, builds the agent, then runs one task or an interactive
    loop. Exits with status 1 when system checks fail.
    """
    args = parse_args()
    # Handle --list-apps (no system check needed)
    if args.list_apps:
        print("Supported iOS apps:")
        print("\nNote: For iOS apps, Bundle IDs are configured in:")
        print(" phone_agent/config/apps_ios.py")
        print("\nCurrently configured apps:")
        for app in sorted(list_supported_apps()):
            print(f" - {app}")
        print(
            "\nTo add iOS apps, find the Bundle ID and add to APP_PACKAGES_IOS dictionary."
        )
        return
    # Handle device commands (these may need partial system checks)
    if handle_device_commands(args):
        return
    # Run system requirements check before proceeding
    if not check_system_requirements(wda_url=args.wda_url):
        sys.exit(1)
    # Check model API connectivity and model availability
    # if not check_model_api(args.base_url, args.api_key, args.model):
    #     sys.exit(1)
    # Create configurations
    model_config = ModelConfig(
        base_url=args.base_url,
        model_name=args.model,
        api_key=args.api_key
    )
    agent_config = IOSAgentConfig(
        max_steps=args.max_steps,
        wda_url=args.wda_url,
        device_id=args.device_id,
        verbose=not args.quiet,
        lang=args.lang,
    )
    # Create iOS agent
    agent = IOSPhoneAgent(
        model_config=model_config,
        agent_config=agent_config,
    )
    # Print header
    print("=" * 50)
    print("Phone Agent iOS - AI-powered iOS automation")
    print("=" * 50)
    print(f"Model: {model_config.model_name}")
    print(f"Base URL: {model_config.base_url}")
    print(f"WDA URL: {args.wda_url}")
    print(f"Max Steps: {agent_config.max_steps}")
    print(f"Language: {agent_config.lang}")
    # Show device info
    devices = list_devices()
    if agent_config.device_id:
        print(f"Device: {agent_config.device_id}")
    elif devices:
        # No explicit device requested: report the first detected one.
        device = devices[0]
        print(f"Device: {device.device_name or device.device_id[:16]}")
        print(f" {device.model}, iOS {device.ios_version}")
    print("=" * 50)
    # Run with provided task or enter interactive mode
    if args.task:
        print(f"\nTask: {args.task}\n")
        result = agent.run(args.task)
        print(f"\nResult: {result}")
    else:
        # Interactive mode: one task per prompt, agent state reset between tasks.
        print("\nEntering interactive mode. Type 'quit' to exit.\n")
        while True:
            try:
                task = input("Enter your task: ").strip()
                if task.lower() in ("quit", "exit", "q"):
                    print("Goodbye!")
                    break
                if not task:
                    continue
                print()
                result = agent.run(task)
                print(f"\nResult: {result}\n")
                agent.reset()
            except KeyboardInterrupt:
                print("\n\nInterrupted. Goodbye!")
                break
            except Exception as e:
                # Keep the REPL alive on per-task failures.
                print(f"\nError: {e}\n")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "ios.py",
"license": "Apache License 2.0",
"lines": 457,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/actions/handler_ios.py | """Action handler for iOS automation using WebDriverAgent."""
import time
from dataclasses import dataclass
from typing import Any, Callable
from phone_agent.xctest import (
back,
double_tap,
home,
launch_app,
long_press,
swipe,
tap,
)
from phone_agent.xctest.input import clear_text, hide_keyboard, type_text
@dataclass
class ActionResult:
    """Result of an action execution."""
    success: bool  # True if the action executed without error.
    should_finish: bool  # True if the agent loop should stop after this action.
    message: str | None = None  # Optional human-readable detail (e.g. error text).
    requires_confirmation: bool = False  # Set when the action needs user confirmation.
class IOSActionHandler:
    """
    Handles execution of actions from AI model output for iOS devices.

    Each supported model action ("Tap", "Swipe", "Type", ...) is dispatched
    to a dedicated ``_handle_*`` method that drives the device through
    WebDriverAgent. Element coordinates arrive in a 0-1000 relative space
    and are converted to device pixels before use.

    Args:
        wda_url: WebDriverAgent URL.
        session_id: Optional WDA session ID.
        confirmation_callback: Optional callback for sensitive action confirmation.
            Should return True to proceed, False to cancel.
        takeover_callback: Optional callback for takeover requests (login, captcha).
    """

    def __init__(
        self,
        wda_url: str = "http://localhost:8100",
        session_id: str | None = None,
        confirmation_callback: Callable[[str], bool] | None = None,
        takeover_callback: Callable[[str], None] | None = None,
    ):
        self.wda_url = wda_url
        self.session_id = session_id
        # Fall back to console-based prompts when no callbacks are supplied.
        self.confirmation_callback = confirmation_callback or self._default_confirmation
        self.takeover_callback = takeover_callback or self._default_takeover

    def execute(
        self, action: dict[str, Any], screen_width: int, screen_height: int
    ) -> ActionResult:
        """
        Execute an action from the AI model.

        Args:
            action: The action dictionary from the model.
            screen_width: Current screen width in pixels.
            screen_height: Current screen height in pixels.

        Returns:
            ActionResult indicating success and whether to finish.
        """
        action_type = action.get("_metadata")
        # "finish" ends the task immediately, carrying the model's message.
        if action_type == "finish":
            return ActionResult(
                success=True, should_finish=True, message=action.get("message")
            )
        # Anything other than "do" is a protocol error -> stop the loop.
        if action_type != "do":
            return ActionResult(
                success=False,
                should_finish=True,
                message=f"Unknown action type: {action_type}",
            )
        action_name = action.get("action")
        handler_method = self._get_handler(action_name)
        if handler_method is None:
            return ActionResult(
                success=False,
                should_finish=False,
                message=f"Unknown action: {action_name}",
            )
        try:
            return handler_method(action, screen_width, screen_height)
        except Exception as e:
            # A failed gesture should not kill the task; report and continue.
            return ActionResult(
                success=False, should_finish=False, message=f"Action failed: {e}"
            )

    def _get_handler(self, action_name: str) -> Callable | None:
        """Get the handler method for an action name, or None if unsupported."""
        handlers = {
            "Launch": self._handle_launch,
            "Tap": self._handle_tap,
            "Type": self._handle_type,
            "Type_Name": self._handle_type,
            "Swipe": self._handle_swipe,
            "Back": self._handle_back,
            "Home": self._handle_home,
            "Double Tap": self._handle_double_tap,
            "Long Press": self._handle_long_press,
            "Wait": self._handle_wait,
            "Take_over": self._handle_takeover,
            "Note": self._handle_note,
            "Call_API": self._handle_call_api,
            "Interact": self._handle_interact,
        }
        return handlers.get(action_name)

    def _convert_relative_to_absolute(
        self, element: list[int], screen_width: int, screen_height: int
    ) -> tuple[int, int]:
        """Convert relative coordinates (0-1000) to absolute pixels."""
        x = int(element[0] / 1000 * screen_width)
        y = int(element[1] / 1000 * screen_height)
        return x, y

    def _handle_launch(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle app launch action."""
        app_name = action.get("app")
        if not app_name:
            return ActionResult(False, False, "No app name specified")
        success = launch_app(
            app_name, wda_url=self.wda_url, session_id=self.session_id
        )
        if success:
            return ActionResult(True, False)
        return ActionResult(False, False, f"App not found: {app_name}")

    def _handle_tap(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle tap action; may require user confirmation for sensitive taps."""
        element = action.get("element")
        if not element:
            return ActionResult(False, False, "No element coordinates")
        x, y = self._convert_relative_to_absolute(element, width, height)
        print(f"Physically tap on ({x}, {y})")
        # A "message" key marks a sensitive operation (e.g. payment) --
        # ask the user before performing it.
        if "message" in action:
            if not self.confirmation_callback(action["message"]):
                return ActionResult(
                    success=False,
                    should_finish=True,
                    message="User cancelled sensitive operation",
                )
        tap(x, y, wda_url=self.wda_url, session_id=self.session_id)
        return ActionResult(True, False)

    def _handle_type(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle text input: clear the field, type, then hide the keyboard."""
        text = action.get("text", "")
        # Clear existing text and type new text; short sleeps let the UI settle.
        clear_text(wda_url=self.wda_url, session_id=self.session_id)
        time.sleep(0.5)
        type_text(text, wda_url=self.wda_url, session_id=self.session_id)
        time.sleep(0.5)
        # Hide keyboard after typing
        hide_keyboard(wda_url=self.wda_url, session_id=self.session_id)
        time.sleep(0.5)
        return ActionResult(True, False)

    def _handle_swipe(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle swipe action between two relative coordinates."""
        start = action.get("start")
        end = action.get("end")
        if not start or not end:
            return ActionResult(False, False, "Missing swipe coordinates")
        start_x, start_y = self._convert_relative_to_absolute(start, width, height)
        end_x, end_y = self._convert_relative_to_absolute(end, width, height)
        print(f"Physically scroll from ({start_x}, {start_y}) to ({end_x}, {end_y})")
        swipe(
            start_x,
            start_y,
            end_x,
            end_y,
            wda_url=self.wda_url,
            session_id=self.session_id,
        )
        return ActionResult(True, False)

    def _handle_back(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle back gesture (swipe from left edge)."""
        back(wda_url=self.wda_url, session_id=self.session_id)
        return ActionResult(True, False)

    def _handle_home(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle home button action."""
        home(wda_url=self.wda_url, session_id=self.session_id)
        return ActionResult(True, False)

    def _handle_double_tap(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle double tap action."""
        element = action.get("element")
        if not element:
            return ActionResult(False, False, "No element coordinates")
        x, y = self._convert_relative_to_absolute(element, width, height)
        double_tap(x, y, wda_url=self.wda_url, session_id=self.session_id)
        return ActionResult(True, False)

    def _handle_long_press(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle long press action (fixed 3-second hold)."""
        element = action.get("element")
        if not element:
            return ActionResult(False, False, "No element coordinates")
        x, y = self._convert_relative_to_absolute(element, width, height)
        long_press(
            x,
            y,
            duration=3.0,
            wda_url=self.wda_url,
            session_id=self.session_id,
        )
        return ActionResult(True, False)

    def _handle_wait(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle wait action.

        Accepts "N seconds" strings as well as bare numbers; anything
        unparsable falls back to a 1-second wait.
        """
        raw_duration = action.get("duration", "1 seconds")
        try:
            if isinstance(raw_duration, str):
                duration = float(raw_duration.replace("seconds", "").strip())
            else:
                # Robustness: the model may emit a bare number instead of
                # a string (previously this raised AttributeError and the
                # step was reported as "Action failed").
                duration = float(raw_duration)
        except (ValueError, TypeError):
            duration = 1.0
        time.sleep(duration)
        return ActionResult(True, False)

    def _handle_takeover(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle takeover request (login, captcha, etc.)."""
        message = action.get("message", "User intervention required")
        self.takeover_callback(message)
        return ActionResult(True, False)

    def _handle_note(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle note action (placeholder for content recording)."""
        # This action is typically used for recording page content
        # Implementation depends on specific requirements
        return ActionResult(True, False)

    def _handle_call_api(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle API call action (placeholder for summarization)."""
        # This action is typically used for content summarization
        # Implementation depends on specific requirements
        return ActionResult(True, False)

    def _handle_interact(self, action: dict, width: int, height: int) -> ActionResult:
        """Handle interaction request (user choice needed)."""
        # This action signals that user input is needed
        return ActionResult(True, False, message="User interaction required")

    @staticmethod
    def _default_confirmation(message: str) -> bool:
        """Default confirmation callback using console input."""
        response = input(f"Sensitive operation: {message}\nConfirm? (Y/N): ")
        return response.upper() == "Y"

    @staticmethod
    def _default_takeover(message: str) -> None:
        """Default takeover callback using console input."""
        input(f"{message}\nPress Enter after completing manual operation...")
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/actions/handler_ios.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/agent_ios.py | """iOS PhoneAgent class for orchestrating iOS phone automation."""
import json
import traceback
from dataclasses import dataclass
from typing import Any, Callable
from phone_agent.actions.handler import do, finish, parse_action
from phone_agent.actions.handler_ios import IOSActionHandler
from phone_agent.config import get_messages, get_system_prompt
from phone_agent.model import ModelClient, ModelConfig
from phone_agent.model.client import MessageBuilder
from phone_agent.xctest import XCTestConnection, get_current_app, get_screenshot
@dataclass
class IOSAgentConfig:
    """Configuration for the iOS PhoneAgent."""
    max_steps: int = 100  # Hard cap on agent steps per task.
    wda_url: str = "http://localhost:8100"  # WebDriverAgent endpoint.
    session_id: str | None = None  # Existing WDA session ID; auto-created when None.
    device_id: str | None = None  # iOS device UDID
    lang: str = "cn"  # Language for system prompt/messages ("cn" or "en").
    system_prompt: str | None = None  # Custom system prompt; localized default when None.
    verbose: bool = True  # Print thinking/action traces when True.
    def __post_init__(self):
        # Fall back to the built-in localized system prompt when none was given.
        if self.system_prompt is None:
            self.system_prompt = get_system_prompt(self.lang)
@dataclass
class StepResult:
    """Result of a single agent step."""
    success: bool  # The chosen action executed without error.
    finished: bool  # Task ended (finish action or handler requested stop).
    action: dict[str, Any] | None  # Parsed action dict; None when the model call failed.
    thinking: str  # Model's reasoning text for this step.
    message: str | None = None  # Final/diagnostic message, if any.
class IOSPhoneAgent:
    """
    AI-powered agent for automating iOS phone interactions.

    The agent uses a vision-language model to understand screen content
    and decide on actions to complete user tasks via WebDriverAgent.

    Each step captures a screenshot plus the current app, sends them with
    the conversation context to the model, parses the returned action, and
    executes it on the device. Screenshots are stripped from past context
    messages to bound prompt size.

    Args:
        model_config: Configuration for the AI model.
        agent_config: Configuration for the iOS agent behavior.
        confirmation_callback: Optional callback for sensitive action confirmation.
        takeover_callback: Optional callback for takeover requests.
    Example:
        >>> from phone_agent.agent_ios import IOSPhoneAgent, IOSAgentConfig
        >>> from phone_agent.model import ModelConfig
        >>>
        >>> model_config = ModelConfig(base_url="http://localhost:8000/v1")
        >>> agent_config = IOSAgentConfig(wda_url="http://localhost:8100")
        >>> agent = IOSPhoneAgent(model_config, agent_config)
        >>> agent.run("Open Safari and search for Apple")
    """
    def __init__(
        self,
        model_config: ModelConfig | None = None,
        agent_config: IOSAgentConfig | None = None,
        confirmation_callback: Callable[[str], bool] | None = None,
        takeover_callback: Callable[[str], None] | None = None,
    ):
        self.model_config = model_config or ModelConfig()
        self.agent_config = agent_config or IOSAgentConfig()
        self.model_client = ModelClient(self.model_config)
        # Initialize WDA connection and create session if needed
        self.wda_connection = XCTestConnection(wda_url=self.agent_config.wda_url)
        # Auto-create session if not provided
        if self.agent_config.session_id is None:
            success, session_id = self.wda_connection.start_wda_session()
            # "session_started" is a sentinel meaning no explicit ID was
            # returned -- in that case keep using the default session.
            if success and session_id != "session_started":
                self.agent_config.session_id = session_id
                if self.agent_config.verbose:
                    print(f"✅ Created WDA session: {session_id}")
            elif self.agent_config.verbose:
                print(f"⚠️ Using default WDA session (no explicit session ID)")
        self.action_handler = IOSActionHandler(
            wda_url=self.agent_config.wda_url,
            session_id=self.agent_config.session_id,
            confirmation_callback=confirmation_callback,
            takeover_callback=takeover_callback,
        )
        # Conversation history (system/user/assistant messages) and step counter.
        self._context: list[dict[str, Any]] = []
        self._step_count = 0
    def run(self, task: str) -> str:
        """
        Run the agent to complete a task.

        Resets any previous state, then loops steps until the model
        finishes or max_steps is reached.

        Args:
            task: Natural language description of the task.
        Returns:
            Final message from the agent.
        """
        self._context = []
        self._step_count = 0
        # First step with user prompt
        result = self._execute_step(task, is_first=True)
        if result.finished:
            return result.message or "Task completed"
        # Continue until finished or max steps reached
        while self._step_count < self.agent_config.max_steps:
            result = self._execute_step(is_first=False)
            if result.finished:
                return result.message or "Task completed"
        return "Max steps reached"
    def step(self, task: str | None = None) -> StepResult:
        """
        Execute a single step of the agent.

        Useful for manual control or debugging.

        Args:
            task: Task description (only needed for first step).
        Returns:
            StepResult with step details.
        Raises:
            ValueError: If called without a task before any context exists.
        """
        is_first = len(self._context) == 0
        if is_first and not task:
            raise ValueError("Task is required for the first step")
        return self._execute_step(task, is_first)
    def reset(self) -> None:
        """Reset the agent state for a new task."""
        self._context = []
        self._step_count = 0
    def _execute_step(
        self, user_prompt: str | None = None, is_first: bool = False
    ) -> StepResult:
        """Execute a single step of the agent loop.

        Captures the screen, queries the model, executes the parsed action,
        and appends both sides of the exchange to the context.
        """
        self._step_count += 1
        # Capture current screen state
        screenshot = get_screenshot(
            wda_url=self.agent_config.wda_url,
            session_id=self.agent_config.session_id,
            device_id=self.agent_config.device_id,
        )
        current_app = get_current_app(
            wda_url=self.agent_config.wda_url, session_id=self.agent_config.session_id
        )
        # Build messages: first step carries the system prompt + user task,
        # later steps only the refreshed screen info.
        if is_first:
            self._context.append(
                MessageBuilder.create_system_message(self.agent_config.system_prompt)
            )
            screen_info = MessageBuilder.build_screen_info(current_app)
            text_content = f"{user_prompt}\n\n{screen_info}"
            self._context.append(
                MessageBuilder.create_user_message(
                    text=text_content, image_base64=screenshot.base64_data
                )
            )
        else:
            screen_info = MessageBuilder.build_screen_info(current_app)
            text_content = f"** Screen Info **\n\n{screen_info}"
            self._context.append(
                MessageBuilder.create_user_message(
                    text=text_content, image_base64=screenshot.base64_data
                )
            )
        # Get model response
        try:
            response = self.model_client.request(self._context)
        except Exception as e:
            if self.agent_config.verbose:
                traceback.print_exc()
            # Model failures are terminal for the task.
            return StepResult(
                success=False,
                finished=True,
                action=None,
                thinking="",
                message=f"Model error: {e}",
            )
        # Parse action from response
        try:
            action = parse_action(response.action)
        except ValueError:
            if self.agent_config.verbose:
                traceback.print_exc()
            # Unparsable output: treat the raw text as a finish message.
            action = finish(message=response.action)
        if self.agent_config.verbose:
            # Print thinking process
            msgs = get_messages(self.agent_config.lang)
            print("\n" + "=" * 50)
            print(f"💭 {msgs['thinking']}:")
            print("-" * 50)
            print(response.thinking)
            print("-" * 50)
            print(f"🎯 {msgs['action']}:")
            print(json.dumps(action, ensure_ascii=False, indent=2))
            print("=" * 50 + "\n")
        # Remove image from context to save space
        self._context[-1] = MessageBuilder.remove_images_from_message(self._context[-1])
        # Execute action
        try:
            result = self.action_handler.execute(
                action, screenshot.width, screenshot.height
            )
        except Exception as e:
            if self.agent_config.verbose:
                traceback.print_exc()
            # Execution blew up outside the handler's own guards:
            # convert the error into a finish action so the loop ends cleanly.
            result = self.action_handler.execute(
                finish(message=str(e)), screenshot.width, screenshot.height
            )
        # Add assistant response to context
        self._context.append(
            MessageBuilder.create_assistant_message(
                f"<think>{response.thinking}</think><answer>{response.action}</answer>"
            )
        )
        # Check if finished
        finished = action.get("_metadata") == "finish" or result.should_finish
        if finished and self.agent_config.verbose:
            msgs = get_messages(self.agent_config.lang)
            print("\n" + "🎉 " + "=" * 48)
            print(
                f"✅ {msgs['task_completed']}: {result.message or action.get('message', msgs['done'])}"
            )
            print("=" * 50 + "\n")
        return StepResult(
            success=result.success,
            finished=finished,
            action=action,
            thinking=response.thinking,
            message=result.message or action.get("message"),
        )
    @property
    def context(self) -> list[dict[str, Any]]:
        """Get a copy of the current conversation context."""
        return self._context.copy()
    @property
    def step_count(self) -> int:
        """Get the current step count."""
        return self._step_count
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/agent_ios.py",
"license": "Apache License 2.0",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/xctest/connection.py | """iOS device connection management via idevice tools and WebDriverAgent."""
import subprocess
import time
from dataclasses import dataclass
from enum import Enum
class ConnectionType(Enum):
    """Type of iOS connection."""

    USB = "usb"          # device attached over a USB cable
    NETWORK = "network"  # device reachable over the network (Wi-Fi pairing)
@dataclass
class DeviceInfo:
    """Information about a connected iOS device."""

    device_id: str  # UDID, as reported by `idevice_id`
    status: str  # connection status string, e.g. "connected"
    connection_type: ConnectionType  # USB or NETWORK
    model: str | None = None  # ProductType, e.g. "iPhone14,2" (from ideviceinfo)
    ios_version: str | None = None  # ProductVersion, e.g. "17.1"
    device_name: str | None = None  # user-visible DeviceName
class XCTestConnection:
    """
    Manages connections to iOS devices via libimobiledevice and WebDriverAgent.

    Requires:
    - libimobiledevice (idevice_id, ideviceinfo)
    - WebDriverAgent running on the iOS device
    - ios-deploy (optional, for app installation)

    Example:
        >>> conn = XCTestConnection()
        >>> # List connected devices
        >>> devices = conn.list_devices()
        >>> # Get device info
        >>> info = conn.get_device_info()
        >>> # Check if WDA is running
        >>> is_ready = conn.is_wda_ready()
    """

    def __init__(self, wda_url: str = "http://localhost:8100"):
        """
        Initialize iOS connection manager.

        Args:
            wda_url: WebDriverAgent URL (default: http://localhost:8100).
                For network devices, use http://<device-ip>:8100
        """
        # Normalize so endpoint paths can always be joined with a single "/".
        self.wda_url = wda_url.rstrip("/")

    def list_devices(self) -> list[DeviceInfo]:
        """
        List all connected iOS devices.

        Returns:
            List of DeviceInfo objects.

        Note:
            Requires libimobiledevice to be installed.
            Install on macOS: brew install libimobiledevice
        """
        try:
            # Get list of device UDIDs (one per line on stdout).
            result = subprocess.run(
                ["idevice_id", "-ln"],
                capture_output=True,
                text=True,
                timeout=5,
            )
            devices = []
            for line in result.stdout.strip().split("\n"):
                udid = line.strip()
                if not udid:
                    continue
                # Determine connection type (network devices have specific format)
                # NOTE(review): heuristic — assumes network UDIDs are longer and
                # hyphenated; confirm against actual idevice_id output.
                conn_type = (
                    ConnectionType.NETWORK
                    if "-" in udid and len(udid) > 40
                    else ConnectionType.USB
                )
                # Get detailed device info (best effort; empty dict on failure).
                device_info = self._get_device_details(udid)
                devices.append(
                    DeviceInfo(
                        device_id=udid,
                        status="connected",
                        connection_type=conn_type,
                        model=device_info.get("model"),
                        ios_version=device_info.get("ios_version"),
                        device_name=device_info.get("name"),
                    )
                )
            return devices
        except FileNotFoundError:
            # idevice_id binary missing entirely — tell the user how to fix it.
            print(
                "Error: idevice_id not found. Install libimobiledevice: brew install libimobiledevice"
            )
            return []
        except Exception as e:
            print(f"Error listing devices: {e}")
            return []

    def _get_device_details(self, udid: str) -> dict[str, str]:
        """
        Get detailed information about a specific device.

        Args:
            udid: Device UDID.

        Returns:
            Dictionary with device details (keys: "model", "ios_version",
            "name"); empty dict on any failure.
        """
        try:
            result = subprocess.run(
                ["ideviceinfo", "-u", udid],
                capture_output=True,
                text=True,
                timeout=5,
            )
            info = {}
            # ideviceinfo prints "Key: Value" lines; pick out the few we need.
            for line in result.stdout.split("\n"):
                if ": " in line:
                    key, value = line.split(": ", 1)
                    key = key.strip()
                    value = value.strip()
                    if key == "ProductType":
                        info["model"] = value
                    elif key == "ProductVersion":
                        info["ios_version"] = value
                    elif key == "DeviceName":
                        info["name"] = value
            return info
        except Exception:
            # Best-effort helper: any failure just yields no details.
            return {}

    def get_device_info(self, device_id: str | None = None) -> DeviceInfo | None:
        """
        Get detailed information about a device.

        Args:
            device_id: Device UDID. If None, uses first available device.

        Returns:
            DeviceInfo or None if not found.
        """
        devices = self.list_devices()
        if not devices:
            return None
        if device_id is None:
            # No UDID requested — default to the first enumerated device.
            return devices[0]
        for device in devices:
            if device.device_id == device_id:
                return device
        return None

    def is_connected(self, device_id: str | None = None) -> bool:
        """
        Check if a device is connected.

        Args:
            device_id: Device UDID to check. If None, checks if any device is connected.

        Returns:
            True if connected, False otherwise.
        """
        devices = self.list_devices()
        if not devices:
            return False
        if device_id is None:
            return len(devices) > 0
        return any(d.device_id == device_id for d in devices)

    def is_wda_ready(self, timeout: int = 2) -> bool:
        """
        Check if WebDriverAgent is running and accessible.

        Args:
            timeout: Request timeout in seconds.

        Returns:
            True if WDA is ready, False otherwise.
        """
        try:
            # Imported lazily so the module loads even without requests installed.
            import requests

            response = requests.get(
                f"{self.wda_url}/status", timeout=timeout, verify=False
            )
            return response.status_code == 200
        except ImportError:
            print(
                "Error: requests library not found. Install it: pip install requests"
            )
            return False
        except Exception:
            # Any network/HTTP failure means WDA is not reachable.
            return False

    def start_wda_session(self) -> tuple[bool, str]:
        """
        Start a new WebDriverAgent session.

        Returns:
            Tuple of (success, session_id or error_message).
        """
        try:
            import requests

            response = requests.post(
                f"{self.wda_url}/session",
                json={"capabilities": {}},
                timeout=30,
                verify=False,
            )
            if response.status_code in (200, 201):
                data = response.json()
                # WDA versions differ: sessionId may be top-level or under "value".
                session_id = data.get("sessionId") or data.get("value", {}).get(
                    "sessionId"
                )
                return True, session_id or "session_started"
            else:
                return False, f"Failed to start session: {response.text}"
        except ImportError:
            return (
                False,
                "requests library not found. Install it: pip install requests",
            )
        except Exception as e:
            return False, f"Error starting WDA session: {e}"

    def get_wda_status(self) -> dict | None:
        """
        Get WebDriverAgent status information.

        Returns:
            Status dictionary or None if not available.
        """
        try:
            import requests

            response = requests.get(f"{self.wda_url}/status", timeout=5, verify=False)
            if response.status_code == 200:
                return response.json()
            return None
        except Exception:
            return None

    def pair_device(self, device_id: str | None = None) -> tuple[bool, str]:
        """
        Pair with an iOS device (required for some operations).

        Args:
            device_id: Device UDID. If None, uses first available device.

        Returns:
            Tuple of (success, message).
        """
        try:
            cmd = ["idevicepair"]
            if device_id:
                cmd.extend(["-u", device_id])
            cmd.append("pair")
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
            # idevicepair may report success on either stream; check both.
            output = result.stdout + result.stderr
            if "SUCCESS" in output or "already paired" in output.lower():
                return True, "Device paired successfully"
            else:
                return False, output.strip()
        except FileNotFoundError:
            return (
                False,
                "idevicepair not found. Install libimobiledevice: brew install libimobiledevice",
            )
        except Exception as e:
            return False, f"Error pairing device: {e}"

    def get_device_name(self, device_id: str | None = None) -> str | None:
        """
        Get the device name.

        Args:
            device_id: Device UDID. If None, uses first available device.

        Returns:
            Device name string or None if not found.
        """
        try:
            cmd = ["ideviceinfo"]
            if device_id:
                cmd.extend(["-u", device_id])
            # -k queries a single key instead of dumping everything.
            cmd.extend(["-k", "DeviceName"])
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=5)
            return result.stdout.strip() or None
        except Exception as e:
            print(f"Error getting device name: {e}")
            return None

    def restart_wda(self) -> tuple[bool, str]:
        """
        Restart WebDriverAgent (requires manual restart on device).

        Returns:
            Tuple of (success, message).

        Note:
            This method only checks if WDA needs restart.
            Actual restart requires re-running WDA on the device via Xcode or other means.
        """
        if self.is_wda_ready():
            return True, "WDA is already running"
        else:
            return (
                False,
                "WDA is not running. Please start it manually on the device.",
            )
def quick_connect(wda_url: str = "http://localhost:8100") -> tuple[bool, str]:
    """
    Quick helper to check iOS device connection and WDA status.

    Args:
        wda_url: WebDriverAgent URL.

    Returns:
        Tuple of (success, message).
    """
    checker = XCTestConnection(wda_url=wda_url)

    # A usable setup needs both a device and a reachable WDA.
    if checker.is_connected():
        if checker.is_wda_ready():
            return True, "iOS device connected and WDA ready"
        return False, "WebDriverAgent is not running"
    return False, "No iOS device connected"
def list_devices() -> list[DeviceInfo]:
    """
    Quick helper to list connected iOS devices.

    Returns:
        List of DeviceInfo objects.
    """
    # Delegate to a throwaway connection manager with default settings.
    return XCTestConnection().list_devices()
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/xctest/connection.py",
"license": "Apache License 2.0",
"lines": 300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
zai-org/Open-AutoGLM:phone_agent/xctest/device.py | """Device control utilities for iOS automation via WebDriverAgent."""
import subprocess
import time
from typing import Optional
from phone_agent.config.apps_ios import APP_PACKAGES_IOS as APP_PACKAGES
SCALE_FACTOR = 3 # 3 for most modern iPhone
def _get_wda_session_url(wda_url: str, session_id: str | None, endpoint: str) -> str:
"""
Get the correct WDA URL for a session endpoint.
Args:
wda_url: Base WDA URL.
session_id: Optional session ID.
endpoint: The endpoint path.
Returns:
Full URL for the endpoint.
"""
base = wda_url.rstrip("/")
if session_id:
return f"{base}/session/{session_id}/{endpoint}"
else:
# Try to use WDA endpoints without session when possible
return f"{base}/{endpoint}"
def get_current_app(
wda_url: str = "http://localhost:8100", session_id: str | None = None
) -> str:
"""
Get the currently active app bundle ID and name.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
Returns:
The app name if recognized, otherwise "System Home".
"""
try:
import requests
# Get active app info from WDA using activeAppInfo endpoint
response = requests.get(
f"{wda_url.rstrip('/')}/wda/activeAppInfo", timeout=5, verify=False
)
if response.status_code == 200:
data = response.json()
# Extract bundle ID from response
# Response format: {"value": {"bundleId": "com.apple.AppStore", "name": "", "pid": 825, "processArguments": {...}}, "sessionId": "..."}
value = data.get("value", {})
bundle_id = value.get("bundleId", "")
if bundle_id:
# Try to find app name from bundle ID
for app_name, package in APP_PACKAGES.items():
if package == bundle_id:
return app_name
return "System Home"
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error getting current app: {e}")
return "System Home"
def tap(
x: int,
y: int,
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
delay: float = 1.0,
) -> None:
"""
Tap at the specified coordinates using WebDriver W3C Actions API.
Args:
x: X coordinate.
y: Y coordinate.
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
delay: Delay in seconds after tap.
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "actions")
# W3C WebDriver Actions API for tap/click
actions = {
"actions": [
{
"type": "pointer",
"id": "finger1",
"parameters": {"pointerType": "touch"},
"actions": [
{"type": "pointerMove", "duration": 0, "x": x / SCALE_FACTOR, "y": y / SCALE_FACTOR},
{"type": "pointerDown", "button": 0},
{"type": "pause", "duration": 0.1},
{"type": "pointerUp", "button": 0},
],
}
]
}
requests.post(url, json=actions, timeout=15, verify=False)
time.sleep(delay)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error tapping: {e}")
def double_tap(
x: int,
y: int,
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
delay: float = 1.0,
) -> None:
"""
Double tap at the specified coordinates using WebDriver W3C Actions API.
Args:
x: X coordinate.
y: Y coordinate.
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
delay: Delay in seconds after double tap.
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "actions")
# W3C WebDriver Actions API for double tap
actions = {
"actions": [
{
"type": "pointer",
"id": "finger1",
"parameters": {"pointerType": "touch"},
"actions": [
{"type": "pointerMove", "duration": 0, "x": x / SCALE_FACTOR, "y": y / SCALE_FACTOR},
{"type": "pointerDown", "button": 0},
{"type": "pause", "duration": 100},
{"type": "pointerUp", "button": 0},
{"type": "pause", "duration": 100},
{"type": "pointerDown", "button": 0},
{"type": "pause", "duration": 100},
{"type": "pointerUp", "button": 0},
],
}
]
}
requests.post(url, json=actions, timeout=10, verify=False)
time.sleep(delay)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error double tapping: {e}")
def long_press(
x: int,
y: int,
duration: float = 3.0,
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
delay: float = 1.0,
) -> None:
"""
Long press at the specified coordinates using WebDriver W3C Actions API.
Args:
x: X coordinate.
y: Y coordinate.
duration: Duration of press in seconds.
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
delay: Delay in seconds after long press.
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "actions")
# W3C WebDriver Actions API for long press
# Convert duration to milliseconds
duration_ms = int(duration * 1000)
actions = {
"actions": [
{
"type": "pointer",
"id": "finger1",
"parameters": {"pointerType": "touch"},
"actions": [
{"type": "pointerMove", "duration": 0, "x": x / SCALE_FACTOR, "y": y / SCALE_FACTOR},
{"type": "pointerDown", "button": 0},
{"type": "pause", "duration": duration_ms},
{"type": "pointerUp", "button": 0},
],
}
]
}
requests.post(url, json=actions, timeout=int(duration + 10), verify=False)
time.sleep(delay)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error long pressing: {e}")
def swipe(
start_x: int,
start_y: int,
end_x: int,
end_y: int,
duration: float | None = None,
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
delay: float = 1.0,
) -> None:
"""
Swipe from start to end coordinates using WDA dragfromtoforduration endpoint.
Args:
start_x: Starting X coordinate.
start_y: Starting Y coordinate.
end_x: Ending X coordinate.
end_y: Ending Y coordinate.
duration: Duration of swipe in seconds (auto-calculated if None).
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
delay: Delay in seconds after swipe.
"""
try:
import requests
if duration is None:
# Calculate duration based on distance
dist_sq = (start_x - end_x) ** 2 + (start_y - end_y) ** 2
duration = dist_sq / 1000000 # Convert to seconds
duration = max(0.3, min(duration, 2.0)) # Clamp between 0.3-2 seconds
url = _get_wda_session_url(wda_url, session_id, "wda/dragfromtoforduration")
# WDA dragfromtoforduration API payload
payload = {
"fromX": start_x / SCALE_FACTOR,
"fromY": start_y / SCALE_FACTOR,
"toX": end_x / SCALE_FACTOR,
"toY": end_y / SCALE_FACTOR,
"duration": duration,
}
requests.post(url, json=payload, timeout=int(duration + 10), verify=False)
time.sleep(delay)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error swiping: {e}")
def back(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
delay: float = 1.0,
) -> None:
"""
Navigate back (swipe from left edge).
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
delay: Delay in seconds after navigation.
Note:
iOS doesn't have a universal back button. This simulates a back gesture
by swiping from the left edge of the screen.
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "wda/dragfromtoforduration")
# Swipe from left edge to simulate back gesture
payload = {
"fromX": 0,
"fromY": 640,
"toX": 400,
"toY": 640,
"duration": 0.3,
}
requests.post(url, json=payload, timeout=10, verify=False)
time.sleep(delay)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error performing back gesture: {e}")
def home(
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
delay: float = 1.0,
) -> None:
"""
Press the home button.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
delay: Delay in seconds after pressing home.
"""
try:
import requests
url = f"{wda_url.rstrip('/')}/wda/homescreen"
requests.post(url, timeout=10, verify=False)
time.sleep(delay)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error pressing home: {e}")
def launch_app(
    app_name: str,
    wda_url: str = "http://localhost:8100",
    session_id: str | None = None,
    delay: float = 1.0,
) -> bool:
    """
    Launch an app by its human-readable name.

    Args:
        app_name: The app name (must be a key in APP_PACKAGES).
        wda_url: WebDriverAgent URL.
        session_id: Optional WDA session ID.
        delay: Seconds to wait after launching.

    Returns:
        True if the launch request succeeded, False if the app is unknown
        or the request failed.
    """
    # Unknown names fail fast without touching the network.
    if app_name not in APP_PACKAGES:
        return False
    try:
        import requests

        resp = requests.post(
            _get_wda_session_url(wda_url, session_id, "wda/apps/launch"),
            json={"bundleId": APP_PACKAGES[app_name]},
            timeout=10,
            verify=False,
        )
        time.sleep(delay)
        return resp.status_code in (200, 201)
    except ImportError:
        print("Error: requests library required. Install: pip install requests")
        return False
    except Exception as e:
        print(f"Error launching app: {e}")
        return False
def get_screen_size(
wda_url: str = "http://localhost:8100", session_id: str | None = None
) -> tuple[int, int]:
"""
Get the screen dimensions.
Args:
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
Returns:
Tuple of (width, height). Returns (375, 812) as default if unable to fetch.
"""
try:
import requests
url = _get_wda_session_url(wda_url, session_id, "window/size")
response = requests.get(url, timeout=5, verify=False)
if response.status_code == 200:
data = response.json()
value = data.get("value", {})
width = value.get("width", 375)
height = value.get("height", 812)
return width, height
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error getting screen size: {e}")
# Default iPhone screen size (iPhone X and later)
return 375, 812
def press_button(
button_name: str,
wda_url: str = "http://localhost:8100",
session_id: str | None = None,
delay: float = 1.0,
) -> None:
"""
Press a physical button.
Args:
button_name: Button name (e.g., "home", "volumeUp", "volumeDown").
wda_url: WebDriverAgent URL.
session_id: Optional WDA session ID.
delay: Delay in seconds after pressing.
"""
try:
import requests
url = f"{wda_url.rstrip('/')}/wda/pressButton"
requests.post(url, json={"name": button_name}, timeout=10, verify=False)
time.sleep(delay)
except ImportError:
print("Error: requests library required. Install: pip install requests")
except Exception as e:
print(f"Error pressing button: {e}")
| {
"repo_id": "zai-org/Open-AutoGLM",
"file_path": "phone_agent/xctest/device.py",
"license": "Apache License 2.0",
"lines": 367,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.