Spaces:
Paused
Paused
Upload 64 files
Browse files- cogs/gemini.py +976 -996
- cogs/openai.py +13 -4
- main.py +407 -429
- static/js/presets.js +16 -10
- templates/presets.html +23 -14
- utils/context_prompter.py +301 -311
cogs/gemini.py
CHANGED
|
@@ -1,996 +1,976 @@
|
|
| 1 |
-
from io import BytesIO
|
| 2 |
-
from discord.ext import commands
|
| 3 |
-
import discord
|
| 4 |
-
from google import genai
|
| 5 |
-
from google.genai import types
|
| 6 |
-
import random
|
| 7 |
-
from aiohttp import ClientSession
|
| 8 |
-
from utils.decorator import auto_delete
|
| 9 |
-
from utils.func import async_iter, async_do_thread
|
| 10 |
-
from utils.color_printer import cpr
|
| 11 |
-
from utils.config import config
|
| 12 |
-
from utils.context_prompter import ContextPrompter
|
| 13 |
-
from utils.logger import logger
|
| 14 |
-
from datetime import datetime
|
| 15 |
-
import PIL.Image
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
self.
|
| 28 |
-
self.
|
| 29 |
-
self.
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
self.
|
| 35 |
-
|
| 36 |
-
self.
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
)
|
| 78 |
-
self.
|
| 79 |
-
self.
|
| 80 |
-
self.
|
| 81 |
-
|
| 82 |
-
self.openai_api_key
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
self.
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
"
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
def
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
if
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
#
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
if
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
#
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
),
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
|
| 393 |
-
|
| 394 |
-
|
| 395 |
-
|
| 396 |
-
|
| 397 |
-
|
| 398 |
-
|
| 399 |
-
|
| 400 |
-
|
| 401 |
-
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
self,
|
| 417 |
-
ctx
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
| 432 |
-
|
| 433 |
-
|
| 434 |
-
|
| 435 |
-
|
| 436 |
-
|
| 437 |
-
|
| 438 |
-
|
| 439 |
-
|
| 440 |
-
|
| 441 |
-
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
|
| 445 |
-
|
| 446 |
-
|
| 447 |
-
|
| 448 |
-
|
| 449 |
-
|
| 450 |
-
|
| 451 |
-
|
| 452 |
-
|
| 453 |
-
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
|
| 470 |
-
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
| 474 |
-
|
| 475 |
-
|
| 476 |
-
|
| 477 |
-
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
|
| 481 |
-
|
| 482 |
-
|
| 483 |
-
|
| 484 |
-
|
| 485 |
-
|
| 486 |
-
|
| 487 |
-
|
| 488 |
-
|
| 489 |
-
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
| 495 |
-
|
| 496 |
-
|
| 497 |
-
|
| 498 |
-
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
ctx.
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
|
| 521 |
-
|
| 522 |
-
|
| 523 |
-
|
| 524 |
-
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
|
| 529 |
-
|
| 530 |
-
|
| 531 |
-
|
| 532 |
-
|
| 533 |
-
|
| 534 |
-
|
| 535 |
-
|
| 536 |
-
|
| 537 |
-
|
| 538 |
-
|
| 539 |
-
|
| 540 |
-
|
| 541 |
-
|
| 542 |
-
|
| 543 |
-
|
| 544 |
-
|
| 545 |
-
|
| 546 |
-
#
|
| 547 |
-
|
| 548 |
-
|
| 549 |
-
|
| 550 |
-
|
| 551 |
-
|
| 552 |
-
|
| 553 |
-
|
| 554 |
-
|
| 555 |
-
|
| 556 |
-
|
| 557 |
-
|
| 558 |
-
|
| 559 |
-
|
| 560 |
-
|
| 561 |
-
|
| 562 |
-
|
| 563 |
-
|
| 564 |
-
|
| 565 |
-
|
| 566 |
-
|
| 567 |
-
|
| 568 |
-
|
| 569 |
-
|
| 570 |
-
|
| 571 |
-
|
| 572 |
-
|
| 573 |
-
|
| 574 |
-
|
| 575 |
-
|
| 576 |
-
|
| 577 |
-
|
| 578 |
-
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
|
| 584 |
-
|
| 585 |
-
|
| 586 |
-
|
| 587 |
-
|
| 588 |
-
|
| 589 |
-
|
| 590 |
-
|
| 591 |
-
|
| 592 |
-
|
| 593 |
-
|
| 594 |
-
|
| 595 |
-
|
| 596 |
-
|
| 597 |
-
|
| 598 |
-
|
| 599 |
-
|
| 600 |
-
|
| 601 |
-
|
| 602 |
-
|
| 603 |
-
|
| 604 |
-
|
| 605 |
-
|
| 606 |
-
|
| 607 |
-
|
| 608 |
-
|
| 609 |
-
|
| 610 |
-
|
| 611 |
-
|
| 612 |
-
|
| 613 |
-
|
| 614 |
-
|
| 615 |
-
|
| 616 |
-
|
| 617 |
-
|
| 618 |
-
|
| 619 |
-
|
| 620 |
-
|
| 621 |
-
|
| 622 |
-
|
| 623 |
-
|
| 624 |
-
|
| 625 |
-
|
| 626 |
-
|
| 627 |
-
|
| 628 |
-
|
| 629 |
-
|
| 630 |
-
|
| 631 |
-
|
| 632 |
-
|
| 633 |
-
|
| 634 |
-
|
| 635 |
-
|
| 636 |
-
|
| 637 |
-
|
| 638 |
-
|
| 639 |
-
|
| 640 |
-
|
| 641 |
-
|
| 642 |
-
|
| 643 |
-
|
| 644 |
-
|
| 645 |
-
|
| 646 |
-
|
| 647 |
-
|
| 648 |
-
|
| 649 |
-
|
| 650 |
-
|
| 651 |
-
|
| 652 |
-
|
| 653 |
-
|
| 654 |
-
|
| 655 |
-
|
| 656 |
-
|
| 657 |
-
|
| 658 |
-
|
| 659 |
-
|
| 660 |
-
|
| 661 |
-
|
| 662 |
-
|
| 663 |
-
|
| 664 |
-
|
| 665 |
-
|
| 666 |
-
|
| 667 |
-
|
| 668 |
-
|
| 669 |
-
|
| 670 |
-
|
| 671 |
-
|
| 672 |
-
|
| 673 |
-
|
| 674 |
-
|
| 675 |
-
|
| 676 |
-
|
| 677 |
-
|
| 678 |
-
|
| 679 |
-
|
| 680 |
-
|
| 681 |
-
|
| 682 |
-
|
| 683 |
-
|
| 684 |
-
|
| 685 |
-
|
| 686 |
-
|
| 687 |
-
|
| 688 |
-
|
| 689 |
-
|
| 690 |
-
|
| 691 |
-
|
| 692 |
-
|
| 693 |
-
|
| 694 |
-
|
| 695 |
-
|
| 696 |
-
|
| 697 |
-
|
| 698 |
-
|
| 699 |
-
|
| 700 |
-
|
| 701 |
-
|
| 702 |
-
|
| 703 |
-
|
| 704 |
-
|
| 705 |
-
|
| 706 |
-
|
| 707 |
-
|
| 708 |
-
|
| 709 |
-
|
| 710 |
-
|
| 711 |
-
|
| 712 |
-
|
| 713 |
-
|
| 714 |
-
|
| 715 |
-
|
| 716 |
-
|
| 717 |
-
types.
|
| 718 |
-
|
| 719 |
-
|
| 720 |
-
|
| 721 |
-
|
| 722 |
-
|
| 723 |
-
|
| 724 |
-
|
| 725 |
-
|
| 726 |
-
|
| 727 |
-
|
| 728 |
-
|
| 729 |
-
|
| 730 |
-
|
| 731 |
-
|
| 732 |
-
|
| 733 |
-
|
| 734 |
-
|
| 735 |
-
|
| 736 |
-
types.
|
| 737 |
-
|
| 738 |
-
|
| 739 |
-
|
| 740 |
-
|
| 741 |
-
|
| 742 |
-
)
|
| 743 |
-
|
| 744 |
-
|
| 745 |
-
|
| 746 |
-
|
| 747 |
-
|
| 748 |
-
|
| 749 |
-
|
| 750 |
-
|
| 751 |
-
|
| 752 |
-
|
| 753 |
-
|
| 754 |
-
|
| 755 |
-
|
| 756 |
-
|
| 757 |
-
|
| 758 |
-
),
|
| 759 |
-
|
| 760 |
-
|
| 761 |
-
|
| 762 |
-
|
| 763 |
-
|
| 764 |
-
|
| 765 |
-
|
| 766 |
-
|
| 767 |
-
|
| 768 |
-
|
| 769 |
-
|
| 770 |
-
|
| 771 |
-
|
| 772 |
-
|
| 773 |
-
|
| 774 |
-
|
| 775 |
-
|
| 776 |
-
|
| 777 |
-
|
| 778 |
-
|
| 779 |
-
|
| 780 |
-
|
| 781 |
-
|
| 782 |
-
|
| 783 |
-
|
| 784 |
-
|
| 785 |
-
|
| 786 |
-
|
| 787 |
-
|
| 788 |
-
|
| 789 |
-
|
| 790 |
-
|
| 791 |
-
|
| 792 |
-
|
| 793 |
-
|
| 794 |
-
|
| 795 |
-
|
| 796 |
-
|
| 797 |
-
|
| 798 |
-
|
| 799 |
-
|
| 800 |
-
|
| 801 |
-
|
| 802 |
-
|
| 803 |
-
|
| 804 |
-
|
| 805 |
-
|
| 806 |
-
|
| 807 |
-
|
| 808 |
-
|
| 809 |
-
|
| 810 |
-
|
| 811 |
-
|
| 812 |
-
|
| 813 |
-
|
| 814 |
-
|
| 815 |
-
|
| 816 |
-
|
| 817 |
-
|
| 818 |
-
|
| 819 |
-
|
| 820 |
-
|
| 821 |
-
|
| 822 |
-
|
| 823 |
-
|
| 824 |
-
|
| 825 |
-
|
| 826 |
-
|
| 827 |
-
|
| 828 |
-
|
| 829 |
-
|
| 830 |
-
|
| 831 |
-
|
| 832 |
-
|
| 833 |
-
|
| 834 |
-
|
| 835 |
-
|
| 836 |
-
|
| 837 |
-
|
| 838 |
-
|
| 839 |
-
|
| 840 |
-
|
| 841 |
-
|
| 842 |
-
|
| 843 |
-
|
| 844 |
-
|
| 845 |
-
|
| 846 |
-
|
| 847 |
-
|
| 848 |
-
|
| 849 |
-
|
| 850 |
-
|
| 851 |
-
|
| 852 |
-
|
| 853 |
-
|
| 854 |
-
|
| 855 |
-
|
| 856 |
-
@
|
| 857 |
-
|
| 858 |
-
|
| 859 |
-
|
| 860 |
-
|
| 861 |
-
|
| 862 |
-
|
| 863 |
-
|
| 864 |
-
|
| 865 |
-
|
| 866 |
-
|
| 867 |
-
)
|
| 868 |
-
|
| 869 |
-
|
| 870 |
-
|
| 871 |
-
|
| 872 |
-
|
| 873 |
-
|
| 874 |
-
|
| 875 |
-
|
| 876 |
-
|
| 877 |
-
|
| 878 |
-
|
| 879 |
-
|
| 880 |
-
|
| 881 |
-
|
| 882 |
-
|
| 883 |
-
|
| 884 |
-
|
| 885 |
-
|
| 886 |
-
|
| 887 |
-
|
| 888 |
-
|
| 889 |
-
|
| 890 |
-
|
| 891 |
-
|
| 892 |
-
|
| 893 |
-
|
| 894 |
-
|
| 895 |
-
|
| 896 |
-
|
| 897 |
-
|
| 898 |
-
|
| 899 |
-
|
| 900 |
-
|
| 901 |
-
|
| 902 |
-
|
| 903 |
-
|
| 904 |
-
|
| 905 |
-
|
| 906 |
-
|
| 907 |
-
|
| 908 |
-
|
| 909 |
-
|
| 910 |
-
|
| 911 |
-
|
| 912 |
-
|
| 913 |
-
|
| 914 |
-
|
| 915 |
-
|
| 916 |
-
|
| 917 |
-
|
| 918 |
-
|
| 919 |
-
|
| 920 |
-
|
| 921 |
-
|
| 922 |
-
|
| 923 |
-
|
| 924 |
-
|
| 925 |
-
|
| 926 |
-
|
| 927 |
-
|
| 928 |
-
|
| 929 |
-
|
| 930 |
-
|
| 931 |
-
|
| 932 |
-
|
| 933 |
-
|
| 934 |
-
|
| 935 |
-
|
| 936 |
-
|
| 937 |
-
|
| 938 |
-
|
| 939 |
-
|
| 940 |
-
|
| 941 |
-
|
| 942 |
-
|
| 943 |
-
|
| 944 |
-
|
| 945 |
-
|
| 946 |
-
|
| 947 |
-
|
| 948 |
-
|
| 949 |
-
|
| 950 |
-
|
| 951 |
-
|
| 952 |
-
|
| 953 |
-
|
| 954 |
-
|
| 955 |
-
|
| 956 |
-
|
| 957 |
-
|
| 958 |
-
|
| 959 |
-
|
| 960 |
-
|
| 961 |
-
|
| 962 |
-
|
| 963 |
-
|
| 964 |
-
|
| 965 |
-
|
| 966 |
-
|
| 967 |
-
|
| 968 |
-
|
| 969 |
-
|
| 970 |
-
|
| 971 |
-
|
| 972 |
-
|
| 973 |
-
|
| 974 |
-
|
| 975 |
-
|
| 976 |
-
|
| 977 |
-
for setting in gemini_config.get("safety_settings", []):
|
| 978 |
-
category = getattr(types.HarmCategory, setting["category"])
|
| 979 |
-
threshold = getattr(types.HarmBlockThreshold, setting["threshold"])
|
| 980 |
-
safety_settings.append(types.SafetySetting(
|
| 981 |
-
category=category,
|
| 982 |
-
threshold=threshold
|
| 983 |
-
))
|
| 984 |
-
|
| 985 |
-
cog.default_gemini_config = types.GenerateContentConfig(
|
| 986 |
-
system_instruction=gemini_config.get("system_instruction", ""),
|
| 987 |
-
top_k=gemini_config.get("top_k", 55),
|
| 988 |
-
top_p=gemini_config.get("top_p", 0.95),
|
| 989 |
-
temperature=gemini_config.get("temperature", 1.3),
|
| 990 |
-
safety_settings=safety_settings
|
| 991 |
-
)
|
| 992 |
-
except Exception as e:
|
| 993 |
-
print(f"Error loading Gemini config: {e}")
|
| 994 |
-
|
| 995 |
-
await bot.add_cog(cog)
|
| 996 |
-
print(cpr.success("Cog loaded: Gemini"))
|
|
|
|
| 1 |
+
from io import BytesIO
|
| 2 |
+
from discord.ext import commands
|
| 3 |
+
import discord
|
| 4 |
+
from google import genai
|
| 5 |
+
from google.genai import types
|
| 6 |
+
import random
|
| 7 |
+
from aiohttp import ClientSession
|
| 8 |
+
from utils.decorator import auto_delete
|
| 9 |
+
from utils.func import async_iter, async_do_thread
|
| 10 |
+
from utils.color_printer import cpr
|
| 11 |
+
from utils.config import config
|
| 12 |
+
from utils.context_prompter import ContextPrompter
|
| 13 |
+
from utils.logger import logger
|
| 14 |
+
from datetime import datetime
|
| 15 |
+
import PIL.Image
|
| 16 |
+
import json
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class Gemini(commands.Cog):
|
| 20 |
+
def __init__(
|
| 21 |
+
self,
|
| 22 |
+
bot: commands.Bot,
|
| 23 |
+
webhook: discord.Webhook,
|
| 24 |
+
):
|
| 25 |
+
self.bot = bot
|
| 26 |
+
self.conversations = {}
|
| 27 |
+
self.apikeys = config.get("gemini_keys")
|
| 28 |
+
self.current_key = config.get("current_key")
|
| 29 |
+
self.num = len(self.apikeys)
|
| 30 |
+
|
| 31 |
+
# 确保chat_channels中的键全部为字符串
|
| 32 |
+
self.update_chat_channels()
|
| 33 |
+
|
| 34 |
+
self.config = config
|
| 35 |
+
self.context_length = 20
|
| 36 |
+
self.target_language = config.get("target_language")
|
| 37 |
+
|
| 38 |
+
# 获取Gemini模型配置
|
| 39 |
+
self.gemini_models = config.get("gemini_models", {
|
| 40 |
+
"chat": "gemini-2.0-pro-exp-02-05", # 默认聊天模型
|
| 41 |
+
"translate": "gemini-2.0-pro-exp-02-05" # 默认翻译模型
|
| 42 |
+
})
|
| 43 |
+
|
| 44 |
+
# 如果配置中没有gemini_models,写入默认配置
|
| 45 |
+
if not config.get("gemini_models"):
|
| 46 |
+
config.write("gemini_models", self.gemini_models)
|
| 47 |
+
|
| 48 |
+
self.default_gemini_config = types.GenerateContentConfig(
|
| 49 |
+
system_instruction="",
|
| 50 |
+
top_k=55,
|
| 51 |
+
top_p=0.95,
|
| 52 |
+
temperature=1.3,
|
| 53 |
+
safety_settings=[
|
| 54 |
+
types.SafetySetting(
|
| 55 |
+
category=types.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY,
|
| 56 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 57 |
+
),
|
| 58 |
+
types.SafetySetting(
|
| 59 |
+
category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
|
| 60 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 61 |
+
),
|
| 62 |
+
types.SafetySetting(
|
| 63 |
+
category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
|
| 64 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 65 |
+
),
|
| 66 |
+
types.SafetySetting(
|
| 67 |
+
category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
|
| 68 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 69 |
+
),
|
| 70 |
+
types.SafetySetting(
|
| 71 |
+
category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
|
| 72 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 73 |
+
),
|
| 74 |
+
],
|
| 75 |
+
)
|
| 76 |
+
self.webhook = webhook
|
| 77 |
+
self.context_prompter = ContextPrompter()
|
| 78 |
+
self.non_gemini_model = None # for openai model
|
| 79 |
+
self.openai_api_key = config.get("openai_api_key")
|
| 80 |
+
self.openai_endpoint = config.get("openai_endpoint")
|
| 81 |
+
|
| 82 |
+
if self.openai_api_key is not None and self.openai_endpoint is not None:
|
| 83 |
+
print(cpr.info("OpenAI API available."))
|
| 84 |
+
|
| 85 |
+
def update_chat_channels(self):
|
| 86 |
+
"""更新聊天频道配置"""
|
| 87 |
+
# 获取所有服务器配置
|
| 88 |
+
self.servers = config.get("servers", {})
|
| 89 |
+
|
| 90 |
+
# 更新Gemini模型配置
|
| 91 |
+
self.gemini_models = config.get("gemini_models", {
|
| 92 |
+
"chat": "gemini-2.0-pro-exp-02-05",
|
| 93 |
+
"translate": "gemini-2.0-pro-exp-02-05"
|
| 94 |
+
})
|
| 95 |
+
|
| 96 |
+
# 获取可用的Gemini模型列表
|
| 97 |
+
self.available_models = config.get("gemini_available_models", [])
|
| 98 |
+
if not self.available_models:
|
| 99 |
+
# 设置默认模型列表
|
| 100 |
+
self.available_models = [
|
| 101 |
+
{"name": "gemini-2.0-pro-exp-02-05", "description": "默认聊天和翻译模型"},
|
| 102 |
+
{"name": "gemini-pro", "description": "旧版Gemini Pro模型"}
|
| 103 |
+
]
|
| 104 |
+
config.write("gemini_available_models", self.available_models)
|
| 105 |
+
|
| 106 |
+
print(f"Gemini cog 已更新服务器配置和模型列表")
|
| 107 |
+
|
| 108 |
+
def get_channel_config(self, guild_id: str, channel_id: str):
|
| 109 |
+
"""获取频道配置"""
|
| 110 |
+
server_name, server_config = config.get_server_config(guild_id)
|
| 111 |
+
if not server_config:
|
| 112 |
+
return None
|
| 113 |
+
return server_config.get("chat_channels", {}).get(channel_id)
|
| 114 |
+
|
| 115 |
+
def get_next_key(self):
|
| 116 |
+
self.current_key = (self.current_key + 1) % self.num
|
| 117 |
+
config.write("current_key", self.current_key)
|
| 118 |
+
return self.apikeys[self.current_key]
|
| 119 |
+
|
| 120 |
+
def get_random_key(self):
|
| 121 |
+
return self.apikeys[random.randint(0, self.num - 1)]
|
| 122 |
+
|
| 123 |
+
async def request_gemini(
|
| 124 |
+
self,
|
| 125 |
+
ctx: commands.Context,
|
| 126 |
+
prompt: str,
|
| 127 |
+
model_config: types.GenerateContentConfig = None,
|
| 128 |
+
model="gemini-2.0-pro-exp-02-05",
|
| 129 |
+
username=None,
|
| 130 |
+
extra_attachment: discord.Attachment = None,
|
| 131 |
+
):
|
| 132 |
+
if model_config is None:
|
| 133 |
+
model_config = self.default_gemini_config
|
| 134 |
+
|
| 135 |
+
# 获取服务器和频道配置
|
| 136 |
+
guild_id = str(ctx.guild.id)
|
| 137 |
+
channel_id = str(ctx.channel.id)
|
| 138 |
+
channel_config = self.get_channel_config(guild_id, channel_id)
|
| 139 |
+
|
| 140 |
+
if not channel_config:
|
| 141 |
+
await ctx.send("此频道未配置为聊天频道", ephemeral=True)
|
| 142 |
+
return
|
| 143 |
+
|
| 144 |
+
print(f"当前频道配置: {channel_config}")
|
| 145 |
+
|
| 146 |
+
# 尝试获取预设
|
| 147 |
+
agent_manager = self.bot.get_cog("AgentManager")
|
| 148 |
+
preset_data = None
|
| 149 |
+
preset_name = "chat_preset.json" # 默认使用chat_preset.json
|
| 150 |
+
|
| 151 |
+
# 根据情况选择不同的预设
|
| 152 |
+
if extra_attachment:
|
| 153 |
+
preset_name = "attachment_preset.json"
|
| 154 |
+
print(f"使用附件预设: {preset_name}")
|
| 155 |
+
elif ctx.message.reference:
|
| 156 |
+
preset_name = "reference_preset.json"
|
| 157 |
+
|
| 158 |
+
# 获取预设内容,传递频道ID和服务器ID以获取该频道对应的预设
|
| 159 |
+
if agent_manager:
|
| 160 |
+
preset_data = agent_manager.get_preset_json(preset_name, channel_id, guild_id)
|
| 161 |
+
|
| 162 |
+
if model != "gemini-2.0-pro-exp-02-05":
|
| 163 |
+
key = self.get_random_key()
|
| 164 |
+
else:
|
| 165 |
+
key = self.get_next_key()
|
| 166 |
+
client = genai.Client(api_key=key)
|
| 167 |
+
|
| 168 |
+
# 处理附件 - 下载附件内容
|
| 169 |
+
attachment_bytes = None
|
| 170 |
+
attachment_mime_type = None
|
| 171 |
+
if extra_attachment:
|
| 172 |
+
msg = await ctx.send("Downloading the attachment...")
|
| 173 |
+
bytes_data = await extra_attachment.read()
|
| 174 |
+
attachment_bytes = bytes_data
|
| 175 |
+
attachment_mime_type = extra_attachment.content_type.split(";")[0]
|
| 176 |
+
await msg.edit(content="Processing the attachment...")
|
| 177 |
+
print(f"附件已下载: {extra_attachment.filename} ({attachment_mime_type})")
|
| 178 |
+
else:
|
| 179 |
+
msg = await ctx.send("Typing...") if username is None else await self.webhook.send("Typing...", wait=True, username=username)
|
| 180 |
+
|
| 181 |
+
# 检查预设数据是否存在
|
| 182 |
+
if not preset_data:
|
| 183 |
+
await msg.edit(content="无法加载预设数据,请联系管理员")
|
| 184 |
+
return
|
| 185 |
+
|
| 186 |
+
# 首先获取变量替换所需的数据
|
| 187 |
+
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 188 |
+
bot_name = ctx.me.name
|
| 189 |
+
bot_display_name = ctx.me.display_name
|
| 190 |
+
user_name = ctx.author.name
|
| 191 |
+
user_display_name = ctx.author.display_name
|
| 192 |
+
|
| 193 |
+
# 处理上下文
|
| 194 |
+
context = ""
|
| 195 |
+
if hasattr(ctx, 'context') and ctx.context:
|
| 196 |
+
context = ctx.context
|
| 197 |
+
elif hasattr(ctx, 'history') and ctx.history:
|
| 198 |
+
context = ctx.history
|
| 199 |
+
else:
|
| 200 |
+
# 获取历史消息作为上下文
|
| 201 |
+
context = await self.context_prompter.get_context_for_prompt(ctx, self.context_length)
|
| 202 |
+
|
| 203 |
+
# 确保context是字符串
|
| 204 |
+
if not isinstance(context, str):
|
| 205 |
+
context = str(context) if context is not None else ""
|
| 206 |
+
|
| 207 |
+
# 替换预设中的变量
|
| 208 |
+
first_user_message = preset_data.get("first_user_message", "")
|
| 209 |
+
first_user_message = first_user_message.replace("{context}", context)
|
| 210 |
+
first_user_message = first_user_message.replace("{question}", prompt)
|
| 211 |
+
first_user_message = first_user_message.replace("{name}", bot_display_name)
|
| 212 |
+
first_user_message = first_user_message.replace("{bot_name}", bot_name)
|
| 213 |
+
first_user_message = first_user_message.replace("{current_time}", current_time)
|
| 214 |
+
first_user_message = first_user_message.replace("{user_display_name}", user_display_name)
|
| 215 |
+
first_user_message = first_user_message.replace("{user_name}", user_name)
|
| 216 |
+
|
| 217 |
+
main_content = preset_data.get("main_content", "")
|
| 218 |
+
last_message_content = preset_data.get("last_message", "")
|
| 219 |
+
prefill_assistant_reply = preset_data.get("prefill_assistant_reply", False)
|
| 220 |
+
|
| 221 |
+
main_content = main_content.replace("{context}", context)
|
| 222 |
+
main_content = main_content.replace("{question}", prompt)
|
| 223 |
+
main_content = main_content.replace("{name}", bot_display_name)
|
| 224 |
+
main_content = main_content.replace("{bot_name}", bot_name)
|
| 225 |
+
main_content = main_content.replace("{current_time}", current_time)
|
| 226 |
+
main_content = main_content.replace("{user_display_name}", user_display_name)
|
| 227 |
+
main_content = main_content.replace("{user_name}", user_name)
|
| 228 |
+
|
| 229 |
+
# 如果是引用回复
|
| 230 |
+
if ctx.message.reference and 'reference' in preset_name:
|
| 231 |
+
reference = ctx.message.reference.resolved
|
| 232 |
+
reference_time = self.context_prompter.get_msg_time(reference)
|
| 233 |
+
reference_user_name = reference.author.name
|
| 234 |
+
reference_user_display_name = reference.author.display_name
|
| 235 |
+
reference_content = reference.content
|
| 236 |
+
|
| 237 |
+
main_content = main_content.replace("{reference_time}", reference_time)
|
| 238 |
+
main_content = main_content.replace("{reference_user_name}", reference_user_name)
|
| 239 |
+
main_content = main_content.replace("{reference_user_display_name}", reference_user_display_name)
|
| 240 |
+
main_content = main_content.replace("{reference_content}", reference_content)
|
| 241 |
+
first_user_message = first_user_message.replace("{reference_time}", reference_time)
|
| 242 |
+
first_user_message = first_user_message.replace("{reference_user_name}", reference_user_name)
|
| 243 |
+
first_user_message = first_user_message.replace("{reference_user_display_name}", reference_user_display_name)
|
| 244 |
+
first_user_message = first_user_message.replace("{reference_content}", reference_content)
|
| 245 |
+
|
| 246 |
+
# 替换 last_message 中的变量
|
| 247 |
+
last_message_content = last_message_content.replace("{context}", context)
|
| 248 |
+
last_message_content = last_message_content.replace("{question}", prompt)
|
| 249 |
+
last_message_content = last_message_content.replace("{name}", bot_display_name)
|
| 250 |
+
last_message_content = last_message_content.replace("{bot_name}", bot_name)
|
| 251 |
+
last_message_content = last_message_content.replace("{current_time}", current_time)
|
| 252 |
+
last_message_content = last_message_content.replace("{user_display_name}", user_display_name)
|
| 253 |
+
last_message_content = last_message_content.replace("{user_name}", user_name)
|
| 254 |
+
|
| 255 |
+
# 构建user-model-user的三个上下文
|
| 256 |
+
user_parts = [types.Part.from_text(text=first_user_message)]
|
| 257 |
+
model_parts = [types.Part.from_text(text=main_content)]
|
| 258 |
+
last_message_parts = [types.Part.from_text(text=last_message_content)]
|
| 259 |
+
|
| 260 |
+
# 如果有附件,添加到最后一个用户消息中
|
| 261 |
+
if attachment_bytes:
|
| 262 |
+
# 使用Pillow和inline_data方式添加图片
|
| 263 |
+
image_bytes = BytesIO(attachment_bytes)
|
| 264 |
+
image = PIL.Image.open(image_bytes)
|
| 265 |
+
|
| 266 |
+
# 转换为字节数据
|
| 267 |
+
mime_type = attachment_mime_type or "image/jpeg"
|
| 268 |
+
img_byte_arr = BytesIO()
|
| 269 |
+
image.save(img_byte_arr, format=image.format or "JPEG")
|
| 270 |
+
img_byte_data = img_byte_arr.getvalue()
|
| 271 |
+
|
| 272 |
+
# 添加到消息中
|
| 273 |
+
last_message_parts.append(
|
| 274 |
+
types.Part(
|
| 275 |
+
inline_data=types.Blob(
|
| 276 |
+
mime_type=mime_type,
|
| 277 |
+
data=img_byte_data
|
| 278 |
+
)
|
| 279 |
+
)
|
| 280 |
+
)
|
| 281 |
+
print("附件已添加到用户消息中")
|
| 282 |
+
|
| 283 |
+
# 决定最后一条消息的角色
|
| 284 |
+
last_message_role = "model" if prefill_assistant_reply else "user"
|
| 285 |
+
|
| 286 |
+
contents = [
|
| 287 |
+
types.Content(
|
| 288 |
+
role="user",
|
| 289 |
+
parts=user_parts,
|
| 290 |
+
),
|
| 291 |
+
types.Content(
|
| 292 |
+
role=last_message_role,
|
| 293 |
+
parts=last_message_parts,
|
| 294 |
+
),
|
| 295 |
+
]
|
| 296 |
+
|
| 297 |
+
# 设置安全设置
|
| 298 |
+
safety_settings = [
|
| 299 |
+
types.SafetySetting(
|
| 300 |
+
category=types.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY,
|
| 301 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 302 |
+
),
|
| 303 |
+
types.SafetySetting(
|
| 304 |
+
category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
|
| 305 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 306 |
+
),
|
| 307 |
+
types.SafetySetting(
|
| 308 |
+
category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
|
| 309 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 310 |
+
),
|
| 311 |
+
types.SafetySetting(
|
| 312 |
+
category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
|
| 313 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 314 |
+
),
|
| 315 |
+
types.SafetySetting(
|
| 316 |
+
category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
|
| 317 |
+
threshold=types.HarmBlockThreshold.OFF,
|
| 318 |
+
),
|
| 319 |
+
]
|
| 320 |
+
|
| 321 |
+
# 获取Gemini配置
|
| 322 |
+
gemini_config_data = None
|
| 323 |
+
if agent_manager:
|
| 324 |
+
gemini_config_data = agent_manager.get_preset_json("gemini_config.json", channel_id, guild_id)
|
| 325 |
+
|
| 326 |
+
# 构建配置
|
| 327 |
+
generate_content_config = types.GenerateContentConfig(
|
| 328 |
+
temperature=gemini_config_data.get("temperature", 1.0) if gemini_config_data else 1.0,
|
| 329 |
+
top_p=gemini_config_data.get("top_p", 0.95) if gemini_config_data else 0.95,
|
| 330 |
+
top_k=gemini_config_data.get("top_k", 64) if gemini_config_data else 64,
|
| 331 |
+
max_output_tokens=gemini_config_data.get("max_output_tokens", 8192) if gemini_config_data else 8192,
|
| 332 |
+
safety_settings=safety_settings,
|
| 333 |
+
response_mime_type="text/plain",
|
| 334 |
+
system_instruction=[
|
| 335 |
+
types.Part.from_text(text=preset_data.get("system_prompt", "")),
|
| 336 |
+
],
|
| 337 |
+
)
|
| 338 |
+
|
| 339 |
+
full = ""
|
| 340 |
+
n = config.get("gemini_chunk_per_edit")
|
| 341 |
+
every_n_chunk = 1
|
| 342 |
+
try:
|
| 343 |
+
# 准备要记录的原始请求数据
|
| 344 |
+
request_payload = {
|
| 345 |
+
"model": model,
|
| 346 |
+
"contents": [c.to_dict() for c in contents], # 转换为字典以便序列化
|
| 347 |
+
"generation_config": generate_content_config.to_dict(), # 使用构建的配置对象
|
| 348 |
+
"safety_settings": [s.to_dict() for s in safety_settings] # 记录明确传入的安全设置
|
| 349 |
+
}
|
| 350 |
+
|
| 351 |
+
# 记录原始请求体 (或其近似结构) 到日志
|
| 352 |
+
logger.info(
|
| 353 |
+
"Gemini Raw Request: %s",
|
| 354 |
+
json.dumps(request_payload, indent=2, ensure_ascii=False)
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
# 使用流式响应
|
| 358 |
+
# 注意:实际调用时使用的是 self.default_gemini_config
|
| 359 |
+
# 为了日志准确反映实际调用,我们应该记录 self.default_gemini_config
|
| 360 |
+
# 或者修改调用以使用 generate_content_config
|
| 361 |
+
# 这里暂时保持原样,但指出日志记录的是构建的配置,而非最终调用的配置
|
| 362 |
+
|
| 363 |
+
# !! 注意:下方调用使用的是 self.default_gemini_config,
|
| 364 |
+
# 而上方日志记录的是 generate_content_config。
|
| 365 |
+
# 为了调试准确性,应考虑统一或明确记录实际使用的配置。
|
| 366 |
+
response = client.models.generate_content_stream(
|
| 367 |
+
model=model,
|
| 368 |
+
contents=contents,
|
| 369 |
+
config=generate_content_config, # 使用构建的、已记录的配置
|
| 370 |
+
)
|
| 371 |
+
|
| 372 |
+
async for chunk in async_iter(response):
|
| 373 |
+
if chunk.text:
|
| 374 |
+
full += chunk.text
|
| 375 |
+
if every_n_chunk == n:
|
| 376 |
+
await msg.edit(content=full)
|
| 377 |
+
every_n_chunk = 1
|
| 378 |
+
else:
|
| 379 |
+
every_n_chunk += 1
|
| 380 |
+
await msg.edit(content=full)
|
| 381 |
+
except Exception as e:
|
| 382 |
+
logger.error(
|
| 383 |
+
"Error when requesting gemini with key: %s, error: %s",
|
| 384 |
+
key,
|
| 385 |
+
e,
|
| 386 |
+
exc_info=True,
|
| 387 |
+
)
|
| 388 |
+
if full == "":
|
| 389 |
+
await msg.edit(content="Uh oh, something went wrong...")
|
| 390 |
+
else:
|
| 391 |
+
full += "\nUh oh, something went wrong..."
|
| 392 |
+
await msg.edit(content=full)
|
| 393 |
+
|
| 394 |
+
@commands.hybrid_command(name="hey", description="Ask a question to gemini.")
|
| 395 |
+
async def hey(
|
| 396 |
+
self,
|
| 397 |
+
ctx: commands.Context,
|
| 398 |
+
*,
|
| 399 |
+
question: str,
|
| 400 |
+
context_length: int = None,
|
| 401 |
+
):
|
| 402 |
+
# 获取服务器和频道配置
|
| 403 |
+
guild_id = str(ctx.guild.id)
|
| 404 |
+
channel_id = str(ctx.channel.id)
|
| 405 |
+
channel_config = self.get_channel_config(guild_id, channel_id)
|
| 406 |
+
|
| 407 |
+
if not channel_config:
|
| 408 |
+
await ctx.send("此频道未配置为聊天频道", ephemeral=True)
|
| 409 |
+
return
|
| 410 |
+
|
| 411 |
+
if context_length is None:
|
| 412 |
+
context_length = self.context_length
|
| 413 |
+
extra_attachment = None
|
| 414 |
+
|
| 415 |
+
# 获取历史消息作为上下文
|
| 416 |
+
history = await self.context_prompter.get_context_for_prompt(ctx, context_length)
|
| 417 |
+
ctx.history = history # 将历史消息保存到ctx对象中,供预设处理使用
|
| 418 |
+
|
| 419 |
+
# 检查附件
|
| 420 |
+
if ctx.message.reference:
|
| 421 |
+
reference = ctx.message.reference.resolved
|
| 422 |
+
# 优先查找引用消息中的附件
|
| 423 |
+
if reference and reference.attachments:
|
| 424 |
+
extra_attachment = reference.attachments[-1]
|
| 425 |
+
|
| 426 |
+
# 选择合适的预设
|
| 427 |
+
agent_manager = self.bot.get_cog("AgentManager")
|
| 428 |
+
preset_name = "chat_preset.json" # 默认使用chat_preset.json
|
| 429 |
+
|
| 430 |
+
if agent_manager:
|
| 431 |
+
if ctx.message.reference:
|
| 432 |
+
reference = ctx.message.reference.resolved
|
| 433 |
+
if reference and reference.attachments:
|
| 434 |
+
preset_name = "attachment_preset.json"
|
| 435 |
+
else:
|
| 436 |
+
preset_name = "reference_preset.json"
|
| 437 |
+
|
| 438 |
+
# 检查附件是否存在,确保传递正确
|
| 439 |
+
if extra_attachment:
|
| 440 |
+
print(f"处理附件: {extra_attachment.filename} ({extra_attachment.content_type})")
|
| 441 |
+
|
| 442 |
+
# 使用聊天模型
|
| 443 |
+
chat_model = self.gemini_models.get("chat", "gemini-2.0-pro-exp-02-05")
|
| 444 |
+
|
| 445 |
+
# 发送请求
|
| 446 |
+
await self.request_gemini(
|
| 447 |
+
ctx,
|
| 448 |
+
question, # 直接传递原始问题,预设处理在request_gemini中完成
|
| 449 |
+
model=chat_model,
|
| 450 |
+
extra_attachment=extra_attachment,
|
| 451 |
+
)
|
| 452 |
+
|
| 453 |
+
@commands.hybrid_command(name="tr", description="Translate a text.")
async def translate(
    self,
    ctx: commands.Context,
    target_language: str = None,
    context_length: int = None,
):
    """Translate the message the user replied to, streaming the result.

    Loads the "translate_preset.json" preset via the AgentManager cog,
    substitutes template placeholders, builds a user/model/user Gemini
    conversation (optionally attaching an image from the referenced
    message), and streams the translation into a progressively edited
    Discord message.

    Args:
        ctx: Invocation context; must be a reply inside a configured channel.
        target_language: Target language; defaults to ``self.target_language``.
        context_length: History length; defaults to ``self.context_length``.
    """
    # Channel must be explicitly configured for chat features.
    guild_id = str(ctx.guild.id)
    channel_id = str(ctx.channel.id)
    if not self.get_channel_config(guild_id, channel_id):
        await ctx.send("此频道未配置为聊天频道", ephemeral=True)
        return

    # The command only makes sense as a reply to the message to translate.
    if ctx.message.reference is None:
        await ctx.send(
            "请回复要翻译的消息", ephemeral=True
        )
        return
    if context_length is None:
        context_length = self.context_length
    if target_language is None:
        target_language = self.target_language

    # Model selection was previously computed twice; once is enough.
    translate_model = self.gemini_models.get("translate", "gemini-2.0-pro-exp-02-05")
    agent_manager = self.bot.get_cog("AgentManager")

    # Fetch the replied-to message and, if present, its last attachment.
    reference_message = await ctx.channel.fetch_message(
        ctx.message.reference.message_id
    )

    extra_attachment = None
    if reference_message and reference_message.attachments:
        extra_attachment = reference_message.attachments[-1]
        print(f"翻译附件: {extra_attachment.filename} ({extra_attachment.content_type})")

    attachment_bytes = None
    attachment_mime_type = None
    if extra_attachment:
        msg = await ctx.send("Downloading the attachment...")
        attachment_bytes = await extra_attachment.read()
        attachment_mime_type = extra_attachment.content_type.split(";")[0]
        await msg.edit(content="Processing the attachment...")
        print(f"附件已下载: {extra_attachment.filename} ({attachment_mime_type})")
    else:
        msg = await ctx.send("Translating...")

    # Load the translation preset; abort with a user-facing error if missing.
    preset_data = None
    if agent_manager:
        preset_data = agent_manager.get_preset_json("translate_preset.json")
    if not preset_data:
        await ctx.send("无法加载翻译预设,请联系管理员", delete_after=5, ephemeral=True)
        return

    key = self.get_next_key()
    client = genai.Client(api_key=key)

    # Conversation context: prefer values already computed on ctx by callers.
    if getattr(ctx, "context", None):
        context = ctx.context
    elif getattr(ctx, "history", None):
        context = ctx.history
    else:
        context = await self.context_prompter.get_context_for_prompt(ctx, context_length)
    if not isinstance(context, str):
        context = str(context) if context is not None else ""

    # Placeholder map, applied in declaration order (matches the original
    # replacement order, which matters if a substituted value itself
    # contains a later placeholder).
    substitutions = {
        "{context}": context,
        "{target_language}": target_language,
        "{reference_content}": reference_message.content,
        "{reference_time}": reference_message.created_at.strftime("%Y-%m-%d %H:%M:%S"),
        "{reference_user_name}": reference_message.author.name,
        "{reference_user_display_name}": reference_message.author.display_name,
        "{name}": ctx.me.display_name,
        "{bot_name}": ctx.me.name,
        "{current_time}": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "{user_display_name}": ctx.author.display_name,
        "{user_name}": ctx.author.name,
    }

    def _fill(template: str) -> str:
        # Replaces the three duplicated 11-line .replace() chains.
        for placeholder, value in substitutions.items():
            template = template.replace(placeholder, value)
        return template

    first_user_message = _fill(preset_data.get("first_user_message", ""))
    main_content = _fill(preset_data.get("main_content", ""))
    last_message_content = _fill(preset_data.get("last_message", ""))

    # user -> model -> user scaffold required by the preset format.
    contents = [
        types.Content(
            role="user",
            parts=[
                types.Part.from_text(text=first_user_message),
            ],
        ),
        types.Content(
            role="model",
            parts=[
                types.Part.from_text(text=main_content),
            ],
        ),
        types.Content(
            role="user",
            parts=[
                types.Part.from_text(text=last_message_content),
            ],
        ),
    ]

    # Attach the referenced image (if any) to the final user turn.
    # This replaces the previous broken image branch, which called the
    # nonexistent types.Part.from_image() and sent unsubstituted templates.
    if attachment_bytes:
        image = PIL.Image.open(BytesIO(attachment_bytes))
        img_byte_arr = BytesIO()
        image.save(img_byte_arr, format=image.format or "JPEG")
        contents[2].parts.append(
            types.Part(
                inline_data=types.Blob(
                    mime_type=attachment_mime_type or "image/jpeg",
                    data=img_byte_arr.getvalue(),
                )
            )
        )
        print("附件已添加到翻译请求中")

    # Translation intentionally disables all safety categories.
    safety_settings = [
        types.SafetySetting(
            category=types.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY,
            threshold=types.HarmBlockThreshold.OFF,
        ),
        types.SafetySetting(
            category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
            threshold=types.HarmBlockThreshold.OFF,
        ),
        types.SafetySetting(
            category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
            threshold=types.HarmBlockThreshold.OFF,
        ),
        types.SafetySetting(
            category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
            threshold=types.HarmBlockThreshold.OFF,
        ),
        types.SafetySetting(
            category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
            threshold=types.HarmBlockThreshold.OFF,
        ),
    ]

    # Per-channel generation parameters, with safe defaults.
    gemini_config_data = None
    if agent_manager:
        gemini_config_data = agent_manager.get_preset_json("gemini_config.json", channel_id)
    gemini_config_data = gemini_config_data or {}

    generate_content_config = types.GenerateContentConfig(
        temperature=gemini_config_data.get("temperature", 1.0),
        top_p=gemini_config_data.get("top_p", 0.95),
        top_k=gemini_config_data.get("top_k", 64),
        max_output_tokens=gemini_config_data.get("max_output_tokens", 8192),
        safety_settings=safety_settings,
        response_mime_type="text/plain",
        system_instruction=[
            types.Part.from_text(text=preset_data.get("system_prompt", "")),
        ],
    )

    full = ""
    n = config.get("gemini_chunk_per_edit")
    every_n_chunk = 1

    try:
        # Log the outgoing request (file log only, no console echo).
        log_contents = []
        for content in contents:
            parts_text = []
            for part in content.parts:
                if getattr(part, "text", None):
                    parts_text.append(f"Text: {part.text}")
                else:
                    parts_text.append(f"Unknown part type: {type(part)}")
            log_contents.append(f"Role: {content.role}, Parts: {parts_text}")

        # The old log hardcoded one model name; log the model actually used.
        logger.info(
            "Gemini翻译请求发送: 模型=%s, 内容=%s, 系统提示=%s, 配置=%s",
            translate_model,
            log_contents,
            preset_data.get("system_prompt", ""),
            {
                "temperature": generate_content_config.temperature,
                "top_p": generate_content_config.top_p,
                "top_k": generate_content_config.top_k,
                "max_tokens": generate_content_config.max_output_tokens,
            },
        )

        response = client.models.generate_content_stream(
            model=translate_model,
            contents=contents,
            config=generate_content_config,
        )

        # Stream into the Discord message, editing every n-th chunk to
        # stay under Discord's rate limits, with a final flush.
        async for chunk in async_iter(response):
            if chunk.text:
                full += chunk.text
                if every_n_chunk == n:
                    await msg.edit(content=full)
                    every_n_chunk = 1
                else:
                    every_n_chunk += 1
        await msg.edit(content=full)
    except Exception as e:
        logger.error(
            "Error when translating with gemini, error: %s",
            e,
            exc_info=True,
        )
        if full == "":
            await msg.edit(content="Uh oh, something went wrong...")
        else:
            full += "\nUh oh, something went wrong..."
            await msg.edit(content=full)
|
| 835 |
+
|
| 836 |
+
@commands.hybrid_command(
    name="set_context_length", description="Set the context length."
)
@commands.is_owner()
@auto_delete(delay=0)
async def set_context_length(self, ctx: commands.Context, context_length: int):
    """Set how many history messages are used as prompt context (owner only).

    Args:
        ctx: Invocation context.
        context_length: New context length; must be non-negative.
    """
    # Reject nonsensical values instead of silently storing them.
    if context_length < 0:
        await ctx.send(
            "Context length must be non-negative.", ephemeral=True, delete_after=5
        )
        return
    self.context_length = context_length
    await ctx.send("Context length set.", ephemeral=True, delete_after=5)
|
| 844 |
+
|
| 845 |
+
@commands.hybrid_command(
    name="set_target_language", description="Set the target language."
)
@commands.is_owner()
@auto_delete(delay=0)
async def set_target_language(self, ctx: commands.Context, target_language: str):
    """Set the default target language used by the /tr command (owner only).

    Args:
        ctx: Invocation context.
        target_language: Language name/code stored as-is; no validation is
            performed here — the value is passed verbatim into the preset.
    """
    self.target_language = target_language
    await ctx.send("Target language set.", ephemeral=True, delete_after=5)
|
| 853 |
+
|
| 854 |
+
@commands.hybrid_command(name="set_timezone", description="Set the timezone.")
@commands.is_owner()
@auto_delete(delay=0)
async def set_timezone(self, ctx: commands.Context, timezone: str):
    """Set the timezone used for timestamps in prompts (owner only).

    Args:
        ctx: Invocation context.
        timezone: Timezone name accepted by ``ContextPrompter.set_tz``;
            invalid names produce an ephemeral error reply.
    """
    try:
        # Keep the try body minimal: only set_tz is expected to raise.
        self.context_prompter.set_tz(timezone)
    except Exception as e:
        # Previously the exception was captured but discarded; log it.
        logger.error("Failed to set timezone %r: %s", timezone, e)
        await ctx.send("Invalid timezone.", ephemeral=True, delete_after=5)
    else:
        await ctx.send(
            f"Timezone set to {timezone}.", ephemeral=True, delete_after=5
        )
|
| 865 |
+
|
| 866 |
+
@commands.hybrid_command(name="models", description="列出或切换Gemini模型")
@commands.has_permissions(administrator=True)
async def models(self, ctx: commands.Context, model_type: str = None, model_name: str = None):
    """List or switch the configured Gemini models (admin only).

    With no arguments, shows the current chat/translate models and the
    available-model list in an embed. With ``model_type`` ("chat" or
    "translate") and ``model_name``, updates and persists the selection.

    Args:
        model_type: Which slot to change ("chat" or "translate").
        model_name: Model identifier to assign to that slot.
    """
    if not model_type:
        # No arguments: show the current configuration and available models.
        embed = discord.Embed(
            title="Gemini模型配置",
            description="当前使用的Gemini模型",
            color=discord.Color.blue()
        )

        chat_model = self.gemini_models.get("chat", "gemini-2.0-pro-exp-02-05")
        translate_model = self.gemini_models.get("translate", "gemini-2.0-pro-exp-02-05")

        embed.add_field(name="聊天模型", value=f"`{chat_model}`", inline=False)
        embed.add_field(name="翻译模型", value=f"`{translate_model}`", inline=False)

        # Build the available-model list. NOTE: the loop variable is named
        # distinctly so it no longer shadows the model_name parameter.
        available_models_text = ""
        for model in self.available_models:
            available_name = model.get("name", "")
            available_desc = model.get("description", "")
            if available_desc:
                available_models_text += f"• `{available_name}` - {available_desc}\n"
            else:
                available_models_text += f"• `{available_name}`\n"

        if available_models_text:
            embed.add_field(name="可用模型列表", value=available_models_text, inline=False)
        else:
            embed.add_field(name="可用模型列表", value="没有可用的模型", inline=False)

        embed.add_field(
            name="使用方法",
            value="使用 `/models chat <模型名>` 更改聊天模型\n使用 `/models translate <模型名>` 更改翻译模型",
            inline=False
        )

        await ctx.send(embed=embed)
        return

    if model_type not in ("chat", "translate"):
        # Fixed mojibake in the original error string ("模型类��" -> "模型类型").
        await ctx.send("错误:模型类型必须是 `chat` 或 `translate`", ephemeral=True)
        return

    if not model_name:
        await ctx.send("错误:请指定模型名称", ephemeral=True)
        return

    # Allow unknown model names but warn, so new models can be tried early.
    model_exists = any(model.get("name") == model_name for model in self.available_models)
    if not model_exists:
        await ctx.send(f"警告:模型 `{model_name}` 不在可用模型列表中,但仍将设置为当前模型。", ephemeral=True)

    # Persist the new selection.
    self.gemini_models[model_type] = model_name
    config.write("gemini_models", self.gemini_models)

    await ctx.send(f"已将{model_type}模型设置为:{model_name}", ephemeral=True)
|
| 932 |
+
|
| 933 |
+
@models.error
async def models_error(self, ctx: commands.Context, error):
    """Error handler for /models: report missing admin permissions."""
    # Anything other than a permission failure is left to the default handler.
    if not isinstance(error, commands.MissingPermissions):
        return
    await ctx.send("错误:只有管理员可以更改模型配置", ephemeral=True)
|
| 937 |
+
|
| 938 |
+
|
| 939 |
+
async def setup(bot: commands.Bot):
    """Extension entry point: build and register the Gemini cog.

    Loads API keys and the webhook from config, optionally wires up the
    AgentManager cog and a default generation config, then adds the cog.
    """
    apikeys = config.get("gemini_keys")
    print(cpr.info(f"{len(apikeys)} keys loaded."))
    # NOTE(review): the ClientSession created here is never closed explicitly;
    # it lives for the lifetime of the webhook/bot — confirm intended.
    webhook = discord.Webhook.from_url(
        config.get("webhook_url"), session=ClientSession()
    )
    cog = Gemini(bot, webhook)

    # Set up AgentManager integration (best-effort: failures only log).
    try:
        # May be None if the AgentManager cog has not been loaded yet.
        agent_manager = bot.get_cog("AgentManager")
        if agent_manager:
            cog.context_prompter.set_agent_manager(agent_manager)
            # Load the Gemini generation configuration preset.
            gemini_config = agent_manager.get_preset_json("gemini_config.json")
            if gemini_config:
                # Convert the JSON safety entries into typed SafetySetting
                # objects via attribute lookup on the enum classes.
                safety_settings = []
                for setting in gemini_config.get("safety_settings", []):
                    category = getattr(types.HarmCategory, setting["category"])
                    threshold = getattr(types.HarmBlockThreshold, setting["threshold"])
                    safety_settings.append(types.SafetySetting(
                        category=category,
                        threshold=threshold
                    ))

                cog.default_gemini_config = types.GenerateContentConfig(
                    system_instruction=gemini_config.get("system_instruction", ""),
                    top_k=gemini_config.get("top_k", 55),
                    top_p=gemini_config.get("top_p", 0.95),
                    temperature=gemini_config.get("temperature", 1.3),
                    safety_settings=safety_settings
                )
    except Exception as e:
        # Config loading is optional; the cog is still registered below.
        print(f"Error loading Gemini config: {e}")

    await bot.add_cog(cog)
    print(cpr.success("Cog loaded: Gemini"))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
cogs/openai.py
CHANGED
|
@@ -112,9 +112,12 @@ class Openai(commands.Cog):
|
|
| 112 |
messages = [{"role": "user", "content": prompt}]
|
| 113 |
else:
|
| 114 |
# 构建符合预设格式的消息数组
|
|
|
|
|
|
|
|
|
|
| 115 |
if preset.get("system_prompt"):
|
| 116 |
messages.append({
|
| 117 |
-
"role": "system",
|
| 118 |
"content": preset["system_prompt"]
|
| 119 |
})
|
| 120 |
|
|
@@ -128,9 +131,11 @@ class Openai(commands.Cog):
|
|
| 128 |
"content": prompt # 这里是原始prompt,它已经包含了main_content的格式
|
| 129 |
})
|
| 130 |
|
|
|
|
|
|
|
| 131 |
messages.append({
|
| 132 |
-
"role":
|
| 133 |
-
"content":
|
| 134 |
})
|
| 135 |
|
| 136 |
data = {
|
|
@@ -143,7 +148,11 @@ class Openai(commands.Cog):
|
|
| 143 |
}
|
| 144 |
|
| 145 |
# 记录API请求数据
|
| 146 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
|
| 148 |
return url, headers, data
|
| 149 |
|
|
|
|
| 112 |
messages = [{"role": "user", "content": prompt}]
|
| 113 |
else:
|
| 114 |
# 构建符合预设格式的消息数组
|
| 115 |
+
prefill_assistant_reply = preset.get("prefill_assistant_reply", False)
|
| 116 |
+
last_message_content = preset.get("last_message", "Your reply:")
|
| 117 |
+
|
| 118 |
if preset.get("system_prompt"):
|
| 119 |
messages.append({
|
| 120 |
+
"role": "system",
|
| 121 |
"content": preset["system_prompt"]
|
| 122 |
})
|
| 123 |
|
|
|
|
| 131 |
"content": prompt # 这里是原始prompt,它已经包含了main_content的格式
|
| 132 |
})
|
| 133 |
|
| 134 |
+
# 决定最后一条消息的角色和内容
|
| 135 |
+
last_message_role = "assistant" if prefill_assistant_reply else "user"
|
| 136 |
messages.append({
|
| 137 |
+
"role": last_message_role,
|
| 138 |
+
"content": last_message_content
|
| 139 |
})
|
| 140 |
|
| 141 |
data = {
|
|
|
|
| 148 |
}
|
| 149 |
|
| 150 |
# 记录API请求数据
|
| 151 |
+
log_headers = {k: v for k, v in headers.items() if k.lower() != 'authorization'} # 不记录原始Key
|
| 152 |
+
log_headers['Authorization'] = 'Bearer ***' # 隐藏Key
|
| 153 |
+
logger.info(
|
| 154 |
+
f"OpenAI API Raw Request:\nURL: {url}\nHeaders: {json.dumps(log_headers, ensure_ascii=False, indent=2)}\nBody: {json.dumps(data, ensure_ascii=False, indent=2)}"
|
| 155 |
+
)
|
| 156 |
|
| 157 |
return url, headers, data
|
| 158 |
|
main.py
CHANGED
|
@@ -1,430 +1,408 @@
|
|
| 1 |
-
import asyncio
|
| 2 |
-
import json
|
| 3 |
-
from discord.ext import commands
|
| 4 |
-
import discord
|
| 5 |
-
import os
|
| 6 |
-
import ssl
|
| 7 |
-
import sys
|
| 8 |
-
import traceback
|
| 9 |
-
import aiohttp
|
| 10 |
-
import logging
|
| 11 |
-
import threading
|
| 12 |
-
import webbrowser
|
| 13 |
-
import time
|
| 14 |
-
import requests
|
| 15 |
-
from flask import Flask, render_template, request, jsonify, redirect, url_for, send_from_directory, session
|
| 16 |
-
from werkzeug.utils import secure_filename
|
| 17 |
-
from PIL import Image, ImageDraw, ImageFont
|
| 18 |
-
import io
|
| 19 |
-
import shutil
|
| 20 |
-
from utils.config import config
|
| 21 |
-
from functools import wraps
|
| 22 |
-
|
| 23 |
-
# 配置Flask应用
|
| 24 |
-
app = Flask(__name__,
|
| 25 |
-
static_folder='static',
|
| 26 |
-
template_folder='templates')
|
| 27 |
-
app.secret_key = os.urandom(24) # 设置session密钥
|
| 28 |
-
|
| 29 |
-
# 登录验证装饰器
|
| 30 |
-
def login_required(f):
|
| 31 |
-
@wraps(f)
|
| 32 |
-
def decorated_function(*args, **kwargs):
|
| 33 |
-
if not session.get('logged_in'):
|
| 34 |
-
return redirect(url_for('login'))
|
| 35 |
-
return f(*args, **kwargs)
|
| 36 |
-
return decorated_function
|
| 37 |
-
|
| 38 |
-
# 配置日志级别,过滤掉 google_genai.models 的 INFO 级别日志
|
| 39 |
-
logging.getLogger('google_genai.models').setLevel(logging.WARNING)
|
| 40 |
-
|
| 41 |
-
# Disable SSL verification (only for debugging)
|
| 42 |
-
ssl_context = ssl.create_default_context()
|
| 43 |
-
ssl_context.check_hostname = False
|
| 44 |
-
ssl_context.verify_mode = ssl.CERT_NONE
|
| 45 |
-
|
| 46 |
-
intents = discord.Intents.default()
|
| 47 |
-
intents.message_content = True
|
| 48 |
-
intents.members = True
|
| 49 |
-
|
| 50 |
-
bot = commands.Bot(command_prefix=".", intents=intents)
|
| 51 |
-
|
| 52 |
-
# 创建默认头像
|
| 53 |
-
def create_default_avatar():
|
| 54 |
-
avatar_path = os.path.join('static', 'img', 'default-avatar.png')
|
| 55 |
-
|
| 56 |
-
# 如果已存在,则不需要创建
|
| 57 |
-
if os.path.exists(avatar_path):
|
| 58 |
-
return
|
| 59 |
-
|
| 60 |
-
# 确保目录存在
|
| 61 |
-
os.makedirs(os.path.dirname(avatar_path), exist_ok=True)
|
| 62 |
-
|
| 63 |
-
# 创建简单的默认头像
|
| 64 |
-
img = Image.new('RGB', (200, 200), color=(53, 102, 220))
|
| 65 |
-
d = ImageDraw.Draw(img)
|
| 66 |
-
|
| 67 |
-
# 添加文字
|
| 68 |
-
try:
|
| 69 |
-
# 尝试使用系统字体
|
| 70 |
-
font = ImageFont.truetype("arial.ttf", 60)
|
| 71 |
-
except:
|
| 72 |
-
# 如果没有系统字体,使用默认字体
|
| 73 |
-
font = ImageFont.load_default()
|
| 74 |
-
|
| 75 |
-
d.text((70, 70), "Bot", fill=(255, 255, 255), font=font)
|
| 76 |
-
|
| 77 |
-
# 保存图片
|
| 78 |
-
img.save(avatar_path)
|
| 79 |
-
|
| 80 |
-
# Flask路由
|
| 81 |
-
@app.route('/login')
|
| 82 |
-
def login():
|
| 83 |
-
if session.get('logged_in'):
|
| 84 |
-
return redirect(url_for('index'))
|
| 85 |
-
return render_template('login.html')
|
| 86 |
-
|
| 87 |
-
@app.route('/api/login', methods=['POST'])
|
| 88 |
-
def api_login():
|
| 89 |
-
data = request.json
|
| 90 |
-
password = data.get('password')
|
| 91 |
-
|
| 92 |
-
# 直接从config.json读取最新密码
|
| 93 |
-
try:
|
| 94 |
-
with open('config.json', 'r', encoding='utf-8') as f:
|
| 95 |
-
config_data = json.load(f)
|
| 96 |
-
config_password = config_data.get('password')
|
| 97 |
-
except:
|
| 98 |
-
config_password = None
|
| 99 |
-
|
| 100 |
-
if not config_password:
|
| 101 |
-
# 如果配置中没有密码,设置默认密码为 "admin"
|
| 102 |
-
config_password = "admin"
|
| 103 |
-
config.write("password", config_password)
|
| 104 |
-
|
| 105 |
-
if password == config_password:
|
| 106 |
-
session['logged_in'] = True
|
| 107 |
-
return jsonify({"status": "success"})
|
| 108 |
-
return jsonify({"status": "error", "message": "密码错误"})
|
| 109 |
-
|
| 110 |
-
@app.route('/logout')
|
| 111 |
-
def logout():
|
| 112 |
-
session.pop('logged_in', None)
|
| 113 |
-
return redirect(url_for('login'))
|
| 114 |
-
|
| 115 |
-
@app.route('/')
|
| 116 |
-
@login_required
|
| 117 |
-
def index():
|
| 118 |
-
return render_template('index.html')
|
| 119 |
-
|
| 120 |
-
@app.route('/config')
|
| 121 |
-
@login_required
|
| 122 |
-
def config_page():
|
| 123 |
-
return render_template('config.html')
|
| 124 |
-
|
| 125 |
-
@app.route('/presets')
|
| 126 |
-
@login_required
|
| 127 |
-
def presets_page():
|
| 128 |
-
presets = os.listdir('agent/presets')
|
| 129 |
-
return render_template('presets.html', presets=presets)
|
| 130 |
-
|
| 131 |
-
@app.route('/api/config', methods=['GET'])
|
| 132 |
-
@login_required
|
| 133 |
-
def get_config():
|
| 134 |
-
with open('config.json', 'r', encoding='utf-8') as f:
|
| 135 |
-
config_data = json.load(f)
|
| 136 |
-
return jsonify(config_data)
|
| 137 |
-
|
| 138 |
-
@app.route('/api/config', methods=['POST'])
|
| 139 |
-
@login_required
|
| 140 |
-
def update_config():
|
| 141 |
-
config_data = request.json
|
| 142 |
-
|
| 143 |
-
# 检查密码是否被修改
|
| 144 |
-
current_password = config.get('password')
|
| 145 |
-
password_changed = current_password != config_data.get('password')
|
| 146 |
-
|
| 147 |
-
# 保存配置
|
| 148 |
-
with open('config.json', 'w', encoding='utf-8') as f:
|
| 149 |
-
json.dump(config_data, f, indent=4, ensure_ascii=False)
|
| 150 |
-
|
| 151 |
-
# 如果密码被修改,返回特殊状态码
|
| 152 |
-
if password_changed:
|
| 153 |
-
return jsonify({"status": "password_changed"})
|
| 154 |
-
return jsonify({"status": "success"})
|
| 155 |
-
|
| 156 |
-
@app.route('/api/presets', methods=['GET'])
|
| 157 |
-
def get_presets():
|
| 158 |
-
presets = os.listdir('agent/presets')
|
| 159 |
-
presets_data = {}
|
| 160 |
-
for preset in presets:
|
| 161 |
-
preset_path = os.path.join('agent/presets', preset)
|
| 162 |
-
if os.path.isdir(preset_path):
|
| 163 |
-
presets_data[preset] = {}
|
| 164 |
-
for file in os.listdir(preset_path):
|
| 165 |
-
if file.endswith('.json'):
|
| 166 |
-
with open(os.path.join(preset_path, file), 'r', encoding='utf-8') as f:
|
| 167 |
-
presets_data[preset][file] = json.load(f)
|
| 168 |
-
return jsonify(presets_data)
|
| 169 |
-
|
| 170 |
-
@app.route('/api/preset/<preset_name>', methods=['GET'])
|
| 171 |
-
def get_preset(preset_name):
|
| 172 |
-
preset_path = os.path.join('agent/presets', preset_name)
|
| 173 |
-
if not os.path.exists(preset_path):
|
| 174 |
-
return jsonify({"status": "error", "message": "Preset not found"}), 404
|
| 175 |
-
|
| 176 |
-
preset_data = {}
|
| 177 |
-
for file in os.listdir(preset_path):
|
| 178 |
-
if file.endswith('.json'):
|
| 179 |
-
with open(os.path.join(preset_path, file), 'r', encoding='utf-8') as f:
|
| 180 |
-
preset_data[file] = json.load(f)
|
| 181 |
-
|
| 182 |
-
return jsonify(preset_data)
|
| 183 |
-
|
| 184 |
-
@app.route('/api/preset/<preset_name>', methods=['PUT'])
|
| 185 |
-
def update_preset(preset_name):
|
| 186 |
-
preset_data = request.json
|
| 187 |
-
preset_path = os.path.join('agent/presets', preset_name)
|
| 188 |
-
if not os.path.exists(preset_path):
|
| 189 |
-
return jsonify({"status": "error", "message": "Preset not found"}), 404
|
| 190 |
-
|
| 191 |
-
for file_name, data in preset_data.items():
|
| 192 |
-
with open(os.path.join(preset_path, file_name), 'w', encoding='utf-8') as f:
|
| 193 |
-
json.dump(data, f, indent=4, ensure_ascii=False)
|
| 194 |
-
|
| 195 |
-
return jsonify({"status": "success"})
|
| 196 |
-
|
| 197 |
-
@app.route('/api/preset/<preset_name>', methods=['DELETE'])
|
| 198 |
-
def delete_preset(preset_name):
|
| 199 |
-
preset_path = os.path.join('agent/presets', preset_name)
|
| 200 |
-
if not os.path.exists(preset_path):
|
| 201 |
-
return jsonify({"status": "error", "message": "Preset not found"}), 404
|
| 202 |
-
|
| 203 |
-
shutil.rmtree(preset_path)
|
| 204 |
-
return jsonify({"status": "success"})
|
| 205 |
-
|
| 206 |
-
@app.route('/api/preset', methods=['POST'])
|
| 207 |
-
def create_preset():
|
| 208 |
-
data = request.json
|
| 209 |
-
preset_name = data.get('name')
|
| 210 |
-
template_preset = data.get('template', 'default')
|
| 211 |
-
|
| 212 |
-
if not preset_name:
|
| 213 |
-
return jsonify({"status": "error", "message": "Preset name is required"}), 400
|
| 214 |
-
|
| 215 |
-
new_preset_path = os.path.join('agent/presets', preset_name)
|
| 216 |
-
if os.path.exists(new_preset_path):
|
| 217 |
-
return jsonify({"status": "error", "message": "Preset already exists"}), 400
|
| 218 |
-
|
| 219 |
-
# 复制模板预设
|
| 220 |
-
template_path = os.path.join('agent/presets', template_preset)
|
| 221 |
-
if not os.path.exists(template_path):
|
| 222 |
-
return jsonify({"status": "error", "message": "Template preset not found"}), 404
|
| 223 |
-
|
| 224 |
-
shutil.copytree(template_path, new_preset_path)
|
| 225 |
-
|
| 226 |
-
return jsonify({"status": "success"})
|
| 227 |
-
|
| 228 |
-
@app.route('/api/preset/<preset_name>/avatar', methods=['POST'])
|
| 229 |
-
def upload_avatar(preset_name):
|
| 230 |
-
preset_path = os.path.join('agent/presets', preset_name)
|
| 231 |
-
if not os.path.exists(preset_path):
|
| 232 |
-
return jsonify({"status": "error", "message": "Preset not found"}), 404
|
| 233 |
-
|
| 234 |
-
if 'avatar' not in request.files:
|
| 235 |
-
return jsonify({"status": "error", "message": "No file part"}), 400
|
| 236 |
-
|
| 237 |
-
file = request.files['avatar']
|
| 238 |
-
if file.filename == '':
|
| 239 |
-
return jsonify({"status": "error", "message": "No selected file"}), 400
|
| 240 |
-
|
| 241 |
-
# 处理图片
|
| 242 |
-
try:
|
| 243 |
-
img = Image.open(file)
|
| 244 |
-
|
| 245 |
-
# 调整为正方形
|
| 246 |
-
width, height = img.size
|
| 247 |
-
size = min(width, height)
|
| 248 |
-
left = (width - size) // 2
|
| 249 |
-
top = (height - size) // 2
|
| 250 |
-
right = left + size
|
| 251 |
-
bottom = top + size
|
| 252 |
-
img = img.crop((left, top, right, bottom))
|
| 253 |
-
|
| 254 |
-
# 保存图片
|
| 255 |
-
for old_avatar in os.listdir(preset_path):
|
| 256 |
-
if old_avatar.startswith('avatar.'):
|
| 257 |
-
os.remove(os.path.join(preset_path, old_avatar))
|
| 258 |
-
|
| 259 |
-
# 获取文件扩展名
|
| 260 |
-
ext = os.path.splitext(file.filename)[1].lower()
|
| 261 |
-
if ext not in ['.jpg', '.jpeg', '.png', '.gif']:
|
| 262 |
-
ext = '.jpg' # 默认使用jpg
|
| 263 |
-
|
| 264 |
-
img.save(os.path.join(preset_path, f'avatar{ext}'))
|
| 265 |
-
|
| 266 |
-
return jsonify({"status": "success"})
|
| 267 |
-
except Exception as e:
|
| 268 |
-
return jsonify({"status": "error", "message": str(e)}), 500
|
| 269 |
-
|
| 270 |
-
@app.route('/agent/presets/<path:filename>')
|
| 271 |
-
def preset_files(filename):
|
| 272 |
-
return send_from_directory('agent/presets', filename)
|
| 273 |
-
|
| 274 |
-
@bot.event
|
| 275 |
-
async def on_ready():
|
| 276 |
-
print(f'Bot logged in as {bot.user.name} (ID: {bot.user.id})')
|
| 277 |
-
print(f'Bot invite link: https://discord.com/oauth2/authorize?client_id={bot.user.id}&permissions=8&scope=bot')
|
| 278 |
-
|
| 279 |
-
# Auto sync slash commands
|
| 280 |
-
print("Syncing slash commands...")
|
| 281 |
-
try:
|
| 282 |
-
synced = await bot.tree.sync()
|
| 283 |
-
print(f"Synced {len(synced)} command(s).")
|
| 284 |
-
except Exception as e:
|
| 285 |
-
print(f"Failed to sync commands: {e}")
|
| 286 |
-
|
| 287 |
-
# 确保所有cog都更新频道配置
|
| 288 |
-
try:
|
| 289 |
-
agent_manager = bot.get_cog("AgentManager")
|
| 290 |
-
if agent_manager:
|
| 291 |
-
print("正在更新所有cog的频道配置...")
|
| 292 |
-
agent_manager.reload_chat_channels() # 重新加载配置
|
| 293 |
-
await agent_manager.update_all_cogs_channels() # 更新所有cog的频道配置
|
| 294 |
-
print("所有cog的频道配置已更新")
|
| 295 |
-
except Exception as e:
|
| 296 |
-
print(f"Failed to update channel configs: {e}")
|
| 297 |
-
traceback.print_exc()
|
| 298 |
-
|
| 299 |
-
print('-' * 50)
|
| 300 |
-
|
| 301 |
-
# 添加一个帮助调试的命令,用于检查已加载的cog
|
| 302 |
-
@bot.command(name="list_cogs")
|
| 303 |
-
@commands.is_owner()
|
| 304 |
-
async def list_cogs(ctx):
|
| 305 |
-
loaded_cogs = sorted(list(bot.cogs.keys()))
|
| 306 |
-
all_cogs = sorted([f.replace('.py', '') for f in os.listdir('cogs') if f.endswith('.py')])
|
| 307 |
-
|
| 308 |
-
not_loaded = [cog for cog in all_cogs if cog not in [c.lower() for c in loaded_cogs]]
|
| 309 |
-
|
| 310 |
-
await ctx.send(f"**已加载的Cogs ({len(loaded_cogs)}):**\n"
|
| 311 |
-
f"{', '.join(loaded_cogs)}\n\n"
|
| 312 |
-
f"**未加载的Cogs ({len(not_loaded)}):**\n"
|
| 313 |
-
f"{', '.join(not_loaded)}")
|
| 314 |
-
|
| 315 |
-
# 保持Space活跃的函数
|
| 316 |
-
def keep_alive():
|
| 317 |
-
"""定期自我请求以保持Space活跃"""
|
| 318 |
-
print("启动保活线程...")
|
| 319 |
-
while True:
|
| 320 |
-
try:
|
| 321 |
-
# 获取SPACE_HOST环境变量,这是HF Space自动提供的
|
| 322 |
-
space_host = os.environ.get('SPACE_HOST')
|
| 323 |
-
# 如果没有SPACE_HOST环境变量,说明不是在HF Space上运行
|
| 324 |
-
if not space_host:
|
| 325 |
-
# 本地调试环境
|
| 326 |
-
url = "http://127.0.0.1:5000"
|
| 327 |
-
else:
|
| 328 |
-
# HF Space环境
|
| 329 |
-
url = f"https://{space_host}"
|
| 330 |
-
|
| 331 |
-
requests.get(url, timeout=10)
|
| 332 |
-
print(f"[KeepAlive] 自我请求成功: {time.strftime('%Y-%m-%d %H:%M:%S')}")
|
| 333 |
-
except Exception as e:
|
| 334 |
-
print(f"[KeepAlive] 自我请求失败: {e}")
|
| 335 |
-
|
| 336 |
-
# 每30分钟请求一次,避免Space超时
|
| 337 |
-
time.sleep(1800)
|
| 338 |
-
|
| 339 |
-
def start_flask():
|
| 340 |
-
# 创建默认头像
|
| 341 |
-
create_default_avatar()
|
| 342 |
-
|
| 343 |
-
# 启动保活线程
|
| 344 |
-
keep_alive_thread = threading.Thread(target=keep_alive)
|
| 345 |
-
keep_alive_thread.daemon = True
|
| 346 |
-
keep_alive_thread.start()
|
| 347 |
-
|
| 348 |
-
# 获取PORT环境变量,默认7860(HF Space默认端口)
|
| 349 |
-
# 如果环境变量不存在,就使用5000端口(本地开发环境)
|
| 350 |
-
port = int(os.environ.get('PORT', 5000))
|
| 351 |
-
host = '0.0.0.0' if os.environ.get('SPACE_HOST') else '127.0.0.1'
|
| 352 |
-
|
| 353 |
-
app.run(host=host, port=port)
|
| 354 |
-
|
| 355 |
-
async def main():
|
| 356 |
-
token = config.get("token")
|
| 357 |
-
try:
|
| 358 |
-
print("Loading cogs...")
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
|
| 393 |
-
|
| 394 |
-
|
| 395 |
-
|
| 396 |
-
|
| 397 |
-
|
| 398 |
-
|
| 399 |
-
|
| 400 |
-
|
| 401 |
-
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
flask_thread.daemon = True # 主程序退出时,Flask线程也会退出
|
| 409 |
-
flask_thread.start()
|
| 410 |
-
|
| 411 |
-
# 是否在Hugging Face Space环境下,不打开浏览器
|
| 412 |
-
if not os.environ.get('SPACE_HOST'):
|
| 413 |
-
webbrowser.open('http://127.0.0.1:5000')
|
| 414 |
-
|
| 415 |
-
await bot.start(token)
|
| 416 |
-
except Exception as e:
|
| 417 |
-
print(f"An error occurred: {e}")
|
| 418 |
-
traceback.print_exc()
|
| 419 |
-
finally:
|
| 420 |
-
await bot.close()
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
if __name__ == "__main__":
|
| 424 |
-
try:
|
| 425 |
-
asyncio.run(main())
|
| 426 |
-
except KeyboardInterrupt:
|
| 427 |
-
print("Bot has been shut down.")
|
| 428 |
-
except Exception as e:
|
| 429 |
-
print(f"Fatal error: {e}")
|
| 430 |
traceback.print_exc()
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
from discord.ext import commands
|
| 4 |
+
import discord
|
| 5 |
+
import os
|
| 6 |
+
import ssl
|
| 7 |
+
import sys
|
| 8 |
+
import traceback
|
| 9 |
+
import aiohttp
|
| 10 |
+
import logging
|
| 11 |
+
import threading
|
| 12 |
+
import webbrowser
|
| 13 |
+
import time
|
| 14 |
+
import requests
|
| 15 |
+
from flask import Flask, render_template, request, jsonify, redirect, url_for, send_from_directory, session
|
| 16 |
+
from werkzeug.utils import secure_filename
|
| 17 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 18 |
+
import io
|
| 19 |
+
import shutil
|
| 20 |
+
from utils.config import config
|
| 21 |
+
from functools import wraps
|
| 22 |
+
|
| 23 |
+
# 配置Flask应用
|
| 24 |
+
app = Flask(__name__,
|
| 25 |
+
static_folder='static',
|
| 26 |
+
template_folder='templates')
|
| 27 |
+
app.secret_key = os.urandom(24) # 设置session密钥
|
| 28 |
+
|
| 29 |
+
# 登录验证装饰器
|
| 30 |
+
def login_required(f):
|
| 31 |
+
@wraps(f)
|
| 32 |
+
def decorated_function(*args, **kwargs):
|
| 33 |
+
if not session.get('logged_in'):
|
| 34 |
+
return redirect(url_for('login'))
|
| 35 |
+
return f(*args, **kwargs)
|
| 36 |
+
return decorated_function
|
| 37 |
+
|
| 38 |
+
# 配置日志级别,过滤掉 google_genai.models 的 INFO 级别日志
|
| 39 |
+
logging.getLogger('google_genai.models').setLevel(logging.WARNING)
|
| 40 |
+
|
| 41 |
+
# Disable SSL verification (only for debugging)
|
| 42 |
+
ssl_context = ssl.create_default_context()
|
| 43 |
+
ssl_context.check_hostname = False
|
| 44 |
+
ssl_context.verify_mode = ssl.CERT_NONE
|
| 45 |
+
|
| 46 |
+
intents = discord.Intents.default()
|
| 47 |
+
intents.message_content = True
|
| 48 |
+
intents.members = True
|
| 49 |
+
|
| 50 |
+
bot = commands.Bot(command_prefix=".", intents=intents)
|
| 51 |
+
|
| 52 |
+
# 创建默认头像
|
| 53 |
+
def create_default_avatar():
|
| 54 |
+
avatar_path = os.path.join('static', 'img', 'default-avatar.png')
|
| 55 |
+
|
| 56 |
+
# 如果已存在,则不需要创建
|
| 57 |
+
if os.path.exists(avatar_path):
|
| 58 |
+
return
|
| 59 |
+
|
| 60 |
+
# 确保目录存在
|
| 61 |
+
os.makedirs(os.path.dirname(avatar_path), exist_ok=True)
|
| 62 |
+
|
| 63 |
+
# 创建简单的默认头像
|
| 64 |
+
img = Image.new('RGB', (200, 200), color=(53, 102, 220))
|
| 65 |
+
d = ImageDraw.Draw(img)
|
| 66 |
+
|
| 67 |
+
# 添加文字
|
| 68 |
+
try:
|
| 69 |
+
# 尝试使用系统字体
|
| 70 |
+
font = ImageFont.truetype("arial.ttf", 60)
|
| 71 |
+
except:
|
| 72 |
+
# 如果没有系统字体,使用默认字体
|
| 73 |
+
font = ImageFont.load_default()
|
| 74 |
+
|
| 75 |
+
d.text((70, 70), "Bot", fill=(255, 255, 255), font=font)
|
| 76 |
+
|
| 77 |
+
# 保存图片
|
| 78 |
+
img.save(avatar_path)
|
| 79 |
+
|
| 80 |
+
# Flask路由
|
| 81 |
+
@app.route('/login')
|
| 82 |
+
def login():
|
| 83 |
+
if session.get('logged_in'):
|
| 84 |
+
return redirect(url_for('index'))
|
| 85 |
+
return render_template('login.html')
|
| 86 |
+
|
| 87 |
+
@app.route('/api/login', methods=['POST'])
|
| 88 |
+
def api_login():
|
| 89 |
+
data = request.json
|
| 90 |
+
password = data.get('password')
|
| 91 |
+
|
| 92 |
+
# 直接从config.json读取最新密码
|
| 93 |
+
try:
|
| 94 |
+
with open('config.json', 'r', encoding='utf-8') as f:
|
| 95 |
+
config_data = json.load(f)
|
| 96 |
+
config_password = config_data.get('password')
|
| 97 |
+
except:
|
| 98 |
+
config_password = None
|
| 99 |
+
|
| 100 |
+
if not config_password:
|
| 101 |
+
# 如果配置中没有密码,设置默认密码为 "admin"
|
| 102 |
+
config_password = "admin"
|
| 103 |
+
config.write("password", config_password)
|
| 104 |
+
|
| 105 |
+
if password == config_password:
|
| 106 |
+
session['logged_in'] = True
|
| 107 |
+
return jsonify({"status": "success"})
|
| 108 |
+
return jsonify({"status": "error", "message": "密码错误"})
|
| 109 |
+
|
| 110 |
+
@app.route('/logout')
|
| 111 |
+
def logout():
|
| 112 |
+
session.pop('logged_in', None)
|
| 113 |
+
return redirect(url_for('login'))
|
| 114 |
+
|
| 115 |
+
@app.route('/')
|
| 116 |
+
@login_required
|
| 117 |
+
def index():
|
| 118 |
+
return render_template('index.html')
|
| 119 |
+
|
| 120 |
+
@app.route('/config')
|
| 121 |
+
@login_required
|
| 122 |
+
def config_page():
|
| 123 |
+
return render_template('config.html')
|
| 124 |
+
|
| 125 |
+
@app.route('/presets')
|
| 126 |
+
@login_required
|
| 127 |
+
def presets_page():
|
| 128 |
+
presets = os.listdir('agent/presets')
|
| 129 |
+
return render_template('presets.html', presets=presets)
|
| 130 |
+
|
| 131 |
+
@app.route('/api/config', methods=['GET'])
|
| 132 |
+
@login_required
|
| 133 |
+
def get_config():
|
| 134 |
+
with open('config.json', 'r', encoding='utf-8') as f:
|
| 135 |
+
config_data = json.load(f)
|
| 136 |
+
return jsonify(config_data)
|
| 137 |
+
|
| 138 |
+
@app.route('/api/config', methods=['POST'])
|
| 139 |
+
@login_required
|
| 140 |
+
def update_config():
|
| 141 |
+
config_data = request.json
|
| 142 |
+
|
| 143 |
+
# 检查密码是否被修改
|
| 144 |
+
current_password = config.get('password')
|
| 145 |
+
password_changed = current_password != config_data.get('password')
|
| 146 |
+
|
| 147 |
+
# 保存配置
|
| 148 |
+
with open('config.json', 'w', encoding='utf-8') as f:
|
| 149 |
+
json.dump(config_data, f, indent=4, ensure_ascii=False)
|
| 150 |
+
|
| 151 |
+
# 如果密码被修改,返回特殊状态码
|
| 152 |
+
if password_changed:
|
| 153 |
+
return jsonify({"status": "password_changed"})
|
| 154 |
+
return jsonify({"status": "success"})
|
| 155 |
+
|
| 156 |
+
@app.route('/api/presets', methods=['GET'])
|
| 157 |
+
def get_presets():
|
| 158 |
+
presets = os.listdir('agent/presets')
|
| 159 |
+
presets_data = {}
|
| 160 |
+
for preset in presets:
|
| 161 |
+
preset_path = os.path.join('agent/presets', preset)
|
| 162 |
+
if os.path.isdir(preset_path):
|
| 163 |
+
presets_data[preset] = {}
|
| 164 |
+
for file in os.listdir(preset_path):
|
| 165 |
+
if file.endswith('.json'):
|
| 166 |
+
with open(os.path.join(preset_path, file), 'r', encoding='utf-8') as f:
|
| 167 |
+
presets_data[preset][file] = json.load(f)
|
| 168 |
+
return jsonify(presets_data)
|
| 169 |
+
|
| 170 |
+
@app.route('/api/preset/<preset_name>', methods=['GET'])
|
| 171 |
+
def get_preset(preset_name):
|
| 172 |
+
preset_path = os.path.join('agent/presets', preset_name)
|
| 173 |
+
if not os.path.exists(preset_path):
|
| 174 |
+
return jsonify({"status": "error", "message": "Preset not found"}), 404
|
| 175 |
+
|
| 176 |
+
preset_data = {}
|
| 177 |
+
for file in os.listdir(preset_path):
|
| 178 |
+
if file.endswith('.json'):
|
| 179 |
+
with open(os.path.join(preset_path, file), 'r', encoding='utf-8') as f:
|
| 180 |
+
preset_data[file] = json.load(f)
|
| 181 |
+
|
| 182 |
+
return jsonify(preset_data)
|
| 183 |
+
|
| 184 |
+
@app.route('/api/preset/<preset_name>', methods=['PUT'])
|
| 185 |
+
def update_preset(preset_name):
|
| 186 |
+
preset_data = request.json
|
| 187 |
+
preset_path = os.path.join('agent/presets', preset_name)
|
| 188 |
+
if not os.path.exists(preset_path):
|
| 189 |
+
return jsonify({"status": "error", "message": "Preset not found"}), 404
|
| 190 |
+
|
| 191 |
+
for file_name, data in preset_data.items():
|
| 192 |
+
with open(os.path.join(preset_path, file_name), 'w', encoding='utf-8') as f:
|
| 193 |
+
json.dump(data, f, indent=4, ensure_ascii=False)
|
| 194 |
+
|
| 195 |
+
return jsonify({"status": "success"})
|
| 196 |
+
|
| 197 |
+
@app.route('/api/preset/<preset_name>', methods=['DELETE'])
|
| 198 |
+
def delete_preset(preset_name):
|
| 199 |
+
preset_path = os.path.join('agent/presets', preset_name)
|
| 200 |
+
if not os.path.exists(preset_path):
|
| 201 |
+
return jsonify({"status": "error", "message": "Preset not found"}), 404
|
| 202 |
+
|
| 203 |
+
shutil.rmtree(preset_path)
|
| 204 |
+
return jsonify({"status": "success"})
|
| 205 |
+
|
| 206 |
+
@app.route('/api/preset', methods=['POST'])
|
| 207 |
+
def create_preset():
|
| 208 |
+
data = request.json
|
| 209 |
+
preset_name = data.get('name')
|
| 210 |
+
template_preset = data.get('template', 'default')
|
| 211 |
+
|
| 212 |
+
if not preset_name:
|
| 213 |
+
return jsonify({"status": "error", "message": "Preset name is required"}), 400
|
| 214 |
+
|
| 215 |
+
new_preset_path = os.path.join('agent/presets', preset_name)
|
| 216 |
+
if os.path.exists(new_preset_path):
|
| 217 |
+
return jsonify({"status": "error", "message": "Preset already exists"}), 400
|
| 218 |
+
|
| 219 |
+
# 复制模板预设
|
| 220 |
+
template_path = os.path.join('agent/presets', template_preset)
|
| 221 |
+
if not os.path.exists(template_path):
|
| 222 |
+
return jsonify({"status": "error", "message": "Template preset not found"}), 404
|
| 223 |
+
|
| 224 |
+
shutil.copytree(template_path, new_preset_path)
|
| 225 |
+
|
| 226 |
+
return jsonify({"status": "success"})
|
| 227 |
+
|
| 228 |
+
@app.route('/api/preset/<preset_name>/avatar', methods=['POST'])
|
| 229 |
+
def upload_avatar(preset_name):
|
| 230 |
+
preset_path = os.path.join('agent/presets', preset_name)
|
| 231 |
+
if not os.path.exists(preset_path):
|
| 232 |
+
return jsonify({"status": "error", "message": "Preset not found"}), 404
|
| 233 |
+
|
| 234 |
+
if 'avatar' not in request.files:
|
| 235 |
+
return jsonify({"status": "error", "message": "No file part"}), 400
|
| 236 |
+
|
| 237 |
+
file = request.files['avatar']
|
| 238 |
+
if file.filename == '':
|
| 239 |
+
return jsonify({"status": "error", "message": "No selected file"}), 400
|
| 240 |
+
|
| 241 |
+
# 处理图片
|
| 242 |
+
try:
|
| 243 |
+
img = Image.open(file)
|
| 244 |
+
|
| 245 |
+
# 调整为正方形
|
| 246 |
+
width, height = img.size
|
| 247 |
+
size = min(width, height)
|
| 248 |
+
left = (width - size) // 2
|
| 249 |
+
top = (height - size) // 2
|
| 250 |
+
right = left + size
|
| 251 |
+
bottom = top + size
|
| 252 |
+
img = img.crop((left, top, right, bottom))
|
| 253 |
+
|
| 254 |
+
# 保存图片
|
| 255 |
+
for old_avatar in os.listdir(preset_path):
|
| 256 |
+
if old_avatar.startswith('avatar.'):
|
| 257 |
+
os.remove(os.path.join(preset_path, old_avatar))
|
| 258 |
+
|
| 259 |
+
# 获取文件扩展名
|
| 260 |
+
ext = os.path.splitext(file.filename)[1].lower()
|
| 261 |
+
if ext not in ['.jpg', '.jpeg', '.png', '.gif']:
|
| 262 |
+
ext = '.jpg' # 默认使用jpg
|
| 263 |
+
|
| 264 |
+
img.save(os.path.join(preset_path, f'avatar{ext}'))
|
| 265 |
+
|
| 266 |
+
return jsonify({"status": "success"})
|
| 267 |
+
except Exception as e:
|
| 268 |
+
return jsonify({"status": "error", "message": str(e)}), 500
|
| 269 |
+
|
| 270 |
+
@app.route('/agent/presets/<path:filename>')
|
| 271 |
+
def preset_files(filename):
|
| 272 |
+
return send_from_directory('agent/presets', filename)
|
| 273 |
+
|
| 274 |
+
@bot.event
|
| 275 |
+
async def on_ready():
|
| 276 |
+
print(f'Bot logged in as {bot.user.name} (ID: {bot.user.id})')
|
| 277 |
+
print(f'Bot invite link: https://discord.com/oauth2/authorize?client_id={bot.user.id}&permissions=8&scope=bot')
|
| 278 |
+
|
| 279 |
+
# Auto sync slash commands
|
| 280 |
+
print("Syncing slash commands...")
|
| 281 |
+
try:
|
| 282 |
+
synced = await bot.tree.sync()
|
| 283 |
+
print(f"Synced {len(synced)} command(s).")
|
| 284 |
+
except Exception as e:
|
| 285 |
+
print(f"Failed to sync commands: {e}")
|
| 286 |
+
|
| 287 |
+
# 确保所有cog都更新频道配置
|
| 288 |
+
try:
|
| 289 |
+
agent_manager = bot.get_cog("AgentManager")
|
| 290 |
+
if agent_manager:
|
| 291 |
+
print("正在更新所有cog的频道配置...")
|
| 292 |
+
agent_manager.reload_chat_channels() # 重新加载配置
|
| 293 |
+
await agent_manager.update_all_cogs_channels() # 更新所有cog的频道配置
|
| 294 |
+
print("所有cog的频道配置已更新")
|
| 295 |
+
except Exception as e:
|
| 296 |
+
print(f"Failed to update channel configs: {e}")
|
| 297 |
+
traceback.print_exc()
|
| 298 |
+
|
| 299 |
+
print('-' * 50)
|
| 300 |
+
|
| 301 |
+
# 添加一个帮助调试的命令,用于检查已加载的cog
|
| 302 |
+
@bot.command(name="list_cogs")
|
| 303 |
+
@commands.is_owner()
|
| 304 |
+
async def list_cogs(ctx):
|
| 305 |
+
loaded_cogs = sorted(list(bot.cogs.keys()))
|
| 306 |
+
all_cogs = sorted([f.replace('.py', '') for f in os.listdir('cogs') if f.endswith('.py')])
|
| 307 |
+
|
| 308 |
+
not_loaded = [cog for cog in all_cogs if cog not in [c.lower() for c in loaded_cogs]]
|
| 309 |
+
|
| 310 |
+
await ctx.send(f"**已加载的Cogs ({len(loaded_cogs)}):**\n"
|
| 311 |
+
f"{', '.join(loaded_cogs)}\n\n"
|
| 312 |
+
f"**未加载的Cogs ({len(not_loaded)}):**\n"
|
| 313 |
+
f"{', '.join(not_loaded)}")
|
| 314 |
+
|
| 315 |
+
# 保持Space活跃的函数
|
| 316 |
+
def keep_alive():
|
| 317 |
+
"""定期自我请求以保持Space活跃"""
|
| 318 |
+
print("启动保活线程...")
|
| 319 |
+
while True:
|
| 320 |
+
try:
|
| 321 |
+
# 获取SPACE_HOST环境变量,这是HF Space自动提供的
|
| 322 |
+
space_host = os.environ.get('SPACE_HOST')
|
| 323 |
+
# 如果没有SPACE_HOST环境变量,说明不是在HF Space上运行
|
| 324 |
+
if not space_host:
|
| 325 |
+
# 本地调试环境
|
| 326 |
+
url = "http://127.0.0.1:5000"
|
| 327 |
+
else:
|
| 328 |
+
# HF Space环境
|
| 329 |
+
url = f"https://{space_host}"
|
| 330 |
+
|
| 331 |
+
requests.get(url, timeout=10)
|
| 332 |
+
print(f"[KeepAlive] 自我请求成功: {time.strftime('%Y-%m-%d %H:%M:%S')}")
|
| 333 |
+
except Exception as e:
|
| 334 |
+
print(f"[KeepAlive] 自我请求失败: {e}")
|
| 335 |
+
|
| 336 |
+
# 每30分钟请求一次,避免Space超时
|
| 337 |
+
time.sleep(1800)
|
| 338 |
+
|
| 339 |
+
def start_flask():
|
| 340 |
+
# 创建默认头像
|
| 341 |
+
create_default_avatar()
|
| 342 |
+
|
| 343 |
+
# 启动保活线程
|
| 344 |
+
keep_alive_thread = threading.Thread(target=keep_alive)
|
| 345 |
+
keep_alive_thread.daemon = True
|
| 346 |
+
keep_alive_thread.start()
|
| 347 |
+
|
| 348 |
+
# 获取PORT环境变量,默认7860(HF Space默认端口)
|
| 349 |
+
# 如果环境变量不存在,就使用5000端口(本地开发环境)
|
| 350 |
+
port = int(os.environ.get('PORT', 5000))
|
| 351 |
+
host = '0.0.0.0' if os.environ.get('SPACE_HOST') else '127.0.0.1'
|
| 352 |
+
|
| 353 |
+
app.run(host=host, port=port)
|
| 354 |
+
|
| 355 |
+
async def main():
|
| 356 |
+
token = config.get("token")
|
| 357 |
+
try:
|
| 358 |
+
print("Loading cogs...")
|
| 359 |
+
|
| 360 |
+
# 先加载AgentManager cog
|
| 361 |
+
try:
|
| 362 |
+
await bot.load_extension("cogs.agent_manager")
|
| 363 |
+
except Exception as e:
|
| 364 |
+
print(f"Failed to load AgentManager: {e}")
|
| 365 |
+
traceback.print_exc()
|
| 366 |
+
|
| 367 |
+
# 然后加载其他cog
|
| 368 |
+
for file in os.listdir("cogs"):
|
| 369 |
+
if file.endswith(".py") and file != "agent_manager.py" and file != "gemini_backup.py":
|
| 370 |
+
try:
|
| 371 |
+
await bot.load_extension(f"cogs.{file[:-3]}")
|
| 372 |
+
except Exception as e:
|
| 373 |
+
print(f"Failed to load extension {file}: {e}")
|
| 374 |
+
traceback.print_exc()
|
| 375 |
+
|
| 376 |
+
print("Starting bot...")
|
| 377 |
+
# Create custom aiohttp session
|
| 378 |
+
connector = aiohttp.TCPConnector(ssl=ssl_context)
|
| 379 |
+
session = aiohttp.ClientSession(connector=connector)
|
| 380 |
+
|
| 381 |
+
# Use custom session
|
| 382 |
+
bot.http.session = session
|
| 383 |
+
|
| 384 |
+
# 启动Flask应用
|
| 385 |
+
flask_thread = threading.Thread(target=start_flask)
|
| 386 |
+
flask_thread.daemon = True # 主程序退出时,Flask线程也会退出
|
| 387 |
+
flask_thread.start()
|
| 388 |
+
|
| 389 |
+
# 是否在Hugging Face Space环境下,不打开浏览器
|
| 390 |
+
if not os.environ.get('SPACE_HOST'):
|
| 391 |
+
webbrowser.open('http://127.0.0.1:5000')
|
| 392 |
+
|
| 393 |
+
await bot.start(token)
|
| 394 |
+
except Exception as e:
|
| 395 |
+
print(f"An error occurred: {e}")
|
| 396 |
+
traceback.print_exc()
|
| 397 |
+
finally:
|
| 398 |
+
await bot.close()
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
if __name__ == "__main__":
|
| 402 |
+
try:
|
| 403 |
+
asyncio.run(main())
|
| 404 |
+
except KeyboardInterrupt:
|
| 405 |
+
print("Bot has been shut down.")
|
| 406 |
+
except Exception as e:
|
| 407 |
+
print(f"Fatal error: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 408 |
traceback.print_exc()
|
static/js/presets.js
CHANGED
|
@@ -204,8 +204,8 @@ function fillPresetForms(data) {
|
|
| 204 |
document.getElementById('chat_system_prompt').value = chatPreset.system_prompt || '';
|
| 205 |
document.getElementById('chat_first_user_message').value = chatPreset.first_user_message || '';
|
| 206 |
document.getElementById('chat_main_content').value = chatPreset.main_content || '';
|
| 207 |
-
document.getElementById('
|
| 208 |
-
document.getElementById('
|
| 209 |
}
|
| 210 |
|
| 211 |
// 翻译预设
|
|
@@ -214,7 +214,8 @@ function fillPresetForms(data) {
|
|
| 214 |
document.getElementById('translate_system_prompt').value = translatePreset.system_prompt || '';
|
| 215 |
document.getElementById('translate_first_user_message').value = translatePreset.first_user_message || '';
|
| 216 |
document.getElementById('translate_main_content').value = translatePreset.main_content || '';
|
| 217 |
-
document.getElementById('
|
|
|
|
| 218 |
}
|
| 219 |
|
| 220 |
// 附件预设
|
|
@@ -223,7 +224,8 @@ function fillPresetForms(data) {
|
|
| 223 |
document.getElementById('attachment_system_prompt').value = attachmentPreset.system_prompt || '';
|
| 224 |
document.getElementById('attachment_first_user_message').value = attachmentPreset.first_user_message || '';
|
| 225 |
document.getElementById('attachment_main_content').value = attachmentPreset.main_content || '';
|
| 226 |
-
document.getElementById('
|
|
|
|
| 227 |
}
|
| 228 |
|
| 229 |
// 引用预设
|
|
@@ -232,7 +234,8 @@ function fillPresetForms(data) {
|
|
| 232 |
document.getElementById('reference_system_prompt').value = referencePreset.system_prompt || '';
|
| 233 |
document.getElementById('reference_first_user_message').value = referencePreset.first_user_message || '';
|
| 234 |
document.getElementById('reference_main_content').value = referencePreset.main_content || '';
|
| 235 |
-
document.getElementById('
|
|
|
|
| 236 |
}
|
| 237 |
|
| 238 |
// Gemini 设置
|
|
@@ -297,8 +300,8 @@ function collectFormData() {
|
|
| 297 |
system_prompt: document.getElementById('chat_system_prompt').value,
|
| 298 |
first_user_message: document.getElementById('chat_first_user_message').value,
|
| 299 |
main_content: document.getElementById('chat_main_content').value,
|
| 300 |
-
|
| 301 |
-
|
| 302 |
};
|
| 303 |
|
| 304 |
// 翻译预设
|
|
@@ -306,7 +309,8 @@ function collectFormData() {
|
|
| 306 |
system_prompt: document.getElementById('translate_system_prompt').value,
|
| 307 |
first_user_message: document.getElementById('translate_first_user_message').value,
|
| 308 |
main_content: document.getElementById('translate_main_content').value,
|
| 309 |
-
|
|
|
|
| 310 |
};
|
| 311 |
|
| 312 |
// 附件预设
|
|
@@ -314,7 +318,8 @@ function collectFormData() {
|
|
| 314 |
system_prompt: document.getElementById('attachment_system_prompt').value,
|
| 315 |
first_user_message: document.getElementById('attachment_first_user_message').value,
|
| 316 |
main_content: document.getElementById('attachment_main_content').value,
|
| 317 |
-
|
|
|
|
| 318 |
};
|
| 319 |
|
| 320 |
// 引用预设
|
|
@@ -322,7 +327,8 @@ function collectFormData() {
|
|
| 322 |
system_prompt: document.getElementById('reference_system_prompt').value,
|
| 323 |
first_user_message: document.getElementById('reference_first_user_message').value,
|
| 324 |
main_content: document.getElementById('reference_main_content').value,
|
| 325 |
-
|
|
|
|
| 326 |
};
|
| 327 |
|
| 328 |
// Gemini 设置
|
|
|
|
| 204 |
document.getElementById('chat_system_prompt').value = chatPreset.system_prompt || '';
|
| 205 |
document.getElementById('chat_first_user_message').value = chatPreset.first_user_message || '';
|
| 206 |
document.getElementById('chat_main_content').value = chatPreset.main_content || '';
|
| 207 |
+
document.getElementById('chat_last_message').value = chatPreset.last_message || '';
|
| 208 |
+
document.getElementById('chat_prefill_assistant_reply').checked = chatPreset.prefill_assistant_reply || false;
|
| 209 |
}
|
| 210 |
|
| 211 |
// 翻译预设
|
|
|
|
| 214 |
document.getElementById('translate_system_prompt').value = translatePreset.system_prompt || '';
|
| 215 |
document.getElementById('translate_first_user_message').value = translatePreset.first_user_message || '';
|
| 216 |
document.getElementById('translate_main_content').value = translatePreset.main_content || '';
|
| 217 |
+
document.getElementById('translate_last_message').value = translatePreset.last_message || '';
|
| 218 |
+
document.getElementById('translate_prefill_assistant_reply').checked = translatePreset.prefill_assistant_reply || false;
|
| 219 |
}
|
| 220 |
|
| 221 |
// 附件预设
|
|
|
|
| 224 |
document.getElementById('attachment_system_prompt').value = attachmentPreset.system_prompt || '';
|
| 225 |
document.getElementById('attachment_first_user_message').value = attachmentPreset.first_user_message || '';
|
| 226 |
document.getElementById('attachment_main_content').value = attachmentPreset.main_content || '';
|
| 227 |
+
document.getElementById('attachment_last_message').value = attachmentPreset.last_message || '';
|
| 228 |
+
document.getElementById('attachment_prefill_assistant_reply').checked = attachmentPreset.prefill_assistant_reply || false;
|
| 229 |
}
|
| 230 |
|
| 231 |
// 引用预设
|
|
|
|
| 234 |
document.getElementById('reference_system_prompt').value = referencePreset.system_prompt || '';
|
| 235 |
document.getElementById('reference_first_user_message').value = referencePreset.first_user_message || '';
|
| 236 |
document.getElementById('reference_main_content').value = referencePreset.main_content || '';
|
| 237 |
+
document.getElementById('reference_last_message').value = referencePreset.last_message || '';
|
| 238 |
+
document.getElementById('reference_prefill_assistant_reply').checked = referencePreset.prefill_assistant_reply || false;
|
| 239 |
}
|
| 240 |
|
| 241 |
// Gemini 设置
|
|
|
|
| 300 |
system_prompt: document.getElementById('chat_system_prompt').value,
|
| 301 |
first_user_message: document.getElementById('chat_first_user_message').value,
|
| 302 |
main_content: document.getElementById('chat_main_content').value,
|
| 303 |
+
last_message: document.getElementById('chat_last_message').value,
|
| 304 |
+
prefill_assistant_reply: document.getElementById('chat_prefill_assistant_reply').checked
|
| 305 |
};
|
| 306 |
|
| 307 |
// 翻译预设
|
|
|
|
| 309 |
system_prompt: document.getElementById('translate_system_prompt').value,
|
| 310 |
first_user_message: document.getElementById('translate_first_user_message').value,
|
| 311 |
main_content: document.getElementById('translate_main_content').value,
|
| 312 |
+
last_message: document.getElementById('translate_last_message').value,
|
| 313 |
+
prefill_assistant_reply: document.getElementById('translate_prefill_assistant_reply').checked
|
| 314 |
};
|
| 315 |
|
| 316 |
// 附件预设
|
|
|
|
| 318 |
system_prompt: document.getElementById('attachment_system_prompt').value,
|
| 319 |
first_user_message: document.getElementById('attachment_first_user_message').value,
|
| 320 |
main_content: document.getElementById('attachment_main_content').value,
|
| 321 |
+
last_message: document.getElementById('attachment_last_message').value,
|
| 322 |
+
prefill_assistant_reply: document.getElementById('attachment_prefill_assistant_reply').checked
|
| 323 |
};
|
| 324 |
|
| 325 |
// 引用预设
|
|
|
|
| 327 |
system_prompt: document.getElementById('reference_system_prompt').value,
|
| 328 |
first_user_message: document.getElementById('reference_first_user_message').value,
|
| 329 |
main_content: document.getElementById('reference_main_content').value,
|
| 330 |
+
last_message: document.getElementById('reference_last_message').value,
|
| 331 |
+
prefill_assistant_reply: document.getElementById('reference_prefill_assistant_reply').checked
|
| 332 |
};
|
| 333 |
|
| 334 |
// Gemini 设置
|
templates/presets.html
CHANGED
|
@@ -123,15 +123,12 @@
|
|
| 123 |
<textarea class="form-control" id="chat_main_content" rows="10"></textarea>
|
| 124 |
</div>
|
| 125 |
<div class="mb-3">
|
| 126 |
-
<label for="
|
| 127 |
-
<input type="text" class="form-control" id="
|
| 128 |
</div>
|
| 129 |
-
<div class="form-check mb-3">
|
| 130 |
-
<input class="form-check-input" type="checkbox" id="
|
| 131 |
-
<label class="form-check-label" for="
|
| 132 |
-
将最后一条消息预填充为助手回应
|
| 133 |
-
</label>
|
| 134 |
-
<small class="form-text text-muted d-block">勾选后,在生成提示时,上下文中的最后一条消息会被视为机器人的回复。</small>
|
| 135 |
</div>
|
| 136 |
</form>
|
| 137 |
</div>
|
|
@@ -152,8 +149,12 @@
|
|
| 152 |
<textarea class="form-control" id="translate_main_content" rows="10"></textarea>
|
| 153 |
</div>
|
| 154 |
<div class="mb-3">
|
| 155 |
-
<label for="
|
| 156 |
-
<input type="text" class="form-control" id="
|
|
|
|
|
|
|
|
|
|
|
|
|
| 157 |
</div>
|
| 158 |
</form>
|
| 159 |
</div>
|
|
@@ -174,8 +175,12 @@
|
|
| 174 |
<textarea class="form-control" id="attachment_main_content" rows="10"></textarea>
|
| 175 |
</div>
|
| 176 |
<div class="mb-3">
|
| 177 |
-
<label for="
|
| 178 |
-
<input type="text" class="form-control" id="
|
|
|
|
|
|
|
|
|
|
|
|
|
| 179 |
</div>
|
| 180 |
</form>
|
| 181 |
</div>
|
|
@@ -196,8 +201,12 @@
|
|
| 196 |
<textarea class="form-control" id="reference_main_content" rows="10"></textarea>
|
| 197 |
</div>
|
| 198 |
<div class="mb-3">
|
| 199 |
-
<label for="
|
| 200 |
-
<input type="text" class="form-control" id="
|
|
|
|
|
|
|
|
|
|
|
|
|
| 201 |
</div>
|
| 202 |
</form>
|
| 203 |
</div>
|
|
|
|
| 123 |
<textarea class="form-control" id="chat_main_content" rows="10"></textarea>
|
| 124 |
</div>
|
| 125 |
<div class="mb-3">
|
| 126 |
+
<label for="chat_last_message" class="form-label">最后一条消息</label>
|
| 127 |
+
<input type="text" class="form-control" id="chat_last_message">
|
| 128 |
</div>
|
| 129 |
+
<div class="form-check form-switch mb-3">
|
| 130 |
+
<input class="form-check-input" type="checkbox" role="switch" id="chat_prefill_assistant_reply">
|
| 131 |
+
<label class="form-check-label" for="chat_prefill_assistant_reply">预填充模型回复 (最后一条消息的角色为'model')</label>
|
|
|
|
|
|
|
|
|
|
| 132 |
</div>
|
| 133 |
</form>
|
| 134 |
</div>
|
|
|
|
| 149 |
<textarea class="form-control" id="translate_main_content" rows="10"></textarea>
|
| 150 |
</div>
|
| 151 |
<div class="mb-3">
|
| 152 |
+
<label for="translate_last_message" class="form-label">最后一条消息</label>
|
| 153 |
+
<input type="text" class="form-control" id="translate_last_message">
|
| 154 |
+
</div>
|
| 155 |
+
<div class="form-check form-switch mb-3">
|
| 156 |
+
<input class="form-check-input" type="checkbox" role="switch" id="translate_prefill_assistant_reply">
|
| 157 |
+
<label class="form-check-label" for="translate_prefill_assistant_reply">预填充模型回复 (最后一条消息的角色为'model')</label>
|
| 158 |
</div>
|
| 159 |
</form>
|
| 160 |
</div>
|
|
|
|
| 175 |
<textarea class="form-control" id="attachment_main_content" rows="10"></textarea>
|
| 176 |
</div>
|
| 177 |
<div class="mb-3">
|
| 178 |
+
<label for="attachment_last_message" class="form-label">最后一条消息</label>
|
| 179 |
+
<input type="text" class="form-control" id="attachment_last_message">
|
| 180 |
+
</div>
|
| 181 |
+
<div class="form-check form-switch mb-3">
|
| 182 |
+
<input class="form-check-input" type="checkbox" role="switch" id="attachment_prefill_assistant_reply">
|
| 183 |
+
<label class="form-check-label" for="attachment_prefill_assistant_reply">预填充模型回复 (最后一条消息的角色为'model')</label>
|
| 184 |
</div>
|
| 185 |
</form>
|
| 186 |
</div>
|
|
|
|
| 201 |
<textarea class="form-control" id="reference_main_content" rows="10"></textarea>
|
| 202 |
</div>
|
| 203 |
<div class="mb-3">
|
| 204 |
+
<label for="reference_last_message" class="form-label">最后一条消息</label>
|
| 205 |
+
<input type="text" class="form-control" id="reference_last_message">
|
| 206 |
+
</div>
|
| 207 |
+
<div class="form-check form-switch mb-3">
|
| 208 |
+
<input class="form-check-input" type="checkbox" role="switch" id="reference_prefill_assistant_reply">
|
| 209 |
+
<label class="form-check-label" for="reference_prefill_assistant_reply">预填充模型回复 (最后一条消息的角色为'model')</label>
|
| 210 |
</div>
|
| 211 |
</form>
|
| 212 |
</div>
|
utils/context_prompter.py
CHANGED
|
@@ -1,311 +1,301 @@
|
|
| 1 |
-
import discord
|
| 2 |
-
import pytz
|
| 3 |
-
import re
|
| 4 |
-
from discord.ext import commands
|
| 5 |
-
from utils.func import get_time, now
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
self.agent_manager = agent_manager
|
| 15 |
-
|
| 16 |
-
def
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
"""
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
# 匹配
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
#
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
)
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
context
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
<
|
| 295 |
-
{
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
<reference>
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
<author>
|
| 303 |
-
{reference.author.display_name} ({reference.author.name}) ({self.get_msg_time(reference)})
|
| 304 |
-
</author>
|
| 305 |
-
You are a skilled muti-lingual translator, currently doing a translation job in a discord server. You'll get a message which you need to translate into {target_language} with context. You only need to supply the translation according to the context without any additional information. Don't act like a machine, translate smoothly like a human without being too informal.
|
| 306 |
-
Your translation should not include the author's name and the time.
|
| 307 |
-
Now is {now(tz=self.tz)}.
|
| 308 |
-
{ctx.author.display_name} ({ctx.author.name}) is asking you to translate the message in `<reference>` into {target_language} under the context (refer to <context>). The message is from `<author>`, so consider the context and try to understand the message before translating.
|
| 309 |
-
Your translation:
|
| 310 |
-
"""
|
| 311 |
-
return prompt
|
|
|
|
| 1 |
+
import discord
|
| 2 |
+
import pytz
|
| 3 |
+
import re
|
| 4 |
+
from discord.ext import commands
|
| 5 |
+
from utils.func import get_time, now
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ContextPrompter:
|
| 9 |
+
def __init__(self):
|
| 10 |
+
self.tz = pytz.timezone("Asia/Shanghai")
|
| 11 |
+
self.agent_manager = None
|
| 12 |
+
|
| 13 |
+
def set_agent_manager(self, agent_manager):
|
| 14 |
+
self.agent_manager = agent_manager
|
| 15 |
+
|
| 16 |
+
def set_tz(self, tz: str):
|
| 17 |
+
try:
|
| 18 |
+
self.tz = pytz.timezone(tz)
|
| 19 |
+
except Exception as e:
|
| 20 |
+
print(e)
|
| 21 |
+
|
| 22 |
+
def maltose_regex(self, context: str) -> str:
|
| 23 |
+
"""
|
| 24 |
+
使用正则表达式预处理上下文内容,删除命令触发词
|
| 25 |
+
例如: .hey, .yo claude, .yoo 等,这些只是触发词对机器人理解没有用处
|
| 26 |
+
"""
|
| 27 |
+
# 匹配并删除命令触发词
|
| 28 |
+
# 1. 匹配 .hey, .yo, .yoo 后跟空格和可能的参数
|
| 29 |
+
pattern1 = r'\.hey\s+|\.yo\s+[a-zA-Z]+\s+|\.yoo\s+'
|
| 30 |
+
# 2. 匹配独立的命令 .hey, .yo, .yoo (行首或前面有空格,后面是行尾或空格)
|
| 31 |
+
pattern2 = r'(^|\s)\.hey($|\s)|(^|\s)\.yo($|\s)|(^|\s)\.yoo($|\s)'
|
| 32 |
+
|
| 33 |
+
# 应用正则替换
|
| 34 |
+
context = re.sub(pattern1, '', context)
|
| 35 |
+
context = re.sub(pattern2, '', context)
|
| 36 |
+
|
| 37 |
+
return context.strip()
|
| 38 |
+
|
| 39 |
+
def get_msg_time(self, msg: discord.Message) -> str:
|
| 40 |
+
time = msg.created_at if msg.edited_at is None else msg.edited_at
|
| 41 |
+
return get_time(time, tz=self.tz)
|
| 42 |
+
|
| 43 |
+
async def get_context_for_prompt(
|
| 44 |
+
self,
|
| 45 |
+
ctx: commands.Context,
|
| 46 |
+
context_length: int,
|
| 47 |
+
before_message=None,
|
| 48 |
+
after_message=None,
|
| 49 |
+
after_message_context_length=0,
|
| 50 |
+
):
|
| 51 |
+
context_msg = []
|
| 52 |
+
if before_message is not None and after_message is not None:
|
| 53 |
+
async for msg in ctx.channel.history(
|
| 54 |
+
limit=context_length + 1, before=before_message
|
| 55 |
+
):
|
| 56 |
+
context_msg.append(
|
| 57 |
+
f"{msg.author.display_name} ({msg.author.name}) ({self.get_msg_time(msg)}): {msg.content}"
|
| 58 |
+
)
|
| 59 |
+
context_msg.reverse()
|
| 60 |
+
context_msg.append(
|
| 61 |
+
f"{after_message.author.display_name} ({after_message.author.name}) ({self.get_msg_time(after_message)}): {after_message.content}"
|
| 62 |
+
)
|
| 63 |
+
async for msg in ctx.channel.history(
|
| 64 |
+
limit=after_message_context_length + 1, after=after_message
|
| 65 |
+
):
|
| 66 |
+
context_msg.append(
|
| 67 |
+
f"{msg.author.display_name} ({msg.author.name}) ({self.get_msg_time(msg)}): {msg.content}"
|
| 68 |
+
)
|
| 69 |
+
elif before_message is not None:
|
| 70 |
+
async for msg in ctx.channel.history(
|
| 71 |
+
limit=context_length + 1, before=before_message
|
| 72 |
+
):
|
| 73 |
+
context_msg.append(
|
| 74 |
+
f"{msg.author.display_name} ({msg.author.name}) ({self.get_msg_time(msg)}): {msg.content}"
|
| 75 |
+
)
|
| 76 |
+
context_msg.reverse()
|
| 77 |
+
elif after_message is not None:
|
| 78 |
+
async for msg in ctx.channel.history(
|
| 79 |
+
limit=context_length + 1, after=after_message
|
| 80 |
+
):
|
| 81 |
+
context_msg.append(
|
| 82 |
+
f"{msg.author.display_name} ({msg.author.name}) ({self.get_msg_time(msg)}): {msg.content}"
|
| 83 |
+
)
|
| 84 |
+
else:
|
| 85 |
+
async for msg in ctx.channel.history(
|
| 86 |
+
limit=context_length + 1, before=ctx.message
|
| 87 |
+
):
|
| 88 |
+
context_msg.append(
|
| 89 |
+
f"{msg.author.display_name} ({msg.author.name}) ({self.get_msg_time(msg)}): {msg.content}"
|
| 90 |
+
)
|
| 91 |
+
context_msg.reverse()
|
| 92 |
+
return "\n".join(context_msg)
|
| 93 |
+
|
| 94 |
+
def _get_template(self, template_name, channel_id=None):
|
| 95 |
+
"""获取模板内容"""
|
| 96 |
+
if self.agent_manager:
|
| 97 |
+
return self.agent_manager.get_preset_file(template_name, channel_id)
|
| 98 |
+
else:
|
| 99 |
+
# 如果没有设置agent_manager,返回默认模板
|
| 100 |
+
return None
|
| 101 |
+
|
| 102 |
+
async def chat_prompt(
|
| 103 |
+
self,
|
| 104 |
+
ctx: commands.Context,
|
| 105 |
+
context_length: int,
|
| 106 |
+
question: str,
|
| 107 |
+
name: str = None,
|
| 108 |
+
):
|
| 109 |
+
context = await self.get_context_for_prompt(ctx, context_length)
|
| 110 |
+
# 应用正则预处理
|
| 111 |
+
context = self.maltose_regex(context)
|
| 112 |
+
name = name if name else ctx.me.display_name
|
| 113 |
+
|
| 114 |
+
# 获取预设模板
|
| 115 |
+
if self.agent_manager:
|
| 116 |
+
# 获取预设JSON
|
| 117 |
+
preset = self.agent_manager.get_preset_json("chat_preset.json", ctx.channel.id)
|
| 118 |
+
if preset and "main_content" in preset:
|
| 119 |
+
# 使用预设中的main_content,并替换其中的模板变量
|
| 120 |
+
main_content = preset["main_content"]
|
| 121 |
+
return main_content.format(
|
| 122 |
+
context=context,
|
| 123 |
+
question=question,
|
| 124 |
+
name=name,
|
| 125 |
+
bot_name=ctx.me.name,
|
| 126 |
+
current_time=now(tz=self.tz),
|
| 127 |
+
user_display_name=ctx.author.display_name,
|
| 128 |
+
user_name=ctx.author.name
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
# 回退到原始模板
|
| 132 |
+
prompt = f"""
|
| 133 |
+
<context>
|
| 134 |
+
{context}
|
| 135 |
+
</context>
|
| 136 |
+
<question>
|
| 137 |
+
{question}
|
| 138 |
+
</question>
|
| 139 |
+
You are {name} ({ctx.me.name}), chatting in a discord server.
|
| 140 |
+
Speak naturally like a human who talks, and don't use phrases like 'according to the context' since humans never talk like that. Remember the Language is Chinese unless the user specifies otherwise! Avoid explicitly mentioning someone's name. If you have to mention someone (try to avoid this case), use their display name (the name that appears outside the parentheses).
|
| 141 |
+
Now is {now(tz=self.tz)}.
|
| 142 |
+
{ctx.author.display_name} ({ctx.author.name}) is asking you a question (refer to `<question>`).
|
| 143 |
+
Consider the context in `<context>` and reply now.
|
| 144 |
+
Avoid using ellipsis!
|
| 145 |
+
Your reply:
|
| 146 |
+
"""
|
| 147 |
+
return prompt
|
| 148 |
+
|
| 149 |
+
async def chat_prompt_with_reference(
|
| 150 |
+
self,
|
| 151 |
+
ctx: commands.Context,
|
| 152 |
+
context_length: int,
|
| 153 |
+
after_message_context_length: int,
|
| 154 |
+
question: str,
|
| 155 |
+
reference: discord.Message,
|
| 156 |
+
name: str = None,
|
| 157 |
+
):
|
| 158 |
+
context = await self.get_context_for_prompt(
|
| 159 |
+
ctx, context_length, reference, after_message_context_length=after_message_context_length
|
| 160 |
+
)
|
| 161 |
+
# 应用正则预处理
|
| 162 |
+
context = self.maltose_regex(context)
|
| 163 |
+
name = name if name else ctx.me.display_name
|
| 164 |
+
|
| 165 |
+
# 获取预设模板
|
| 166 |
+
if self.agent_manager:
|
| 167 |
+
# 获取预设JSON
|
| 168 |
+
preset = self.agent_manager.get_preset_json("reference_preset.json", ctx.channel.id)
|
| 169 |
+
if preset and "main_content" in preset:
|
| 170 |
+
# 使用预设中的main_content,并替换其中的模板变量
|
| 171 |
+
main_content = preset["main_content"]
|
| 172 |
+
return main_content.format(
|
| 173 |
+
context=context,
|
| 174 |
+
question=question,
|
| 175 |
+
name=name,
|
| 176 |
+
bot_name=ctx.me.name,
|
| 177 |
+
current_time=now(tz=self.tz),
|
| 178 |
+
user_display_name=ctx.author.display_name,
|
| 179 |
+
user_name=ctx.author.name,
|
| 180 |
+
reference_user_display_name=reference.author.display_name,
|
| 181 |
+
reference_user_name=reference.author.name,
|
| 182 |
+
reference_time=self.get_msg_time(reference),
|
| 183 |
+
reference_content=reference.content
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
# 回退到原始模板
|
| 187 |
+
prompt = f"""
|
| 188 |
+
<context>
|
| 189 |
+
{context}
|
| 190 |
+
</context>
|
| 191 |
+
<question>
|
| 192 |
+
{question}
|
| 193 |
+
</question>
|
| 194 |
+
<reference>
|
| 195 |
+
{reference.author.display_name} ({reference.author.name}) ({self.get_msg_time(reference)}): {reference.content}
|
| 196 |
+
</reference>
|
| 197 |
+
You are {name} ({ctx.me.name}), chatting in a discord server.
|
| 198 |
+
Speak naturally like a human who talks, and don't use phrases like 'according to the context' since humans never talk like that. Remember the Language is Chinese unless the user specifies otherwise! Avoid explicitly mentioning someone's name. If you have to mention someone (try to avoid this case), use their display name (the name that appears outside the parentheses).
|
| 199 |
+
Now is {now(tz=self.tz)}.
|
| 200 |
+
{ctx.author.display_name} ({ctx.author.name}) is asking you a question (refer to `<question>`) about the message above (refer to `<reference>`).
|
| 201 |
+
Consider the context in `<context>` and reply now.
|
| 202 |
+
Avoid using ellipsis!
|
| 203 |
+
Your reply:
|
| 204 |
+
"""
|
| 205 |
+
return prompt
|
| 206 |
+
|
| 207 |
+
async def chat_prompt_with_attachment(
|
| 208 |
+
self,
|
| 209 |
+
ctx: commands.Context,
|
| 210 |
+
question: str,
|
| 211 |
+
reference: discord.Message,
|
| 212 |
+
):
|
| 213 |
+
content = reference.content
|
| 214 |
+
if content == "":
|
| 215 |
+
content = "[No content, only attachments]"
|
| 216 |
+
|
| 217 |
+
# 使用模板
|
| 218 |
+
template = self._get_template("chat_prompt_with_attachment.txt", ctx.channel.id)
|
| 219 |
+
if template:
|
| 220 |
+
return template.format(
|
| 221 |
+
question=question,
|
| 222 |
+
name=ctx.me.display_name,
|
| 223 |
+
bot_name=ctx.me.name,
|
| 224 |
+
current_time=now(tz=self.tz),
|
| 225 |
+
user_display_name=ctx.author.display_name,
|
| 226 |
+
user_name=ctx.author.name,
|
| 227 |
+
reference_user_display_name=reference.author.display_name,
|
| 228 |
+
reference_user_name=reference.author.name,
|
| 229 |
+
reference_time=self.get_msg_time(reference),
|
| 230 |
+
reference_content=content
|
| 231 |
+
)
|
| 232 |
+
|
| 233 |
+
# 回退到原始模板
|
| 234 |
+
prompt: str = f"""
|
| 235 |
+
<question>
|
| 236 |
+
{question}
|
| 237 |
+
</question>
|
| 238 |
+
<reference>
|
| 239 |
+
{reference.author.display_name} ({reference.author.name}) ({self.get_msg_time(reference)}): {content}
|
| 240 |
+
</reference>
|
| 241 |
+
You are {ctx.me.display_name} ({ctx.me.name}), chatting in a discord server.
|
| 242 |
+
Speak naturally like a human who talks, and don't use phrases like 'according to the context' since humans never talk like that. Remember the Language is Chinese unless the user specifies otherwise! Avoid explicitly mentioning someone's name. If you have to mention someone (try to avoid this case), use their display name (the name that appears outside the parentheses).
|
| 243 |
+
Now is {now(tz=self.tz)}.
|
| 244 |
+
{ctx.author.display_name} ({ctx.author.name}) is asking you a question (refer to `<question>`) about the message (refer to `<reference>`) with the ATTACHMENT FILE.
|
| 245 |
+
Analyze the attachment file and reply now.
|
| 246 |
+
Avoid using ellipsis!
|
| 247 |
+
Your reply:
|
| 248 |
+
"""
|
| 249 |
+
return prompt
|
| 250 |
+
|
| 251 |
+
async def translate_prompt(
|
| 252 |
+
self,
|
| 253 |
+
ctx: commands.Context,
|
| 254 |
+
context_length: int,
|
| 255 |
+
reference: discord.Message,
|
| 256 |
+
after_message_context_length: int,
|
| 257 |
+
target_language: str,
|
| 258 |
+
):
|
| 259 |
+
# 获取上下文内容
|
| 260 |
+
context = await self.get_context_for_prompt(
|
| 261 |
+
ctx, context_length, reference, after_message_context_length=after_message_context_length
|
| 262 |
+
)
|
| 263 |
+
# 应用正则预处理
|
| 264 |
+
context = self.maltose_regex(context)
|
| 265 |
+
|
| 266 |
+
# 使用模板
|
| 267 |
+
template = self._get_template("translate_prompt.txt", ctx.channel.id)
|
| 268 |
+
if template:
|
| 269 |
+
return template.format(
|
| 270 |
+
context=context,
|
| 271 |
+
target_language=target_language,
|
| 272 |
+
reference_content=reference.content,
|
| 273 |
+
bot_name=ctx.me.name,
|
| 274 |
+
current_time=now(tz=self.tz),
|
| 275 |
+
user_display_name=ctx.author.display_name,
|
| 276 |
+
user_name=ctx.author.name,
|
| 277 |
+
reference_user_display_name=reference.author.display_name,
|
| 278 |
+
reference_user_name=reference.author.name,
|
| 279 |
+
reference_time=self.get_msg_time(reference)
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
# 回退到原始模板
|
| 283 |
+
prompt = f"""
|
| 284 |
+
<context>
|
| 285 |
+
{context}
|
| 286 |
+
</context>
|
| 287 |
+
`<reference>` is the message you need to translate.
|
| 288 |
+
<reference>
|
| 289 |
+
{reference.content}
|
| 290 |
+
</reference>
|
| 291 |
+
This message is from `<author>`.
|
| 292 |
+
<author>
|
| 293 |
+
{reference.author.display_name} ({reference.author.name}) ({self.get_msg_time(reference)})
|
| 294 |
+
</author>
|
| 295 |
+
You are a skilled muti-lingual translator, currently doing a translation job in a discord server. You'll get a message which you need to translate into {target_language} with context. You only need to supply the translation according to the context without any additional information. Don't act like a machine, translate smoothly like a human without being too informal.
|
| 296 |
+
Your translation should not include the author's name and the time.
|
| 297 |
+
Now is {now(tz=self.tz)}.
|
| 298 |
+
{ctx.author.display_name} ({ctx.author.name}) is asking you to translate the message in `<reference>` into {target_language} under the context (refer to <context>). The message is from `<author>`, so consider the context and try to understand the message before translating.
|
| 299 |
+
Your translation:
|
| 300 |
+
"""
|
| 301 |
+
return prompt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|