code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.1.0 # language: julia # name: julia-1.1 # --- using Plots gr() lowerBoundsData=[ 10 0.009111901 11 0.009111901 12 0.009111901 13 0.009111901 14 0.009111901 15 0.009111901 16 0.009111901 17 0.009111901 18 0.009111901 19 0.009111901 20 0.009111901 21 0.009111901 22 0.009111901 23 0.009111901 24 0.009111901 25 0.009111901 26 0.009111901 27 0.009111901 28 0.009111901 29 0.009111901 30 0.009111901 31 0.009111901 32 0.009111901 33 0.009111901 34 0.009111901 35 0.009111901 36 0.009111901 37 0.009111901 38 0.009111901 39 0.009111901 40 0.009111901 41 0.009111901 42 0.009112683 43 0.009112683 44 0.009112683 45 0.009112683 46 0.009112683 47 0.009112683 48 0.009112683 49 0.009112683 50 0.009112683 51 0.009112683 52 0.009112683 53 0.009112683 54 0.009112683 55 0.009112683 56 0.009112683 57 0.009112683 58 0.009112683 59 0.009112683 60 0.009112683 61 0.009119239 62 0.009119239 63 0.009119239 64 0.009119239 65 0.009156315 66 0.009156315 67 0.009156315 68 0.009156315 69 0.009156315 70 0.009156315 71 0.009156315 72 0.009156315 73 0.009156315 74 0.009156315 75 0.009156315 76 0.009156315 77 0.009156315 78 0.009156315 79 0.009156315 80 0.009156315 81 0.009156315 82 0.009156315 83 0.009156315 84 0.009156315 85 0.009156315 86 0.009174682 87 0.009174682 88 0.009174682 89 0.009174682 90 0.009174682 91 0.009174682 92 0.009174682 93 0.009174682 94 0.009174682 95 0.009174682 96 0.009174682 97 0.009174682 98 0.009174682 99 0.009174682 100 0.009174682 101 0.009174682 102 0.009174682 103 0.009174682 104 0.009174682 105 0.009174682 106 0.009174682 107 0.009174682 108 0.009174682 109 0.009176541 110 0.009176541 111 0.009176541 112 0.009176541 113 0.009183562 114 0.009183562 115 0.009183562 116 0.009183562 117 0.009183562 118 0.009183562 119 0.009183562 120 0.009183562 121 0.009189362 122 0.009189362 123 0.009189362 124 
0.009189362 125 0.009189362 126 0.009189717 127 0.009189717 128 0.009189717 129 0.009189717 130 0.009189843 131 0.009191355 132 0.009195094 133 0.009195437 134 0.009195437 135 0.009195437 136 0.009196579 137 0.009196579 138 0.009196579 139 0.009196579 140 0.009198987 141 0.009203291 142 0.009203291 143 0.009204676 144 0.009205487 145 0.009205487 146 0.009205487 147 0.009205487 148 0.009206255 149 0.009206255 150 0.009206303 151 0.009206303 152 0.009207443 153 0.009207443 154 0.009207443 155 0.009207443 156 0.009207443 157 0.009207443 158 0.009207443 159 0.009207443 160 0.009207443 161 0.009207443 162 0.009207443 163 0.009207443 164 0.009207443 165 0.009207443 166 0.009207443 167 0.009207443 168 0.009207443 169 0.009207443 170 0.009207443 171 0.009207443 172 0.009207443 173 0.009207443 174 0.009207443 175 0.009207443 176 0.009207443 177 0.00920922 178 0.009209349 179 0.009212602 180 0.009212602 181 0.009212602 182 0.009212602 183 0.009212602 184 0.009212602 185 0.009212602 186 0.009212615 187 0.009212615 188 0.009212615 189 0.009213055 190 0.009213055 191 0.009213055 192 0.009213055 193 0.009216501 194 0.009220584 195 0.009220584 196 0.009220584 197 0.009220584 198 0.009228494 199 0.009228494 200 0.009228494 201 0.009228494 202 0.009228545 203 0.009228545 204 0.009228545 205 0.009228545 206 0.009228545 207 0.009228545 208 0.009228941 209 0.009229473 210 0.009229473 211 0.009229473 212 0.009229473 213 0.009229473 214 0.009229473 215 0.009229473 216 0.009229473 217 0.009229473 218 0.009229473 219 0.009229473 220 0.009229473 221 0.009229473 222 0.009229473 223 0.009229473 224 0.009229473 225 0.009229473 226 0.009229473 227 0.009229473 228 0.009229473 229 0.009229473 230 0.009229473 231 0.009229473 232 0.009229473 233 0.009229473 234 0.009229473 235 0.009230343 236 0.009230343 237 0.009230811 238 0.009230811 239 0.009230811 240 0.0092311 241 0.0092311 242 0.0092311 243 0.0092311 244 0.0092313 245 0.009231725 246 0.009232037 247 0.009232132 248 0.009232132 249 
0.009232132 250 0.009232132 251 0.009232271 252 0.009232271 253 0.009232601 254 0.009232601 255 0.009233632 256 0.009233632 257 0.009233632 258 0.009233948 259 0.009233948 260 0.009233948 261 0.009234472 262 0.009234877 263 0.009234878 264 0.009234878 265 0.00923569 266 0.009235987 267 0.009237128 268 0.009237613 269 0.009237918 270 0.009237918 271 0.009237994 272 0.009237994 273 0.009238163 274 0.009238163 275 0.009238163 276 0.00923823 277 0.009238498 278 0.009238498 279 0.009238598 280 0.009239133 281 0.009239133 282 0.009239133 283 0.009239168 284 0.009239168 285 0.009239168 286 0.009239168 287 0.009239168 288 0.009239168 289 0.009239168 290 0.009239168 291 0.009239168 292 0.009239474 293 0.009239474 294 0.009239499 295 0.009239499 296 0.00923953 297 0.00923953 298 0.009239649 299 0.009239649 300 0.009239649 301 0.009239649 302 0.009239649 303 0.009239649 304 0.009239649 305 0.009239649 306 0.009239649 307 0.009239649 308 0.009239649 309 0.009239649 310 0.009239649 311 0.009239649 312 0.009239649 313 0.009239649 314 0.009240091 315 0.009240091 316 0.009240185 317 0.009240691 318 0.009241175 319 0.009241265 320 0.009241501 321 0.009241501 322 0.009241811 323 0.009241811 324 0.009241901 325 0.009242284 326 0.009242284 327 0.009242657 328 0.009242954 329 0.009243323 330 0.009243323 331 0.009243323 332 0.009243323 333 0.009243323 334 0.009243594 335 0.009243594 336 0.009243862 337 0.009243862 338 0.009243868 339 0.009244044 340 0.009244044 341 0.009244344 342 0.009244415 343 0.009244415 344 0.009244559 345 0.009244565 346 0.009244687 347 0.009244687 348 0.009244687 349 0.009244687 350 0.009244928 351 0.009244928 352 0.009244945 353 0.009245102 354 0.009245102 355 0.009245261 356 0.009245508 357 0.009245675 358 0.009245675 359 0.009245675 360 0.009245675 361 0.009246057 362 0.009246098 363 0.009246401 364 0.009246621 365 0.009246622 366 0.009246637 367 0.009246637 368 0.009246637 369 0.009246637 370 0.009246738 371 0.009246741 372 0.009246741 373 0.009246741 374 
0.009246741 375 0.009247122 376 0.009247122 377 0.009247312 378 0.009247596 379 0.009247634 380 0.009247763 381 0.009247931 382 0.009247931 383 0.009247931 384 0.009247931 385 0.009247931 386 0.009247931 387 0.009248837 388 0.009248982 389 0.009248993 390 0.009248993 391 0.009249225 392 0.009249225 393 0.009249242 394 0.009249242 395 0.009249242 396 0.009249242 397 0.009249242 398 0.009249242 399 0.009249242 400 0.009249242 401 0.009249242 402 0.009249242 403 0.009249242 404 0.009249331 405 0.009249382 406 0.009249504 407 0.009249505 408 0.009249505 409 0.009249605 410 0.009249605 411 0.009249671 412 0.009249671 413 0.009249671 414 0.009249671 415 0.009249671 416 0.009249828 417 0.009249848 418 0.009249848 419 0.009249848 420 0.009249958 421 0.009249958 422 0.009249978 423 0.009250142 424 0.00925018 425 0.00925018 426 0.00925018 427 0.00925027 428 0.009250289 429 0.009250289 430 0.009250289 431 0.009250406 432 0.009250426 433 0.00925055 434 0.00925055 435 0.009251028 436 0.009251028 437 0.009251059 438 0.009251082 439 0.009251082 440 0.00925112 441 0.00925112 442 0.00925112 443 0.009251148 444 0.009251165 445 0.009251165 446 0.009251165 447 0.009251165 448 0.009251165 449 0.009251183 450 0.009251183 451 0.009251241 452 0.009251241 453 0.009251241 454 0.009251291 455 0.009251291 456 0.009251291 457 0.009251291 458 0.009251291 459 0.009251291 460 0.009251291 461 0.009251291 462 0.009251291 463 0.009251291 464 0.009251291 465 0.009251291 466 0.009251397 467 0.009251609 468 0.009251609 469 0.009251609 470 0.00925186 471 0.009252032 472 0.009252032 473 0.009252032 474 0.009252032 475 0.009252032 476 0.009252185 477 0.009252229 478 0.009252282 479 0.009252509 480 0.009252649 481 0.009252924 482 0.009253162 483 0.009253308 484 0.009253315 485 0.009253407 486 0.009253766 487 0.009253766 488 0.009253803 489 0.009253955 490 0.009253971 491 0.009254145 492 0.009254174 493 0.009254174 494 0.009254174 495 0.009254174 496 0.009254174 497 0.009254174 498 0.009254283 499 
0.009254486 500 0.009254587 501 0.009254587 502 0.009254587 503 0.009254624 504 0.00925512 505 0.009255228 506 0.009255236 507 0.009255236 508 0.009255473 509 0.009255624 510 0.009255656 511 0.009255755 512 0.009256027 513 0.009256027 514 0.009256027 515 0.009256206 516 0.009256206 517 0.00925645 518 0.00925645 519 0.00925645 520 0.009256474 521 0.009256474 522 0.009256475 523 0.009256475 524 0.009256539 525 0.009256831 526 0.009256831 527 0.00925708 528 0.009257084 529 0.009257196 530 0.009257528 531 0.009257781 532 0.009257792 533 0.009257893 534 0.009257893 535 0.009258286 536 0.009258322 537 0.009258507 538 0.009258519 539 0.009258671 540 0.00925872 541 0.00925872 542 0.009258754 543 0.009258758 544 0.009259156 545 0.009259156 546 0.009259162 547 0.009259162 548 0.009259162 549 0.009259162 550 0.009259162 551 0.009259162 552 0.009259162 553 0.009259162 554 0.009259162 555 0.009259162 556 0.009259162 557 0.009259187 558 0.009259187 559 0.009259187 560 0.009259187 561 0.009259187 562 0.009259187 563 0.009259187 564 0.009259187 565 0.009259187 566 0.009259468 567 0.009259629 568 0.009260053 569 0.009260079 570 0.009260079 571 0.009260079 572 0.009260142 573 0.009260208 574 0.009260364 575 0.009260373 576 0.009260641 577 0.009260643 578 0.009260682 579 0.009260682 580 0.009260682 581 0.009260682 582 0.009260682 583 0.009260682 584 0.009260682 585 0.009260765 586 0.009260907 587 0.009260907 588 0.009261052 589 0.009261119 590 0.009261379 591 0.009261442 592 0.009261656 593 0.00926181 594 0.009261821 595 0.009261837 596 0.009261837 597 0.009262113 598 0.009262431 599 0.009262448 600 0.009262567 601 0.009262806 602 0.009262807 603 0.009262809 604 0.009262817 605 0.009262858 606 0.00926286 607 0.009262962 608 0.009263034 609 0.009263034 610 0.009263034 611 0.009263034 612 0.009263034 613 0.009263199 614 0.009263225 615 0.009263266 616 0.009263367 617 0.009263433 618 0.009263454 619 0.009263476 620 0.009263518 621 0.009263518 622 0.009263562 623 0.009263594 624 
0.009263879 625 0.009263926 626 0.009263926 627 0.009263947 628 0.009263947 629 0.009263947 630 0.009264071 631 0.009264154 632 0.009264189 633 0.0092644 634 0.009264472 635 0.009264688 636 0.009264691 637 0.00926478 638 0.00926478 639 0.009264795 640 0.009264823 641 0.009264841 642 0.00926488 643 0.009264908 644 0.009265058 645 0.009265125 646 0.009265304 647 0.009265448 648 0.009265448 649 0.009265519 650 0.009265519 651 0.009265519 652 0.009265556 653 0.009265644 654 0.009265644 655 0.009265644 656 0.009265644 657 0.009265677 658 0.009265725 659 0.009265733 660 0.009265744 661 0.009265759 662 0.00926596 663 0.009266167 664 0.009266314 665 0.009266341 666 0.009266493 667 0.009266493 668 0.00926653 669 0.009266601 670 0.009266601 671 0.009266673 672 0.009266706 673 0.009266706 674 0.009266758 675 0.0092669 676 0.009266949 677 0.009267053 678 0.009267053 679 0.009267054 680 0.009267054 681 0.009267054 682 0.009267105 683 0.009267281 684 0.009267297 685 0.009267297 686 0.009267652 687 0.009267818 688 0.009267868 689 0.009267868 690 0.009267918 691 0.009267941 692 0.009268016 693 0.009268016 694 0.009268053 695 0.009268243 696 0.009268256 697 0.009268256 698 0.009268337 699 0.009268431 700 0.009268485 701 0.009268625 702 0.009268625 703 0.009268682 704 0.009268826 705 0.00926886 706 0.00926895 707 0.00926895 708 0.00926895 709 0.009269124 710 0.009269174 711 0.009269297 712 0.009269299 713 0.009269466 714 0.009269638 715 0.009269687 716 0.009269687 717 0.009269734 718 0.009269734 719 0.009269751 720 0.009269832 721 0.009269881 722 0.009269881 723 0.009269881 724 0.009269931 725 0.009269931 726 0.009270047 727 0.009270113 728 0.009270286 729 0.00927033 730 0.009270811 731 0.009270811 732 0.009270811 733 0.009270811 734 0.009270847 735 0.009270908 736 0.009271 737 0.009271 738 0.009271239 739 0.009271328 740 0.009271328 741 0.009271444 742 0.009271469 743 0.009271522 744 0.009271681 745 0.009271734 746 0.009271744 747 0.009271744 748 0.009271746 749 0.009271836 750 
0.009271914 751 0.009272033 752 0.009272442 753 0.009272525 754 0.009272692 755 0.009272704 756 0.009272866 757 0.009272866 758 0.009272866 759 0.009272935 760 0.009273028 761 0.009273031 762 0.009273076 763 0.009273095 764 0.009273234 765 0.009273268 766 0.009273306 767 0.009273306 768 0.009273355 769 0.009273355 770 0.009273362 771 0.00927337 772 0.009273481 773 0.009273738 774 0.009273892 775 0.009273989 776 0.009274087 777 0.009274087 778 0.009274141 779 0.009274159 780 0.009274296 781 0.009274296 782 0.009274296 783 0.009274296 784 0.009274338 785 0.009274352 786 0.009274429 787 0.009274515 788 0.009274574 789 0.009274632 790 0.009274757 791 0.009274908 792 0.009275096 793 0.009275201 794 0.009275204 795 0.009275274 796 0.009275274 797 0.009275329 798 0.009275395 799 0.00927548 800 0.009275592 801 0.009275603 802 0.009275613 803 0.009275613 804 0.009275766 805 0.009275801 806 0.009275816 807 0.009275864 808 0.009275936 809 0.009275958 810 0.009275958 811 0.009276046 812 0.009276191 813 0.009276308 814 0.009276316 815 0.009276316 816 0.009276316 817 0.009276337 818 0.009276337 819 0.009276337 820 0.009276338 821 0.009276383 822 0.009276397 823 0.009276434 824 0.009276441 825 0.009276503 826 0.009276517 827 0.009276599 828 0.009276606 829 0.009276647 830 0.009276657 831 0.009276729 832 0.009276752 833 0.009276764 834 0.009276789 835 0.009276889 836 0.009276889 837 0.009276991 838 0.009277051 839 0.009277112 840 0.009277112 841 0.009277112 842 0.009277112 843 0.00927718 844 0.009277354 845 0.009277362 846 0.009277432 847 0.009277474 848 0.009277474 849 0.00927752 850 0.00927752 851 0.009277525 852 0.00927756 853 0.009277611 854 0.009277734 855 0.009277755 856 0.009277779 857 0.009277818 858 0.009277834 859 0.009277913 860 0.009277945 861 0.009277961 862 0.009278037 863 0.009278076 864 0.009278138 865 0.009278142 866 0.009278142 867 0.009278179 868 0.009278211 869 0.009278211 870 0.009278241 871 0.009278241 872 0.009278254 873 0.009278256 874 0.009278256 875 
0.009278256 876 0.009278256 877 0.009278256 878 0.009278256 879 0.009278256 880 0.00927831 881 0.009278346 882 0.009278373 883 0.009278461 884 0.009278473 885 0.00927849 886 0.0092785 887 0.009278532 888 0.009278589 889 0.009278689 890 0.009278768 891 0.009278768 892 0.009278775 893 0.009278787 894 0.009278792 895 0.009278818 896 0.009278818 897 0.009278876 898 0.009278895 899 0.009278895 900 0.009278899 901 0.009278899 902 0.009278934 903 0.00927897 904 0.00927897 905 0.00927897 906 0.009278985 907 0.009279 908 0.009279026 909 0.009279031 910 0.009279048 911 0.009279091 912 0.009279099 913 0.009279216 914 0.009279235 915 0.009279271 916 0.009279318 917 0.009279318 918 0.009279327 919 0.009279429 920 0.00927945 921 0.00927945 922 0.009279466 923 0.009279598 924 0.009279613 925 0.00927967 926 0.00927967 927 0.009279673 928 0.009279689 929 0.009279689 930 0.009279689 931 0.009279689 932 0.009279689 933 0.009279689 934 0.009279689 935 0.009279689 936 0.009279731 937 0.009279746 938 0.009279814 939 0.009279951 940 0.009279958 941 0.009279987 942 0.00928002 943 0.009280021 944 0.009280083 945 0.009280083 946 0.009280257 947 0.009280257 948 0.00928029 949 0.009280324 950 0.009280324 951 0.009280324 952 0.009280346 953 0.009280346 954 0.009280407 955 0.009280479 956 0.009280544 957 0.009280553 958 0.009280561 959 0.009280563 960 0.009280563 961 0.009280563 962 0.009280602 963 0.009280748 964 0.009280752 965 0.009280752 966 0.009280752 967 0.009280752 968 0.009280752 969 0.009280755 970 0.009280768 971 0.009280768 972 0.00928077 973 0.009280781 974 0.009280815 975 0.009280815 976 0.009280914 977 0.009281005 978 0.009281021 979 0.009281021 980 0.009281023 981 0.009281124 982 0.009281124 983 0.009281124 984 0.009281125 985 0.009281142 986 0.009281222 987 0.009281327 988 0.009281327 989 0.009281424 990 0.009281433 991 0.009281433 992 0.009281443 993 0.009281446 994 0.009281446 995 0.009281509 996 0.009281559 997 0.009281571 998 0.009281571 999 0.009281574 1000 0.009281574 
1001 0.009281574 1002 0.009281615 1003 0.009281672 1004 0.009281713 1005 0.009281807 1006 0.009281807 1007 0.009281833 1008 0.009281836 1009 0.009281836 1010 0.009281888 1011 0.009281898 1012 0.009281979 1013 0.009282052 1014 0.009282064 1015 0.00928209 1016 0.009282219 1017 0.009282229 1018 0.009282244 1019 0.009282245 1020 0.009282376 1021 0.009282412 1022 0.009282439 1023 0.009282555 1024 0.009282555 1025 0.009282586 1026 0.009282602 1027 0.009282623 1028 0.009282728 1029 0.009282735 1030 0.009282785 1031 0.009282794 1032 0.009282796 1033 0.009282815 1034 0.009282843 1035 0.009282987 1036 0.009283045 1037 0.009283045 1038 0.009283161 1039 0.009283247 1040 0.009283261 1041 0.009283308 1042 0.009283308 1043 0.009283308 1044 0.009283392 1045 0.009283392 1046 0.009283392 1047 0.009283401 1048 0.009283415 1049 0.009283415 1050 0.009283449 1051 0.009283451 1052 0.009283463 1053 0.009283473 1054 0.009283473 1055 0.00928348 1056 0.00928349 1057 0.009283514 1058 0.009283569 1059 0.009283579 1060 0.009283579 1061 0.009283582 1062 0.009283608 1063 0.009283668 1064 0.009283694 1065 0.009283694 1066 0.009283702 1067 0.009283797 1068 0.009283797 1069 0.009283808 1070 0.009283808 1071 0.009283836 1072 0.009283899 1073 0.009283919 1074 0.009283951 1075 0.009283951 1076 0.009283951 1077 0.009284011 1078 0.009284012 1079 0.009284034 1080 0.009284059 1081 0.009284111 1082 0.00928416 1083 0.009284175 1084 0.009284215 1085 0.009284267 1086 0.009284267 1087 0.009284267 1088 0.009284267 1089 0.009284267 1090 0.009284272 1091 0.009284286 1092 0.009284427 1093 0.009284427 1094 0.009284465 1095 0.009284478 1096 0.009284496 1097 0.009284496 1098 0.009284496 1099 0.009284583 1100 0.009284653 1101 0.009284653 1102 0.009284757 1103 0.009284782 1104 0.009284783 1105 0.009284819 1106 0.009284892 1107 0.009284912 1108 0.009284917 1109 0.009284935 1110 0.00928494 1111 0.009284978 1112 0.009285058 1113 0.009285061 1114 0.009285061 1115 0.009285062 1116 0.009285062 1117 0.009285071 1118 
0.009285073 1119 0.009285073 1120 0.009285073 1121 0.009285073 1122 0.00928509 1123 0.009285137 1124 0.009285143 1125 0.009285143 1126 0.00928518 1127 0.00928518 1128 0.009285208 1129 0.009285246 1130 0.00928547 1131 0.00928547 1132 0.009285473 1133 0.009285533 1134 0.009285533 1135 0.009285533 1136 0.00928556 1137 0.009285797 1138 0.009285798 1139 0.00928582 1140 0.00928585 1141 0.009285914 1142 0.009285916 1143 0.009285916 1144 0.009285949 1145 0.009285949 1146 0.009285971 1147 0.009285977 1148 0.009286041 1149 0.009286109 1150 0.009286145 1151 0.009286177 1152 0.009286178 1153 0.009286184 1154 0.009286197 1155 0.009286225 1156 0.009286285 1157 0.009286304 1158 0.009286381 1159 0.009286425 1160 0.009286434 1161 0.009286481 1162 0.009286481 1163 0.009286481 1164 0.009286545 1165 0.009286545 1166 0.009286677 1167 0.00928673 1168 0.00928673 1169 0.00928673 1170 0.00928673 1171 0.00928673 1172 0.00928673 1173 0.009286813 1174 0.009286813 1175 0.009286873 1176 0.009286881 1177 0.009286925 1178 0.009286976 1179 0.009286987 1180 0.009286999 1181 0.009287021 1182 0.009287021 1183 0.009287041 1184 0.009287062 1185 0.009287091 1186 0.009287097 1187 0.009287275 1188 0.009287362 1189 0.009287385 1190 0.009287385 1191 0.009287539 1192 0.009287539 1193 0.009287597 1194 0.009287628 1195 0.009287676 1196 0.009287813 1197 0.009287829 1198 0.009287842 1199 0.009287854 1200 0.009287854 1201 0.009287863 1202 0.009287863 1203 0.009287863 1204 0.009287863 1205 0.009287863 1206 0.009287964 1207 0.009287964 1208 0.009287964 1209 0.00928799 1210 0.009287992 1211 0.009287992 1212 0.009288002 1213 0.009288008 1214 0.00928803 1215 0.00928803 1216 0.00928804 1217 0.009288102 1218 0.009288143 1219 0.009288143 1220 0.009288193 1221 0.009288212 1222 0.009288326 1223 0.009288326 1224 0.009288338 1225 0.009288415 1226 0.009288481 1227 0.009288485 1228 0.009288503 1229 0.009288589 1230 0.009288613 1231 0.009288646 1232 0.009288702 1233 0.009288761 1234 0.009288777 1235 0.009288798 1236 0.009288804 
1237 0.009288871 1238 0.009288875 1239 0.009288913 1240 0.009288913 1241 0.009288913 1242 0.009288913 1243 0.009288938 1244 0.009288957 1245 0.009288961 1246 0.009288981 1247 0.009288988 1248 0.009289022 1249 0.009289022 1250 0.009289022 1251 0.009289234 1252 0.009289247 1253 0.00928931 1254 0.00928931 1255 0.009289343 1256 0.009289347 1257 0.009289354 1258 0.009289403 1259 0.009289403 1260 0.009289405 1261 0.009289413 1262 0.009289413 1263 0.009289413 1264 0.009289413 1265 0.009289414 1266 0.009289445 1267 0.009289479 1268 0.009289537 1269 0.009289652 1270 0.009289667 1271 0.009289667 1272 0.009289673 1273 0.009289703 1274 0.009289724 1275 0.009289774 1276 0.009289829 1277 0.009289862 1278 0.009289862 1279 0.009289898 1280 0.009289959 1281 0.009289959 1282 0.009289973 1283 0.009289984 1284 0.009289988 1285 0.009290046 1286 0.009290046 1287 0.009290065 1288 0.009290069 1289 0.009290096 1290 0.009290096 1291 0.009290096 1292 0.009290096 1293 0.009290096 1294 0.009290136 1295 0.009290179 1296 0.009290266 1297 0.009290299 1298 0.009290299 1299 0.009290358 1300 0.009290379 1301 0.009290379 1302 0.009290379 1303 0.009290379 1304 0.009290379 1305 0.009290379 1306 0.009290382 1307 0.009290438 1308 0.009290458 1309 0.009290466 1310 0.009290466 1311 0.009290466 1312 0.009290466 1313 0.009290466 1314 0.009290471 1315 0.009290478 1316 0.00929048 1317 0.009290492 1318 0.009290521 1319 0.009290521 1320 0.009290521 1321 0.009290521 1322 0.009290521 1323 0.009290521 1324 0.009290567 1325 0.009290567 1326 0.009290583 1327 0.009290615 1328 0.009290678 1329 0.009290713 1330 0.009290713 1331 0.009290724 1332 0.009290872 1333 0.009290875 1334 0.009290903 1335 0.009290946 1336 0.009290958 1337 0.009290958 1338 0.009290977 1339 0.009291153 1340 0.009291218 1341 0.009291236 1342 0.009291267 1343 0.009291295 1344 0.009291297 1345 0.009291306 1346 0.009291326 1347 0.009291396 1348 0.009291404 1349 0.009291409 1350 0.009291434 1351 0.009291434 1352 0.00929147 1353 0.00929147 1354 
0.009291473 1355 0.009291473 1356 0.009291481 1357 0.009291493 1358 0.009291493 1359 0.009291509 1360 0.009291546 1361 0.009291546 1362 0.009291611 1363 0.009291663 1364 0.00929168 1365 0.00929168 1366 0.009291721 1367 0.009291732 1368 0.009291758 1369 0.009291773 1370 0.009291806 1371 0.00929183 1372 0.009291933 1373 0.009291953 1374 0.009291976 1375 0.009291979 1376 0.009291991 1377 0.009291991 1378 0.009291991 1379 0.009291991 1380 0.009291991 1381 0.009291994 1382 0.009291994 1383 0.009292024 1384 0.009292041 1385 0.009292059 1386 0.009292059 1387 0.009292178 1388 0.009292239 1389 0.009292276 1390 0.009292329 1391 0.009292367 1392 0.00929239 1393 0.009292448 1394 0.009292448 1395 0.009292452 1396 0.009292479 1397 0.00929252 1398 0.009292525 1399 0.009292525 1400 0.009292602 1401 0.009292672 1402 0.009292673 1403 0.009292695 1404 0.009292724 1405 0.009292724 1406 0.009292835 1407 0.009292861 1408 0.009292873 1409 0.009292901 1410 0.009292923 1411 0.009292942 1412 0.009292973 1413 0.009292974 1414 0.009292974 1415 0.009292988 1416 0.009293038 1417 0.009293039 1418 0.00929306 1419 0.009293085 1420 0.009293085 1421 0.009293094 1422 0.009293094 1423 0.009293094 1424 0.009293094 1425 0.009293094 1426 0.009293094 1427 0.009293158 1428 0.009293165 1429 0.009293172 1430 0.009293186 1431 0.009293219 1432 0.009293247 1433 0.009293335 1434 0.009293399 1435 0.009293419 1436 0.009293441 1437 0.00929346 1438 0.009293527 1439 0.009293545 1440 0.009293545 1441 0.00929355 1442 0.009293591 1443 0.009293591 1444 0.009293591 1445 0.009293604 1446 0.009293623 1447 0.009293694 1448 0.009293707 1449 0.009293707 1450 0.00929376 1451 0.00929376 1452 0.00929377 1453 0.009293798 1454 0.009293869 1455 0.009293927 1456 0.009293959 1457 0.009293969 1458 0.009294002 1459 0.009294131 1460 0.009294133 1461 0.009294175 1462 0.00929424 1463 0.009294352 1464 0.009294363 1465 0.009294374 1466 0.009294396 1467 0.009294448 1468 0.009294487 1469 0.009294539 1470 0.009294625 1471 0.009294656 1472 
0.009294657 1473 0.009294657 1474 0.009294709 1475 0.009294727 1476 0.009294758 1477 0.009294876 1478 0.009294892 1479 0.009294913 1480 0.009294954 1481 0.009294954 1482 0.009295018 1483 0.009295019 1484 0.009295113 1485 0.00929515 1486 0.009295172 1487 0.009295244 1488 0.009295314 1489 0.009295323 1490 0.009295323 1491 0.009295335 1492 0.009295368 1493 0.009295419 1494 0.009295514 1495 0.009295545 1496 0.009295663 1497 0.009295663 1498 0.009295663 1499 0.009295689 1500 0.009295729 1501 0.009295751 1502 0.009295764 1503 0.009295767 1504 0.009295792 1505 0.009295897 1506 0.009295911 1507 0.009295924 1508 0.009295927 1509 0.00929594 1510 0.009295957 1511 0.009296111 1512 0.009296153 1513 0.009296175 1514 0.009296182 1515 0.009296182 1516 0.009296204 1517 0.009296319 1518 0.009296369 1519 0.009296426 1520 0.009296492 1521 0.009296492 1522 0.009296678 1523 0.009296712 1524 0.009296763 1525 0.009296786 1526 0.009296786 1527 0.009296871 1528 0.00929691 1529 0.009296917 1530 0.009297019 1531 0.009297069 1532 0.009297069 1533 0.009297106 1534 0.009297142 1535 0.009297196 1536 0.00929731 1537 0.009297323 1538 0.009297354 1539 0.009297441 1540 0.009297455 1541 0.009297532 1542 0.009297615 1543 0.009297638 1544 0.009297695 1545 0.009297707 1546 0.009297784 1547 0.0092978 1548 0.009297829 1549 0.009297906 1550 0.009298015 1551 0.009298015 1552 0.009298018 1553 0.009298059 1554 0.009298073 1555 0.009298101 1556 0.009298265 1557 0.009298338 1558 0.009298391 1559 0.009298391 1560 0.009298461 1561 0.009298516 1562 0.009298516 1563 0.009298555 1564 0.009298593 1565 0.009298676 1566 0.009298703 1567 0.00929873 1568 0.009298755 1569 0.009298817 1570 0.009298892 1571 0.009298918 1572 0.009298919 1573 0.009298919 1574 0.009298919 1575 0.009298919 1576 0.009298948 1577 0.009298997 1578 0.009299004 1579 0.009299004 1580 0.009299036 1581 0.009299036 1582 0.00929925 1583 0.009299326 1584 0.009299333 1585 0.009299364 1586 0.009299375 1587 0.009299381 1588 0.009299439 1589 0.009299521 1590 
0.009299521 1591 0.00929975 1592 0.009299756 1593 0.009299795 1594 0.00929982 1595 0.009299826 1596 0.009299876 1597 0.009299968 1598 0.009300036 1599 0.009300082 1600 0.009300125 1601 0.009300151 1602 0.009300216 1603 0.009300247 1604 0.009300247 1605 0.009300275 1606 0.009300283 1607 0.009300295 1608 0.009300303 1609 0.009300314 1610 0.009300332 1611 0.009300401 1612 0.009300407 1613 0.009300446 1614 0.009300453 1615 0.00930046 1616 0.009300469 1617 0.00930055 1618 0.009300596 1619 0.00930075 1620 0.009300781 1621 0.009300808 1622 0.009300814 1623 0.009300814 1624 0.009300814 1625 0.009300814 1626 0.009300814 1627 0.009300814 1628 0.00930085 1629 0.009300861 1630 0.009300861 1631 0.009300873 1632 0.009301031 1633 0.009301051 1634 0.00930107 1635 0.009301127 1636 0.009301165 1637 0.00930122 1638 0.009301242 1639 0.009301286 1640 0.009301397 1641 0.009301407 1642 0.009301407 1643 0.009301475 1644 0.009301519 1645 0.009301525 1646 0.009301525 1647 0.009301525 1648 0.00930161 1649 0.009301665 1650 0.009301753 1651 0.009301767 1652 0.009301787 1653 0.009301787 1654 0.009301811 1655 0.009301811 1656 0.009301837 1657 0.009301854 1658 0.009301885 1659 0.009301886 1660 0.00930194 1661 0.009302013 1662 0.009302054 1663 0.009302054 1664 0.00930208 1665 0.009302221 1666 0.009302281 1667 0.009302281 1668 0.009302316 1669 0.009302324 1670 0.009302433 1671 0.009302576 1672 0.009302629 1673 0.009302636 1674 0.009302647 1675 0.009302662 1676 0.009302732 1677 0.009302755 1678 0.009302757 1679 0.009302812 1680 0.009302812 1681 0.009302858 1682 0.00930287 1683 0.00930296 1684 0.009302991 1685 0.009302991 1686 0.009302992 1687 0.009303039 1688 0.00930311 1689 0.00930319 1690 0.00930324 1691 0.009303245 1692 0.009303271 1693 0.009303461 1694 0.009303502 1695 0.009303531 1696 0.009303579 1697 0.009303653 1698 0.009303675 1699 0.009303704 1700 0.009303704 1701 0.009303707 1702 0.009303707 1703 0.009303709 1704 0.009303723 1705 0.009303797 1706 0.009303799 1707 0.009303886 1708 
0.009303893 1709 0.009303909 1710 0.009303917 1711 0.009303939 1712 0.009304002 1713 0.009304116 1714 0.009304136 1715 0.009304156 1716 0.009304184 1717 0.009304184 1718 0.009304277 1719 0.009304277 1720 0.009304349 1721 0.009304375 1722 0.009304397 1723 0.009304416 1724 0.009304416 1725 0.009304481 1726 0.009304481 1727 0.009304589 1728 0.009304594 1729 0.009304597 1730 0.009304605 1731 0.009304617 1732 0.009304626 1733 0.009304642 1734 0.009304667 1735 0.009304842 1736 0.009305036 1737 0.009305091 1738 0.009305141 1739 0.009305249 1740 0.009305278 1741 0.009305297 1742 0.009305444 1743 0.009305461 1744 0.009305485 1745 0.009305497 1746 0.009305546 1747 0.009305694 1748 0.009305702 1749 0.009305716 1750 0.009305731 1751 0.009305794 1752 0.009305873 1753 0.009305882 1754 0.009305882 1755 0.009305882 1756 0.009305899 1757 0.009306026 1758 0.009306243 1759 0.009306308 1760 0.009306341 1761 0.009306362 1762 0.009306365 1763 0.009306389 1764 0.00930646 1765 0.009306612 1766 0.009306634 1767 0.009306647 1768 0.00930671 1769 0.009306722 1770 0.009306753 1771 0.009306807 1772 0.009306893 1773 0.009306907 1774 0.009306927 1775 0.009307017 1776 0.009307262 1777 0.009307273 1778 0.009307277 1779 0.009307558 1780 0.009307558 1781 0.009307607 1782 0.009307665 1783 0.009307692 1784 0.009307701 1785 0.009307733 1786 0.009307743 1787 0.009307861 1788 0.009308195 1789 0.009308261 1790 0.009308309 1791 0.009308532 1792 0.009308554 1793 0.009308556 1794 0.009308556 1795 0.009308674 1796 0.009308721 1797 0.009308722 1798 0.009308751 1799 0.009308924 1800 0.009309136 1801 0.0093092 1802 0.009309501 1803 0.009309507 1804 0.009309529 1805 0.009309596 1806 0.009309614 1807 0.009309816 1808 0.009309866 1809 0.009309965 1810 0.009310033 1811 0.009310039 1812 0.009310161 1813 0.009310186 1814 0.009310194 1815 0.009310273 1816 0.009310273 1817 0.009310356 1818 0.00931037 1819 0.009310406 1820 0.00931041 1821 0.009310457 1822 0.009310462 1823 0.009310518 1824 0.009310635 1825 0.009310654 1826 
0.009310673 1827 0.00931078 1828 0.009310791 1829 0.009310793 1830 0.009310872 1831 0.009310883 1832 0.009310925 1833 0.00931097 1834 0.009310982 1835 0.009311014 1836 0.009311025 1837 0.009311135 1838 0.009311214 1839 0.009311257 1840 0.009311278 1841 0.009311322 1842 0.009311337 1843 0.009311409 1844 0.00931144 1845 0.009311496 1846 0.009311513 1847 0.009311623 1848 0.00931184 1849 0.00931196 1850 0.009312022 1851 0.009312123 1852 0.009312149 1853 0.009312189 1854 0.009312317 1855 0.009312355 1856 0.009312377 1857 0.009312475 1858 0.009312583 1859 0.009312615 1860 0.009312651 1861 0.009312654 1862 0.009312718 1863 0.009312726 1864 0.009312994 1865 0.009312994 1866 0.009312998 1867 0.009313095 1868 0.009313125 1869 0.009313213 1870 0.009313539 1871 0.009313559 1872 0.009313564 1873 0.009313582 1874 0.009313689 1875 0.009313712 1876 0.009313786 1877 0.009313786 1878 0.009313797 1879 0.0093138 1880 0.009313824 1881 0.009313826 1882 0.009313898 1883 0.00931398 1884 0.009313999 1885 0.009314134 1886 0.009314164 1887 0.009314207 1888 0.009314248 1889 0.009314256 1890 0.009314286 1891 0.009314335 1892 0.009314341 1893 0.009314343 1894 0.009314459 1895 0.009314682 1896 0.009314711 1897 0.009314763 1898 0.009314792 1899 0.009315017 1900 0.009315061 1901 0.009315069 1902 0.009315073 1903 0.009315165 1904 0.009315182 1905 0.00931527 1906 0.009315279 1907 0.009315283 1908 0.009315301 1909 0.009315345 1910 0.009315371 1911 0.009315446 1912 0.009315449 1913 0.009315476 1914 0.009315421 1915 0.00931554 1916 0.009315615 1917 0.009315655 1918 0.009315681 1919 0.009315941 1920 0.00931629 1921 0.009316361 1922 0.009316387 1923 0.009316448 1924 0.00931649 1925 0.00931669 1926 0.009316749 1927 0.009316763 1928 0.009316869 1929 0.009316936 1930 0.009317011 1931 0.009317054 1932 0.009317168 1933 0.009317268 1934 0.009317323 1935 0.009317438 1936 0.009317489 1937 0.009317622 1938 0.009317755 1939 0.009317773 1940 0.009317798 1941 0.009317809 1942 0.009317909 1943 0.009318091 1944 
0.009318111 1945 0.009317683 1946 0.009318197 1947 0.009318288 1948 0.00931846 1949 0.009318749 1950 0.0093188 1951 0.009318884 1952 0.009319101 1953 0.00931913 1954 0.009319463 1955 0.009319505 1956 0.009319513 ] upperBoundsData=[ 10 0.009386359 11 0.009386359 12 0.009386359 13 0.009386359 14 0.009386359 15 0.009386359 16 0.009386359 17 0.009386359 18 0.009386359 19 0.009386359 20 0.009386359 21 0.009386359 22 0.009386359 23 0.009386359 24 0.009386359 25 0.009386359 26 0.009386359 27 0.009386359 28 0.009386359 29 0.009386359 30 0.009386359 31 0.009386359 32 0.009386359 33 0.009386359 34 0.009386359 35 0.009386359 36 0.009386359 37 0.009386359 38 0.009386359 39 0.009386359 40 0.009386359 41 0.009386359 42 0.009383583 43 0.009383583 44 0.009383583 45 0.009383583 46 0.009383583 47 0.009383583 48 0.009383583 49 0.009383583 50 0.009383583 51 0.009383583 52 0.009383583 53 0.009383583 54 0.009383583 55 0.009383583 56 0.009383583 57 0.009383583 58 0.009383583 59 0.009383583 60 0.009383583 61 0.009364704 62 0.009364704 63 0.009364704 64 0.009364704 65 0.009358514 66 0.009358514 67 0.009358514 68 0.009358514 69 0.009358514 70 0.009358514 71 0.009358514 72 0.009358514 73 0.009358514 74 0.009358514 75 0.009358514 76 0.009358514 77 0.009358514 78 0.009358514 79 0.009358514 80 0.009358514 81 0.009358514 82 0.009358514 83 0.009358514 84 0.009358514 85 0.009358514 86 0.009358514 87 0.00935486 88 0.00935486 89 0.00935486 90 0.00935486 91 0.00935486 92 0.00935486 93 0.00935486 94 0.00935486 95 0.00935486 96 0.00935486 97 0.00935486 98 0.00935486 99 0.00935486 100 0.00935486 101 0.00935486 102 0.00935486 103 0.00935486 104 0.00935486 105 0.00935486 106 0.00935486 107 0.00935486 108 0.00935486 109 0.00935486 110 0.00935486 111 0.00935486 112 0.00935486 113 0.00935486 114 0.00935486 115 0.00935486 116 0.00935486 117 0.00935486 118 0.00935486 119 0.00935486 120 0.00935486 121 0.00935486 122 0.00935486 123 0.00935486 124 0.00935486 125 0.00935486 126 0.00935486 127 0.00935486 128 
0.00935486 129 0.00935486 130 0.00935486 131 0.00935486 132 0.00935486 133 0.00935486 134 0.00935486 135 0.00935486 136 0.009336686 137 0.009336686 138 0.009336686 139 0.009336686 140 0.009336686 141 0.009336686 142 0.009336686 143 0.009336686 144 0.009336686 145 0.009336686 146 0.009336686 147 0.009336686 148 0.009336686 149 0.009336686 150 0.009336686 151 0.009336686 152 0.009336686 153 0.009336686 154 0.009336686 155 0.009336686 156 0.009336686 157 0.009336686 158 0.009336686 159 0.009336686 160 0.009336686 161 0.009336686 162 0.009336686 163 0.009336686 164 0.009336686 165 0.009336686 166 0.009336686 167 0.009336686 168 0.009336686 169 0.009336686 170 0.009336686 171 0.009336686 172 0.009336686 173 0.009336686 174 0.009336686 175 0.009336686 176 0.009336686 177 0.009336686 178 0.009336686 179 0.009336686 180 0.009336686 181 0.009336686 182 0.009336686 183 0.009336686 184 0.009336686 185 0.009336686 186 0.009336686 187 0.009336686 188 0.009336686 189 0.009336686 190 0.009336686 191 0.009336686 192 0.009336686 193 0.009336686 194 0.009336686 195 0.009336686 196 0.009336686 197 0.009336686 198 0.009336686 199 0.009336686 200 0.009336686 201 0.009336686 202 0.009336686 203 0.009336686 204 0.009336686 205 0.009336686 206 0.009336686 207 0.009336686 208 0.009336686 209 0.009336686 210 0.009336686 211 0.009336686 212 0.009336686 213 0.009336686 214 0.009336686 215 0.009336686 216 0.009336686 217 0.009336686 218 0.009336686 219 0.009336686 220 0.009336686 221 0.009336686 222 0.009336686 223 0.009336686 224 0.009336686 225 0.009336686 226 0.009336686 227 0.009336686 228 0.009336686 229 0.009336686 230 0.009336686 231 0.009336686 232 0.009336686 233 0.009336686 234 0.009336686 235 0.009336686 236 0.009336686 237 0.009336686 238 0.009336686 239 0.009336686 240 0.009336686 241 0.009336686 242 0.009336686 243 0.009336686 244 0.009336686 245 0.009336686 246 0.009336686 247 0.009336686 248 0.009336686 249 0.009336686 250 0.009336686 251 0.009336686 252 0.009336686 253 
0.009336686 254 0.009336686 255 0.009336686 256 0.009336686 257 0.009336686 258 0.009336686 259 0.009336686 260 0.009336686 261 0.009336686 262 0.009336686 263 0.009336686 264 0.009336686 265 0.009336686 266 0.009336686 267 0.009336686 268 0.009336686 269 0.009336686 270 0.009336686 271 0.009336686 272 0.009336686 273 0.009336686 274 0.009336686 275 0.009336686 276 0.009336686 277 0.009336686 278 0.009336686 279 0.009336686 280 0.009336686 281 0.009336686 282 0.009336686 283 0.009336686 284 0.009336686 285 0.009336686 286 0.009336686 287 0.009336686 288 0.009336686 289 0.009336686 290 0.009336686 291 0.009336686 292 0.009336686 293 0.009336686 294 0.009336686 295 0.009336686 296 0.009336686 297 0.009336686 298 0.009336686 299 0.009331194 300 0.009331194 301 0.009329295 302 0.009329295 303 0.009329295 304 0.009329295 305 0.009329295 306 0.009327387 307 0.009327387 308 0.009327387 309 0.009327387 310 0.009327387 311 0.009327387 312 0.009327387 313 0.009327387 314 0.009327387 315 0.009327387 316 0.009327387 317 0.009327387 318 0.009327387 319 0.009327387 320 0.009327387 321 0.009327387 322 0.009327387 323 0.009327387 324 0.009327387 325 0.009327387 326 0.009327387 327 0.009327387 328 0.009327387 329 0.009327387 330 0.009327387 331 0.009327387 332 0.009327387 333 0.009327387 334 0.009327387 335 0.009327387 336 0.009327387 337 0.009327387 338 0.009327387 339 0.009327387 340 0.009327387 341 0.009327387 342 0.009327387 343 0.009327387 344 0.009327387 345 0.009327387 346 0.009327387 347 0.009327387 348 0.009327387 349 0.009327387 350 0.009327387 351 0.009327387 352 0.009327387 353 0.009327387 354 0.009327387 355 0.009327387 356 0.009327387 357 0.009327387 358 0.009327387 359 0.009327387 360 0.009327387 361 0.009327387 362 0.009327387 363 0.009327387 364 0.009327387 365 0.009327387 366 0.009327387 367 0.009327387 368 0.009327387 369 0.009327387 370 0.009327387 371 0.009327387 372 0.009327387 373 0.009327387 374 0.009327387 375 0.009327387 376 0.009327387 377 0.009327387 378 
0.009327387 379 0.009327387 380 0.009327387 381 0.009327387 382 0.009327387 383 0.009327387 384 0.009327387 385 0.009327387 386 0.009327387 387 0.009327387 388 0.009327387 389 0.009327387 390 0.009327387 391 0.009327387 392 0.009327387 393 0.009327387 394 0.009327387 395 0.009327387 396 0.009327387 397 0.009327387 398 0.009327387 399 0.009327387 400 0.009327387 401 0.009327387 402 0.009327387 403 0.009327387 404 0.009327387 405 0.009327387 406 0.009327387 407 0.009327387 408 0.009327387 409 0.009327387 410 0.009327387 411 0.009327387 412 0.009327387 413 0.009327387 414 0.009327387 415 0.009327387 416 0.009327387 417 0.009327387 418 0.009327387 419 0.009327387 420 0.009327387 421 0.009327387 422 0.009327387 423 0.009327387 424 0.009327387 425 0.009327387 426 0.009327387 427 0.009327387 428 0.009327387 429 0.009327387 430 0.009327387 431 0.009327387 432 0.009327387 433 0.009327387 434 0.009327387 435 0.009327387 436 0.009327387 437 0.009327387 438 0.009327387 439 0.009327387 440 0.009327387 441 0.009327387 442 0.009327387 443 0.009327387 444 0.009327387 445 0.009327387 446 0.009327387 447 0.009327387 448 0.009327387 449 0.009327387 450 0.009327387 451 0.009327387 452 0.009327387 453 0.009327387 454 0.009327387 455 0.009327387 456 0.009327387 457 0.009327387 458 0.009327387 459 0.009327387 460 0.009327387 461 0.009327387 462 0.009327387 463 0.009327387 464 0.009327387 465 0.009327387 466 0.009327387 467 0.009327387 468 0.009327387 469 0.009327387 470 0.009327387 471 0.009327387 472 0.009327387 473 0.009327387 474 0.009327387 475 0.009327387 476 0.009327387 477 0.009327387 478 0.009327387 479 0.009327387 480 0.009327387 481 0.009327387 482 0.009327387 483 0.009327387 484 0.009327387 485 0.009327387 486 0.009327387 487 0.009327387 488 0.009327387 489 0.009327387 490 0.009327387 491 0.009327387 492 0.009327387 493 0.009327387 494 0.009327387 495 0.009327387 496 0.009327387 497 0.009327387 498 0.009327387 499 0.009327387 500 0.009327387 501 0.009327387 502 0.009327387 503 
0.009327387 504 0.009327387 505 0.009327387 506 0.009327387 507 0.009327387 508 0.009327387 509 0.009327387 510 0.009327387 511 0.009327387 512 0.009327387 513 0.009327387 514 0.009327387 515 0.009327387 516 0.009327387 517 0.009327387 518 0.009327387 519 0.009327387 520 0.009327387 521 0.009327387 522 0.009327387 523 0.009327387 524 0.009327387 525 0.009327387 526 0.009327387 527 0.009327387 528 0.009327387 529 0.009327387 530 0.009327387 531 0.009327387 532 0.009327387 533 0.009327387 534 0.009327387 535 0.009327387 536 0.009327387 537 0.009327387 538 0.009327387 539 0.009327387 540 0.009327387 541 0.009327387 542 0.009327387 543 0.009327387 544 0.009327387 545 0.009327387 546 0.009327387 547 0.009327387 548 0.009327387 549 0.009327387 550 0.009327387 551 0.009327387 552 0.009327387 553 0.009327387 554 0.009327387 555 0.009327387 556 0.009327387 557 0.009327387 558 0.009327387 559 0.009327387 560 0.009327387 561 0.009327387 562 0.009327387 563 0.009327387 564 0.009327387 565 0.009327387 566 0.009327387 567 0.009327387 568 0.009327387 569 0.009327387 570 0.009327387 571 0.009327387 572 0.009327387 573 0.009327387 574 0.009327387 575 0.009327387 576 0.009327387 577 0.009327387 578 0.009325836 579 0.009325836 580 0.009325836 581 0.009325196 582 0.009325196 583 0.009325196 584 0.009325196 585 0.009325196 586 0.009325196 587 0.009325196 588 0.009325196 589 0.009325196 590 0.009325196 591 0.009325196 592 0.009325196 593 0.009325196 594 0.009325196 595 0.009325196 596 0.009325196 597 0.009325196 598 0.009325196 599 0.009325196 600 0.009325196 601 0.009325196 602 0.009325196 603 0.009325196 604 0.009325196 605 0.009325196 606 0.009325196 607 0.009325196 608 0.009325196 609 0.009325196 610 0.009325196 611 0.009325196 612 0.009325196 613 0.009325196 614 0.009325196 615 0.009325196 616 0.009325196 617 0.009325196 618 0.009325196 619 0.009325196 620 0.009325196 621 0.009325196 622 0.009325196 623 0.009325196 624 0.009325196 625 0.009325196 626 0.009325196 627 0.009325196 628 
0.009325196 629 0.009325196 630 0.009325196 631 0.009325196 632 0.009325196 633 0.009325196 634 0.009325196 635 0.009325196 636 0.009325196 637 0.009325196 638 0.009325196 639 0.009325196 640 0.009325196 641 0.009325196 642 0.009325196 643 0.009325196 644 0.009325196 645 0.009325196 646 0.009325196 647 0.009325196 648 0.009325196 649 0.009325196 650 0.009325196 651 0.009325196 652 0.009325196 653 0.009325196 654 0.009325196 655 0.009325196 656 0.009325196 657 0.009325196 658 0.009325196 659 0.009325196 660 0.009325196 661 0.009325196 662 0.009325196 663 0.009325196 664 0.009325196 665 0.009325196 666 0.009325196 667 0.009325196 668 0.009325196 669 0.009325196 670 0.009325196 671 0.009325196 672 0.009325196 673 0.009325196 674 0.009325196 675 0.009325196 676 0.009325196 677 0.009325196 678 0.009325196 679 0.009325196 680 0.009325196 681 0.009325196 682 0.009325196 683 0.009325196 684 0.009325196 685 0.009325196 686 0.009325196 687 0.009325196 688 0.009325196 689 0.009325196 690 0.009325196 691 0.009325196 692 0.009325196 693 0.009325196 694 0.009325196 695 0.009325196 696 0.009325196 697 0.009325196 698 0.009325196 699 0.009325196 700 0.009325196 701 0.009325196 702 0.009325196 703 0.009325196 704 0.009325196 705 0.009325196 706 0.009325196 707 0.009325196 708 0.009325196 709 0.009325196 710 0.009325196 711 0.009325196 712 0.009325196 713 0.009325196 714 0.009325196 715 0.009325196 716 0.009325196 717 0.009325196 718 0.009325196 719 0.009325196 720 0.009325196 721 0.009325196 722 0.009325196 723 0.009325196 724 0.009325196 725 0.009325196 726 0.009325196 727 0.009325196 728 0.009325196 729 0.009325196 730 0.009325196 731 0.009325196 732 0.009325196 733 0.009325196 734 0.009325196 735 0.009325196 736 0.009325196 737 0.009325196 738 0.009325196 739 0.009325196 740 0.009325196 741 0.009325196 742 0.009325196 743 0.009325196 744 0.009325196 745 0.009325196 746 0.009325196 747 0.009325196 748 0.009325196 749 0.009325196 750 0.009325196 751 0.009325196 752 0.009325196 753 
0.009325196 754 0.009325196 755 0.009325196 756 0.009325196 757 0.009325196 758 0.009325196 759 0.009325196 760 0.009325196 761 0.009325196 762 0.009325196 763 0.009325196 764 0.009325196 765 0.009325196 766 0.009325196 767 0.009325196 768 0.009325196 769 0.009325196 770 0.009325196 771 0.009325196 772 0.009325196 773 0.009325196 774 0.009325196 775 0.009325196 776 0.009325196 777 0.009325196 778 0.009325196 779 0.009325196 780 0.009325196 781 0.009325196 782 0.009325196 783 0.009325196 784 0.009325196 785 0.009325196 786 0.009325196 787 0.009325196 788 0.009325196 789 0.009325196 790 0.009325196 791 0.009325196 792 0.009325196 793 0.009325196 794 0.009325196 795 0.009325196 796 0.009325196 797 0.009325196 798 0.009325196 799 0.009325196 800 0.009325196 801 0.009325196 802 0.009325196 803 0.009325196 804 0.009325196 805 0.009325196 806 0.009325196 807 0.009325196 808 0.009325196 809 0.009325196 810 0.009325196 811 0.009325196 812 0.009325196 813 0.009325196 814 0.009325196 815 0.009325196 816 0.009325196 817 0.009325196 818 0.009325196 819 0.009325196 820 0.009325196 821 0.009325196 822 0.009325196 823 0.009325196 824 0.009325196 825 0.009325196 826 0.009325196 827 0.009325196 828 0.009325196 829 0.009325196 830 0.009325196 831 0.009325196 832 0.009325196 833 0.009325196 834 0.009325196 835 0.009325196 836 0.009325196 837 0.009325196 838 0.009325196 839 0.009325196 840 0.009325196 841 0.009325196 842 0.009325196 843 0.009325196 844 0.009325196 845 0.009325196 846 0.009325196 847 0.009325196 848 0.009325196 849 0.009325196 850 0.009325196 851 0.009325196 852 0.009325196 853 0.009325196 854 0.009325196 855 0.009325196 856 0.009325196 857 0.009325196 858 0.009325196 859 0.009325196 860 0.009325196 861 0.009325196 862 0.009325196 863 0.009325196 864 0.009325196 865 0.009325196 866 0.009325196 867 0.009325196 868 0.009325196 869 0.009325196 870 0.009325196 871 0.009325196 872 0.009325196 873 0.009325196 874 0.009325196 875 0.009325196 876 0.009325196 877 0.009325196 878 
0.009325196 879 0.009325196 880 0.009325196 881 0.009325196 882 0.009325196 883 0.009325196 884 0.009325196 885 0.009325196 886 0.009325196 887 0.009325196 888 0.009325196 889 0.009325196 890 0.009325196 891 0.009325196 892 0.009325196 893 0.009325196 894 0.009325196 895 0.009325196 896 0.009325196 897 0.009325196 898 0.009325196 899 0.009325196 900 0.009325196 901 0.009325196 902 0.009325196 903 0.009325196 904 0.009325196 905 0.009325196 906 0.009325196 907 0.009325196 908 0.009325196 909 0.009325196 910 0.009325196 911 0.009325196 912 0.009325196 913 0.009325196 914 0.009325196 915 0.009325196 916 0.009325196 917 0.009325196 918 0.009325196 919 0.009325196 920 0.009325196 921 0.009325196 922 0.009325196 923 0.009325196 924 0.009325196 925 0.009325196 926 0.009325196 927 0.009325196 928 0.009325196 929 0.009325196 930 0.009325196 931 0.009325196 932 0.009325196 933 0.009325196 934 0.009325196 935 0.009325196 936 0.009325196 937 0.009325196 938 0.009325196 939 0.009325196 940 0.009325196 941 0.009325196 942 0.009325196 943 0.009325196 944 0.009325196 945 0.009325196 946 0.009325196 947 0.009325196 948 0.009325196 949 0.009325196 950 0.009325196 951 0.009325196 952 0.009325196 953 0.009325196 954 0.009325196 955 0.009325196 956 0.009325196 957 0.009325196 958 0.009325196 959 0.009325196 960 0.009325196 961 0.009325196 962 0.009325196 963 0.009325196 964 0.009325196 965 0.009325196 966 0.009325196 967 0.009325196 968 0.009325196 969 0.009325196 970 0.009325196 971 0.009325196 972 0.009325196 973 0.009325196 974 0.009325196 975 0.009325196 976 0.009325196 977 0.009325196 978 0.009325196 979 0.009325196 980 0.009325196 981 0.009325196 982 0.009325196 983 0.009325196 984 0.009325196 985 0.009325196 986 0.009325196 987 0.009325196 988 0.009325196 989 0.009325196 990 0.009325196 991 0.009325196 992 0.009325196 993 0.009325196 994 0.009325196 995 0.009325196 996 0.009325196 997 0.009325196 998 0.009325196 999 0.009325196 1000 0.009325196 1001 0.009325196 1002 0.009325196 
1003 0.009325196 1004 0.009325196 1005 0.009325196 1006 0.009325196 1007 0.009325196 1008 0.009325196 1009 0.009325196 1010 0.009325196 1011 0.009325196 1012 0.009325196 1013 0.009325196 1014 0.009325196 1015 0.009325196 1016 0.009325196 1017 0.009325196 1018 0.009325196 1019 0.009325196 1020 0.009325196 1021 0.009325196 1022 0.009325196 1023 0.009325196 1024 0.009325196 1025 0.009325196 1026 0.009325196 1027 0.009325196 1028 0.009325196 1029 0.009325196 1030 0.009325196 1031 0.009325196 1032 0.009325196 1033 0.009325196 1034 0.009325196 1035 0.009325196 1036 0.009325196 1037 0.009325196 1038 0.009325196 1039 0.009325196 1040 0.009325196 1041 0.009325196 1042 0.009325196 1043 0.009325196 1044 0.009325196 1045 0.009325196 1046 0.009325196 1047 0.009325196 1048 0.009325196 1049 0.009325196 1050 0.009325196 1051 0.009325196 1052 0.009325196 1053 0.009325196 1054 0.009325196 1055 0.009325196 1056 0.009325196 1057 0.009325196 1058 0.009325196 1059 0.009325196 1060 0.009325196 1061 0.009325196 1062 0.009325196 1063 0.009325196 1064 0.009325196 1065 0.009325196 1066 0.009325196 1067 0.009325196 1068 0.009325196 1069 0.009325196 1070 0.009325196 1071 0.009325196 1072 0.009325196 1073 0.009325196 1074 0.009325196 1075 0.009325196 1076 0.009325196 1077 0.009325196 1078 0.009325196 1079 0.009325196 1080 0.009325196 1081 0.009325196 1082 0.009325196 1083 0.009325196 1084 0.009325196 1085 0.009324919 1086 0.009324919 1087 0.009324919 1088 0.009324919 1089 0.009324919 1090 0.009324919 1091 0.009324919 1092 0.009324919 1093 0.009324919 1094 0.009324919 1095 0.009324919 1096 0.009324919 1097 0.009324919 1098 0.009324919 1099 0.009324919 1100 0.009324919 1101 0.009324919 1102 0.009324919 1103 0.009324919 1104 0.009324919 1105 0.009324919 1106 0.009324919 1107 0.009324919 1108 0.009324919 1109 0.009324919 1110 0.009324919 1111 0.009324919 1112 0.009324919 1113 0.009324919 1114 0.009324919 1115 0.009324919 1116 0.009324919 1117 0.009324919 1118 0.009324919 1119 0.009324919 1120 
0.009324919 1121 0.009324919 1122 0.009324919 1123 0.009324919 1124 0.009324919 1125 0.009324919 1126 0.009324919 1127 0.009324919 1128 0.009324919 1129 0.009324919 1130 0.009324919 1131 0.009324919 1132 0.009324919 1133 0.009324919 1134 0.009324919 1135 0.009324919 1136 0.009324919 1137 0.009324919 1138 0.009324919 1139 0.009324919 1140 0.009324919 1141 0.009324919 1142 0.009324919 1143 0.009324919 1144 0.009324919 1145 0.009324919 1146 0.009324919 1147 0.009324919 1148 0.009324919 1149 0.009324919 1150 0.009324919 1151 0.009324919 1152 0.009324919 1153 0.009324919 1154 0.009324919 1155 0.009324919 1156 0.009324919 1157 0.009324919 1158 0.009324919 1159 0.009324919 1160 0.009324919 1161 0.009324919 1162 0.009324919 1163 0.009324919 1164 0.009324919 1165 0.009324919 1166 0.009324919 1167 0.009324919 1168 0.009324919 1169 0.009324919 1170 0.009324919 1171 0.009324919 1172 0.009324919 1173 0.009324919 1174 0.009324919 1175 0.009324919 1176 0.009324919 1177 0.009324919 1178 0.009324919 1179 0.009324919 1180 0.009324919 1181 0.009324919 1182 0.009324919 1183 0.009324919 1184 0.009324919 1185 0.009324919 1186 0.009324919 1187 0.009324919 1188 0.009324919 1189 0.009324919 1190 0.009324919 1191 0.009324919 1192 0.009324919 1193 0.009324919 1194 0.009324919 1195 0.009324919 1196 0.009324919 1197 0.009324919 1198 0.009324919 1199 0.009324919 1200 0.009324919 1201 0.009324919 1202 0.009324919 1203 0.009324919 1204 0.009324919 1205 0.009324919 1206 0.009324919 1207 0.009324919 1208 0.009324919 1209 0.009324919 1210 0.009324919 1211 0.009324919 1212 0.009324919 1213 0.009324919 1214 0.009324919 1215 0.009324919 1216 0.009324919 1217 0.009324919 1218 0.009324919 1219 0.009324919 1220 0.009324919 1221 0.009324919 1222 0.009324919 1223 0.009324919 1224 0.009324919 1225 0.009324919 1226 0.009324919 1227 0.009324919 1228 0.009324919 1229 0.009324919 1230 0.009324919 1231 0.009324919 1232 0.009324919 1233 0.009324919 1234 0.009324919 1235 0.009324919 1236 0.009324919 1237 
0.009324919 1238 0.009324919 1239 0.009324919 1240 0.009324919 1241 0.009324919 1242 0.009324919 1243 0.009324919 1244 0.009324919 1245 0.009324919 1246 0.009324919 1247 0.009324919 1248 0.009324919 1249 0.009324919 1250 0.009324919 1251 0.009324919 1252 0.009324919 1253 0.009324919 1254 0.009324919 1255 0.009324919 1256 0.009324919 1257 0.009324919 1258 0.009324919 1259 0.009324919 1260 0.009324919 1261 0.009324919 1262 0.009324919 1263 0.009324919 1264 0.009324919 1265 0.009324919 1266 0.009324919 1267 0.009324919 1268 0.009324919 1269 0.009324919 1270 0.009324919 1271 0.009324919 1272 0.009324919 1273 0.009324919 1274 0.009324919 1275 0.009324919 1276 0.009324919 1277 0.009324919 1278 0.009324919 1279 0.009324919 1280 0.009324919 1281 0.009324919 1282 0.009324919 1283 0.009324919 1284 0.009324919 1285 0.009324919 1286 0.009324919 1287 0.009324919 1288 0.009324919 1289 0.009324919 1290 0.009324919 1291 0.009324919 1292 0.009324919 1293 0.009324919 1294 0.009324919 1295 0.009324919 1296 0.009324919 1297 0.009324919 1298 0.009324919 1299 0.009324919 1300 0.009324919 1301 0.009324919 1302 0.009324919 1303 0.009324919 1304 0.009324919 1305 0.009324919 1306 0.009324919 1307 0.009324919 1308 0.009324919 1309 0.009324919 1310 0.009324919 1311 0.009324919 1312 0.009324919 1313 0.009324919 1314 0.009324919 1315 0.009324919 1316 0.009324919 1317 0.009324919 1318 0.009324919 1319 0.009324919 1320 0.009324919 1321 0.009324919 1322 0.009324919 1323 0.009324919 1324 0.009324919 1325 0.009324919 1326 0.009324919 1327 0.009324919 1328 0.009324919 1329 0.009324919 1330 0.009324919 1331 0.009324919 1332 0.009324919 1333 0.009324919 1334 0.009324919 1335 0.009324919 1336 0.009324919 1337 0.009324919 1338 0.009324919 1339 0.009324919 1340 0.009324919 1341 0.009324919 1342 0.009324919 1343 0.009324919 1344 0.009324919 1345 0.009324919 1346 0.009324919 1347 0.009324919 1348 0.009324919 1349 0.009324919 1350 0.009324919 1351 0.009324919 1352 0.009324919 1353 0.009324919 1354 
0.009324919 1355 0.009324919 1356 0.009324919 1357 0.009324919 1358 0.009324919 1359 0.009324919 1360 0.009324919 1361 0.009324919 1362 0.009324919 1363 0.009324919 1364 0.009324919 1365 0.009324919 1366 0.009324919 1367 0.009324919 1368 0.009324919 1369 0.009324919 1370 0.009324919 1371 0.009324919 1372 0.009324919 1373 0.009324919 1374 0.009324919 1375 0.009324919 1376 0.009324919 1377 0.009324919 1378 0.009324919 1379 0.009324919 1380 0.009324919 1381 0.009324919 1382 0.009324919 1383 0.009324919 1384 0.009324919 1385 0.009324919 1386 0.009324919 1387 0.009324919 1388 0.009324919 1389 0.009324919 1390 0.009324919 1391 0.009324919 1392 0.009324919 1393 0.009324919 1394 0.009324919 1395 0.009324919 1396 0.009324919 1397 0.009324919 1398 0.009324919 1399 0.009324919 1400 0.009324919 1401 0.009324919 1402 0.009324919 1403 0.009324919 1404 0.009324919 1405 0.009324919 1406 0.009324919 1407 0.009324919 1408 0.009324919 1409 0.009324919 1410 0.009324919 1411 0.009324919 1412 0.009324919 1413 0.009324919 1414 0.009324919 1415 0.009324919 1416 0.009324919 1417 0.009324919 1418 0.009324919 1419 0.009324919 1420 0.009324919 1421 0.009324919 1422 0.009324919 1423 0.009324919 1424 0.009324919 1425 0.009324919 1426 0.009324919 1427 0.009324919 1428 0.009324919 1429 0.009324919 1430 0.009324919 1431 0.009324919 1432 0.009324919 1433 0.009324919 1434 0.009324919 1435 0.009324919 1436 0.009324919 1437 0.009324919 1438 0.009324919 1439 0.009324919 1440 0.009324919 1441 0.009324919 1442 0.009324919 1443 0.009324919 1444 0.009324919 1445 0.009324919 1446 0.009324919 1447 0.009324919 1448 0.009323381 1449 0.009323381 1450 0.009323381 1451 0.009323381 1452 0.009323381 1453 0.009323381 1454 0.009323381 1455 0.009323381 1456 0.009323381 1457 0.009323381 1458 0.009323381 1459 0.009323381 1460 0.009323381 1461 0.009323381 1462 0.009323381 1463 0.009323381 1464 0.009323381 1465 0.009323381 1466 0.009323381 1467 0.009323381 1468 0.009323381 1469 0.009323381 1470 0.009323381 1471 
0.009323381 1472 0.009323381 1473 0.009323381 1474 0.009323381 1475 0.009323381 1476 0.009323381 1477 0.009323381 1478 0.009323381 1479 0.009323381 1480 0.009323381 1481 0.009323381 1482 0.009323381 1483 0.009323381 1484 0.009323381 1485 0.009323381 1486 0.009323381 1487 0.009323381 1488 0.009323381 1489 0.009323381 1490 0.009323381 1491 0.009323381 1492 0.009323381 1493 0.009323381 1494 0.009323381 1495 0.009323381 1496 0.009323381 1497 0.009323381 1498 0.009323381 1499 0.009323381 1500 0.009323381 1501 0.009323381 1502 0.009323381 1503 0.009323381 1504 0.009323381 1505 0.009323381 1506 0.009323381 1507 0.009323381 1508 0.009323381 1509 0.009323381 1510 0.009323381 1511 0.009323381 1512 0.009323381 1513 0.009323381 1514 0.009323381 1515 0.009323381 1516 0.009323381 1517 0.009323381 1518 0.009323381 1519 0.009323381 1520 0.009323381 1521 0.009323381 1522 0.009323381 1523 0.009323381 1524 0.009323381 1525 0.009323381 1526 0.009323381 1527 0.009323381 1528 0.009323381 1529 0.009323381 1530 0.009323381 1531 0.009323381 1532 0.009323381 1533 0.009323381 1534 0.009323381 1535 0.009323381 1536 0.009323381 1537 0.009323381 1538 0.009323381 1539 0.009323381 1540 0.009323381 1541 0.009323381 1542 0.009323381 1543 0.009323381 1544 0.009323381 1545 0.009323381 1546 0.009323381 1547 0.009323381 1548 0.009323381 1549 0.009323381 1550 0.009323381 1551 0.009323381 1552 0.009323381 1553 0.009323381 1554 0.009323381 1555 0.009323381 1556 0.009323381 1557 0.009323381 1558 0.009323381 1559 0.009323381 1560 0.009323381 1561 0.009323381 1562 0.009323381 1563 0.009323381 1564 0.009323381 1565 0.009323381 1566 0.009323381 1567 0.009323381 1568 0.009323381 1569 0.009323381 1570 0.009323381 1571 0.009323381 1572 0.009323381 1573 0.009323381 1574 0.009323381 1575 0.009323381 1576 0.009323381 1577 0.009323381 1578 0.009323381 1579 0.009323381 1580 0.009323381 1581 0.009323381 1582 0.009323381 1583 0.009323381 1584 0.009323381 1585 0.009323381 1586 0.009323381 1587 0.009323381 1588 
0.009323381 1589 0.009323381 1590 0.009323381 1591 0.009323381 1592 0.009323381 1593 0.009323381 1594 0.009323381 1595 0.009323381 1596 0.009323381 1597 0.009323381 1598 0.009323381 1599 0.009323381 1600 0.009323381 1601 0.009323381 1602 0.009323381 1603 0.009323381 1604 0.009323381 1605 0.009323381 1606 0.009323381 1607 0.009323381 1608 0.009323381 1609 0.009323381 1610 0.009323381 1611 0.009323381 1612 0.009323381 1613 0.009323381 1614 0.009323381 1615 0.009323381 1616 0.009323381 1617 0.009323381 1618 0.009323381 1619 0.009323381 1620 0.009323381 1621 0.009323381 1622 0.009323381 1623 0.009323381 1624 0.009323381 1625 0.009323381 1626 0.009323381 1627 0.009323381 1628 0.009323381 1629 0.009323381 1630 0.009323381 1631 0.009323381 1632 0.009323381 1633 0.009323381 1634 0.009323381 1635 0.009323381 1636 0.009323381 1637 0.009323381 1638 0.009323381 1639 0.009323381 1640 0.009323381 1641 0.009323381 1642 0.009323381 1643 0.009323381 1644 0.009323381 1645 0.009323381 1646 0.009323381 1647 0.009323381 1648 0.009323381 1649 0.009323381 1650 0.009323381 1651 0.009323381 1652 0.009323381 1653 0.009323381 1654 0.009323381 1655 0.009323381 1656 0.009323381 1657 0.009323381 1658 0.009323381 1659 0.009323381 1660 0.009323381 1661 0.009323381 1662 0.009323381 1663 0.009323381 1664 0.009323381 1665 0.009323381 1666 0.009323381 1667 0.009323381 1668 0.009323381 1669 0.009323381 1670 0.009323381 1671 0.009323381 1672 0.009323381 1673 0.009323381 1674 0.009323381 1675 0.009323381 1676 0.009323381 1677 0.009323381 1678 0.009323381 1679 0.009323381 1680 0.009323381 1681 0.009323381 1682 0.009323381 1683 0.009323381 1684 0.009323381 1685 0.009323381 1686 0.009323381 1687 0.009323381 1688 0.009323381 1689 0.009323381 1690 0.009323381 1691 0.009323381 1692 0.009323381 1693 0.009323381 1694 0.009323381 1695 0.009323381 1696 0.009323381 1697 0.009323381 1698 0.009323381 1699 0.009323381 1700 0.009323381 1701 0.009323381 1702 0.009323381 1703 0.009323381 1704 0.009323381 1705 
0.009323381 1706 0.009323381 1707 0.009323381 1708 0.009323381 1709 0.009323381 1710 0.009323381 1711 0.009323381 1712 0.009323381 1713 0.009323381 1714 0.009323381 1715 0.009323381 1716 0.009323381 1717 0.009323381 1718 0.009323381 1719 0.009323381 1720 0.009323381 1721 0.009323381 1722 0.009323381 1723 0.009323381 1724 0.009323381 1725 0.009321195 1726 0.009321195 1727 0.009321195 1728 0.009321195 1729 0.009321195 1730 0.009321195 1731 0.009321195 1732 0.009321195 1733 0.009321195 1734 0.009321195 1735 0.009321195 1736 0.009321195 1737 0.009321195 1738 0.009321195 1739 0.009321195 1740 0.009321195 1741 0.009321195 1742 0.009321195 1743 0.009321195 1744 0.009321195 1745 0.009321195 1746 0.009321195 1747 0.009321195 1748 0.009321195 1749 0.009321195 1750 0.009321195 1751 0.009321195 1752 0.009321195 1753 0.009321195 1754 0.009321195 1755 0.009321195 1756 0.009321195 1757 0.009321195 1758 0.009321195 1759 0.009321195 1760 0.009321195 1761 0.009321195 1762 0.009321195 1763 0.009321195 1764 0.009321195 1765 0.009321195 1766 0.009321195 1767 0.009321195 1768 0.009321195 1769 0.009321195 1770 0.009321195 1771 0.009321195 1772 0.009321195 1773 0.009321195 1774 0.009321195 1775 0.009321195 1776 0.009321195 1777 0.009321195 1778 0.009321195 1779 0.009321195 1780 0.009321195 1781 0.009321195 1782 0.009321195 1783 0.009321195 1784 0.009321195 1785 0.009321195 1786 0.009321195 1787 0.009321195 1788 0.009321195 1789 0.009321195 1790 0.009321195 1791 0.009321195 1792 0.009321195 1793 0.009321195 1794 0.009321195 1795 0.009321195 1796 0.009321195 1797 0.009321195 1798 0.009321195 1799 0.009321195 1800 0.009321195 1801 0.009321195 1802 0.009321195 1803 0.009321195 1804 0.009321195 1805 0.009321195 1806 0.009321195 1807 0.009321195 1808 0.009321195 1809 0.009321195 1810 0.009321195 1811 0.009321195 1812 0.009321195 1813 0.009321195 1814 0.009321195 1815 0.009321195 1816 0.009321195 1817 0.009321195 1818 0.009321195 1819 0.009321195 1820 0.009321195 1821 0.009321195 1822 
0.009321195 1823 0.009321195 1824 0.009321195 1825 0.009321195 1826 0.009321195 1827 0.009321195 1828 0.009321195 1829 0.009321195 1830 0.009321195 1831 0.009321195 1832 0.009321195 1833 0.009321195 1834 0.009321195 1835 0.009321195 1836 0.009321195 1837 0.009321195 1838 0.009321195 1839 0.009321195 1840 0.009321195 1841 0.009321195 1842 0.009321195 1843 0.009321195 1844 0.009321195 1845 0.009321195 1846 0.009321195 1847 0.009321195 1848 0.009321195 1849 0.009321195 1850 0.009321195 1851 0.009321195 1852 0.009321195 1853 0.009321195 1854 0.009321195 1855 0.009321195 1856 0.009321195 1857 0.009321195 1858 0.009321195 1859 0.009321195 1860 0.009321195 1861 0.009321195 1862 0.009321195 1863 0.009321195 1864 0.009321195 1865 0.009321195 1866 0.009321195 1867 0.009321195 1868 0.009321195 1869 0.009321195 1870 0.009321195 1871 0.009321195 1872 0.009321195 1873 0.009321195 1874 0.009321195 1875 0.009321195 1876 0.009321195 1877 0.009321195 1878 0.009321195 1879 0.009321195 1880 0.009321195 1881 0.009321195 1882 0.009321195 1883 0.009321195 1884 0.009321195 1885 0.009321195 1886 0.009321195 1887 0.009321195 1888 0.009321195 1889 0.009321195 1890 0.009321195 1891 0.009321195 1892 0.009321195 1893 0.009321195 1894 0.009321195 1895 0.009321195 1896 0.009321195 1897 0.009321195 1898 0.009321195 1899 0.009321195 1900 0.009321195 1901 0.009321195 1902 0.009321195 1903 0.009321195 1904 0.009321195 1905 0.009321195 1906 0.009321195 1907 0.009321195 1908 0.009321195 1909 0.009321195 1910 0.009321195 1911 0.009321195 1912 0.009321195 1913 0.009321195 1914 0.009321195 1915 0.009321195 1916 0.009321195 1917 0.009321195 1918 0.009321195 1919 0.009321195 1920 0.009321195 1921 0.009321195 1922 0.009321195 1923 0.009321195 1924 0.009321195 1925 0.009321195 1926 0.009321195 1927 0.009321195 1928 0.009321195 1929 0.009321195 1930 0.009321195 1931 0.009321195 1932 0.009321195 1933 0.009321195 1934 0.009321195 1935 0.009321195 1936 0.009321195 1937 0.009321195 1938 0.009321195 1939 
0.009321195 1940 0.009321195 1941 0.009321195 1942 0.009321195 1943 0.009321195 1944 0.009321195 1945 0.009321195 1946 0.009321195 1947 0.009321195 1948 0.009321195 1949 0.009321195 1950 0.009321195 1951 0.009321195 1952 0.009321195 1953 0.009321195 1954 0.009321195 1955 0.009321195 1956 0.009321195 ] using LaTeXStrings Plots.plot(lowerBoundsData[:,1], lowerBoundsData[:,2], xaxis=("Number of Cuts"), yaxis=("Objective value"), label="Lower Bound", legend=:topleft, legendtitle="Bounds") # + Plots.plot!(upperBoundsData[:,1], upperBoundsData[:,2], xaxis=("Number of Cuts"), yaxis=("Objective value"), label="Upper Bound", legend=:bottomright, legendtitle="Bounds") # - savefig("port2_k5_withminreturn_convergenceplot.pdf")
Create Convergence Plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# # Lesson 11 - Stock Prices
# <NAME>-Pinion

import numpy as np
import pandas as pd
# import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline

# #### Part (a): How does the RMSE for your linear model compare to the standard deviation of the LogRatios?

df = pd.read_csv('./Data/Stocks15yr.csv')
df.head()

# convert dates to date-time format
df['Date'] = pd.to_datetime(df.Date)
df.dtypes

# pivot so each column is one ticker's closing price, indexed by date
df = df.pivot(index='Date', columns='ticker', values='Close')
df.head(12)

df.shape

# check for bad/missing values
df.isnull().sum().sum()

# forward-fill then back-fill missing prices
# (ffill/bfill instead of the deprecated fillna(method=...); computed once
# rather than once for the check and again for the assignment)
df = df.ffill().bfill()
df.isnull().sum().sum()

df.head(13)

# compute logs of closing prices
df1 = np.log(df)
df1.head()

# shift log price forward one day
df1.shift().head()

# compute LogRatios: log(today's price) - log(yesterday's price)
df2 = df1 - df1.shift()
df2 = df2.dropna()
df2.head(15)

# plot Google's LogRatios
df2.GOOG.plot()

# look for stock splits
print('Google minimum LogRatio', df2.GOOG.min())
print('Google minimum price ratio', np.exp(df2.GOOG.min()))
# This looks like a stock split.

# compute standard deviation for all LogRatios
df2.values.std()

# check for extreme variations
(df2.abs() > 0.1).sum().sum()

# +
# remove extreme variations
# TODO(homework): use 3*std (3 standard deviations) instead of the fixed
# 0.1 threshold, because 0.1 throws away too much data
df2[df2.abs() > 0.1] = 0

# check for extreme variations
(df2.abs() > 0.1).sum().sum()

# +
# define inputs and outputs: today's values are used to predict tomorrow's
# value, so the output rows are the input rows shifted by one day
inputs = df2.values[0:-1, :]
outputs = df2.values[1:, :]
print("Shapes:")
print(inputs.shape)
print(outputs.shape)

print("\nSample values:")
print(inputs[0:3, 0:3])
print(outputs[0:3, 0:3])
# -

# load sklearn
from sklearn.linear_model import LinearRegression
LR = LinearRegression()

# fit linear model
LR.fit(inputs, outputs)

# predict outputs
predictions = LR.predict(inputs)

# check sklearn mean square error
mse = ((outputs - predictions) ** 2).mean()
rmse = np.sqrt(mse)
print('MSE = ', mse)
print('RMSE = ', rmse)
print('output\'s standard deviation = ', outputs.std())

# #### Part (b): Plot scatter plot of actual LogRatios vs predicted LogRatios for Google Stock (GOOG)

print(outputs[0:3, 0:3])
print('\n', predictions[0:3, 0:3])

df2_sk = df2.copy()
df2_sk.iloc[1:, :] = predictions

# actual LogRatios are in df2, predicted LogRatios are in df2_sk
plt.scatter(x=df2.GOOG.values, y=df2_sk.GOOG.values, alpha=0.1)
print('correlation coefficient = ', df2.GOOG.corr(df2_sk.GOOG))
Lesson 11 - Stock Prices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Firefly # Firefly is a natural inspired metaheuristic search algorithm and its idea is that the fireflies (aka solutions) get attracted to the most luminous solution (aka fly) and try to follow that solution. Fireflies in natural don't get attracted to luminous objects but actualy get repulsed by lighting but for pedagogical reasons we assume that light attracts fireflies. # __FIRE-FLY__(_source_ , _destination_ , _num of iterations_ , _num of flies_ ) __returns__ a solution/route # &emsp;generate _num of flies_ random solutions/routes between _source_ and _destination_ # &emsp;__for__ _num of iterations_ __do__ # &emsp;&emsp;__for__ i __of__ _num of flies_ __do__ # &emsp;&emsp;initialize _attractivness_ list for the other fireflies relative to the fly _i_ # &emsp;&emsp;&emsp;__for__ j __of__ _num of flies_ __do__ # &emsp;&emsp;&emsp;&emsp; __if__ i \= j __skip this iteration__ &emsp;&emsp;&emsp;&emsp;&emsp;// so we don't move fly in the direction of itself # &emsp;&emsp;&emsp;&emsp; _attractivness_ \[j\] \= _luminosity_ of fly j relative to i # &emsp;&emsp;&emsp;_move_ fly _i_ to fly _j_ which is the most luminous fly relative to it # &emsp;&emsp;__return__ the most _luminous_ fly # We will be doing shortest path problem as usual so our solution would be a route and _moving_ would be between fly/route to another fly/route, it would be just like doing crossover between the two lists/routes and substitute the moving route with the child of the crossover. __But__ the function of luminosity wouldn't be that the shorter of two paths would be more luminous and attractive. 
# # Navigation apps usually find the path to be travelled with algorithms like contraction hierarchies or graph neural networks, which offer near-exact solutions (or exact in the case of CH). But they learned that the best way to travel is not just the shortest path, but also the path that has fewer turns and the path that doesn't go into residential areas; read more in this article [your navigation app is making traffic unmanageable](https://spectrum.ieee.org/computing/hardware/your-navigation-app-is-making-traffic-unmanageable).
#
# What can we do in our objective function so that we don't always favor the shortest path, without the need for massive datacenters? We can incorporate how few turns a route has by counting the number of nodes in the route. If you go digging through the `osmnx` source code and inspect how it parses OSM data, filters it and removes data unnecessary to the topology of the generated graph — remember the `simplify` option in the [`osmnx.graph`](https://osmnx.readthedocs.io/en/stable/osmnx.html#module-osmnx.graph) module functions — you will find that the library gives us the nodes that change the direction of a given road.
#
# The _luminosity_ function would be equal to the length of the route plus the number of nodes, because we are trying to minimize both of them, and because the paths in our search space are relatively close to each other the addition of the number of nodes makes a difference.
#
# We also need to have a notion of distance between two solutions/routes, and that would be the number of common nodes between them.
import osmnx as ox
from utilities import *
from itertools import filterfalse

# Fetch a small (300 m radius) street network around the given lat/lon point.
# Node IDs 55808290 (source) and 389677909 (destination) are OSM node IDs
# inside this neighbourhood, hard-coded throughout the notebook.
location_point = (43.661667, -79.395)
G = ox.graph_from_point(location_point, dist=300, clean_periphery=True, simplify=True)
graph_map = ox.plot_graph_folium(G, popup_attribute='name', edge_width=2)
fig, ax = ox.plot_graph(G)

# +
# marking both the source and destination node
nc = ['r' if node == 389677909 or node == 55808290 else '#336699' for node in G.nodes()]
ns = [50 if node == 389677909 or node == 55808290 else 8 for node in G.nodes()]
fig, ax = ox.plot_graph(G, node_size=ns, node_color=nc, node_zorder=2)
# -

# let's define the move function and luminosity function

# +
move = cross_over  # we want to merge two routes, so the cross_over function explained in the GA notebook is enough


def luminosity(G, route):
    """Objective to minimize: route length plus the number of nodes.

    len(route) counts curves/intersections, penalizing twisty routes.
    """
    return cost(G, route) + len(route)


def distance(route1, route2):
    """Similarity between two routes: the number of common nodes."""
    return len(set(route1) & set(route2))
# -

num_of_iterations = 100
num_of_flies = 25

# initialize our starting population
# BUG FIX: the population size is num_of_flies, not num_of_iterations.
# The original built 100 random routes, but the algorithm only ever updates
# the first num_of_flies of them, leaving 75 untouched random routes that
# still competed in the final min() over the population.
flies = [randomized_search(G, 55808290, 389677909) for _ in range(num_of_flies)]

# fly _i_ perceives fly _j_ from a distance: the final luminosity function is the independent luminosity multiplied by a factor dependent on the distance between the two flies, so $${luminosity}_{i} = {luminosity}_{j}*{e}^{-\gamma * distance}$$ and this gamma is the attractiveness coefficient between the flies, which will be assumed constant to simplify the implementation.
gamma = 2 # ## The Algorithm for _ in tqdm(range(num_of_iterations)): for i in range(num_of_flies): flies_luminosity = list() # for all flies except i for j in range(num_of_flies): if i == j: continue # skips getting luminosity of fly i flies_luminosity.append((j , luminosity(G, flies[j]))) # saving both the fly and its luminosity moving_fly = flies[i] # remember that the shortest path and the fewest number of nodes is more luminous for us # in this problem, hence using min function instead of max moving_to_fly = min(flies_luminosity, key = lambda fly : fly[1] * math.exp(-1 * gamma * distance(moving_fly, flies[fly[0]]))) moving_to_fly = flies[moving_to_fly[0]] # updating flies[i] = move(moving_fly, moving_to_fly) # let's find out the most luminous fly route = min(flies, key = lambda fly : luminosity(G, fly)) cost(G, route) fig, ax = ox.plot_graph_route(G, route)
FireFly.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import sys import json import datetime import pandas as pd import numpy as np # - cwd = os.getcwd() join = os.path.join norm = os.path.normpath sys.path.append(norm(join(cwd, '..', '..', '..', 'glhe'))) from standalone.plant_loop import PlantLoop import glhe import matplotlib.pyplot as plt # %matplotlib inline # + plt.style.use('ggplot') plt.rcParams['figure.figsize'] = [15, 9] plt.rcParams['font.size'] = 14 # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', None) # - def rename_csv_g_files(new_name): for root, dirs, files in os.walk(cwd): for file in files: if file in ['g.csv', 'g_b.csv', 'sts.csv', 'lts.csv']: tokens = file.split('.') name = '{}_{}.csv'.format(tokens[0], new_name) if os.path.exists(join(cwd, name)): os.remove(join(cwd, name)) os.rename(file, name) # + d = glhe.utilities.functions.load_json('STS_Flow_input.json') d['simulation']['output-path'] = cwd alpha = d['soil']['conductivity'] / (d['soil']['specific-heat'] * d['soil']['density']) ts = d['borehole-definitions'][0]['length'] ** 2 / (9 * alpha) # + flow_rate = 0.01 d['flow-profile'][0]['value'] = flow_rate d['simulation']['output-csv-name'] = 'out_{:0.2f}.csv'.format(flow_rate) with open(join(cwd, 'in_{:0.2f}.json'.format(flow_rate)), 'w') as f: f.write(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '))) PlantLoop('in_{:0.2f}.json'.format(flow_rate)).simulate() # + flow_rate = 0.05 d['flow-profile'][0]['value'] = flow_rate d['simulation']['output-csv-name'] = 'out_{:0.2f}.csv'.format(flow_rate) with open(join(cwd, 'in_{:0.2f}.json'.format(flow_rate)), 'w') as f: f.write(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '))) PlantLoop('in_{:0.2f}.json'.format(flow_rate)).simulate() # + flow_rate = 0.1 
d['flow-profile'][0]['value'] = flow_rate d['simulation']['output-csv-name'] = 'out_{:0.2f}.csv'.format(flow_rate) with open(join(cwd, 'in_{:0.2f}.json'.format(flow_rate)), 'w') as f: f.write(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '))) PlantLoop('in_{:0.2f}.json'.format(flow_rate)).simulate() # + flow_rate = 0.2 d['flow-profile'][0]['value'] = flow_rate d['simulation']['output-csv-name'] = 'out_{:0.2f}.csv'.format(flow_rate) with open(join(cwd, 'in_{:0.2f}.json'.format(flow_rate)), 'w') as f: f.write(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '))) PlantLoop('in_{:0.2f}.json'.format(flow_rate)).simulate() # + flow_rate = 0.3 d['flow-profile'][0]['value'] = flow_rate d['simulation']['output-csv-name'] = 'out_{:0.2f}.csv'.format(flow_rate) with open(join(cwd, 'in_{:0.2f}.json'.format(flow_rate)), 'w') as f: f.write(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '))) PlantLoop('in_{:0.2f}.json'.format(flow_rate)).simulate() # + flow_rate = 0.4 d['flow-profile'][0]['value'] = flow_rate d['simulation']['output-csv-name'] = 'out_{:0.2f}.csv'.format(flow_rate) with open(join(cwd, 'in_{:0.2f}.json'.format(flow_rate)), 'w') as f: f.write(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '))) PlantLoop('in_{:0.2f}.json'.format(flow_rate)).simulate() # - df_001 = pd.read_csv('out_{:0.2f}.csv'.format(0.01), parse_dates=True, index_col='Date/Time') df_005 = pd.read_csv('out_{:0.2f}.csv'.format(0.05), parse_dates=True, index_col='Date/Time') df_010 = pd.read_csv('out_{:0.2f}.csv'.format(0.1), parse_dates=True, index_col='Date/Time') df_020 = pd.read_csv('out_{:0.2f}.csv'.format(0.2), parse_dates=True, index_col='Date/Time') df_030 = pd.read_csv('out_{:0.2f}.csv'.format(0.3), parse_dates=True, index_col='Date/Time') df_040 = pd.read_csv('out_{:0.2f}.csv'.format(0.4), parse_dates=True, index_col='Date/Time') def make_g_b(df): df['g_b'] = (df['SingleUTubeBHGrouted:BH 1:Outlet Temp. 
[C]'] - df['GroundHeatExchangerSTS:GHE 1:Borehole Wall Temp. [C]']) / (10 * df['SingleUTubeBHGrouted:BH 1:BH Resist. [m-K/W]']) df['lntts'] = np.log(df['Elapsed Time [s]'] / ts) return df df_001 = make_g_b(df_001) df_005 = make_g_b(df_005) df_010 = make_g_b(df_010) df_020 = make_g_b(df_020) df_030 = make_g_b(df_030) df_040 = make_g_b(df_040) def plot_g_b(dfs, labels): fig = plt.figure() for idx, df in enumerate(dfs): plt.plot(df['lntts'], df['g_b'], label=labels[idx]) plt.legend() plt.savefig('gb_flow.PNG',bbox_inches='tight') plt.show() plot_g_b([df_001, df_005, df_010, df_020, df_030, df_040], ['0.01', '0.05', '0.10', '0.20', '0.30', '0.40']) def plot_temps(dfs, labels): fig = plt.figure() for idx, df in enumerate(dfs): plt.plot(df['SingleUTubeBHGrouted:BH 1:Outlet Temp. [C]'].loc['2019-01-01 00:00:00' : '2019-01-01 00:10:00'], label=labels[idx]) plt.legend() plt.show() plot_temps([df_001, df_005, df_010, df_020, df_030, df_040], ['0.01', '0.05', '0.10', '0.20', '0.30', '0.40'])
validation/STS_Flow_Testing/STS_Flow_Testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python3s import os.path import tensorflow as tf import helper import warnings from distutils.version import LooseVersion import project_tests as tests from tqdm import tqdm KEEP_PROB = 0.8 #lower value will help generalize more (but with fewer epochs, higher keep_prob creates more clearer segmentations) LEARNING_RATE = 0.0009 #high learning rate will cause overshooting and huge oscillations in loss. (i.e. even 0.009 - 10 times higher will completely ruin the training) IMAGE_SHAPE = (160, 576) #higher resolution will help segmenting in a more detailed fashion EPOCHS = 50 BATCH_SIZE = 5 #with batch_size smaller, lower memory will be used as less number of images need to be loaded into memory, the training will go on in SGD fashion, and even with 1 epoch, the small batch size and SGD will make the training look like many epochs training if the trianing sets are somewhat similar (i.e. all roads and we're doing only 2 classes) NUM_CLASSES = 2 #the smaller the classes, the easier it is to segment using lower number of epochs and batch_size USE_L2_LOSS = False L2_LOSS_WEIGHT = 0.01 # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__) print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) # + def load_vgg(sess, vgg_path): """ Load Pretrained VGG Model into TensorFlow. 
:param sess: TensorFlow Session :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb" :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out) """ # TODO: Implement function # Use tf.saved_model.loader.load to load the model and weights vgg_tag = 'vgg16' vgg_input_tensor_name = 'image_input:0' vgg_keep_prob_tensor_name = 'keep_prob:0' vgg_layer3_out_tensor_name = 'layer3_out:0' vgg_layer4_out_tensor_name = 'layer4_out:0' vgg_layer7_out_tensor_name = 'layer7_out:0' #load the vgg model located at data/vgg16/vgg, this path is defined by vgg_path later tf.saved_model.loader.load(sess, [vgg_tag], vgg_path) #make sure to load the default graph from the loaded model before pulling tensors by name into storage variables graph = tf.get_default_graph() input_image = graph.get_tensor_by_name(vgg_input_tensor_name) keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name) layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name) layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name) layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name) # a = tf.Print(layer7_out, [tf.shape(layer7_out)]) # with tf.Session() as sess: # sess.run(a) return input_image, keep_prob, layer3_out, layer4_out, layer7_out tests.test_load_vgg(load_vgg, tf) # + def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes, sess=None, vgg_input=None, keep_prob=None): """ Create the layers for a fully convolutional network. Build skip-layers using the vgg layers. 
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output :param vgg_layer4_out: TF Tensor for VGG Layer 4 output :param vgg_layer7_out: TF Tensor for VGG Layer 7 output :param num_classes: Number of classes to classify :return: The Tensor for the last layer of output """ # TODO: Implement function #constructing FCN-8 architecture #reduce the number of outputs to match the num_classes in the training set (in this case 2, roads vs not roads) by using 1x1 convolution conv_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding="same", kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3)) #upsample or deconv (from 1x1 to 2v2 just like in the FCN paper: https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcn.pdf) layer4a_in1 = tf.layers.conv2d_transpose(conv_1x1, num_classes, 4, 2, padding="same", kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3)) # make sure the shapes are the same! # 1x1 convolution of vgg layer 4 layer4a_in2 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) # skip connection (element-wise addition) layer4a_out = tf.add(layer4a_in1, layer4a_in2) # upsample layer3a_in1 = tf.layers.conv2d_transpose(layer4a_out, num_classes, 4, strides= (2, 2), padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) # 1x1 convolution of vgg layer 3 layer3a_in2 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) # skip connection (element-wise addition) layer3a_out = tf.add(layer3a_in1, layer3a_in2) # upsample nn_last_layer = tf.layers.conv2d_transpose(layer3a_out, num_classes, 16, strides= (8, 8), padding= 'same', kernel_initializer= tf.random_normal_initializer(stddev=0.01), 
kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) #following are used for printing shapes of layers of interest - very useful. # if sess is not None: # img = np.random.rand(1, 160, 576, 3) # prints = [ # tf.Print(conv_1x1, [tf.shape(conv_1x1), " -------------------1x1conv before deconv starts -------------------"], # summarize=4)] # sess.run(tf.global_variables_initializer()) # sess.run(prints, feed_dict={vgg_input: img, keep_prob: 1.0}) # if sess is not None: # img2 = np.random.rand(1, 160, 576, 3) # prints = [ # tf.Print(vgg_layer7_out, [tf.shape(vgg_layer7_out), " ------------------- vgg_layer7_out -------------------"], # summarize=4)] # sess.run(tf.global_variables_initializer()) # sess.run(prints, feed_dict={vgg_input: img2, keep_prob: 1.0}) return nn_last_layer tests.test_layers(layers) # - def optimize(nn_last_layer, correct_label, learning_rate, num_classes): """ Build the TensorFLow loss and optimizer operations. :param nn_last_layer: TF Tensor of the last layer in the neural network :param correct_label: TF Placeholder for the correct label image :param learning_rate: TF Placeholder for the learning rate :param num_classes: Number of classes to classify :return: Tuple of (logits, train_op, cross_entropy_loss) """ # make logits a 2D tensor where each row represents a pixel and each column a class logits = tf.reshape(nn_last_layer, (-1, num_classes)) correct_label = tf.reshape(correct_label, (-1,num_classes)) # define loss function cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logits, labels= correct_label)) if USE_L2_LOSS: #adding L2 losses to apply to loss reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) #collect all losses from every layer reg_constant = L2_LOSS_WEIGHT # Choose an appropriate one. 
final_loss = cross_entropy_loss + reg_constant * sum(reg_losses) # define training operation optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate) if USE_L2_LOSS: train_op = optimizer.minimize(final_loss) else: train_op = optimizer.minimize(cross_entropy_loss) return logits, train_op, cross_entropy_loss tests.test_optimize(optimize) def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate): """ Train neural network and print out the loss during training. :param sess: TF Session :param epochs: Number of epochs :param batch_size: Batch size :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size) :param train_op: TF Operation to train the neural network :param cross_entropy_loss: TF Tensor for the amount of loss :param input_image: TF Placeholder for input images :param correct_label: TF Placeholder for label images :param keep_prob: TF Placeholder for dropout keep probability :param learning_rate: TF Placeholder for learning rate """ # TODO: Implement function sess.run(tf.global_variables_initializer()) print("Training...") print() for i in range(epochs): print("EPOCH {} ...".format(i+1)) for image, label in get_batches_fn(batch_size): _, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: image, correct_label: label, keep_prob: KEEP_PROB, learning_rate:LEARNING_RATE}) print("Loss: = {:.3f}".format(loss)) print() tests.test_train_nn(train_nn) # + num_classes = NUM_CLASSES image_shape = IMAGE_SHAPE data_dir = './data' runs_dir = './runs' tests.test_for_kitti_dataset(data_dir) # Download pretrained vgg model #helper.maybe_download_pretrained_vgg(data_dir) # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset. # You'll need a GPU with at least 10 teraFLOPS to train on. 
# https://www.cityscapes-dataset.com/ # + import numpy as np sess = tf.InteractiveSession() #use interactive session so running on test images can be done on a separate cell if desired # Path to vgg model vgg_path = os.path.join(data_dir, 'vgg16/vgg') # Create function to get batches get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape) # OPTIONAL: Augment Images for better results # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network # TODO: Build NN using load_vgg, layers, and optimize function # TODO: Train NN using the train_nn function # TODO: Save inference data using helper.save_inference_samples # helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image) # OPTIONAL: Apply the trained model to a video epochs = EPOCHS batch_size = BATCH_SIZE # TF placeholders correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label') learning_rate = tf.placeholder(tf.float32, name='learning_rate') input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path) #sess.run(tf.Print(vgg_layer7_out, [tf.shape(vgg_layer7_out)])) #nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes) nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes, sess=sess, vgg_input=input_image, keep_prob=keep_prob) logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes) # TODO: Train NN using the train_nn function train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate) # TODO: Save inference data using helper.save_inference_samples #helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image) # OPTIONAL: Apply the trained model to a video # - # TODO: Save inference data 
using helper.save_inference_samples helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image) print("running on images - done")
semanticSegmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import wisps import pandas as pd import numpy as np import matplotlib.pyplot as plt import wisps.simulations as wispsim import astropy.units as u import splat.empirical as spe import splat #plt.style.use('dark_background') # Purpose: compare various luminosity functions # %matplotlib inline import popsims baraffe=popsims.make_systems(model='baraffe2003', bfraction=0.2) saumon=popsims.make_systems(model='saumon2008', bfraction=0.2) sonora=popsims.make_systems(model='marley2019', bfraction=0.2) phillips=popsims.make_systems(model='phillips2020', bfraction=0.2) burrows=popsims.make_systems(model='burrows2001', bfraction=0.2) from astropy.io import ascii klf=pd.read_csv('/users/caganze/research/wisps/data/kirkpatricklf.txt', delimiter=',') dntb=ascii.read('/users/caganze/research/wisps/data/daniellalf.txt').to_pandas() def splat_mag_to_spt(mag): grid=np.arange(17, 39) rel=spe.typeToMag(grid, 'MKO_J',reference='dupuy2012')[0] vals= np.polyval(rel, grid) spt_sorted_idx=np.argsort(vals) return np.interp(mag, vals[spt_sorted_idx], grid[spt_sorted_idx]) klf['tfm']=np.mean(np.array([klf.t0.values, klf.tf.values]), axis=0) klf['spt']=klf.tfm.apply(wispsim.splat_teff_to_spt).apply(round) klf=klf.applymap(float) dntb['spt']=dntb.M_J.apply(splat_mag_to_spt) # + def ryan_lf(J): logphi=-0.30 + 0.11*(J-14) + 0.15*(J -14)**2.+ 0.015*(J-14)**3-0.00020*(J-14)**4 return (10**logphi)*(10**-3) def custom_histogram(things, grid, binsize): n=[] for g in grid: n.append(len(things[np.logical_and(g<=things, things< g+binsize)])) return np.array(n) # - jgrid=np.arange(10, 18, .5) teffgrid=np.arange(50, 4000, 150) #the last point is a lower limit kirkpatrick2020LF={'bin_center':np.flip(np.array([2025, 1875, 1725, 1575, 1425, 1275, 1125 , 975, 825, 675, 525])), 
'values':np.flip(np.array([0.72, 0.50,0.78, 0.81,0.94, 1.95, 1.11, 1.72, 1.99, 2.80, 4.24])), 'unc':np.flip(([0.18, 0.17, 0.20,0.20, 0.22, 0.3, 0.25, 0.3, 0.32, 0.37, 0.70]))} 1950+150 # + binedges= np.append(kirkpatrick2020LF['bin_center']-75, kirkpatrick2020LF['bin_center'][-1]+75) binedges # - kirkpatrick2020LF def scale_lf_teff(teffs): binedges= np.append(kirkpatrick2020LF['bin_center']-75, kirkpatrick2020LF['bin_center'][-1]+75) #bools=np.logical_and(teffs <= binedges[-1], teffs >= binedges[0]) #print (binedges[0], binedges[-1]) preds=np.histogram(teffs, bins=binedges, normed=False)[0] obs=np.array(kirkpatrick2020LF['values']) unc=np.array(kirkpatrick2020LF['unc']) obs_monte_carlo= np.random.normal(obs, unc, (10000, len(obs))) pred_monte= np.ones_like(obs_monte_carlo)*(preds) unc_monte= np.ones_like(obs_monte_carlo)*(unc) #unc_monte= scale=(np.nansum((obs_monte_carlo*pred_monte)/(unc_monte**2), axis=1)\ /np.nansum(((pred_monte**2)/(unc_monte**2)), axis=1))*(10**-3) res=preds*np.nanmedian(scale), [np.nanmedian(scale), np.nanstd(scale), \ np.sum(preds*np.nanmedian(scale))] #fig, ax=plt.subplots() #ax.step(kirkpatrick2020LF['bin_centertw'], preds*scale*(10**-3), where='mid') #ax.errorbar(kirkpatrick2020LF['bin_center'], np.array(kirkpatrick2020LF['values'])*(10**-3), # yerr= np.array(kirkpatrick2020LF['unc'])*(10**-3), fmt='o', color='#111111') return res klf['bin_center']=np.mean(np.array([klf.t0.values, klf.tf.values]), axis=0) klf=klf.replace(0.0,np.nan) # + teffs_bar=baraffe['temperature'] teffs_saumon=saumon['temperature'] teffs_sonora=sonora['temperature'] teffs_phil=phillips['temperature'] teffs_burr=burrows['temperature'] #normteff_bar = 0.63*(10**-3)/ len(teffs_bar[np.logical_and(teffs_bar>=1650, teffs_bar <=1800)]) #normteff_saumon = 0.63*(10**-3)/ len(teffs_saumon[np.logical_and(teffs_saumon>=1650, teffs_saumon <=1800)]) #normteff_sonora= 0.63*(10**-3)/ len(teffs_sonora[np.logical_and(teffs_sonora>=1650, teffs_sonora <=1800)]) 
#normteff_phil=0.63*(10**-3)/ len(teffs_phil[np.logical_and(teffs_phil>=1650, teffs_phil <=1800)]) #jmags=wisps.drop_nan(spe.typeToMag(SIMULATED_DIST['spts'][0], '2MASS J')[0]) #jnorm=6.570*(10**-3)/len(jmags[np.logical_and(jmags>=10.25-.25, jmags <=10.25+.25)]) # - scale_lf_teff(teffs_bar) # + #s0 # - #sem_emp_phi_j=custom_histogram(jmags, jgrid, .5)*jnorm baraffe_phi_teff, s0= scale_lf_teff(teffs_bar) saumon_phi_teff, s1=scale_lf_teff(teffs_saumon) sonora_phi_teff, s2=scale_lf_teff(teffs_sonora) phil_phi_teff, s3=scale_lf_teff(teffs_phil) burr_phi_teff, s4=scale_lf_teff(teffs_burr) SCALE_FACTORS={'baraffe2003': s0, 'saumon2008': s1, 'marley2019': s2, 'phillips2020': s3, 'burrows2001':s4} SCALE_FACTORS import pickle with open(wisps.OUTPUT_FILES+'/lf_scales.pkl', 'wb') as file: pickle.dump(SCALE_FACTORS,file) # + #count how many things are in the masses of 0.1 and stuff and compare to 0.005 pc to 0.0037 pc^3 #2 things doing this #teff-> bolometric correction -> simulated luminosity ---> magnitude #try will best's 2018 relation #and that we dont't complete samples for some magnitudes #compare to bochanski's measurement #look at the scale factors between those two plots you might you want #you should be plotting the fits that are #invert relations #invert #binary fraction (how many) #binary fraction random 20% #binary mass ratio from a distribution from splat (allen et al. 
from splat) #the secondary have a magnitude #the bianay #mag of the system by combining the flux #hst magnitude of the secondary by adding the #adding the #everything is laid out in burgasser 2007 #educational exercise to find thre number of stars and brown dwarfs # - import seaborn as sns sns.set_palette(sns.color_palette('Paired')) # + #plt.style.use('dark_background') # + fig, ax1=plt.subplots(figsize=(8, 5), ncols=1) #ax.step(jgrid, sem_emp_phi_j, color='#0074D9', label='Simulated') #ax.step(jgrid, ryan_lf(jgrid), color='#FF4136', label='RyanJr2017') #ax.errorbar(dntb.M_J, dntb.Density*(10**-3), fmt='o', c='k', label='BG2019') #ax.set_xlabel('J', fontsize=18) #ax.set_ylabel(r'LF [pc$^{-3}$ mag$^{-1} $]', fontsize=18) #ax.minorticks_on() #ax.legend(fontsize=18) #ax.set_xlim([10, 16]) #ax.set_ylim([0., 0.005]) ax1.step(kirkpatrick2020LF['bin_center'], burr_phi_teff, where='mid', linewidth=3) ax1.step(kirkpatrick2020LF['bin_center'], baraffe_phi_teff, where='mid', linewidth=3) ax1.step(kirkpatrick2020LF['bin_center'], saumon_phi_teff, where='mid', linewidth=3) ax1.step(kirkpatrick2020LF['bin_center'], sonora_phi_teff, where='mid', linewidth=3) ax1.step(kirkpatrick2020LF['bin_center'], phil_phi_teff, where='mid', linewidth=3) lolims=np.zeros(len(kirkpatrick2020LF['bin_center'])).astype(int) lolims[0]=int(1) print (lolims) ax1.errorbar(kirkpatrick2020LF['bin_center'], np.array(kirkpatrick2020LF['values'])*(10**-3), yerr= np.array(kirkpatrick2020LF['unc'])*(10**-3), fmt='o', color='k') #for index, row in klf.iterrows(): # if row.lf==0.0: # pass # elif row.lfunc==0.0: # ax1.errorbar(row.tfm, row.lf*(10**-3), yerr=0.0005, color='#B10DC9', fmt='o',lolims=True, ls='none') # else: # ax1.errorbar(row.tfm, row.lf*(10**-3), yerr=row.lfunc*(10**-3), color='#B10DC9', fmt='o') ax1.set_xlabel(r'T$_\mathrm{eff}$ [K]', fontsize=18) ax1.set_ylabel(r'$\frac{dN}{dT\mathrm{eff}}$ [ K$^{-1}$ pc$^{-3}$]', fontsize=18) ax1.minorticks_on() ax1.legend(fontsize=18, labels=['B01', 'B03', 
def scale_lf_jmag(js, lf=None, lf_mask=None):
    """Scale a simulated absolute-J-magnitude distribution to the observed J-band LF.

    Histograms ``js`` in 0.5-mag bins centered on the (masked) observed J
    grid, then fits one multiplicative scale factor by inverse-variance
    weighted least squares with Monte-Carlo propagation of the observed
    uncertainties.

    Parameters
    ----------
    js : array-like
        Simulated absolute J magnitudes.
    lf : dict, optional
        Observed LF with keys 'J', 'lf', 'er_tot' (lf in 1e-3 pc^-3 mag^-1).
        Defaults to the module-level ``DNLF``.
    lf_mask : array-like of bool, optional
        Selection applied to the LF arrays. Defaults to the module-level
        ``mask`` (10.5 < J < 14.5).

    Returns
    -------
    numpy.ndarray
        Scaled per-bin number densities [pc^-3 mag^-1] on the masked J grid.
    """
    if lf is None:
        lf = DNLF
    if lf_mask is None:
        lf_mask = mask
    jgrid = np.asarray(lf['J'])[lf_mask]
    obs = np.asarray(lf['lf'])[lf_mask]
    unc = np.asarray(lf['er_tot'])[lf_mask]
    # Bins are 0.5 mag wide, centered on the masked J grid.
    binedges = np.append(jgrid - 0.25, jgrid[-1] + 0.25)
    # Bug fix: `normed` was removed from np.histogram (NumPy 1.24);
    # `density` is the supported equivalent. Debug prints also removed.
    preds = np.histogram(js, bins=binedges, density=False)[0]
    # Monte-Carlo resample the observed LF and solve the weighted
    # least-squares scale for each draw.
    obs_mc = np.random.normal(obs, unc, (10000, len(obs)))
    pred_mc = np.ones_like(obs_mc) * preds
    unc_mc = np.ones_like(obs_mc) * unc
    scale = (np.nansum(obs_mc * pred_mc / unc_mc**2, axis=1)
             / np.nansum(pred_mc**2 / unc_mc**2, axis=1)) * 1e-3  # LF tabulated in 1e-3 pc^-3
    return preds * np.nanmedian(scale)
pc$^{-3}$]', fontsize=18) ax1.minorticks_on() ax1.legend(fontsize=18, labels=[ 'B01', 'B03', 'SM08', 'M19', 'P20', 'R16', 'B19', 'C07']) plt.tight_layout() plt.xlim(10.5, 14.) plt.ylim([0.00, 0.006]) plt.savefig(wisps.OUTPUT_FIGURES+'/lfs_js_plus_binaries_teff.pdf', bbox_inches='tight') # + #other diagnostics #plt.hist(burrows['system_']) # - ydf=pd.read_pickle('/users/caganze/y_standards.pkl') ydf[ydf.spt=='Y0.0'].spec.values[0].flux ydf.name
notebooks/.ipynb_checkpoints/LF-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:miniconda3-metabolic] # language: python # name: conda-env-miniconda3-metabolic-py # --- # # Viable traits # # Compute the geographic habitat that is viable for each combination of traits within the [defined trait-space](trait-space-joint-pdf.ipynb). # %load_ext autoreload # %autoreload 2 # + import os from itertools import product import dask import data_collections as dc import funnel import matplotlib.pyplot as plt import metabolic as mi import numpy as np import regrid_tools import util import xarray as xr # - # ## Ecotype viability # # A region is considered habitable for a given organism if $\Phi > 1$ (for resting metabolism) or $\Phi > \Phi_{crit}$ (for active metabolism). # # The Metabolic Index is a nonlinear function; therefore, the order of operations can have an impact on results. Acknowledging this subtlety, we choose to compute $\Phi$ on a monthly climatology of $P_{\mathrm{O}_2}$ and temperature and asses regions as habitable on the basis of the annual-mean value of $\Phi$. Here is that computation. # # ### Compute monthly climatology # First step: spin up a `dask` cluster. try: cluster client except: cluster, client = util.get_ClusterClient(memory='100GB') cluster.scale(32) client # Load data from the CESM-LE and compute a monthly, ensemble-mean climatology. # + sub_spec = dict( name='drift-corrected', experiment=['20C'], member_id=dc.ocean_bgc_member_ids, variable=['pO2', 'TEMP'], ) catalog = funnel.to_intake_esm(agg_member_id=True).search(**sub_spec) exp_keys = [ f'20C.ocn.pop.h.drift-corrected', ] dsets = catalog.to_dataset_dict() assert set(dsets.keys()) == set(exp_keys) _, ds = dsets.popitem() ds # - # Choose a reference period corresponding to near-present and WOA2018 oxygen data compilation. 
ds_clm = ( ds.sel(time=slice("1960", "2017")) .mean("member_id") .groupby("time.month") .mean("time") .rename({"month": "time"}) .compute() ) ds_clm # + fig, axs = plt.subplots(2, 1, figsize=(8, 6)) ds_clm.pO2.isel(nlon=200).mean('time').plot(yincrease=False, ax=axs[0]) ds_clm.TEMP.isel(nlon=200).mean('time').plot(yincrease=False, ax=axs[1]) plt.tight_layout(); # + ds_1x1 = regrid_tools.generate_latlon_grid(nx=360, ny=180, lon0=-180.0)[["area"]] ds_1x1['z_t'] = ds.z_t ds_1x1['dz'] = ds.dz src_grid = regrid_tools.grid("POP_gx1v6") dst_grid = regrid_tools.grid("latlon", nx=360, ny=180, lon0=-180.0) regrid_obj = regrid_tools.regridder(src_grid, dst_grid, method="bilinear", clobber=False) regrid_obj # - ds_1x1 = xr.merge( ( ds_1x1, regrid_obj(ds_clm[["TEMP", "pO2"]], renormalize=True, apply_mask=True).rename( {"nlat": "lat", "nlon": "lon"} ), ) ) ds_1x1 # + fig, axs = plt.subplots(2, 1, figsize=(8, 6)) ds_1x1.pO2.isel(lon=0).mean('time').plot(yincrease=False, ax=axs[0]) ds_1x1.TEMP.isel(lon=0).mean('time').plot(yincrease=False, ax=axs[1]) plt.tight_layout(); # + [markdown] tags=[] # ### Compute viability # # First load trait space PDF. # + curator = util.curator_local_assets() cat = curator.open_catalog() ds_trait_space = cat['trait-space'].to_dask().load() ds_trait_space.trait_spc_active.plot(); # - ds_trait_space.trait_spc_active # Loop over trait space and store an index array into geographic space for each viable trait. 
@dask.delayed
def Phi_mean(ds, Ac, Eo):
    """Day-weighted annual mean of the Metabolic Index for one (Ac, Eo) trait pair.

    Evaluates Phi on the monthly climatology in ``ds`` and averages over the
    12 months, weighting each month by its length in days (module-level
    ``dpm``).
    """
    phi_monthly = mi.Phi(ds.pO2, ds.TEMP, Ac, Eo, dEodT=mi.dEodT_bar)
    # Weight each monthly value by days-per-month before averaging.
    weighted_total = (dpm * phi_monthly).sum("time")
    return weighted_total / dpm.sum("time")
plt.tight_layout(); # - # ## Write a cache file # + clobber = True this_notebook = 'compute-viable-traits.ipynb' curator = util.curator_local_assets() key = 'viable-traits-mask-cesm-1x1' if clobber: cache_file = f'{dc.cache_dir}/{key}.zarr' os.makedirs(os.path.dirname(cache_file), exist_ok=True) dso.to_zarr(cache_file, mode='w', consolidated=True) curator.add_source( key=key, urlpath=cache_file, description=f'Viable traits mask generated by {this_notebook}', driver='zarr', overwrite=True, ) # - if True: cluster.close() client.close()
notebooks/compute-viable-traits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Hello Quantum # # This application first entangles two qubits which were initially in the state $\vert00\rangle$. Then, compare the results by running the same circuit on a simulator and a real quantum hardware at IBM. # ## Build the Circuit # Import Qiskit: # + from qiskit import * from qiskit import IBMQ from qiskit.visualization import plot_histogram from qiskit.tools.monitor import job_monitor # %matplotlib inline # - qiskit.__qiskit_version__ # Initialize two qubits into a `QuantumRegister` and two classical bits into a `ClassicalRegister`: qr = QuantumRegister(2) cr = ClassicalRegister(2) # Build a `QuantumCircuit` composed of the two classical and quantum bits: circuit = QuantumCircuit(qr, cr) circuit.draw(output='mpl') # Add the [Hadamard gate](https://quantum-computing.ibm.com/docs/iqx/q-gates#h-gate): circuit.h(qr[0]) # Hadamard gate circuit.draw(output='mpl') # Add the [controlled-X gate](https://quantum-computing.ibm.com/docs/iqx/q-gates#cx-gate): circuit.cx(qr[0], qr[1]) # control, target circuit.draw(output='mpl') # Add the [measurement gate](https://quantum-computing.ibm.com/docs/iqx/q-gates#measurement): circuit.measure(qr, cr) # qubits are measured and results are stored in classical bits in order circuit.draw(output='mpl') # ## Run on Simulator # Set the simulator to `qasm_simulator` from the Aer element of Qiskit: simulator = Aer.get_backend('qasm_simulator') # Execute the circuit and store the results: result = execute(circuit, backend=simulator).result() # Plot results: plot_histogram(result.get_counts(circuit)) # ## Run on Real Quantum Hardware # Setup IBMQ account: # + # IBMQ.save_account('<IBMQ_API_TOKEN>') IBMQ.load_account() provider = IBMQ.get_provider(hub = 'ibm-q') # - # Get the least busy device: # + num_qubits = 2 
from qiskit.providers.ibmq import least_busy possible_devices = provider.backends(filters=lambda x: x.configuration().n_qubits >= num_qubits and x.configuration().simulator == False) qcomp = least_busy(possible_devices) # - # Execute the circuit: job = execute(circuit, backend=qcomp) # Monitor the job progress: job_monitor(job) # Plot results: result = job.result() plot_histogram(result.get_counts(circuit))
hello-quantum.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import collections from collections import defaultdict import sys import json import random from jsmin import jsmin from io import StringIO import numpy as np import copy import importlib from functools import partial import math import os import compress_pickle # script_n = os.path.basename(__file__).split('.')[0] script_n = 'mf_size_210514_compare_zplus' sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc') import my_plot importlib.reload(my_plot) from my_plot import MyPlotData import tools_mf_graph importlib.reload(tools_mf_graph) fname = ('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/'\ 'mf_grc_model/input_graph_210407_all.gz') input_graph = compress_pickle.load(fname) # z_min = 19800 # z_max = 29800 z_min = 19800-2500 z_max = 29800+2500 # GrCs are fully reconstructed and proofread from 90k to 150k # MFs are fully reconstructed and proofread from 70k to 150k # x_min = 100*1000*4 # x_max = 140*1000*4 x_min = 100*1000*4 x_max = 125*1000*4 # x_max = 115*1000*4 # x_max = 125*1000*4 pair_reps = defaultdict(int) mfs_within_box = set() mf_locs_within_box = set() loc_to_mf = dict() for mf_id, mf in input_graph.mfs.items(): rosette_loc_size = {} mf.get_all_mf_locs_size(rosette_loc_size) for rosette_loc, size in rosette_loc_size.items(): x, y, z = rosette_loc if x < x_min or x > x_max: continue if z < z_min or z > z_max: continue mfs_within_box.add(mf_id) mf_locs_within_box.add(rosette_loc) loc_to_mf[rosette_loc] = mf_id # input_graph.randomize(preserve_mf_degree='soft') # input_graph.randomize(preserve_mf_degree=False) connected_grcs = set() mf_connections = defaultdict(set) mf_size = defaultdict(int) for grc_id in input_graph.grcs: grc = input_graph.grcs[grc_id] 
connected_edge_count = 0 for edge in grc.edges: mf_id, mf_loc = edge if mf_loc not in mf_locs_within_box: continue mf_connections[mf_id].add(grc_id) mf_size[mf_id] += 1 connected_edge_count += 1 connected_grcs.add((grc_id, connected_edge_count)) mf_size_sorted = [] for mf_id in mf_size: mf_size_sorted.append((mf_id, mf_size[mf_id])) mf_size_sorted.sort(key=lambda x: x[1]) mpd_count = MyPlotData() i = 0 for mf_id in mf_size_sorted: mf_id, size = mf_id mpd_count.add_data_point( # count=len(mf_connections[mf_id]), count=size, mf_id=i, ) i += 1 importlib.reload(my_plot); my_plot.my_relplot( mpd_count, x='mf_id', y='count', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=4, aspect=2, y_axis_label='# of connected GrCs', x_axis_label='Sorted MFs', save_filename=f'{script_n}_line.svg', show=True, ) # + # connected_grcs = set() mf_connections = defaultdict(set) mf_size = defaultdict(int) mf_locs_within_box_list = list(mf_locs_within_box) for grc_id, edge_count in connected_grcs: for i in range(edge_count): loc = random.sample(mf_locs_within_box_list, 1)[0] mf_id = loc_to_mf[loc] mf_connections[mf_id].add(grc_id) mf_size[mf_id] += 1 mf_size_sorted = [] for mf_id in mf_size: mf_size_sorted.append((mf_id, mf_size[mf_id])) mf_size_sorted.sort(key=lambda x: x[1]) mpd_count_total_random = MyPlotData() i = 0 for mf_id in mf_size_sorted: mf_id, size = mf_id mpd_count_total_random.add_data_point( # count=len(mf_connections[mf_id]), count=size, mf_id=i, ) i += 1 importlib.reload(my_plot); my_plot.my_relplot( mpd_count_total_random, x='mf_id', y='count', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=4, aspect=2, y_axis_label='# of connected GrCs', x_axis_label='Sorted MFs', save_filename=f'{script_n}_line.svg', show=True, ) # + mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Observed') # mpd_count_random = 
compress_pickle.load('mf_size_210514_random_22000.gz') # mpd_count_random.add_key_value('model', 'Random') # mpd_count_random_global = compress_pickle.load('mf_size_210514_random_global.gz') # mpd_count_random_global.add_key_value('model', 'Global') mpd_count_total_random.add_key_value('model', 'Global Random') mpd_all = MyPlotData() mpd_all.append(mpd_count_observed) # mpd_all.append(mpd_count_random) mpd_all.append(mpd_count_total_random) importlib.reload(my_plot); my_plot.my_relplot( mpd_all, x='mf_id', y='count', hue='model', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=4, aspect=2, y_axis_label='# of connected GrCs', x_axis_label='Sorted MFs', save_filename=f'{script_n}_line.svg', show=True, ) # + mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Observed') mpd_all = MyPlotData() mpd_all.append(mpd_count_observed) # mpd_all.append(compress_pickle.load( # 'mf_size_210514_random_edge_10000.gz').add_key_value('model', 'edges_10000')) mpd_all.append(compress_pickle.load( 'mf_size_210514_random_edge_5000_400000_500000_17300_32300.gz').add_key_value('model', 'edges_5000')) # mpd_all.append(compress_pickle.load( # 'mf_size_210514_random_edge_short_23000_5000_400000_500000_17300_32300.gz').add_key_value('model', 'short_23k')) # mpd_count_total_random.add_key_value('model', 'Global Random') # mpd_all.append(mpd_count_total_random) importlib.reload(my_plot); my_plot.my_relplot( mpd_all, x='mf_id', y='count', hue='model', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=4, aspect=2, y_axis_label='# of connected GrCs', x_axis_label='Sorted MFs', save_filename=f'{script_n}_line.svg', show=True, ) # + mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Observed') mpd_count_random = compress_pickle.load('mf_size_210514_random_17000_10000.gz') mpd_count_random.add_key_value('model', 'Random') mpd_all = 
MyPlotData() mpd_all.append(mpd_count_observed) mpd_all.append(mpd_count_random) importlib.reload(my_plot); my_plot.my_relplot( mpd_all, x='mf_id', y='count', hue='model', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=4, aspect=2, y_axis_label='# of connected GrCs', x_axis_label='Sorted MFs', save_filename=f'{script_n}_line.svg', show=True, ) # + mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Observed') mpd_all = MyPlotData() mpd_all.append(mpd_count_observed) for l in [22000]: # for l in [10000, 15000, 22000]: # for l in [30000]: mpd_count_random = compress_pickle.load(f'mf_size_210514_random_{l}.gz') mpd_count_random.add_key_value('model', f'Random_{l}') mpd_all.append(mpd_count_random) importlib.reload(my_plot); my_plot.my_relplot( mpd_all, x='mf_id', y='count', hue='model', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=4, aspect=2, y_axis_label='# of connected GrCs', x_axis_label='Sorted MFs', save_filename=f'{script_n}_line.svg', show=True, ) # - print(len(connected_grcs)) print(len(mf_size_sorted)) plot_array = [] for grc_id in connected_grcs: kwargs = {} tmp = [] for mf_id, _ in mf_size_sorted: if grc_id in mf_connections[mf_id]: tmp.append(1) else: tmp.append(0) plot_array.append(tmp) # + import matplotlib.pyplot as plt import seaborn as sns # sns.set_theme # Draw a heatmap with the numeric values in each cell f, ax = plt.subplots(figsize=(20, 6)) sns.heatmap(mpd.to_dataframe(), # linewidths=.5, ax=ax, vmin=-.1, vmax=1.15, # cmap="YlGnBu", ) # + import matplotlib.pyplot as plt import seaborn as sns # sns.set_theme # Draw a heatmap with the numeric values in each cell # n = 4 n = 1 f, ax = plt.subplots(figsize=(14*n, 4*n)) sns.heatmap(plot_array, # linewidths=.5, ax=ax, vmin=-.1, vmax=1.15, # fontscale=4, # cmap="YlGnBu", # xticklabels=[], # yticklabels=[], ) plt.savefig(f'{script_n}.png', bbox_inches='tight', 
def get_prob_2share(in_graph, count_within_box=True):
    """For every MF combination (of every order), count the GrCs wired to all its members.

    Returns ``shares`` where ``shares[order][combo]`` is the number of GrCs
    connected to every mossy-fiber id in the tuple ``combo``. As a side
    effect, records each GrC's (in-box) degree in the module-level
    ``grcs_edge_count`` dict.
    """
    shares = defaultdict(lambda: defaultdict(int))
    for grc_id in in_graph.grcs:
        grc = in_graph.grcs[grc_id]
        # Partner MFs of this GrC, optionally restricted to the bounding box.
        if count_within_box:
            partner_mfs = {edge[0] for edge in grc.edges if edge[0] in mfs_within_box}
        else:
            partner_mfs = {edge[0] for edge in grc.edges if edge[0]}
        partner_mfs = sorted(partner_mfs)
        if partner_mfs:
            grcs_edge_count[grc_id] = len(partner_mfs)
        # Tally every subset of the partner set, one order at a time.
        for order in range(1, len(partner_mfs) + 1):
            for combo in itertools.combinations(partner_mfs, order):
                shares[order][combo] += 1
    return shares


def calc_distribution(hist):
    """Summarize, per share-order, how often each co-connection count occurs.

    Returns ``(dist, dist_raw)`` where ``dist[order][count]`` is the number
    of MF combinations of that order seen exactly ``count`` times, and
    ``dist_raw[order]`` is the flat list of counts for that order.
    """
    dist = defaultdict(lambda: defaultdict(int))
    dist_raw = defaultdict(list)
    for order, combo_counts in hist.items():
        for count in combo_counts.values():
            dist[order][count] += 1
            dist_raw[order].append(count)
    return dist, dist_raw
print(share) for v in sorted(shuffle_dist[share].keys()): print(f'{v}: {shuffle_dist[share][v]}') # + import umap import umap.plot from sklearn.datasets import load_digits digits = load_digits() mapper = umap.UMAP().fit(digits.data) umap.plot.points(mapper, labels=digits.target) # - umap.plot.points(mapper)
analysis/mf_grc_analysis/mf_combination_representation/mf_size_210514_compare_zplus2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Todo: # * investigate nan's in test set # * threshold motif scores at 0 # * overlay motif scores and deepLIFT tracks # * add grad x input # * PRC curves # * option of shuffled reference & background frequencies for deepLIFT # * fixed axes on deepLIFT plots # # # # How to train your DragoNN tutorial 3: # ## Interpreting features induced by DNN's across multiple types of motif grammars # # This tutorial is a supplement to the DragoNN manuscript and follows figure 7 in the manuscript. # # This tutorial will take 1 hour if executed on a GPU. # # Please complete "Primer Tutorial 1- Exploring model architectures for a homotypic motif density simulation" prior to completing this tutorial. # # The architectures used in this tutorial were determined as optimal by hyperparameter grid search in "Primer Tutorial 3 - CNN Hyperparameter Tuning via Grid Search" # # # ## Outline<a name='outline'> # <ol> # <li><a href=#1>How to use this tutorial</a></li> # <li><a href=#2>Defining helper functions for model training and interpretation</a></li> # TODO: explain reference options. # <li><a href=#3>Simulating training data with simdna: Review of Tutorial 1</a></li> # <li><a href=#4>Single Motif</a></li> # <li><a href=#5>Homotypic motif density detection</a></li> # <li><a href=#6>Homotypic motif density localization</a></li> # <li><a href=#7>Multiple motifs (multi-task)</a></li> # <li><a href=#8>Heterotypic motifs spatial grammar</a></li> # <li><a href=#9>Conclusions</a></li> # </ol> # Github issues on the dragonn repository with feedback, questions, and discussion are always welcome. 
# # # ## How to use this tutorial<a name='1'> # <a href=#outline>Home</a> # # This tutorial utilizes a Jupyter/IPython Notebook - an interactive computational enviroment that combines live code, visualizations, and explanatory text. The notebook is organized into a series of cells. You can run the next cell by cliking the play button: # ![play button](./primer_tutorial_images/play_button.png) # You can also run all cells in a series by clicking "run all" in the Cell drop-down menu: # ![play all button](./primer_tutorial_images/play_all_button.png) # Half of the cells in this tutorial contain code, the other half contain visualizations and explanatory text. Code, visualizations, and text in cells can be modified - you are encouraged to modify the code as you advance through the tutorial. You can inspect the implementation of a function used in a cell by following these steps: # ![inspecting code](./primer_tutorial_images/inspecting_code.png) # #uncomment the lines below if you are running this tutorial from Google Colab # #!pip install https://github.com/kundajelab/simdna/archive/0.3.zip # #!pip install https://github.com/kundajelab/dragonn/archive/keras_2.2_tensorflow_1.6_purekeras.zip #To prepare for model training, we import the necessary functions and submodules from keras from keras.models import Sequential from keras.layers.core import Dropout, Reshape, Dense, Activation, Flatten from keras.layers.convolutional import Conv2D, MaxPooling2D from keras.optimizers import Adadelta, SGD, RMSprop; import keras.losses; from keras.constraints import maxnorm; from keras.layers.normalization import BatchNormalization from keras.regularizers import l1, l2 from keras.callbacks import EarlyStopping, History from keras import backend as K K.set_image_data_format('channels_last') import numpy as np # We start by loading dragonn's tutorial utilities. 
def train_model(model, data):
    """Fit `model` on the training split, reporting metrics each epoch, and plot the learning curve."""
    # Custom callback that prints training and validation metrics while training.
    metrics_callback = MetricsCallback(
        train_data=(data.X_train, data.y_train),
        validation_data=(data.X_valid, data.y_valid),
    )
    callbacks = [
        EarlyStopping(patience=7, restore_best_weights=True),
        History(),
        metrics_callback,
    ]
    history = model.fit(
        x=data.X_train,
        y=data.y_train,
        batch_size=128,
        epochs=150,
        verbose=0,
        callbacks=callbacks,
        validation_data=(data.X_valid, data.y_valid),
    )
    # Visualize the model's performance curve.
    plot_learning_curve(history)
    return model


def compute_performance(model, data):
    """Print classification metrics for `model` on the held-out test split."""
    predictions = model.predict(data.X_test)
    print(ClassificationResult(data.y_test, predictions))


def extract_pos_and_neg_validation_sample(data, pos_index, neg_index):
    """Return one positive and one negative validation example, each as a batch of size 1."""
    positive_rows = np.flatnonzero(data.y_valid == 1)
    negative_rows = np.flatnonzero(data.y_valid == 0)
    p = positive_rows[pos_index]
    n = negative_rows[neg_index]
    return data.X_valid[p:p + 1], data.X_valid[n:n + 1]


def visualize_motif_scores(model, pos_X, neg_X, data):
    """Plot per-base motif-scan scores for a positive (top) and negative (bottom) example.

    Both panels share the positive example's y-limit so they are directly comparable.
    """
    pos_scores = get_motif_scores(pos_X, data.motif_names, return_positions=True).squeeze()
    neg_scores = get_motif_scores(neg_X, data.motif_names, return_positions=True).squeeze()
    plt.figure(figsize=(20, 3))
    top_axis = plt.subplot(211)
    bottom_axis = plt.subplot(212)
    panels = (
        (top_axis, pos_scores, "Positive example"),
        (bottom_axis, neg_scores, "Negative example"),
    )
    for axis, scores, label in panels:
        axis.plot(scores, "-o")
        axis.set_ylim(0, max(pos_scores))
        axis.set_xlabel("Sequence base")
        axis.set_ylabel("Motif scan score")
        axis.set_title(label)
    plt.show()
# +
from dragonn.tutorial_utils import in_silico_mutagenesis, plot_ism

#Run in-silico mutagenesis (ISM) on one positive and one negative example and
#plot both tracks on a shared color scale so they can be compared directly.
def perform_ism(model,pos_X,neg_X,data):
    ism_pos=in_silico_mutagenesis(model,pos_X)
    ism_neg=in_silico_mutagenesis(model,neg_X)
    #shared min/max across both tracks so the two plots use one color scale
    min_val=min([np.amin(ism_pos),np.amin(ism_neg)])
    max_val=max([np.amax(ism_pos),np.amax(ism_neg)])
    plot_ism(ism_pos,"Positive Example",min_val=min_val,max_val=max_val)
    plot_ism(ism_neg,"Negative Example",min_val=min_val,max_val=max_val)
    return ism_pos, ism_neg
# +
from dragonn.tutorial_utils import input_grad

#Compute gradient-x-input importance scores for one positive and one negative
#example and plot them on a shared y-axis range.
def compute_gradxinput_scores(model,pos_X,neg_X,data):
    gradinput_pos=input_grad(model,pos_X)
    gradinput_neg=input_grad(model,neg_X)
    min_val=min([np.amin(gradinput_pos),np.amin(gradinput_neg)])
    max_val=max([np.amax(gradinput_pos),np.amax(gradinput_neg)])
    plot_seq_importance(gradinput_pos,pos_X,title="Positive",ylim=(min_val,max_val))
    plot_seq_importance(gradinput_neg,neg_X,title="Negative",ylim=(min_val,max_val))
    return gradinput_pos,gradinput_neg
# +
from dragonn.tutorial_utils import deeplift

#Compute deepLIFT importance scores for one positive and one negative example
#and plot them on a shared y-axis range.
def compute_deeplift_scores(model,pos_X,neg_X,data):
    dl_pos=deeplift(model,pos_X)
    dl_neg=deeplift(model,neg_X)
    #BUG FIX: the shared plotting range must come from the deepLIFT scores
    #themselves (dl_pos/dl_neg). The original referenced the undefined names
    #gradinput_pos/gradinput_neg, which raises a NameError (or silently reuses
    #stale globals left over from a previously executed cell).
    min_val=min([np.amin(dl_pos),np.amin(dl_neg)])
    max_val=max([np.amax(dl_pos),np.amax(dl_neg)])
    plot_seq_importance(dl_pos,pos_X,title="Positive",ylim=(min_val,max_val))
    plot_seq_importance(dl_neg,neg_X,title="Negative",ylim=(min_val,max_val))
    return dl_pos,dl_neg
# -

#Run all three interpretation methods (motif scans, ISM, grad*input, deepLIFT)
#on the supplied positive and negative examples and return every score track.
def interpret(model,pos_X,neg_X,data):
    visualize_motif_scores(model,pos_X,neg_X,data)
    ism_pos,ism_neg=perform_ism(model,pos_X,neg_X,data)
    gradinput_pos,gradinput_neg=compute_gradxinput_scores(model,pos_X,neg_X,data)
    dl_pos,dl_neg=compute_deeplift_scores(model,pos_X,neg_X,data)
    return ism_pos, ism_neg, gradinput_pos, gradinput_neg, dl_pos, dl_neg

#Run the full data analysis for a given dataset and model:
#train the model, compute its performance on the held-out test set, then
#interpret one positive and one negative validation example.
#pos_index/neg_index: indices (within each class) of the validation examples to interpret.
def analyze(model,data,pos_index,neg_index):
    model=train_model(model,data)
    compute_performance(model,data)
    pos_X,neg_X=extract_pos_and_neg_validation_sample(data,pos_index,neg_index)
    ism_pos,ism_neg, gradinput_pos, gradinput_neg, dl_pos,dl_neg=interpret(model,pos_X,neg_X,data)
    return pos_X, neg_X, ism_pos, ism_neg, gradinput_pos, gradinput_neg, dl_pos,dl_neg

# Finally, we set a random seed to ensure that all analyses in this tutorial are reproducible.

# Making sure our results are reproducible
from numpy.random import seed
seed(1234)
from tensorflow import set_random_seed
set_random_seed(1234)

# ## Getting simulation data <a name='3'>
# <a href=#outline>Home</a>
#
#
# DragoNN provides a set of simulation functions. Let's use the **print_available_simulations** function to examine the list of simulations supported by DragoNN:

print_available_simulations()

# ## Single Motif <a name='4'>
# <a href=#outline>Home</a>
#
# We begin with single motif detection of the TAL1_known4 motif:
#
# ![play button](./primer_tutorial_images/TAL1_known4.png)

# Let's find out what parameters are needed for the simulation:

print_simulation_info("simulate_single_motif_detection")

# In this binary simulation task, we simulate a negative set of 10K 500 bp random sequences and a positive set of 10K 500 bp random sequences with one instance of the TAL1 motif randomly embedded at any position.
# + #Define simulation parameters tal1_parameters = { "motif_name": "TAL1_known4", "seq_length": 500, "num_pos": 10000, "num_neg": 10000, "GC_fraction": 0.4} #Get simulation data tal1_data = get_simulation_data("simulate_single_motif_detection", tal1_parameters, validation_set_size=3200, test_set_size=4000) # - # We define the convolutional neural network model architecture: # + #Define the optimal model architecture in keras (Refer to Primer Tutorial 2) tal1_model=Sequential() tal1_model.add(Conv2D(filters=10,kernel_size=(1,15),input_shape=tal1_data.X_train.shape[1::])) tal1_model.add(Activation('relu')) tal1_model.add(MaxPooling2D(pool_size=(1,35))) tal1_model.add(Flatten()) tal1_model.add(Dense(1)) tal1_model.add(Activation("sigmoid")) ##compile the model, specifying the Adam optimizer, and binary cross-entropy loss. tal1_model.compile(optimizer='adam', loss='binary_crossentropy') # - pos_X, neg_X, ism_pos, ism_neg, gradinput_pos, gradinput_neg, dl_pos, dl_neg=analyze(tal1_model,tal1_data,1,1) # Let's zoom in to the portion of the deepLIFT track with the strongest signal: plot_seq_importance(dl_pos,pos_X,xlim=(220,275),title="Positive Zoomed") plot_seq_importance(dl_neg,neg_X,xlim=(220,275),title="Negative Zoomed") # ## Homotypic motif density detection <a name='5'> # <a href=#outline>Home</a> # + #Define simulation parameters density_detection_parameters={ "motif_name": "TAL1_known4", "seq_length": 500, "neg_counts":[0,2], "pos_counts":[3,5], "num_pos": 10000, "num_neg": 10000, "GC_fraction":0.4 } #Get simulation data density_detection_data=get_simulation_data("simulate_motif_counting", density_detection_parameters, validation_set_size=3200,test_set_size=4000) # + #Define the optimal model architecture in keras (Refer to Primer Tutorial 2) density_detection_model=Sequential() density_detection_model.add(Conv2D(filters=10,kernel_size=(1,15),input_shape=density_detection_data.X_train.shape[1::])) density_detection_model.add(Activation('relu')) 
density_detection_model.add(MaxPooling2D(pool_size=(1,35)))
density_detection_model.add(Flatten())
# single sigmoid output: binary classification (high vs low motif count)
density_detection_model.add(Dense(1))
density_detection_model.add(Activation("sigmoid"))
##compile the model, specifying the Adam optimizer, and binary cross-entropy loss.
density_detection_model.compile(optimizer='adam', loss='binary_crossentropy')
# -

pos_X, neg_X, ism_pos, ism_neg, gradinput_pos, gradinput_neg, dl_pos, dl_neg=analyze(density_detection_model,density_detection_data,1,1)

plot_seq_importance(dl_pos,pos_X,xlim=(200,350),title="Positive, Zoomed")
plot_seq_importance(dl_neg,neg_X,xlim=(200,350),title="Negative, Zoomed")

# ## Homotypic motif density localization <a name='6'>
# <a href=#outline>Home</a>

# +
#Define simulation parameters
# Positives have 2-4 motif copies concentrated in a central 150 bp window of a 1000 bp sequence.
density_localization_parameters = {
    "motif_name": "TAL1_known4",
    "seq_length": 1000,
    "center_size": 150,
    "min_motif_counts": 2,
    "max_motif_counts": 4,
    "num_pos": 10000,
    "num_neg": 10000,
    "GC_fraction": 0.4}

#Get simulation data
density_localization_data=get_simulation_data("simulate_motif_density_localization",
                                              density_localization_parameters,
                                              validation_set_size=3200,test_set_size=4000)

# +
#Define the optimal model architecture in keras (Refer to Primer Tutorial 2)
density_localization_model=Sequential()
density_localization_model.add(Conv2D(filters=5,kernel_size=(1,10),input_shape=density_localization_data.X_train.shape[1::]))
density_localization_model.add(Activation('relu'))
density_localization_model.add(MaxPooling2D(pool_size=(1,10)))
density_localization_model.add(Flatten())
# single sigmoid output: binary classification (motifs centrally localized vs not)
density_localization_model.add(Dense(1))
density_localization_model.add(Activation("sigmoid"))
##compile the model, specifying the Adam optimizer, and binary cross-entropy loss.
density_localization_model.compile(optimizer='adam', loss='binary_crossentropy') # - pos_X, neg_X, ism_pos, ism_neg, gradinput_pos, gradinput_neg, dl_pos, dl_neg=analyze(density_localization_model,density_localization_data,1,1) plot_seq_importance(dl_pos,pos_X,xlim=(220,275),title="Positive") plot_seq_importance(dl_neg,neg_X,xlim=(220,275),title="Negative") # ## Multiple motifs (multi-task)<a name='7'> # <a href=#outline>Home</a> # + #Define simulation parameters multi_motif_parameters = { "motif_names": ["CTCF_known1","ZNF143_known2","SIX5_known1"], "seq_length": 500, "min_num_motifs": 0, "max_num_motifs": 1, "num_seqs": 20000, "GC_fraction": 0.4} #Get simulation data multi_motif_data=get_simulation_data("simulate_multi_motif_embedding", multi_motif_parameters, validation_set_size=3200,test_set_size=4000) # + #Define the optimal model architecture in keras (Refer to Primer Tutorial 2) multi_motif_model=Sequential() multi_motif_model.add(Conv2D(filters=20,kernel_size=(1,20),input_shape=multi_motif_data.X_train.shape[1::])) multi_motif_model.add(Activation('relu')) multi_motif_model.add(MaxPooling2D(pool_size=(1,10))) multi_motif_model.add(Flatten()) multi_motif_model.add(Dense(3)) multi_motif_model.add(Activation("sigmoid")) ##compile the model, specifying the Adam optimizer, and binary cross-entropy loss. 
multi_motif_model.compile(optimizer='adam', loss='binary_crossentropy')
# -

pos_X, neg_X, ism_pos, ism_neg, gradinput_pos, gradinput_neg, dl_pos, dl_neg=analyze(multi_motif_model, multi_motif_data,1,1)

plot_seq_importance(dl_pos,pos_X,xlim=(220,275),title="Positive")
plot_seq_importance(dl_neg,neg_X,xlim=(220,275),title="Negative")

# ## Heterotypic motifs spatial grammar<a name='8'>
# <a href=#outline>Home</a>

# +
#Define simulation parameters
# Positives contain SPI1 and IRF motifs separated by 2-5 bp; negatives do not.
heterodimer_parameters = {
    "motif1": "SPI1_known4",
    "motif2": "IRF_known1",
    "seq_length": 500,
    "min_spacing": 2,
    "max_spacing": 5,
    "num_pos": 10000,
    "num_neg": 10000,
    "GC_fraction": 0.4}

#Get simulation data
heterodimer_data=get_simulation_data("simulate_heterodimer_grammar",
                                     heterodimer_parameters,
                                     validation_set_size=3200,test_set_size=4000)
# -

#Define a deeper, 3-convolutional-layer architecture for the spatial-grammar task.
#BUG FIX: the original referenced the undefined names `input_shape` and `num_tasks`
#(NameError: neither is assigned anywhere in this notebook). `input_shape` is now
#taken from the simulated data, and only the first layer declares it (Keras infers
#the input shapes of subsequent layers). The heterodimer-grammar simulation is a
#single binary task (grammar present vs absent), so the output layer is Dense(1),
#consistent with the other binary models in this tutorial.
heterodimer_model=Sequential()
heterodimer_model.add(Conv2D(filters=15,kernel_size=(1,15),input_shape=heterodimer_data.X_train.shape[1::]))
heterodimer_model.add(Activation("relu"))
heterodimer_model.add(Conv2D(filters=15,kernel_size=(1,15)))
heterodimer_model.add(Activation("relu"))
heterodimer_model.add(Conv2D(filters=15,kernel_size=(1,15)))
heterodimer_model.add(Activation("relu"))
heterodimer_model.add(MaxPooling2D(pool_size=(1,35)))
heterodimer_model.add(Flatten())
heterodimer_model.add(Dense(1))
heterodimer_model.add(Activation("sigmoid"))
##compile the model, specifying the Adam optimizer, and binary cross-entropy loss.
heterodimer_model.compile(optimizer='adam',loss='binary_crossentropy')

pos_X, neg_X, ism_pos, ism_neg, gradinput_pos, gradinput_neg, dl_pos, dl_neg=analyze(heterodimer_model,heterodimer_data,1,1)

plot_seq_importance(dl_pos,pos_X,xlim=(220,275),title="Positive")
plot_seq_importance(dl_neg,neg_X,xlim=(220,275),title="Negative")

# ## Conclusions<a name='9'>
# <a href=#outline>Home</a>
paper_supplement/PrimerTutorial 3 - Interpreting features induced by DNN's across multiple types of motif grammars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Practical 4: Modules and Functions - Building Conway's Game of Life # # <div class="alert alert-block alert-success"> # <b>Objectives:</b> In this practical we build on the previous practice on creating functions, modules and conditional statements. We also continue practicing how we access cells from 2D arrays. At the end of this notebook you will have a complete version of Conway's Game of Life which will produce an animation. This will be done through 3 different sections, each of which has an exercise for you to complete: # # - 1) [Creating different shapes through 2D Numpy array modifications](#Part1) # * [Exercise 1: Draw still 'life' from Conway's Universe](#Exercise1) # * [Exercise 2: Draw oscillators and space-ship 'life' from Conway's Universe](#Exercise2) # - 2) [Creating a function that searches a local neighbourhood for values of '1' and '0'](#Part2) # * [Exercise 3: Implement the 4 rules of life](#Exercise3) # * [Exercise 4: Loop through 20 oscillations of the 'Beacon' lifeform](#Exercise4) # - 3) [Populating Conway's Universe with multiple species](#Part3) # # As with our other notebooks, we will provide you with a template for plotting the results. Also please note that you should not feel pressured to complete every exercise in class. These practicals are designed for you to take outside of class and continue working on them. Proposed solutions to all exercises can be found in the 'Solutions' folder. 
# </div> # <div class="alert alert-block alert-warning"> # <b>Please note:</b> After reading the instructions and aims of any exercise, search the code snippets for a note that reads -------'INSERT CODE HERE'------- to identify where you need to write your code # </div> # # Introduction: The game # # Before we get our teeth into the exercises included in this notebook, lets remind ourselves about the basis for Conway's game of life. In Conway's game of life, the Universe is represented as an infinite 2D space [a 2D Numpy array in our case!] on which each cell can either be alive or dead. If we refer to each cell as having one of two states, we can represent this numerically as each cell having either a value of 1 or 0. If we then assume we can draw 2D shapes that represent a 'specie', as a collection of live cells, we might find patterns such as those in the following figure: # # Every cell interacts with its neighbours, whether they are horizontally, vertically of diagonally adjacent. There are 4 laws that define these interactions: # # <div class="alert alert-block alert-info"> # # - Any live cell with fewer than two live neighbours dies, as if by underpopulation. # # - Any live cell with two or three live neighbours lives on to the next generation. # # - Any live cell with more than three live neighbours dies, as if by overpopulation. # # - Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction. # </div> # # So imagine we are at the beginning of time in our 2D Universe. We need to 'sweep' through every cell and make changes according to these laws. After one sweep, or time step, the distribution of cells will change. Numerically, the distribution of '1's and '0's change across our 2D space. 
In fact, by defining species as dinstinct groups of cells of a certain shape, as we move through multiple time steps we find 3 types of patterns emerging: # # <div class="alert alert-block alert-info"> # # - Still life: These patterns remain fixed on the board # # - Oscillators: These patterns change shape on every iteration, but return to their initial state after a number of generations. # # - Space-ships: These patterns end up moving across the board according to the rules that define life and death. # </div> # # From a programming perspective, implementing these rules through any number of time steps requires a number of procedures to be implemented via code: # # <div class="alert alert-block alert-info"> # # - 1) Defining 2D arrays that represent species in Conway's Universe. # - 2) Creating a function that searches the immediate neighbouring space of each cell for 1's and 0's. # - 3) Counting the number of 1's and 0's according to the previous point. # - 4) Changing the values of each cell according to the 4 laws stated above. # - 5) Looping through points 2-4 for any number of time steps. # # </div> # # # By sequentially following the proceeding exercises, we will eventually build a variant of Conway's game of life. # # ## Creating different shapes through 2D Numpy array modifications <a name="Part1"></a> # # Before we can run a simulation, lets create some distinct species as groups of cells, and thus patterns. This will help us practice creating 2D arrays and populating each cell with either a '0' or '1' depending on what pattern we want to draw. We will do this with the 3 patterns defined above. In each of the patterns you will be asked to initialise a 2D Numpy array that repeats the pattern seen in the picture. The code to plot, thus visualise, each pattern is given for you. # # ### Still life # # The pictures in Figure 1 and 2 illustrates common types of life in Conway's Universe. 
# I've given you some code that reproduces the pattern for 'Block', in the code box below. Read through the code and comments and see if this makes sense.
#
# ![](images/Practical_3_figure1.png "Title")
# <h3><center>Figure 1</center></h3>
#
# ![](images/Practical_3_figure2.png "Title")
# <h3><center>Figure 2</center></h3>

# +
# #%matplotlib inline #this is to help us retrieve those lovely animations!
import numpy as np #import the numerical python library, numpy. Renaming the imported library to 'np' is solely for convenience
import matplotlib.pyplot as plt #as per the above, much easier to write over and over again
from matplotlib import animation, rc

# Let's first create our 'Block'. Don't forget, we can call our arrays and matrices anything we want. In this case I'm going to use the name of the pattern we are interested in
Block = np.zeros((4,4),dtype=int) #I'm telling the Python interpreter I want a numpy array that is 4 rows by 4 columns, contains '0' for now and is expecting my data to be of integer type

# What does this look like?
print("An empty array",Block)
# Can you see a matrix of 0s?

# Ok cool. Now let's add some black cells by positioning some values of 1. For the Block pattern, this is done as follows:
Block[1,1]=1
Block[1,2]=1
Block[2,1]=1
Block[2,2]=1

# Remember how we refer to elements in an array in Python? Everything starts at 0, so here I'm filling in the central 2x2 matrix with 1s. Let's check this out numerically:
# BUG FIX: the original wrapped one print inside another -- print(print(...)) --
# which printed the array and then printed "None" (the inner print's return value).
print("A finished array",Block)

#Now let's plot this to recreate the patterns given in figure x.
plt.imshow(Block, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Block')
plt.show()
# -

# <div class="alert alert-block alert-success">
# <b> Exercise 1: Draw still 'life' from Conway's Universe </b> <a name="Exercise1"></a>
#
# In this exercise you will need to create a 2D Numpy array that essentially 'draws' both the *Tub* and *Boat* specie from figure 2.
# I've left a space in the following code for you to implement these entries.
#
# </div>

# +
# We have already imported both Numpy and Matplotlib so no need to import those again.

# Initialise our matrices (5x5 boards of dead cells, i.e. all zeros)
Tub = np.zeros((5,5),dtype=int)
Boat = np.zeros((5,5),dtype=int)

#-------'INSERT CODE HERE'-------
# Now add '1's to the currently empty 2D array Tub

#--------------------------------

plt.subplot(1, 2, 1).imshow(Tub, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Tub')
#plt.show()

#-------'INSERT CODE HERE'-------
# Now add '1's to the currently empty 2D array Boat

#--------------------------------

plt.subplot(1, 2, 2).imshow(Boat, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Boat')
plt.show()
# -

# <div class="alert alert-block alert-success">
# <b> Exercise 2: Draw oscillators and space-ship 'life' from Conway's Universe </b> <a name="Exercise2"></a>
#
# Following exercise #1, now do the same for 2 types of both *oscillators* and *space ships*: Toad, Beacon, Glider and Light-weight spaceship (LWSS). Can you replicate the patterns shown in figures 1 and 3? Check the size of each array you need, accounting for white space around the outside. Use the space below and copy-paste the code we have already used.
#
# ![](images/Practical_3_figure3.png "Title")
# <h3><center>Figure 3</center></h3>
#
# </div>

# +
#Enter the Python code here to create and then visualise a Toad, Beacon and Glider

#Initialise each matrix (note LWSS needs a 6x7 board; the others are square)
Beacon = np.zeros((6,6),dtype=int)
Toad = np.zeros((6,6),dtype=int)
Glider = np.zeros((5,5),dtype=int)
LWSS = np.zeros((6,7),dtype=int)

#Enter values for '1' where you would like a black square
#-------'INSERT CODE HERE'-------

#--------------------------------

#Now visualise your results.
plt.subplot(1, 2, 1).imshow(Beacon, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Beacon')
plt.subplot(1, 2, 2).imshow(Toad, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Toad')
plt.show()

plt.subplot(1, 2, 1).imshow(Glider, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Glider')
plt.subplot(1, 2, 2).imshow(LWSS, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('LWSS')
plt.show()
# -

# ## Creating a function that searches a local neighbourhood for values of '1' and '0' <a name="Part2"></a>
#
# Now we know how to define a specie, according to modifying values in a 2D array, we also need to now create a function that can search the neighbouring space of any cell for the occurrence of '1's or '0's. We are going to perform this operation many times so creating a function to do this seems a sensible approach.
#
# As an example, let's re-create the 2D array that represents the specie 'Beacon' and then pass this array into a new function that will search the neighbouring space of every cell to detect a '1' or '0'. In this example I have given you all of the code to perform this operation. Try to understand the syntax used. Does this make sense? First look at the code and then let's formulate the steps in the function as a narrative.

# +
#Initialise the Beacon matrix
Beacon = np.zeros((6,6),dtype=int)

#Enter values for '1' where you would like a black square
Beacon[1,1]=1
Beacon[1,2]=1
Beacon[2,1]=1
Beacon[3,4]=1
Beacon[4,3]=1
Beacon[4,4]=1

# Now define a function that moves through each cell in our 2D array and searches the neighbouring space
# We pass three variables:
# total_rows - Number of rows in our space to be searched
# total_cols - Number of columns in our space to be searched
# space      - The 2D array space to be searched
def search_each_cell(total_rows,total_cols,space):
    # 1) First, we need to start moving through each cell of our 'space'.
    # To do this, we will use two nested 'for' loops
    for row in range(total_rows):
        for col in range(total_cols):
            # So 'row' and 'col' define our current cell.
            # We now need to search a neighbourhood defined as 1 cell distance around this position
            # We thus need two more nested for loops. When searching this neighbouring space, we want
            # to count the number of 1's. Thus we also need a variable that we can increment by 1
            # everytime we find a value of 1. Lets call this integer variable count
            count = 0
            for row2 in range(row-1,row+2): # See here that we can define a start and end to our 'range'
                for col2 in range(col-1,col+2):
                    # We need to check if our new position, defined by [row2,col2], is off the board.
                    # BUG FIX: the left-edge test must check col2, not col. `col` is always >= 0, so the
                    # original test was never true and col2 = -1 slipped through; space[row2,-1] then
                    # wrapped around to the LAST column (Python negative indexing), silently counting
                    # neighbours from the opposite edge of the board.
                    if (row2<0) or (row2>=total_rows) or (col2<0) or (col2>=total_cols):
                        # Do nothing
                        pass
                    elif row2 == row and col2 == col:
                        # Do nothing, it's the cell we already have!
                        pass
                    # If we are not off the board or in the same cell as our starting point...
                    # We can check if this new space has a value of 1. If it does, let's count it
                    else:
                        if space[row2,col2]>0:
                            count=count+1
    return # At the moment we are not returning anything. Seem odd? We will get back to this.

# call the above function
search_each_cell(6,6,Beacon)
print("Finished function call, nothing to report!")
# -

# Now let's try to understand what this function is actually doing. As an algorithm, we have the following steps
#
# <div class="alert alert-block alert-info">
#
# - 1) Pass the 2D Numpy array to the new function along with variables that define the total number of rows and columns
#
# - 2) We need to move through every cell and search its local neighbourhood. Moving through each cell is defined by the first two loops that cycle through both the row and column index of our 2D space. The limits are defined by the variables total_rows and total_cols
#
# - 3) For each cell, we will want to have an integer variable that counts how many 1's there are in the local neighborhood.
# We need to initialise this to 0 for each cell we move through.
#
# - 4) Now we need to look at the local space surrounding our cell. For this we need two more nested loops that look 1 row above, 1 row below, 1 column to the left and one to the right.
#
# - 5) As we move through this neighbourhood we need to check if we are either off the board OR in the same location as the cell we are interested in!
#
# - 6) If none of the above is true, then check if a cell has a value greater than 0. If it does, increment variable count by 1.
#
# - 7) For each cell on the board, repeat steps 3-6.
#
# - 8) When the entire space has been searched, stop the function and return nothing.
#
# </div>
#
# <div class="alert alert-block alert-success">
# <b> Exercise 3 - Implement the 4 rules of life <a name="Exercise3"></a> </b>
#
# Now that we have the function that can search the local neighbourhood of any cell and count how many 1's and 0's there are, we can now add on more code that implements the 4 rules of life and thus keeps the value of our current cell or changes it. Let's remind ourselves what those rules are:
#
# <div class="alert alert-block alert-info">
#
# - Any live cell with fewer than two live neighbours dies, as if by underpopulation.
#
# - Any live cell with two or three live neighbours lives on to the next generation.
#
# - Any live cell with more than three live neighbours dies, as if by overpopulation.
#
# - Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
# </div>
#
# So in this exercise we have a shape that has been passed into our function, and then create a new shape according to the rules of life. In the exercise you will need to add a series of conditional statements that populate the value of cells in our new shape according to these rules.
# In other words, we can re-write the above rules as:
#
# <div class="alert alert-block alert-info">
#
# - If our current cell is alive [=1]:
#
#     a) If count < 2, current cell = 0 [it dies].
#
#     b) If 2<=count<=3, current cell = 1 [it stays alive].
#
#     c) If count>3, current cell = 0 [it dies]
#
#
# - If our current cell is dead [=0]
#
#     a) If count == 3, current cell = 1 [born]
#
# </div>
#
# Notice the syntax I have used for the last conditional: If count == 3? When checking a value we use two equals signs '==' as we are not *assigning* a value as we would in, e.g. x = 4. In the code snippet below, I have identified where you need to implement these rules. Notice that we plot the 'Beacon' pattern before we call the function and then the new 2D space which should change the pattern. With this in mind, also note that our function now returns a new version of our 2D space which I have called 'new_space'. If correct, when you run your completed code you should see figure 4.
#
# ![](images/Practical_3_figure4.png "Title")
# <h3><center>Figure 4</center></h3>
#
# Please note that where I have added 'INSERT CODE HERE' we are using the correct indentation.
#
# </div>

# +
#Initialise the Beacon matrix
Beacon = np.zeros((6,6),dtype=int)

#Enter values for '1' where you would like a black square
Beacon[1,1]=1
Beacon[1,2]=1
Beacon[2,1]=1
Beacon[3,4]=1
Beacon[4,3]=1
Beacon[4,4]=1

# Now define a function that moves through each cell in our 2D array and searches the neighbouring space
# We pass three variables:
# total_rows - Number of rows in our space to be searched
# total_cols - Number of columns in our space to be searched
# space      - The 2D array space to be searched
# Returns new_space: the next generation of the board.
def search_each_cell(total_rows,total_cols,space):
    # the next generation starts as an all-dead board and is filled in cell by cell
    new_space = np.zeros((total_rows,total_cols),dtype=int)
    # 1) First, we need to start moving through each cell of our 'space'.
    # To do this, we will use two nested 'for' loops
    for row in range(total_rows):
        for col in range(total_cols):
            # So 'row' and 'col' define our current cell index.
            # We now need to search a neighbourhood defined as 1 cell distance around this position
            # We thus need two more nested for loops. When searching this neighbouring space, we want
            # to count the number of 1's. Thus we also need a variable that we can increment by 1
            # everytime we find a value of 1. Lets call this integer variable count.
            count = 0
            for row2 in range(row-1,row+2): # See here that we can define a start and end to our 'range'
                for col2 in range(col-1,col+2):
                    # We need to check if our new position, defined by [row2,col2], is off the board.
                    # BUG FIX: the left-edge test must check col2, not col. `col` is always >= 0, so
                    # the original test never fired and col2 = -1 wrapped to the last column via
                    # Python's negative indexing, counting neighbours from the opposite edge.
                    if (row2<0) or (row2>=total_rows) or (col2<0) or (col2>=total_cols):
                        # Do nothing
                        pass
                    elif row2 == row and col2 == col:
                        # Do nothing, it's the cell we already have!
                        pass
                    # If we are not off the board or in the same cell as our starting point...
                    # We can check if this new space has a value of 1. If it does, let's count it
                    else:
                        if space[row2,col2]>0:
                            count=count+1

            #-------'INSERT CODE HERE'-------
            # Here you need to introduce conditional statements that act on the value of 'count'
            # Read through the narrative provided above and remember to obey the spacing rules
            # You will need to check the value of space[row,col] and then, depending on whether
            # this is greater than 0 OR equal to 0, implement the rules of life. I have provided
            # the first example. Please do try to complete this.
            # (A `pass` placeholder is included under each branch so this template is valid
            # Python as given -- replace each `pass` with your rules.)
            if space[row,col] > 0:
                pass # replace with rules a)-c) for a live cell
            elif space[row,col] == 0:
                pass # replace with the rule for a dead cell
            #--------------------------------

    return new_space

# call the above function
Beacon_new = search_each_cell(6,6,Beacon)
print("Finished function call, now lets compare our pattern before and after...")

#Now visualise your results.
plt.subplot(1, 2, 1).imshow(Beacon, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Beacon - before')
plt.subplot(1, 2, 2).imshow(Beacon_new, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Beacon - after')
plt.show()
# -

# <div class="alert alert-block alert-success">
# <b> Exercise 4 - Loop through 20 oscillations of the 'Beacon' lifeform <a name="Exercise4"></a> </b>
#
# Now that we have built the function that can implement the 4 rules of life, all that is left for us to do is to call this function a set number of times to simulate evolution across our Universe. In the code box below, drop your conditional statements from above in the relevant place and click 'Run'. Do you see the Beacon shape oscillating? As before, I have provided the code for plotting but see if the syntax makes sense.
#
# </div>

# +
import numpy as np #import the numerical python library, numpy. Renaming the imported library to 'np' is solely for convenience
import matplotlib.pyplot as plt #as per the above, much easier to write over and over again
from matplotlib import animation, rc
from IPython.display import HTML
from IPython.display import clear_output
import time

#Initialise the Beacon matrix
Beacon = np.zeros((6,6),dtype=int)

#Enter values for '1' where you would like a black square
Beacon[1,1]=1
Beacon[1,2]=1
Beacon[2,1]=1
Beacon[3,4]=1
Beacon[4,3]=1
Beacon[4,4]=1

# Now define a function that moves through each cell in our 2D array and searches the neighbouring space
# We pass three variables:
# total_rows - Number of rows in our space to be searched
# total_cols - Number of columns in our space to be searched
# space      - The 2D array space to be searched
# Returns new_space: the next generation of the board.
def search_each_cell(total_rows,total_cols,space):
    new_space = np.zeros((total_rows,total_cols),dtype=int)
    # 1) First, we need to start moving through each cell of our 'space'.
    # To do this, we will use two nested 'for' loops
    for row in range(total_rows):
        for col in range(total_cols):
            # So 'row' and 'col' define our current cell index.
            # We now need to search a neighbourhood defined as 1 cell distance around this position
            # We thus need two more nested for loops. When searching this neighbouring space, we want
            # to count the number of 1's. Thus we also need a variable that we can increment by 1
            # everytime we find a value of 1. Lets call this integer variable count
            count = 0
            for row2 in range(row-1,row+2): # See here that we can define a start and end to our 'range'
                for col2 in range(col-1,col+2):
                    # We need to check if our new position, defined by [row2,col2], is off the board.
                    # BUG FIX: the left-edge test must check col2, not col (the original let
                    # col2 = -1 through, wrapping to the last column via negative indexing).
                    if (row2<0) or (row2>=total_rows) or (col2<0) or (col2>=total_cols):
                        # Do nothing
                        pass
                    elif row2 == row and col2 == col:
                        # Do nothing, it's the cell we already have!
                        pass
                    # If we are not off the board or in the same cell as our starting point...
                    # We can check if this new space has a value of 1. If it does, let's count it
                    else:
                        if space[row2,col2]>0:
                            count=count+1

            #-------'INSERT CODE HERE'-------
            # Here you need to introduce conditional statements that act on the value of 'count'
            # Read through the narrative provided above and remember to obey the spacing rules
            # (Replace each `pass` placeholder with your rules of life.)
            if space[row,col] > 0:
                pass # replace with rules a)-c) for a live cell
            elif space[row,col] == 0:
                pass # replace with the rule for a dead cell
            #--------------------------------

    return new_space

fig, ax2 = plt.subplots()
plt.imshow(Beacon, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Beacon oscillating')
plt.show()

# Let us call the function 20 times
# Each time we are given a new shape to plot on our figure.
# Wait 0.2 seconds before moving on to the next iteration
# We should see oscillating behaviour.
for x in range(20):
    clear_output(wait=True)
    Beacon_new = search_each_cell(6,6,Beacon)
    Beacon = Beacon_new
    plt.imshow(Beacon_new, cmap='binary') #The cmap, or colour map, gives us a black and white board.
    plt.title('Beacon oscillating')
    plt.show()
    time.sleep(0.2)
# -

# ## Populating Conway's Universe with multiple species <a name="Part3"></a>
#
# Now we are going to use the definition of our shapes to populate a miniature Universe in 2D space! Once we have this, following the same procedure as above, we should see some interesting movement! So let's create a space that is big enough for all of our cell types. To do this, we need to create another matrix:

Universe=np.zeros((50,50),dtype=int)
print(Universe)

# You should now see a snapshot of the Universe matrix that is empty. Now check out figure x below that illustrates a collection of our pre-existing cell types. Now you are going to replicate this figure. Whilst you could individually enter each cell with a '1' where required, why repeat what we have done above. Rather, we are going to use our existing matrices and place them on the Universe grid. We do that by defining the exact space in the Universe we want our cells to go. This is a practice in recognising the correct shape of an array/matrix and matching one to another. For example, look at the code below which places the top left corner of an LWSS on the cell in the 12th row and 13th column of my Universe and then visualises the results. Don't forget, indexing in Python starts at 0 so for the 12th row and 13th column, I need to refer to element [11,12]. I'm also using the operator ':' which allows us to straddle cells bound by a start and a finish. Why have I chosen the range given below? Feel free to change the values, but if you get the size of space needed to fit in an LWSS wrong, Python will complain it cannot broadcast a given shape:

#Define the space in the Universe you would like your LWSS to appear
Universe[11:17,12:19] = LWSS

#Now visualise our Universe
plt.imshow(Universe, cmap='binary') #The cmap, or colour map, gives us a black and white board.
plt.title('Universe [with 1 LWSS]') plt.show() # ### Populating the Universe with multiple species # # To finish off the first page of this notebook, in the following code box we fill the Universe with a range of species and then proceed to a simulation. Can you see how we have mapped species shapes into our Universe? It is left for you to copy the working function 'search_each_cell' from above to complete the simulation. # # Have a play with this! What happens if you increase the number of iterations to 300? Please note, we might want to clear our Universe from the above exercise, in which case we could write: Universe[:,:]=0, but lets keep it in for now. # + #Define the space in the Universe you would like your different species to appear Universe[30:36,32:39] = LWSS Universe[11:17,12:19] = LWSS Universe[22:28,12:18] = Beacon Universe[33:39,2:8] = Beacon Universe[19:25,32:38] = Toad Universe[1:6,1:6] = Glider Universe[6:11,25:30] = Boat plt.imshow(Universe, cmap='binary') #The cmap, or colour map, gives us a black and white board. plt.title('Universe [with multiple cell types]') plt.show() #-------'INSERT CODE HERE'------- def search_each_cell(total_rows,total_cols,space): #-------------------------------- fig, ax2 = plt.subplots(figsize=(12, 12)) plt.imshow(Universe, cmap='binary') #The cmap, or colour map, gives us a black and white board. plt.title('Universe simulation') plt.show() for x in range(100): clear_output(wait=True) Universe_new = search_each_cell(50,50,Universe) Universe = Universe_new plt.imshow(Universe_new, cmap='binary') #The cmap, or colour map, gives us a black and white board. plt.title('Universe simulation') plt.show() time.sleep(0.2) # -
Practical 4. Modules and Functions - Building Conway's Game of Life.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Amazon SageMaker XGBoost Bring Your Own Model # _**Hosting a Pre-Trained scikit-learn Model in Amazon SageMaker XGBoost Algorithm Container**_ # # --- # # --- # # ## Contents # # 1. [Background](#Background) # 1. [Setup](#Setup) # 1. [Optionally, train a scikit learn XGBoost model](#Optionally,-train-a-scikit-learn-XGBoost-model) # 1. [Upload the pre-trained model to S3](#Upload-the-pre-trained-model-to-S3) # 1. [Set up hosting for the model](#Set-up-hosting-for-the-model) # 1. [Validate the model for use](#Validate-the-model-for-use) # # # # # --- # ## Background # # Amazon SageMaker includes functionality to support a hosted notebook environment, distributed, serverless training, and real-time hosting. We think it works best when all three of these services are used together, but they can also be used independently. Some use cases may only require hosting. Maybe the model was trained prior to Amazon SageMaker existing, in a different service. # # This notebook shows how to use a pre-existing scikit-learn trained XGBoost model with the Amazon SageMaker XGBoost Algorithm container to quickly create a hosted endpoint for that model. Please note that scikit-learn XGBoost model is compatible with SageMaker XGBoost container, whereas other gradient boosted tree models (such as one trained in SparkML) are not. # # --- # ## Setup # # Let's start by specifying: # # * AWS region. # * The IAM role arn used to give learning and hosting access to your data. See the documentation for how to specify these. # * The S3 bucket that you want to use for training and model data. 
# + isConfigCell=true
# %%time

import os
import boto3
import re
import json
from sagemaker import get_execution_role

# Resolve the AWS region and the notebook's execution role once, up front.
region = boto3.Session().region_name
role = get_execution_role()

bucket='<s3 bucket>' # put your s3 bucket name here, and create s3 bucket
prefix = 'sagemaker/DEMO-xgboost-byo'
# customize to your bucket where you have stored the data
bucket_path = 'https://s3-{}.amazonaws.com/{}'.format(region,bucket)
# -

# ## Optionally, train a scikit learn XGBoost model
#
# These steps are optional and are needed to generate the scikit-learn model that will eventually be hosted using the SageMaker Algorithm container.
#
# ### Install XGboost
# Note that for conda based installation, you'll need to change the Notebook kernel to the environment with conda and Python3.

# !conda install -y -c conda-forge xgboost

# ### Fetch the dataset

# +
# %%time
import pickle, gzip, numpy, urllib.request, json

# Load the dataset.
# NOTE(review): deeplearning.net has been unreliable as an MNIST mirror — confirm
# this URL is still reachable before relying on this cell.
urllib.request.urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", "mnist.pkl.gz")
with gzip.open('mnist.pkl.gz', 'rb') as f:
    train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
# -

# ### Prepare the dataset for training

# +
# %%time
import struct
import io
import boto3


def get_dataset():
    """Re-read mnist.pkl.gz and return the (train, valid, test) tuple."""
    import pickle
    import gzip
    with gzip.open('mnist.pkl.gz', 'rb') as f:
        u = pickle._Unpickler(f)
        u.encoding = 'latin1'
        return u.load()
# -

# +
train_set, valid_set, test_set = get_dataset()

# Unpack each split into its feature matrix and label vector.
train_X, train_y = train_set[0], train_set[1]
valid_X, valid_y = valid_set[0], valid_set[1]
test_X, test_y = test_set[0], test_set[1]
# -

# ### Train the XGBClassifier

# +
import xgboost as xgb
import sklearn as sk

# Set up the xgboost model and fit it to the training data,
# scoring on the validation split as it goes.
bt = xgb.XGBClassifier(max_depth=5,
                       learning_rate=0.2,
                       n_estimators=10,
                       objective='multi:softmax')
bt.fit(train_X, train_y,
       eval_set=[(valid_X, valid_y)],
       verbose=False)
# -

# ### Save the trained model file
# Note that the model file name must satisfy the regular expression pattern:
# `^[a-zA-Z0-9](-*[a-zA-Z0-9])*`. The model file also needs to be tar-zipped.

model_file_name = "DEMO-local-xgboost-model"
bt._Booster.save_model(model_file_name)

# !tar czvf model.tar.gz $model_file_name

# ## Upload the pre-trained model to S3

# Stream the tarball into the bucket under <prefix>/<model name>/model.tar.gz.
key= os.path.join(prefix, model_file_name, 'model.tar.gz')
with open("model.tar.gz", 'rb') as fObj:
    boto3.Session().resource('s3').Bucket(bucket).Object(key).upload_fileobj(fObj)

# ## Set up hosting for the model
#
# ### Import model into hosting
# This involves creating a SageMaker model from the model file previously uploaded to S3.

# Region -> public SageMaker XGBoost image URI.
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/xgboost:latest',
              'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/xgboost:latest',
              'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/xgboost:latest',
              'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/xgboost:latest',
              'ap-northeast-1': '501404015308.dkr.ecr.ap-northeast-1.amazonaws.com/xgboost:latest'}
container = containers[boto3.Session().region_name]

# +
# %%time
from time import gmtime, strftime

# Model names must be unique within the account, so suffix with a timestamp.
model_name = model_file_name + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
model_url = 'https://s3-{}.amazonaws.com/{}/{}'.format(region,bucket,key)
sm_client = boto3.client('sagemaker')

print(model_url)

primary_container = {
    'Image': container,
    'ModelDataUrl': model_url,
}

create_model_response2 = sm_client.create_model(
    ModelName = model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = primary_container)

print(create_model_response2['ModelArn'])
# -

# ### Create endpoint configuration
#
# SageMaker supports configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, you can create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way. In addition, the endpoint configuration describes the instance type required for model deployment.

# +
from time import gmtime, strftime

endpoint_config_name = 'DEMO-XGBoostEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_config_name)

# A single production variant taking all traffic on one ml.m4.xlarge instance.
create_endpoint_config_response = sm_client.create_endpoint_config(
    EndpointConfigName = endpoint_config_name,
    ProductionVariants=[{
        'InstanceType':'ml.m4.xlarge',
        'InitialInstanceCount':1,
        'InitialVariantWeight':1,
        'ModelName':model_name,
        'VariantName':'AllTraffic'}])

print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
# -

# ### Create endpoint
# Lastly, you create the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.

# +
# %%time
import time

endpoint_name = 'DEMO-XGBoostEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)

create_endpoint_response = sm_client.create_endpoint(
    EndpointName=endpoint_name,
    EndpointConfigName=endpoint_config_name)
print(create_endpoint_response['EndpointArn'])

# Poll once a minute until the endpoint leaves the 'Creating' state.
resp = sm_client.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Status: " + status)

while status=='Creating':
    time.sleep(60)
    resp = sm_client.describe_endpoint(EndpointName=endpoint_name)
    status = resp['EndpointStatus']
    print("Status: " + status)

print("Arn: " + resp['EndpointArn'])
print("Status: " + status)
# -

# ## Validate the model for use
# Now you can obtain the endpoint from the client library using the result from previous operations and generate classifications from the model using that endpoint.

runtime_client = boto3.client('runtime.sagemaker')

# Lets generate the prediction for a single datapoint. We'll pick one from the test data generated earlier.

import numpy as np

# Take the first test example and write it out as a one-row CSV payload.
point_X = test_X[0]
point_X = np.expand_dims(point_X, axis=0)
point_y = test_y[0]
np.savetxt("test_point.csv", point_X, delimiter=",")

# +
# %%time
import json

file_name = 'test_point.csv' #customize to your test file, will be 'mnist.single.test' if use data above

with open(file_name, 'r') as f:
    payload = f.read().strip()

response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,
                                          ContentType='text/csv',
                                          Body=payload)
result = response['Body'].read().decode('ascii')
print('Predicted Class Probabilities: {}.'.format(result))
# -

# ### Post process the output
# Since the result is a string, let's process it to determine the output class label.

floatArr = np.array(json.loads(result))
predictedLabel = np.argmax(floatArr)
print('Predicted Class Label: {}.'.format(predictedLabel))
print('Actual Class Label: {}.'.format(point_y))

# ### (Optional) Delete the Endpoint
#
# If you're ready to be done with this notebook, please run the delete_endpoint line in the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.

sm_client.delete_endpoint(EndpointName=endpoint_name)
advanced_functionality/xgboost_bring_your_own_model/xgboost_bring_your_own_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Markdowns?? # + import numpy as np import pandas as pd from IPython.display import display # Allows the use of display() for DataFrames import matplotlib.pyplot as plt # Import supplementary visualizations code visuals.py import visuals as vs from sklearn.model_selection import train_test_split # Pretty display for notebooks # %matplotlib inline # Load the wholesale customers dataset try: outcomes = pd.read_csv("500_Cities_Health_Outcomes_raw.csv", index_col = 'Unnamed: 0') behaviors = pd.read_csv("500_Cities_Health_Behaviors_raw.csv", index_col = 'Unnamed: 0') #data.drop(['Region', 'Channel'], axis = 1, inplace = True) print "Health Outcomes dataset has {} samples with {} features each.".format(*outcomes.shape) print "Unhealthy Behaviors dataset has {} samples with {} features each.".format(*behaviors.shape) except: print "Dataset could not be loaded. Is the dataset missing?" 
# NOTE(review): the cells below arrived with their newlines collapsed — multiple
# statements (and their indentation) are fused onto single physical lines, so this
# section does not parse as Python as-is.  From the visible tokens it pivots the
# 500 Cities outcome/behavior rows into per-city dicts (presumably keyed by
# CityName — confirm against the raw CSVs), builds DataFrames from them, plots
# COPD and cancer prevalence on longitude/latitude scatter maps, and fits a
# LinearRegression on smoking / physical-activity predictors of COPD and stroke.
# The original loop structure (e.g. whether the GeoLat/GeoLong writes and
# `print item` sit inside the `else:` branch of the itertuples loops) cannot be
# recovered from this copy, so the code is left byte-identical; re-export the
# original notebook to restore the real line breaks before editing further.
''' outcomes = outcomes[pd.notnull(outcomes.CityName)] behaviors = behaviors[pd.notnull(behaviors.CityName)] outcomes.drop(['Category', 'UniqueID', 'Low_Confidence_Limit', 'High_Confidence_Limit', 'MeasureId'], axis = 1, inplace = True) behaviors.drop(['Category', 'UniqueID', 'Low_Confidence_Limit', 'High_Confidence_Limit', 'MeasureId'], axis = 1, inplace = True) outcomes = outcomes[outcomes.Data_Value_Type != 'Crude prevalence'] behaviors = behaviors[behaviors.Data_Value_Type != 'Crude prevalence'] ''' display(outcomes.head()) display(behaviors.head()) # + outcomes['Data_Value'].dtypes #display(outcomes.iloc[0]) #['CityName']) # - disease_types = outcomes.Short_Question_Text.unique() print disease_types import sys behavior_types = behaviors.Short_Question_Text.unique() print "behavior types" print behavior_types # + #display(behave_data.head()) outcomes_head = outcomes.head(10) behaviors_head = behaviors.head(10) #display(head) outcome = outcomes_head.iloc[0] behavior = behaviors_head.iloc[0] outcomes_dict = {outcome.CityName:{outcome.Short_Question_Text: 0}} behaviors_dict = {behavior.CityName:{behavior.Short_Question_Text: 0}} for item in outcomes.itertuples(): if item.CityName in outcomes_dict: outcomes_dict[item.CityName][item.Short_Question_Text] = item.Data_Value else: outcomes_dict[item.CityName] = {item.Short_Question_Text:item.Data_Value} geo_lat, geo_long = tuple(item.GeoLocation.strip('()').split(',')) #print "lat = {} long = {}".format(geo_lat, geo_long) outcomes_dict[item.CityName]['GeoLat'] = float(geo_lat) outcomes_dict[item.CityName]['GeoLong'] = float(geo_long) for item in behaviors.itertuples(): if item.CityName in behaviors_dict: behaviors_dict[item.CityName][item.Short_Question_Text] = item.Data_Value else: behaviors_dict[item.CityName] = {item.Short_Question_Text:item.Data_Value} geo_lat, geo_long = tuple(item.GeoLocation.strip('()').split(',')) #print "lat = {} long = {}".format(geo_lat, geo_long) behaviors_dict[item.CityName]['GeoLat'] = 
float(geo_lat) behaviors_dict[item.CityName]['GeoLong'] = float(geo_long) #behaviors_dict[item.CityName]['GeoLocation'] = item.GeoLocation print item ''' test_dict = {lil_head.StateDesc:{lil_head.CityName:{lil_head.Short_Question_Text: 0}}} display(test_dict) for item in head.itertuples(): #display(item.StateDesc, item.CityName, item.Short_Question_Text) #display(item.Data_Value) if item.StateDesc in test_dict: if item.CityName in test_dict[item.StateDesc]: test_dict[item.StateDesc][item.CityName][item.Short_Question_Text] = item.Data_Value #print "{} {} {}".format(item.StateDesc, item.CityName, item.Short_Question_Text) else: test_dict[item.StateDesc][item.CityName] = {item.Short_Question_Text:item.Data_Value} #print "{} {}".format(item.StateDesc, item.CityName) else: test_dict[item.StateDesc] = {item.CityName:{item.Short_Question:item.Data_Value}} print "{}".format(item.StateDesc) ''' #display(len(outcomes_dict)) #display(outcomes_dict['Birmingham']['Cancer (except skin)']) display(outcomes_dict['Birmingham'].keys()) display(behaviors_dict['Birmingham'].keys()) # + outcomes_df = pd.DataFrame.from_dict(outcomes_dict, 'index') behaviors_df = pd.DataFrame.from_dict(behaviors_dict, 'index') #display(outcomes_df.Arthritis.head()) #display(outcomes_df.dtypes) #display(behaviors_df.dtypes) # + #outcomes_df.to_csv('500_Cities_Health_Outcomes.csv') # + #behaviors_df.to_csv('500_Cities_Health_Behaviors.csv') # - outcomes_df = pd.read_csv('500_Cities_Health_Outcomes.csv') behaviors_df = pd.read_csv('500_Cities_Health_Behaviors.csv') #outcomes_df = outcomes_df.drop('GeoLong', 1, inplace = True) #outcomes_df = outcomes_df.drop('GeoLat', 1, inplace = True) outcomes_df.columns outcomes_df.drop('GeoLong', 1, inplace = True) outcomes_df.columns outcomes_df.drop('GeoLat', 1, inplace = True) outcomes_df.columns #behaviors_df.set_index('Cities', inplace = True) behaviors_df.rename(columns = {'Unnamed: 0': 'Cities'}, inplace = True) behaviors_df.set_index(['Cities'], inplace = True) 
behaviors_df.head() outcomes_df.rename(columns = {'Unnamed: 0': 'Cities'}, inplace = True) outcomes_df.set_index(['Cities'], inplace = True) #outcomes_df.index.names = ['Cities'] outcomes_df.head() #display(behaviors_df[['Current Smoking', 'Physical Activity']].head()) #outcomes_cols= outcomes_df.columns #display(outcomes_cols[2]) #frames = [outcomes_df, behaviors_df] frames = [outcomes_df, behaviors_df] datacorr_df = pd.concat(frames, axis=1) #datacorr_df.drop(['GeoLocation']) display(datacorr_df.head(10)) #high_corr = Corr_df.corr() #high_corr = high_corr[high_corr >= 0.9] #display(high_corr) maxCOPD = datacorr_df.COPD.max() minCOPD = datacorr_df.COPD.min() rangeCOPD = maxCOPD - minCOPD print "min = {} max = {} range = {}".format(minCOPD, maxCOPD, rangeCOPD) # + display(datacorr_df.GeoLong.shape) display(datacorr_df.GeoLat.shape) display(len(datacorr_df.GeoLat)) f, ax = plt.subplots(figsize=(55/2,35/2)) ax.set_title('COPD') #plt.title("COPD in Continental US") ax.axis([-125.0, -70.0, 25.0, 50.0]) COPD_prev = (datacorr_df['COPD'] - minCOPD)/rangeCOPD ax.scatter(datacorr_df.GeoLong, datacorr_df.GeoLat, s = 2000*COPD_prev) #plt.figure(figsize =(20,10)) #plt.scatter(datacorr_df.GeoLong, datacorr_df.GeoLat, s = 30*(datacorr_df.COPD-minCOPD)/rangeCOPD) plt.show() # + maxCancer = datacorr_df['Cancer (except skin)'].max() minCancer = datacorr_df['Cancer (except skin)'].min() rangeCancer = maxCancer - minCancer print "min = {} max = {} range = {}".format(minCancer, maxCancer, rangeCancer) Cancer_prev = (datacorr_df['Cancer (except skin)'] - minCancer)/rangeCancer f, ax = plt.subplots(figsize=(55/2,35/2)) ax.set_title('Cancer (except skin) in Continental US') #plt.title("COPD in Continental US") ax.axis([-125.0, -70.0, 25.0, 50.0]) ax.scatter(datacorr_df.GeoLong, datacorr_df.GeoLat, s = 1000*Cancer_prev) #plt.figure(figsize =(20,10)) #plt.scatter(datacorr_df.GeoLong, datacorr_df.GeoLat, s = 30*(datacorr_df.COPD-minCOPD)/rangeCOPD) plt.show() # + high_corr = 
datacorr_df.corr() high_corr = high_corr[high_corr >= 0.85] #display(high_corr) #display(high_corr.head()) col_names = high_corr.columns #display(high_corr.iloc[0].name) #display(high_corr.iloc[0][0]) #display(col_names[0]) for item in high_corr.head().itertuples(): for i in range(len(item)): if item[i] >= 0.85: print "{} {} {}".format(item.Index, col_names[i], item[i]) #print "{} {}".format(item.Index, col_names[i]) #display(item.index[0]) item[0] '''for item in high_corr.itertuples(): for col in item: if ite ''' # - # adding some changes. adding more changes. high_corr = datacorr_df.corr() high_corr['Stroke'] high_corr['Stroke'] # + #figure out how to generate a list of the names of things highly correlated #with stroke stroke_preds = [] for i in range(len(high_corr['Stroke'])): val = high_corr['Stroke'][i] if val > 0.8: print "{} {}".format(high_corr.axes[0][i], val) stroke_preds.append(high_corr.axes[0][i]) print stroke_preds # - stroke_preds.remove('Stroke') print stroke_preds from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_val_predict #behaviors_df.head() behaviors_only_df = behaviors_df.drop(['GeoLong'], axis = 1) behaviors_only_df.head() # + lr = LinearRegression() X = behaviors_only_df[['GeoLat', 'Current Smoking', 'Physical Activity']] y = outcomes_df.COPD predicted = cross_val_predict(lr, X, y, cv = 10 ) fig, ax = plt.subplots() ax.scatter(y, predicted) ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4) ax.set_xlabel('Measured') ax.set_ylabel('Predicted') plt.show() # - from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.3, random_state = 42) lr.fit(X_train, y_train) lr.score(X_test, y_test) predicted = lr.predict(X_test) len(X_test['Current Smoking']) len(predicted) # + plt.scatter(X_test['Current Smoking'], y_test, color='black') plt.scatter(X_test['Current Smoking'], lr.predict(X_test), color='blue', linewidth=3) plt.xticks(()) 
plt.yticks(()) plt.show() # + plt.scatter(X['Physical Activity'], y) plt.xticks(()) plt.yticks(()) plt.show() # -
.ipynb_checkpoints/500_Cities_Data_Exploration4-checkpoint.ipynb
# NOTE(review): this notebook's newlines were collapsed in this copy — statements
# and their indentation are fused onto single physical lines, so it does not parse
# as-is.  Visible content: four custom matplotlib hatch classes (TriangleHatch /
# RevTriangleHatch / SquareHatch / DiamondHatch, near-identical — a consolidation
# candidate once the real line breaks are restored), registered via the PRIVATE
# `matplotlib.hatch._hatch_types` list (fragile across matplotlib versions —
# verify before upgrading), followed by per-anti-pattern (CTH/EP/WCS/BLOB/...)
# threshold-sweep plots and tab-separated data dumps over `original.csv`.
# Loop nesting (e.g. whether the plt.xticks/xlabel calls sit inside the per-system
# plotting loops) cannot be recovered from this copy, so the code is left
# byte-identical; re-export the original notebook before editing further.
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import pylab import matplotlib.hatch from matplotlib.patches import Polygon # + # Add hatches needed for bar plots triangle = Polygon( [[-0.3, -0.4], [0.3, -0.4], [0., 0.1]], closed=True, fill=False).get_path() # Triangle hatch to use with the identifier 't' class TriangleHatch(matplotlib.hatch.Shapes): filled = False size = 1.0 path = triangle def __init__(self, hatch, density): self.num_rows = (hatch.count('t')) * density self.shape_vertices = self.path.vertices self.shape_codes = self.path.codes matplotlib.hatch.Shapes.__init__(self, hatch, density) revTriangle = Polygon( [[-0.3, 0.1], [0.3, 0.1], [0., -0.4]], closed=True, fill=False).get_path() # Triangle hatch to use with the identifier 'v' class RevTriangleHatch(matplotlib.hatch.Shapes): filled = False size = 1.0 path = revTriangle def __init__(self, hatch, density): self.num_rows = (hatch.count('v')) * density self.shape_vertices = self.path.vertices self.shape_codes = self.path.codes matplotlib.hatch.Shapes.__init__(self, hatch, density) square = Polygon( [[-0.3, -0.3], [0.3, -0.3], [0.3, 0.3], [-0.3, 0.3]], closed=True, fill=False).get_path() # Square hatch to use with the identifier 's' class SquareHatch(matplotlib.hatch.Shapes): filled = False size = 1.0 path = square def __init__(self, hatch, density): self.num_rows = (hatch.count('s')) * density self.shape_vertices = self.path.vertices self.shape_codes = self.path.codes matplotlib.hatch.Shapes.__init__(self, hatch, density) diamond = Polygon( [[0, 0], [0.2, 0.3], [0, 0.6], [-0.2, 0.3]], closed=True, fill=False).get_path() # Diamond hatch to use with the identifier 'd' class DiamondHatch(matplotlib.hatch.Shapes): filled = False size = 1.0 path = diamond def 
__init__(self, hatch, density): self.num_rows = (hatch.count('d')) * density self.shape_vertices = self.path.vertices self.shape_codes = self.path.codes matplotlib.hatch.Shapes.__init__(self, hatch, density) matplotlib.hatch._hatch_types.append(TriangleHatch) matplotlib.hatch._hatch_types.append(RevTriangleHatch) matplotlib.hatch._hatch_types.append(SquareHatch) matplotlib.hatch._hatch_types.append(DiamondHatch) # - # # Load dataframes dfOrig = pd.read_csv('./original.csv') dfRef = pd.read_csv('./refactored.csv') # Global variables SAVE_FIG = False # # Results from original systems # General parameters FOLDER_IMG = './images/' CLASS_NAME = ['PetClinic', 'Broadleaf', 'WebGoat', 'ts-security-service', 'OpenMRS'] SYSTEMS = ['petclinic', 'broadleaf', 'webgoat', 'trainticket', 'openmrs'] COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd'] STYLES = ['-o', '-^', '-v', '-s', '-d'] SIZES = [20, 20, 20, 20, 20] LEGENDSIZE = 30 FONT_SIZE = 14 # # Time Analysis tot = 0 counter = 0 for s in ['petclinic', 'webgoat']:#SYSTEMS: totSys = 0 print('### ' + s + ' ###') for l in [25, 50, 75, 100]: for d in [3, 6, 12]: t = 0 dfTmp = dfOrig[(dfOrig['load']==l) & (dfOrig['duration']==d) & (dfOrig['system']==s)] t += dfTmp[(dfTmp['type']=='CTH') & (dfTmp['countTh']==5) & (dfTmp['cpuTh']==10) & (dfTmp['option']=='average')]['time'].iloc[0] t += dfTmp[(dfTmp['type']=='EP') & (dfTmp['methodTimeTh']==5)]['time'].iloc[0] t += dfTmp[(dfTmp['type']=='WCS') & (dfTmp['memoryUsageTh']==5)]['time'].iloc[0] t += dfTmp[(dfTmp['type']=='BLOB') & (dfTmp['numMsgs']==5) & (dfTmp['cpuTh']==10) & (dfTmp['heapTh']==10)]['time'].iloc[0] t += dfTmp[(dfTmp['type']=='TOB') & (dfTmp['methodTimeTh']==5)]['time'].iloc[0] t += dfTmp[(dfTmp['type']=='EDA') & (dfTmp['numGCedObjsTh']==5) & (dfTmp['heapTh']==10)]['time'].iloc[0] t += dfTmp[(dfTmp['type']=='EST') & (dfTmp['numMsgs']==5)]['time'].iloc[0] print(str(l) + '-' + str(d) + ' = {:.1f} sec'.format(t/1000)) totSys += t tot += t counter += 1 
print('Average Time = {:.1f} sec'.format((totSys/12)/1000)) print() print('Total Average Time = {:.1f} sec'.format((tot/counter)/1000)) # ## Legend for the paper # + # The figure plotted when this cell is executed is not important. # Please, look for the legend that is created at the specified path. numClasses = len(SYSTEMS) xList = range(10) fig = pylab.figure() figlegend = pylab.figure(figsize=(21.3,0.8)) ax = fig.add_subplot(111) for idx in range(numClasses): plt.plot(xList, [idx]*len(xList), STYLES[idx], lw=5, color=COLORS[idx], label=CLASS_NAME[idx], markersize=SIZES[idx]) figlegend.legend(loc='center', ncol=numClasses, fontsize=LEGENDSIZE, facecolor='white', framealpha=1) #fig.show() if SAVE_FIG: figlegend.savefig(FOLDER_IMG + 'legend.png') # - # ## CTH # + AP = 'CTH' COUNT_TH = 5 CPU_TH = 10 OPTION = 'average' for SYSTEM in SYSTEMS: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['countTh']==COUNT_TH) & (dfOrig['cpuTh']==CPU_TH) & (dfOrig['option']==OPTION) ] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmp['foundAP'].iloc[0])) # + LOAD = 25 DURATION = 3 AP = 'CTH' # Fixed parameters CPU_TH = 10 OPTION = 'average' dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['cpuTh'] == CPU_TH) & (dfOrig['option'] == OPTION) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['countTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Count Offset [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(CPU) = ' + str(CPU_TH) + ' | Option = ' + OPTION) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + 
str(LOAD) + '-' + str(DURATION) + '_cpu' + str(CPU_TH) + '_opt' + str(OPTION) + '.png') plt.show() X_AXIS = 'countTh' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # + LOAD = 25 DURATION = 3 AP = 'CTH' # Fixed parameters COUNT_TH = 5 OPTION = 'average' dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['countTh'] == COUNT_TH) & (dfOrig['option'] == OPTION) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['cpuTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('CPU Threshold [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(Count) = ' + str(COUNT_TH) + ' | Option = ' + OPTION) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '_count' + str(COUNT_TH) + '_opt' + str(OPTION) + '.png') plt.show() X_AXIS = 'cpuTh' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # + WIDTH = 0.1 LOAD = 25 DURATION = 3 AP = 'CTH' # Fixed parameters COUNT_TH = 5 CPU_TH = 10 dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['countTh'] == COUNT_TH) & (dfOrig['cpuTh'] == CPU_TH) ] for sys, stl, col, pos, shade in zip(SYSTEMS, STYLES, COLORS, [-2.5*WIDTH, -1.5*WIDTH, -0.5*WIDTH, 0.5*WIDTH, 1.5*WIDTH], ['O', 't', 'v', 's', 'd']): plt.bar([x+pos for x in [0,1,2]], 
dfTmp[dfTmp['system']==sys]['foundAP'], width=WIDTH, align='edge', color=col, hatch=shade, label=sys) plt.xticks([0,1,2], dfTmp['option'].unique(), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Option', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(Count) = ' + str(COUNT_TH) + ' | Th(CPU) = ' + str(CPU_TH)) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '_cpu' + str(CPU_TH) + '_count' + str(COUNT_TH) + '.png') plt.show() X_AXIS = 'option' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in ['min', 'max', 'average']: string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # - # ## EP # + AP = 'EP' TIME_TH = 5 for SYSTEM in SYSTEMS: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['methodTimeTh']==TIME_TH) ] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmp['foundAP'].iloc[0])) # + LOADS = [25, 100] DURATIONS = [3, 12] AP = 'EP' for LOAD, DURATION in zip(LOADS, DURATIONS): dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['methodTimeTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Execution Time Offset [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION)) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '.png') plt.show() X_AXIS = 'methodTimeTh' 
HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # - # ## WCS # + AP = 'WCS' MEM_TH = 5 for SYSTEM in SYSTEMS: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['memoryUsageTh']==MEM_TH) ] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmp['foundAP'].iloc[0])) # + LOADS = [25, 75] DURATIONS = [3, 6] AP = 'WCS' for LOAD, DURATION in zip(LOADS, DURATIONS): dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['memoryUsageTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Memory Usage Offset [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION)) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '.png') plt.show() X_AXIS = 'memoryUsageTh' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # - # ## BLOB # + AP = 'BLOB' MSG_TH = 5 CPU_TH = 10 HEAP_TH = 10 for SYSTEM in SYSTEMS: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & 
(dfOrig['numMsgs']==MSG_TH) & (dfOrig['cpuTh']==CPU_TH) & (dfOrig['heapTh']==HEAP_TH) ] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmp['foundAP'].iloc[0])) # + LOAD = 25 DURATION = 3 AP = 'BLOB' # Fixed parameters HEAP_TH = 10 MSG_TH = 5 dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['heapTh'] == HEAP_TH) & (dfOrig['numMsgs'] == MSG_TH) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['cpuTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('CPU Threshold [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(Heap) = ' + str(HEAP_TH) + ' | Th(Msgs) = ' + str(MSG_TH)) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '_heap' + str(HEAP_TH) + '_msg' + str(MSG_TH) + '.png') plt.show() X_AXIS = 'cpuTh' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # + LOAD = 25 DURATION = 3 AP = 'BLOB' # Fixed parameters CPU_TH = 10 MSG_TH = 5 dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['cpuTh'] == CPU_TH) & (dfOrig['numMsgs'] == MSG_TH) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['heapTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Memory Threshold [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(CPU) = ' + str(CPU_TH) + ' | Th(Msgs) = ' + str(MSG_TH)) # 
plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '_cpu' + str(CPU_TH) + '_msg' + str(MSG_TH) + '.png') plt.show() X_AXIS = 'heapTh' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # + LOAD = 25 DURATION = 3 AP = 'BLOB' # Fixed parameters CPU_TH = 10 HEAP_TH = 10 dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['cpuTh'] == CPU_TH) & (dfOrig['heapTh'] == HEAP_TH) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['numMsgs'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Message Offset [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(CPU) = ' + str(CPU_TH) + ' | Th(Heap) = ' + str(HEAP_TH)) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '_cpu' + str(CPU_TH) + '_heap' + str(HEAP_TH) + '.png') plt.show() X_AXIS = 'numMsgs' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # - # ## TOB # + AP = 'TOB' TIME_TH = 5 for SYSTEM in SYSTEMS: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['methodTimeTh']==TIME_TH) ] print(str(LOAD) + '-' + str(DURATION) + ' = ' + 
str(dfTmp['foundAP'].iloc[0])) # + LOADS = [25, 50] DURATIONS = [3, 3] AP = 'TOB' for LOAD, DURATION in zip(LOADS, DURATIONS): dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['methodTimeTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Execution Time Offset [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION)) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '.png') plt.show() X_AXIS = 'methodTimeTh' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # - # ## EST # + AP = 'EST' MSGS_TH = 5 for SYSTEM in SYSTEMS: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['numMsgs']==MSGS_TH) ] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmp['foundAP'].iloc[0])) # + LOADS = [25, 100] DURATIONS = [3, 12] AP = 'EST' for LOAD, DURATION in zip(LOADS, DURATIONS): dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['numMsgs'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Message Offset [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION)) # 
plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '.png') plt.show() X_AXIS = 'numMsgs' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # - # ## EDA # + AP = 'EDA' OBJS_TH = 5 HEAP_TH = 10 for SYSTEM in SYSTEMS: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['numGCedObjsTh']==OBJS_TH) & (dfOrig['heapTh']==HEAP_TH) ] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmp['foundAP'].iloc[0])) # + LOADS = [25] DURATIONS = [3] AP = 'EDA' # Fixed parameters HEAP_TH = 10 for LOAD, DURATION in zip(LOADS, DURATIONS): dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['heapTh']==HEAP_TH) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['numGCedObjsTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('GCed Objects Offset [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION)) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '.png') plt.show() X_AXIS = 'numGCedObjsTh' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # + LOADS = [25] DURATIONS = [3] AP = 'EDA' # Fixed parameters 
GC_OBJS = 5 for LOAD, DURATION in zip(LOADS, DURATIONS): dfTmp = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['numGCedObjsTh']==GC_OBJS) ] for sys, stl, col in zip(SYSTEMS, STYLES, COLORS): plt.plot(dfTmp[dfTmp['system']==sys]['heapTh'], dfTmp[dfTmp['system']==sys]['foundAP'], stl, color=col, label=sys) plt.xticks(range(0,100,10), size=FONT_SIZE) plt.yticks(range(0,max(dfTmp['foundAP'])+1), size=FONT_SIZE) plt.xlabel('Memory Threshold [%]', size=FONT_SIZE) # plt.title(str(LOAD) + ' - ' + str(DURATION)) # plt.legend() plt.tight_layout() if SAVE_FIG: plt.savefig(FOLDER_IMG + str(AP) + '_' + str(LOAD) + '-' + str(DURATION) + '.png') plt.show() X_AXIS = 'heapTh' HEADER = '#' + X_AXIS + '\t' + '\t'.join(SYSTEMS) print(HEADER) for th in range(5,96,5): string = str(th) + '\t' for sys in SYSTEMS: string += str(dfTmp[(dfTmp['system']==sys) & (dfTmp[X_AXIS]==th)]['foundAP'].iloc[0]) if sys != SYSTEMS[-1]: string += '\t' print(string) # - # # Original vs. 
Refactored: OpenMRS # ## CTH # + AP = 'CTH' COUNT_TH = 5 CPU_TH = 10 OPTION = 'average' for SYSTEM in ['openmrs']: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmpO = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['countTh']==COUNT_TH) & (dfOrig['cpuTh']==CPU_TH) & (dfOrig['option']==OPTION) ] dfTmpR = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['type']==AP) & (dfRef['system']==SYSTEM) & (dfRef['countTh']==COUNT_TH) & (dfRef['cpuTh']==CPU_TH) & (dfRef['option']==OPTION) ] diff = dfTmpR['foundAP'].iloc[0] - dfTmpO['foundAP'].iloc[0] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmpR['foundAP'].iloc[0]) + '(' + str(diff) + ')') # + LOAD = 100 DURATION = 3 AP = 'CTH' SYSTEM = 'openmrs' # Fixed parameters CPU_TH = 5 OPTION = 'average' # X-axis parameter X_PARAM = 'countTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) & (dfOrig['cpuTh'] == CPU_TH) & (dfOrig['option'] == OPTION) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) & (dfRef['cpuTh'] == CPU_TH) & (dfRef['option'] == OPTION) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Count Threshold') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(CPU) = ' + str(CPU_TH) + ' | Option = ' + OPTION) plt.legend() plt.show() # + LOAD = 25 DURATION = 3 AP = 'CTH' SYSTEM = 'openmrs' # Fixed parameters COUNT_TH = 5 OPTION = 'average' # X-axis parameter X_PARAM = 'cpuTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) & (dfOrig['countTh'] == COUNT_TH) & (dfOrig['option'] == OPTION) ] dfTmpRef = 
dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) & (dfRef['countTh'] == COUNT_TH) & (dfRef['option'] == OPTION) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('CPU Threshold') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(Count) = ' + str(COUNT_TH) + ' | Option = ' + OPTION) plt.legend() plt.show() # + LOAD = 25 DURATION = 3 AP = 'CTH' SYSTEM = 'openmrs' # Fixed parameters CPU_TH = 5 COUNT_TH = 5 # X-axis parameter X_PARAM = 'option' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) & (dfOrig['cpuTh'] == CPU_TH) & (dfOrig['countTh'] == COUNT_TH) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) & (dfRef['cpuTh'] == CPU_TH) & (dfRef['countTh'] == COUNT_TH) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Option') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(CPU) = ' + str(CPU_TH) + ' | Th(Count) = ' + str(COUNT_TH)) plt.legend() plt.show() # - # ## EP # + AP = 'EP' TIME_TH = 5 for SYSTEM in ['openmrs']: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmpO = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['methodTimeTh']==TIME_TH) ] dfTmpR = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['type']==AP) & (dfRef['system']==SYSTEM) & (dfRef['methodTimeTh']==TIME_TH) ] diff = dfTmpR['foundAP'].iloc[0] - dfTmpO['foundAP'].iloc[0] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmpR['foundAP'].iloc[0]) + '(' + str(diff) + ')') 
# + LOAD = 25 DURATION = 3 AP = 'EP' SYSTEM = 'openmrs' # X-axis parameter X_PARAM = 'methodTimeTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Method Time Threshold') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION)) plt.legend() plt.show() # - # ## WCS # + AP = 'WCS' MEM_TH = 5 for SYSTEM in ['openmrs']: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmpO = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['memoryUsageTh']==MEM_TH) ] dfTmpR = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['type']==AP) & (dfRef['system']==SYSTEM) & (dfRef['memoryUsageTh']==MEM_TH) ] diff = dfTmpR['foundAP'].iloc[0] - dfTmpO['foundAP'].iloc[0] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmpR['foundAP'].iloc[0]) + '(' + str(diff) + ')') # + LOAD = 100 DURATION = 3 AP = 'WCS' SYSTEM = 'openmrs' # X-axis parameter X_PARAM = 'memoryUsageTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Memory Usage Threshold') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION)) plt.legend() plt.show() # - # ## BLOB # + AP = 'BLOB' MSG_TH = 5 CPU_TH = 10 HEAP_TH = 10 for SYSTEM in 
['openmrs']: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmpO = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['numMsgs']==MSG_TH) & (dfOrig['cpuTh']==CPU_TH) & (dfOrig['heapTh']==HEAP_TH) ] dfTmpR = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['type']==AP) & (dfRef['system']==SYSTEM) & (dfRef['numMsgs']==MSG_TH) & (dfRef['cpuTh']==CPU_TH) & (dfRef['heapTh']==HEAP_TH) ] diff = dfTmpR['foundAP'].iloc[0] - dfTmpO['foundAP'].iloc[0] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmpR['foundAP'].iloc[0]) + '(' + str(diff) + ')') # + LOAD = 25 DURATION = 6 SYSTEM = 'openmrs' AP = 'BLOB' # Fixed parameters HEAP_TH = 10 MSG_TH = 5 # X-axis parameter X_PARAM = 'cpuTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) & (dfOrig['heapTh'] == HEAP_TH) & (dfOrig['numMsgs'] == MSG_TH) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) & (dfRef['heapTh'] == HEAP_TH) & (dfRef['numMsgs'] == MSG_TH) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('CPU Threshold') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(Heap) = ' + str(HEAP_TH) + ' | Th(Msgs) = ' + str(MSG_TH)) plt.legend() plt.show() # + LOAD = 25 DURATION = 6 SYSTEM = 'openmrs' AP = 'BLOB' # Fixed parameters CPU_TH = 10 MSG_TH = 5 # X-axis parameter X_PARAM = 'heapTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) & (dfOrig['cpuTh'] == CPU_TH) & (dfOrig['numMsgs'] == MSG_TH) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) & 
(dfRef['cpuTh'] == CPU_TH) & (dfRef['numMsgs'] == MSG_TH) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Heap Threshold') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(CPU) = ' + str(CPU_TH) + ' | Th(Msgs) = ' + str(MSG_TH)) plt.legend() plt.show() # + LOAD = 100 DURATION = 3 SYSTEM = 'openmrs' AP = 'BLOB' # Fixed parameters CPU_TH = 10 HEAP_TH = 10 # X-axis parameter X_PARAM = 'numMsgs' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) & (dfOrig['cpuTh'] == CPU_TH) & (dfOrig['heapTh'] == HEAP_TH) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) & (dfRef['cpuTh'] == CPU_TH) & (dfRef['heapTh'] == HEAP_TH) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Message Threshold') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION) + ' | Th(CPU) = ' + str(CPU_TH) + ' | Th(Heap) = ' + str(HEAP_TH)) plt.legend() plt.show() # - # ## TOB # + AP = 'TOB' TIME_TH = 5 for SYSTEM in ['openmrs']: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmpO = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['methodTimeTh']==TIME_TH) ] dfTmpR = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['type']==AP) & (dfRef['system']==SYSTEM) & (dfRef['methodTimeTh']==TIME_TH) ] diff = dfTmpR['foundAP'].iloc[0] - dfTmpO['foundAP'].iloc[0] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmpR['foundAP'].iloc[0]) + '(' + str(diff) + ')') # + LOAD = 100 DURATION = 3 AP = 'TOB' SYSTEM = 'openmrs' # X-axis parameter X_PARAM = 
'methodTimeTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Method Time Threshold') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION)) plt.legend() plt.show() # - # ## EST # + AP = 'EST' MSGS_TH = 5 for SYSTEM in ['openmrs']: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmpO = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['numMsgs']==MSGS_TH) ] dfTmpR = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['type']==AP) & (dfRef['system']==SYSTEM) & (dfRef['numMsgs']==MSGS_TH) ] diff = dfTmpR['foundAP'].iloc[0] - dfTmpO['foundAP'].iloc[0] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmpR['foundAP'].iloc[0]) + '(' + str(diff) + ')') # + LOAD = 25 DURATION = 3 AP = 'EST' SYSTEM = 'openmrs' # X-axis parameter X_PARAM = 'numMsgs' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Message Offset [%]') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION)) plt.legend() plt.show() # - # ## EDA # + AP = 'EDA' GCED_TH = 5 HEAP_TH = 10 for SYSTEM in ['openmrs']: print('### ' + SYSTEM + ' ###') for LOAD in [25, 50, 75, 100]: for DURATION in [3, 6, 12]: dfTmpO = 
dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['type']==AP) & (dfOrig['system']==SYSTEM) & (dfOrig['numGCedObjsTh']==GCED_TH) & (dfOrig['heapTh'] == HEAP_TH) ] dfTmpR = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['type']==AP) & (dfRef['system']==SYSTEM) & (dfRef['numGCedObjsTh']==GCED_TH) & (dfRef['heapTh'] == HEAP_TH) ] diff = dfTmpR['foundAP'].iloc[0] - dfTmpO['foundAP'].iloc[0] print(str(LOAD) + '-' + str(DURATION) + ' = ' + str(dfTmpR['foundAP'].iloc[0]) + '(' + str(diff) + ')') # + LOAD = 25 DURATION = 3 AP = 'EDA' SYSTEM = 'openmrs' # Fixed parameters HEAP_TH = 10 # X-axis parameter X_PARAM = 'numGCedObjsTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) & (dfOrig['heapTh'] == HEAP_TH) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) & (dfRef['heapTh'] == HEAP_TH) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('GCed Objects Offset [%]') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION)) plt.legend() plt.show() # + LOAD = 25 DURATION = 3 AP = 'EDA' SYSTEM = 'openmrs' # Fixed parameters GCED_TH = 5 # X-axis parameter X_PARAM = 'heapTh' dfTmpOrig = dfOrig[(dfOrig['load']==LOAD) & (dfOrig['duration']==DURATION) & (dfOrig['system']==SYSTEM) & (dfOrig['type']==AP) & (dfOrig['numGCedObjsTh'] == GCED_TH) ] dfTmpRef = dfRef[(dfRef['load']==LOAD) & (dfRef['duration']==DURATION) & (dfRef['system']==SYSTEM) & (dfRef['type']==AP) & (dfRef['numGCedObjsTh'] == GCED_TH) ] plt.plot(dfTmpOrig[X_PARAM], dfTmpOrig['foundAP'], '-o', label='Original') plt.plot(dfTmpRef[X_PARAM], dfTmpRef['foundAP'], '-d', label='Refactored') plt.xlabel('Memory Threshold [%]') plt.ylabel('Found ' + AP) plt.title(str(LOAD) + ' - ' + str(DURATION)) plt.legend() 
plt.show() # -
analysis/analysis.ipynb
-- ---
-- jupyter:
--   jupytext:
--     text_representation:
--       extension: .hs
--       format_name: light
--       format_version: '1.5'
--     jupytext_version: 1.14.4
--   kernelspec:
--     display_name: Haskell
--     language: haskell
--     name: haskell
-- ---

-- ### Input-Output Operations
--
-- *Some snippets in this notebook are taken from BLG458E Functional Programming course slides which can be accessed through the following link: https://www.slideshare.net/uyar/tag/blg458e*

-- **IO a**: IO actions/programs of type a

-- some primitive IO programs are already defined in Haskell

-- #### Reading an input

-- **getLine** function is used to read a line from std input

-- #### One-Element Type ()

-- If IO operation doesn't produce a result, we use IO () which returns ().

-- This is like the **void** return type in C.

-- #### Main Module

-- When compiling a Haskell program, GHC creates an executable that runs the main function: **main :: IO t**

-- The type **t** is generally ()

-- #### Printing a string

-- To print a string, we use the built-in function **putStr :: String -> IO ()** which takes in a string and returns the IO object that prints the string.

-- To put a new line at the end automatically, use **putStrLn**

-- | Print a fixed greeting (no trailing newline, since it uses putStr).
helloWorld :: IO ()
helloWorld = putStr "Hello, World!"

helloWorld

-- | Greet the given name (no trailing newline).
hello :: String -> IO ()
hello name = putStr ("Hello, " ++ name ++ "!")

hello "Mustafa"

putStrLn "Hello"

-- #### String Conversion

-- From a type to string, use **show**

show 14

show 1.23

-- From string to another type, use **read**

read "42" :: Integer

read "4" :: Float

read "3.14" :: Float

-- #### Action Sequences

-- IO operations consist of actions performed in a sequence

-- In order to create an action sequence, use **do**

-- do operation is like an imperative block where we define the steps one by one.
--
-- **Example**

-- putStrLn in prelude is defined as below.
-- ```haskell
-- putStrLn :: String -> IO ()
-- putStrLn str = do putStr str
--                   putStr "\n"```

-- | Print the given string on three separate lines.
putStr3Times :: String -> IO ()
putStr3Times str = do putStrLn str
                      putStrLn str
                      putStrLn str

putStr3Times "lol"

-- #### Reading an input (without capturing the values)

-- +
-- | Consume two lines from stdin, discarding what was read.
read2Lines :: IO()
read2Lines = do getLine
                getLine
                putStrLn "2 lines read"
-- I know, this function makes no sense. But you got the point!
-- -

-- #### Reading an input with capturing the values

-- Value is captured using "<-"

-- *Note: the values that are read can only be used in the do block*

helloSomebody :: IO()
helloSomebody = do name <- getLine
                   putStrLn ("Hello, " ++ name)

-- function that reads a line and prints it in reverse order
reverseInput :: IO ()
reverseInput = do str <- getLine
                  putStrLn (reverse str)

-- You can also make *local* definition using **let** that can only be used within the scope

-- function that reads 2 lines and prints them in reverse order
reverse2Lines :: IO ()
reverse2Lines = do line1 <- getLine -- beware! it's NOT line1 = getLine!
                   line2 <- getLine
                   let reverse1 = reverse line1
                   let reverse2 = reverse line2
                   putStrLn (reverse1 ++ "\n" ++ reverse2)

-- #### Returning the Input Read from StdIn

-- +
getInteger :: IO Integer
getInteger = do line <- getLine -- get the line
                return (read line :: Integer) -- convert it to an integer
-- this returns the integer read from standard input
-- -

-- #### Loops and Recursion
--
-- function that prints what it reads recursively
copyInfinitely :: IO ()
copyInfinitely = do line <- getLine
                    putStrLn line
                    copyInfinitely

-- function that prints what it reads recursively N times
copyNtimes :: Integer -> IO ()
copyNtimes n
    | n == 0 = return ()
    | otherwise = do line <- getLine
                     putStrLn line
                     copyNtimes (n-1)

-- function that prints what it reads until the input is empty string
-- (note: echoes with putStr, so echoed lines are not newline-terminated)
copyUntilEmpty :: IO ()
copyUntilEmpty = do line <- getLine
                    if line == ""
                       then return ()
                       else do putStr line
                               copyUntilEmpty

-- ### Rock - Paper - Scissors

-- In this section, a simple version of rock-paper-scissors game is implemented.
-- +
-- define move types
data Move = Rock | Paper | Scissors
            deriving Show

-- define a type for a match
-- with 2 lists, 1st: moves of player1, 2nd: moves of player2
type Match = ([Move], [Move])

-- define outcome function
-- 1: A wins, -1: B wins, 0: Tie
outcome :: Move -> Move -> Integer
outcome moveA moveB = case (moveA, moveB) of
    (Paper, Rock)     -> 1
    (Scissors, Rock)  -> -1
    (Rock, Paper)     -> -1
    (Scissors, Paper) -> 1
    (Rock, Scissors)  -> 1
    (Paper, Scissors) -> -1
    _                 -> 0

-- define showRound function (toString for a round)
-- FIX: added the missing separator before "B:" so a round renders as
-- "A: Rock B: Paper" instead of "A: RockB: Paper"
showRound :: Move -> Move -> String
showRound moveA moveB = "A: " ++ (show moveA) ++ " B: " ++ (show moveB)

-- define a strategy
-- here are some examples
-- always play Rock
rock :: [Move] -> Move
rock _ = Rock

-- cycle through options
-- (length of a list is never negative, so `mod` 3 only yields 0, 1 or 2
-- and the three cases below are exhaustive in practice)
cycled :: [Move] -> Move
cycled moves = case (length moves) `mod` 3 of
    0 -> Paper
    1 -> Scissors
    2 -> Rock

-- play what opponent played last
echo :: [Move] -> Move
echo [] = Rock
echo ms = head ms

-- interactive game
-- A: human  B: computer (playing echo)

-- convert a char into a move
convertToMove :: Char -> Move
convertToMove c
    | c `elem` "rR" = Rock -- elem checks if c is in the list
    | c `elem` "sS" = Scissors
    | c `elem` "pP" = Paper
    | otherwise = error "Invalid move"

-- create the interactive game
-- NOTE(review): showResult is not defined in this chunk of the notebook;
-- presumably it is defined elsewhere — verify before running.
playRound :: Match -> IO ()
playRound match@(movesA, movesB) =
    do ch <- getChar                  -- get the move from user
       putStrLn ""                    -- put a new line
       if ch == '.'                   -- if input is '.', show the result of the match
          then putStrLn (showResult match)
          else do let moveA = convertToMove ch
                  let moveB = echo movesA
                  -- FIX: the original passed the Move *lists* (movesA/movesB) to
                  -- showRound :: Move -> Move -> String, a type error; the two
                  -- moves just played are what must be displayed.
                  putStrLn (showRound moveA moveB)
                  playRound (moveA : movesA, moveB : movesB)

playInteractive :: IO ()
playInteractive = playRound ([], [])

-- how about an automated version of the game?
generateMatch :: Integer -> Match
generateMatch 0 = ([], [])
-- FIX: the original `step generateMatch (n-1)` applied step to the function
-- generateMatch itself (a type error); step must receive the recursively
-- generated (n-1)-round match.
generateMatch n = step (generateMatch (n-1))
    where step :: Match -> Match
          step (movesA, movesB) = (cycled movesB : movesA, echo movesA : movesB)
4IO_operations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="JYuJKcWPsCtI"
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random

import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# + [markdown] id="oRL0WEYEuOGd"
# # Load data files

# + id="JsS4rTWus3kZ"
# !wget https://download.pytorch.org/tutorial/data.zip
# !unzip data.zip

# + id="kXibPEwyten4"
SOS_token = 0
EOS_token = 1


class Lang:
    """Vocabulary of one language: word<->index maps plus per-word counts."""

    def __init__(self, name):
        self.name = name
        self.word2index = {}   # word -> vocabulary index
        self.word2count = {}   # word -> number of occurrences seen
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2       # count SOS and EOS

    def addSentence(self, sentence):
        """Register every space-separated token of `sentence`."""
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        """Add `word` to the vocabulary, or bump its count if already known."""
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            # FIX: this branch was missing, so word2count stayed at 1 for
            # every word no matter how often it occurred.
            self.word2count[word] += 1


# + id="BZ4thUYiuMGp"
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip combining marks (accents) from `s` after NFD normalization."""
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
    )


def normalizeString(s):
    """Lowercase/trim `s`, space out . ! ? and replace all other non-letters with spaces."""
    s = unicodeToAscii(s.lower().strip())
    s = re.sub(r'([.!?])', r' \1', s)
    s = re.sub(r'[^a-zA-Z.!?]+', r' ', s)
    return s


# + id="5knVIoVIumIC"
def readLangs(lang1, lang2, reverse=False):
    """Read data/<lang1>-<lang2>.txt and return (input_lang, output_lang, pairs).

    Each line of the file is a tab-separated sentence pair; with
    reverse=True the pairs (and hence the language roles) are swapped.
    """
    print("Reading lines...")

    # FIX: use a context manager so the file handle is closed deterministically
    # (the original left the file open).
    with open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8') as f:
        lines = f.read().strip().split('\n')

    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]

    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)

    return input_lang, output_lang, pairs

# + id="9_Ljz62ZvWwS"
MAX_LENGTH = 10

# English-side prefixes used to trim the corpus down to short, simple
# present-tense sentences (standard seq2seq-tutorial filtering).
eng_prefixes = (
    "i am", "i m",
    "he is", "he s ",
    "she is", 'she s ',
    "you are", 'you re ',
    'we are', 'we re ',
    'they are', 'they re '
)


def filterPair(p):
    """Return True if both sentences have fewer than MAX_LENGTH words and
    the English sentence (p[1]) starts with one of ``eng_prefixes``."""
    return len(p[0].split(' ')) < MAX_LENGTH and \
        len(p[1].split(' ')) < MAX_LENGTH and \
        p[1].startswith(eng_prefixes)


def filterPairs(pairs):
    """Keep only the sentence pairs accepted by :func:`filterPair`."""
    return [pair for pair in pairs if filterPair(pair)]


# + colab={"base_uri": "https://localhost:8080/"} id="ekMJXe72waKe"
def prepareData(lang1, lang2, reverse=False):
    """Read, filter and index the parallel corpus.

    Relies on ``readLangs`` / the ``Lang`` class defined earlier in the
    notebook.  Returns (input_lang, output_lang, pairs).
    """
    input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
    print(f'Read {len(pairs)} sentence pairs')
    pairs = filterPairs(pairs)
    print(f'Trimmed to {len(pairs)} sentence pairs')
    print('Counting words...')
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print('Counted words:')
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs


input_lang, output_lang, pairs = prepareData('eng', 'fra', reverse=True)
print(random.choice(pairs))

# + [markdown] id="BjGBQ1-Oxjho"
# # Seq2Seq - Encoder

# + id="z8L2-RI0xleb"
class EncoderRNN(nn.Module):
    """Single-layer GRU encoder processing one token index per call."""

    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        # One token at a time: view -> (seq_len=1, batch=1, hidden_size).
        embedded = self.embedding(input).view(1, 1, -1)
        output = embedded
        output, hidden = self.gru(output, hidden)
        return output, hidden

    def init_hidden(self):
        return torch.zeros(1, 1, self.hidden_size).to(device)


# + [markdown] id="1ejDPIXZyKCW"
# # Seq2Seq - Decoder

# + id="lfOEOkh9yL5b"
class DecoderRNN(nn.Module):
    """Plain (no-attention) GRU decoder producing log-probabilities."""

    def __init__(self, hidden_size, output_size):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden):
        output = self.embedding(input).view(1, 1, -1)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        # FIX: log_softmax previously relied on the deprecated implicit
        # dim; make it explicit (dim=1, matching AttnDecoderRNN below).
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden

    def init_hidden(self):
        return torch.zeros(1, 1, self.hidden_size).to(device)


# + [markdown] id="UKNe5ztezB3S"
# # Seq2Seq Attention Decoder

# + id="GZeFLYWmz34D"
class AttnDecoderRNN(nn.Module):
    """GRU decoder with additive-style attention over encoder outputs."""

    def __init__(self, hidden_size, output_size, dropout_p=0.1,
                 max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        # Attention scores are computed from [embedded ; hidden].
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        # Weighted sum of encoder outputs: (1, 1, hidden_size).
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def init_hidden(self):
        return torch.zeros(1, 1, self.hidden_size).to(device)


# + [markdown] id="C6nmkXpu21Hg"
# # Traning

# + id="hwfIsGBc8b-D"
def indexesFromSentence(lang, sentence):
    """Map a whitespace-tokenised sentence to its vocabulary indices."""
    return [lang.word2index[word] for word in sentence.split(' ')]


def tensorFromSentence(lang, sentence):
    """Sentence -> column tensor of indices, terminated by EOS_token."""
    indexes = indexesFromSentence(lang, sentence)
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long).view(-1, 1).to(device)


def tensorsFromPair(pair):
    """Return (input_tensor, target_tensor) for one sentence pair."""
    input_tensor = tensorFromSentence(input_lang, pair[0])
    target_tensor = tensorFromSentence(output_lang, pair[1])
    return (input_tensor, target_tensor)


# + id="tYUKpYjH9I-T"
teacher_forcing_ratio = 0.5


def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer,
          decoder_optimizer, criterion, max_length=MAX_LENGTH):
    """Run one training step over a single pair; return mean per-token loss."""
    encoder_hidden = encoder.init_hidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    encoder_outputs = torch.zeros(max_length, encoder.hidden_size).to(device)
    loss = 0

    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                 encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]

    decoder_input = torch.tensor([[SOS_token]]).to(device)
    decoder_hidden = encoder_hidden

    # Simplified from `True if ... else False`.
    use_teacher_forcing = random.random() < teacher_forcing_ratio

    if use_teacher_forcing:
        # Teacher forcing: feed the ground-truth token as the next input.
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention \
                = decoder(decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]
    else:
        # Free running: feed back the decoder's own prediction.
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention \
                = decoder(decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input
            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length


# + id="oZTBkYOiAHmX"
import time
import math


def asMinutes(s):
    """Format seconds as 'Xm Ys'."""
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


def timeSince(since, percent):
    """Elapsed time plus an estimate of remaining time at `percent` done."""
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))


# + id="zNpZhRksAJpg"
def trainIters(encoder, decoder, n_iters, print_every=100, plot_every=100,
               learning_rate=0.01):
    """Train on `n_iters` randomly sampled pairs, logging and plotting loss."""
    start = time.time()
    plot_losses = []
    print_loss_total = 0
    plot_loss_total = 0

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [tensorsFromPair(random.choice(pairs))
                      for i in range(n_iters)]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train(input_tensor, target_tensor, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100,
                                         print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    showPlot(plot_losses)


# + id="xgJTfD40Babf"
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np


def showPlot(points):
    """Plot the running average loss with 0.2-spaced y ticks."""
    plt.figure()
    fig, ax = plt.subplots()
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)


# + [markdown] id="Kbbc8cXYy9Rd"
# # Evaluation

# + id="0ukVKpity_LL"
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    """Greedy-decode a translation; return (words, attention matrix)."""
    with torch.no_grad():
        input_tensor = tensorFromSentence(input_lang, sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.init_hidden()

        encoder_outputs = torch.zeros(max_length,
                                      encoder.hidden_size).to(device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]]).to(device)
        decoder_hidden = encoder_hidden

        decoder_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoder_words.append('<EOS>')
                break
            else:
                decoder_words.append(output_lang.index2word[topi.item()])
            decoder_input = topi.squeeze().detach()

        return decoder_words, decoder_attentions[:di + 1]


# + id="fwgA463r0Ghn"
def evaluateRandomly(encoder, decoder, n=10):
    """Print source, reference and model translation for n random pairs."""
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')


# + colab={"base_uri": "https://localhost:8080/"} id="MWqFRB5E2X79"
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words,
                               dropout_p=0.1).to(device)

trainIters(encoder1, attn_decoder1, 75000, print_every=5000)

# + colab={"base_uri": "https://localhost:8080/"} id="mxLkeK6b2pDB"
evaluateRandomly(encoder1, attn_decoder1)

# + [markdown] id="YL6tc0oN2sqh"
# # Visualize attention

# + id="jklQloul_oBQ"
# %matplotlib inline

# + colab={"base_uri": "https://localhost:8080/", "height": 292} id="4H5FrLK-2vPW"
output_words, attentions = evaluate(encoder1, attn_decoder1,
                                    'je suis trop froid .')
plt.matshow(attentions.numpy())


# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="P6OC8dCI25Z2"
def showAttention(input_sentence, output_words, attentions):
    """Render the attention matrix with input/output tokens as tick labels."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)

    ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'],
                       rotation=90)
    ax.set_yticklabels([''] + output_words)

    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()


def evaluateAndShowAttention(input_sentence):
    """Translate a sentence and visualise its attention weights."""
    output_words, attentions = evaluate(encoder1, attn_decoder1,
                                        input_sentence)
    print('input = ', input_sentence)
    print('output = ', ' '.join(output_words))
    showAttention(input_sentence, output_words, attentions)


evaluateAndShowAttention("elle a cinq ans de moins que moi .")
evaluateAndShowAttention("elle est trop petit .")
evaluateAndShowAttention("je ne crains pas de mourir .")
evaluateAndShowAttention("c est un jeune directeur plein de talent .")
SeqAttnMT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# FIX: the bare key 'max_rows' is ambiguous/removed in modern pandas
# (set_option matches option names by pattern); use the full key.
pd.set_option('display.max_rows', 5)

# A one-row DataFrame built from scalars needs an explicit index.
fruits = pd.DataFrame({'Apples': 30, 'Bananas': 21}, index=[0])
fruits

# Equivalent construction from a list of rows.
pd.DataFrame([[30, 21]], columns=['Apples', 'Bananas'])

# Row-wise construction with a custom string index.
fruit_sales = pd.DataFrame([[35, 21], [41, 34]],
                           columns=['Apples', 'Bananas'],
                           index=['2017 Sales', '2018 Sales'])
fruit_sales

# Same table built column-wise from a dict.
pd.DataFrame({'Apples': [35, 41], 'Bananas': [21, 34]},
             index=['2017 Sales', '2018 Sales'])

# A Series from a dict: keys become the index.
ingredients = pd.Series({'Flour': '4 cups', 'Milk': '1 cup',
                         'Eggs': '2 large', 'Spam': '1 can'},
                        name='Dinner')
ingredients

# The same Series built from parallel lists.
quantities = ['4 cups', '1 cup', '3 large', '1 can']
items = ['Flour', 'Milk', 'Eggs', 'Spam']
pd.Series(quantities, index=items, name='Dinner')
pandas/Building-dataframes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns

color = sns.color_palette()
# %matplotlib inline
# -

# ## Load and inspect data

ltc_df = pd.read_csv(Path('../../resources/prices/coin_Litecoin.csv'),
                     index_col='SNo')
ltc_df

# FIX: previously converted Date -> python date -> datetime64 in two
# passes; normalize() strips the time component in a single conversion
# with the same result (datetime64 at midnight).
ltc_df['Date'] = pd.to_datetime(ltc_df['Date']).dt.normalize()
ltc_df['Spread'] = ltc_df.High - ltc_df.Low
ltc_df.info()

# ## Plot the closing value of Litecoin over time

# +
import matplotlib.dates as mdates

fig, ax = plt.subplots(figsize=(12, 8))
# sns.lineplot(y = ltc_df.Close.values, x=ltc_df.Date_mpl.values, alpha=0.8, color=color[3])
sns.lineplot(y=ltc_df.Close.values, x=ltc_df.Date.values,
             alpha=0.8, color=color[3])
ax.xaxis.set_major_locator(mdates.AutoDateLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y.%m.%d'))
# fig.autofmt_xdate()
plt.xlabel('Date', fontsize=12)
plt.ylabel('Price in USD', fontsize=12)
plt.title("Closing price distribution of LTC", fontsize=15)
plt.show()
# -

# ## Candlestick chart

# +
import matplotlib.ticker as mticker
# from matplotlib.finance import candlestick_ohlc
import mplfinance as mpf

# Date_mpl kept from the earlier matplotlib.finance approach; mplfinance
# below works off the DatetimeIndex instead.
ltc_df['Date_mpl'] = ltc_df['Date'].apply(lambda x: mdates.date2num(x))

temp_ltc_df = ltc_df.copy(deep=False)
temp_ltc_df = temp_ltc_df.set_index(['Date'])
temp_ltc_df = temp_ltc_df.drop(['Name', 'Symbol', 'Marketcap', 'Spread'],
                               axis=1)
temp_ltc_df
# -

mpf.plot(temp_ltc_df.loc['2020-9-1':], type='candle', mav=(5, 10),
         volume=True)

# ## Price prediction

from fbprophet import Prophet

# +
INPUT_FILE = "coin_Litecoin.csv"

# Prophet expects the two columns to be named ds (date) and y (value).
price_predict_df = pd.read_csv("../../resources/prices/" + INPUT_FILE,
                               parse_dates=['Date'],
                               usecols=["Date", "Close"])
price_predict_df.columns = ["ds", "y"]
price_predict_df = price_predict_df[price_predict_df['ds'] > '2020-9-1']

m = Prophet(changepoint_prior_scale=.7)
m.fit(price_predict_df)

future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
# -

m.plot(forecast)

m.plot_components(forecast)
notebooks/by_coin/litecoin_notebook_from CSV.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import stanfordnlp

# Root directory holding the downloaded French (GSD treebank) models.
model_dir = '/pi/ai/corenlp'

# Pipeline configuration: which processors to run and where each
# processor's model weights live on disk.
config = dict(
    processors='tokenize,mwt,pos,lemma,depparse',  # comma-separated processor list
    lang='fr',
    # "{processor}_{argument}" keys point each stage at its model file.
    tokenize_model_path=model_dir+'/fr_gsd_models/fr_gsd_tokenizer.pt',
    mwt_model_path=model_dir+'/fr_gsd_models/fr_gsd_mwt_expander.pt',
    pos_model_path=model_dir+'/fr_gsd_models/fr_gsd_tagger.pt',
    pos_pretrain_path=model_dir+'/fr_gsd_models/fr_gsd.pretrain.pt',
    lemma_model_path=model_dir+'/fr_gsd_models/fr_gsd_lemmatizer.pt',
    depparse_model_path=model_dir+'/fr_gsd_models/fr_gsd_parser.pt',
    depparse_pretrain_path=model_dir+'/fr_gsd_models/fr_gsd.pretrain.pt',
)

# Build the pipeline from the configuration dict and run it once.
nlp = stanfordnlp.Pipeline(**config)
doc = nlp("<NAME> au sein d'une famille de l'ancienne bourgeoisie.")
doc.sentences[0].print_tokens()  # inspect the tokenisation


# +
def analyse(sents):
    """Parse `sents`, print one line per word (text / lemma / UPOS / XPOS),
    then print the dependency parse of the first sentence."""
    parsed = nlp(sents)
    rows = []
    for sent in parsed.sentences:
        for word in sent.words:
            rows.append(f'text: {word.text+" "}\tlemma: {word.lemma}\tupos: {word.upos}\txpos: {word.xpos}')
    print('\n'.join(rows))
    parsed.sentences[0].print_dependencies()


sents = 'Je suis un étudiant'
analyse(sents)
notebook/procs-stanfordnlp-fr.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Station listing
#
# This site provides an overview of available solar radiation monitoring
# stations worldwide and metadata of the stations.
#
# The current version of this catalog is in a draft phase, so many stations
# are not yet shown. A searchable version of the table below is available
# [here](https://github.com/AssessingSolar/solarstations/blob/main/test.csv).

# + tags=["hide-input", "full-width"]
import pandas as pd

# Show every row of the catalog, not pandas' truncated default view.
pd.set_option('display.max_rows', None)

# Station metadata file ships as latin-1; blank out missing cells so the
# rendered table has no NaN noise.
stations = (
    pd.read_csv('solarstations.csv', encoding='latin1')
    .fillna('')
)
stations
# -

# ## Improvements
# The table of stations above will be made sortable and with clickable url's.

# stations.to_csv('trial.csv',index=False)
# NOTE(review): the write above is commented out, so the read below
# assumes trial.csv already exists on disk — confirm.
pd.read_csv('trial.csv')
station_listing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Emergent-communication experiment: a speaker network emits discrete
# messages describing shape specs; a listener decodes parts/colors back.
# Compositionality of the learned language is scored with the external
# `evals2` module (TRE).

# +
# %load_ext autoreload
# %autoreload 2
import evals2
from collections import namedtuple
import itertools as it
import numpy as np
import scipy.stats
import torch
from torch import nn, optim
import torch.nn.functional as F

SEED = 0
np.random.seed(SEED)
torch.manual_seed(SEED)
# NOTE(review): hard-coded CUDA device; fails on CPU-only machines.
DEVICE = torch.device("cuda")

# +
# A task is a shape spec plus its one-hot-ish feature vector and the
# integer part/color indices the listener must recover.
Task = namedtuple("Task", "spec features parts colors")

PARTS = ["stroke", "fill", "shadow"]
COLORS = ["red", "green", "blue", "black"]
# Single-attribute specs and two-attribute specs.
SPECS1 = [(part, color) for part in PARTS for color in COLORS]
SPECS2 = [
    ((parts[0], color1), (parts[1], color2))
    for parts in it.combinations(PARTS, 2)
    for color1 in COLORS
    for color2 in COLORS
]
SPECS = SPECS2  # SPECS1 + SPECS2
N_FEATURES = len(PARTS) * len(COLORS)
TASKS = []
for spec in SPECS:
    # features is a PARTS x COLORS count matrix, flattened below.
    features = np.zeros((len(PARTS), len(COLORS)))
    parts = []
    colors = []
    # Normalise single specs to a one-element list of (part, color).
    if isinstance(spec[0], str):
        spec_parts = [spec]
    else:
        spec_parts = spec
    for part, color in spec_parts:
        ipart = PARTS.index(part)
        icolor = COLORS.index(color)
        features[ipart, icolor] += 1
        parts.append(ipart)
        colors.append(icolor)
    features = torch.FloatTensor(features.ravel()).to(DEVICE)
    TASKS.append(Task(spec, features, parts, colors))

# +
# Special vocabulary symbols shared by speaker and listener decoders.
SOS = "<s>"
EOS = "</s>"
PAD = "<_>"


class Decoder(nn.Module):
    """GRU sequence decoder over a small discrete vocabulary."""

    def __init__(self, vocab, start_sym, pad_sym):
        # N_HIDDEN is defined in a later cell; fine because instances are
        # only created after that cell has run.
        hid = N_HIDDEN
        super().__init__()
        self.vocab = vocab
        self.start_id = vocab[start_sym]
        self.pad_id = vocab[pad_sym]
        self.rnn = nn.GRU(input_size=len(vocab), hidden_size=hid, num_layers=1)
        self.predict = nn.Linear(hid, len(vocab))
        self.softmax = nn.Softmax(dim=1)

    def forward(self, state, inp):
        # Returns (new hidden state, per-step vocabulary logits).
        rep, enc = self.rnn(inp, state)
        logits = self.predict(rep)
        return enc, logits

    def decode(self, init_state, max_len, sample=False):
        """Roll the decoder forward `max_len` steps (greedy or sampled).

        Returns one token list per batch element, each starting with the
        start symbol.
        """
        n_stack, n_batch, _ = init_state.shape
        out = [[self.start_id] for _ in range(n_batch)]
        tok_inp = [self.start_id for _ in range(n_batch)]
        state = init_state
        for i_step in range(max_len):
            # One-hot encode the previous tokens as this step's input.
            hot_inp = np.zeros((1, n_batch, len(self.vocab)))
            for i, t in enumerate(tok_inp):
                hot_inp[0, i, t] = 1
            hot_inp = torch.FloatTensor(hot_inp).to(DEVICE)
            if init_state.is_cuda:
                hot_inp = hot_inp.cuda()
            new_state, label_logits = self(state, hot_inp)
            label_logits = label_logits.squeeze(0)
            # Mask out control symbols so they are never emitted.
            label_logits[:, self.start_id] = -np.inf
            label_logits[:, self.pad_id] = -np.inf
            label_probs = self.softmax(label_logits).data.cpu().numpy()
            new_tok_inp = []
            for i, row in enumerate(label_probs):
                if sample:
                    tok = np.random.choice(row.size, p=row)
                else:
                    tok = row.argmax()
                new_tok_inp.append(tok)
                out[i].append(tok)
            state = new_state
            tok_inp = new_tok_inp
        return out


# +
N_HIDDEN = 256
N_VOCAB = 16
MSG_LEN = 4


def make_seq(lol, max_len, n_vocab, batch_first=False):
    """One-hot encode a list of token lists into a float tensor."""
    rr, cc = (len(lol), max_len) if batch_first else (max_len, len(lol))
    seq = np.zeros((rr, cc, n_vocab))
    for i, l in enumerate(lol):
        for j, tok in enumerate(l):
            r, c = (i, j) if batch_first else (j, i)
            seq[r, c, tok] = 1
    return torch.FloatTensor(seq).to(DEVICE)


def make_seq_tgt(lol, max_len, pad):
    """Build a (time, batch) LongTensor of targets, padded with `pad`."""
    tgt = np.ones((max_len, len(lol))) * pad
    for i, l in enumerate(lol):
        for j, tok in enumerate(l):
            tgt[j, i] = tok
    return torch.LongTensor(tgt).to(DEVICE)


class SeqPredictor(nn.Module):
    """Feature vector -> decoded token sequence, with a value baseline."""

    def __init__(self, vocab, n_features):
        super().__init__()
        # NOTE(review): mutates the caller's vocab dict in place to add
        # SOS/PAD entries.
        self.vocab = vocab
        self.vocab[SOS] = len(self.vocab)
        self.vocab[PAD] = len(self.vocab)
        self.rev_vocab = {v: k for k, v in self.vocab.items()}
        self.rep = nn.Linear(n_features, N_HIDDEN)
        self.decoder = Decoder(self.vocab, SOS, PAD)
        # Per-timestep loss (reduce=False) so it can be reward-weighted.
        self.loss = nn.CrossEntropyLoss(reduce=False, ignore_index = self.vocab[PAD])
        # Scalar value head used as a REINFORCE baseline.
        self.baseline = nn.Linear(n_features, 1)

    def sample(self, features, max_len, max=False):
        # max=True -> greedy argmax decoding; strip the leading SOS.
        rep = self.rep(features).unsqueeze(0)
        dec = self.decoder.decode(rep, MSG_LEN, sample=not max)
        dec = [d[1:] for d in dec]
        return dec

    def forward(self, features, msgs, msgs_tgt):
        """Return (per-example NLL, per-example entropy) for teacher-forced
        decoding of `msgs` against `msgs_tgt`."""
        #print(msgs.argmax(dim=2))
        #print(msgs_tgt)
        #assert False
        rep = self.rep(features).unsqueeze(0)
        _, logits = self.decoder(rep, msgs)
        time, batch, vocab = logits.shape
        logits = logits.view(time * batch, vocab)
        msgs_tgt = msgs_tgt.view(time * batch)
        loss = self.loss(logits, msgs_tgt).view(time, batch)
        ent = -(F.softmax(logits, dim=1) * F.log_softmax(logits, dim=1)).sum(dim=1).view(time, batch)
        return loss.mean(dim=0), ent.mean(dim=0)

    def decode(self, seq):
        # Token ids -> readable string.
        return "".join(self.rev_vocab[tok] for tok in seq)


class SpeakerModel(SeqPredictor):
    """Maps task features to a MSG_LEN message over letters a..p."""

    def __init__(self):
        speaker_vocab = {chr(ord('a') + i): i for i in range(N_VOCAB)}
        super().__init__(speaker_vocab, N_FEATURES)

    def sample(self, features, max=False):
        return super().sample(features, MSG_LEN, max)


class ListenerModel(nn.Module):
    """Decodes a message back into (parts, colors) token sequences."""

    def __init__(self):
        super().__init__()
        part_vocab = {part: i for i, part in enumerate(PARTS)}
        color_vocab = {color: i for i, color in enumerate(COLORS)}
        # Flattened one-hot message: MSG_LEN slots x (N_VOCAB + SOS/PAD).
        msg_size = MSG_LEN * (N_VOCAB+2)
        self.part_predictor = SeqPredictor(part_vocab, msg_size)
        self.color_predictor = SeqPredictor(color_vocab, msg_size)

    def sample(self, msgs, max=False):
        msg_feats = make_seq(msgs, MSG_LEN, N_VOCAB+2, batch_first=True)
        msg_feats = msg_feats.view(msg_feats.shape[0], -1)
        parts = self.part_predictor.sample(msg_feats, 2, max=max)
        colors = self.color_predictor.sample(msg_feats, 2, max=max)
        return parts, colors


# +
N_TRAIN = len(TASKS) * 2 // 3
N_TEST = len(TASKS) - N_TRAIN
N_BATCH = 256

Rollout = namedtuple("Rollout", "tasks msgs parts colors rewards")


def rollout(speaker, listener, tasks, max=False):
    """Run speaker -> listener on `tasks`; reward = mean part/color accuracy."""
    features = torch.stack([task.features for task in tasks])
    msgs = speaker.sample(features, max=max)
    parts, colors = listener.sample(msgs, max=max)
    # Truncate predictions to each task's true attribute count.
    parts = [p[:len(t.parts)] for p, t in zip(parts, tasks)]
    colors = [c[:len(t.colors)] for c, t in zip(colors, tasks)]
    rewards = []
    for part, color, task in zip(parts, colors, tasks):
        reward = 0.5 * (
            np.mean(np.asarray(part) == task.parts)
            + np.mean(np.asarray(color) == task.colors)
        )
        rewards.append(reward)
    return Rollout(tasks, msgs, parts, colors, rewards)


def make_loss(targets, features, rewards, max_len, n_vocab, decoder, use_baseline=True):
    """REINFORCE-style surrogate loss (reward-weighted NLL + entropy bonus)
    plus an MSE loss for the value baseline."""
    # Shift targets right, prepending SOS, to form decoder inputs.
    pre_targets = [[decoder.vocab[SOS]] + s[:-1] for s in targets]
    seq = make_seq(pre_targets, max_len, n_vocab)
    seq_tgt = make_seq_tgt(targets, max_len, decoder.vocab[PAD])
    nll, ent = decoder(features, seq, seq_tgt)
    value = decoder.baseline(features).squeeze(1)
    baseline = value.detach() if use_baseline else 0
    surrogate = -nll * (rewards - baseline) + 0.01 * ent
    return -surrogate.mean() + F.mse_loss(value, rewards)


def step(opt, speaker, listener, result):
    """One joint optimisation step on a rollout."""
    rewards = torch.FloatTensor(result.rewards).to(DEVICE)
    # Speaker is trained with the RL surrogate (reward-weighted).
    speaker_loss = make_loss(
        result.msgs,
        torch.stack([t.features for t in result.tasks]),
        rewards,
        MSG_LEN,
        N_VOCAB+2,
        speaker,
    )
    msg_feats = make_seq(result.msgs, MSG_LEN, N_VOCAB+2, batch_first=True)
    msg_feats = msg_feats.view(msg_feats.shape[0], -1)
    # Listener heads are trained supervised on the true attributes
    # (constant reward of 1, no baseline).
    part_loss = make_loss(
        [t.parts for t in result.tasks], #result.parts,
        msg_feats,
        torch.ones(len(result.tasks)).to(DEVICE), #rewards,
        2,
        len(PARTS)+2,
        listener.part_predictor,
        use_baseline=False
    )
    color_loss = make_loss(
        [t.colors for t in result.tasks], #result.colors,
        msg_feats,
        torch.ones(len(result.tasks)).to(DEVICE), #rewards,
        2,
        len(COLORS)+2,
        listener.color_predictor,
        use_baseline=False
    )
    loss = speaker_loss + part_loss + color_loss
    opt.zero_grad()
    loss.backward()
    opt.step()


def train(tasks):
    """Train a fresh speaker/listener pair for 200 batches."""
    speaker = SpeakerModel().to(DEVICE)
    listener = ListenerModel().to(DEVICE)
    params = it.chain(speaker.parameters(), listener.parameters())
    opt = optim.RMSprop(params, lr=0.003)
    for i in range(200):
        # NOTE(review): samples from the *global* train_tasks, not the
        # `tasks` parameter — works only because the caller passes the
        # same object; confirm before reusing this function.
        batch_tasks = [
            train_tasks[np.random.randint(N_TRAIN)]
            for _ in range(N_BATCH)
        ]
        result = rollout(speaker, listener, batch_tasks)
        step(opt, speaker, listener, result)
    return speaker, listener


# +
class Compose(nn.Module):
    """Learned composition operator for the TRE evaluation: combines two
    lifted message representations with linear projections."""

    def __init__(self):
        super().__init__()
        self.lproj = nn.Linear(MSG_LEN, MSG_LEN)
        self.rproj = nn.Linear(MSG_LEN, MSG_LEN)

    def forward(self, x, y):
        x = x.view(1, N_VOCAB+2, MSG_LEN)
        y = y.view(1, N_VOCAB+2, MSG_LEN)
        xproj = self.lproj(x)
        yproj = self.rproj(y)
        return (xproj + yproj).view(1, (N_VOCAB+2) * MSG_LEN)


def lift(msg):
    """Token list -> flattened (vocab x position) one-hot vector."""
    data = np.zeros((N_VOCAB+2, MSG_LEN))
    for i, tok in enumerate(msg):
        data[tok, i] = 1
    return data.ravel()


COMP_FN = Compose()
ERR_FN = evals2.L1Dist()


def evaluate(speaker, listener, tasks):
    """Mean greedy-decoding reward over `tasks`.

    NOTE(review): the loop re-runs rollout once per task on the full task
    list and keeps only the last result — one call would suffice.
    """
    for task in tasks:
        result = rollout(speaker, listener, tasks, max=True)
    return np.mean(result.rewards)


def pp(t):
    """Pretty-print a (possibly nested) spec tuple."""
    if isinstance(t, tuple):
        return "(%s)" % " ".join(pp(tt) for tt in t)
    return str(t)


def evaluate_language(speaker, listener, train_tasks, test_tasks):
    """Score compositionality (TRE via evals2) of the emergent language.

    Returns (mean TRE, [(spec, msg, reward), ...]).
    """
    result = rollout(speaker, listener, train_tasks + test_tasks, max=True)
    test_specs = set([t.spec for t in test_tasks])
    data = []
    for i in range(len(result.tasks)):
        data.append((result.tasks[i].spec, result.msgs[i], result.rewards[i]))
    specs, msgs, rewards = zip(*data)
    msgs = [lift(m) for m in msgs]
    comp = evals2.evaluate(msgs, specs, COMP_FN, ERR_FN, quiet=True, steps=1000)
    return np.mean(comp), data


# +
# Main experiment loop: 100 random train/test splits, recording
# (train acc, test acc, generalisation gap, compositionality).
header = " %10s %10s %10s %10s" % ("train", "test", "diff", "comp")
template = "[%3d] %10.2f %10.2f %10.2f %10.2f %10s"
languages = []
datapoints = []
print(header)
for i in range(100):
    tasks = list(TASKS)
    np.random.shuffle(tasks)
    train_tasks, test_tasks = tasks[:N_TRAIN], tasks[N_TRAIN:]
    speaker, listener = train(train_tasks)
    train_acc = evaluate(speaker, listener, train_tasks)
    test_acc = evaluate(speaker, listener, test_tasks)
    comp, lang = evaluate_language(speaker, listener, train_tasks, test_tasks)
    datapoints.append((train_acc, test_acc, train_acc - test_acc, comp))
    languages.append(lang)
    # Running correlation between generalisation gap and TRE.
    sig = scipy.stats.pearsonr([d[2] for d in datapoints], [d[3] for d in datapoints])
    print(template % (i, train_acc, test_acc, train_acc - test_acc, comp, sig))

# +
import seaborn as sns
from matplotlib import pyplot as plt
from pandas import DataFrame

sns.set(font_scale=1.5)
sns.set_style("ticks", {'font.family': 'serif'})
plt.tight_layout()

points = datapoints
# Keep only runs whose test accuracy cleared 0.5 (overwrites the line above).
points = [d for d in datapoints if d[1] > 0.5]
train_acc, test_acc, diff, comp = zip(*points)
data = DataFrame({'train_acc': train_acc, 'test_acc': test_acc, 'diff': diff, 'comp': comp})


def plot(y, fname, ylabel):
    """Scatter + regression of metric `y` against TRE; save as PDF."""
    sns.lmplot(x='comp', y=y, data=data, fit_reg=True, scatter_kws={'alpha': 0.5})
    plt.xlabel('TRE')
    plt.ylabel(ylabel)
    plt.savefig('comm_shapes_%s.pdf' % fname, format='pdf')
    plt.show()


plot('train_acc', 'train', 'train accuracy')
print(scipy.stats.pearsonr(comp, train_acc))
plot('test_acc', 'test', 'test accuracy')
print(scipy.stats.pearsonr(comp, test_acc))
plot('diff', 'gen', 'generalization error')
print(scipy.stats.pearsonr(comp, diff))
#sns.lmplot(x='train_acc', y='diff')


# +
def vis(lang):
    """Print each spec with its message (decoded via the last `speaker`)
    and reward, sorted by spec string."""
    for spec, msg, rew in sorted(lang, key=lambda x: str(x[0])):
        print("%55s %-33s %6s %.2f" % (
            "",
            pp(spec), #+ ("*" if spec in test_specs else " "),
            speaker.decode(msg),
            rew
        ))


print(datapoints[63])
vis(languages[63])
print()
print(datapoints[17])
vis(languages[17])
# -
comm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.6 64-bit # name: python386jvsc74a57bd01baa965d5efe3ac65b79dfc60c0d706280b1da80fedb7760faf2759126c4f253 # --- # + [markdown] id="ccuOSc6C5PnQ" # # PROCESSAMENTO DE LINGUAGEM NATURAL (NLP) # (*NATURAL LANGUAGE PROCESSING*) # + [markdown] id="sbv4m3Wv5j7W" # ![](https://www.encora.com/hs-fs/hubfs/Blog-EncoraML.png?width=800&name=Blog-EncoraML.png) # # Processamento de linguagem natural (NLP) é um subcampo da linguística, ciência da computação e inteligência artificial preocupado com as interações entre computadores e linguagem humana, em particular como programar computadores para processar e analisar grandes quantidades de dados de linguagem natural. O resultado é um computador capaz de "compreender" o conteúdo dos documentos, incluindo as nuances contextuais da linguagem dentro deles. A tecnologia pode então extrair com precisão as informações e percepções contidas nos documentos, bem como categorizar e organizar os próprios documentos. Os desafios no processamento de linguagem natural frequentemente envolvem reconhecimento de fala, compreensão de linguagem natural e geração de linguagem natural. # # O processamento de linguagem natural (ou Linguística computacional está se tornando o estado da arte no mundo de hoje. Evoluiu muitos anos atrás na década de 1960. A tarefa da NLP é entender as expressões humanas naturais em termos de fala ou texto, tomando como entrada e fornecendo A mineração de texto, também chamada de *Text Analytics*, usa o processamento de linguagem natural para transformar corpus não estruturados em documentos ou bancos de dados padronizados e normalizados para análise posterior, aplicando técnicas de inteligência artificial e algoritmos de aprendizado de máquina. 
# # **APLICAÇÕES DE NLP**<br> # # A seguir está uma lista de algumas das aplicações mais frequentes do processamento de linguagem natural. Algumas dessas tarefas têm aplicativos diretos do mundo real, enquanto outras normalmente servem como subtarefas que são usadas para ajudar na resolução de tarefas maiores. # # Embora as tarefas de processamento de linguagem natural estejam intimamente interligadas, elas podem ser subdivididas em categorias por conveniência, como descrito abaixo: # * **Processamento de texto e fala** # * Análise morfológica # * Análise sintática # * Semântica lexical # * Semântica relacional (semântica de frases individuais) # * Discurso (semântica além das frases individuais) # * Aplicativos de NLP de nível superior # + [markdown] id="k74MDId563uG" # #1.MANIPULAÇÃO DE *STRINGS* # # ***String***: Em programação de computador, uma *string* é tradicionalmente uma sequência de caracteres, seja como uma constante literal ou como algum tipo de variável. Este último pode permitir que seus elementos sejam mutados e o comprimento alterado, ou pode ser corrigido (após a criação). Uma *string* é geralmente considerada como um tipo de dados e frequentemente implementada como uma estrutura de dados de matriz de bytes (ou palavras) que armazena uma sequência de elementos, normalmente caracteres, usando alguma codificação de caracteres.([Leia](https://realpython.com/python-input-output/)) # # Ex: `"olá"` ,`"olá mundo"` ,`"olá mundo, essa frase é uma string"` # # A seguir veremos alguns métodos e propiedades úteis para manipulação de *strings*. # # + [markdown] id="_edSHSzh7CP0" # ## 1.1. 
Método: *print* # * print() - Retorna valores no terminal # >sintaxe : `print("olá mundo")` # + colab={"base_uri": "https://localhost:8080/"} id="-fxZIxbj5Qwb" executionInfo={"status": "ok", "timestamp": 1621700213289, "user_tz": 180, "elapsed": 363, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="9b5dd530-c13d-42cf-8329-5f042dd25558" frase="olá mundo" print(frase) # + [markdown] id="diMdz1vg-_-R" # ## 1.2. Método *format* # * format()- Insere variáveis dentro de uma string # >sintaxe : `"olá mundo, meu nome é {}".format("Seu nome")` # # + colab={"base_uri": "https://localhost:8080/"} id="tsG0TKOz_Jm2" executionInfo={"status": "ok", "timestamp": 1621700213513, "user_tz": 180, "elapsed": 232, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="d4fed488-c252-4264-9559-6045751a48f4" nome='Felipe' frase="olá mundo, meu nome é {}".format(nome) print(frase) # + colab={"base_uri": "https://localhost:8080/"} id="Tr_wnllO_cSE" executionInfo={"status": "ok", "timestamp": 1621700213514, "user_tz": 180, "elapsed": 29, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="fbebcbbf-fd2c-4f2b-e3b4-304841d3a274" frase=f"olá mundo, meu nome é {nome}" print(frase) # + [markdown] id="pptLffCSAJeq" # ## 1.3. 
Métodos: *capitalize* , *title*, *upper* e *lower* # # * upper () - Converte uma string em maiúsculas # >sintaxe : ` "olá mundo".upper() = "OLÁ MUNDO` # * lower () - Converte uma string em minúsculas # >sintaxe : ` "olá mundo".lower() = olá mundo` # * capitalize () - Converte o primeiro caractere em maiúsculas # >sintaxe : ` "olá mundo".capitalize() = "Olá mundo"` # * title () - Converte o primeiro caractere de cada palavra em maiúsculas # >sintaxe : ` "olá mundo".title() = "Olá Mundo"` # # # # + id="rnlRE-CLCuBm" #Frase original nome='Felipe' frase="olá mundo, meu nome é {}".format(nome) # + colab={"base_uri": "https://localhost:8080/"} id="l2fur5wNBmPJ" executionInfo={"status": "ok", "timestamp": 1621700213515, "user_tz": 180, "elapsed": 28, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="3fb81b84-7150-4bc7-c5a3-f4de7fe7dd3b" #Upper print(frase.upper()) # + colab={"base_uri": "https://localhost:8080/"} id="zmu7ApVaCl9n" executionInfo={"status": "ok", "timestamp": 1621700213515, "user_tz": 180, "elapsed": 26, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="eb06023f-3919-453e-b402-f343d0dcc88e" #Lower print(frase.lower()) # + colab={"base_uri": "https://localhost:8080/"} id="Q8lSu1RvC5Uu" executionInfo={"status": "ok", "timestamp": 1621700213516, "user_tz": 180, "elapsed": 26, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="99dff8cf-2d4e-4217-b03f-20f5f3636386" #Capitalize print(frase) # + colab={"base_uri": "https://localhost:8080/"} id="-NiUGItyCrtS" executionInfo={"status": "ok", "timestamp": 1621700213516, "user_tz": 180, "elapsed": 25, "user": {"displayName": 
"<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="2fdfd021-f513-45c1-8de8-91f5d55da7fb" #Title print(frase) # + [markdown] id="6EKNfugDDoZq" # ## 1.4. Propriedade : *list* # # * Para o python toda string pode ser interpretada como uma lista, dessa forma a maioria dos métodos do objeto *list()* são aplicavéis a strings.Abaixo veremos alguns exemplos: # # + colab={"base_uri": "https://localhost:8080/"} id="RjqH9SqH0OQZ" executionInfo={"status": "ok", "timestamp": 1621700213516, "user_tz": 180, "elapsed": 24, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="926a1d3c-def3-4c9d-a8e3-5bbcddde02bc" #Tamanho len(frase) # + colab={"base_uri": "https://localhost:8080/"} id="Kqpfw0y-Hyqn" executionInfo={"status": "ok", "timestamp": 1621700213517, "user_tz": 180, "elapsed": 24, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="09759ac5-29a3-4096-a901-0f2cb9e3dc52" #Index frase[2] # + colab={"base_uri": "https://localhost:8080/"} id="21EfwDMfH4-M" executionInfo={"status": "ok", "timestamp": 1621700213517, "user_tz": 180, "elapsed": 21, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="a0e40af5-83c9-46e6-ad9b-16a6eee1808b" # Intevarlo frase[0:3] # + colab={"base_uri": "https://localhost:8080/"} id="U7Gi9WQPICRk" executionInfo={"status": "ok", "timestamp": 1621700213517, "user_tz": 180, "elapsed": 20, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} 
outputId="0841dd98-4101-478b-e05b-a6fa8ee5632b" #Inversão frase[::-1] # + colab={"base_uri": "https://localhost:8080/"} id="b75jklQLIsg-" executionInfo={"status": "ok", "timestamp": 1621700213518, "user_tz": 180, "elapsed": 19, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="2dfbb99e-ce8b-4d60-87ed-d1edc7da21fe" #Máximo max(frase) # + colab={"base_uri": "https://localhost:8080/"} id="Q_qhsjrnJfQB" executionInfo={"status": "ok", "timestamp": 1621700213518, "user_tz": 180, "elapsed": 16, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="6874a0b4-c06d-4cf6-c5f5-e78df9d06356" #Mínimo min(frase) # - # Valores únicos set(frase) # + [markdown] id="2gBJW59wJyvI" # ## 1.5. Método *count*, *replace* e *split* # # * count () - Retorna o número de vezes que um valor especificado ocorre em uma string # >sintaxe : ` "olá mundo".count("olá") = 1` # # * replace () - Retorna uma string onde um valor especificado é substituído por um valor especificado # >sintaxe : ` "olá mundo".count("olá","oi") = "oi mundo"` # # * split () - Divide a string no separador especificado e retorna uma lista # >sintaxe : ` "olá mundo".split() = ["olá","mundo"]` # + colab={"base_uri": "https://localhost:8080/"} id="GyZ08JRtJ6Z4" executionInfo={"status": "ok", "timestamp": 1621700213729, "user_tz": 180, "elapsed": 226, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="417588da-0b71-4c40-d2fd-4ad496810d1d" #Count frase.count('mundo') # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="UQrAAZ6VmNKk" executionInfo={"status": "ok", "timestamp": 1621700213730, "user_tz": 180, "elapsed": 26, "user": 
{"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="dd5c21b4-4a7f-4ee6-f4a1-71120aadcf63" #Replace frase.replace("olá","oi") # + colab={"base_uri": "https://localhost:8080/"} id="FPw604L1mXtp" executionInfo={"status": "ok", "timestamp": 1621700213730, "user_tz": 180, "elapsed": 25, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="279d72bb-b0d4-4926-8ea9-137e6b4c2126" #Split frase.split() # + colab={"base_uri": "https://localhost:8080/"} id="X832LPqg0Ywq" executionInfo={"status": "ok", "timestamp": 1621700213731, "user_tz": 180, "elapsed": 22, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="f967b493-cdb4-414c-8571-8f20764f0b4e" #Total de palvras na frase len(frase.split()) # + [markdown] id="RsOQr90Sm5cu" # ## 1.6. 
Teste da seção # # Crie uma função que conte o número de palavras na frase : <br> # **"Um livro é a prova de que os homens são capazes de fazer magia"** # # Resultado esperado:<br> # `um:1, livro:1, é:1, a:7, prova:1 , de:2 , que:1 , os:1 , homens:1 ,são:1 ,capazes:1, fazer:1, magia:1` # + colab={"base_uri": "https://localhost:8080/"} id="Q9dOCusKm7ee" executionInfo={"status": "ok", "timestamp": 1621700213731, "user_tz": 180, "elapsed": 20, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="3e469551-6b40-4fa3-9526-ab3440de0a70" # Definir frase frase_teste="Um livro é a prova de que os homens são capazes de fazer magia" # Definir função def ContaPalvras(frase): [print( f"{palavra}:{str(frase.count(palavra))}") for palavra in set(frase.split())] #Aplicação ContaPalvras(frase_teste) # + [markdown] id="RU_JHqfHs7ON" # #2.OLÁ MUNDO DO NLP E A MINERAÇÃO DE TEXTOS # # Mineração de texto, conhecida também como mineração de dados textuais e semelhante à análise textual, refere-se ao processo de obtenção de informações importantes de um texto. Informações importantes são obtidas normalmente pela elaboração de padrões e tendências através de meios como o padrão estatístico de aprendizagem. Geralmente a mineração de texto envolve o processo de estruturação do texto de entrada (frequentemente análise, junto com a adição de algumas características linguísticas derivadas e com a retirada de outras, e com a subsequente inserção em um banco de dados), de derivação de padrões dentro da estrutura de dados e, por fim, de avaliação e interpretação do resultado. 
([Wipedia](https://pt.wikipedia.org/wiki/Minera%C3%A7%C3%A3o_de_texto)) # + id="9AukZnCttbGu" # Ler um arquivo de texto rap_lord=open('Rap Lord.txt','r',encoding="utf8").read() print(rap_lord) # + [markdown] id="KJLvajIEuyOG" # ##2.1.Coleções # Coleções em Python são contêineres usados para armazenar coleções de dados, por exemplo, list, dict, set, tuple etc. Essas são coleções integradas. Vários módulos foram desenvolvidos para fornecer estruturas de dados adicionais para armazenar coleções de dados. Um desses módulos é o *collections* , nativo do próprio Python. # + id="0-ZhZy2gwykS" # Importar a biblioteca from collections import Counter # + colab={"base_uri": "https://localhost:8080/"} id="LQKiR_gqyYxo" executionInfo={"status": "ok", "timestamp": 1621700213733, "user_tz": 180, "elapsed": 17, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="c10621c5-9f00-4746-b4a3-b71e2afc01ed" #Contar letras mais frequentes Counter(rap_lord).most_common(10) # + colab={"base_uri": "https://localhost:8080/"} id="a7qU91hizGCP" executionInfo={"status": "ok", "timestamp": 1621700213733, "user_tz": 180, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="cd98a0fd-d4ca-4de1-c7f7-48c80f811fbd" #Contar palavras mais frequentes Counter(rap_lord.split()).most_common(10) # + [markdown] id="PW3gErZRzpUG" # ## 2.2. Teste da seção # # a. Crie uma função que conte a participação percentual das 10 letras mais frequentes na música Rap Lord <br> # > Resultado esperado: ` " " : 16.69% , "a" : 10.32% , "e" : 9.26% ...` # # b. 
Crie uma função que conte a participação percentual das 10 palavras mais frequentes na música Rap Lord <br> # > Resultado esperado: `"não" : 3.89%, "que" : 3.78% , "o" : 2.48% ,"de" : 2.05% ...` # # + colab={"base_uri": "https://localhost:8080/"} id="l1nGaW2-0MAE" executionInfo={"status": "ok", "timestamp": 1621700213734, "user_tz": 180, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggsr_IEQOyt4hVc4fKU1wiOpQdA_OAYyjzqJuNRyg=s64", "userId": "07411723242186409819"}} outputId="80ec15a2-1c69-4ac1-b32b-2d93c61d9852" #Definir função def frequencias(texto,top): #Totais total_letras = sum(Counter(texto).values()) #Mais frequentes letras_frequentes = Counter(texto).most_common(top) #Exibir for letter in letras_frequentes: print (f"{letter[0]} : {round((letter[1]/total_letras)*100,2)}%") frequencias(rap_lord,10) # -
Python/Machine Learning With Python/Natural Language Processing/Introductory Class - (UFPA)/NLP PARTE I.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="XvEg8iGwuJ4J" # <div style="color:Navy"> # # <div style="text-align:center"> # # *** # # <u>TP2:</u> # # Manipulation de luminance et Histogrammes # # <p style="text-align: center; color:gray"><i>@Author:</i> <NAME></p> # # *** # </div> # # <u>**Plan:**</u> # # 1. [**Quelques rappels**](#1): histogrammes, luminance, contraste et exposition # 2. [**Opérations linéaires sur la luminance**](#2): inversions, ajout/retrait, etirement, egalisation, egalisation adaptative # 3. [**Opérations non-linéaires sur la luminance**](#3): transformée logarithmique, transformée gamma # # </div> # + [markdown] colab_type="text" id="nONshOr9uJ4O" # # <span style="color: green;text-decoration: underline" id="1">I. Quelques rappels</span> # *** # + [markdown] colab_type="text" id="uEBq1tIZuJ4Q" # Nous abordons ici les techniques de traitement d'images basées sur la modification d'histogrammes. # Ces méthodes font partie de la classe des traitements dits **ponctuels** (*pixel-wise*): la valeur de chaque pixel est corrigée, et ce indépendamment des autres pixels. # # La manipulation d'histogrammes est un outil très important en traitement d'images, permettant d'ajuster la dynamique des niveaux de gris ou des couleurs dans une image afin de la rendre plus agréable visuellement. # + [markdown] colab_type="text" id="bf_XqH91uJ4S" # ## <span style="color: DodgerBlue;text-decoration: underline">I.1 Histogrammes</span> # *** # + [markdown] colab_type="text" id="gdIYFzQRuJ4T" # **L'histogramme d'une image numérique** est une courbe statistique représentant la répartition de ses pixels selon leur intensité. 
Pour une image en noir et blanc, il indique en abscisse le niveau de gris (entier entre 0 et 255) et en ordonnée, le nombre de pixels ayant cette valeur. # # Lorsque l'histogramme est normalisé (somme à 1), il indique en ordonnée la probabilité $p_i$ de trouver un pixel de niveau de gris $i$ dans l'image. L'histogramme normalisé peut être interprété comme une **distribution de probabilité**. L'intensité d'un pixel est alors vue comme une variable aléatoire discrète. # # # Un **histogramme cumulé normalisé** calcule le pourcentage de pixels ayant une valeur inférieure à un niveau de gris donné. # # # + [markdown] colab_type="text" id="P4VuP69MuJ4X" # ## <span style="color: DodgerBlue;text-decoration: underline">I.2 Luminance</span> # *** # + [markdown] colab_type="text" id="FPoVXjhUuJ4Y" # Quelques notions et mots de vocabulaire: # # - La **luminance** (ou intensité) est une mesure physique qui fait référence à la quantité de lumière absolue émise par un objet (par unité de surface). # - La **luminosité** est une mesure perceptive qui fait référence à la brillance perçue d'un objet par un observateur humain. # - **Tons clairs et foncés** font référence à des couleurs ayant respectivement une luminance élevée et faible. # # Illustration: # # ![Histogramme](img/Illustrations/Histo.png) # # Cette distinction est nécessaire car la luminosité perçue dépends d'autres paramètres de la scène que l'objet lui-même. # # *** # #### **[Parenthèse]** # # Cela se remarque dans certaines illusions d'optique comme: # <img src="img/Illustrations/optical.png" width="400"> # # Notre cerveau fait constamment des hypothèses sur les propriétés de notre monde, et ces a priori viennent influencer notre perception du monde. 
# Ici en l'occurence, ces a priori influencent la luminosité perçue alors que les deux dalles ont la même teinte et la même luminance : nos yeux reçoivent la même quantité de lumière des deux, mais le dessin est fait de de sorte à donner l'illusion que B est dans une zone d'ombre. Or, pour votre cerveau, la seule manière qu'un objet B dans une zone d'ombre soit aussi brillant qu'un objet A hors de l'ombre est que B doit naturellement être plus clair que A pour que, malgré l'ombre, il brille autant. Et il corrige l'information perçue selon cette hypothèse, résultant en l'illusion observée: A et B ont des couleurs différentes. # # Ce genre d'hypothèses représentent un avantage évolutif et se révèleront être généralement utiles pour formuler des jugements perceptifs corrects rapidement, dans le monde réel. Certaines sont innées, d'autres apprises par l'expérience, mais elles sont omni-présentes dans notre perception du monde. Elles interviennent de manière inconsciente pour optimiser notre prise de décision. Cependant, ce genre d'heuristiques perceptives nous induisent parfois en erreur, notamment lorsqu'elles sont mises à mal intentionnellement par des illusions d'optiques faites spécifiquement pour les mettre à défaut. C'est à ce moment là qu'on les remarque, et qu'on se rend compte de leur nature irrépressible: on à beau savoir que l'on est trompés, on ne peut rien y changer. # # > *Percevoir est une bataille constante entre ce que l'on voit et ce que l'on s'attends à voir.* # # #### **[Fin de la parenthèse]** # *** # # Pour une image en **niveaux de gris**, l'information de luminance est directement donnée par la valeur de chaque pixel, qui représente une quantité de lumière transmise (on parle également d'intensité). 
# # Pour une image **RGB**, l'information de luminance est "répartie" dans les trois canaux de couleur, et doit être extraire pour que l'image RGB soit convertie en image de niveaux de gris : on parle de séparer les valeurs de **luminance** et de **chrominance**. Les informations de luminance et chrominance (qui représente l'information de couleur pure, sans luminance) sont mélangées dans les canaux RGB, mais peuvent être séparées par des filtres spécifiques. # <img src="img/Illustrations/tri.png" width="600"> # # Par exemple, la fonction `convert("L")` de Pillow converti une image couleur en calculant la valeur de luminance (L) de chaque pixel par la fonction suivante : `L = R * 0.299 + G * 0.587 + B * 0.114` # # **Remarque:** L'information de luminance de chaque pixel va influencer à quel point le pixel (de l'écran) qui affiche ce pixel (de l'image) sera illuminé, ce qui va influencer à son tour la luminosité perçue par la personne devant l'écran. # + [markdown] colab_type="text" id="mLFd9FHEuJ4a" # ## <span style="color: DodgerBlue;text-decoration: underline">I.3 Contraste</span> # *** # + [markdown] colab_type="text" id="6B1XYUKYuJ4d" # Le **contraste** est une propriété intrinsèque d'une image qui quantifie la différence de luminance entre les parties claires et sombres. Cette caractéristique nous donne une idée sur la dispersion/répartition de la luminance au sein de l'image : plus les valeurs de luminance des pixels de l'image se ressemblent, plus faible sera le contraste. A contrario, plus la luminance varie dans l'image, plus le contraste sera élevé. # # En général, une image peu contrastée est terne, tandis qu'une image trop contrastée est visuellement "agressive". Dans les deux cas, l'image manque de clarté car certains de ses détails seront peu, voire pas du tout, visibles. 
# # **Remarque:** C'est un indice de dispersion (de la distribution de la luminance des pixels --> l'histogramme), au même titre que l'écart type ou la variance indiquent la dispersion des valeurs autour de la moyenne dans une distribution. # # #### *Cas particuliers :* # - Si l'ensemble des pixels ont la même valeur, le contraste sera nul. # - Une image constituée uniquement de pixels noirs et blanc aura un contraste maximal. # # #### Au niveau de l'histogramme, le contraste se traduira par: # - Un histogramme (de luminance) peu dispersé (centré autour de la valeur moyenne de luminance) pour les images à faible contraste: pas de grandes variations dans la luminance. # - Un histogramme plus étendu / dispersé pour les images à fort contraste. # # <img src="img/Illustrations/Hist5.png"> # # # **Ajuster le contraste** d'une image à pour but d'harmoniser la distribution des niveaux de gris de l'image, de sorte que chaque niveau de l'histogramme (`[0, 255]`) contienne idéalement le même nombre de pixels. Les méthodes d'ajustement de contraste vont donc modifier la valeur de luminance des pixels de l'image de sorte à redistribuer la luminance de manière plus équitable entre les pixels. # # Par exemple, accentuer les différences de luminance en rendant les pixels sombres plus sombres et les pixels clairs plus clairs augmentera le contraste. # # Il existe plusieurs méthodes pour accomplir cet objectif, linéaires ou non-linéaires, appartenant toutes à la catégorie générale de ***histogram remapping***. # + [markdown] colab_type="text" id="lxgLT22cuJ4h" # ## <span style="color: DodgerBlue;text-decoration: underline">I.4 Niveau d'exposition</span> # *** # + [markdown] colab_type="text" id="Nttdr8sguJ4k" # Une image **sur-exposée** et **sous-exposée** auront toutes deux un faible contraste: # - La majeure partie des pixels auront une valeur de luminance élevée pour les images sur-exposées (*high-key*), donc la luminance moyenne sera élevée, et le contraste sera faible. 
# - La majeure partie des pixels auront une valeur de luminance faible pour les images sous-exposées (*low-key*), donc la luminance moyenne sera faible, et le contraste sera faible. # # <u>Faible contraste:</u> # <img src="img/Illustrations/cray_low.jpg" width="340"> # <img src="img/Illustrations/liz_low.jpg" width="300"> # # <u>Fort contraste:</u> # <img src="img/Illustrations/cray_high.jpg" width="340"> # <img src="img/Illustrations/liz_high.jpg" width="300"> # + [markdown] colab_type="text" id="onfxFnn2uJ4n" # # <span style="color: green;text-decoration: underline" id="2">II. Opérations linéaires sur la luminance</span> # *** # + [markdown] colab_type="text" id="ADD_FTbiuJ4p" # ## <span style="color: DodgerBlue;text-decoration: underline">II.1 Inversion</span> # *** # + [markdown] colab_type="text" id="31Al57xmuJ4r" # Une **image négative** est une image dont les couleurs ont été inversées par rapport à l'originale : # - Le rouge devient cyan, le vert devient magenta, le bleu devient jaune et inversement. # - Les régions sombres deviennent claires, le noir devient blanc. # # Au niveau de la luminance, chaque valeur est remplacée par son inverse: $I' = 255 - I$ # # **Exemples:** # <img src="img/Illustrations/neg.jpg" height="400"> # # **Remarque:** L'histogramme d'une image inversée sera l'inverse de celui de l'image d'origine. 
# + colab={} colab_type="code" id="FK3_JEWZuJ4s" # Code commun from PIL import Image import numpy as np from PIL import Image, ImageOps import matplotlib.pyplot as plt # %matplotlib inline # Affichons l'image d'origine, son inverse, et leurs histogrammes def affichage_inverse(img, imgInv): fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(2,2,1) plt.imshow(img, "gray") ax.set_title("Image") ax = fig.add_subplot(2,2,2) plt.hist(img.ravel(), 256, [0,256]) ax.set_title("Histogramme de l'image") ax = fig.add_subplot(2,2,3) plt.imshow(imgInv, "gray") ax.set_title("Image inversée") ax = fig.add_subplot(2,2,4) plt.hist(imgInv.ravel(), 256, [0,256]) ax.set_title("Histogramme de l'image inversée") # + [markdown] colab_type="text" id="eng4dasNuJ4y" # #### Inversion d'images par opérations matricielles: # - # Vous pouvez inverser une image (au format matriciel) de deux manières : # ```python # imgInv = 255 - img # imgInv = ~img # ``` # + colab={} colab_type="code" id="g3idu-2luJ40" outputId="bcdb17b7-add9-44b7-9b2b-535dc1e8b59e" img = np.array(Image.open('img/lena.jpg').convert("L")) # On inverse l'image imgInv = ~img affichage_inverse(img, imgInv) # + [markdown] colab_type="text" id="6Ul6VuJAuJ48" # #### Inversion d'image via des méthodes spécifiques (Pillow): # + [markdown] colab_type="text" id="1AiBXvk2uJ4-" # ##### En niveaux de gris: # + colab={} colab_type="code" id="Kdc0976SuJ4_" outputId="6b941198-aad8-48a9-d3b4-b247a5e56e5a" img = Image.open('img/lena.jpg').convert("L") # On inverse l'image imgInv = ImageOps.invert(img) affichage_inverse(np.array(img), np.array(imgInv)) # + [markdown] colab_type="text" id="MHuvdaxGuJ5E" # ##### En couleur: # + [markdown] colab_type="text" id="mKRMjjUYuJ5G" # ### <span style="color:crimson">**[Exercice]** A vous de jouer:</span> # *** # <div style="color:DarkSlateBlue"> # # 1. **En utilisant la même méthode de Pillow, chargez une image de votre choix en couleur et inversez-là.** # # # 2. 
**Affichez l'image d'origine et son histogramme, ainsi que l'image modifiée et son histogramme.** # # # 3. **Séparez les canaux de l'image, inversez-les et refusionez-les.** # # </div> # + colab={} colab_type="code" id="jod2t4DOuJ5J" # > Emplacement exercice < # + [markdown] colab_type="text" id="EvnRmLKauJ5O" # ## <span style="color: DodgerBlue;text-decoration: underline">II.2 Ajout / retrait (constantes)</span> # *** # + [markdown] colab_type="text" id="Lj3UjZk2uJ5Q" # Ajouter (ou retirer) une quantité constante de luminance à l'image permet de faire varier son niveau d'exposition (mais pas son contraste, qui est défini comme l'écart / la dispersion des valeurs de luminance dans l'image). # # Les nouvelles valeurs de luminance seront définies par : $I' = I + c$ # + [markdown] colab_type="text" id="CaJn4kebuJ5R" # ### II.2.a Par manipulation direct de la matrice # + colab={} colab_type="code" id="IhD0_iJjuJ5T" # Pour cette section, nous allons utiliser Pillow (PIL) et son module Image pour lire notre image from PIL import Image, ImageEnhance import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # Importons notre image en niveaux de gris avec le module "Image" de "PIL" img = Image.open('img/lena.jpg').convert('L') # Convertissons la en array (matrice) Python img = np.array(img) # Définissons une fonction pour regrouper le code permettant d'afficher une image et son histogramme côte-à-côte: def image_et_histo(img, cmap="gray"): fig = plt.figure(figsize=(12, 6)) ax = fig.add_subplot(1,2,1) plt.imshow(img, cmap) ax.set_title("Image") ax = fig.add_subplot(1,2,2) plt.hist(np.array(img).ravel(), 256, [0,256]) ax.set_title("Histogramme") # + colab={} colab_type="code" id="x33hClInuJ5Z" outputId="0665a2d7-0730-4465-8280-d2ef7c94c450" # Affichons l'image chargée et son histogramme: image_et_histo(img) # + [markdown] colab_type="text" id="YNtyKeAnuJ5g" # ##### Passons à la manipulation: # # Pour modifier la luminance d'une image en **niveaux de gris** 
stockée sous format de matrice (```np.array```), il suffit de modifier la valeur de chaque pixel de la matrice. # On peut alors ajouter ou retirer une certaine valeur à tous les pixels de l'image, sans discrimination sur leur valeur initiale. # + colab={} colab_type="code" id="vlfe0VBDuJ5h" outputId="8c49a37a-76a4-4037-8e0d-c0cc97de163f" # Ajoutons 80 à tous les pixels de l'image, en vérifiant que la valeur ne va pas excéder les 255 imgL = np.where((255 - img) >= 80, img + 80, 255) image_et_histo(imgL, "gray") # + [markdown] colab_type="text" id="fqFALaskuJ5m" # ##### Observations: # # **Sur l'image:** # - Notre image a bien gagné en luminance, mais on a aussi perdu de nombreux détails dans les zones les plus claires. # - En effet les pixels de l’image d’origine qui avaient une valeur de luminance de 175 ou plus se retrouvent avec une valeur supérieure à 255 après l'ajout de 80. # - On perd donc en détails dans les hautes luminances à cause de cette opération. On parle d’écrêtage du signal (*clipping*) : les nuances de luminance qui existaient entre 175 et 255 sont perdues à jamais (perte d'information non réversible). # # **Sur l'histogramme**: # - On remarque que plus aucun pixel ne semble avoir une valeur < 80. # - On voit également qu'il y a une majorité de pixels ayant une valeur de 255 (à cause de l'écrêtage du signal mentionné au dessus). # * C'est à cause de la mise à l'échelle de l'histogramme qu'il apparait applatit : il y a énormément de pixels avec la valeur 255, du coup les autres quantités semblent dérisoires à côté. 
Pour remédier à ça, on peut retirer la valeur 255 de l'histogramme artificiellement: # + colab={} colab_type="code" id="-yctiNCjuJ5n" outputId="40380259-d434-46dd-818b-82e3c221a905" fig = plt.figure(figsize=(18, 6)) ax = fig.add_subplot(1,3,1) plt.imshow(imgL, "gray") ax.set_title("Image après le +80") ax = fig.add_subplot(1,3,2) plt.hist(np.array(img).ravel(), 256, [0,255]) ax.set_title("Histogramme de l'image d'origine") ax = fig.add_subplot(1,3,3) plt.hist(np.array(imgL).ravel(), 255, [0,254]) ax.set_title("Histogramme après le +80") # + [markdown] colab_type="text" id="df-iFpkNuJ5w" # Ici, on voit mieux qui l'histogramme à bien été "poussé" vers la droite (vers les valeurs élevées de luminance), et que tous les pixels avec une valeur de 175 et plus dans l'histogramme d'origine (avant la modification de luminance) ont disparus : ils valent tous 255 maintenant, et ne sont plus affichés sur l'histogramme. # # **Remarque:** En photographie, une image avec un histogramme trop décalé vers là droite est dite sur-exposée : un déséquilibre de contraste du faite que trop de lumière à été captée et peu de détails sont disponibles dans les tons sombres. 
# + colab={} colab_type="code" id="TN7GngQNuJ5x" outputId="42f18b25-789a-4714-d22d-e153dbc6277b" # Retirons 80 à tous les pixels de l'image, en vérifiant que la valeur ne va pas passer sous 0 imgD = np.where(img >= 80, img - 80, 0) # Affichons le résultat fig = plt.figure(figsize=(18, 6)) ax = fig.add_subplot(1,3,1) plt.imshow(imgD, "gray") ax.set_title("Image après le -80") ax = fig.add_subplot(1,3,2) plt.hist(np.array(img).ravel(), 256, [0,255]) ax.set_title("Histogramme de l'image d'origine") ax = fig.add_subplot(1,3,3) plt.hist(np.array(imgD).ravel(), 255, [1,255]) ax.set_title("Histogramme après le -80") # + [markdown] colab_type="text" id="QP3LRxNnuJ52" # Même observations qu'avant, sauf que cette fois-ci l'histogramme à été poussé vers la gauche (valeurs de faible luminance), et que plus aucun pixel n'a de valeur supérieure à 175. # # On parle alors d'image sous-exposée. # + [markdown] colab_type="text" id="UAnIyGgXuJ53" # ### II.2.b Via Pillow : # + [markdown] colab_type="text" id="-iukAGsauJ56" # Pillow fournit un ensemble de méthodes permettant de manipuler une image aisément : le module ```ImageEnhancer``` # Parmi l'ensemble des méthodes de ce [module](https://pillow.readthedocs.io/en/3.1.x/reference/ImageEnhance.html#PIL.ImageEnhance.Brightness), il y à la classe ```Brightness``` qui permet de manipuler la luminance d'une image chargée avec Pillow. 
# # Elle s'utilise de la manière suivante: # ```Python # # On initialise "l'enhancer" # enhancer = ImageEnhance.Brightness(img) # # On applique une augmentation de 30% de luminance # img2 = enhancer.enhance(1.3) # ``` # + [markdown] colab_type="text" id="YZPP3EKquJ57" # ##### Sur une image en niveaux de gris : # + colab={} colab_type="code" id="kNszCooauJ59" outputId="e8f606a6-e06e-4219-8819-b9ced2cb02eb" # Exemple en niveaux de gris img = Image.open('img/lena.jpg').convert('L') # On créé "l'enhancer" et on applique une réduction de 40% de luminance img2 = ImageEnhance.Brightness(img).enhance(0.6) image_et_histo(img2, "gray") # + [markdown] colab_type="text" id="q3UX-RxIuJ6B" # #### Sur une image couleur: # + [markdown] colab_type="text" id="X7sakxs3uJ6C" # Pillow propose une méthode (```ImageEnhance```) pour modifier la luminance d'une image couleur sans avoir à extraire l'information de luminance des valeurs RGB. # + colab={} colab_type="code" id="yMeorrjuuJ6E" outputId="74f6813f-577d-4586-e94e-b5ff00806fb9" import cv2 from PIL import Image, ImageEnhance import numpy as np import matplotlib.pyplot as plt # %matplotlib inline img = Image.open('img/lena.jpg') # On créé "l'enhancer" et on applique une augmentation de 30% de luminosité img2 = ImageEnhance.Brightness(img).enhance(1.3) # Fonction d'affichage utilisant OpenCV pour calculer l'histogramme de l'image def image_et_histo2(img): fig = plt.figure(figsize=(12, 6)) hist = cv2.calcHist([np.array(img)],[0],None,[256],[0,255]) plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title("Image originale", color='b') plt.subplot(122), plt.plot(hist), plt.title("Histogramme de l'image originale", color='b') plt.show() image_et_histo2(img2) # + [markdown] colab_type="text" id="qAnLnY9nuJ6I" # ### <span style="color:crimson">**[Exercice]** A vous de jouer:</span> # *** # <div style="color:DarkSlateBlue"> # # 1. 
**Implémentez une fonction permettant d'ajouter une valeur constante de luminance à une image couleur.** # # <u>Astuce</u>: passez l'image couleur dans un espace colorimétrique permettant de modifier sa luminance globale sans avoir à séparer les canaux. # # </div> # + colab={} colab_type="code" id="g8NTXPIduJ6W" # > Emplacement exercice < # + [markdown] colab_type="text" id="OtBZTq8wuJ6e" # ## <span style="color: DodgerBlue;text-decoration: underline">II.3 Etirement</span> # *** # + [markdown] colab_type="text" id="c0YMo7N1uJ6f" # **L'étirement de contraste**, aussi appelé **normalisation d'image**, est une **méthode d'amélioration d'image ponctuelle** dans le domaine spatial, permettant de corriger les défauts d’exposition d'une image (et donc son contraste) en **étirant son histogramme** pour étendre les valeurs de luminance (intensité) à tout l'intervalle disponible (généralement `[0,255]`). # # C'est une **opération linéaire de changement à l'échelle** (*re-scaling*), où la valeur de chaque pixel est remplacée par : # # \begin{equation*} # I′(x,y) = (I'_\max - I'_\min) * \dfrac{(I(x,y) − I_\min)}{(I_\max − I_\min)} + I'_\min # \end{equation*} # # avec : # * $I(x,y)$ et $I′(x,y)$ les intensités du pixel de coordonnées (x,y) respectivement dans l'image mal exposée et la nouvelle image. 
# * $I'_\min$ et $I'_\max$ les nouvelles intensitées minimales et maximales choisiées (généralement 0 et 255, respectivement) # # <img src="https://i.stack.imgur.com/9gIw6.png" width="400"> # # <img src="https://i.stack.imgur.com/mOoAC.png"> # + colab={} colab_type="code" id="VjaQoy1FuJ6h" ### Code utile pour cette section import cv2 import numpy as np from PIL import ImageOps, Image import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline import os import ipywidgets as widgets from ipywidgets import interact, interact_manual from warnings import simplefilter simplefilter(action='ignore', category=FutureWarning) # Affichons l'image originale, modifiée, et leurs histogrames (*smoothed* via un KDE - Kernel Density Estimation) def affichage_2x2(img1, img2): fig = plt.figure(figsize=(18, 12)) ax1 = plt.subplot(221) ax1.imshow(img1, "gray") ax1.set_title("Image originale") ax2 = plt.subplot(222) sns.kdeplot(np.array(img1).ravel(), shade=True, kernel="gau", bw="scott", clip=[0,255], cut=0) ax2.set_title("Histogramme (KDE) de l'original") ax3 = plt.subplot(223) ax3.imshow(img2, "gray") ax3.set_title("Image ajustée") ax4 = plt.subplot(224, sharex=ax2, sharey=ax2) sns.kdeplot(np.array(img2).ravel(), shade=True, kernel="gau", bw="scott", clip=[0,255], cut=0) ax4.set_title("Histogramme (KDE) de l'image ajustée") plt.show() # + [markdown] colab_type="text" id="QfhIpEtGuJ6l" # #### Exemple avec `ImageOps.autocontrast` et un widdget interactif de selection d'image : # + colab={"base_uri": "https://localhost:8080/", "height": 235} colab_type="code" executionInfo={"elapsed": 863, "status": "error", "timestamp": 1580073785732, "user": {"displayName": "Rivi\u00e8<NAME>\u00e8le", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mBNfjDm1Y0wcm0EDe7v_Sdi-WtWb5EtJA-TV9SO=s64", "userId": "04667194983314352464"}, "user_tz": -60} id="ILHnXT9euJ6m" outputId="7a8f00ea-d915-4d43-d578-4282a94eb399" @interact def autocontrast(image=[f for f in os.listdir("img/") if 
os.path.isfile(os.path.join("img/", f))]): img = Image.open('img/' + image).convert("L") # Avec ImageOps.autocontrast stretchAuto = ImageOps.autocontrast(img) # Affichage affichage_2x2(img, stretchAuto) # + [markdown] colab_type="text" id="80Mt4QWVuJ6u" # #### Exemple interactif avec `cv2.normalize` : # + colab={} colab_type="code" id="ljcz0pgmuJ6w" outputId="aa9114c0-a796-46e2-b24e-78a3588a348d" @interact def stretch(image=[f for f in os.listdir("img/") if os.path.isfile(os.path.join("img/", f))], beta_val=(0, 3, 0.1)): imgLC = cv2.imread('img/' + image, cv2.IMREAD_COLOR) # normalize float versions imgNorm = cv2.normalize(imgLC, None, alpha=0, beta=beta_val, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) # scale to uint8 imgNorm = np.clip(imgNorm, 0, 1) imgNorm = (255 * imgNorm).astype(np.uint8) # Affichage affichage_2x2(cv2.cvtColor(imgLC, cv2.COLOR_BGR2RGB), cv2.cvtColor(imgNorm, cv2.COLOR_BGR2RGB)) # + [markdown] colab_type="text" id="8UaCyHqJuJ60" # ### <span style="color:crimson">**[Exercice]** A vous de jouer:</span> # *** # <div style="color:DarkSlateBlue"> # # 1. **Complétez le code suivant pour implémenter un type particulier d'étirement d'histogramme: l'étirement *MinMax*.** # # Celui ci est défini par l'équation : $I′(x,y) = 255 * \dfrac{(I(x,y) − I_\min)}{(I_\max − I_\min)}$ # # 2. **Modifiez votre code pour l'appliquer à une image couleur.** # # <u>Astuces</u>: # * Utilisez les fonctions `np.min` et `np.max` pour calculer le min et max d'une image # * Passer l'image couleur en HSV pour pouvoir modifier sa luminance globale sans avoir à séparer les canaux. # * Essayez d'utiliser la notation vectorielle (au lieu de boucles imbriquées) pour optimiser le temps de calcul. 
# #
# </div>

# + colab={} colab_type="code" id="BCu2Fh8zuJ61" outputId="8e632191-942f-4ac7-abdb-2b1c0336b0f0"
# > Emplacement exercice <

# Exercise stub: takes an image and returns its min-max stretched version.
def apply_minmax(img):
    # TODO: code à compléter
    return minmax_img


@interact
def minmax(image=[f for f in os.listdir("img/") if os.path.isfile(os.path.join("img/", f))]):
    """Interactive widget: pick an image from img/, apply min-max stretching, display it."""
    imgLC = cv2.imread('img/' + image)

    # Apply the manual autocontrast (min-max stretch) defined above.
    img_stretch = apply_minmax(imgLC)

    # Display original vs. stretched image with their histograms.
    # FIX: the helper defined earlier in this notebook is `affichage_2x2`;
    # `affichage_2x2_KDE` does not exist and raised a NameError.
    affichage_2x2(cv2.cvtColor(imgLC, cv2.COLOR_BGR2RGB), img_stretch)

# + [markdown] colab_type="text" id="wJIOJ0tYuJ66"
# ## <span style="color: DodgerBlue;text-decoration: underline">II.4 Egalisation d'Histogramme</span>
# ***

# + [markdown] colab_type="text" id="nusTxNK0uJ69"
# **L'égalisation de l'histograme** (*histogram equalization*) est une **méthode globale de normalisation** qui modifie la luminance de chaque pixel de l'image de sorte à ce que toutes les valeurs de luminance soient présentes en quantités relativement équivalentes.
#
# L’objectif de l’égalisation d’histogramme est de faire en sorte que l'histogramme post-égalisation soit le plus plat possible, et permet donc d'augmenter le contraste global de l'image en "redistribuant" la luminance dans l'image. Cela peut également s'exprimer par le fait de rendre l'histogramme cumulé (CDF) linéaire.
#
#
# <img src="http://www.sci.utah.edu/~acoste/uou/Image/project1/images/equalization.png">
#
# <u>Remarque:</u> Des versions plus modernes de cette technique se basent sur plusieurs sous-histogrammes de l'image pour modifier le contraste de manière plus locale, au lieu de travailler directement sur le contraste global.
On peut mentionner: # * Adaptive Histogram Equalization (AHE) # * Contrast Limiting Adaptive Histogram Equalization (CLAHE) # * Multipeak Histogram Equalization (MPHE) # * Multipurpose Beta Optimized Bihistogram Equalization (MBOBHE) # + colab={} colab_type="code" id="uUQnENIauJ6_" # Code utile pour cette section import cv2 import numpy as np from PIL import Image, ImageOps, ImageEnhance import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline ''' Affiche dans une grille 2x3: * L'image d'origine, son histogramme (continu, transformé via un KDE [Kernel Density Estimator] Gaussien) et l'histogramme cumulé (CDF) * L'image modifiée, son histogramme (continu, transformé via un KDE) et l'histogramme cumulé (CDF) ''' def affichage2x3(img1, img2, cs="gray"): fig = plt.figure(figsize=(20, 15)) ax1 = plt.subplot(231) ax1.imshow(img1, cs) ax1.set_title("Image originale") ax2 = plt.subplot(232) ax2 = sns.kdeplot(np.array(img1).ravel(), shade=True, kernel="gau", bw="scott", clip=[0,255], cut=0) ax2.set_title("Histogramme (KDE) de l'original") ax3 = plt.subplot(233) ax3.hist(np.array(img1).ravel(), bins=256, density=True, histtype='stepfilled', cumulative=True, color='tab:orange', alpha=0.5) ax3.set_title("CDF de l'original") ax4 = plt.subplot(234) ax4.imshow(img2, cs) ax4.set_title("Image ajustée") ax5 = plt.subplot(235, sharey=ax2) ax5 = sns.kdeplot(np.array(img2).ravel(), shade=True, kernel="gau", bw="scott", clip=[1,254], cut=0) ax5.set_title("Histogramme (KDE) de l'image ajustée") ax6 = plt.subplot(236) ax6.hist(np.array(img2).ravel(), bins=256, density=True, histtype='stepfilled', cumulative=True, color='tab:orange', alpha=0.5) ax6.set_title("CDF de l'image ajustée") ''' Affiche dans une grille 2x2: * L'image d'origine, son histogramme (continu, transformé via un KDE) combiné avec l'histogramme cumulé (CDF) * L'image modifiée, son histogramme (continu, transformé via un KDE) et l'histogramme cumulé (CDF) ''' def affichage2x2(img1, img2, cs="gray"): fig = 
plt.figure(figsize=(18, 12)) ax1 = plt.subplot(221) ax1.imshow(img1, cs) ax1.set_title("Image originale") ax2 = plt.subplot(222) hist1 = np.array(img1).ravel() ax2 = sns.kdeplot(hist1, shade=True, kernel="gau", bw="scott", clip=[0, 255], cut=0) ax2bis = ax2.twinx() ax2bis = plt.hist(hist1, density=True, cumulative=1, histtype='step', bins=256, color='tab:orange') ax2.set_title("KDE & CDF de l'original") ax3 = plt.subplot(223) ax3.imshow(img2, cs) ax3.set_title("Image ajustée") ax4 = plt.subplot(224, sharey=ax2) hist2 = np.array(img2).ravel() ax4 = sns.kdeplot(hist2, shade=True, kernel="gau", bw="scott", clip=[0, 255], cut=0) ax42 = ax4.twinx() ax42 = plt.hist(hist2, density=True, cumulative=1, histtype='step', bins=256, color='tab:orange') ax4.set_title("KDE & CDF de l'image ajustée") # + [markdown] colab_type="text" id="QExBAYVRuJ7F" # #### Ajustement de contraste avec la méthode `ImageEnhance.Contrast` de Pillow : # + colab={} colab_type="code" id="My5fJUpuuJ7G" outputId="1f631040-11fc-4999-f6ac-def1cb87e9bc" @interact def enhance_contrast(image=[f for f in os.listdir("img/") if os.path.isfile(os.path.join("img/", f))], value=(0,5,0.2)): img = Image.open('img/' + image) # On créé "l'enhancer" et on applique un rehaussement de $value$ % de contraste img_hc = ImageEnhance.Contrast(img).enhance(value) affichage2x3(img, img_hc, "viridis") # + [markdown] colab_type="text" id="U_-wGr8tuJ7K" # #### Egalisation d'histogramme avec `ImageOps.equalize` de Pillow : # + colab={} colab_type="code" id="g98cElcLuJ7O" outputId="60859a72-2800-47da-ba70-f8e52286c645" @interact def equalize(image=[f for f in os.listdir("img/") if os.path.isfile(os.path.join("img/", f))]): img = Image.open('img/' + image) img_equ = ImageOps.equalize(img) affichage2x3(img, img_equ, "viridis") # + [markdown] colab_type="text" id="SFCLQKs7uJ7S" # #### Egalisation d'histogramme d'image N&B avec `cv2.equalizeHist` d'OpenCV : # + colab={} colab_type="code" id="R4rN2pS8uJ7U" 
outputId="64cbe42b-2be1-48a3-862f-8e780895fd3c" @interact def equalize(image=[f for f in os.listdir("img/") if os.path.isfile(os.path.join("img/", f))]): img = cv2.imread('img/' + image, 0) equ = cv2.equalizeHist(img) affichage2x2(img, equ) # + [markdown] colab_type="text" id="Z5SFo0NpuJ7Y" # On peut voir qu'après égalisation, l'histogramme est à la fois plus étendu et plus plat, et le cdf augmente linéairement. # Visuellement, le contraste des images à été amélioré: les zones sombres sont devenues plus sombres, et les zones claires plus claires. # + [markdown] colab_type="text" id="n42FzHBHuJ7a" # #### Egalisation d'histogramme d'image couleur avec OpenCV: # + [markdown] colab_type="text" id="f0r4k1j6uJ7h" # ### <span style="color:crimson">**[Exercice]** A vous de jouer:</span> # *** # <div style="color:DarkSlateBlue"> # # 1. **Rédigez un programme interactif qui permet de :** # * Choisir une image avec un menu déroulant # * Faire varier son niveau d'exposition avec un curseur # * Choisir une méthode d'ajustement de contraste (stretching / equalization) par menu déroulant et l'appliquer. # * Affiche les résultats sous forme d'une grille 2x2 # # # 2. **Implémentez une méthode permettant d'égaliser une image RGB en égalisant chacun des canaux séparément et l'ajouter aux choix de votre programme.** # # # 3. 
**Implémentez une méthode permettant d'égaliser une image couleur en la transposant dans un espace colorimétrique ou la valeur de luminance globale peut être modifiée directement.** # # <u>Astuces</u>: vous pouvez utiliser la notation vectorielle pour faciliter votre code : # ``` Python # imgYUV[:,:,0] = cv2.equalizeHist(imgYUV[:,:,0]) # ``` # # </div> # + colab={} colab_type="code" id="8DECyCmAuJ7i" outputId="e7f63f31-8e78-4125-d361-808b7dc202cf" # > Emplacement exercice < # Cette fonction prends une image RGB et égalise chaque canal séparément def equ_canaux(img): # TODO: code à compléter return equ # Cette fonction prends une image couleur et égalise sa luminance globale def equ_global(img): # TODO: code à compléter return equ # Fonction principale (qui permet l'interaction via les widgets de IPython) @interact def equ_interactif(): # TODO: code à compléter pass # + [markdown] colab_type="text" id="8HqHgvrduJ7m" # ## <span style="color: DodgerBlue;text-decoration: underline">II.5 Adaptative Histogram Equalization (AHE)</span> # *** # + [markdown] colab_type="text" id="gbZHA3-nuJ7n" # Parfois, dû à de grandes différences d'illumination entre différentes régions d'une même image, appliquer une égalisation globale de l'histogramme va améliorer le contraste de certaines régions, mais empirer celui d'autres. 
# # *Exemple:* # + colab={} colab_type="code" id="b1sqdn14uJ7o" outputId="309fc64d-ef5a-43b6-efc6-05d79565f816" import cv2 import matplotlib.pyplot as plt from IPython.display import display, Markdown img = cv2.imread('img/tsukuba.jpg', 0) equ = cv2.equalizeHist(img) plt.figure(figsize=(12, 6)) plt.subplot(121), plt.imshow(img, "gray"), plt.title("Image originale") plt.subplot(122), plt.imshow(equ, "gray"), plt.title("Image egalisée") plt.show() display(Markdown("Ici, on remarque que le contraste de l'étagère (*background*) " "est améliorée, mais celui du mannequin (*foreground*) s'est détérioré.")) # + [markdown] colab_type="text" id="p0s4tgacuJ7r" # **L'égalisation adaptative (AHE)** tente de remédier à ce problème découpant l'image en un **ensemble de sous-régions qui seront égalisées séparément**: on passe d'une égalisation globale à une **égalisation loacale** qui sera donc (potentiellement) plus respectueuse des variations locales de contraste dans l'image. # # Cependant, l'AHE "simple" à tendance à amplifier le bruit dans les régions relativement homogènes de l'image (là où la majorité des pixels ont des valeurs de luminance similaires, entrainant un pic important sur l'histogramme). # # **Contrast Limiting AHE (CLAHE)** est une extension de l'AHE qui permet de remédier à ce problème grâce au *clipping*. # L'algorithme fonctionne de la manière suivante: # * Découpe l'image en une grille de `(n*n)` régions et calcule l'histogramme de chaque région. # * *Clipping:* si une ou des valeurs d'intensité sont trop représentées (fréquence > seuil), une partie des pixels ayant ces luminances sont redistribués uniformément (seront réaffectés une nouvelle valeur de luminance). # * L'histogramme (éventuellement tronqué) de chaque sous-région est égalisé. 
# # <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/5/5f/Clahe-redist.svg/600px-Clahe-redist.svg.png"> # + colab={"referenced_widgets": ["d88266ff37544641835733e205280d05"]} colab_type="code" id="Vf3V31kEuJ7t" outputId="48d6eb9b-95c8-420c-c9c2-15dfaed0973b" import cv2 import numpy as np import matplotlib.pyplot as plt # %matplotlib inline @interact def clahe(image=[f for f in os.listdir("img/") if os.path.isfile(os.path.join("img/", f))], seuil_clip=(0, 1, 0.1), grid_size=(1,10,1)): img = cv2.imread('img/' + image, 0) clahe = cv2.createCLAHE(clipLimit=seuil_clip, tileGridSize=(grid_size,grid_size)) img_clahe = clahe.apply(img) affichage2x2(img, img_clahe) # + [markdown] colab_type="text" id="5UITM8-IuJ70" # # <span style="color: green;text-decoration: underline" id="3">III. Opérations non-linéaires sur la luminance</span> # *** # + [markdown] colab_type="text" id="OzR8Fu7LuJ71" # ## <span style="color: DodgerBlue;text-decoration: underline">III.1 Transformée logarithmique</span> # *** # + [markdown] colab_type="text" id="pOCB-Z0DuJ72" # La **transformation logarithmique** consiste à remplacer la luminance de chaque pixel par son logarithme. Elle permet d'ajuster les valeurs de luminance (et donc le contraste) de manière non-linéaire: les pixels sombres seront réhaussés bien plus que les pixels clairs. # # Cette transformation va donc transposer une gamme étroite d'intensités (faibles / sombres) de l'image d'entrée sur une gamme plus importante dans l'image de sortie, ce qui augmentera le contraste (et donc la quantité de détails visibles) spécifiquement dans les zones sombres. # # \begin{equation*} # I'(x,y) = T(r) = c * log(1 + I(x,y)) # \end{equation*} # # Avec: # * $I(x,y)$ et $I'(x,y)$ la valeur de luminance d'un pixel d'entrée et de sortie, respectivement. 
# * $c$ une constante de mise à l'échelle, définie par: $c = \dfrac{255}{log(1 + I_\max)}$ # # La formule peut être résumée en : # # \begin{equation*} # I'(x,y) = 255 * \dfrac{log(1 + I(x,y))}{log(1 + I_\max)} # \end{equation*} # + colab={} colab_type="code" id="2llsfkqMuJ73" ### Code utile pour cette section import os import cv2 import numpy as np import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline from ipywidgets import interact def affichage2x3(img1, img2, cs="gray"): fig = plt.figure(figsize=(20, 15)) ax1 = plt.subplot(231) ax1.imshow(img1, cs) ax1.set_title("Image égalisée") ax2 = plt.subplot(232) ax2 = sns.kdeplot(np.array(img1).ravel(), shade=True, kernel="gau", bw="scott", clip=[0,255], cut=0) ax2.set_title("Histogramme (KDE) de égalisée") ax3 = plt.subplot(233) ax3.hist(np.array(img1).ravel(), bins=256, density=True, histtype='stepfilled', cumulative=True, color='tab:orange', alpha=0.5) ax3.set_title("CDF de l'image égalisée") ax4 = plt.subplot(234) ax4.imshow(img2, cs) ax4.set_title("Image log-transformée") ax5 = plt.subplot(235, sharey=ax2) ax5 = sns.kdeplot(np.array(img2).ravel(), shade=True, kernel="gau", bw="scott", clip=[1,254], cut=0) ax5.set_title("Histogramme (KDE) de l'image log-transformée") ax6 = plt.subplot(236) ax6.hist(np.array(img2).ravel(), bins=256, density=True, histtype='stepfilled', cumulative=True, color='tab:orange', alpha=0.5) ax6.set_title("CDF de l'image log transformée") plt.show() # + colab={"referenced_widgets": ["b7a6f773072c474dac996256c752e2fe"]} colab_type="code" id="nkTP2fkwuJ78" outputId="7a5c8584-3857-4583-81a0-6c9c809a0cc6" @interact def log_transform(image=[f for f in os.listdir("img/") if os.path.isfile(os.path.join("img/", f))]): img = cv2.imread('img/' + image, 0) img = cv2.equalizeHist(img) img_log = 255 * (np.log(img + 1)/(np.log(1 + np.max(img)))) img_log = np.array(img_log, dtype="uint8") affichage2x3(img, img_log) # + [markdown] colab_type="text" id="1rzuQv_3uJ8B" # ## <span style="color: 
DodgerBlue;text-decoration: underline">III.2 Transformée Gamma</span>
# ***

# + [markdown] colab_type="text" id="DZUSK7bwuJ8E"
# La **correction Gamma (ou *Power Law transform*)** consiste à remplacer la luminance de chaque pixel par une puissance (gamma) de sa valeur. Cette transformation, souvent appliquée nativement par les écrans (avec différentes valeurs de gamma), permet de mieux faire correspondre les variations de luminance du capteur (qui a capturé l'image) à notre perception.
#
# *The reason we apply gamma correction is because our eyes perceive color and luminance differently than the sensors in a digital camera. When a sensor on a digital camera picks up twice the amount of photons, the signal is doubled. However, our eyes do not work like this. Instead, our eyes perceive "double the amount of light" as only a fraction brighter. Thus, while a digital camera has a linear relationship between brightness our eyes have a non-linear relationship. In order to account for this relationship we apply gamma correction.*
#
# La correction gamma est décrite par l'équation suivante:
#
# \begin{equation*}
# I'(x,y) = T(r) = c * \left( \frac{I(x,y)}{c} \right)^\gamma
# \end{equation*}
#
# Avec:
# * $I(x,y)$ et $I'(x,y)$ la valeur de luminance d'un pixel d'entrée et de sortie, respectivement.
# * $c$ une constante de mise à l'échelle, généralement égale à 255.
#
# On observera (d'après l'équation ci-dessus, avec $I(x,y)/c \in [0,1]$) :
# - `Gamma = 1` : image inchangée
# - `Gamma < 1` : image éclaircie
# - `Gamma > 1` : image assombrie

# + [markdown] colab_type="text" id="8OQq-iGmuJ8G"
# ### <span style="color:crimson">**[Exercice]** A vous de jouer:</span>
# ***
# <div style="color:DarkSlateBlue">
#
# 1. **Implémentez le code de la correction gamma.**
#
#
# 2. **Rédigez un programme interactif qui permet de :**
# * Choisir une image avec un menu déroulant
# * Choisir la valeur du paramètre $\gamma$ (de la correction éponyme) via un slider.
# * Affiche les résultats sous forme d'une grille 2x2: (Image log, Hist log + CDF) et (Image gamma, Hist gamma + CDF) # * Afficher le tracé de l'équation de transformation appliquée (mise à jour selon la valeur de $\gamma$) # # # <u>Astuces</u>: vous pouvez utiliser la notation vectorielle pour faciliter votre code : # ``` Python # x = np.linspace(0,255,100) # y = # TODO: Equation de la courbe # plt.plot(x, y) # ``` # # 3. **Modifiez votre code de sorte à ce qu'il soit appliquable à une image couleur.** # # </div> # + colab={} colab_type="code" id="QykKaFXquJ8I" # > Emplacement exercice < # + [markdown] colab_type="text" id="b3fBS5XvuJ8O" # <div style="color:Navy"> # # *** # # Fin du TP2 # *** # # </div>
TP2/IP - TP2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Deriving the transfer function of the Simper SVF filter structure # # HTML output built with: jupyter nbconvert --to html svf_z_domain_tf.ipynb # # Source: # https://cytomic.com/files/dsp/SvfLinearTrapOptimised2.pdf # # We will follow the second form of the algorithm, found on page 6. # # Sympy can't (very easily) be bent to display transfer functions in terms of $z^{-1}, z^{-2}, ...$ which is the convention. Plain $z$ will be used here instead - keep in mind it actually means $z^{-1}$. # + from sympy import * init_printing() z = symbols("z") # - # Start with the parameters. # # ``` # g = Tan[π * cutoff / samplerate]; # k = 1/Q = 2 - 2*res; # a1 = 1/(1 + g*(g + k)); # a2 = g*a1; # ``` # # The other coefficients defining the shape of the filter (`m0, m1, m2`) will be ignored for now, as they are only used to "mix" the output. # + g, k = symbols("g k") a1 = 1/(1 + g*(g + k)) a2 = g*a1 (a1, a2) # - # Then the computation. # # The variable `v0` represents the input signal - we will consider it to represent the z-transform of the input over time. `v1` and `v2` represent two other nodes in the block diagram. # # The state variables `ic1eq` and `ic2eq` will be defined as unknowns first, and then we will solve them using their equations. # # The relevant lines of the algorithm are: # # ``` # v1 = a1 * ic1eq + a2 * (v0 - ic2eq); # v2 = ic2eq + g * v1; # ``` # # Notice that the `ic1eq` and `ic2eq` actually refer to the _previous_ values of these samples. This corresponds to multiplying by $z$ (contrary to convetion!) in the z-domain. 
# +
v0, ic1eq, ic2eq = symbols("v0 ic_1 ic_2")

# Node values for one iteration; each trailing `z` factor marks a state
# variable read from the *previous* sample (see note above — `z` means z^-1).
v1 = a2 * (v0 - z * ic2eq) + a1 * z * ic1eq
v2 = z * ic2eq + g * v1

(v1, v2)
# -

# The "new" values for `ic1eq, ic2eq` are computed as follows:
#
# ```
# ic1eq = 2*v1 - ic1eq;
# ic2eq = 2*v2 - ic2eq;
# ```
#
# depending on the current values of `v1, v2`, and the previous values of `ic1eq, ic2eq`.
#
# Consider this as a system of equations, and solve it:

# +
# Each expression equals zero: new state minus (delayed) old state.
state_equations = [
    2 * v1 - (z * ic1eq + ic1eq),  # = 0
    2 * v2 - (z * ic2eq + ic2eq),  # = 0
]
solution = solve(state_equations, (ic1eq, ic2eq))
solution
# -

# We may now substitute the solution into `v1` and `v2` to obtain the transfer functions
#
# $$
# \begin{aligned}
# H_0(z) &= \frac {v_0(z)} {v_0(z)} = 1 \\
# H_1(z) &= \frac {v_1(z)} {v_0(z)} \\
# H_2(z) &= \frac {v_2(z)} {v_0(z)}
# \end{aligned}
# $$

# +
H0 = 1
# Normalize by the input and tidy up as polynomials in z.
H1 = collect(simplify(v1.subs(solution) / v0), z)
H2 = collect(simplify(v2.subs(solution) / v0), z)
(H1, H2)
# -

# How convenient, the denominators seem to be the same! That is to be expected of course,
# since taking linear combinations of $H_1, H_2$ cannot result in anything that has more than two poles, because the order of the system is 2.

# +
num_1, den_1 = fraction(H1)
num_2, den_2 = fraction(H2)
assert den_1 == den_2
common_denom = den_1
common_denom
# -

# We can now assemble the complete transfer function, taking into account the mix coefficients `m0, m1, m2`.
#
# $$
# H(z) = m_0 H_0(z) + m_1 H_1(z) + m_2 H_2(z)
# $$

# +
m0, m1, m2 = symbols("m0 m1 m2")
H = m0 * H0 + (m1 * num_1 + m2 * num_2) / common_denom
print(H)
H
# -

# ## Sanity check: High pass filter

# +
from sympy.functions import tan, exp

samplerate = 40_000
cutoff = sqrt(samplerate / 2)
Q = 0.9
f = symbols("f")

# Plug in concrete high-pass parameters and evaluate on the unit circle.
# Remember that `z` stands for z^-1 here, hence the final inversion.
H_hp_f = H.subs({
    g: tan(pi * cutoff / samplerate),
    k: 1 / Q,
    m0: 1,
    m1: -1 / Q,  # = -k
    m2: -1,
    z: exp(2 * I * pi * f / samplerate) ** -1,
})

plot(abs(H_hp_f), (f, 1, samplerate / 2), xscale='log', yscale='log')
svf_z_domain_tf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # April 2020 CME event # # another animation is done at # # https://github.com/helioforecast/Papers/blob/master/Moestl2020_PSP_rate/psp_3dcore.ipynb # + import sys import matplotlib import matplotlib.pyplot as plt import matplotlib.dates as mdates import numpy as np from datetime import timedelta import astropy import astropy.constants as const import time import pickle import seaborn as sns import os import urllib import json import warnings import importlib import heliopy.spice as spice import heliopy.data.spice as spicedata import multiprocessing import sunpy import sunpy.time from sunpy.time import parse_time import astropy.units as unit from astropy.coordinates import SkyCoord from sunpy.coordinates import frames import py3dcore import heliosat from mpl_toolkits.mplot3d import Axes3D from matplotlib.colors import LightSource from matplotlib.colors import ListedColormap from matplotlib import rc #Convert this notebook to a script with jupyter nbconvert --to script cme_rate.ipynb #os.system('jupyter nbconvert --to script psp_3dcore.ipynb') # #%matplotlib inline #matplotlib.use('Qt5Agg') #matplotlib.use('Agg') #warnings.filterwarnings('ignore') # some numpy mean-of-empty-slice runtime warnings ########### make directories first time resdir='results' if os.path.isdir(resdir) == False: os.mkdir(resdir) datadir='data' if os.path.isdir(datadir) == False: os.mkdir(datadir) outputdirectory='results/plots' if os.path.isdir(outputdirectory) == False: os.mkdir(outputdirectory) animdirectory='results/animations' if os.path.isdir(animdirectory) == False: os.mkdir(animdirectory) #rc('text', usetex=True) #matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}'] #load solar orbiter data (made with heliocats) filemag='data/solo_april2020.p' 
sm=pickle.load(open(filemag, "rb" ) ) filewin='data/wind_2018_now_gse.p' [win,hwin]=pickle.load(open(filewin, "rb" ) ) #set wind awkward data points to nan windspike_ind=np.where(win.time > parse_time('2020-04-20 17:08').datetime)[0] win.bt[windspike_ind[0:2]]=np.nan win.bx[windspike_ind[0:2]]=np.nan win.by[windspike_ind[0:2]]=np.nan win.bz[windspike_ind[0:2]]=np.nan #set wind awkward data points to nan windspike_ind=np.where(win.time > parse_time('2020-04-21 01:20').datetime)[0] win.bt[windspike_ind[0:1]]=np.nan win.bx[windspike_ind[0:1]]=np.nan win.by[windspike_ind[0:1]]=np.nan win.bz[windspike_ind[0:1]]=np.nan #load position data (made with heliocats) [psp, bepi, solo, sta, earth, venus, mars, mercury,frame]= \ pickle.load( open( 'data/positions_psp_solo_bepi_sta_planets_HEEQ_1hour.p', "rb" )) #read data from Johan sopo_nm=pickle.load( open( 'data/epd/sopo_nm_data.pickle', "rb" )) het_gcr=pickle.load( open( 'data/epd/het_gcr_data.pickle', "rb" )) crater=pickle.load( open( 'data/epd/crater_data.pickle', "rb" )) #crater['CRaTER count rate'].values # + ############### Model Settings t_launch = datetime.datetime(2020, 4, 15, 4) #t_launch=obj.t_launch #get model parameters from ABC fit analysis obj = py3dcore.abc.smc.ABCSMC() obj.load('data/APRIL_PARAMS.DAT') #parameters are in obj.particles lon_hci=np.mean(obj.particles[:,1]) lat_hci=np.mean(obj.particles[:,2]) #set coordinates as HCI direction_hci = SkyCoord(lon_hci*unit.deg, lat_hci*unit.deg, frame=frames.HeliocentricInertial,obstime=t_launch) # convert HCI longitude latitude to HEEQ = HeliographicStonyhurst direction_heeq=direction_hci.transform_to(frames.HeliographicStonyhurst) iparams_arr = np.array([[ np.mean(obj.particles[:,0]), # time offset direction_heeq.lon.value, # l_1 (longitude) ********************* direction_heeq.lat.value, # l_2 (latitude) np.mean(obj.particles[:,3]), # o (inclination, orientation) np.mean(obj.particles[:,4]), # d_1au (frontal width at 1AU) np.mean(obj.particles[:,5]), # delta 
(cross-section aspect ratio) np.mean(obj.particles[:,6]), # r_0 (initialization distance in solar radii) np.mean(obj.particles[:,7]), # v_0 (initial velocty in) np.mean(obj.particles[:,8]), # tau (magnetic field twist) np.mean(obj.particles[:,9]), # b_s (magnetic field scaling parameter) np.mean(obj.particles[:,10]), # b_1au (magnetic field strength at 1au) np.mean(obj.particles[:,11]), # Gamma (solar wind drag coefficient) np.mean(obj.particles[:,12]), # v_sw (solar wind speed) 0,#np.mean(obj.particles[:,13]) ]], dtype=np.float32) model_obj = py3dcore.models.ThinTorusGH3DCOREModel(t_launch, runs=1, use_gpu=False) model_obj.update_iparams(iparams_arr, seed=42) #generic 3DCORE parameters, no fit #iparams_arr2 = np.array([[ # 0, # time offset # 0, # l_1 (longitude) # 0, # l_2 (latitude) # 0, # o (inclination, orientation) # 0.20, # d_1au (frontal width at 1AU) # 4, # delta (cross-section aspect ratio) # 5, # r_0 (initialization distance in solar radii) # 350, # v_0 (initial velocty in) # -2, # tau (magnetic field twist) # 1, # b_s (magnetic field scaling parameter) # 15, # b_1au (magnetic field strength at 1au) # 1.5, # Gamma (solar wind drag coefficient) # 300, # v_sw (solar wind speed) # 0 # sigma (measurement noise) #]], dtype=np.float32) TP_A = t_launch + datetime.timedelta(hours=24) TP_B = t_launch + datetime.timedelta(hours=72) C_A = "xkcd:red" C_B = "xkcd:blue" C0 = "xkcd:black" C1 = "xkcd:magenta" C2 = "xkcd:orange" C3 = "xkcd:azure" print('3DCORE parameters in HEEQ') for i in np.arange(0,14): print(np.round(iparams_arr[0,i],3)) # - # ### functions # + def measure(obj, sat, t0, t1, frame="HEEQ", bframe="HEEQ", satparams=None): if satparams: inst = getattr(heliosat, sat)(satparams) else: inst = getattr(heliosat, sat)() #time resolution in seconds #t_s = [datetime.datetime.fromtimestamp(_) for _ in np.array(list(range(int(t0.timestamp()), int(t1.timestamp()))))] #time resolution in minutes res_in_days=1/(24*60.) 
t_s = [] while t0 < t1: t_s.append(t0) t0 += timedelta(days=res_in_days) #print('data points',len(t_s)) o_s = inst.trajectory(t_s, frame=frame) if satparams: b = heliosat.spice.transform_frame([satparams] * len(t_s), np.array(obj.sim_fields(t_s, o_s))[:, 0, :], frame, bframe) else: b = heliosat.spice.transform_frame(t_s, np.array(obj.sim_fields(t_s, o_s))[:, 0, :], frame, bframe) b[b == 0] = np.nan return t_s, np.sqrt(np.sum(b**2, axis=1)), b #define sun here so it does not need to be recalculated every time scale=695510/149597870.700*10 #Rs in km, AU in km # sphere with radius Rs in AU u, v = np.mgrid[0:2*np.pi:40j, 0:np.pi:30j] x = np.cos(u)*np.sin(v)*scale y = np.sin(u)*np.sin(v)*scale z = np.cos(v)*scale def plot_configure(ax, **kwargs): view_azim = kwargs.pop("view_azim", -25) view_elev = kwargs.pop("view_elev", 25) view_radius = kwargs.pop("view_radius", .5) ax.view_init(azim=view_azim, elev=view_elev) ax.set_xlim([-view_radius, view_radius]) ax.set_ylim([-view_radius, view_radius]) #adjust scaling as matplotlib lacks automatic aspect ratio setting ax.set_zlim([-view_radius*0.75, view_radius*0.75]) #solar equatorial plane for p in np.arange(-2,2,0.2): ax.plot([-2,2],[p,p],[0,0],lw=0.5,color='black', alpha=0.2,linestyle='--',zorder=0 ) ax.plot([p,p],[-2,2],[0,0],lw=0.5,color='black', alpha=0.2,linestyle='--',zorder=0 ) #draw sun ls = LightSource(azdeg=140, altdeg=40) ax.plot_surface(x, y, z, rstride=1, cstride=1, color='yellow',lightsource=ls, linewidth=0, antialiased=False,zorder=5) ax.set_axis_off() def plot_3dcore(ax, obj, t_snap, **kwargs): kwargs["alpha"] = kwargs.pop("alpha", .12) kwargs["color"] = kwargs.pop("color", "k") kwargs["lw"] = kwargs.pop("lw", 1) model_obj.propagate(t_snap) wf_model = model_obj.visualize_wireframe(index=0) ax.plot_wireframe(*wf_model.T, **kwargs,zorder=3) def plot_3dcore_field(ax, obj, steps=500, step_size=0.005, **kwargs): q0 = kwargs.get("q0", np.array([1, .1, np.pi/2], dtype=np.float32)).astype(np.float32) fl = 
obj.visualize_fieldline(q0, steps=steps, step_size=step_size) ax.plot(*fl.T, **kwargs) def plot_traj(ax, sat, t_snap, frame="HEEQ", traj_pos=True, traj_major=4, traj_minor=None, **kwargs): kwargs["alpha"] = kwargs.pop("alpha", 1) kwargs["color"] = kwargs.pop("color", "k") kwargs["lw"] = kwargs.pop("lw", 1) kwargs["s"] = kwargs.pop("s", 25) inst = getattr(heliosat, sat)() _s = kwargs.pop("s") if traj_pos: pos = inst.trajectory(t_snap, frame) ax.scatter(*pos.T, s=_s, **kwargs) if traj_major and traj_major > 0: traj = inst.trajectory([t_snap + datetime.timedelta(hours=i) for i in range(-traj_major, traj_major)], frame) #ax.plot(*traj.T, **kwargs) if traj_minor and traj_minor > 0: traj = inst.trajectory([t_snap + datetime.timedelta(hours=i) for i in range(-traj_minor, traj_minor)], frame) if "ls" in kwargs: kwargs.pop("ls") _ls = "--" _lw = kwargs.pop("lw") / 2 ax.plot(*traj.T, ls=_ls, lw=_lw, **kwargs) def plot_shift(axis,extent,cx,cy,cz): #shift center of plot axis.set_xbound(cx-extent, cx+extent) axis.set_ybound(cy-extent, cy+extent) axis.set_zbound(cz-extent*0.75, cz+extent*0.75) # - # ### measure fields #t1, btot1, bxyz1 = measure(model_obj, "PSP", t_launch, TP_A + datetime.timedelta(hours=6), frame="HEEQ", bframe="HEEQ") #t2, btot2, bxyz2 = measure(model_obj, "PSP", t_launch, TP_B + datetime.timedelta(hours=12), frame="HEEQ", bframe="HEEQ") t1, btot1, bxyz1 = measure(model_obj, "SOLO", t_launch, TP_B + datetime.timedelta(hours=96), frame="HEEQ", bframe="SOLO_SUN_RTN") t2, btot2, bxyz2 = measure(model_obj, "Earth", t_launch, TP_B + datetime.timedelta(hours=96), frame="HEEQ", bframe="HEE") # ### simple position plot # + sns.set_style('whitegrid') fig = plt.figure(1,figsize=(19.2, 10.8),dpi=100) #define subplot grid ax1 = plt.subplot2grid((1, 1), (0, 0),projection='3d') ######### tilted view plot_configure(ax1, view_azim=270, view_elev=90, view_radius=.8) #impact time of CME at solar orbiter 2020-04-19 05:07 UT ti = datetime.datetime(2020, 4, 19, 5,7,0) 
#plot_3dcore(ax1, model_obj, TP_B, color=C_B) #plot_3dcore_field(ax1, model_obj, color=C_A, steps=400, step_size=0.0005, lw=1.0, ls="-") plot_traj(ax1, "Earth", ti, frame="HEEQ", color='mediumseagreen') plot_traj(ax1, "Venus", ti, frame="HEEQ", color='orange') plot_traj(ax1, "Mercury", ti, frame="HEEQ", color='grey') plot_traj(ax1, "SOLO", ti, frame="HEEQ", color='coral') plot_traj(ax1, "PSP", ti, frame="HEEQ", color='black') plot_traj(ax1, "STA", ti, frame="HEEQ", color='red') plot_traj(ax1, "BEPI", ti, frame="HEEQ", color='blue') frame_time_num=mdates.date2num(ti) print( 'current frame_time_num', str(mdates.num2date(frame_time_num))) #these have their own times dct=frame_time_num-(psp.time+ mdates.date2num(np.datetime64('0000-12-31'))) psp_timeind=np.argmin(abs(dct)) dct=frame_time_num-(bepi.time+ mdates.date2num(np.datetime64('0000-12-31'))) bepi_timeind=np.argmin(abs(dct)) dct=frame_time_num-(solo.time+ mdates.date2num(np.datetime64('0000-12-31'))) solo_timeind=np.argmin(abs(dct)) dct=frame_time_num-(sta.time+ mdates.date2num(np.datetime64('0000-12-31'))) sta_timeind=np.argmin(abs(dct)) #all same times dct=frame_time_num-(earth.time+ mdates.date2num(np.datetime64('0000-12-31'))) earth_timeind=np.argmin(abs(dct)) fsize=15 #spacecraft labels ax1.text(psp.x[psp_timeind],psp.y[psp_timeind]+0.07,psp.z[psp_timeind],'Parker Solar Probe', color='black', ha='center',fontsize=fsize-4,verticalalignment='top') ax1.text(solo.x[solo_timeind]-0.15,solo.y[solo_timeind]+0.07,solo.z[solo_timeind],'Solar Orbiter', color='coral', ha='center',fontsize=fsize-4,verticalalignment='top') ax1.text(bepi.x[bepi_timeind]+0.05,bepi.y[bepi_timeind]-0.05,bepi.z[bepi_timeind],'Bepi Colombo', color='blue', ha='center',fontsize=fsize-4,verticalalignment='top') #ax1.text(bepi.x[bepi_timeind]-0.16,bepi.y[bepi_timeind],bepi.z[solo_timeind],'Colombo', color='blue', ha='center',fontsize=fsize-4,verticalalignment='top') ax1.text(sta.x[sta_timeind],sta.y[sta_timeind]+0.09,sta.z[sta_timeind],'STEREO-A', 
color='red', ha='center',fontsize=fsize-4,verticalalignment='top') ax1.text(earth.x[earth_timeind],earth.y[earth_timeind]+0.08,earth.z[earth_timeind],'Earth', color='mediumseagreen', ha='center',fontsize=fsize-4,verticalalignment='top') ax1.text(venus.x[earth_timeind],venus.y[earth_timeind]+0.08,venus.z[earth_timeind],'Venus', color='orange', ha='center',fontsize=fsize-4,verticalalignment='top') ax1.text(mercury.x[earth_timeind],mercury.y[earth_timeind]+0.07,mercury.z[earth_timeind],'Mercury', color='grey', ha='center',fontsize=fsize-4,verticalalignment='top') #1 AU circle p=np.arange(0,2*np.pi,0.01) r1=1.0 ax1.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) r1=0.8 ax1.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) r1=0.6 ax1.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) r1=0.4 ax1.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) r1=0.2 ax1.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) a_circle = plt.Circle((0, 0), .5,fill=False,zorder=3,linestyle='-') ax1.add_artist(a_circle) plt.savefig('results/plots/positions_solo_apr2020_event.png', dpi=100,bbox_inches='tight') #plt.savefig('results/plots/positions_solo_apr2020_event_tilt.png', dpi=100,bbox_inches='tight') # - # ## model plot # + #define sun here so it does not need to be recalculated every time scale=695510/149597870.700*1 #Rs in km, AU in km # sphere with radius Rs in AU u, v = np.mgrid[0:2*np.pi:40j, 0:np.pi:30j] x = np.cos(u)*np.sin(v)*scale y = np.sin(u)*np.sin(v)*scale z = np.cos(v)*scale def plot_configure2(ax, **kwargs): view_azim = kwargs.pop("view_azim", -25) view_elev = kwargs.pop("view_elev", 25) view_radius = kwargs.pop("view_radius", .5) ax.view_init(azim=view_azim, elev=view_elev) ax.set_xlim([-view_radius, view_radius]) ax.set_ylim([-view_radius, view_radius]) #adjust scaling as 
matplotlib lacks automatic aspect ratio setting ax.set_zlim([-view_radius*0.75, view_radius*0.75]) #solar equatorial plane for p in np.arange(-2,2,0.2): ax.plot([-2,2],[p,p],[0,0],lw=0.5,color='black', alpha=0.2,linestyle='--',zorder=0 ) ax.plot([p,p],[-2,2],[0,0],lw=0.5,color='black', alpha=0.2,linestyle='--',zorder=0 ) #draw sun ls = LightSource(azdeg=140, altdeg=40) ax.plot_surface(x, y, z, rstride=1, cstride=1, color='yellow',lightsource=ls, linewidth=0, antialiased=False,zorder=5) #1 AU circle p=np.arange(0,2*np.pi,0.01) r1=1.0 ax.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) ax.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) ax.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) ax.set_axis_off() sns.set_style('whitegrid') fig = plt.figure(1,figsize=(19.2, 10.8),dpi=100) #define subplot grid ax1 = plt.subplot2grid((4, 3), (0, 0),rowspan=2,colspan=2,projection='3d') ax2 = plt.subplot2grid((4, 3), (0, 2),projection='3d',rowspan=2) ax3 = plt.subplot2grid((4, 3), (2, 2),projection='3d') ax4 = plt.subplot2grid((4, 3), (2, 0)) ax5 = plt.subplot2grid((4, 3), (3, 0)) ax6 = plt.subplot2grid((4, 3), (3, 1)) ax7 = plt.subplot2grid((4, 3), (3, 2)) #manually set axes positions #https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.set_position.html#matplotlib.axes.Axes.set_position ax1.set_position([0,0.2,0.6,0.7], which='both') ax2.set_position([0.65,0.35,0.35,0.65], which='both') ax3.set_position([0.6,0,0.4,0.4], which='both') ax4.set_position([0.05,0.19,0.29,0.15], which='both') ax5.set_position([0.05,0.03,0.29,0.15], which='both') ax6.set_position([0.40,0.19,0.27,0.15], which='both') ax7.set_position([0.40,0.03,0.27,0.15], which='both') ######### tilted view plot_configure2(ax1, view_azim=-30, view_elev=30, view_radius=.2) plot_3dcore(ax1, model_obj, TP_B, color=C_B) #plot_3dcore_field(ax1, model_obj, color=C_A, steps=400, step_size=0.0005, 
lw=1.0, ls="-") plot_traj(ax1, "Earth", TP_B, frame="HEEQ", color='mediumseagreen') plot_traj(ax1, "SOLO", TP_B, frame="HEEQ", color='coral') plot_traj(ax1, "Venus", TP_B, frame="HEEQ", color='orange') plot_traj(ax1, "Mercury", TP_B, frame="HEEQ", color='grey') plot_traj(ax1, "PSP", TP_B, frame="HEEQ", color='black') plot_traj(ax1, "STA", TP_B, frame="HEEQ", color='red') plot_traj(ax1, "BEPI", TP_B, frame="HEEQ", color='blue') #plot_3dcore(ax1, model_obj, TP_B, color=C_B) #plot_3dcore_field(ax1, model_obj, color=C_B, steps=900, step_size=0.001, lw=1.0, ls="-")# #plot_traj(ax1, "Earth", TP_B, frame="ECLIPJ2000", color=C_B,lw=1.5) #dotted trajectory #plot_traj(ax1, "Earth", TP_B, frame="ECLIPJ2000", color="k", traj_pos=False, traj_major=None, traj_minor=144,lw=1.5) #shift center plot_shift(ax1,0.3,0.6,0.0,0) ########### top view panel plot_configure2(ax2, view_azim=0, view_elev=90, view_radius=.4) #plot_traj(ax2, "PSP", TP_B, frame="ECLIPJ2000", color="k", traj_pos=False, traj_major=None, traj_minor=144,lw=1.5) plot_3dcore(ax2, model_obj, TP_B, color=C_B) plot_traj(ax2, "Earth", TP_B, frame="HEEQ", color='mediumseagreen') plot_traj(ax2, "SOLO", TP_B, frame="HEEQ", color='coral') plot_traj(ax2, "Venus", TP_B, frame="HEEQ", color='orange') plot_traj(ax2, "Mercury", TP_B, frame="HEEQ", color='grey') plot_traj(ax2, "PSP", TP_B, frame="HEEQ", color='black') plot_traj(ax2, "STA", TP_B, frame="HEEQ", color='red') plot_traj(ax2, "BEPI", TP_B, frame="HEEQ", color='blue') #plot_3dcore_field(ax2, model_obj, color=C_A, steps=400, step_size=0.0005, lw=1.0, ls="-") #plot_traj(ax2, "PSP", TP_A, frame="ECLIPJ2000", color=C_A) #plot_3dcore(ax2, model_obj, TP_B, color=C_B) #plot_3dcore_field(ax2, model_obj, color=C_B, steps=900, step_size=0.001, lw=1.0, ls="-") #plot_traj(ax2, "PSP", TP_B, frame="ECLIPJ2000", color=C_B,lw=1.5) plot_shift(ax2,0.5,0.7,0.08,0.0) ############### edge on view panel plot_configure2(ax3, view_azim=-90, view_elev=0, view_radius=.4) plot_3dcore(ax3, model_obj, 
TP_B, color=C_B) plot_traj(ax3, "Earth", TP_B, frame="HEEQ", color='mediumseagreen') plot_traj(ax3, "SOLO", TP_B, frame="HEEQ", color='coral') plot_traj(ax3, "Venus", TP_B, frame="HEEQ", color='orange') plot_traj(ax3, "Mercury", TP_B, frame="HEEQ", color='grey') plot_traj(ax3, "PSP", TP_B, frame="HEEQ", color='black') plot_traj(ax3, "STA", TP_B, frame="HEEQ", color='red') plot_traj(ax3, "BEPI", TP_B, frame="HEEQ", color='blue') #plot_traj(ax3, "PSP", TP_B, frame="ECLIPJ2000", color="k", traj_pos=False, traj_major=None, traj_minor=144,lw=1.5) #plot_3dcore(ax3, model_obj, TP_A, color=C_A) #plot_3dcore_field(ax3, model_obj, color=C_A, steps=400, step_size=0.0005, lw=1.0, ls="-") #plot_traj(ax3, "PSP", TP_A, frame="ECLIPJ2000", color=C_A) #plot_3dcore(ax3, model_obj, TP_B, color=C_B) #plot_3dcore_field(ax3, model_obj, color=C_B, steps=900, step_size=0.001, lw=1.0, ls="-") #plot_traj(ax3, "PSP", TP_B, frame="ECLIPJ2000", color=C_B,lw=1.0) plot_shift(ax3,0.3,+0.5,-.5,0.0) ############################## magnetic field panel #simtime1=np.round((parse_time(t3).plot_date-parse_time(t_launch).plot_date)*24,4) #frametime=np.round((parse_time(TP_B).plot_date-parse_time(t_launch).plot_date)*24,1) # ------------------ Solar Orbiter setalpha=0.5 ax4.plot(t1, btot1, color=C0, label="$|B|$") ax4.plot(t1, bxyz1[:, 0], color=C1, label="$B_R$") ax4.plot(t1, bxyz1[:, 1], color=C2, label="$B_T$") ax4.plot(t1, bxyz1[:, 2], color=C3, label="$B_N$") ax4.plot(sm.time, sm.bt, color=C0,lw=0.5,alpha=setalpha) ax4.plot(sm.time, sm.bx, color=C1,lw=0.5,alpha=setalpha) ax4.plot(sm.time, sm.by, color=C2,lw=0.5,alpha=setalpha) ax4.plot(sm.time, sm.bz, color=C3,lw=0.5,alpha=setalpha) ax4.legend(loc="lower right", fontsize=12,ncol=1,edgecolor='white') ax4.set_ylabel('Solar Orbiter B [nT] RTN') ax4.set_xlim(datetime.datetime(2020,4,18,12,0),datetime.datetime(2020,4,22,0,0)) ax4.set_ylim(-25,25) ax4.set_xticklabels([]) # ---------------------- Wind ax5.plot(t2, btot2, color=C0, label="$|B|$") 
ax5.plot(t2, bxyz2[:, 0], color=C1, label="$B_R$") ax5.plot(t2, bxyz2[:, 1], color=C2, label="$B_T$") ax5.plot(t2, bxyz2[:, 2], color=C3, label="$B_N$") ax5.plot(win.time, win.bt, color=C0,lw=0.5,alpha=setalpha) ax5.plot(win.time, -win.bx, color=C1,lw=0.5,alpha=setalpha) ax5.plot(win.time, -win.by, color=C2,lw=0.5,alpha=setalpha) ax5.plot(win.time, win.bz, color=C3,lw=0.5,alpha=setalpha) ax5.set_ylabel('Wind B [nT] HEE') ax5.set_xlim(datetime.datetime(2020,4,18,12,0),datetime.datetime(2020,4,22,0,0)) ax5.set_ylim(-25,25) #ax5.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%b %d %Hh')) # ---------------------- EPD ax6.plot(het_gcr) ax6.set_xticklabels([]) ax6.set_xlim(datetime.datetime(2020,4,18,12,0),datetime.datetime(2020,4,22,0,0)) ax6.set_ylabel('EPD/HET $[s^{-1}]$') # ------------------------- south pole ax7.plot(sopo_nm/60) #ax7.plot(crater) ax7.set_xlim(datetime.datetime(2020,4,18,12,0),datetime.datetime(2020,4,22,0,0)) ax7.set_ylabel('South Pole $[s^{-1}]$') #ax4.set_xlim(datetime.datetime(2020,6,1,23,0),datetime.datetime(2022,6,3,4,0)) #line at current time #ax4.plot([frametime[k],frametime[k]], [-2000,2000], color='black',linewidth=1,alpha=0.8) #ax4.set_xlabel('hours since launch time') #ax4.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(5)) #ax4.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(1)) #ax4.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(500)) #ax4.set_xlim(0,30) #ax4.grid(True) #write hours since launch time #plt.annotate('$t_{launch}$ +',[0.45,0.15],ha='center',xycoords='figure fraction',fontsize=20) #plt.annotate(str(frametime),[0.5,0.15],ha='center',xycoords='figure fraction',fontsize=20) #plt.annotate('hours',[0.54,0.15],ha='center',xycoords='figure fraction',fontsize=20) #1 AU circle p=np.arange(0,2*np.pi,0.01) r1=1.0 ax1.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) ax2.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', 
alpha=0.5,linestyle='--',zorder=0 ) ax3.plot(r1*np.sin(p),r1*np.cos(p),0,lw=0.5,color='black', alpha=0.5,linestyle='--',zorder=0 ) #panel labels plt.annotate('(a)',[0.00,0.93],xycoords='figure fraction',fontsize=20) plt.annotate('(b)',[0.69,0.93],xycoords='figure fraction',fontsize=20) plt.annotate('(c)',[0.69,0.32],xycoords='figure fraction',fontsize=20) plt.annotate('(d)',[0.00,0.32],xycoords='figure fraction',fontsize=20) plt.annotate('(e)',[0.345,0.32],xycoords='figure fraction',fontsize=20) #plt.annotate(parse_time(t_launch).iso[0:16],[0.55,0.35],ha='center',xycoords='figure fraction',fontsize=20) #plt.savefig('results/plots/visual_solo1.pdf',bbox_inches='tight') plt.savefig('results/plots/visual_solo_2.png', dpi=100)#,bbox_inches='tight') # - # # Animation # + sns.set_style('whitegrid') sns.set_style("ticks",{'grid.linestyle': '--'}) def make_frame(k): sns.set_style('whitegrid') fig = plt.figure(1,figsize=(19.2, 10.8),dpi=100) #define subplot grid ax1 = plt.subplot2grid((4, 3), (0, 0),rowspan=2,colspan=2,projection='3d') ax2 = plt.subplot2grid((4, 3), (0, 2),projection='3d',rowspan=2) ax3 = plt.subplot2grid((4, 3), (2, 2),projection='3d') ax4 = plt.subplot2grid((4, 3), (2, 0)) ax5 = plt.subplot2grid((4, 3), (3, 0)) ax6 = plt.subplot2grid((4, 3), (3, 1)) ax7 = plt.subplot2grid((4, 3), (3, 2)) #manually set axes positions #https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.set_position.html#matplotlib.axes.Axes.set_position ax1.set_position([0,0.2,0.6,0.7], which='both') ax2.set_position([0.65,0.35,0.35,0.65], which='both') ax3.set_position([0.6,0,0.4,0.4], which='both') ax4.set_position([0.05,0.19,0.29,0.15], which='both') ax5.set_position([0.05,0.03,0.29,0.15], which='both') ax6.set_position([0.40,0.19,0.27,0.15], which='both') ax7.set_position([0.40,0.03,0.27,0.15], which='both') ######### tilted view plot_configure2(ax1, view_azim=-30, view_elev=30, view_radius=.2) plot_3dcore(ax1, model_obj, tlist[k], color=C_B) #plot_3dcore_field(ax1, model_obj, 
color=C_A, steps=400, step_size=0.0005, lw=1.0, ls="-") #plot_traj(ax1, "Earth", tlist[k], frame="HEEQ", color='mediumseagreen') #plot_traj(ax1, "SOLO", tlist[k], frame="HEEQ", color='coral') #plot_traj(ax1, "Venus", tlist[k], frame="HEEQ", color='orange') #plot_traj(ax1, "Mercury", tlist[k], frame="HEEQ", color='grey') #plot_traj(ax1, "PSP", tlist[k], frame="HEEQ", color='black') #plot_traj(ax1, "STA", tlist[k], frame="HEEQ", color='red') #plot_traj(ax1, "BEPI", tlist[k], frame="HEEQ", color='blue') frame_time_num=mdates.date2num( tlist[k]) #print( 'current frame_time_num', str(mdates.num2date(frame_time_num))) #these have their own times dct=frame_time_num-(psp.time+ mdates.date2num(np.datetime64('0000-12-31'))) psp_timeind=np.argmin(abs(dct)) dct=frame_time_num-(bepi.time+ mdates.date2num(np.datetime64('0000-12-31'))) bepi_timeind=np.argmin(abs(dct)) dct=frame_time_num-(solo.time+ mdates.date2num(np.datetime64('0000-12-31'))) solo_timeind=np.argmin(abs(dct)) dct=frame_time_num-(sta.time+ mdates.date2num(np.datetime64('0000-12-31'))) sta_timeind=np.argmin(abs(dct)) #all same times dct=frame_time_num-(earth.time+ mdates.date2num(np.datetime64('0000-12-31'))) earth_timeind=np.argmin(abs(dct)) ax1.scatter(psp.x[psp_timeind],psp.y[psp_timeind],psp.z[psp_timeind],color='black') ax1.scatter(solo.x[solo_timeind],solo.y[solo_timeind],solo.z[solo_timeind],color='coral') ax1.scatter(bepi.x[bepi_timeind],bepi.y[bepi_timeind],bepi.z[bepi_timeind],color='blue') ax1.scatter(sta.x[sta_timeind],sta.y[sta_timeind],sta.z[sta_timeind],color='red',) ax1.scatter(earth.x[earth_timeind],earth.y[earth_timeind],earth.z[earth_timeind], color='mediumseagreen') ax1.scatter(venus.x[earth_timeind],venus.y[earth_timeind],venus.z[earth_timeind], color='orange') ax1.scatter(mercury.x[earth_timeind],mercury.y[earth_timeind],mercury.z[earth_timeind],color='grey') #plot_3dcore(ax1, model_obj, tlist[k], color=C_B) #plot_3dcore_field(ax1, model_obj, color=C_B, steps=900, step_size=0.001, lw=1.0, 
ls="-")# #plot_traj(ax1, "Earth", tlist[k], frame="ECLIPJ2000", color=C_B,lw=1.5) #dotted trajectory #plot_traj(ax1, "Earth", tlist[k], frame="ECLIPJ2000", color="k", traj_pos=False, traj_major=None, traj_minor=144,lw=1.5) #shift center plot_shift(ax1,0.3,0.6,0.0,0) ########### top view panel plot_configure2(ax2, view_azim=0, view_elev=90, view_radius=.4) #plot_traj(ax2, "PSP", tlist[k], frame="ECLIPJ2000", color="k", traj_pos=False, traj_major=None, traj_minor=144,lw=1.5) plot_3dcore(ax2, model_obj, tlist[k], color=C_B) #plot_traj(ax2, "Earth", tlist[k], frame="HEEQ", color='mediumseagreen') #plot_traj(ax2, "SOLO", tlist[k], frame="HEEQ", color='coral') #plot_traj(ax2, "Venus", tlist[k], frame="HEEQ", color='orange') #plot_traj(ax2, "Mercury", tlist[k], frame="HEEQ", color='grey') #plot_traj(ax2, "PSP", tlist[k], frame="HEEQ", color='black') #plot_traj(ax2, "STA", tlist[k], frame="HEEQ", color='red') #plot_traj(ax2, "BEPI", tlist[k], frame="HEEQ", color='blue') ax2.scatter(psp.x[psp_timeind],psp.y[psp_timeind],psp.z[psp_timeind],color='black') ax2.scatter(solo.x[solo_timeind],solo.y[solo_timeind],solo.z[solo_timeind],color='coral') ax2.scatter(bepi.x[bepi_timeind],bepi.y[bepi_timeind],bepi.z[bepi_timeind],color='blue') ax2.scatter(sta.x[sta_timeind],sta.y[sta_timeind],sta.z[sta_timeind],color='red',) ax2.scatter(earth.x[earth_timeind],earth.y[earth_timeind],earth.z[earth_timeind], color='mediumseagreen') ax2.scatter(venus.x[earth_timeind],venus.y[earth_timeind],venus.z[earth_timeind], color='orange') ax2.scatter(mercury.x[earth_timeind],mercury.y[earth_timeind],mercury.z[earth_timeind],color='grey') #plot_3dcore_field(ax2, model_obj, color=C_A, steps=400, step_size=0.0005, lw=1.0, ls="-") #plot_traj(ax2, "PSP", TP_A, frame="ECLIPJ2000", color=C_A) #plot_3dcore(ax2, model_obj, tlist[k], color=C_B) #plot_3dcore_field(ax2, model_obj, color=C_B, steps=900, step_size=0.001, lw=1.0, ls="-") #plot_traj(ax2, "PSP", tlist[k], frame="ECLIPJ2000", color=C_B,lw=1.5) 
plot_shift(ax2,0.5,0.7,0.08,0.0) ############### edge on view panel plot_configure2(ax3, view_azim=-90, view_elev=0, view_radius=.4) plot_3dcore(ax3, model_obj, tlist[k], color=C_B) #plot_traj(ax3, "Earth", tlist[k], frame="HEEQ", color='mediumseagreen') #plot_traj(ax3, "SOLO", tlist[k], frame="HEEQ", color='coral') #plot_traj(ax3, "Venus", tlist[k], frame="HEEQ", color='orange') #plot_traj(ax3, "Mercury", tlist[k], frame="HEEQ", color='grey') #plot_traj(ax3, "PSP", tlist[k], frame="HEEQ", color='black') #plot_traj(ax3, "STA", tlist[k], frame="HEEQ", color='red') #plot_traj(ax3, "BEPI", tlist[k], frame="HEEQ", color='blue') ax3.scatter(psp.x[psp_timeind],psp.y[psp_timeind],psp.z[psp_timeind],color='black') ax3.scatter(solo.x[solo_timeind],solo.y[solo_timeind],solo.z[solo_timeind],color='coral') ax3.scatter(bepi.x[bepi_timeind],bepi.y[bepi_timeind],bepi.z[bepi_timeind],color='blue') ax3.scatter(sta.x[sta_timeind],sta.y[sta_timeind],sta.z[sta_timeind],color='red',) ax3.scatter(earth.x[earth_timeind],earth.y[earth_timeind],earth.z[earth_timeind], color='mediumseagreen') ax3.scatter(venus.x[earth_timeind],venus.y[earth_timeind],venus.z[earth_timeind], color='orange') ax3.scatter(mercury.x[earth_timeind],mercury.y[earth_timeind],mercury.z[earth_timeind],color='grey') #plot_traj(ax3, "PSP", tlist[k], frame="ECLIPJ2000", color="k", traj_pos=False, traj_major=None, traj_minor=144,lw=1.5) #plot_3dcore(ax3, model_obj, TP_A, color=C_A) #plot_3dcore_field(ax3, model_obj, color=C_A, steps=400, step_size=0.0005, lw=1.0, ls="-") #plot_traj(ax3, "PSP", TP_A, frame="ECLIPJ2000", color=C_A) #plot_3dcore(ax3, model_obj, tlist[k], color=C_B) #plot_3dcore_field(ax3, model_obj, color=C_B, steps=900, step_size=0.001, lw=1.0, ls="-") #plot_traj(ax3, "PSP", tlist[k], frame="ECLIPJ2000", color=C_B,lw=1.0) plot_shift(ax3,0.3,+0.5,-.5,0.0) ############################## magnetic field panel #Solar Orbiter ax4.plot(t1, btot1, color=C0, label="$|B|$") ax4.plot(t1, bxyz1[:, 0], color=C1, 
label="$B_R$") ax4.plot(t1, bxyz1[:, 1], color=C2, label="$B_T$") ax4.plot(t1, bxyz1[:, 2], color=C3, label="$B_N$") ax4.set_xlim(datetime.datetime(2020,4,18,12,0),datetime.datetime(2020,4,22,0,0)) ax4.set_xticklabels([]) ax4.set_ylabel('Solar Orbiter B [nT]') ax4.set_ylim(-25,25) ax4.legend(loc="lower right", fontsize=12,ncol=1,edgecolor='white') #line at current time ax4.plot([tlist[k],tlist[k]], [-200,200], color='black',linewidth=1,alpha=0.8) ax4.plot(sm.time, sm.bt, color=C0,lw=0.5,alpha=setalpha) ax4.plot(sm.time, sm.bx, color=C1,lw=0.5,alpha=setalpha) ax4.plot(sm.time, sm.by, color=C2,lw=0.5,alpha=setalpha) ax4.plot(sm.time, sm.bz, color=C3,lw=0.5,alpha=setalpha) #Wind ax5.plot(t2, btot2, color=C0, label="$|B|$") ax5.plot(t2, bxyz2[:, 0], color=C1, label="$B_R$") ax5.plot(t2, bxyz2[:, 1], color=C2, label="$B_T$") ax5.plot(t2, bxyz2[:, 2], color=C3, label="$B_N$") ax5.set_xlim(datetime.datetime(2020,4,18,12,0),datetime.datetime(2020,4,22,0,0)) ax5.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%b %d %Hh')) ax5.set_ylabel('Wind B [nT]') ax5.set_ylim(-25,25) #line at current time ax5.plot([tlist[k],tlist[k]], [-200,200], color='black',linewidth=1,alpha=0.8) ax5.plot(win.time, win.bt, color=C0,lw=0.5,alpha=setalpha) ax5.plot(win.time, win.bx, color=C1,lw=0.5,alpha=setalpha) ax5.plot(win.time, win.by, color=C2,lw=0.5,alpha=setalpha) ax5.plot(win.time, win.bz, color=C3,lw=0.5,alpha=setalpha) # ---------------------- EPD ax6.plot(het_gcr) ax6.set_xticklabels([]) ax6.set_xlim(datetime.datetime(2020,4,18,12,0),datetime.datetime(2020,4,22,0,0)) ax6.set_ylabel('EPD/HET $[s^{-1}]$') ax6.plot([tlist[k],tlist[k]], [-200,200], color='black',linewidth=1,alpha=0.8) ax6.set_ylim(26.3, 27.70) # ------------------------- south pole ax7.plot(sopo_nm/60) #ax7.plot(crater) ax7.set_xlim(datetime.datetime(2020,4,18,12,0),datetime.datetime(2020,4,22,0,0)) ax7.set_ylabel('South Pole $[s^{-1}]$') ax7.plot([tlist[k],tlist[k]], [-200,200], color='black',linewidth=1,alpha=0.8) 
ax7.set_ylim(5.35,5.51) #-------------------------------------------- #panel labels plt.annotate('(a)',[0.00,0.93],xycoords='figure fraction',fontsize=20) plt.annotate('(b)',[0.69,0.93],xycoords='figure fraction',fontsize=20) plt.annotate('(c)',[0.69,0.32],xycoords='figure fraction',fontsize=20) plt.annotate('(d)',[0.00,0.32],xycoords='figure fraction',fontsize=20) plt.annotate('(e)',[0.345,0.32],xycoords='figure fraction',fontsize=20) #current time plt.annotate(parse_time(tlist[k]).iso[0:16],[0.55,0.35],ha='center',xycoords='figure fraction',fontsize=20) #write hours since launch time #plt.annotate('$t_{launch}$ +',[0.43,0.45],ha='center',xycoords='figure fraction',fontsize=20) #plt.annotate(str(frametime[k]),[0.5,0.45],ha='center',xycoords='figure fraction',fontsize=20) #plt.annotate('hours',[0.54,0.45],ha='center',xycoords='figure fraction',fontsize=20) framestr = '%05i' % (k) plt.savefig(animdirectory+'/3dcore_solo_'+framestr+'.jpg',dpi=100) print('frame:', k) plt.close(2) ################## make animation #time for the animation as list tlist=[] for i in np.arange(0,24*8*60,60): tlist.append(t_launch+datetime.timedelta(minutes=float(i))) print('number of frames',len(tlist)) #sns.set_style('whitegrid') #simulation time since launch frametime=np.round((parse_time(tlist).plot_date-parse_time(t_launch).plot_date)*24,2) #clock computing time starttime1=time.time() ################################## single #make all frames #for k in np.arange(1,6):#len(tlist)): #make_frame(120) ############################## multi #number of processes depends on your machines memory; check with command line "top" #how much memory is used by all your processesii nr_of_processes_used=50 print('Using multiprocessing, nr of cores',multiprocessing.cpu_count(), \ 'with nr of processes used: ',nr_of_processes_used) #run multiprocessing pool to make all movie frames, depending only on frame number pool = multiprocessing.Pool(processes=nr_of_processes_used) input=[i for i in 
range(len(tlist))] pool.map(make_frame, input) pool.close() #pool.join() os.system('ffmpeg -r 25 -i '+animdirectory+'/3dcore_solo_%05d.jpg -b 5000k -r 25 results/moestl_apr2020_3dcore_solo.mp4 -y -loglevel quiet') print('movie finished in',np.round((time.time()-starttime1)/60,2),' minutes') # -
old_scripts/event_visuals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Developing match up function v2 # # - Move to a 1D model output # + import xarray as xr import numpy as np import matplotlib.pyplot as plt import pandas as pd import rompy from rompy import utils ## Should we import utils in __init__.py? from shapely.geometry import MultiPoint,Point # %matplotlib inline xr.set_options(display_style = 'text') # + cat = rompy.cat model_ds = cat.csiro.swan.swan_perth_fc.map(fcdate='2021-02').to_dask() x = model_ds.longitude.values y = model_ds.latitude.values xx,yy = np.meshgrid(x,y) points = MultiPoint(list(map(Point,zip(xx.ravel(),yy.ravel())))) geom = points.convex_hull.buffer(0.01).simplify(tolerance=0.01) df=cat.aodn.nrt_wave_buoys(startdt='2021-02',enddt='2021-04',geom=geom.to_wkt()).read() obs = df[['TIME','LATITUDE','LONGITUDE','WHTH']] obs['TIME'] = pd.to_datetime(obs['TIME']) model_ds # - obs out_ds = rompy.utils.find_matchup_data(obs,model_ds,{'WHTH':'hs'},time_thresh=None,KDtree_kwargs={}) out_ds # + fig, ax = plt.subplots(figsize=(12,8)) ax.scatter(out_ds['model_hs'],out_ds['meas_whth']) ax.plot([0,3],[0,3],ls='--',c='#252525') ax.set_ylim(0,2.5) ax.set_xlim(0,2.5) ax.set_xlabel('Model') ax.set_ylabel('Measured') ax.set_title('Hs')
notebooks/rompy-dev_matchup_code v2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Balance a pole on a cart with DQN # <br> # In this notebook we'll build a Deep Q-Network with TensorFlow deep learning framework and OpenAI gym <a href='https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py'>CartPole-v0</a> environment to balance a pole on a cart. <br><br><br> # # # CartPole-v0 # # <br><br> # # <img src='./images/CartPole-v1.gif'> # A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is controlled by applying a force of +1 or -1 to the cart. The pendulum starts upright, and the goal is to prevent it from falling over. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center. 
<br><br><br> import gym import random import numpy as np import tensorflow as tf tf.reset_default_graph() class DQN: def __init__(self, input_shape:object = None, action_size:object = None, scope:object = None) -> object: if input_shape is None: input_shape = [None, 4] self.max_size = 1000000 self.mem = [] with tf.variable_scope(scope): self.scope = scope self.input_shape = input_shape self.action_size = action_size self.states = tf.placeholder(shape=input_shape, dtype=tf.float32) self.actions = tf.placeholder(shape=[None], dtype=tf.int32) self.actions_onehot = tf.one_hot( self.actions, action_size, dtype=tf.float32) fc1 = tf.layers.dense(self.states, 256, activation=tf.nn.relu) fc2 = tf.layers.dense(fc1, 256, activation=tf.nn.relu) self.q = tf.layers.dense(fc2, action_size, activation=None) self.target_q = tf.placeholder(shape=[None], dtype=tf.float32) self.responsible_output = tf.reduce_sum(tf.multiply( self.q, self.actions_onehot), axis=1, keep_dims=False) self.loss = tf.reduce_mean( tf.square(self.responsible_output - self.target_q)) self.update_model = tf.train.AdamOptimizer().minimize(self.loss) def action(self, sess, state): q = sess.run(self.q, feed_dict={self.states: state[np.newaxis, ...]}) return np.argmax(q) def train(self, sess, batch, learning_rate, tnet): assert len(batch) > 0 states = np.vstack(batch[:, 0]) actions = np.array(batch[:, 1]) rewards = batch[:, 2] next_states = np.vstack(batch[:, 3]) dones = batch[:, 4] next_q = sess.run(tnet.q, feed_dict={tnet.states: next_states}) next_q = rewards + (1. 
- dones.astype(np.float32)) * \ learning_rate * np.amax(next_q, axis=1, keepdims=False) sess.run(self.update_model, feed_dict={self.states: states, self.actions: actions, self.target_q: next_q}) def add(self, element): self.mem.append(element) if len(self.mem) > self.max_size: self.mem.pop(0) def sample(self, size): size = min(size, len(self.mem)) return random.sample(self.mem, size) def update_graph(self, from_graph:object, to_graph:object) -> object: from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_graph) to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_graph) holder = [] for from_var, to_var in zip(from_vars, to_vars): holder.append(to_var.assign(from_var)) return holder # + target_update = 200 epsilon_max = 1.0 epsilon_min = 0.01 epsilon_decay = 0.001 learning_rate = 0.99 batch_size = 64 EPISODES = 300 env = gym.make('CartPole-v0') action_size = env.action_space.n input_shape = [None] + list(env.observation_space.shape) time_step = 0.0 epsilon = epsilon_max Q_Network = DQN(input_shape=input_shape, action_size=action_size, scope='Q_Network') Target_Network = DQN(input_shape=input_shape, action_size=action_size, scope='Target_Network') update_ops = Q_Network.update_graph('Q_Network', 'Target_Network') saver = tf.train.Saver() score = [] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for epoch in range(EPISODES): total_reward = 0 state = env.reset() while True: env.render() if np.random.rand() < epsilon: action = np.random.randint(action_size) else: action = Q_Network.action(sess, state) next_state, reward, done, _ = env.step(action) total_reward += reward Q_Network.add([state, action, reward, next_state, done]) time_step += 1. 
epsilon = epsilon_min + \ (epsilon_max - epsilon_min) * \ np.exp(-epsilon_decay * time_step) batch = np.array(Q_Network.sample(batch_size)) Q_Network.train(sess, batch, learning_rate, Target_Network) state = np.copy(next_state) if int(time_step) % target_update == 0: sess.run(update_ops) saver.save(sess, "./checkpoints/CartPole_DQN.ckpt") if done: print('Epoch:', epoch, 'Total Rewards:', total_reward) score.append(total_reward) break env.close() # - # ## Fundamental Performance Analysis # + score = np.array(score) totla_epoch = len(score) print("Total Epoch:", totla_epoch) print("Min Reward:", score.min()) print("Max Reward:", score.max()) print("Average Reward:", score.mean()) print("Highest Performance:", score.max(), "Reached at:", np.argmax(score) + 1, "Episode") # + import matplotlib.pyplot as plt # %matplotlib inline plt.figure(1, figsize=(18,10)) plt.grid(True) plt.xlabel("Epoch") plt.ylabel("Rewards") plt.plot(range(totla_epoch), score) plt.show()
Classic Control With DQN/02_CartPole.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: gpu4sci_20 # language: python # name: gpu4sci_20 # --- # # OpenACC Loop Optimizations # This version of the lab is intended for C/C++ programmers. The Fortran version of this lab is available [here](../Fortran/README.ipynb). # You will receive a warning five minutes before the lab instance shuts down. Remember to save your work! If you are about to run out of time, please see the [Post-Lab](#Post-Lab-Summary) section for saving this lab to view offline later. # --- # Let's execute the cell below to display information about the GPUs running on the server. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell. # !pgaccelinfo # --- # # ## Introduction # # Our goal for this lab is to use the OpenACC Loop clauses to opimize our Parallel Loops. # # <img src="../images/development-cycle.png" alt="OpenACC development cycle" width="50%"> # # This is the OpenACC 3-Step development cycle. # # **Analyze** your code, and predict where potential parallelism can be uncovered. Use profiler to help understand what is happening in the code, and where parallelism may exist. # # **Parallelize** your code, starting with the most time consuming parts. Focus on maintaining correct results from your program. # # **Optimize** your code, focusing on maximizing performance. Performance may not increase all-at-once during early parallelization. # # We are currently tackling the **optimize** step. We will include the OpenACC loop clauses to optimize the execution of our parallel loop nests. # --- # # ## Run the Code # # In the previous labs, we have built up a working parallel code that can run on both a multicore CPU and a GPU. 
Let's run the code and note the performance, so that we can compare the runtime to any future optimizations we make. The code should take about 1 or 2 seconds to run at this point. # !pgcc -fast -ta=tesla -Minfo=accel -o laplace_baseline laplace2d.c jacobi.c && ./laplace_baseline # ### Optional: Analyze the Code # # If you would like a refresher on the code files that we are working on, you may view both of them using the two links below. # # **jacobi.c** (File -> Open -> jacobi.c) # # **laplace2d.c** (File -> Open -> laplace2d.c) # --- # # ## Optimize Loop Schedules # # The compiler has analyzed the loops in our two main functions and scheduled the iterations of the loops to run in parallel on our GPU and Multicore CPU. The compiler is usually pretty good at choosing how to break up loop iterations to run well on parallel accelerators, but sometimes we can eek out just a little more performance by guiding the compiler to make specific choices. First, let's look at the choices the compiler made for us. We'll focus on the `calcNext` routine, but you should look at the `swap` routine too. Here's the compiler feedback for that routine: # # ``` # calcNext: # 48, Generating copyin(A[:m*n]) # Accelerator kernel generated # Generating Tesla code # 49, #pragma acc loop gang /* blockIdx.x */ # Generating reduction(max:error) # 51, #pragma acc loop vector(128) /* threadIdx.x */ # 48, Generating implicit copy(error) # Generating copyout(Anew[:m*n]) # 51, Loop is parallelizable # ``` # # The main loops on interest in `calcNext` are on lines 49 and 51. I see that the compiler has told me what loop clauses it chose for each of those loops. The outermost loop is treated as a *gang* loop, meaning it broke that loop up into chunks that can be spread out across the GPU or CPU cores easily. If you have programmed in CUDA before, you'll recognize that the compiler is mapping this loop to the CUDA thread blocks. The innermost loop is mapped instead to *vector* parallelism. 
You can think of a vector as some number of data cells that get the same operation applied to them at the same time. On any modern processor technology you need this mixture of *coarse grained* and *fine grained* parallelism to effectively use the hardware. Vector (fine grained) parallelism can operate extremely efficiently when performing the same operation on a bunch of data, but there's limits to how long a vector you can build. Gang (coarse grained) parallelism is highly scalable, because each chunk of work can operate completely independently of each other chunk, making it ideal for allowing processor cores to operate independently of each other. # # Let's look at some loop clauses that allow you to tune how the compiler maps our loop iterations to these different types of parallelism. # ### Collapse Clause # # The `collapse` clause allows us to transform a multi-dimensional loop nests into a single-dimensional loop. This process is helpful for increasing the overall length (which usually increases parallelism) of our loops, and will often help with memory locality. In our case, instead of looking at our loops as `n` and `m` iteration loops, it looks at them as a single `n * m` iteration loop, which gives it more flexibility in how to break up the iterations. Let's look at the syntax. # # ```cpp # #pragma acc parallel loop collapse( N ) # ``` # # Where N is the number of loops to collapse. # # ```cpp # #pragma acc parallel loop collapse( 3 ) # for(int i = 0; i < N; i++) # { # for(int j = 0; j < M; j++) # { # for(int k = 0; k < Q; k++) # { # < loop code > # } # } # } # ``` # # This code will combine the 3-dimensional loop nest into a single 1-dimensional loop. The loops in our example code are fairly long-running, so I don't expect a lot of speed-up from collapsing them together, but let's try it anyway. # #### Implementing the Collapse Clause # # Use the following link to edit our code. 
Use the **collapse clause** to collapse our multi-dimensional loops into a single dimensional loop. # # **laplace2d.c** (File -> Open -> laplace2d.c) # # (make sure to save your code with ctrl+s) # # Then run the following script to see how the code runs. # !pgcc -ta=tesla -Minfo=accel -o laplace_collapse jacobi.c laplace2d.c && ./laplace_collapse # Did your code speed-up at all? Mine actually slowed down by about 0.1 seconds. # # So when should you use the `collapse` clause? The collapse clause is particularly useful in two specific cases that occur when you have very deep loop nests (4, 5, or more loops nested together). The first case is when the innermost loops have very few iterations. The compiler will generally favor inner loops for *vector* parallelism, so if there's not enough loop iterations to fill our vector, we're going to be wasting our computational power. Take a loop at this code: # # ```cpp # #pragma acc parallel loop # for(int i = 0; i < N; i++) # { # for(int j = 0; j < 8; j++) # { # for(int k = 0; k < 8; k++) # { # < loop code > # } # } # } # ``` # # In this code, our innermost loop, which the compiler will likely want to vectorize has just 8 iterations. On a CPU, this may be OK, but on a GPU we generally want longer vectors. If I collapse the two innermost loops together, that gives me 64 iterations, which is starting to get in the range where GPUs make sense. So instead, I should write this: # # ```cpp # #pragma acc parallel loop # for(int i = 0; i < N; i++) # { # #pragma acc loop collapse(2) # for(int j = 0; j < 8; j++) # { # for(int k = 0; k < 8; k++) # { # < loop code > # } # } # } # ``` # # The other common case happens when you have sort-of short loops on the outermost loops of a loop nest. This is where the compiler looks first for *coarse grained* parallelism to spread across the CPU or GPU. If there's not enough parallelism here, then we're limited in how many CPU cores or how large a GPU we can effectively use. 
So in the example below, I took two 32 iteration loops and turn them into a single 1024 iteration loop to give the compiler the opportunity to parallelize the region on larger GPUs. # # ```cpp # #pragma acc parallel loop collapse( 2 ) # for(int i = 0; i < 32; i++) # { # for(int j = 0; j < 32; j++) # { # for(int k = 0; k < N; k++) # { # < loop code > # } # } # } # ``` # # As a rule of thumb, if your code has loops that are tightly-nested together, meaning there's nothing inside of one loop except the nested loop, it's worth trying to collapse the loops completely. This won't always give you the best performance, but it will frequently provide better performance than the uncollapsed version. # Let's look at another clause that may help our code. # # ### Tile Clause # # The `tile` clause allows us to break up a multi-dimensional loop into *tiles*, or *blocks*. This is often useful for increasing memory locality in codes like ours. Let's look at the syntax. # # ```cpp # #pragma acc parallel loop tile( x, y, z, ... ) # ``` # # Our tiles can have as many dimensions as we want, though we must be careful to not create a tile that is too large. Let's look at an example: # # ```cpp # #pragma acc parallel loop tile( 32, 32 ) # for(int i = 0; i < N; i++) # { # for(int j = 0; j < M; j++) # { # < loop code > # } # } # ``` # # The above code will break our loop iterations up into 32x32 tiles (or blocks), and then execute those blocks in parallel. Let's look at a slightly more specific code. # # ```cpp # #pragma acc parallel loop tile( 32, 32 ) # for(int i = 0; i < 128; i++) # { # for(int j = 0; j < 128; j++) # { # < loop code > # } # } # ``` # # In this code, we have 128x128 loop iterations, which are being broken up into 32x32 tiles. This means that we will have 16 tiles, each tile being size 32x32. # #### Implementing the Tile Clause # # Use the following link to edit our code. 
Replace the `collapse` clause with the `tile` clause to break our multi-dimensional loops into smaller tiles. Try using a variety of different tile sizes, but for now keep one of the dimensions as a **multiple of 32**. We will talk later about why this is important. # # **laplace2d.c** (File -> Open -> laplace2d.c) # # (make sure to save your code with ctrl+s) # # Then run the following script to see how the code runs. # !pgcc -ta=tesla -Minfo=accel -o laplace_tile jacobi.c laplace2d.c && ./laplace_tile # Unlike the `collapse` clause, we need to do some experimentation to find the best value for our code. Here's some values that I tried and their results. I know that the compiler is using a vector length of 128, according to the compiler feedback above, so I started with values that multiplied to 128, but then tried a few more values. For the speed-up column I'm comparing against the results from using the `collapse` clause. # # | Clause | Time (s) | Speed-up | # |-------------|----------|----------| # | collapse(2) | **1.60** | 1.00X | # | tile(2,64) | 1.92 | 0.83X | # | tile(64,2) | **1.56** | 1.03X | # | tile(4,32) | 1.67 | 0.96X | # | tile(32,4) | 1.63 | 0.98X | # | tile(8,16) | 1.72 | 0.93X | # | tile(16,8) | **1.54** | 1.04X | # | tile(16,16) | 1.88 | 0.85X | # | tile(16,32) | 1.99 | 0.80X | # | tile(32,16) | 1.91 | 0.84X | # | tile(32,32) | 2.02 | 0.79X | # # Notice that I didn't limit myself to having 32 as one of the values, but I did make sure that if you multiply the two parts together they are always divisible by 32. NVIDIA GPUs always operated in groups of 32 threads, so it's best to make sure that there's some multiple of 32 worth of work for the threads to do. I stopped at 32 x 32 though, because NVIDIA GPUs are limited to at most 1024 threads in a gang. # # For this code on my GPU (TITAN Xp), two of the tile clauses were faster than the simple `collapse` clause. 
For a 2D code like this one, it's always worth trying the `tile` clause, because it may allow the data used within the gang to have more reuse than it would with the `collapse` clause. On some other GPUs, the `collapse` clause does slightly better for this example. # --- # # There's quite a bit more we can say about how to optimize your loops with OpenACC, but we'll leave that to a more advanced lab. With just the `tile` and `collapse` clauses in your toolbox you should already be in good shape for optimizing your loops with OpenACC. # --- # # ## Conclusion # # Our primary goal when using OpenACC is to parallelize our large for loops. To accomplish this, we must use the OpenACC loop directive and loop clauses. There are many ways to alter and optimize our loops, though it is up to the programmer to decide which route is the best to take. At this point in the lab series, you should be able to begin parallelizing your own personal code, and to be able to achieve a relatively high performance using OpenACC. # --- # # ## Bonus Task (More about Gangs, Workers, and Vectors) # # This week's bonus task is to learn a bit more about how OpenACC breaks up the loop iterations into *gangs*, *workers*, and *vectors*, which was discussed very briefly in the first lab. [Click Here](Bonus.ipynb) for more information about these *levels of parallelism*. # # + [markdown] tags=["remove_cell"] # ## Post-Lab Summary # # If you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. # # You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below. 
# + tags=["remove_cell"] language="bash" # rm -f openacc_files.zip *.o laplace_baseline laplace_collapse laplace_tile # zip -r openacc_files.zip * # + [markdown] tags=["remove_cell"] # **After** executing the above zip command, you should be able to download the zip file [here](files/openacc_files.zip)
labs/lab3/English/C/README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Disease Outbreak Response Decision-making Under Uncertainty: A retrospective analysis of measles in Sao Paulo # + # %matplotlib inline import pandas as pd import numpy as np import numpy.ma as ma from datetime import datetime import matplotlib.pyplot as plt import seaborn as sb sb.set() import pdb from pymc3 import Model, sample, find_MAP, Metropolis, NUTS from pymc3 import (Normal, HalfCauchy, Uniform, MvNormal, Beta, Binomial, Dirichlet, Multinomial, Bernoulli, Poisson, Wishart, StudentT, GaussianRandomWalk, Exponential, NegativeBinomial) from pymc3 import Potential, Deterministic import theano.tensor as T from IPython.core.display import HTML def css_styling(): styles = open("styles/custom.css", "r").read() return HTML(styles) css_styling() # - from theano import config config.warn.reduce_join = False data_dir = "data/" # Import outbreak data measles_data = pd.read_csv(data_dir+"measles.csv", index_col=0) measles_data.NOTIFICATION = pd.to_datetime(measles_data.NOTIFICATION) measles_data.BIRTH = pd.to_datetime(measles_data.BIRTH) measles_data.ONSET = pd.to_datetime(measles_data.ONSET) measles_data = (measles_data.replace({'DISTRICT': {'BRASILANDIA':'BRAZILANDIA'}}) .drop('AGE', axis=1)) # Sao Paulo population by district sp_pop = pd.read_csv(data_dir+'sp_pop.csv', index_col=0) _names = sp_pop.index.values _names[_names=='BRASILANDIA'] = 'BRAZILANDIA' sp_pop.set_index(_names, inplace = True) sp_pop.head() # Plot of cumulative cases by district measles_onset_dist = measles_data.groupby(['DISTRICT','ONSET']).size().unstack(level=0).fillna(0) measles_onset_dist.cumsum().plot(legend=False, grid=False) total_district_cases = measles_onset_dist.sum() # Top 5 districts by number of cases totals = measles_onset_dist.sum() totals.sort(ascending=False) 
totals[:5] # Age distribution of cases, by confirmation status by_conclusion = measles_data.groupby(["YEAR_AGE", "CONCLUSION"]) counts_by_cause = by_conclusion.size().unstack().fillna(0) ax = counts_by_cause.plot(kind='bar', stacked=True, xlim=(0,50), figsize=(15,5)) # ### Vaccination Data vaccination_data = pd.read_csv('data/BrazilVaxRecords.csv', index_col=0) vaccination_data.head() # ```python # # sia_susc = np.ones(len(vax_97)) # birth_year = np.arange(1922, 1998)[::-1] # by_mask = (birth_year > 1983) & (birth_year < 1992) # by_mask = (birth_year > 1972) & (birth_year < 1986) # sia_susc[by_mask] *= 0.2 # ``` vax_97 = np.r_[[0]*(1979-1921+1), vaccination_data.VAX[:17]] n = len(vax_97) FOI_mat = np.resize((1 - vax_97*0.9), (n,n)).T FOI_mat vacc_susc = (1 - vax_97*0.9)[::-1] vacc_susc[0] = 0.5 vacc_susc sia_susc = np.ones(len(vax_97)) birth_year = np.arange(1922, 1998)[::-1] by_mask = ((birth_year > 1983) & (birth_year < 1992)) | ((birth_year > 1972) & (birth_year < 1986)) sia_susc[by_mask] *= 0.2 sia_susc # ## Stochastic Disease Transmission Model # # As a baseline for comparison, we can fit a model to all the clinically-confirmed cases, regardless of lab confirmation status. For this, we will use a simple SIR disease model, which will be fit using MCMC. 
# # This model fits the series of 2-week infection totals in each district $i$ as a set of Poisson models: # # \\[Pr(I(t)_{i} | \lambda(t)_i) = \text{Poisson}(\lambda(t)_i) \\] # # Where the outbreak intensity is modeled as: # # \\[\lambda(t)_i = \beta [I^{(w)}(t-1)_i]^{\alpha} S(t-1)_i\\] # # \\[\alpha \sim \text{Exp}(1)\\] # # We will assume here that the transmission rate is constant over time (and across districts): # # \\[\beta \sim \text{Gamma}(1, 0.1)\\] # # To account for the influence of infected individuals from neighboring districts on new infections, the outbreak intensity was modeled using a spatial-weighted average of infecteds across districts, where populations were weighted as an exponential function of the distance between district centroids: # # \\[w_{d} = \text{exp}(-\theta d)\\] # # \\[\theta \sim \text{Exp}(1)\\] # # ### Confirmation Sub-model # # Rather than assume all clinical cases are true cases, we can adjust the model to account for lab confirmation probability. This is done by including a sub-model that estimates age group-specific probabilities of confirmation, and using these probabilities to estimate the number of lab-confirmed cases. These estimates are then plugged into the model in place of the clinically-confirmed cases. # # We specified a structured confirmation model to retrospectively determine the age group-specific probabilities of lab confirmation for measles, conditional on clinical diagnosis. Individual lab confirmation events $c_i$ were modeled as Bernoulli random variables, with the probability of confirmation being allowed to vary by age group: # # $$c_i \sim \text{Bernoulli}(p_{a(i)})$$ # # where $a(i)$ denotes the appropriate age group for the individual indexed by i. There were 16 age groups, the first 15 of which were 5-year age intervals $[0,5), [5, 10), \ldots , [70, 75)$, with the 16th interval including all individuals 75 years and older. 
# # Since the age interval choices were arbitrary, and the confirmation probabilities of adjacent groups likely correlated, we modeled the correlation structure directly, using a multivariate logit-normal model. Specifically, we allowed first-order autocorrelation among the age groups, whereby the variance-covariance matrix retained a tridiagonal structure. # # $$\begin{aligned} # \Sigma = \left[{ # \begin{array}{c} # {\sigma^2} & {\sigma^2 \rho} & 0& \ldots & {0} & {0} \\ # {\sigma^2 \rho} & {\sigma^2} & \sigma^2 \rho & \ldots & {0} & {0} \\ # {0} & \sigma^2 \rho & {\sigma^2} & \ldots & {0} & {0} \\ # \vdots & \vdots & \vdots & & \vdots & \vdots\\ # {0} & {0} & 0 & \ldots & {\sigma^2} & \sigma^2 \rho \\ # {0} & {0} & 0 & \ldots & \sigma^2 \rho & {\sigma^2} # \end{array} # }\right] # \end{aligned}$$ # # From this, the confirmation probabilities were specified as multivariate normal on the inverse-logit scale. # # $$ \text{logit}(p_a) = \{a\} \sim N(\mu, \Sigma)$$ # # Priors for the confirmation sub-model were specified by: # # $$\begin{aligned} # \mu_i &\sim N(0, 100) \\ # \sigma &\sim \text{HalfCauchy}(25) \\ # \rho &\sim U(-1, 1) # \end{aligned}$$ # Age classes are defined in 5-year intervals. age_classes = [0,5,10,15,20,25,30,35,40,100] measles_data.dropna(subset=['YEAR_AGE'], inplace=True) measles_data['YEAR_AGE'] = measles_data.YEAR_AGE.astype(int) measles_data['AGE_GROUP'] = pd.cut(measles_data.YEAR_AGE, age_classes, right=False) # Lab-checked observations are extracted for use in estimating lab confirmation probability. CONFIRMED = measles_data.CONCLUSION == 'CONFIRMED' CLINICAL = measles_data.CONCLUSION == 'CLINICAL' DISCARDED = measles_data.CONCLUSION == 'DISCARDED' # Extract confirmed and clinical subset, with no missing county information. 
lab_subset = measles_data[(CONFIRMED | CLINICAL) & measles_data.COUNTY.notnull()].copy() age = lab_subset.YEAR_AGE.values ages = lab_subset.YEAR_AGE.unique() counties = lab_subset.COUNTY.unique() confirmed = (lab_subset.CONCLUSION=='CONFIRMED').values clinic_subset = measles_data[CLINICAL & measles_data.COUNTY.notnull()].copy() _lab_subset = lab_subset.replace({"CONCLUSION": {"CLINICAL": "UNCONFIRMED"}}) by_conclusion = _lab_subset.groupby(["YEAR_AGE", "CONCLUSION"]) counts_by_cause = by_conclusion.size().unstack().fillna(0) ax = counts_by_cause.plot(kind='bar', stacked=True, xlim=(0,50), figsize=(15,5), grid=False) lab_subset.shape confirmed.sum() # Proportion of lab-confirmed cases older than 20 years (measles_data[CONFIRMED].YEAR_AGE>20).mean() age_classes age_group = pd.cut(age, age_classes, right=False) age_index = np.array([age_group.categories.tolist().index(i) for i in age_group]) age_groups = age_group.categories age_groups #Extract cases by age and time. age_group = pd.cut(age, age_classes, right=False) age_index = np.array([age_group.categories.tolist().index(i) for i in age_group]) age_group.categories age_slice_endpoints = [g[1:-1].split(',') for g in age_groups] age_slices = [slice(int(i[0]), int(i[1])) for i in age_slice_endpoints] # Get index from full crosstabulation to use as index for each district dates_index = measles_data.groupby( ['ONSET', 'AGE_GROUP']).size().unstack().index unique_districts = measles_data.DISTRICT.dropna().unique() excludes = ['BOM RETIRO'] N = sp_pop.drop(excludes).ix[unique_districts].sum().drop('Total') N_age = N.iloc[:8] N_age.index = age_groups[:-1] N_age[age_groups[-1]] = N.iloc[8:].sum() N_age # Compile bi-weekly confirmed and unconfirmed data by Sao Paulo district # + # All confirmed cases, by district confirmed_data = lab_subset[lab_subset.CONCLUSION=='CONFIRMED'] confirmed_counts = (confirmed_data.groupby(['ONSET', 'AGE_GROUP']) .size() .unstack() .reindex(dates_index) .fillna(0) .sum()) all_confirmed_cases = 
(confirmed_counts.reindex_axis(measles_data['AGE_GROUP'].unique()) .fillna(0).values.astype(int)) # - confirmed_counts_2w = (confirmed_data .groupby(['ONSET', 'AGE_GROUP']) .size() .unstack() .reindex(dates_index) .fillna(0) .resample('2W', how='sum')) # + # All clinical cases, by district clinical_counts = (clinic_subset.groupby(['ONSET', 'AGE_GROUP']) .size() .unstack() .reindex(dates_index) .fillna(0) .sum()) all_clinical_cases = (clinical_counts.reindex_axis(measles_data['AGE_GROUP'].unique()) .fillna(0).values.astype(int)) # - clinical_counts_2w = (clinic_subset .groupby(['ONSET', 'AGE_GROUP']) .size() .unstack() .reindex(dates_index) .fillna(0) .resample('2W', how='sum')) age_groups = np.sort(measles_data['AGE_GROUP'].unique()) age_groups # Check shape of data frame # # - 28 bi-monthly intervals, 9 age groups assert clinical_counts_2w.shape == (28, len(age_groups)) confirmed_counts_2w clinical_counts_2w obs_date = '1997-06-15' confirmed_obs_t = confirmed_counts_2w[confirmed_counts_2w.index <= obs_date].values clinical_obs_t = clinical_counts_2w[clinical_counts_2w.index <= obs_date].values N_age.values.astype(int) # Downsample annual series to observed age groups downsample = lambda x: np.array([x[s].mean() for s in age_slices]) def interpolate(x0, y0, x): x = np.array(x) idx = np.searchsorted(x0, x) dl = np.array(x - x0[idx - 1]) dr = np.array(x0[idx] - x) d=dl+dr wl = dr/d return wl*y0[idx-1] + (1-wl)*y0[idx] # + invlogit = T.nnet.sigmoid confirmation = True def specify_model(model, observation_date, confirmation=True, structured_susc=False): with model: n_periods, n_age_groups = confirmed_counts_2w.shape ### Confirmation sub-model if confirmation: # Specify priors on age-specific means age_classes = np.unique(age_index) p_age = Beta('p_age', 1, 1, shape=n_age_groups) p_confirm = p_age[age_index] # Confirmation likelihood lab_confirmed = Bernoulli('lab_confirmed', p=p_confirm, observed=confirmed) ''' Truncate data at observation period ''' obs_index = 
confirmed_counts_2w.index <= observation_date confirmed_obs_t = confirmed_counts_2w[obs_index].values clinical_obs_t = clinical_counts_2w[obs_index].values # Index for observation date, used to index out values of interest # from the model. t_obs = obs_index.sum() - 1 if confirmation: clinical_cases = [Binomial('clinical_cases_%i' % n, i, p_age, shape=n_age_groups) for n,i in enumerate(clinical_obs_t)] I_age = [Deterministic('I_age_%i' % i, clinical + confirmed_obs_t[i]) for i,clinical in enumerate(clinical_cases)] I = T.stacklists(I_age) else: I = confirmed_obs_t + clinical_obs_t # Calcuate age distribution from observed distribution of infecteds to date age_dist = Dirichlet('age_dist', np.ones(n_age_groups), shape=n_age_groups) age_dist_like = Potential('age_dist_like', Multinomial.dist(I.sum(), age_dist).logp(I.sum(0))) # Transmission parameter beta = Uniform('beta', 1, 100) if structured_susc: A = 75./(beta - 1) lt_sum = downsample(np.tril(FOI_mat).sum(0)[::-1]) natural_susc = Deterministic('natural_susc', T.exp((-1/A) * lt_sum)) μ_p = Deterministic('μ_p', downsample(sia_susc) * downsample(vacc_susc) * natural_susc) σ_p = Uniform('σ_p', 0, 100, testval=5) p_susceptible = Beta('p_susceptible', mu=μ_p, sd=σ_p, shape=n_age_groups) else: σ_p = HalfCauchy('σ_p', 25) μ_p = GaussianRandomWalk('μ_p', sd=σ_p, shape=n_age_groups) p_susceptible = Deterministic('p_susceptible', invlogit(μ_p)) # Estimated total initial susceptibles S_0 = Binomial('S_0', n=N_age.values.astype(int), p=p_susceptible, shape=n_age_groups) S = Deterministic('S', S_0 - I.cumsum(axis=0)) # Force of infection λ = Deterministic('λ', beta * I * S / N_age) # λ = Deterministic('λ', beta * I.sum(1) * S.sum(1) / N_age.sum()) # λ = Deterministic('λ', I.dot(beta) * S / N_age) new_cases = Potential('new_cases', Poisson.dist(λ[:-1]).logp(I[1:])) return model # - # Run models for June 15 and July 15 observation points, both with and without clinical confirmation. 
n_iterations = 20000 n_burn = 10000 structured_susc = True # June 15, with lab confirmation # + model_june = specify_model(Model(), '1997-06-15', structured_susc=structured_susc) with model_june: june_trace = sample(n_iterations) # - # July 15, with lab confirmation # + model_july = specify_model(Model(), '1997-07-15', structured_susc=structured_susc) with model_july: july_trace = sample(n_iterations) # - # June 15, no lab confirmation # + model_june_noconf = specify_model(Model(), '1997-06-15', confirmation=False, structured_susc=structured_susc) with model_june_noconf: start = find_MAP() step = Metropolis() june_noconf_trace = sample(n_iterations, step=step, start=start) # - # July 15, no lab confirmation # + model_july_noconf = specify_model(Model(), '1997-07-15', confirmation=False, structured_susc=structured_susc) with model_july_noconf: july_noconf_trace = sample(n_iterations) # - # ## Summary of model output # # Age-specific confirmation, June model # + from pymc3 import traceplot, forestplot forestplot(june_trace[n_burn:], vars=['p_age']) # - traceplot(june_trace[n_burn:], vars=['σ_p', 'α']) # Lab confirmation rates, June model # + p_age = pd.DataFrame(june_trace.get_values('p_age'), columns=age_groups) f, axes = plt.subplots(figsize=(14,6)) sb.boxplot(data=p_age, linewidth=0.3, fliersize=0, ax=axes, color=sb.color_palette("coolwarm", 5)[0], order=age_group.categories) axes.set_ylabel('Confirmation rate') axes.set_xlabel('Age group') # - # Proportion of population susceptible, June model. forestplot(june_trace[n_burn:], vars=['p_susceptible']) # Proportion of population susceptible, June model with no confirmation correction forestplot(june_noconf_trace[n_burn:], vars=['p_susceptible']) # Epidemic intensity estimates at June and July. 
forestplot(june_trace[n_burn:], vars=['λ']) forestplot(july_trace[n_burn:], vars=['λ']) # Transmission paramter traceplot(june_trace[n_burn:], vars=['beta']) traceplot(june_noconf_trace[n_burn:], vars=['beta']) # Epidemic intensity for lab- versus clinical-confirmation models from pymc3 import df_summary # + lam_june = df_summary(june_trace[n_burn:], vars='λ') fig, axes = plt.subplots(2, 1, sharey=True) lam_june['mean'].plot(ax=axes[0], alpha=0.4) axes[0].set_ylabel('Epidemic intensity') axes[0].set_xlabel('time (2-week periods)') axes[0].set_title('Lab confirmation') lam_june_noconf = df_summary(june_noconf_trace[n_burn:], vars='λ') lam_june_noconf['mean'].plot(ax=axes[1], alpha=0.4) axes[1].set_ylabel('Epidemic intensity') axes[1].set_xlabel('time (2-week periods)') axes[1].set_title('Clinical confirmation') plt.tight_layout() # - plt.boxplot(july_trace.get_values('S')[n_burn:,-1,:]); S_age_june = pd.DataFrame(june_trace.get_values('S')[n_burn:, -1, :], columns=age_group.categories).unstack().reset_index() S_age_june.columns = 'Age', 'Iteration', 'S' S_age_june['Confirmation'] = 'Lab' # + S_age_june = pd.DataFrame(june_trace.get_values('S')[n_burn:, -1, :], columns=age_group.categories).unstack().reset_index() S_age_june.columns = 'Age', 'Iteration', 'S' S_age_june['Confirmation'] = 'Lab' S_age_june_noconf = pd.DataFrame(june_noconf_trace.get_values('S')[n_burn:, -1, :], columns=age_group.categories).unstack().reset_index() S_age_june_noconf.columns = 'Age', 'Iteration', 'S' S_age_june_noconf['Confirmation'] = 'Clinical' S_age_june = pd.concat([S_age_june, S_age_june_noconf], ignore_index=True) # + S_age_july = pd.DataFrame(july_trace.get_values('S')[n_burn:, -1, :], columns=age_group.categories).unstack().reset_index() S_age_july.columns = 'Age', 'Iteration', 'S' S_age_july['Confirmation'] = 'Lab' S_age_july_noconf = pd.DataFrame(june_noconf_trace.get_values('S')[n_burn:, -1, :], columns=age_group.categories).unstack().reset_index() S_age_july_noconf.columns = 
'Age', 'Iteration', 'S' S_age_july_noconf['Confirmation'] = 'Clinical' S_age_july = pd.concat([S_age_july, S_age_july_noconf], ignore_index=True) # - # Numbers of suscepibles in each age group, under lab vs clinical confirmation g = sb.factorplot("Age", "S", "Confirmation", S_age_june, kind="box", palette="hls", size=6, aspect=2, linewidth=0.3, fliersize=0, order=age_group.categories) g.despine(offset=10, trim=True) g.set_axis_labels("Age Group", "Susceptibles"); g = sb.factorplot("Age", "S", "Confirmation", S_age_july, kind="box", palette="hls", size=6, aspect=2, linewidth=0.3, fliersize=0, order=age_group.categories) g.despine(offset=10, trim=True) g.set_axis_labels("Age Group", "Susceptibles"); # ## Vaccination coverage # + n_age_groups = len(age_groups) coverage_5 = np.r_[[0.95], [0]*(n_age_groups - 1)] coverage_15 = np.r_[[0.95]*3, [0]*(n_age_groups - 3)] coverage_30 = np.r_[[0.95]*6, [0]*(n_age_groups - 6)] coverage_adult = np.r_[[0.95, 0, 0, 0, 0.95, 0.95] + [0]*(n_age_groups - 6)] coverage_dict = {'pct_5': coverage_5, 'pct_15': coverage_15, 'pct_30': coverage_30, 'pct_adult': coverage_adult} # - def calc_coverage(trace, p): n = trace.get_values('S')[:,-1,:].copy().astype(int) n[n<0] = 0 vacc = np.random.binomial(n, p).sum(1) pct = vacc/trace.get_values('S')[:,-1,:].sum(1) return pct # + labels = ['pct_5', 'pct_15', 'pct_30', 'pct_adult'] june_coverage = pd.DataFrame({v: calc_coverage(june_trace[n_burn:], coverage_dict[v]) for v in labels}) june_coverage['Month'] = 'June' june_coverage['Confirmation'] = 'Lab' # - june_coverage.boxplot(); # + june_noconf_coverage = pd.DataFrame({v: calc_coverage(june_noconf_trace[n_burn:], coverage_dict[v]) for v in labels}) june_noconf_coverage['Month'] = 'June' june_noconf_coverage['Confirmation'] = 'Clinical' july_coverage = pd.DataFrame({v: calc_coverage(july_trace[n_burn:], coverage_dict[v]) for v in labels}) july_coverage['Month'] = 'July' july_coverage['Confirmation'] = 'Lab' july_noconf_coverage = pd.DataFrame({v: 
calc_coverage(july_noconf_trace[n_burn:], coverage_dict[v]) for v in labels}) july_noconf_coverage['Month'] = 'July' july_noconf_coverage['Confirmation'] = 'Clinical' # - coverage = pd.concat([june_coverage, june_noconf_coverage, july_coverage, july_noconf_coverage], ignore_index=True) sb.factorplot(row="Month", col="Confirmation", data=coverage, kind='box', row_order=['June', 'July'], order=['pct_5', 'pct_15', 'pct_30', 'pct_adult'], palette="YlGnBu_d", linewidth=0.7, fliersize=0, aspect=1.25).despine(left=True) # ## Mapping spatial effects # + from mpl_toolkits.basemap import Basemap import geopandas as gpd lllat=-24 urlat=-23.3 lllon=-47 urlon=-46.3 SP_base = Basemap(ax=None, lon_0=(urlon + lllon) / 2, lat_0=(urlat + lllat) / 2, llcrnrlat=lllat, urcrnrlat=urlat, llcrnrlon=lllon, urcrnrlon=urlon, resolution='i', epsg='4326') # - SP_dist = gpd.GeoDataFrame.from_file('Sao Paulo/Brazil_full/BRA_adm3.shp').to_crs({'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84'}) SP_dist['DIST_NAME'] = [trans.trans(_).upper() for _ in SP_dist.NAME_3] λ_june = pd.Series(model_june.λ_t.stats()['mean'], index=sp_districts) λ_june SP_dist_merged = SP_dist.merge(pd.DataFrame(λ_june, columns=['λ']), left_on='DIST_NAME', right_index=True) measles_onset_conf = measles_data[CONFIRMED].groupby(['DISTRICT','ONSET']).size().unstack(level=0).fillna(0).sum() measles_onset_conf _rates = measles_onset_conf/sp_pop.sum(1) SP_dist_conf = SP_dist.merge(pd.DataFrame(_rates, columns=['rate']), left_on='DIST_NAME', right_index=True) # Estimated expected value for infecteds, by district # + from matplotlib.pyplot import cm map_fig = plt.figure(figsize=(16,12)) map_ax = plt.gca() SP_base.drawcoastlines() SP_base.drawrivers() SP_dist_merged.plot(column='λ', colormap=cm.Reds, axes=map_ax) # - # Observed confirmed cases, by district # + map_fig = plt.figure(figsize=(16,12)) map_ax = plt.gca() SP_base.drawcoastlines() SP_base.drawrivers() SP_dist_conf.plot(column='rate', colormap=cm.Reds, axes=map_ax)
Archive/Non-Spatial Measles Model (PyMC 3).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Capítulo 5 - Uso de Selenium para automatizar acciones en el navegador # ___ # ## Ejemplo práctico # ___ # importamos librerías clave: # ! pip install selenium from selenium import webdriver # hay que haber ejecutado `pip install selenium` para que funcione la importación # Para levantar el explorador deberá estar presente en la misma carpeta que este documento el driver (descomprimido como .exe) de la versión del navegador que queramos automatizar. Se puede bajar de aquí: https://sites.google.com/chromium.org/driver/ driver = webdriver.Chrome() driver.get("http://www.python.org") # comprobación de texto en el título: print(driver.title) assert "Python" in driver.title # referenciar un elemento de la web: # # orden de rapidez: 1.- id | 2.- name | 3.- css selector | 4.- xpath # + from selenium.webdriver.common.by import By # id tbx__search = driver.find_element(By.ID, "id-search-field") # name tbx__search = driver.find_element(By.NAME, "q") # css selector tbx__search = driver.find_element(By.CSS_SELECTOR, "input#id-search-field") # id tbx__search = driver.find_element(By.CSS_SELECTOR, "input.search-field") # class tbx__search = driver.find_element(By.CSS_SELECTOR, "input[placeholder='Search']") # otro atributo tbx__search = driver.find_element(By.CSS_SELECTOR, "input#id-search-field.search-field[placeholder='Search']") # más de un atributo a la vez tbx__search = driver.find_element(By.CSS_SELECTOR, "input[placeholder^='Sea']") # empieza con tbx__search = driver.find_element(By.CSS_SELECTOR, "input[placeholder$='arch']") # termina con tbx__search = driver.find_element(By.CSS_SELECTOR, "input[placeholder*='ear']") # contiene # xpath tbx__search = driver.find_element(By.XPATH, "//input[@placeholder='Search']") # - # Cundo el identificador no es 
único se puede coger una lista de elementos con el siguiente método: # + lista_de_elementos = driver.find_elements(By.CSS_SELECTOR, "input#id-search-field") # devuelve un lista tbx__search = lista_de_elementos[0] # en este caso es 0 porque queremos el primero, pondríamos 1 si fuera el segundo, etc. # - # escribir en el buscador y pulsar Enter: # + from selenium.webdriver.common.by import By tbx__search = driver.find_element(By.ID, "id-search-field") tbx__search.clear() tbx__search.send_keys("no va a haber resultados para esta frase") from selenium.webdriver.common.keys import Keys # con esta línea importamos la clase Keys de selenium, que nos hará fata a continuación tbx__search.send_keys(Keys.RETURN) # pulsar Enter # - # escribir en el buscador y clicar en 'go': # + tbx__search = driver.find_element(By.ID, "id-search-field") tbx__search.clear() tbx__search.send_keys("tampoco va a haber resultados para esta frase") btn__go = driver.find_element(By.ID, "submit") btn__go.click() # - # más comprobaciones: # + print("···") print( driver.page_source[21500:21800] ) print("···") assert "No results found." in driver.page_source # + txt__results = driver.find_element(By.CSS_SELECTOR, "#content > div > section > form > ul > p") print(txt__results.text) assert "No results found." 
in txt__results.text # - # cerrar el navegador: driver.quit() # ## Page objects # ___ # vamos a transformar el portal http://www.python.org en un pageobject # + class PortalPage(object): # constructor def __init__(self, driver): self.driver = driver # elementos self.tbx__search = lambda: self.driver.find_element(By.ID, "id-search-field") self.btn__go = lambda: self.driver.find_element(By.ID, "submit") # métodos def get(self): driver.get("http://www.python.org") return self def search(self, text): self.tbx__search().clear() self.tbx__search().send_keys(text) self.btn__go().click() class ResultsPage(object): def __init__(self, driver): self.driver = driver self.txt__results = lambda: self.driver.find_element(By.CSS_SELECTOR, "#content > div > section > form > ul > p") def assert_no_results_visible(self): assert self.txt__results().is_displayed() assert "No results found." in self.txt__results().text # + import time driver = webdriver.Chrome() try: page__portal = PortalPage(driver).get() page__portal.search("algo que no existe") page__results = ResultsPage(driver) page__results.assert_no_results_visible() finally: time.sleep(3) driver.close() # - # el flujo utilizando solo los atributos sería así: # + import time driver = webdriver.Chrome() try: driver.get("http://www.python.org") # Portal page__portal = PortalPage(driver) page__portal.tbx__search().clear() page__portal.tbx__search().send_keys("<PASSWORD> no existe") page__portal.btn__go().click() # Results assert "No results found." 
in ResultsPage(driver).txt__results().text print("No results found.") finally: time.sleep(3) driver.close() # - # otro ejemplo de uso: # # dada esta web (http://the-internet.herokuapp.com/login), crear debajo un script que habrá la página se loguee y luego desloguee (incluyendo asserciones) # + from selenium import webdriver driver = webdriver.Chrome() try: driver.get("http://the-internet.herokuapp.com/login") tbx__search = driver.find_element(By.NAME, "username") tbx__search.send_keys("tomsmith") tbx__search = driver.find_element(By.NAME, "password") tbx__search.send_keys("<PASSWORD>!") btn__login = driver.find_element(By.CSS_SELECTOR, "#login > button > i") btn__login.click() btn__logout = driver.find_element(By.CSS_SELECTOR, "#content > div > a > i") btn__logout.click() finally: driver.quit() # - # usando pageobjects quedaría así: # + from selenium import webdriver from selenium.webdriver.common.keys import Keys class Portal(object): def __init__(self, driver): self.driver = driver self.tbx__username = lambda: self.driver.find_element(By.NAME, "username") self.tbx__password = lambda: self.driver.find_element(By.NAME, "password") self.btn__login = lambda: self.driver.find_element(By.CSS_SELECTOR, "#login > button > i") def get(self): self.driver.get("https://the-internet.herokuapp.com/login") return self def login(self, username, password): self.tbx__username().clear() self.tbx__username().send_keys(username) self.tbx__password().clear() self.tbx__password().send_keys(password) self.btn__login().click() class SecureArea(object): def __init__(self, driver): self.driver = driver self.btn__logout = lambda: self.driver.find_element(By.XPATH, "//*[@id='content']/div/a") def assert_page_loaded(self): assert self.btn__logout().is_displayed() driver = webdriver.Chrome() try: page__portal = Portal(driver).get() page__portal.login("tomsmith", "<PASSWORD>!") page__secure_area = SecureArea(driver) page__secure_area.assert_page_loaded() page__secure_area.btn__logout().click() 
finally: driver.quit()
test-lab/05 - selenium.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 import os from keras.models import load_model import numpy as np from pygame import mixer import time from keras.preprocessing.image import img_to_array import imutils from keras.models import load_model # + #mixer.init() #sound = mixer.Sound('alarm.wav') # parameters for loading data and images detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml' emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5' face = cv2.CascadeClassifier('haar cascade files\haarcascade_frontalface_alt.xml') leye = cv2.CascadeClassifier('haar cascade files\haarcascade_lefteye_2splits.xml') reye = cv2.CascadeClassifier('haar cascade files\haarcascade_righteye_2splits.xml') glass_cascade= cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml') def detect(gray,pic): fc=cv2.CascadeClassifier('haarcascade_files/haarcascade_frontalface_default.xml') face=fc.detectMultiScale(gray,1.3,5) for (x,y,w,h) in face: cv2.rectangle(pic,(x,y),(x+w,y+h),(255,0,0),2) converted_gray=gray[y:y+h,x:x+w] converted_color=pic[y:y+h,x:x+w] glass = glass_cascade.detectMultiScale(converted_gray,1.04,5) for(ex,ey,ew,eh) in glass: imag=cv2.rectangle(converted_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) cv2.putText(imag,'Attentive',(ex,ey-10),cv2.FONT_HERSHEY_SIMPLEX,0.9,(36,255,12),2) return pic # + # hyper-parameters for bounding boxes shape # loading models face_detection = cv2.CascadeClassifier(detection_model_path) emotion_classifier = load_model(emotion_model_path, compile=False) EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised", "neutral"] lbl=['Close','Open'] model = load_model('/models/cnn.py/') path = os.getcwd() cap = cv2.VideoCapture(0) font = cv2.FONT_HERSHEY_COMPLEX_SMALL count=0 score=0 thicc=2 rpred=[99] lpred=[99] # + # 
starting video streaming cv2.namedWindow('your_face') camera = cv2.VideoCapture(0) while True: frame = camera.read()[1] #reading the frame frame = imutils.resize(frame,width=300) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) draw=detect(gray,frame) cv2.imshow('Video',draw) faces = face_detection.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE) #left_eye = leye.detectMultiScale(gray) #right_eye = reye.detectMultiScale(gray) canvas = np.zeros((250, 300, 3), dtype="uint8") frameClone = frame.copy() if len(faces) > 0: faces = sorted(faces, reverse=True, key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0] (fX, fY, fW, fH) = faces # - # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare # the ROI for classification via the CNN roi = gray[fY:fY + fH, fX:fX + fW] roi = cv2.resize(roi, (64, 64)) roi = roi.astype("float") / 255.0 roi = img_to_array(roi) roi = np.expand_dims(roi, axis=0) preds = emotion_classifier.predict(roi)[0] emotion_probability = np.max(preds) label = EMOTIONS[preds.argmax()] else: continue for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)): # construct the label text text = "{}: {:.2f}%".format(emotion, prob * 100) # draw the label + probability bar on the canvas # emoji_face = feelings_faces[np.argmax(preds)] w = int(prob * 300) cv2.rectangle(canvas, (7, (i * 35) + 5), (w, (i * 35) + 35), (0, 0, 255), -1) cv2.putText(canvas, text, (10, (i * 35) + 23), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 2) cv2.putText(frameClone, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2) cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2) height,width = frame.shape[:2] gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face.detectMultiScale(gray,minNeighbors=5,scaleFactor=1.1,minSize=(25,25)) left_eye = leye.detectMultiScale(gray) right_eye = reye.detectMultiScale(gray) cv2.rectangle(frame, (0,height-50) , 
(200,height) , (0,0,0) , thickness=cv2.FILLED ) for (x,y,w,h) in faces: cv2.rectangle(frame, (x,y) , (x+w,y+h) , (100,100,100) , 1 ) for (x,y,w,h) in right_eye: r_eye=frame[y:y+h,x:x+w] count=count+1 r_eye = cv2.cvtColor(r_eye,cv2.COLOR_BGR2GRAY) r_eye = cv2.resize(r_eye,(24,24)) r_eye= r_eye/255 r_eye= r_eye.reshape(24,24,-1) r_eye = np.expand_dims(r_eye,axis=0) rpred = model.predict_classes(r_eye) if(rpred[0]==1): lbl='Open' if(rpred[0]==0): lbl='Closed' break for (x,y,w,h) in left_eye: l_eye=frame[y:y+h,x:x+w] count=count+1 l_eye = cv2.cvtColor(l_eye,cv2.COLOR_BGR2GRAY) l_eye = cv2.resize(l_eye,(24,24)) l_eye= l_eye/255 l_eye=l_eye.reshape(24,24,-1) l_eye = np.expand_dims(l_eye,axis=0) lpred = model.predict_classes(l_eye) if(lpred[0]==1): lbl='Open' if(lpred[0]==0): lbl='Closed' break if(rpred[0]==0 and lpred[0]==0): score=score+1 cv2.putText(frame,"Closed",(10,height-20), font, 1,(255,255,255),1,cv2.LINE_AA) # if(rpred[0]==1 or lpred[0]==1): else: score=score-1 cv2.putText(frame,"Open",(10,height-20), font, 1,(255,255,255),1,cv2.LINE_AA) if(score<0): score=0 cv2.putText(frame,'Score:'+str(score),(100,height-20), font, 1,(255,255,255),1,cv2.LINE_AA) if(score>15): #person is feeling sleepy so we beep the alarm cv2.imwrite(os.path.join(path,'image.jpg'),frame) try: sound.play() except: # isplaying = False pass if(thicc<16): thicc= thicc+2 else: thicc=thicc-2 if(thicc<2): thicc=2 cv2.rectangle(frame,(0,0),(width,height),(0,0,255),thicc) gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) draw=detect(gray,frame) cv2.imshow('Video',draw) # + #cv2.imshow('frame',frame) cv2.imshow('your_face', frameClone) cv2.imshow("Probabilities", canvas) if cv2.waitKey(1) & 0xFF == ord('q'): break camera.release() cv2.destroyAllWindows() # -
.ipynb_checkpoints/Our Project Attention-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # As we've seen, both Series and DataFrames can have indices applied to them. The index is essentially a row level # label, and in pandas the rows correspond to axis zero. Indices can either be either autogenerated, such as when # we create a new Series without an index, in which case we get numeric values, or they can be set explicitly, like # when we use the dictionary object to create the series, or when we loaded data from the CSV file and set # appropriate parameters. Another option for setting an index is to use the set_index() function. This function # takes a list of columns and promotes those columns to an index. In this lecture we'll explore more about how # indexes work in pandas. # + # The set_index() function is a destructive process, and it doesn't keep the current index. # If you want to keep the current index, you need to manually create a new column and copy into # it values from the index attribute. # Lets import pandas and our admissions dataset import pandas as pd df = pd.read_csv("datasets/Admission_Predict.csv", index_col=0) df.head() # + # Let's say that we don't want to index the DataFrame by serial numbers, but instead by the # chance of admit. But lets assume we want to keep the serial number for later. So, lets # preserve the serial number into a new column. We can do this using the indexing operator # on the string that has the column label. 
Then we can use the set_index to set index # of the column to chance of admit # So we copy the indexed data into its own column df['Serial Number'] = df.index # Then we set the index to another column df = df.set_index('Chance of Admit ') df.head() # + # You'll see that when we create a new index from an existing column the index has a name, # which is the original name of the column. # We can get rid of the index completely by calling the function reset_index(). This promotes the # index into a column and creates a default numbered index. df = df.reset_index() df.head() # + # One nice feature of Pandas is multi-level indexing. This is similar to composite keys in # relational database systems. To create a multi-level index, we simply call set index and # give it a list of columns that we're interested in promoting to an index. # Pandas will search through these in order, finding the distinct data and form composite indices. # A good example of this is often found when dealing with geographical data which is sorted by # regions or demographics. # Let's change data sets and look at some census data for a better example. This data is stored in # the file census.csv and comes from the United States Census Bureau. In particular, this is a # breakdown of the population level data at the US county level. It's a great example of how # different kinds of data sets might be formatted when you're trying to clean them. # Let's import and see what the data looks like df = pd.read_csv('datasets/census.csv') df.head() # + # In this data set there are two summarized levels, one that contains summary # data for the whole country. And one that contains summary data for each state. # I want to see a list of all the unique values in a given column. In this # DataFrame, we see that the possible values for the sum level are using the # unique function on the DataFrame. 
This is similar to the SQL distinct operator # Here we can run unique on the sum level of our current DataFrame df['SUMLEV'].unique() # + # We see that there are only two different values, 40 and 50 # - # Let's exclue all of the rows that are summaries # at the state level and just keep the county data. df=df[df['SUMLEV'] == 50] df.head() # + # Also while this data set is interesting for a number of different reasons, # let's reduce the data that we're going to look at to just the total population # estimates and the total number of births. We can do this by creating # a list of column names that we want to keep then project those and # assign the resulting DataFrame to our df variable. columns_to_keep = ['STNAME','CTYNAME','BIRTHS2010','BIRTHS2011','BIRTHS2012','BIRTHS2013', 'BIRTHS2014','BIRTHS2015','POPESTIMATE2010','POPESTIMATE2011', 'POPESTIMATE2012','POPESTIMATE2013','POPESTIMATE2014','POPESTIMATE2015'] df = df[columns_to_keep] df.head() # + # The US Census data breaks down population estimates by state and county. We can load the data and # set the index to be a combination of the state and county values and see how pandas handles it in # a DataFrame. We do this by creating a list of the column identifiers we want to have indexed. And then # calling set index with this list and assigning the output as appropriate. We see here that we have # a dual index, first the state name and second the county name. df = df.set_index(['STNAME', 'CTYNAME']) df.head() # + # An immediate question which comes up is how we can query this DataFrame. We saw previously that # the loc attribute of the DataFrame can take multiple arguments. And it could query both the # row and the columns. When you use a MultiIndex, you must provide the arguments in order by the # level you wish to query. Inside of the index, each column is called a level and the outermost # column is level zero. 
# If we want to see the population results from Washtenaw County in Michigan the state, which is # where I live, the first argument would be Michigan and the second would be Washtenaw County df.loc['Michigan', 'Washtenaw County'] # + # If you are interested in comparing two counties, for example, Washtenaw and Wayne County, we can # pass a list of tuples describing the indices we wish to query into loc. Since we have a MultiIndex # of two values, the state and the county, we need to provide two values as each element of our # filtering list. Each tuple should have two elements, the first element being the first index and # the second element being the second index. # Therefore, in this case, we will have a list of two tuples, in each tuple, the first element is # Michigan, and the second element is either Washtenaw County or Wayne County df.loc[ [('Michigan', 'Washtenaw County'), ('Michigan', 'Wayne County')] ] # - # Okay so that's how hierarchical indices work in a nutshell. They're a special part of the pandas library which I # think can make management and reasoning about data easier. Of course hierarchical labeling isn't just for rows. # For example, you can transpose this matrix and now have hierarchical column labels. And projecting a single # column which has these labels works exactly the way you would expect it to. Now, in reality, I don't tend to use # hierarchical indicies very much, and instead just keep everything as columns and manipulate those. But, it's a # unique and sophisticated aspect of pandas that is useful to know, especially if viewing your data in a tabular # form.
Course - 1: Introduction to Data Science in Python/resources/week-2/IndexingDataFrame_ed.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.5 64-bit
#     name: python3
# ---

# # Work with scikit-learn
#
# Our code can work with the famous package `sklearn` easily, and here is an example.

from abess.linear import LogisticRegression
from sklearn.datasets import load_breast_cancer
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score, make_scorer, roc_curve
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt

# - Establish the process
#
# Suppose we would like to extend the original variables to their interactions,
# and then do `LogisticRegression` on them. This can be recorded with `Pipeline`:

pipe = Pipeline([
    ('poly', PolynomialFeatures(include_bias=False)),  # without intercept
    ('alogistic', LogisticRegression())
])

# - Parameter grid
#
# We can give different parameters to model and let the program choose the best.
# Here we should give parameters for `PolynomialFeatures`, for example:

param_grid = {
    # whether the "self-combination" (e.g. X^2, X^3) exists
    'poly__interaction_only': [True, False],
    'poly__degree': [1, 2, 3]  # the degree of polynomial
}

# Note that the program would try ALL combinations of what we give, which means
# that there are $2\times3=6$ combinations of parameters will be tried.
#
# - Criterion
#
# After giving a grid of parameters, we should define what is a "better" result.
# For example, the AUC (area under ROC curve) can be a criterion and the larger,
# the better.

scorer = make_scorer(roc_auc_score, greater_is_better=True)

# - Cross Validation
#
# For more accurate results, cross validation is often formed. In this example,
# we use 5-fold CV for parameters searching:

grid_search = GridSearchCV(pipe, param_grid, scoring=scorer, cv=5)

# - Model fitting
#
# Everything is prepared now. We can simply load the data and put it into
# `grid_search`:

X, y = load_breast_cancer(return_X_y=True)
grid_search.fit(X, y)
print([grid_search.best_score_, grid_search.best_params_])

# The best choice of parameter combination is shown above: 2 degree with
# "self-combination". Here is its ROC curve:

proba = grid_search.predict_proba(X)
# BUG FIX: `roc_curve` requires a 1-D array of positive-class scores. Estimators
# following the scikit-learn convention return an (n_samples, 2) probability
# matrix from `predict_proba`, which `roc_curve` rejects. Select column 1 (the
# positive class) when the matrix form is returned; pass 1-D output through
# unchanged so older abess releases keep working.
pos_score = proba[:, 1] if getattr(proba, "ndim", 1) == 2 else proba
fpr, tpr, _ = roc_curve(y, pos_score)
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], 'k--')
plt.show()
docs/Tutorial/work_with_sklearn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Errors and Exceptions Homework - Solution # ### Problem 1 # Handle the exception thrown by the code below by using <code>try</code> and <code>except</code> blocks. try: for i in ['a','b','c']: print(i**2) except: print("An error occurred!") # ### Problem 2 # Handle the exception thrown by the code below by using <code>try</code> and <code>except</code> blocks. Then use a <code>finally</code> block to print 'All Done.' x = 5 y = 0 try: z = x/y except ZeroDivisionError: print("Can't divide by Zero!") finally: print('All Done!') # ### Problem 3 # Write a function that asks for an integer and prints the square of it. Use a <code>while</code> loop with a <code>try</code>, <code>except</code>, <code>else</code> block to account for incorrect inputs. def ask(): while True: try: n = int(input('Input an integer: ')) except: print('An error occurred! Please try again!') continue else: break print('Thank you, your number squared is: ',n**2) ask() # # Great Job!
Complete-Python-3-Bootcamp-master/06-Errors and Exception Handling/03-Errors and Exceptions Homework - Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Please find jax implementation of this notebook here: https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/book1/14/resnet_jax.ipynb

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/Nirzu97/pyprobml/blob/resnet-torch/notebooks/resnet_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="7EQDIeH6vSKO"
# # Residual networks
#
# We implement residual network CNN.
#
# Based on 7.6 of http://d2l.ai/chapter_convolutional-modern/resnet.html

# + id="5eoGnFmRvRns"
import numpy as np
import matplotlib.pyplot as plt
import math
from IPython import display

try:
    import torch
except ModuleNotFoundError:
    # %pip install -qq torch
    import torch
try:
    import torchvision
except ModuleNotFoundError:
    # %pip install -qq torchvision
    import torchvision

from torch import nn
from torch.nn import functional as F
from torch.utils import data
from torchvision import transforms
import random
import os
import time

# Seed NumPy and torch so the shape demos and training below are reproducible.
np.random.seed(seed=1)
torch.manual_seed(1)

# !mkdir figures # for saving plots

# + [markdown] id="UdZEPBsbvmUm"
# # Residual block

# + id="kewykWLNvfAl"
class Residual(nn.Module):
    """The Residual block of ResNet."""

    def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):
        super().__init__()
        # Main branch: two 3x3 convolutions; only the first may downsample
        # (stride > 1) or change the channel count.
        self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)
        if use_1x1conv:
            # Shortcut projection: a 1x1 convolution with the same stride so the
            # identity branch matches the main branch's shape before the add.
            self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)
        else:
            # None is falsy, so `if self.conv3:` below skips the projection.
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)

    def forward(self, X):
        # conv -> BN -> ReLU, then conv -> BN (second ReLU comes after the add).
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        # Residual connection: add the (possibly projected) input back in.
        Y += X
        return F.relu(Y)

# + [markdown] id="Ubwg53Uxvthh"
# Example where number of input and output channels is the same.

# + colab={"base_uri": "https://localhost:8080/"} id="i8nJa6MMvvkR" outputId="c1043aff-2fc9-4cf4-cc30-c3cf11a77e79"
blk = Residual(3, 3)
X = torch.rand(4, 3, 6, 6)  # batch size x channels x height x width
Y = blk(X)
Y.shape

# + [markdown] id="DU4gZgG7vyq8"
# Example where we change the number of channels.

# + colab={"base_uri": "https://localhost:8080/"} id="zSQPWj2dvv90" outputId="0e757838-2770-442e-8bbc-aa9f18252e9b"
blk = Residual(3, 6, use_1x1conv=True)
blk(X).shape

# + [markdown] id="Hkfnt2awwBQp"
# Example where we change the number of channels and the spatial size.

# + colab={"base_uri": "https://localhost:8080/"} id="3m5B8Mfiv2IC" outputId="62cbadc1-ae36-4b48-b2f8-38c021ba8b46"
blk = Residual(3, 6, use_1x1conv=True, strides=2)
blk(X).shape

# + [markdown] id="iZwA0xexwN5G"
# # Resnet block
#
# We define a resnet block to be a sequence of residual blocks, where the first element in the sequence has a 1x1 convolution. However, the first such resnet block does not have 1x1 conv.
# + id="7CO_qY2zwTmf" def resnet_block(input_channels, num_channels, num_residuals, first_block=False): blk = [] for i in range(num_residuals): if i == 0 and not first_block: blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2)) else: blk.append(Residual(num_channels, num_channels)) return blk # + [markdown] id="HqUv07jrwdcu" # # The full resnet18 model # + id="hOwb9tKCwUWU" b1 = nn.Sequential( nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ) b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True)) b3 = nn.Sequential(*resnet_block(64, 128, 2)) b4 = nn.Sequential(*resnet_block(128, 256, 2)) b5 = nn.Sequential(*resnet_block(256, 512, 2)) # We assume 10 output classes (for MNIST) net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(), nn.Linear(512, 10)) # + colab={"base_uri": "https://localhost:8080/"} id="h5PtiGofxML8" outputId="5d2a53bf-216c-4580-ac1e-17233ae666d9" X = torch.rand(size=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__, "output shape:\t", X.shape) # + [markdown] id="w__7JNTxxVzB" # # Train on Fashion-MNIST # # We upscale images from 28x28 to 96x96, so that the input to the global average pooling layer has size 3x3 (since the network downscales by a factor of 32). 
# + id="ilYJZjkq6C9D" def load_data_fashion_mnist(batch_size, resize=None): """Download the Fashion-MNIST dataset and then load it into memory.""" trans = [transforms.ToTensor()] if resize: trans.insert(0, transforms.Resize(resize)) trans = transforms.Compose(trans) mnist_train = torchvision.datasets.FashionMNIST(root="../data", train=True, transform=trans, download=True) mnist_test = torchvision.datasets.FashionMNIST(root="../data", train=False, transform=trans, download=True) return ( data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=4), data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=4), ) # + id="tA_QeZH5rp1o" class Animator: """For plotting data in animation.""" def __init__( self, xlabel=None, ylabel=None, legend=None, xlim=None, ylim=None, xscale="linear", yscale="linear", fmts=("-", "m--", "g-.", "r:"), nrows=1, ncols=1, figsize=(3.5, 2.5), ): # Incrementally plot multiple lines if legend is None: legend = [] display.set_matplotlib_formats("svg") self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize) if nrows * ncols == 1: self.axes = [ self.axes, ] # Use a lambda function to capture arguments self.config_axes = lambda: set_axes(self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend) self.X, self.Y, self.fmts = None, None, fmts def add(self, x, y): # Add multiple data points into the figure if not hasattr(y, "__len__"): y = [y] n = len(y) if not hasattr(x, "__len__"): x = [x] * n if not self.X: self.X = [[] for _ in range(n)] if not self.Y: self.Y = [[] for _ in range(n)] for i, (a, b) in enumerate(zip(x, y)): if a is not None and b is not None: self.X[i].append(a) self.Y[i].append(b) self.axes[0].cla() for x, y, fmt in zip(self.X, self.Y, self.fmts): self.axes[0].plot(x, y, fmt) self.config_axes() display.display(self.fig) display.clear_output(wait=True) class Timer: """Record multiple running times.""" def __init__(self): self.times = [] self.start() def start(self): """Start the timer.""" self.tik 
= time.time() def stop(self): """Stop the timer and record the time in a list.""" self.times.append(time.time() - self.tik) return self.times[-1] def avg(self): """Return the average time.""" return sum(self.times) / len(self.times) def sum(self): """Return the sum of time.""" return sum(self.times) def cumsum(self): """Return the accumulated time.""" return np.array(self.times).cumsum().tolist() class Accumulator: """For accumulating sums over `n` variables.""" def __init__(self, n): self.data = [0.0] * n def add(self, *args): self.data = [a + float(b) for a, b in zip(self.data, args)] def reset(self): self.data = [0.0] * len(self.data) def __getitem__(self, idx): return self.data[idx] # + id="FqnIuSGwxG0L" def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend): """Set the axes for matplotlib.""" axes.set_xlabel(xlabel) axes.set_ylabel(ylabel) axes.set_xscale(xscale) axes.set_yscale(yscale) axes.set_xlim(xlim) axes.set_ylim(ylim) if legend: axes.legend(legend) axes.grid() # + id="sLX1MPdU7bp-" def try_gpu(i=0): """Return gpu(i) if exists, otherwise return cpu().""" if torch.cuda.device_count() >= i + 1: return torch.device(f"cuda:{i}") return torch.device("cpu") # + id="vBqC89zOr_Si" def accuracy(y_hat, y): """Compute the number of correct predictions.""" if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = torch.argmax(y_hat, axis=1) cmp_ = y_hat.type(y.dtype) == y return float(cmp_.type(y.dtype).sum()) def evaluate_accuracy_gpu(net, data_iter, device=None): """Compute the accuracy for a model on a dataset using a GPU.""" if isinstance(net, torch.nn.Module): net.eval() # Set the model to evaluation mode if not device: device = next(iter(net.parameters())).device # No. of correct predictions, no. 
of predictions metric = Accumulator(2) for X, y in data_iter: X = X.to(device) y = y.to(device) metric.add(accuracy(net(X), y), y.numel()) return metric[0] / metric[1] # + [markdown] id="yUaWdoFC--Hy" # ## Training Function # + id="qoVfKNjN6fHS" def train(net, train_iter, test_iter, num_epochs, lr, device): """Train a model with a GPU (defined in Chapter 6).""" def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2d: nn.init.xavier_uniform_(m.weight) net.apply(init_weights) print("training on", device) net.to(device) optimizer = torch.optim.SGD(net.parameters(), lr=lr) loss = nn.CrossEntropyLoss() animator = Animator(xlabel="epoch", xlim=[1, num_epochs], legend=["train loss", "train acc", "test acc"]) timer, num_batches = Timer(), len(train_iter) for epoch in range(num_epochs): # Sum of training loss, sum of training accuracy, no. of examples metric = Accumulator(3) net.train() for i, (X, y) in enumerate(train_iter): timer.start() optimizer.zero_grad() X, y = X.to(device), y.to(device) y_hat = net(X) l = loss(y_hat, y) l.backward() optimizer.step() with torch.no_grad(): metric.add(l * X.shape[0], accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc)) print(f"loss {train_l:.3f}, train acc {train_acc:.3f}, " f"test acc {test_acc:.3f}") print(f"{metric[2] * num_epochs / timer.sum():.1f} examples/sec " f"on {str(device)}") # + [markdown] id="MMfLc2KE9DHN" # # Learning curve # + colab={"base_uri": "https://localhost:8080/", "height": 295, "referenced_widgets": ["2711b080dba54e6eaef97aad3623104f", "02bb50e833fc46749bb2fde99cc73c17", "985c9df935824a67902e4042c7fc710e", "990d20060661487491b9be65b91ab418", "01e2b452adee41c8945640edf942c219", "8b791e021458486c8af7877ab0766679", 
"5a761e5cc74a43d3b3fb47b1f73eb327", "<KEY>", "<KEY>", "b67a8f40f8164f29b608aab4d51c457e", "<KEY>", "62fee43c585b4deca7f69da88fefe1f2", "40a8429325624c79a6fd961e93730f6b", "87da13a58ffa4c92a21dc64940c483eb", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "f09947254a15473fbe251418f208c7d3", "a276a6692f674a9e9b6a84c4777817a3", "9127d0dfc6e44ca29bb474b6c57a5572", "b02cf2ac897e4ca98732005718f5256a", "<KEY>", "736ff67d1b3c4267ae33f1e21a56adc9", "<KEY>", "3a5957ade2984463812348db6da51600", "d20596ba38914c3a932c385e5b6c562d", "<KEY>", "3446d2c4640e4f2982578248e04f497d", "5abf5202b063405483a494dc78474a3d", "4b1902d98a9c49a882918b75574b596a", "<KEY>"]} id="13v5bACDxMY8" outputId="a5567947-5543-4885-a371-7cb9c182b2da" lr, num_epochs, batch_size = 0.05, 10, 256 train_iter, test_iter = load_data_fashion_mnist(batch_size, resize=96) train(net, train_iter, test_iter, num_epochs, lr, try_gpu())
notebooks/book1/14/resnet_torch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #default_exp data.core # - #export from local.torch_basics import * from local.test import * from local.data.load import * from local.notebook.showdoc import * # # Data core # # > Core functionality for gathering data # The classes here provide functionality for applying a list of transforms to a set of items (`TfmdList`, `DataSource`) or a `DataLoader` (`TfmdDl`) as well as the base class used to gather the data for model training: `DataBunch`. # ## TfmdDL - #export @typedispatch def show_batch(x, y, samples, ctxs=None, max_n=10, **kwargs): if ctxs is None: ctxs = Inf.nones for i in range_of(samples[0]): ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))] return ctxs # `show_batch` is a type-dispatched function that is responsible for showing decoded `samples`. `x` and `y` are the input and the target in the batch to be shown, and are passed along to dispatch on their types. There is a different implementation of `show_batch` if `x` is a `TensorImage` or a `TensorText` for instance (see vision.core or text.data for more details). `ctxs` can be passed but the function is responsible to create them if necessary. `kwargs` depend on the specific implementation. #export @typedispatch def show_results(x, y, samples, outs, ctxs=None, max_n=10, **kwargs): if ctxs is None: ctxs = Inf.nones for i in range(len(samples[0])): ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))] for i in range(len(outs[0])): ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))] return ctxs # `show_results` is a type-dispatched function that is responsible for showing decoded `samples` and their corresponding `outs`. 
Like in `show_batch`, `x` and `y` are the input and the target in the batch to be shown, and are passed along to dispatch on their types. `ctxs` can be passed but the function is responsible to create them if necessary. `kwargs` depend on the specific implementation. #export _all_ = ["show_batch", "show_results"] #export _batch_tfms = ('after_item','before_batch','after_batch') #export @delegates() class TfmdDL(DataLoader): "Transformed `DataLoader`" def __init__(self, dataset, bs=16, shuffle=False, num_workers=None, **kwargs): if num_workers is None: num_workers = min(16, defaults.cpus) for nm in _batch_tfms: kwargs[nm] = Pipeline(kwargs.get(nm,None), as_item=(nm=='before_batch')) super().__init__(dataset, bs=bs, shuffle=shuffle, num_workers=num_workers, **kwargs) for nm in _batch_tfms: kwargs[nm].setup(self) def _one_pass(self): its = self.after_batch(self.do_batch([self.do_item(0)])) self._device = find_device(its) self._n_inp = 1 if not isinstance(its, (list,tuple)) or len(its)==1 else len(its)-1 self._retain_dl = partial(retain_types, typs=mapped(type,its)) def _retain_dl(self,b): self._one_pass() # we just replaced ourselves, so this is *not* recursive! 
:) return self._retain_dl(b) def before_iter(self): super().before_iter() split_idx = getattr(self.dataset, 'split_idx', None) for nm in _batch_tfms: f = getattr(self,nm) if isinstance(f,Pipeline): f.split_idx=split_idx def decode(self, b): return self.before_batch.decode(self.after_batch.decode(self._retain_dl(b))) def decode_batch(self, b, max_n=10, full=True): return self._decode_batch(self.decode(b), max_n, full) def _decode_batch(self, b, max_n=10, full=True): f = self.after_item.decode f = compose(f, partial(getattr(self.dataset,'decode',noop), full = full)) return L(batch_to_samples(b, max_n=max_n)).map(f) def _pre_show_batch(self, b, max_n=10): b = self.decode(b) if hasattr(b, 'show'): return b,None,None its = self._decode_batch(b, max_n, full=False) if not is_listy(b): b,its = [b],L((o,) for o in its) return detuplify(b[:self.n_inp]),detuplify(b[self.n_inp:]),its def show_batch(self, b=None, max_n=10, ctxs=None, **kwargs): "Show `b` (defaults to `one_batch`), a list of lists of pipeline outputs (i.e. 
output of a `DataLoader`)" if b is None: b = self.one_batch() show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs) def show_results(self, b, out, max_n=10, ctxs=None, **kwargs): x,y,its = self._pre_show_batch(b, max_n=max_n) b_out = b[:self.n_inp] + (tuple(out) if is_listy(out) else (out,)) x1,y1,outs = self._pre_show_batch(b_out, max_n=max_n) if its is not None: show_results(x, y, its, outs.itemgot(slice(self.n_inp,None)), ctxs=ctxs, max_n=max_n, **kwargs) #its None means that a batch knows how to show itself as a whole, so we pass x, x1 else: show_results(x, x1, its, outs, ctxs=ctxs, max_n=max_n, **kwargs) @property def device(self): if not hasattr(self, '_device'): _ = self._one_pass() return self._device @property def n_inp(self): if hasattr(self.dataset, 'n_inp'): return self.dataset.n_inp if not hasattr(self, '_n_inp'): self._one_pass() return self._n_inp # A `TfmdDL` is a `DataLoader` that creates `Pipeline` from a list of `Transform`s for the callbacks `after_item`, `before_batch` and `after_batch`. As a result, it can decode or show a processed `batch`. 
add_docs(TfmdDL, decode="Decode `b` using `tfms`", decode_batch="Decode `b` entirely", show_batch="Show each item of `b`", show_results="Show each item of `b` and `out`", before_iter="override") class _Category(int, ShowTitle): pass # + #Test retain type class NegTfm(Transform): def encodes(self, x): return torch.neg(x) def decodes(self, x): return torch.neg(x) tdl = TfmdDL([(TensorImage([1]),)] * 4, after_batch=NegTfm(), bs=4, num_workers=4) b = tdl.one_batch() test_eq(type(b[0]), TensorImage) b = (tensor([1.,1.,1.,1.]),) test_eq(type(tdl.decode_batch(b)[0][0]), TensorImage) # + class A(Transform): def encodes(self, x): return x def decodes(self, x): return Int(x) @Transform def f(x)->None: return Tuple((x,x)) start = torch.arange(50) test_eq_type(f(2), Tuple((2,2))) # + a = A() tdl = TfmdDL(start, after_item=lambda x: (a(x), f(x)), bs=4) x,y = tdl.one_batch() test_eq(type(y), Tuple) s = tdl.decode_batch((x,y)) test_eq(type(s[0][1]), Tuple) # - tdl = TfmdDL(torch.arange(0,50), after_item=A(), after_batch=NegTfm(), bs=4) test_eq(tdl.dataset[0], start[0]) test_eq(len(tdl), (50-1)//4+1) test_eq(tdl.bs, 4) test_stdout(tdl.show_batch, '0\n1\n2\n3') # ### Methods show_doc(TfmdDL.one_batch) tfm = NegTfm() tdl = TfmdDL(start, after_batch=tfm, bs=4) b = tdl.one_batch() test_eq(tensor([0,-1,-2,-3]), b) show_doc(TfmdDL.decode) test_eq(tdl.decode(b), tensor(0,1,2,3)) show_doc(TfmdDL.decode_batch) test_eq(tdl.decode_batch(b), [0,1,2,3]) show_doc(TfmdDL.show_batch) # ## DataBunch - # export @docs class DataBunch(GetAttr): "Basic wrapper around several `DataLoader`s." 
_default='train_dl' def __init__(self, *dls, path='.'): self.dls,self.path = dls,Path(path) def __getitem__(self, i): return self.dls[i] def new_empty(self): dls = [dl.new(dl.dataset.new_empty()) for dl in self.dls] return type(self)(*dls) train_dl,valid_dl = add_props(lambda i,x: x[i]) train_ds,valid_ds = add_props(lambda i,x: x[i].dataset) @classmethod @delegates(TfmdDL.__init__) def from_dblock(cls, dblock, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs): return dblock.databunch(source, path=path, type_tfms=type_tfms, item_tfms=item_tfms, batch_tfms=batch_tfms, **kwargs) _docs=dict(__getitem__="Retrieve `DataLoader` at `i` (`0` is training, `1` is validation)", train_dl="Training `DataLoader`", valid_dl="Validation `DataLoader`", train_ds="Training `Dataset`", valid_ds="Validation `Dataset`", new_empty="Create a new empty version of `self` with the same transforms", from_dblock="Create a databunch from a given `dblock`") dbch = DataBunch(tdl,tdl) x = dbch.train_dl.one_batch() x2 = first(tdl) test_eq(x,x2) x2 = dbch.one_batch() test_eq(x,x2) # ### Methods show_doc(DataBunch.__getitem__) x2 = dbch[0].one_batch() test_eq(x,x2) show_doc(DataBunch.train_dl, name="train_dl") show_doc(DataBunch.valid_dl, name="valid_dl") show_doc(DataBunch.train_ds, name="train_ds") show_doc(DataBunch.valid_ds, name="valid_ds") # ## TfmdList - # + #export class FilteredBase: "Base class for lists with subsets" _dl_type = TfmdDL def __init__(self, *args, dl_type=None, **kwargs): if dl_type is not None: self._dl_type = dl_type self.databunch = delegates(self._dl_type.__init__)(self.databunch) super().__init__(*args, **kwargs) @property def n_subsets(self): return len(self.splits) def _new(self, items, **kwargs): return super()._new(items, splits=self.splits, **kwargs) def subset(self): raise NotImplemented def databunch(self, bs=16, val_bs=None, shuffle_train=True, n=None, path='.', dl_type=None, dl_kwargs=None, **kwargs): if dl_kwargs is None: dl_kwargs = 
[{}] * self.n_subsets ns = self.n_subsets-1 bss = [bs] + [2*bs]*ns if val_bs is None else [bs] + [val_bs]*ns shuffles = [shuffle_train] + [False]*ns if dl_type is None: dl_type = self._dl_type dls = [dl_type(self.subset(i), bs=b, shuffle=s, drop_last=s, n=n if i==0 else None, **kwargs, **dk) for i,(b,s,dk) in enumerate(zip(bss,shuffles,dl_kwargs))] return DataBunch(*dls, path=path) FilteredBase.train,FilteredBase.valid = add_props(lambda i,x: x.subset(i), 2) # - #export class TfmdList(FilteredBase, L, GetAttr): "A `Pipeline` of `tfms` applied to a collection of `items`" _default='tfms' def __init__(self, items, tfms, use_list=None, do_setup=True, as_item=True, split_idx=None, train_setup=True, splits=None): super().__init__(items, use_list=use_list) self.splits = L([slice(None),[]] if splits is None else splits).map(mask2idxs) if isinstance(tfms,TfmdList): tfms = tfms.tfms if isinstance(tfms,Pipeline): do_setup=False self.tfms = Pipeline(tfms, as_item=as_item, split_idx=split_idx) if do_setup: self.setup(train_setup=train_setup) def _new(self, items, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, **kwargs) def subset(self, i): return self._new(self._get(self.splits[i]), split_idx=i) def _after_item(self, o): return self.tfms(o) def __repr__(self): return f"{self.__class__.__name__}: {self.items}\ntfms - {self.tfms.fs}" def __iter__(self): return (self[i] for i in range(len(self))) def show(self, o, **kwargs): return self.tfms.show(o, **kwargs) def decode(self, x, **kwargs): return self.tfms.decode(x, **kwargs) def __call__(self, x, **kwargs): return self.tfms.__call__(x, **kwargs) def setup(self, train_setup=True): self.tfms.setup(getattr(self,'train',self) if train_setup else self) def overlapping_splits(self): return L(Counter(self.splits.concat()).values()).filter(gt(1)) def __getitem__(self, idx): res = super().__getitem__(idx) if self._after_item is None: return res return self._after_item(res) if is_indexer(idx) else 
res.map(self._after_item) add_docs(TfmdList, setup="Transform setup with self", decode="From `Pipeline", show="From `Pipeline", overlapping_splits="All splits that are in more than one split", subset="New `TfmdList` that only includes subset `i`") #exports def decode_at(o, idx): "Decoded item at `idx`" return o.decode(o[idx]) #exports def show_at(o, idx, **kwargs): "Show item at `idx`", return o.show(o[idx], **kwargs) # A `TfmdList` combines a collection of object with a `Pipeline`. `tfms` can either be a `Pipeline` or a list of transforms, in which case, it will wrap them in a `Pipeline`. `use_list` is passed along to `L` with the `items`, `as_item` and `split_idx` are passed to each transform of the `Pipeline`. `do_setup` indicates if the `Pipeline.setup` method should be called during initialization. # + class IntFloatTfm(Transform): def encodes(self, x): return Int(x) def decodes(self, x): return Float(x) foo=1 int_tfm=IntFloatTfm() def neg(x): return -x neg_tfm = Transform(neg, neg) class B(Transform): def encodes(self, x): return x+1 def decodes(self, x): return x-1 add1 = B() add1.split_idx = 1 # - tl = TfmdList([1.,2.,3.], [neg_tfm, int_tfm], splits=[[0,2],[1]]) t = tl[1] test_eq_type(t, Int(-2)) test_eq(decode_at(tl, 1), 2) test_eq_type(tl.decode(t), Float(2.0)) assert not tl.overlapping_splits() test_stdout(lambda: show_at(tl, 2), '-3') tl tl = TfmdList([1.,2.,3.], [neg_tfm, int_tfm, add1], splits=[[0,2],[1]]) test_eq(tl[0], -1) test_eq(tl[1], -2) test_eq(tl.valid[0], -1) #add1 is only applied on the validation set p2 = tl.subset(0) test_eq(p2, [-1,-3]) test_eq(map(type, p2), (Int,Int)) test_eq(tl[tensor(1)], tl[1]) df = pd.DataFrame(dict(a=[1,2,3],b=[2,3,4])) tl = TfmdList(df, lambda o: o.a, splits=[[0],[1,2]]) test_eq(tl[1,2], [2,3]) p2 = tl.subset(1) test_eq(p2, [2,3]) # + class B(Transform): def __init__(self): self.a = 2 def encodes(self, x): return x+self.a def decodes(self, x): return x-self.a def setups(self, items): self.a = 
tensor(items).float().mean().item() tl1 = TfmdList([1,2,3,4], B()) test_eq(tl1.tfms[0].a, 2.5) # - tfilts = [tensor([0,2]), [1,3,4]] tl = TfmdList(range(5), tfms=[None], splits=tfilts) test_eq(len(tl.splits), 2) test_eq(tl.subset(0), [0,2]) test_eq(tl.train, [0,2]) # Subset 0 is aliased to `train` test_eq(tl.subset(1), [1,3,4]) test_eq(tl.valid, [1,3,4]) # Subset 1 is aliased to `valid` test_eq(tl.valid[2], 4) # Here's how we can use `TfmdList.setup` to implement a simple category list, getting labels from a mock file list: # + class _Cat(Transform): order = 1 def encodes(self, o): return int(self.o2i[o]) def decodes(self, o): return Str(self.vocab[o]) def setups(self, items): self.vocab,self.o2i = uniqueify(L(items), sort=True, bidir=True) def _lbl(o): return Str(o.split('_')[0]) test_fns = ['dog_0.jpg','cat_0.jpg','cat_2.jpg','cat_1.jpg','dog_1.jpg'] tcat = _Cat() # Check that tfms are sorted by `order` tl = TfmdList(test_fns, [tcat,_lbl]) exp_voc = ['cat','dog'] test_eq(tcat.vocab, exp_voc) test_eq(tl.tfms.vocab, exp_voc) test_eq(tl.vocab, exp_voc) # - test_eq(tl, (1,0,0,0,1)) t = L(tl) test_eq(t, [1,0,0,0,1]) test_eq(tl[-1], 1) test_eq(tl[0,1], (1,0)) test_eq([tl.decode(o) for o in t], ('dog','cat','cat','cat','dog')) test_stdout(lambda:show_at(tl, 0), "dog") tl test_fns = ['dog_0.jpg','cat_0.jpg','cat_2.jpg','cat_1.jpg','dog_1.jpg','kid_05.jpg'] tcat = _Cat() tl = TfmdList(test_fns, [tcat,_lbl], splits=[[0,1,2,3,4], [5]]) #Check only the training set is taken into account for setup test_eq(tcat.vocab, ['cat','dog']) tfm = NegTfm(split_idx=1) tds = TfmdList(start, A()) tdl = TfmdDL(tds, after_batch=tfm, bs=4) x = tdl.one_batch() test_eq(x, torch.arange(4)) tds.split_idx = 1 x = tdl.one_batch() test_eq(x, -torch.arange(4)) tds.split_idx = 0 x = tdl.one_batch() test_eq(x, torch.arange(4)) tds = TfmdList(start, A()) tdl = TfmdDL(tds, after_batch=NegTfm(), bs=4) test_eq(tdl.dataset[0], start[0]) test_eq(len(tdl), (len(tds)-1)//4+1) test_eq(tdl.bs, 4) 
test_stdout(tdl.show_batch, '0\n1\n2\n3')

show_doc(TfmdList.subset)

# ## DataSource -

#export
@docs
@delegates(TfmdList)
class DataSource(FilteredBase):
    "A dataset that creates a tuple from each `tfms`, passed thru `item_tfms`"
    def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
        super().__init__(dl_type=dl_type)
        # One `TfmdList` per transform pipeline; `tls` may be passed directly
        # (as `subset`/`new_empty` do below) to reuse already-set-up lists.
        self.tls = L(tls if tls else [TfmdList(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
        # Default: a single pipeline means one input; otherwise all but the
        # last element of each tuple are inputs.
        self.n_inp = (1 if len(self.tls)==1 else len(self.tls)-1) if n_inp is None else n_inp

    def __getitem__(self, it):
        res = tuple([tl[it] for tl in self.tls])
        # For a collection of indices, transpose to a list of per-item tuples.
        return res if is_indexer(it) else list(zip(*res))

    def __getattr__(self,k): return gather_attrs(self, k, 'tls')
    def __dir__(self): return super().__dir__() + gather_attr_names(self, 'tls')
    def __len__(self): return len(self.tls[0])
    def __iter__(self): return (self[i] for i in range(len(self)))
    def __repr__(self): return coll_repr(self)
    def decode(self, o, full=True): return tuple(tl.decode(o_, full=full) for o_,tl in zip(o,tuplify(self.tls, match=o)))
    def subset(self, i): return type(self)(tls=L(tl.subset(i) for tl in self.tls), n_inp=self.n_inp)
    def _new(self, items, *args, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, **kwargs)
    def overlapping_splits(self): return self.tls[0].overlapping_splits()

    # Splits and items are shared by all `tls`, so delegate to the first one.
    @property
    def splits(self): return self.tls[0].splits
    @property
    def split_idx(self): return self.tls[0].tfms.split_idx
    @property
    def items(self): return self.tls[0].items
    @items.setter
    def items(self, v):
        for tl in self.tls: tl.items = v

    def show(self, o, ctx=None, **kwargs):
        # Each element of the tuple is shown into the same context `ctx`.
        for o_,tl in zip(o,self.tls): ctx = tl.show(o_, ctx=ctx, **kwargs)
        return ctx

    def new_empty(self):
        tls = [tl._new([], split_idx=tl.split_idx) for tl in self.tls]
        return type(self)(tls=tls, n_inp=self.n_inp)

    _docs=dict(
        decode="Compose `decode` of all `tuple_tfms` then all `tfms` on `i`",
        show="Show item `o` in `ctx`",
        databunch="Get a `DataBunch`",
        overlapping_splits="All splits that are in more than one split",
        subset="New `DataSource` that only includes subset `i`",
        new_empty="Create a new empty version of the `self`, keeping only the transforms")

# A `DataSource` creates a tuple from `items` (typically input,target) by applying to them each list of `Transform` (or `Pipeline`) in `tfms`. Note that if `tfms` contains only one list of `tfms`, the items given by `DataSource` will be tuples of one element.
#
# `n_inp` is the number of elements in the tuples that should be considered part of the input and will default to 1 if `tfms` consists of one set of transforms, `len(tfms)-1` otherwise. In most cases, the number of elements in the tuples spit out by `DataSource` will be 2 (for input,target) but it can happen that there is 3 (Siamese networks or tabular data) in which case we need to be able to determine when the inputs end and the targets begin.

items = [1,2,3,4]
dsrc = DataSource(items, [[neg_tfm,int_tfm], [add(1)]])
t = dsrc[0]
test_eq(t, (-1,2))
test_eq(dsrc[0,1,2], [(-1,2),(-2,3),(-3,4)])
test_eq(dsrc.n_inp, 1)
dsrc.decode(t)

class Norm(Transform):
    # Standardize with stats computed from the setup items; `decodes` inverts.
    def encodes(self, o): return (o-self.m)/self.s
    def decodes(self, o): return (o*self.s)+self.m
    def setups(self, items):
        its = tensor(items).float()
        self.m,self.s = its.mean(),its.std()

# +
items = [1,2,3,4]
nrm = Norm()
dsrc = DataSource(items, [[neg_tfm,int_tfm], [neg_tfm,nrm]])
x,y = zip(*dsrc)
test_close(tensor(y).mean(), 0)
test_close(tensor(y).std(), 1)
test_eq(x, (-1,-2,-3,-4,))
test_eq(nrm.m, -2.5)
test_stdout(lambda:show_at(dsrc, 1), '-2')
test_eq(dsrc.m, nrm.m)
test_eq(dsrc.norm.m, nrm.m)
test_eq(dsrc.train.norm.m, nrm.m)

# +
#hide
#Check filtering is properly applied
class B(Transform):
    def encodes(self, x)->None: return int(x+1)
    def decodes(self, x): return Int(x-1)
add1 = B(split_idx=1)

dsrc = DataSource(items, [neg_tfm, [neg_tfm,int_tfm,add1]], splits=[[3],[0,1,2]])
test_eq(dsrc[1], [-2,-2])
test_eq(dsrc.valid[1], [-2,-1])
test_eq(dsrc.valid[[1,1]], [[-2,-1], [-2,-1]])
test_eq(dsrc.train[0], [-4,-4]) # + #hide #Test setup works with train attribute def _lbl(o): return o.split('_')[0] test_fns = ['dog_0.jpg','cat_0.jpg','cat_2.jpg','cat_1.jpg','kid_1.jpg'] tcat = _Cat() dsrc = DataSource(test_fns, [[tcat,_lbl]], splits=[[0,1,2], [3,4]]) test_eq(tcat.vocab, ['cat','dog']) test_eq(dsrc.train, [(1,),(0,),(0,)]) test_eq(dsrc.valid[0], (0,)) test_stdout(lambda: show_at(dsrc.train, 0), "dog") # + inp = [0,1,2,3,4] dsrc = DataSource(inp, tfms=[None]) test_eq(*dsrc[2], 2) # Retrieve one item (subset 0 is the default) test_eq(dsrc[1,2], [(1,),(2,)]) # Retrieve two items by index mask = [True,False,False,True,False] test_eq(dsrc[mask], [(0,),(3,)]) # Retrieve two items by mask # - inp = pd.DataFrame(dict(a=[5,1,2,3,4])) dsrc = DataSource(inp, tfms=attrgetter('a')).subset(0) test_eq(*dsrc[2], 2) # Retrieve one item (subset 0 is the default) test_eq(dsrc[1,2], [(1,),(2,)]) # Retrieve two items by index mask = [True,False,False,True,False] test_eq(dsrc[mask], [(5,),(3,)]) # Retrieve two items by mask #test n_inp inp = [0,1,2,3,4] dsrc = DataSource(inp, tfms=[None]) test_eq(dsrc.n_inp, 1) dsrc = DataSource(inp, tfms=[[None],[None],[None]]) test_eq(dsrc.n_inp, 2) dsrc = DataSource(inp, tfms=[[None],[None],[None]], n_inp=1) test_eq(dsrc.n_inp, 1) # + # splits can be indices dsrc = DataSource(range(5), tfms=[None], splits=[tensor([0,2]), [1,3,4]]) test_eq(dsrc.subset(0), [(0,),(2,)]) test_eq(dsrc.train, [(0,),(2,)]) # Subset 0 is aliased to `train` test_eq(dsrc.subset(1), [(1,),(3,),(4,)]) test_eq(dsrc.valid, [(1,),(3,),(4,)]) # Subset 1 is aliased to `valid` test_eq(*dsrc.valid[2], 4) #assert '[(1,),(3,),(4,)]' in str(dsrc) and '[(0,),(2,)]' in str(dsrc) dsrc # + # splits can be boolean masks (they don't have to cover all items, but must be disjoint) splits = [[False,True,True,False,True], [True,False,False,False,False]] dsrc = DataSource(range(5), tfms=[None], splits=splits) test_eq(dsrc.train, [(1,),(2,),(4,)]) test_eq(dsrc.valid, [(0,)]) # - # 
apply transforms to all items tfm = [[lambda x: x*2,lambda x: x+1]] splits = [[1,2],[0,3,4]] dsrc = DataSource(range(5), tfm, splits=splits) test_eq(dsrc.train,[(3,),(5,)]) test_eq(dsrc.valid,[(1,),(7,),(9,)]) test_eq(dsrc.train[False,True], [(5,)]) # only transform subset 1 class _Tfm(Transform): split_idx=1 def encodes(self, x): return x*2 def decodes(self, x): return Str(x//2) dsrc = DataSource(range(5), [_Tfm()], splits=[[1,2],[0,3,4]]) test_eq(dsrc.train,[(1,),(2,)]) test_eq(dsrc.valid,[(0,),(6,),(8,)]) test_eq(dsrc.train[False,True], [(2,)]) dsrc #hide #Test DataSource pickles dsrc1 = pickle.loads(pickle.dumps(dsrc)) test_eq(dsrc.train, dsrc1.train) test_eq(dsrc.valid, dsrc1.valid) dsrc = DataSource(range(5), [_Tfm(),noop], splits=[[1,2],[0,3,4]]) test_eq(dsrc.train,[(1,1),(2,2)]) test_eq(dsrc.valid,[(0,0),(6,3),(8,4)]) start = torch.arange(0,50) tds = DataSource(start, [A()]) tdl = TfmdDL(tds, after_item=NegTfm(), bs=4) b = tdl.one_batch() test_eq(tdl.decode_batch(b), ((0,),(1,),(2,),(3,))) test_stdout(tdl.show_batch, "0\n1\n2\n3") # + # only transform subset 1 class _Tfm(Transform): split_idx=1 def encodes(self, x): return x*2 dsrc = DataSource(range(8), [None], splits=[[1,2,5,7],[0,3,4,6]]) dbch = dsrc.databunch(bs=4, after_batch=_Tfm(), shuffle_train=False) test_eq(dbch.train_dl, [(tensor([1,2,5, 7]),)]) test_eq(dbch.valid_dl, [(tensor([0,6,8,12]),)]) test_eq(dbch.n_inp, 1) # - # ### Methods items = [1,2,3,4] dsrc = DataSource(items, [[neg_tfm,int_tfm]]) show_doc(DataSource.decode) test_eq(*dsrc[0], -1) test_eq(*dsrc.decode((-1,)), 1) show_doc(DataSource.show) test_stdout(lambda:dsrc.show(dsrc[1]), '-2') # ## Add test set for inference # + # only transform subset 1 class _Tfm1(Transform): split_idx=0 def encodes(self, x): return x*3 dsrc = DataSource(range(8), [[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]]) test_eq(dsrc.train, [(3,),(6,),(15,),(21,)]) test_eq(dsrc.valid, [(0,),(6,),(8,),(12,)]) # - #export def test_set(dsrc, test_items, rm_tfms=0): 
"Create a test set from `test_items` using validation transforms of `dsrc`" test_tls = [tl._new(test_items, split_idx=1) for tl in dsrc.tls[:dsrc.n_inp]] rm_tfms = tuplify(rm_tfms, match=test_tls) for i,j in enumerate(rm_tfms): test_tls[i].tfms.fs = test_tls[i].tfms.fs[j:] return DataSource(tls=test_tls) # + class _Tfm1(Transform): split_idx=0 def encodes(self, x): return x*3 dsrc = DataSource(range(8), [[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]]) test_eq(dsrc.train, [(3,),(6,),(15,),(21,)]) test_eq(dsrc.valid, [(0,),(6,),(8,),(12,)]) #Tranform of the validation set are applied tst = test_set(dsrc, [1,2,3]) test_eq(tst, [(2,),(4,),(6,)]) # + #hide #Test with various input lengths dsrc = DataSource(range(8), [[_Tfm(),_Tfm1()],[_Tfm(),_Tfm1()],[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]]) tst = test_set(dsrc, [1,2,3]) test_eq(tst, [(2,2),(4,4),(6,6)]) dsrc = DataSource(range(8), [[_Tfm(),_Tfm1()],[_Tfm(),_Tfm1()],[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]], n_inp=1) tst = test_set(dsrc, [1,2,3]) test_eq(tst, [(2,),(4,),(6,)]) # + #hide #Test with rm_tfms dsrc = DataSource(range(8), [[_Tfm(),_Tfm()]], splits=[[1,2,5,7],[0,3,4,6]]) tst = test_set(dsrc, [1,2,3]) test_eq(tst, [(4,),(8,),(12,)]) dsrc = DataSource(range(8), [[_Tfm(),_Tfm()]], splits=[[1,2,5,7],[0,3,4,6]]) tst = test_set(dsrc, [1,2,3], rm_tfms=1) test_eq(tst, [(2,),(4,),(6,)]) dsrc = DataSource(range(8), [[_Tfm(),_Tfm()], [_Tfm(),_Tfm()]], splits=[[1,2,5,7],[0,3,4,6]], n_inp=2) tst = test_set(dsrc, [1,2,3], rm_tfms=(1,0)) test_eq(tst, [(2,4),(4,8),(6,12)]) # - #export @delegates(TfmdDL.__init__) def test_dl(dbunch, test_items, rm_type_tfms=0, **kwargs): "Create a test dataloader from `test_items` using validation transforms of `dbunch`" test_ds = test_set(dbunch.valid_ds, test_items, rm_tfms=rm_type_tfms) if isinstance(dbunch.valid_ds, DataSource) else test_items return dbunch.valid_dl.new(test_ds, **kwargs) dsrc = DataSource(range(8), [[_Tfm(),_Tfm1()]], splits=[[1,2,5,7],[0,3,4,6]]) dbunch = 
dsrc.databunch(bs=4) tst_dl = test_dl(dbunch, [2,3,4,5]) test_eq(list(tst_dl), [(tensor([ 4, 6, 8, 10]),)]) #Test you can change transforms tst_dl = test_dl(dbunch, [2,3,4,5], after_item=add1) test_eq(list(tst_dl), [(tensor([ 5, 7, 9, 11]),)]) # ## Export - #hide from local.notebook.export import notebook2script notebook2script(all_fs=True)
dev/05_data_core.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/1_getting_started_with_monk/3)%20Dog%20Vs%20Cat%20Classifier%20Using%20Keras%20Backend.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Table of Contents # # # ## [Install Monk](#0) # # # ## [Importing pytorch backend](#1) # # # ## [Creating and Managing experiments](#2) # # # ## [Training a Cat Vs Dog image classifier](#3) # # # ## [Validating the trained classifier](#4) # # # ## [Running inference on test images](#5) # <a id='0'></a> # # Install Monk # ## Using pip (Recommended) # # - colab (gpu) # - All bakcends: `pip install -U monk-colab` # # # - kaggle (gpu) # - All backends: `pip install -U monk-kaggle` # # # - cuda 10.2 # - All backends: `pip install -U monk-cuda102` # - Gluon bakcned: `pip install -U monk-gluon-cuda102` # - Pytorch backend: `pip install -U monk-pytorch-cuda102` # - Keras backend: `pip install -U monk-keras-cuda102` # # # - cuda 10.1 # - All backend: `pip install -U monk-cuda101` # - Gluon bakcned: `pip install -U monk-gluon-cuda101` # - Pytorch backend: `pip install -U monk-pytorch-cuda101` # - Keras backend: `pip install -U monk-keras-cuda101` # # # - cuda 10.0 # - All backend: `pip install -U monk-cuda100` # - Gluon bakcned: `pip install -U monk-gluon-cuda100` # - Pytorch backend: `pip install -U monk-pytorch-cuda100` # - Keras backend: `pip install -U monk-keras-cuda100` # # # - cuda 9.2 # - All backend: `pip install -U monk-cuda92` # - Gluon bakcned: `pip install -U monk-gluon-cuda92` # - Pytorch backend: `pip install -U monk-pytorch-cuda92` # - Keras backend: 
`pip install -U monk-keras-cuda92` # # # - cuda 9.0 # - All backend: `pip install -U monk-cuda90` # - Gluon bakcned: `pip install -U monk-gluon-cuda90` # - Pytorch backend: `pip install -U monk-pytorch-cuda90` # - Keras backend: `pip install -U monk-keras-cuda90` # # # - cpu # - All backend: `pip install -U monk-cpu` # - Gluon bakcned: `pip install -U monk-gluon-cpu` # - Pytorch backend: `pip install -U monk-pytorch-cpu` # - Keras backend: `pip install -U monk-keras-cpu` # ## Install Monk Manually (Not recommended) # # ### Step 1: Clone the library # - git clone https://github.com/Tessellate-Imaging/monk_v1.git # # # # # ### Step 2: Install requirements # - Linux # - Cuda 9.0 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` # - Cuda 9.2 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` # - Cuda 10.0 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` # - Cuda 10.1 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` # - Cuda 10.2 # - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` # - CPU (Non gpu system) # - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` # # # - Windows # - Cuda 9.0 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` # - Cuda 9.2 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` # - Cuda 10.0 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` # - Cuda 10.1 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` # - Cuda 10.2 (Experimental support) # - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` # - CPU (Non gpu system) # - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` # # # - Mac # - CPU (Non gpu system) # - `cd monk_v1/installation/Mac && pip 
install -r requirements_cpu.txt` # # # - Misc # - Colab (GPU) # - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` # - Kaggle (GPU) # - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` # # # # ### Step 3: Add to system path (Required for every terminal or kernel run) # - `import sys` # - `sys.path.append("monk_v1/");` # <a id='1'></a> # # Imports # + #Using keras backend # When installed using pip from monk.keras_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.keras_prototype import prototype # + #Using pytorch backend # When installed using pip from monk.pytorch_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.pytorch_prototype import prototype # + #Using gluon backend # When installed using pip from monk.gluon_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.gluon_prototype import prototype # - # <a id='2'></a> # # Creating and managing experiments # - Provide project name # - Provide experiment name # - For a specific data create a single project # - Inside each project multiple experiments can be created # - Every experiment can be have diferent hyper-parameters attached to it gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1"); # ### This creates files and directories as per the following structure # # # workspace # | # |--------sample-project-1 (Project name can be different) # | # | # |-----sample-experiment-1 (Experiment name can be different) # | # |-----experiment-state.json # | # |-----output # | # |------logs (All training logs and graphs saved here) # | # |------models (all trained models 
saved here) # # <a id='3'></a> # # Training a Cat Vs Dog image classifier # ## Quick mode training # # - Using Default Function # - dataset_path # - model_name # - num_epochs # # # ## Dataset folder structure # # parent_directory # | # | # |------cats # | # |------img1.jpg # |------img2.jpg # |------.... (and so on) # |------dogs # | # |------img1.jpg # |------img2.jpg # |------.... (and so on) # Download dataset import os if not os.path.isfile("datasets.zip"): os.system("! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt") if not os.path.isdir("datasets"): os.system("! unzip -qq datasets.zip") # + gtf.Default(dataset_path="datasets/dataset_cats_dogs_train", model_name="resnet50", num_epochs=5); #Read the summary generated once you run this cell. 
# + #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed # - # <a id='4'></a> # # Validating the trained classifier # ## Load the experiment in validation mode # - Set flag eval_infer as True gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1", eval_infer=True); # ## Load the validation dataset gtf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_eval"); gtf.Dataset(); # ## Run validation accuracy, class_based_accuracy = gtf.Evaluate(); # <a id='5'></a> # # Running inference on test images # ## Load the experiment in inference mode # - Set flag eval_infer as True gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1", eval_infer=True); # ## Select image and Run inference # + img_name = "datasets/dataset_cats_dogs_test/0.jpg"; predictions = gtf.Infer(img_name=img_name); #Display from IPython.display import Image Image(filename=img_name) # + img_name = "datasets/dataset_cats_dogs_test/90.jpg"; predictions = gtf.Infer(img_name=img_name); #Display from IPython.display import Image Image(filename=img_name) # -
study_roadmaps/1_getting_started_roadmap/4_quick_prototyping_mode/tmp1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %matplotlib inline # Import dependencies. import matplotlib.pyplot as plt # + # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] # - # Create the plot plt.plot(x_axis, y_axis) # Create the plot with ax.plt() fig, ax = plt.subplots() ax.plot(x_axis, y_axis) # Create the plot with ax.plt() fig = plt.figure() ax = fig.add_subplot() ax.plot(x_axis, y_axis) # Create the plot. plt.plot(x_axis, y_axis) plt.show() # Create the plot and add a label for the legend. plt.plot(x_axis, y_axis, label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Set the y limit between 0 and 45. plt.ylim(0, 45) # Create a title. plt.title("PyBer Fare by Month") # Add the legend. plt.legend() # Create the plot. plt.plot(x_axis, y_axis, marker="*", color="blue", linewidth=2, label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Set the y limit between 0 and 45. plt.ylim(0, 45) # Create a title. plt.title("PyBer Fare by Month") # Add a grid. plt.grid() # Add the legend. plt.legend() # ### SKILL DRILL # Create the plot. fig, ax = plt.subplots() ax.plot(x_axis, y_axis, marker="d", color="green", linewidth=2, label='Boston') # Create labels for the x and y axes. ax.set_xlabel("Date") ax.set_ylabel("Fare($)") # Set the y limit between 0 and 45. ax.set_ylim(0, 45) # Create a title. ax.set_title("PyBer Fare by Month") # Add a grid. ax.grid() # Add the legend. 
ax.legend() # + # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] # - # Create the plot. plt.bar(x_axis, y_axis, color="green", label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Create a title. plt.title("PyBer Fare by Month") # Add the legend. plt.legend() # Create the plot plt.barh(x_axis, y_axis) # Create the plot. plt.barh(x_axis, y_axis) plt.gca().invert_yaxis() # SKILL DRILL 5.1 # Using the Matplotlib MATLAB plotting approach, make the following changes: # Create the plot. plt.barh(x_axis, y_axis, color="magenta", label='Boston') # Create labels for the x and y axes. plt.xlabel("Date") plt.ylabel("Fare($)") # Create a title. plt.title("PyBer Fare by Month") # Add the legend. plt.legend() # ## 5.1.6 Create Bar Charts Using the Object-Oriented Approach # + # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] # - # Create the plot with ax.plt() fig, ax = plt.subplots() ax.bar(x_axis, y_axis) # Create the plot with ax.plt() fig, ax = plt.subplots() ax.barh(x_axis, y_axis) # Create the plot with ax.plt() fig, ax = plt.subplots() ax.barh(y_axis, x_axis) # ## SKILL DRILL # + # create the plot with ax.plt() fig, ax = plt.subplots() ax.barh(x_axis, y_axis, color= "cyan", label = 'Chicago') # Annotate the chart # Create labels for the x and y axes. ax.set_ylabel("Date") ax.set_xlabel("Fare($)") # Create a title. 
ax.set_title("PyBer Fare by Month") # Add the legend. ax.legend() # - # # 5.1.7 Create Scatter Plots and Bubble Charts # + # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] # - plt.plot(x_axis, y_axis, 'o') plt.scatter(x_axis, y_axis) # ## SKILL DRILL # ### Using the Matplotlib MATLAB plotting approach, make the following changes to your scatter plot: # + # Set the x-axis to a list of strings for each month. x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] # Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month. y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09] plt.scatter(y_axis, x_axis, c="red") plt.gca().invert_yaxis() plt.title("PyBer Fare by Month") plt.xlabel("Fares($)") plt.ylabel("Date") plt.xlim(0, 50) # - # ### Create a Bubble Chart Using the MATLAB Method plt.scatter(x_axis, y_axis, s=y_axis) y_axis_larger = [] for data in y_axis: y_axis_larger.append(data*3) plt.scatter(x_axis, y_axis, s=y_axis_larger) # ### You can use list comprehension to replace many for and while loops plt.scatter(x_axis, y_axis, s = [i * 3 for i in y_axis]) # ### Create a Scatter Plot Using the Object-Oriented Interface fig, ax = plt.subplots() ax.scatter(x_axis, y_axis) # ### Create a Bubble Chart Using the Object-Oriented Interface fig, ax = plt.subplots() ax.scatter(x_axis, y_axis, s=y_axis) # ## SKILL DRILL # ### Using the Matplotlib MATLAB plotting approach, make the following changes to your scatter plot: fig, ax = plt.subplots() ax.scatter(y_axis, x_axis,s =[i*5 for i in y_axis], c="skyblue", edgecolor="black", alpha=0.8, label='Boston', linewidth=2) 
plt.gca().invert_yaxis() ax.legend() ax.set_title("PyBer Fare by Month") ax.set_xlim(0,50) ax.set_xlabel("Fare($)") ax.set_ylabel("Date") # # 5.1.8 Create Pie Charts # ### Create a Pie Chart Using the MATLAB Method plt.pie(y_axis, labels=x_axis) plt.show() plt.subplots(figsize=(8, 8)) explode_values = (0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0) plt.pie(y_axis, explode=explode_values, labels=x_axis, autopct='%.1f%%') # + # Assign 12 colors, one for each month. colors = ["slateblue", "magenta", "lightblue", "green", "yellowgreen", "greenyellow", "yellow", "orange", "gold", "indianred", "tomato", "mistyrose"] explode_values = (0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0) plt.subplots(figsize=(8, 8)) plt.pie(y_axis, explode=explode_values, colors=colors, labels=x_axis, autopct='%.1f%%') plt.show() # - fig, ax = plt.subplots() ax.pie(y_axis,labels=x_axis) plt.show() # # SKILL DRILL # ### Using the object-oriented approach, make the following changes to the pie chart: # + colors = ["slateblue", "magenta", "lightblue", "green", "yellowgreen", "greenyellow", "yellow", "orange", "gold", "indianred", "tomato", "mistyrose"] explode_values = (0, 0, 0.2, 0, 0, 0, 0.2, 0, 0, 0, 0, 0) fig, ax= plt.subplots(figsize=(8, 8)) ax.pie(y_axis, explode=explode_values, colors=colors, labels=x_axis, shadow=True, counterclock=False, startangle=100, autopct='%.1f%%') plt.show() # -
Module practice and skill drills/matplotlib_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DATA 512- A2: Bias in Data # The purpose of this notebook is to explore and analyze the <i>toxicity</i> dataset from the <b>Wikipedia Talk corpus</b>, and identify the source of inherent bias if present in the data. # ### Contents: # 1. Loading the data # 2. Exploratory Analysis # ## 1. Loading the data # For the purpose of this assignment, we download the Wikipedia Talk labels dataset from [Figshare](https://figshare.com/projects/Wikipedia_Talk/16731). This data set includes over 100k labeled discussion comments from English Wikipedia. Each comment was labeled by multiple annotators via Crowdflower on whether it is a toxic or healthy contribution. We also include some demographic data for each crowd-worker. # We need the following data-sets for our analysis: # # <b>Toxicity</b> # 1. toxicity_annotated_comments.tsv # 2. toxicity_annotations.tsv # 3. toxicity_worker_demographic.tsv \ # [available here-](https://figshare.com/articles/dataset/Wikipedia_Talk_Labels_Toxicity/4563973) # # <b>Personal Attacks</b> # 1. attack_annotated_comments.tsv # 2. 
attack_annotations.tsv \ # [available here-](https://figshare.com/articles/dataset/Wikipedia_Talk_Labels_Personal_Attacks/4054689) import pandas as pd import urllib import matplotlib.pyplot as plt import numpy as np import seaborn as sns import nltk from nltk import word_tokenize, pos_tag, pos_tag_sents from nltk.corpus import wordnet from collections import Counter # Defining a function to fetch data from the specified URL: def download_data(url, file_name): urllib.request.urlretrieve(url, file_name) # + #Setting up the URLs for the respective files toxic_comments_URL = 'https://ndownloader.figshare.com/files/7394542' toxic_annotations_URL = 'https://ndownloader.figshare.com/files/7394539' toxic_demographic_URL = 'https://ndownloader.figshare.com/files/7640581' attack_comments_URL = 'https://ndownloader.figshare.com/files/7554634' attack_annotations_URL = 'https://ndownloader.figshare.com/files/7554637' #Downloading the required files download_data(toxic_comments_URL, 'toxicity_annotated_comments.tsv') download_data(toxic_annotations_URL, 'toxicity_annotations.tsv') download_data(toxic_demographic_URL, 'toxicity_worker_demographic.tsv') download_data(attack_comments_URL, 'attack_annotated_comments.tsv') download_data(attack_annotations_URL, 'attack_annotations.tsv') # - # Read the downloaded files as pandas dataframe # + # Toxicity data t_comments = pd.read_csv('toxicity_annotated_comments.tsv', sep = '\t') t_annotations = pd.read_csv('toxicity_annotations.tsv', sep = '\t') worker_demographic = pd.read_csv('toxicity_worker_demographic.tsv', sep = '\t') # Attacks data a_comments = pd.read_csv('attack_annotated_comments.tsv', sep = '\t') a_annotations = pd.read_csv('attack_annotations.tsv', sep = '\t') # - # ## 2. Exploratory Analysis # In this section we analyze the Toxicity dataset, to determine presence of any inherent bias in the data. # The annotators from Crowdflower were responsible for labeling the comments as either 'toxic' or 'healthy'. 
They did this by giving each comment a toxicity score ranging from -2(very toxic) to +2(very healthy) with 0 being neutral. Then the comments which received a toxicity score < 0 were flagged as toxic(1). Hence it is logical to think that the labeling of the comments is impacted by the demographics of the Crowdflower annotators. # # In this section we will perform the following analyses: # - Analyze influence of worker demographic on toxicity annotations # - Identify the speech characteristics most associated with toxicity # - Comparative analysis of speech characteristics between toxicity and personal-attacks # #### Analyze influence of worker demographic on toxicity annotations # # Gender difference plays a significant role in this scenario as people perceive and react differently to certain comments. # - Influence of gender and age # - Influence of gender and education # Merge the annotations and worker_demographic data using an inner join, because for this analysis we do not want those annotation records for which worker_id is missing. #1 Toxicity by gender and age df = t_annotations.merge(worker_demographic, left_on = 'worker_id', right_on = 'worker_id') df.count() # Create two separate tables, one aggregated by age, gender and other by education, gender. # # Creating a calculated field representing the 'percentage of comments' labelled as toxic for each group-by level. 
# + df_age_gender = df.groupby(['gender', 'age_group'], as_index = False).agg({'toxicity':'sum', 'rev_id':'count'}) df_edu_gender = df.groupby(['gender', 'education'], as_index = False).agg({'toxicity':'sum', 'rev_id':'count'}) df_age_gender['toxic_annotations_percent'] = (df_age_gender['toxicity'] / df_age_gender['rev_id'])*100 df_edu_gender['toxic_annotations_percent'] = (df_edu_gender['toxicity'] / df_edu_gender['rev_id'])*100 # + plt.figure(figsize=(15,5)) ax = sns.barplot(y="toxicity", hue="gender", x="age_group", palette="pastel", edgecolor=".6", data=df_age_gender) ax.set(ylabel = "Number of 'toxic' annotations", xlabel = 'Age Group') ax.set_title("Influence of gender, age on Toxicity annotations") plt.legend(bbox_to_anchor=(1.01, 0.63), loc=2, borderaxespad=0.) # - plt.figure(figsize=(15,5)) ax = sns.barplot(y="toxicity", hue="gender", x="education", palette="pastel", edgecolor=".6", data=df_edu_gender) ax.set(ylabel = "Number of 'toxic' annotations", xlabel = 'Education') ax.set_title("Influence of gender, education on Toxicity annotations") plt.legend(bbox_to_anchor=(1.01, 0.63), loc=2, borderaxespad=0.) # + df_gender = df.groupby(['gender', 'english_first_language'], as_index = False).agg({'toxicity':'sum', 'rev_id':'count'}) # Fix: compute the percentage from df_gender's own columns (the original divided df_age_gender's columns, whose groupby has a different length/index, producing misaligned values) df_gender['toxic_annotations_percent'] = (df_gender['toxicity'] / df_gender['rev_id'])*100 plt.figure(figsize=(15,5)) # Fix: plot the percent column so the chart matches its "Percentage" y-axis label (the original plotted raw counts) ax = sns.barplot(y="toxic_annotations_percent", hue="english_first_language", x="gender", palette="pastel", edgecolor=".6", data=df_gender) ax.set(ylabel = "Percentage of toxic annotations(%)", xlabel = 'Gender') ax.set_title("Influence of gender and language on Toxicity annotations") plt.legend(bbox_to_anchor=(1.01, 0.63), loc=2, borderaxespad=0.) # - # #### Identify speech characteristics most associated with toxicity # In this section we find the keywords most associated with toxic comments, to understand its text characteristics. 
# # To normalize the bias from worker demographic, we will subset the comments data for only those comments that are tagged as 'toxic' by more than 50% of the annotators. This will give us a relatively bias-free insight into the text characteristics. # + # labels a comment as toxic if the majority of annoatators did so labels = t_annotations.groupby('rev_id', as_index = False)['toxicity'].mean() labels['toxic_flag'] = 'False' labels.loc[labels['toxicity'] > 0.5, 'toxic_flag'] = 'True' # - # Add the new derived tocicity_flag to the comments table and subset only the toxic comments # + t_comments['toxic_flag'] = labels['toxic_flag'] toxic = t_comments[t_comments['toxic_flag']== 'True'] toxic.head(5) # - # Clean the data by removing unwanted strings from the comments toxic['comment'] = toxic['comment'].apply(lambda x: x.replace("TOKEN", "")) toxic['comment'] = toxic['comment'].apply(lambda x: x.replace("TAB_TOKEN", "")) toxic['comment'] = toxic['comment'].apply(lambda x: x.replace("NEWLINE", "")) toxic['comment'] = toxic['comment'].apply(lambda x: x.replace("_", "")) toxic['comment'] = toxic['comment'].apply(lambda x: x.replace("==", "")) toxic['comment'] = toxic['comment'].apply(lambda x: x.replace("===", "")) toxic.head() # # Here we use the <b>nltk</b> package to tokenize and tag the comments inorder to identify and extract nouns, verbs and adjectives from the text present in the data. # # We will then get the most frequent keywords associated with toxicity, using the 'Counter' function from the <b>collections</b> package. tokens = pos_tag_sents(toxic["comment"].apply(word_tokenize).tolist()) # The above code returns a list of touples containing the words and their corresponding pos_tag. \ # For eg: # [('Rome','NN'), ('beautiful','JJ')] # # We will subset the nouns and adjectives from each text as these are most likely to be the keywords associated to toxicity. 
# + words = [] for rows in tokens: r = [t[0] for t in rows if t[1] in ['NNP', 'NN', 'JJ']] words.append(r) # + #Flatten the sublists into a single list words_flat = [item for sublist in words for item in sublist] len(words_flat) # + # Keep the 1,000 most frequent words in the toxic comments (most_common(1000) returns the top 1,000 by count, not words with >=1,000 occurrences) words_flat = [x.lower() for x in words_flat] keywords = Counter(words_flat).most_common(1000) toxicity_keywords_df = pd.DataFrame(keywords[0:20], columns =['Keyword', 'Count']) # + toxicity_keywords_df['total_comments'] = len(toxic['comment']) toxicity_keywords_df['Frequency(%)'] = (toxicity_keywords_df['Count']/toxicity_keywords_df['total_comments'])*100 #List of unwanted words that might pass through the pos_tag filter stopwords = ['you', 'my', 'i', 'u', 'a'] toxicity_keywords_df_clean = toxicity_keywords_df[~toxicity_keywords_df['Keyword'].isin(stopwords)] # - # Plotting the keywords most associated with toxicity # + fig, ax = plt.subplots(figsize=(8, 8)) # Plot horizontal bar graph toxicity_keywords_df_clean.sort_values(by='Frequency(%)').plot.barh(x='Keyword', y='Frequency(%)', ax=ax, color="purple") ax.set_title("Keywords most associated with 'toxicity'") plt.show() # - # #### Comparative analysis of speech characteristics between toxicity and personal-attacks # # # In this section we find the keywords most associated with personal attacks, to understand its text characteristics and compare them with the characteristics of the 'toxicity' dataset. # # To normalize the bias from worker demographic, we will subset the comments data for only those comments that are tagged as 'personal attack' by more than 50% of the annotators. This will give us a relatively bias-free insight into the text characteristics. # # All the following steps are similar to the ones performed for identifying the keywords for toxicity in the previous section. 
# + # labels a comment as toxic if the majority of annoatators did so labels = a_annotations.groupby('rev_id', as_index = False)['attack'].mean() labels['attack_flag'] = 'False' labels.loc[labels['attack'] > 0.5, 'attack_flag'] = 'True' # + a_comments['attack_flag'] = labels['attack_flag'] attack = a_comments[a_comments['attack_flag']== 'True'] attack.head(5) # - attack['comment'] = attack['comment'].apply(lambda x: x.replace("TOKEN", "")) attack['comment'] = attack['comment'].apply(lambda x: x.replace("TAB_TOKEN", "")) attack['comment'] = attack['comment'].apply(lambda x: x.replace("NEWLINE", "")) attack['comment'] = attack['comment'].apply(lambda x: x.replace("_", "")) attack['comment'] = attack['comment'].apply(lambda x: x.replace("==", "")) attack['comment'] = attack['comment'].apply(lambda x: x.replace("===", "")) att_tokens = pos_tag_sents(attack["comment"].apply(word_tokenize).tolist()) # + a_words = [] for rows in att_tokens: r = [t[0] for t in rows if t[1] in ['NNP', 'NN', 'JJ']] a_words.append(r) #Flatten the sublists into a single list awords_flat = [item for sublist in a_words for item in sublist] len(awords_flat) # + # Counting the most frequent words (atleast 1000 occurances) in the attack comments awords_flat = [x.lower() for x in awords_flat] attack_keywords = Counter(awords_flat).most_common(1000) attack_keywords_df = pd.DataFrame(attack_keywords[0:20], columns =['Keyword', 'Count']) # + attack_keywords_df['total_comments'] = len(attack['comment']) attack_keywords_df['Frequency(%)'] = (attack_keywords_df['Count']/attack_keywords_df['total_comments'])*100 #List of unwanted words that might pass through the pos_tag filter stopwords = ['you', 'my', 'i', 'u', 'a'] #same as used for toxicity attack_keywords_df_clean = attack_keywords_df[~attack_keywords_df['Keyword'].isin(stopwords)] # - # Plotting the keywords for both the datasets for comparison # + fig, (ax1, ax2) = plt.subplots(2,figsize=(10, 10)) # Plot horizontal bar graph 
attack_keywords_df_clean.sort_values(by='Frequency(%)').plot.barh(x='Keyword', y='Frequency(%)', ax=ax1, color="green") ax1.set_title("Keywords most associated with 'personal attacks'") # Plot horizontal bar graph toxicity_keywords_df_clean.sort_values(by='Frequency(%)').plot.barh(x='Keyword', y='Frequency(%)', ax=ax2, color="purple") ax2.set_title("Keywords most associated with 'toxicity'") plt.show() # -
data512_a2/A2_Bias_in_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: drlnd_gpu # language: python # name: drlnd_gpu # --- # # Continuous Control # # --- # # In this notebook, you will learn how to use the Unity ML-Agents environment for the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program. # # ### 1. Start the Environment # # We begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/). # %load_ext autoreload # %autoreload 2 # + import sys sys.path.insert(1, 'scripts/') # TODO: insert at 1, 0 is the script path (or '' in REPL) from unityagents import UnityEnvironment import numpy as np import torch from collections import deque import matplotlib.pyplot as plt # %matplotlib inline from d4pg_agent import D4PG_Agent from params import Params from logger import Logger # - # Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded. # # - **Mac**: `"path/to/Reacher.app"` # - **Windows** (x86): `"path/to/Reacher_Windows_x86/Reacher.exe"` # - **Windows** (x86_64): `"path/to/Reacher_Windows_x86_64/Reacher.exe"` # - **Linux** (x86): `"path/to/Reacher_Linux/Reacher.x86"` # - **Linux** (x86_64): `"path/to/Reacher_Linux/Reacher.x86_64"` # - **Linux** (x86, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86"` # - **Linux** (x86_64, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86_64"` # # For instance, if you are using a Mac, then you downloaded `Reacher.app`. 
If this file is in the same folder as the notebook, then the line below should appear as follows: # ``` # env = UnityEnvironment(file_name="Reacher.app") # ``` env = UnityEnvironment(file_name='Reacher_Windows_multi_agent_x86_64/Reacher.exe') #env = UnityEnvironment(file_name='Reacher_Windows_single_agent_x86_64/Reacher.exe') # Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # ### 2. Examine the State and Action Spaces # # In this environment, a double-jointed arm can move to target locations. A reward of `+0.1` is provided for each step that the agent's hand is in the goal location. Thus, the goal of your agent is to maintain its position at the target location for as many time steps as possible. # # The observation space consists of `33` variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. Every entry in the action vector must be a number between `-1` and `1`. # # Run the code cell below to print some information about the environment. # + # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents num_agents = len(env_info.agents) print('Number of agents:', num_agents) # size of each action action_size = brain.vector_action_space_size print('Size of each action:', action_size) # examine the state space states = env_info.vector_observations state_size = states.shape[1] print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size)) print('The state for the first agent looks like:', states[0]) # - # ### 3. 
Take Random Actions in the Environment # # In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment. # # Once this cell is executed, you will watch the agent's performance, if it selects an action at random with each time step. A window should pop up that allows you to observe the agent, as it moves through the environment. # # Of course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment! # + # env_info = env.reset(train_mode=False)[brain_name] # reset the environment # states = env_info.vector_observations # get the current state (for each agent) # scores = np.zeros(num_agents) # initialize the score (for each agent) # while True: # actions = np.random.randn(num_agents, action_size) # select an action (for each agent) # actions = np.clip(actions, -1, 1) # all actions between -1 and 1 # env_info = env.step(actions)[brain_name] # send all actions to tne environment # next_states = env_info.vector_observations # get next state (for each agent) # rewards = env_info.rewards # get reward (for each agent) # dones = env_info.local_done # see if episode finished # scores += env_info.rewards # update the score (for each agent) # states = next_states # roll over states to next time step # if np.any(dones): # exit loop if episode finished # break # print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores))) # - # ## Get a rough estimate of bounds of reward function, to init V_min and V_max # + # NOTE: INTERRUPT KERNAL TO STOP ANALYSIS env_info = env.reset(train_mode=False)[brain_name] # reset the environment states = env_info.vector_observations # get the current state (for each agent) scores = np.zeros(num_agents) # initialize the score (for each agent) max_reward = 0.0 min_reward = 0.0 max_action = 0.0 min_action = 0.0 eps_max = 1000 iters = 0 while iters < 
eps_max: actions = np.random.randn(num_agents, action_size) # select an action (for each agent) #actions = np.clip(actions, -1, 1) # all actions between -1 and 1 env_info = env.step(actions)[brain_name] # send all actions to tne environment next_states = env_info.vector_observations # get next state (for each agent) rewards = env_info.rewards # get reward (for each agent) # DEBUG: PLOT GRAPH OF REWARDS max_reward = max(rewards) if (max(rewards) > max_reward) else max_reward min_reward = min(rewards) if (min(rewards) < min_reward) else min_reward max_action = np.amax(actions) if (np.amax(actions) > max_action) else max_action min_action = np.amin(actions) if (np.amin(actions) < min_action) else min_action iters += 1 print("\rEpisode: {} \tMAX REWARD: {:.2f} \tMIN REWARD: {:.2f} \tMAX ACTION: {:.2f} \tMIN ACTION: {:.2f}".format(iters, max_reward, min_reward, max_action, min_action), end="") dones = env_info.local_done # see if episode finished scores += env_info.rewards # update the score (for each agent) states = next_states # roll over states to next time step #if np.any(dones): # exit loop if episode finished # break # - # When finished, you can close the environment. # + # env.close() # - # ### 4. It's Your Turn! # # Now it's your turn to train your own agent to solve the environment! 
# When training the environment, set `train_mode=True`, so that the line for
# resetting the environment looks like the following:
# ```python
# env_info = env.reset(train_mode=True)[brain_name]
# ```

def d4pg(params=Params(), logger=Logger()):
    """Run the D4PG training loop over the multi-agent Reacher environment.

    Relies on the notebook-level globals `env`, `brain_name` and `agent`
    created in earlier cells.

    Parameters
    ----------
    params : Params
        Hyperparameters and loop settings (n_episodes, max_t, save_every,
        print_every, target_score, ...).
    logger : Logger
        Accumulates per-episode scores and actor/critic losses.

    Returns
    -------
    list
        The per-episode scores collected in `logger.scores_list`.
    """
    env_info = env.reset(train_mode=True)[brain_name]
    for i_episode in range(1, params.n_episodes+1):
        env_info = env.reset(train_mode=True)[brain_name]  # reset the environment
        states = env_info.vector_observations              # get the current state
        agent.reset()
        score = 0
        for t in range(params.max_t):
            # Perform actions from each agent's policy network (clipped actions [0, -1])
            actions = [ agent.act(state) for state in states ]
            actions = np.stack(actions)
            env_info = env.step(actions)[brain_name]
            next_states, rewards, dones = env_info.vector_observations, env_info.rewards, env_info.local_done
            # The n-step buffer is cleared at the start of each episode (t == 0)
            agent.step(states, actions, rewards, next_states, dones, clear_nstep_buffer=(t==0))
            states = next_states
            score += (sum(rewards) / len(rewards))  # ALT METRIC: Max
            # Prevent agent from learning NaN rewards
            has_NAN_in_rewards = np.isnan(rewards)
            if any(has_NAN_in_rewards):
                rewards[has_NAN_in_rewards] = -5
                print("NaN Reward Found! \nConverting to R=-5 instead...")
            # Break if any of the agent is done
            if any(dones):
                break
        # Print crucial results for progress tracking.  Logging only starts once
        # the replay buffer has been prefilled (deques would otherwise be empty
        # and the running means below would be undefined).
        if agent.memory_buffer_prefilled():
            logger.scores_deque.append(score)
            logger.actor_loss_deque.append(agent.actor_loss)
            logger.critic_loss_deque.append(agent.critic_loss)
            logger.scores_list.append(score)
            logger.actor_loss_list.append(agent.actor_loss)
            logger.critic_loss_list.append(agent.critic_loss)
            print('\rEpisode {}\ Score: {:.2f}'.format(i_episode, logger.scores_deque[-1]), end="")
            if i_episode % params.save_every == 0:
                # Periodic checkpoints of both local networks
                torch.save(agent.actor_local.state_dict(), "weights/checkpoint_actor/checkpoint_actor_ep{}.pth".format(i_episode))
                torch.save(agent.critic_local.state_dict(), "weights/checkpoint_critic/checkpoint_critic_ep{}.pth".format(i_episode))
            if i_episode % params.print_every == 0:
                print('\rEpisode {}: \tActor Loss: {:.2f} \tCritic Loss: {:.2f} \n\t\tAverage Score: {:.2f}\n'
                      .format(i_episode, np.mean(logger.actor_loss_deque), np.mean(logger.critic_loss_deque), np.mean(logger.scores_deque)))
            if np.mean(logger.scores_deque) >= params.target_score:
                print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'
                      .format(i_episode, np.mean(logger.scores_deque)))
                # Optionally keep training past the target score
                if params.terminate_on_target_score:
                    break
    # Plot graphs & save final weights
    torch.save(agent.actor_local.state_dict(), "weights/checkpoint_actor/checkpoint_actor_ep{}.pth".format(i_episode))
    torch.save(agent.critic_local.state_dict(), "weights/checkpoint_critic/checkpoint_critic_ep{}.pth".format(i_episode))
    if params.plot_stats:
        logger.plot_stats()
    return logger.scores_list


# +
# Params Override: Initialized in params.py
params = Params()
params.verbose = True
params.random_seed = 0
params.n_episodes=300
params.max_t = 1000
params.print_every = 25
params.save_every = 100
params.prefill_memory_qty=5000
params.terminate_on_target_score = False
params.plot_stats = True
params.lr_actor = 5e-4
params.lr_critic = 1e-3

#### MAIN #####
logger = Logger(params)
agent = D4PG_Agent(state_size, action_size, params=params)
d4pg(params, logger)

# +
def plot_graph(logger):
    """Plot per-episode rewards plus actor/critic losses side by side."""
    _, axs = plt.subplots(1, 3, figsize=(20, 5))
    # Scores
    axs[0].plot(np.arange(1, len(logger.scores_list)+1), logger.scores_list)
    axs[0].set(xlabel='Episode #', ylabel='Score')
    axs[0].set_title('Rewards')
    # Actor Loss
    axs[1].plot(np.arange(1, len(logger.actor_loss_list)+1), logger.actor_loss_list)
    axs[1].set(xlabel='Episode #', ylabel='Loss')
    axs[1].set_title('Actor Loss')
    # Critic Loss
    axs[2].plot(np.arange(1, len(logger.critic_loss_list)+1), logger.critic_loss_list)
    axs[2].set(xlabel='Episode #', ylabel='Loss')
    axs[2].set_title('Critic Loss')
    plt.show()

plot_graph(logger)

# +
# ########################################################################################
# # PARAMETER SWEEP
# ########################################################################################

# print("CUDA AVAILABLE: ", torch.cuda.is_available())

# ##################
# Params Override: Initialized in params.py
# Sweep 1: lower critic learning rate
params = Params()
params.verbose = True
params.random_seed = 0
params.n_episodes=300
params.max_t = 1000
params.print_every = 20
params.save_every = 100
params.prefill_memory_qty=5000
params.terminate_on_target_score = False
params.plot_stats = True
params.lr_actor = 5e-4
params.lr_critic = 7.5e-4

#### MAIN #####
logger = Logger(params)
agent = D4PG_Agent(state_size, action_size, params=params)
d4pg(params, logger)

# ##################
# Params Override: Initialized in params.py
# Sweep 2: fewer distribution atoms
params = Params()
params.verbose = True
params.random_seed = 0
params.n_episodes=300
params.max_t = 1000
params.print_every = 20
params.save_every = 100
params.prefill_memory_qty=5000
params.terminate_on_target_score = False
params.plot_stats = True
params.lr_actor = 5e-4
params.lr_critic = 1e-3
params.num_atoms = 51

#### MAIN #####
logger = Logger(params)
agent = D4PG_Agent(state_size, action_size, params=params)
d4pg(params, logger)

# ##################
# Params Override: Initialized in params.py
# Sweep 3: more distribution atoms
params = Params()
params.verbose = True
params.random_seed = 0
params.n_episodes=300
params.max_t = 1000
params.print_every = 20
params.save_every = 100
params.prefill_memory_qty=5000
params.terminate_on_target_score = False
params.plot_stats = True
params.lr_actor = 5e-4
params.lr_critic = 1e-3
params.num_atoms = 250

#### MAIN #####
logger = Logger(params)
agent = D4PG_Agent(state_size, action_size, params=params)
d4pg(params, logger)
# -

# ### 4. Watch a Smart Agent!
#
# In the next code cell, you will load the trained weights from file to watch a smart agent!

# +
# load the weights from file
params = Params()
logger = Logger(params)
agent = D4PG_Agent(state_size, action_size, params=params)
agent.actor_local.load_state_dict(torch.load('weights/saved/actor_atom100_ep300.pth'))
agent.critic_local.load_state_dict(torch.load('weights/saved/critic_atom100_ep300.pth'))

env_info = env.reset(train_mode=False)[brain_name]  # reset the environment
states = env_info.vector_observations               # get the current state
score = 0                                           # initialize the score
while True:
    # Perform actions from each agent's policy network (clipped actions [0, -1])
    actions = [ agent.act(state, add_noise=False) for state in states ]
    actions = np.stack(actions)
    env_info = env.step(actions)[brain_name]        # send the action to the environment
    next_states = env_info.vector_observations      # get the next state
    rewards = env_info.rewards                      # get the reward
    dones = env_info.local_done                     # see if episode has finished
    score += max(rewards)                           # update the score
    states = next_states                            # roll over the state to next time step
    if any(dones):                                  # exit loop if episode finished
        break

print("Score: {}".format(score))
# -

# When finished, you can close the environment.

env.close()
p2_continuous-control/Reacher_MultiAgent_Control_Exercise/MultiAgent_Continuous_Control.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6 - AzureML
#     language: python
#     name: python3-azureml
# ---

# # Tune Hyperparameters
#
# There are many machine learning algorithms that require *hyperparameters* (parameter values that influence training, but can't be determined from the training data itself). For example, when training a logistic regression model, you can use a *regularization rate* hyperparameter to counteract bias in the model; or when training a convolutional neural network, you can use hyperparameters like *learning rate* and *batch size* to control how weights are adjusted and how many data items are processed in a mini-batch respectively. The choice of hyperparameter values can significantly affect the performance of a trained model, or the time taken to train it; and often you need to try multiple combinations to find the optimal solution.
#
# In this case, you'll train a classification model with two hyperparameters, but the principles apply to any kind of model you can train with Azure Machine Learning.

# ## Connect to your workspace
#
# To get started, connect to your workspace.
#
# > **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.

# +
import azureml.core
from azureml.core import Workspace

# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
# -

# ## Prepare data
#
# In this lab, you'll use a dataset containing details of diabetes patients. Run the cell below to create this dataset (if it already exists, the existing version will be used)

# +
from azureml.core import Dataset

default_ds = ws.get_default_datastore()

if 'diabetes dataset' not in ws.datasets:
    default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'],  # Upload the diabetes csv files in /data
                            target_path='diabetes-data/',  # Put it in a folder path in the datastore
                            overwrite=True,  # Replace existing files of the same name
                            show_progress=True)

    # Create a tabular dataset from the path on the datastore (this may take a short while)
    tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))

    # Register the tabular dataset
    try:
        tab_data_set = tab_data_set.register(workspace=ws,
                                             name='diabetes dataset',
                                             description='diabetes data',
                                             tags={'format': 'CSV'},
                                             create_new_version=True)
        print('Dataset registered.')
    except Exception as ex:
        print(ex)
else:
    print('Dataset already registered.')
# -

# ## Prepare a training script
#
# Now let's create a folder for the training script you'll use to train the model.

# +
import os

experiment_folder = 'diabetes_training-hyperdrive'
os.makedirs(experiment_folder, exist_ok=True)

print('Folder ready.')
# -

# Now create the Python script to train the model. In this example, you'll use a *Gradient Boosting* algorithm to train a classification model. The script must include:
#
# - An argument for each hyperparameter you want to optimize (in this case, the learning rate and number of estimators for the Gradient Boosting algorithm)
# - Code to log the performance metric you want to optimize for (in this case, you'll log both AUC and accuracy, so you can choose to optimize the model for either of these)

# +
# %%writefile $experiment_folder/diabetes_training.py
# Import libraries
import argparse, joblib, os
from azureml.core import Run
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score, roc_curve

# Get the experiment run context
run = Run.get_context()

# Get script arguments
parser = argparse.ArgumentParser()

# Input dataset
parser.add_argument("--input-data", type=str, dest='input_data', help='training dataset')

# Hyperparameters
parser.add_argument('--learning_rate', type=float, dest='learning_rate', default=0.1, help='learning rate')
parser.add_argument('--n_estimators', type=int, dest='n_estimators', default=100, help='number of estimators')

# Add arguments to args collection
args = parser.parse_args()

# Log Hyperparameter values.
# NOTE: use the builtin float/int here — the np.float/np.int aliases used
# previously are deprecated and were removed in NumPy 1.24, which makes the
# script crash on current NumPy versions.
run.log('learning_rate', float(args.learning_rate))
run.log('n_estimators', int(args.n_estimators))

# load the diabetes dataset
print("Loading Data...")
diabetes = run.input_datasets['training_data'].to_pandas_dataframe()  # Get the training data from the estimator input

# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values

# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# Train a Gradient Boosting classification model with the specified hyperparameters
print('Training a classification model')
model = GradientBoostingClassifier(learning_rate=args.learning_rate,
                                   n_estimators=args.n_estimators).fit(X_train, y_train)

# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', float(acc))

# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test, y_scores[:, 1])
print('AUC: ' + str(auc))
run.log('AUC', float(auc))

# Save the model in the run outputs
os.makedirs('outputs', exist_ok=True)
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')

run.complete()
# -

# ## Create compute
#
# Hyperparameter tuning involves running multiple training iterations with different hyperparameter values and comparing the performance metrics of the resulting models. To do this efficiently, we'll take advantage of on-demand cloud compute and create a cluster - this will allow multiple training iterations to be run concurrently.
#
# Use the following code to specify an Azure Machine Learning compute cluster (it will be created if it doesn't already exist).
#
# > **Important**: Change *your-compute-cluster* to the name of your compute cluster in the code below before running it! Cluster names must be globally unique names between 2 to 16 characters in length. Valid characters are letters, digits, and the - character.

# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

cluster_name = "dp100cluster"

try:
    # Check for existing compute target
    training_cluster = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    # If it doesn't already exist, create it
    try:
        compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2)
        training_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
        training_cluster.wait_for_completion(show_output=True)
    except Exception as ex:
        print(ex)
# -

# ## Run a hyperparameter tuning experiment
#
# Azure Machine Learning includes a hyperparameter tuning capability through *hyperdrive* experiments. These experiments launch multiple child runs, each with a different hyperparameter combination. The run producing the best model (as determined by the logged target performance metric for which you want to optimize) can be identified, and its trained model selected for registration and deployment.
#
# > **Note**: In this example, we aren't specifying an early stopping policy. Such a policy is only relevant if the training script performs multiple training iterations, logging the primary metric for each iteration. This approach is typically employed when training deep neural network models over multiple *epochs*.

# +
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.hyperdrive import GridParameterSampling, HyperDriveConfig, PrimaryMetricGoal, choice
from azureml.widgets import RunDetails

# Create a Python environment for the experiment
sklearn_env = Environment("sklearn-env")

# Ensure the required packages are installed (we need scikit-learn, Azure ML defaults, and Azure ML dataprep)
packages = CondaDependencies.create(conda_packages=['scikit-learn', 'pip'],
                                    pip_packages=['azureml-defaults', 'azureml-dataprep[pandas]'])
sklearn_env.python.conda_dependencies = packages

# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes dataset")

# Create a script config
script_config = ScriptRunConfig(source_directory=experiment_folder,
                                script='diabetes_training.py',
                                # Add non-hyperparameter arguments - in this case, the training dataset
                                arguments=['--input-data', diabetes_ds.as_named_input('training_data')],
                                environment=sklearn_env,
                                compute_target=training_cluster)

# Sample a range of parameter values
params = GridParameterSampling(
    {
        # Hyperdrive will try 6 combinations, adding these as script arguments
        '--learning_rate': choice(0.01, 0.1, 1.0),
        '--n_estimators': choice(10, 100)
    }
)

# Configure hyperdrive settings
hyperdrive = HyperDriveConfig(run_config=script_config,
                              hyperparameter_sampling=params,
                              policy=None,  # No early stopping policy
                              primary_metric_name='AUC',  # Find the highest AUC metric
                              primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
                              max_total_runs=6,  # Restrict the experiment to 6 iterations
                              max_concurrent_runs=2)  # Run up to 2 iterations in parallel

# Run the experiment
experiment = Experiment(workspace=ws, name='mslearn-diabetes-hyperdrive')
run = experiment.submit(config=hyperdrive)

# Show the status in the notebook as the experiment runs
RunDetails(run).show()
run.wait_for_completion()
# -

# You can view the experiment run status in the widget above. You can also view the main Hyperdrive experiment run and its child runs in [Azure Machine Learning studio](https://ml.azure.com).
#
# > **Note**: If a message indicating that a non-numeric can't be visualized is displayed, you can ignore it.
#
# ## Determine the best performing run
#
# When all of the runs have finished, you can find the best one based on the performance metric you specified (in this case, the one with the best AUC).

# +
# Print all child runs, sorted by the primary metric
for child_run in run.get_children_sorted_by_primary_metric():
    print(child_run)

# Get the best run, and its metrics and arguments
best_run = run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
script_arguments = best_run.get_details()['runDefinition']['arguments']
print('Best Run Id: ', best_run.id)
print(' -AUC:', best_run_metrics['AUC'])
print(' -Accuracy:', best_run_metrics['Accuracy'])
print(' -Arguments:', script_arguments)
# -

# Now that you've found the best run, you can register the model it trained.

# +
from azureml.core import Model

# Register model
best_run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
                        tags={'Training context': 'Hyperdrive'},
                        properties={'AUC': best_run_metrics['AUC'], 'Accuracy': best_run_metrics['Accuracy']})

# List registered models
for model in Model.list(ws):
    print(model.name, 'version:', model.version)
    for tag_name in model.tags:
        tag = model.tags[tag_name]
        print('\t', tag_name, ':', tag)
    for prop_name in model.properties:
        prop = model.properties[prop_name]
        print('\t', prop_name, ':', prop)
    print('\n')
# -

# > **More Information**: For more information about Hyperdrive, see the [Azure ML documentation](https://docs.microsoft.com/azure/machine-learning/how-to-tune-hyperparameters).
.ipynb_checkpoints/11 - Tune Hyperparameters-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/fabian692/Forense-Luis-Bertel/blob/master/Taller_1_pillow.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="ujYZYYpRKDA_" # Taller 1 Pillow # # <NAME>; # Seguridad Informatica y forense # # Autores # <NAME> # <NAME> # + id="iAvN3v0C9Xjh" outputId="f3fcb83d-43f4-4e0f-891b-523865b5e642" colab={"base_uri": "https://localhost:8080/", "height": 34} pip install pillow # + id="KOd_J04i_jhb" import argparse import sys from PIL import Image from PIL.ExifTags import TAGS # + id="e2kOkUIKEkeY" outputId="89b79b3d-a279-4874-ff16-8029cf4174f4" colab={"base_uri": "https://localhost:8080/", "height": 598} #abrir imagen imageame= "image1.jpeg" # apertura y lectura de imagen image = Image.open("/content/drive/My Drive/foto.jpg.jpg") # getexif() metodo de retorno de imagen exifdata = image.getexif() # for tag_id in exifdata: # conversion tag = TAGS.get(tag_id,tag_id) #extracción de datos data = exifdata.get(tag_id) if isinstance(data,bytes): #isistance (5,int) data=data.decode() print(f"{tag:40}:{data}")#nombre: foto.jpg
Taller_1_pillow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # GPT Model Inference
#
# Welcome! This notebook is a tutorial on how to use the model you've just trained on the Bittensor network.

import os
import torch
import bittensor
from nuclei.gpt2 import GPT2Nucleus
from torch.nn import functional as F

# ## Load the trained model
# You can find the model under `~/.bittensor/miners/gpt2-exodus/<wallet-coldkey>-<wallet-hotkey>/model.torch`. This is the default place that miners will store models. Note that the loss stored with the model is the `combined` loss, that is, the local loss + remote network loss + distillation loss. As the Bittensor network grows and more sophisticated models join, this combined loss will come down close to 0 ideally.
#
# However, for now, the high loss does not necessarily mean the model will perform badly as a model may have a low local loss but a high remote loss. This happens when the local model is powerful and correctly training, but all the models it is talking to on the network are not so good. This can happen when it's talking to N number of models that are all the same. Since this project is still in the early days, this may happen initially. As the network grows there will be more and more sophisticated

# +
model_path = os.path.expanduser('~/.bittensor/miners/default-default/gpt2_exodus/')

# Check which device this machine is on, just in case we're not loading the model on the same machine that we trained it
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint = torch.load("{}/model.torch".format(model_path), map_location=device)

# Let's load up a Bittensor config
config = GPT2Nucleus.config()

# Let's load up the same nucleus config we trained our model with
# (these must match the architecture in the checkpoint or load_state_dict fails)
config.nucleus.n_head = 32
config.nucleus.n_layer = 12
config.nucleus.block_size = 20
config.nucleus.device = device

# Load up the model
model = GPT2Nucleus(config)
model.load_state_dict(checkpoint['nucleus_state'])

print("Combined loss (local, remote, and distilled) of preloaded model: {}:".format(checkpoint['epoch_loss']))

# Load up the huggingface tokenizer
tokenizer = bittensor.tokenizer()
# -

# ## Inference function
# In essence, the output of the current GPT model is simply encoded using the HuggingFace tokenizer that Bittensor uses. We need to simply decode that information out using the same tokenizer and turn it into text.

# +
def top_k_logits(logits, k):
    """Mask all but the k largest logits per row to -inf so softmax ignores them."""
    v, ix = torch.topk(logits, k)
    out = logits.clone()
    # v[:, [-1]] is the k-th largest value per row; anything below it is masked
    out[out < v[:, [-1]]] = -float('Inf')
    return out

@torch.no_grad()
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
    """
    take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
    the sequence, feeding the predictions back into the model each time. Clearly the sampling
    has quadratic complexity unlike an RNN that is only linear, and has a finite context window
    of block_size, unlike an RNN that has an infinite context window.
    """
    block_size = model.get_block_size()-1
    model.eval()
    for k in range(steps):
        x_cond = x if x.size(1) <= block_size else x[:, -block_size:]  # crop context if needed

        # Run a local forward call through the model
        logits = model.local_forward(x_cond, training=False)

        # The final layer from the local forward (the local hidden layer) needs to be pushed
        # through the target layer. This helps push the dimensionality to bittensor.__vocab_size__
        # making it possible to push this information through the tokenizer's decode function to get
        # words out.
        logits = model.target_layer(logits.local_hidden)

        # pluck the logits at the final step and scale by temperature
        logits = logits[:, -1, :] / temperature
        # optionally crop probabilities to only the top k options
        if top_k is not None:
            logits = top_k_logits(logits, top_k)
        # apply softmax to convert to probabilities
        probs = F.softmax(logits, dim=-1)
        # sample from the distribution or take the most likely
        if sample:
            ix = torch.multinomial(probs, num_samples=1)
        else:
            _, ix = torch.topk(probs, k=1, dim=-1)
        # append to the sequence and continue
        x = torch.cat((x, ix), dim=1)

    return x
# -

# ## Sampling from the trained model
#
# Now that we've got our `sample` function built, let's actually use it! We start our sentence using the `context` variable by giving it a name, and we let the model do the rest. Note that we can actually ask the model to predict whatever number of words we want. In this case, we made it 10 words as that produces legible sentences. The lower your model's loss is, the better predictions you'll get.
#
# Bring it as close to 0 as you can by changing up the `nucleus` parameters to adjust the model's architecture (number of heads, number of layers, etc.) or you can change up the training settings by changing the `miner` settings (things like learning rate, weight decay rate, etc.).

# +
context = "John"

# Tokenize the input
x = tokenizer(context, padding=True, truncation=True)['input_ids']

# Turn it into a tensor
x = torch.tensor(x, dtype=torch.long)

# Give it an extra dimension for the network's sake (expects a 2D tensor input)
x = x.unsqueeze(0)

num_words_predict = 10

# Let's sample the network for some output
y = sample(model, x, num_words_predict, temperature=1.0, sample=True, top_k=10)

# Decode the output
completion = ''.join([tokenizer.decode(i, skip_special_tokens=True) for i in y])

# Print what the model has predicted
print(completion)
# -
notebooks/GPT_model_inference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/aTom-Pie/dw_matrix/blob/master/Matrix2_day2_visualisation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="kK9ulby1jU4Q" colab_type="code" colab={}
import pandas as pd
import numpy as np

# + id="ezam9LCQi5e3" colab_type="code"
# Upgrade PyTables so pandas can read the HDF5 data file.
# !pip install --upgrade tables

# + id="3kvmakQQqE3m" colab_type="code"
# cd /content/drive/My Drive/Colab Notebooks/Matrix2/data

# + id="kQ9G0m0YhYfR" colab_type="code"
# !pwd

# + id="Rfb3zbFFhqqV" colab_type="code"
# Download the car-offers dataset.
# !curl -L http://bit.ly/dw_car_data -o car.h5

# + id="JdS-eZyxiRDG" colab_type="code"
# !ls -lh

# + id="5DGYEcl1iipL" colab_type="code"
# Load the car offers into a DataFrame.
df = pd.read_hdf('/content/drive/My Drive/Colab Notebooks/Matrix2/data/car.h5')
df.shape

# + id="Q7LvXY5Oj5zf" colab_type="code"
df.sample(6)

# + id="wivnDoc4lC67" colab_type="code"
df.head()

# + id="BpY9yARYlFbw" colab_type="code"
df.columns.values

# + id="kNxqepEGlOkZ" colab_type="code"
# Price distribution.
df['price_value'].hist(bins=120);

# + id="kGPv5RA7rsYz" colab_type="code"
df['price_value'].max()

# + id="ykYAmo3wtDFf" colab_type="code"
df['price_value'].describe()

# + id="hXO1flE_tTFB" colab_type="code"
# Vehicle brands present in the data (column names are Polish: "marka pojazdu" = vehicle brand).
df['param_marka-pojazdu'].unique()

# + id="QhUWjTL1ty4n" colab_type="code"
# Mean price per brand.
df.groupby('param_marka-pojazdu')['price_value'].mean()

# + id="MGUDeAzbulPh" colab_type="code"
df.groupby('param_marka-pojazdu')['price_value'].agg(np.mean).plot(kind='bar');

# + id="zqb1eFCyvkhx" colab_type="code"
(df
 .groupby('param_marka-pojazdu')['price_value']
 .agg(np.mean)
 .sort_values(ascending=False)
).plot(kind='bar', figsize=(18, 5));

# + id="kiTiDYd5wXFM" colab_type="code"
# Top 50 brands by mean price.
(df
 .groupby('param_marka-pojazdu')['price_value']
 .agg(np.mean)
 .sort_values(ascending=False)
 .head(50)
).plot(kind='bar', figsize=(18, 5));

# + id="PJxcIAv_xNDk" colab_type="code"
# Same as above but median — less sensitive to outlier offers.
(df
 .groupby('param_marka-pojazdu')['price_value']
 .agg(np.median)
 .sort_values(ascending=False)
 .head(50)
).plot(kind='bar', figsize=(18, 5));

# + id="ERvJ0y1Yxgf2" colab_type="code"
# Mean, median, and offer count per brand on one chart.
(df
 .groupby('param_marka-pojazdu')['price_value']
 .agg([np.mean, np.median, np.size])
 .sort_values(by='mean', ascending=False)
 .head(50)
).plot(kind='bar', figsize=(18, 5));

# + id="USRNHwDuyBzg" colab_type="code"
(df
 .groupby('param_marka-pojazdu')['price_value']
 .agg([np.mean, np.median, np.size])
 .sort_values(by='mean', ascending=False)
 .head(50)
).plot(kind='bar', figsize=(18, 5), subplots=True)

# + id="RUWJTg52zGKD" colab_type="code"
# Sorted by number of offers instead of mean price.
(df
 .groupby('param_marka-pojazdu')['price_value']
 .agg([np.mean, np.median, np.size])
 .sort_values(by='size', ascending=False)
 .head(50)
).plot(kind='bar', figsize=(20, 5), subplots=True)

# + id="YeH5z1xVzmnA" colab_type="code" colab={}
def group_and_barplot (feat_groupby, feat_arg='price_value', agg_funcs=[np.mean, np.median, np.size], feat_sort='mean', top=50, subplots=True):
    """Group `df` by `feat_groupby`, aggregate `feat_arg` with `agg_funcs`,
    sort by `feat_sort`, keep the top `top` groups, and draw a bar plot.

    Uses the notebook-level `df` loaded above.
    NOTE(review): the mutable list default for `agg_funcs` is only read, never
    mutated, so it is harmless here — but a tuple default would be safer.
    """
    return (df
            .groupby(feat_groupby)[feat_arg]
            .agg(agg_funcs)
            .sort_values(by=feat_sort, ascending=False)
            .head(top)
           ).plot(kind='bar', figsize=(20, 5), subplots=subplots)

# + id="oR_OvA1Q2CYX" colab_type="code"
group_and_barplot('param_marka-pojazdu');

# + id="hUvebJ_V4bth" colab_type="code"
# "kraj pochodzenia" = country of origin.
group_and_barplot('param_kraj-pochodzenia');

# + id="CBWvKzuS5q-r" colab_type="code"
group_and_barplot('param_kraj-pochodzenia', feat_sort='size');

# + id="klvCl8OX6vJM" colab_type="code"
# "kolor" = colour.
group_and_barplot('param_kolor', feat_sort='mean');

# + id="Sg4Njxut7IuR" colab_type="code" colab={}
Matrix2_day2_visualisation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <NAME>: # We are in a competition to win the archery contest in Sherwood. With our bow and arrows we shoot on a target and try to hit as close as possible to the center. # # The center of the target is represented by the values (0, 0) on the coordinate axes. # # ![](images/arrows.jpg) # # ## Goals: # * data structures: lists, sets, tuples # * logical operators: if-elif-else # * loop: while/for # * minimum (optional sorting) # # ## Description: # In the 2-dimensional space, a point can be defined by a pair of values that correspond to the horizontal coordinate (x) and the vertical coordinate (y). The space can be divided into 4 zones (quadrants): Q1, Q2, Q3, Q4. Whose single point of union is the point (0, 0). # # If a point is in Q1 both its x coordinate and the y are positive. I leave a link to wikipedia to familiarize yourself with these quadrants. # # https://en.wikipedia.org/wiki/Cartesian_coordinate_system # # https://en.wikipedia.org/wiki/Euclidean_distance # # ## Shots # ``` # points = [(4, 5), (-0, 2), (4, 7), (1, -3), (3, -2), (4, 5), # (3, 2), (5, 7), (-5, 7), (2, 2), (-4, 5), (0, -2), # (-4, 7), (-1, 3), (-3, 2), (-4, -5), (-3, 2), # (5, 7), (5, 7), (2, 2), (9, 9), (-8, -9)] # ``` # # ## Tasks # 1. <NAME> is famous for hitting an arrow with another arrow. Did you get it? # 2. Calculate how many arrows have fallen in each quadrant. # 3. Find the point closest to the center. Calculate its distance to the center. # 4. If the target has a radius of 9, calculate the number of arrows that must be picked up in the forest. 
# +
# Variables: landing coordinates (x, y) of every arrow; (0, 0) is the target center.
import math
from collections import Counter

points = [(4, 5), (-0, 2), (4, 7), (1, -3), (3, -2), (4, 5),
          (3, 2), (5, 7), (-5, 7), (2, 2), (-4, 5), (0, -2),
          (-4, 7), (-1, 3), (-3, 2), (-4, -5), (-3, 2),
          (5, 7), (5, 7), (2, 2), (9, 9), (-8, -9)]
# -

# 1. Robin Hood is famous for hitting an arrow with another arrow. Did you get it?

# A point that was hit more than once means an arrow hit another arrow.
# Counter lists each repeated point exactly once (the previous
# `points.count(x)` comprehension was O(n^2) and printed every duplicate
# as many times as it occurred).
duplicates = [point for point, hits in Counter(points).items() if hits > 1]
print(duplicates)

# +
# 2. Calculate how many arrows have fallen in each quadrant.
Q1 = []  # x >= 0 and y > 0 (axis points with x == 0, y > 0 are counted here)
Q2 = []  # x <  0 and y > 0
Q3 = []  # x <  0 and y < 0
Q4 = []  # everything else: x >= 0 and y <= 0, plus x < 0 and y == 0

for x, y in points:
    if x >= 0 and y > 0:
        Q1.append((x, y))
    elif x < 0 and y > 0:
        Q2.append((x, y))
    elif x < 0 and y < 0:
        Q3.append((x, y))
    else:
        Q4.append((x, y))

print("arrows in Q1 =", Q1)
print("arrows in Q2 =", Q2)
print("arrows in Q3 =", Q3)
print("arrows in Q4 =", Q4)
# -

# +
# 3. Find the point closest to the center. Calculate its distance to the center.

# Euclidean distance of each shot to (0, 0).
points_distance = [math.sqrt(x ** 2 + y ** 2) for x, y in points]
print(points_distance)

# The original notebook computed the distances but never answered the
# question — pick the point whose distance to the center is smallest
# (comparing squared distances avoids the sqrt and is monotonic-equivalent).
closest_point = min(points, key=lambda p: p[0] ** 2 + p[1] ** 2)
min_distance = math.sqrt(closest_point[0] ** 2 + closest_point[1] ** 2)
print("closest point =", closest_point, "at distance", min_distance)
# -

# +
# 4. If the target has a radius of 9, calculate the number of arrows that
# must be picked up in the forest (i.e. that landed farther than 9 away).
a = sum(1 for distance in points_distance if distance > 9)
print('Arrows to pick are =', a)
# -
robin-hood.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# # US - Baby Names

# ### Introduction:
#
# We are going to use a subset of [US Baby Names](https://www.kaggle.com/kaggle/us-baby-names) from Kaggle.
# The file contains names from 2004 until 2014.
#
#
# ### Step 1. Import the necessary libraries

# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv).

# ### Step 3. Assign it to a variable called baby_names.

# ### Step 4. See the first 10 entries

# ### Step 5. Delete the columns 'Unnamed: 0' and 'Id'

# ### Step 6. Are there more male or female names in the dataset?

# ### Step 7. Group the dataset by name and assign to names

# ### Step 8. How many different names exist in the dataset?

# ### Step 9. What is the name with the most occurrences?

# ### Step 10. How many different names have the least occurrences?

# ### Step 11. What is the median name occurrence?

# ### Step 12. What is the standard deviation of names?

# ### Step 13. Get a summary with the mean, min, max, std and quartiles.
06_Stats/US_Baby_Names/Exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np X_train = np.load('../data/processed/X_train.npy') X_val = np.load('../data/processed/X_val.npy' ) X_test = np.load('../data/processed/X_test.npy' ) y_train = np.load('../data/processed/y_train.npy') y_val = np.load('../data/processed/y_val.npy' ) y_test = np.load('../data/processed/y_test.npy' ) X_train.shape from sklearn.preprocessing import PolynomialFeatures poly = PolynomialFeatures(2) # raising power of 2 - increases number of features exponentially using powers X_train = poly.fit_transform(X_train) X_train.shape X_val = poly.transform(X_val) X_test = poly.transform(X_test) from sklearn.linear_model import LinearRegression reg = LinearRegression() reg.fit(X_train, y_train) # + from joblib import dump dump(reg, '../models/linear_poly_2.joblib') # - y_train_preds = reg.predict(X_train) y_val_preds = reg.predict(X_val) from sklearn.metrics import mean_squared_error as mse from sklearn.metrics import mean_absolute_error as mae print(mse(y_train, y_train_preds, squared=False)) print(mae(y_train, y_train_preds)) print(mse(y_val, y_val_preds, squared=False)) print(mae(y_val, y_val_preds))
notebooks/2_linearr_poly.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Tutorial 5. Introduction to functions # # # Created by <NAME> 2019. All content contained in this notebook is licensed under a [Creative Commons License 4.0](https://creativecommons.org/licenses/by/4.0/). The code is licensed under a [MIT license](https://opensource.org/licenses/MIT). # + import numpy as np import pandas as pd import seaborn as sns import scipy.stats as stats import matplotlib.pyplot as plt import TCD19_utils as TCD TCD.set_plotting_style_2() # Magic command to enable plotting inside notebook # %matplotlib inline # Magic command to enable svg format in plots # %config InlineBackend.figure_format = 'svg' # - # The purpose of programming is to make our life easier by speeding up the process of making certain operations on data. So far we've used pre-built and customed made functions, but we still have to understand the structure and syntax of functions in python. **Functions** make code reusable and thus is one of the most useful parts of coding. From now on, we will focus on making functions after we've made our data analysis workflows in our jupyter notebooks. # ## Structure and syntax of customized functions in python # As we saw in the previous module, we can think of computational functions the same way we think of math functions. Functions have **arguments** as inputs, they perform certain operations based on the arguments, and then **returns** and output. # # Let's think of a simple function that makes the summation operation. # # \begin{align} # f(x)= SUM(X) = \sum_{i=1}^{n} x_{i} = x_{1} + x_{2} + ... + x_{n} # \end{align} # The function above takes in a vector $X$ as argument, and returns the sum over all of its elements. 
# # To code it in python, the function has to be **defined** using the `def` keyword, followed by the function's name (automatically colored in blue) and a colon. The [PEP-8](https://realpython.com/python-pep8/) guideline recommends that function names with more than one word be linked by underscores (e.g. `my_function`).
#
# After the colon, if you click enter, python automatically makes an [indentation](https://en.wikipedia.org/wiki/Indentation_style). After that it is highly recommended to have a doc string with a brief description of the function, its arguments, and its outputs. The doc string is usually written inside triple quotes. Furthermore, after the doc string, there is a block of code which is the operation of the function and finally, to get the result of the function, you need to write `return` and the name of the variable of your output, without parenthesis.
#
# Thus the basic style of a function is the following:
#
#     def my_function_name(args):
#
#         """Docstring explaining the function"""
#         block of code
#
#         return result
#
# Now we can write down the function of our summation function.


def summation(x):
    """Return the sum of the elements of a list or array."""
    total = 0
    for value in x:
        total = total + value
    return total


# That's it, we can now call our function.

summation(np.array([1,2,3]))

# Luckily, there are a whole bunch of built-in mathematical functions inside numpy and scipy that we can readily use. In general, if you want to do something that seems really common, someone has probably already coded it in python.
#
# For example, the numpy module has built-in **methods** for the sum, mean, median, standard deviation and other operations on numpy arrays. To call this method we just have to write the dot syntax as everything in python is an object.
# + x = np.array([1,2,3]) x.sum() # - summation(x) == x.sum() # ## Built-in functions # Beside the functions inside the different python packages, python has several built-in functions. We have already seen functions like `len()`, `sorted()`, `max()`, `range()`, `print()`, `reversed()`, in addition to data type conversions like `list()`. The complete list of built-in functions can be found in this [link](https://docs.python.org/3/library/functions.html). # Another good programming conventions is the following: # # > Never name a function with the same name as a python built-in function. # ## Functions don't need to have arguments # After writing functions, it is also useful to know that you can build your own modules to avoid pasting functions to a notebook and making undesirably long notebooks. A module is contained in a file that ends in `.py`. Let's explore the little module I made for the workshop. We can load the `.py` file into the notebook using the `%load` command. After running it, it will become commented out followed by a hash. You can also comment out the `import` statements in order to speed up the processing. # # %load TCD19_utils.py # As you can see the plotting functions contained do not have arguments. This is an example to show that functions can be called without arguments and they will work ! #Initialize linear spacr y = np.linspace(0,1, 100) #Call plotting style function set_plotting_style() plt.plot(y) plt.show() #Call plotting style function 2 set_plotting_style_2() plt.plot(y) plt.show() # ## Keyword arguments # A neat feature when writing functions in python is that you don't need to specify the number of arguments you'll use in a function by using `*args` and `**kwargs`. This means you can pass a variable number of arguments to a function. In brief, `*args` help you to specify arguments to send a to send a non-keyworded variable-length argument list to functions. 
# Moreover, the `**kwargs` are used to pass a keyworded, variable-length
# argument dictionary to a function. Let's use an example for you to see
# their amazing usefulness.


def multiply(*args):
    """Print the product of all positional arguments."""
    product = 1
    for factor in args:
        product = product * factor
    print(product)


multiply(44, 5)
multiply(10, 92.3)
multiply(2, 3, 44)
multiply(3, 5, 10, 6)
multiply(3, 4, 555, 73.4, 3)


# +
def print_kwargs(**kwargs):
    """Print the dictionary of keyword arguments received."""
    print(kwargs)


print_kwargs(kwargs_1="Izamal", kwargs_2=4.5, kwargs_3=True)
# -
notebooks/t05-TCD-intro-to-functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # # Troubleshooting Azure Sentinel Notebooks # # If you are having trouble with Jupyter notebooks run this notebook to help # identify where the problem might be. # # Select the notebook menu item `Cell->Run All` - check for any warnings or errors. # # Read the text above the cell(s) that produce errors - the text # contains links to resources that describe how to fix the error. # ## Python Version Check # # > **Note** # > You can set the default Python version in Azure Notebooks project settings. # > # > For details on how to do this see [AzureNotebooks-ConfigurePythonVersion](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/HowTos/AzureNotebooks-ConfigurePythonVersion.ipynb) # > # > If you are using a Data Science Virtual Machine as your # > Azure Notebooks compute you should read [Provisioning a DSVM](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/HowTos/Provisioning%20DSVM.ipynb) # + import sys from IPython.display import display, HTML, Markdown MIN_REQ_PYTHON = (3, 6) errors = [] warns = [] info = [] def setup_err(mssg): display(Markdown("<h3><font color='red'>Setup Error</font></h3>")) display(Markdown("<h4><font color='red'>%s</font></h4>" % mssg)) errors.append(mssg) def setup_ok(mssg): display(Markdown("<h4><font color='blue'>%s Ok</font></h4>" % mssg)) info.append(mssg) def setup_warn(mssg): display(Markdown("<h4><font color='orange'>%s</font></h4>" % mssg)) warns.append(mssg) display(Markdown("#### Checking Python version...")) if sys.version_info < MIN_REQ_PYTHON: setup_err("Python version") display(Markdown('Check the Kernel->Change Kernel menu and ensure that Python 3.6')) display(Markdown('or later is selected as the active kernel.')) else: setup_ok( "Python version {}.{}.{}".format( 
sys.version_info[0], sys.version_info[1], sys.version_info[2] ) ) # - # ## Package Import Check # # This section checks the import of `msticpy` and its dependent packages. # # > **Note** # > If you are repeatedly seeing packages going missing when working in Azure Notebooks # > this may be because the docker containers running the Python kernel are # > recycled after a few hours when not in use. This causes the environments # > to reset to defaults. # > # > To prevent this you should configure you Azure Notebooks project with a # > requirements.txt file that is automatically run (and packages installed) # > when the contain is initialized. # > # > For details on how to do this see [AzureNotebooks-ConfigurePythonVersion](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/HowTos/AzureNotebooks-ConfigurePythonVersion.ipynb) # + import importlib import sys import warnings from IPython.display import display, HTML, Markdown REQ_PYTHON_VER = (0, 2, 7) display(Markdown("#### Checking msticpy...")) warn_mssg = [] err_mssg = [] restart_req = False MISSING_PKG_ERR = """ <h3><font color='red'>Warning {package} is not installed or has an unsupported version</h3></font> """ need_update = False try: import msticpy mp_version = tuple([int(v) for v in msticpy.__version__.split(".")]) if mp_version < MSTICPY_REQ_VERSION: setup_err("msticpy %s.%s.%s or later is required." % MSTICPY_REQ_VERSION) need_update = True except ImportError: display(HTML(MISSING_PKG_ERR.format(package="msticpy"))) need_update = True else: setup_ok(f"msticpy version {msticpy.__version__}") if need_update: resp = input("Install the package now? 
(y/n)") if resp.casefold().startswith("y"): # !pip install --upgrade msticpy if "msticpy" in sys.modules: importlib.reload(sys.modules["msticpy"]) else: import msticpy print(f"msticpy installed - version {msticpy.__version__}") else: setup_warn("msticpy missing or out-of-date.") display(Markdown("Please run `pip install --upgrade msticpy` to upgrade/install msticpy")) # - # ## Pandas Version Check # # Many of the notebooks and msticpy features require a mininum # pandas version of 0.25.0. # + display(Markdown("#### Checking pandas...")) PANDAS_REQ_VERSION = (0, 25, 0) need_update = False try: import pandas as pd pd_version = tuple([int(v) for v in pd.__version__.split(".")]) if pd_version < PANDAS_REQ_VERSION: setup_err("pandas %s.%s.%s or later is required." % PANDAS_REQ_VERSION) need_update = True except ImportError: display(HTML(MISSING_PKG_ERR.format(package="pandas"))) need_update = True else: setup_ok(f"Pandas version {pd.__version__}") if need_update: resp = input("Install the package now? (y/n)") if resp.casefold().startswith("y"): # !pip install --upgrade pandas if "pandas" in sys.modules: importlib.reload(pd) else: import pandas as pd print(f"pandas installed - version {pandas.__version__}") else: setup_warn("pandas missing or out-of-date.") display(Markdown("Please run `pip install --upgrade pandas` to upgrade/install pandas")) # - # ## Workspace Configuration Check # # This section checks for presence of configuration files `config.json` # and `msticpyconfig.yaml` # # The `msticpyconfig.yaml` can store the workspace and tenant information # for your Azure Sentinel workspace. It can also store values for multiple # workspaces. If you have the values configured in this file you do not # need to worry about the values in `config.json`. # # You can specify the location of your `msticpyconfig.yaml` in the # environment variable `MSTICPYCONFIG`. This will make the file # accessible to all notebooks running on the system. 
For # more information on configuring `msticpyconfig.yaml` see the next # cell [mstipcy Configuration](#msticpy-Configuration) # # If you want to transfer your workspace settings to `msticpyconfig.yaml` # from `config.json`, simply copy the value of the `tenant_id` and # `workspace_id` settings to the relevant section. # # > **Note** the value names in msticpyconfig.yaml use slightly different naming # > conventions: # ``` # WorkspaceId: 0cd830ff-60dc-40d1-8045-11d2b7b277e1 # TenantId: aff2102d-1d6c-4501-9efb-6053ab7efb19 # ``` # # ### Workspace Configuration - config.json # Creating an Azure Notebooks project from Azure Sentinel # will automatically create a `config.json` file in the root of # your Azure Notebooks project and populate values # for your Azure Sentinel workspace. # # If you have copied the notebooks elsewhere (e.g. to run them locally, # or you are running them on a Data Science Virtual machine) you # should copy this original config.json to the folder from which # you are running notebooks. # # > **Note** if you are using a `msticpyconfig.yaml` to store your # > workspace settings, most notebooks will take values from that. # > As with `config.json` - you must have a locally accessible copy # > of this file, so you will need to copy it to other systems if # > you are running notebooks from there. # # # If you are using the config.json (default config for Azure Sentinel # with Azure Notebooks), your config.json should look something like this # ```json # { # "tenant_id": "aff2102d-1d6c-4501-9efb-6053ab7efb19", # "subscription_id": "9ce7caeb-1f42-4141-b076-7f448a00aceb", # "resource_group": "MyResourceGroup", # "workspace_id": "0cd830ff-60dc-40d1-8045-11d2b7b277e1", # "workspace_name": "MyResourceSubscription" # } # ``` # # The tenant_id and workspace_id values must be configured, other values # are optional but recommended. 
# +
import os
import json
from pathlib import Path
import uuid

import yaml


def valid_uuid(uuid_str):
    """Return True if *uuid_str* parses as a UUID, False otherwise."""
    try:
        uuid.UUID(uuid_str)
    except (ValueError, TypeError):
        # TypeError covers None / non-string inputs.
        return False
    return True


def check_mp_config_ws(mp_path):
    """Validate the AzureSentinel/Workspaces section of msticpyconfig.yaml.

    Returns a tuple of (error messages, warning messages).
    """
    with open(mp_path, "r") as mp_yml:
        mp_config = yaml.safe_load(mp_yml)

    mp_errors = []
    as_settings = mp_config.get("AzureSentinel", {})
    if not as_settings:
        mp_errors.append(f"Missing or empty 'AzureSentinel' section in {mp_path}")
    ws_settings = as_settings.get("Workspaces", {})
    if not ws_settings:
        mp_errors.append(f"Missing or empty 'Workspaces' section in {mp_path}")

    no_default = True
    for ws, ws_settings in ws_settings.items():
        if ws == "Default":
            no_default = False
        ws_id = ws_settings.get("WorkspaceId")
        # Bug fix: these checks previously used `and`, so a workspace with a
        # present-but-invalid GUID was never reported. A value is bad when it
        # is missing OR fails UUID validation (matching the provider checks
        # later in this notebook, which use `or`).
        if not ws_id or not valid_uuid(ws_id):
            mp_errors.append(f"Invalid GUID for WorkspaceId in {ws} section")
        ten_id = ws_settings.get("TenantId")
        if not ten_id or not valid_uuid(ten_id):
            mp_errors.append(f"Invalid GUID for TenantId in {ws} section")
    warnings = ["No default workspace set"] if no_default else []
    return mp_errors, warnings


def check_json_config(json_path):
    """Validate tenant_id and workspace_id GUIDs in config.json."""
    j_conf_errs = []
    with open(json_path, "r") as json_file:
        conf_json = json.load(json_file)
    conf_tenant = conf_json.get("tenant_id")
    if conf_tenant == "{{cookiecutter.tenant_id}}":
        j_conf_errs.append("Tenant Id is set to default value")
    elif not valid_uuid(conf_tenant):
        j_conf_errs.append("Tenant ID is not a valid GUID.")
    conf_ws = conf_json.get("workspace_id")
    if conf_ws == "{{cookiecutter.workspace_id}}":
        j_conf_errs.append("Workspace Id is set to default value")
    elif not valid_uuid(conf_ws):
        j_conf_errs.append("Workspace ID is not a valid GUID.")
    return j_conf_errs


mp_warnings = []
display(Markdown("#### Checking Azure Sentinel Workspace config..."))
mp_path = os.environ.get("MSTICPYCONFIG", "./msticpyconfig.yaml")
if Path(mp_path).exists():
    mp_errs, mp_warnings = check_mp_config_ws(mp_path)
else:
    mp_errs = [f"{mp_path} not found"]

DEF_CONF_JSON = "./config.json"
if Path(DEF_CONF_JSON).exists():
    jc_errs = check_json_config(DEF_CONF_JSON)
    if jc_errs and mp_errs:
        setup_err("No valid workspace configuration found.")
        if jc_errs:
            print(jc_errs)
        if mp_errs:
            print(mp_errs)
    else:
        if not jc_errs:
            setup_ok(f"Workspace configuration found in '{DEF_CONF_JSON}'")
        if not mp_errs:
            setup_ok(f"Workspace configuration found in '{mp_path}'")
else:
    setup_warn(f"Workspace configuration: Cannot find msticpy config {mp_path}")

if mp_warnings:
    display(Markdown(f"<h5><font color='orange'>{', '.join(mp_warnings)}</font></h5>"))
# -

# # msticpy Configuration
#
# The msticpy configuration file `msticpyconfig.yaml` holds settings
# required by TI Providers and other data providers such as GeoIP.
#
# These features will not work unless you have valid API Keys from your
# accounts with these providers stored in `msticpyconfig.yaml`.
#
# > **Note** you can store the actual values of the keys in
# > environment variables
# > and use the settings in `msticpyconfig.yaml` to reference these.
# For more information on `msticpy` configuration file settings,
# please refer to the following items:
#
# - [Configuration guide notebook](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/ConfiguringNotebookEnvironment.ipynb)
# - [msticpy configuration documentation](https://msticpy.readthedocs.io/en/latest/getting_started/msticpyconfig.html)

# +
import os
import json
from pathlib import Path
import uuid

import yaml


def check_mp_config_providers(mp_path):
    """Validate the TIProviders/OtherProviders sections of msticpyconfig.yaml.

    Returns a list of error messages (empty when the configuration is valid).
    """
    mp_errors = []
    if not Path(mp_path).exists():
        mp_errors.append("No msticpyconfig.yaml found.")
        return mp_errors
    with open(mp_path, "r") as mp_yml:
        mp_config = yaml.safe_load(mp_yml)
    for conf_section in ["TIProviders", "OtherProviders"]:
        display(Markdown(f"Checking {conf_section}..."))
        settings = mp_config.get(conf_section, {})
        mp_errors += check_provider_settings(settings, conf_section)
    # Bug fix: the collected errors were never returned, so the caller always
    # received None and reported the configuration as valid even when checks
    # had failed.
    return mp_errors


def check_provider_settings(settings, section):
    """Check API key / GUID settings for each provider in *settings*.

    *section* is the name of the configuration section being checked and is
    used only in error messages. Returns a list of error messages.
    """
    mp_errors = []
    if not settings:
        mp_errors.append(f"Missing or empty '{section}' section in {mp_path}")
    for p_name, p_setting in settings.items():
        print(p_name, end=" ")
        ti_args = p_setting.get("Args")
        # Providers that authenticate with an AuthKey.
        if p_name in ["OTX", "VirusTotal", "XForce", "OpenPageRank"]:
            if "AuthKey" not in ti_args or not ti_args["AuthKey"]:
                mp_errors.append(f"{section}: Missing or invalid AuthKey for {p_name} section")
        # XForce additionally requires an ApiKey.
        if p_name == "XForce":
            if "ApiKey" not in ti_args or not ti_args["ApiKey"]:
                mp_errors.append(f"{section}: Missing or invalid ApiKey for {p_name} section")
        # The AzureSentinel provider needs valid workspace/tenant GUIDs.
        if p_name == "AzureSentinel":
            ws_id = p_setting.get("WorkspaceID")
            if not ws_id or not valid_uuid(ws_id):
                mp_errors.append(f"{section}: Invalid GUID for WorkspaceID in {p_name} section")
            ten_id = p_setting.get("TenantID")
            if not ten_id or not valid_uuid(ten_id):
                mp_errors.append(f"{section}: Invalid GUID for TenantID in {p_name} section")
    print()
    return mp_errors


display(Markdown("#### Checking msticpy config..."))
mp_path = os.environ.get("MSTICPYCONFIG", "./msticpyconfig.yaml")
if not Path(mp_path).exists():
    setup_err(f"Cannot find msticpy config {mp_path}")
    if "MSTICPYCONFIG" in os.environ:
        setup_warn("'MSTICPYCONFIG' points to non-existent file")
else:
    mp_errs = check_mp_config_providers(mp_path)
    if mp_errs:
        setup_err("Invalid msticpy configuration found.")
        print(mp_errs)
    else:
        setup_ok(f"msticpy configuration in '{mp_path}'")
# -

# # Summary

if errors:
    display(Markdown(f"<h3><font color='red'><u>{len(errors)} errors:</u></font>"))
    for mssg in errors:
        display(Markdown(f"<font color='red'>{mssg}</font>"))
if warns:
    display(Markdown(f"<h3><font color='orange'><u>{len(warns)} warnings:</u></font>"))
    for mssg in warns:
        display(Markdown(f"<font color='orange'>{mssg}</font>"))
display(Markdown("<h3><font color='blue'><u>Info/Success:</u></font>"))
for mssg in info:
    display(Markdown(f"<font color='blue'>{mssg}</font>"))
TroubleShootingNotebooks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Projeto Ciência de Dados - Previsão de Vendas # # - Nosso desafio é conseguir prever as vendas que vamos ter em determinado período com base nos gastos em anúncios nas 3 grandes redes que a empresa Hashtag investe: TV, Jornal e Rádio # # - Base de Dados: https://drive.google.com/drive/folders/1o2lpxoi9heyQV1hIlsHXWSfDkBPtze-V?usp=sharing # ### Passo a Passo de um Projeto de Ciência de Dados # # - Passo 1: Entendimento do Desafio # - Passo 2: Entendimento da Área/Empresa # - Passo 3: Extração/Obtenção de Dados # - Passo 4: Ajuste de Dados (Tratamento/Limpeza) # - Passo 5: Análise Exploratória # - Passo 6: Modelagem + Algoritmos (Aqui que entra a Inteligência Artificial, se necessário) # - Passo 7: Interpretação de Resultados # # Projeto Ciência de Dados - Previsão de Vendas # # - Nosso desafio é conseguir prever as vendas que vamos ter em determinado período com base nos gastos em anúncios nas 3 grandes redes que a empresa Hashtag investe: TV, Jornal e Rádio # - TV, Jornal e Rádio estão em milhares de reais # - Vendas estão em milhões # #### Importar a Base de dados # #### Análise Exploratória # - Vamos tentar visualizar como as informações de cada item estão distribuídas # - Vamos ver a correlação entre cada um dos itens # #### Com isso, podemos partir para a preparação dos dados para treinarmos o Modelo de Machine Learning # # - Separando em dados de treino e dados de teste # #### Temos um problema de regressão - Vamos escolher os modelos que vamos usar: # # - Regressão Linear # - RandomForest (Árvore de Decisão) # #### Teste da AI e Avaliação do Melhor Modelo # # - Vamos usar o R² -> diz o % que o nosso modelo consegue explicar o que acontece # #### Visualização Gráfica das Previsões # #### Como fazer uma nova previsão?
Arquivo Inicial - Aula 4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/unica-isde/isde/blob/master/notebooks/lab02.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="w4HHiu0MPjNl"
# # Python - Programming Exercise
#
# In this programming exercise, we will implement a simple machine-learning algorithm to classify handwritten digits.
#
# We start by writing a simple function to load the popular MNIST handwritten digit images and visualize some examples.

# + id="qlOvfEOXPhS3" outputId="59281d42-33b5-4686-bf6a-74052cefe392" colab={"base_uri": "https://localhost:8080/", "height": 203}
import pandas as pd
import numpy as np


def load_mnist_data(filename, n_samples=None):
    """This function returns MNIST handwritten digits and labels as ndarrays.

    Column 0 of the CSV is the digit label; the remaining 784 columns are
    pixel intensities, rescaled here to [0, 1].
    """
    data = pd.read_csv(filename)
    data = np.array(data)  # cast pandas dataframe to numpy array
    if n_samples is not None:  # only returning the first n_samples
        data = data[:n_samples, :]
    y = data[:,0]
    x = data[:,1:] / 255.0
    return x, y


# loads data from a CSV file hosted in our repository
filename = "https://github.com/unica-isde/isde/raw/master/data/mnist_data.csv"
x, y = load_mnist_data(filename)  # , n_samples=1000)
print(x.shape)
print(y.shape)
print(y, type(y))
print(x, type(x))

# + [markdown] id="bUzdAdqFZdJe"
# Let's count how many digit images we have for each class.

# + id="MKs4suZ-VqAq" outputId="d305a9f6-9a31-4b1d-c613-5fef14e7df7a" colab={"base_uri": "https://localhost:8080/", "height": 220}
num_classes = np.unique(y).size  # number of unique elements in y
counts = np.zeros(shape=(num_classes,), dtype=int)
for i in range(num_classes):
    counts[i] = np.array(y==i).sum()  # number of samples labeled i
    print(i, counts[i])
print(counts)

# + [markdown] id="2iMjNSABZwke"
# Let's plot some digit examples. Each digit is stored as a row vector of 784 elements. To display it as an image, we need to reshape this vector as a 28x28 matrix, and then use the command ```imshow```.

# + id="G_kVQRxCW7cs" outputId="577d0f32-8e93-424c-e1fc-16953836e17b" colab={"base_uri": "https://localhost:8080/", "height": 328}
import matplotlib.pyplot as plt


def plot_image(im, shape=(28,28)):
    '''Plot one image after reshaping.'''
    im = im.reshape(shape)  # reshape the digit to be 28x28
    plt.imshow(im, cmap='binary')


def plot_ten_images(x, y, shape=(28,28)):
    '''Plot the first ten images from x in a 2x5 plot, after reshaping.'''
    plt.figure(figsize=(10,5))  # new figure with height=5, width=10
    for i in range(10):
        plt.subplot(2, 5, i+1)
        image = x[i, :]
        plot_image(image, shape=shape)
        plt.title("label: " + str(y[i]))


plot_ten_images(x,y,shape=(28,28))

# + [markdown] id="HEs6VorSIFaW"
# ## Splitting data into training and test

# + id="4PEnMQ7hIJ5G" outputId="864d230c-c420-4b20-91bf-ffe49056f73f" colab={"base_uri": "https://localhost:8080/", "height": 120}
# x: 1000, 784; y=1000
tr_frac = 0.6
n_samples = x.shape[0]
n_tr = int(tr_frac*n_samples)
n_ts = n_samples - n_tr
# create a vector of indices from 0 to 999 = [0, 1, 2, ... 999]
idx = np.linspace(0, n_samples, num=n_samples, endpoint=False, dtype='int')
np.random.shuffle(idx)  # shuffling the elements of idx (in-place)
print(idx[:10])
tr_idx = idx[:n_tr]  # extract the subset of training indices (first n_tr samples)
ts_idx = idx[n_tr:]  # extract the subset of test indices (remaining samples)
xtr = x[tr_idx,:]
ytr = y[tr_idx]
xts = x[ts_idx,:]
yts = y[ts_idx]
print(n_tr, tr_idx.size)
print(n_ts, ts_idx.size)
print(xtr.shape, ytr.shape)
print(xts.shape, yts.shape)
assert(n_tr==tr_idx.size)
assert(n_ts==ts_idx.size)

# + id="WSB4Y2HkOzu3"
def data_split(x, y, tr_frac=0.6):
    """Randomly split (x, y) into training and test sets.

    Returns (xtr, ytr, xts, yts), with a fraction *tr_frac* of the samples
    in the training set.
    """
    n_samples = x.shape[0]
    n_tr = int(tr_frac*n_samples)
    n_ts = n_samples - n_tr
    # create a vector of indices from 0 to n_samples-1 = [0, 1, 2, ... n_samples-1]
    idx = np.linspace(0, n_samples, num=n_samples, endpoint=False, dtype='int')
    np.random.shuffle(idx)  # shuffling the elements of idx (in-place)
    tr_idx = idx[:n_tr]  # extract the subset of training indices
    ts_idx = idx[n_tr:]  # extract the subset of test indices
    # check coherence with the number of extracted elements
    assert(n_tr==tr_idx.size)
    assert(n_ts==ts_idx.size)
    xtr = x[tr_idx,:]
    ytr = y[tr_idx]
    xts = x[ts_idx,:]
    yts = y[ts_idx]
    return xtr, ytr, xts, yts


tr_frac = 0.6
xtr, ytr, xts, yts = data_split(x, y, tr_frac=tr_frac)

# + [markdown] id="R1Syau-KbSN6"
# ## Implementing the fit function
#
# We now implement the fit function of the nearest-mean-centroid (NMC) classifier. It consists of estimating the average centroid for each class. Then, during prediction, the classifier will assign the input sample to the class of the closest centroid.
#
# Let's start by estimating the centroid for class 0.

# + id="7RAlqFZ8mXKZ" outputId="f12e561e-fa87-4080-dbdd-b9144c7d807d" colab={"base_uri": "https://localhost:8080/", "height": 378}
# fit my classification function (estimate centroids from training data)
x0 = xtr[ytr==0, :]  # all training digits labeled 0
y0 = ytr[ytr==0]
print(y0.size)
print(x0.shape[0])
plot_ten_images(x0,y0,shape=(28,28))
centroid0 = x0.mean(axis=0)  # per-pixel mean over class-0 samples
print(centroid0.shape)

# + id="ipbJIfc_tASV" outputId="f99a04ac-21a5-4baf-efa5-2522f8a9f631" colab={"base_uri": "https://localhost:8080/", "height": 286}
plot_image(centroid0)

# + [markdown] id="06AixVIkb30Z"
# We can now generalize our algorithm to compute the centroids of all classes.

# + id="S-I774YbtVZ0" outputId="9e4a6c8c-4841-4979-8997-28557ae84378" colab={"base_uri": "https://localhost:8080/", "height": 345}
# compute centroid for each class
num_features = xtr.shape[1]
centroids = np.zeros(shape=(num_classes, num_features))
labels = np.zeros(shape=(num_classes, ), dtype='int')
print(centroids.shape)
for i in range(num_classes):
    xi = xtr[ytr==i, :]  # all digits from class i
    centroids[i,:] = xi.mean(axis=0)  # average digit for class i
    labels[i] = i  # label of i-th centroid
plot_ten_images(centroids,labels,shape=(28,28))

# + [markdown] id="mqnnoYracZRj"
# Finally, we create the function ```fit```.

# + id="AM3Sc2HGxTh1" outputId="30b57868-d853-4385-f139-2f7cda25740b" colab={"base_uri": "https://localhost:8080/", "height": 328}
def fit(x_tr, y_tr):
    """Estimate the centroid for each class from the training data"""
    labels = np.unique(y_tr)
    centroids = np.zeros(shape=(labels.size, x_tr.shape[1]))
    for i, label in enumerate(labels):
        centroids[i,:] = x_tr[y_tr==label, :].mean(axis=0)  # centr. for class i
    return centroids, labels


centroids, labels = fit(xtr, ytr)
plot_ten_images(centroids,labels,shape=(28,28))

# + [markdown] id="XXYtzTtfQyLs"
# ## Predict

# + id="ul87S7CoQ0jX" outputId="b9e3b414-0d5b-4d13-f1f3-f58d7b1d28fd" colab={"base_uri": "https://localhost:8080/", "height": 186}
# xts, centroids
# Three equivalent ways of computing sample-to-centroid distances, from the
# explicit double loop to progressively more vectorized forms.
n_samples = xts.shape[0]
n_classes = centroids.shape[0]
dist = np.zeros(shape=(n_samples, n_classes))
ypred = np.zeros(shape=(n_samples, ), dtype='int')
print(dist.shape)
for i in range(n_samples):
    for k in range(n_classes):
        dist[i,k] = np.linalg.norm(xts[i,:]-centroids[k,:], ord=2)
    ypred[i] = np.argmin(dist[i,:])
ypred2 = np.argmin(dist, axis=1)
print(ypred==ypred2)
dist2 = np.zeros(shape=(n_samples, n_classes))
for i in range(n_samples):
    dist2[i,:] = np.linalg.norm(xts[i,:]-centroids, ord=2, axis=1)
print(dist2)

# + id="MJj5qPSShzXB" outputId="3a1c26d4-2278-4173-903d-2918bcf08db4" colab={"base_uri": "https://localhost:8080/", "height": 319}
# Same distances again, looping over classes instead of samples.
dist3 = np.zeros(shape=(n_samples, n_classes))
for k in range(n_classes):
    m = xts-centroids[k,:]
    print(m.shape)
    dist3[:, k] = np.linalg.norm(xts-centroids[k,:], ord=2, axis=1)
print(dist3)

# + id="syRo9o6-jJdm" outputId="480b9938-b0e3-46e3-985b-1ac97cd24810" colab={"base_uri": "https://localhost:8080/", "height": 153}
from sklearn.metrics import pairwise_distances

dist = pairwise_distances(xts, centroids)  # dist.shape = (400, 10)
ypred = np.argmin(dist, axis=1)
print(dist)

# + id="6KRUZ8ahdKp_"
def predict_for_loops(xts, centroids):
    """Predict labels via explicit distance loops (reference implementation)."""
    n_samples = xts.shape[0]
    n_classes = centroids.shape[0]
    dist = np.zeros(shape=(n_samples, n_classes))
    ypred = np.zeros(shape=(n_samples, ), dtype='int')
    for i in range(n_samples):
        for k in range(n_classes):
            dist[i,k] = np.linalg.norm(xts[i,:]-centroids[k,:], ord=2)
        ypred[i] = np.argmin(dist[i,:])
    return ypred


def predict(xts, centroids):
    """Predict labels using vectorized pairwise distances.

    NOTE(review): returns the index of the closest centroid, not its label;
    this matches `fit` only when class labels are 0..n_classes-1.
    """
    dist = pairwise_distances(xts, centroids)  # dist.shape = (400, 10)
    ypred = np.argmin(dist, axis=1)
    return ypred

# + [markdown] id="Mqn4-D6nlV_U"
# ## Evaluate test error
#

# + id="H6QgnC2_lbXW" outputId="14d59ede-c6f9-44ff-91ce-84c6936ed404" colab={"base_uri": "https://localhost:8080/", "height": 87}
# Given yts, ypred, how do I compute the test error?
err=0
n_samples = yts.size
for i in range(n_samples):
    if yts[i]!=ypred[i]:
        err += 1
print(err)
print("Error: ", 100.0*err/n_samples, "%")
# print(yts!=ypred)
# Vectorized equivalent: the mean of the boolean mismatch mask.
print("Error: ", 100.0*(yts!=ypred).mean(), "%")
notebooks/lab02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Optimization - Duality # # > Lagrangian Duality, Weak and Strong Duality, Optimality Conditions, Farkas' Lemma, and Theorems of the Alternative # # - hide: false # - toc: true # - badges: true # - comments: true # - categories: ['Optimization','Applied Mathematics','Proofs'] # # Introduction # # Every convex optimization problem, designated as the ***primal***, has a related problem called its ***dual*** which can be colloquially thought of as its evil twin. The primal and the dual represent two different perspectives on the same problem. # # In the most general case, if the primal is a minimization problem, its dual is a maximization problem. In the case of constrained optimization, if the primal is minimization in $n$ variables and $m$ constraints then its dual is a maximization in $m$ variables and $n$ constraints. # # Furthermore, *any* feasible value of the dual is a lower-bound for *all* feasible values of the primal. In particular, should they both exist, the dual optimum is a lower bound for the primal optimum. This property, called ***weak duality***, lies at the core of ***duality theory***. The utility of formulating a problem whose solution obtains, at least, a lower-bound for the primal optimum and, in the special case, the primal optimum itself should be self-evident. # # In the best case scenario a problem exhibits a property called ***strong duality***, which guarantees that the primal and the dual optima agree. Such problems are called ***strongly dual problems*** and include, but are not limited to, all linear programs (LPs) and a category of convex non-linear optimization problems. For strongly dual problems, solving the dual guarantees that we've also solved the primal. 
Furthermore, as we shall see, taking the dual of the dual gives back the primal. So this relationship is true in the converse — if we've solved the primal then we've also solved its dual. # # This is what makes duality theory so useful in practice. Having a related, usually easier, optimization problem gives applied scientists a huge computational advantage. However, even if the dual does not turn out to be any easier to solve and/or strong duality fails to hold, we still stand to gain structural insight about the primal problem itself. # # In this post we show how the dual of a problem arises, we examine its relationship with the primal, and list all possible primal-dual outcomes. In doing so, we look at duality in the general case of constrained optimization, in the specific case of linear programs, and in a category of unconstrained problems. # # The Dual of a Constrained Problem # # First, let's focus on deriving the dual of a constrained optimization problem. We shall see that, in a sense, constraints are what give rise to duality through the [Lagrangian](https://en.wikipedia.org/wiki/Lagrangian_relaxation). Certain types of unconstrained problems also have duals which arise either from introducing dummy constraints, or directly through the [Fenchel-Legendre Transform](https://en.wikipedia.org/wiki/Convex_conjugate). # # Take the most general form of a convex, constrained problem with $m$ inequality and $n$ equality constraints. To make the discussion interesting, assume the problem is non-trivial (i.e. its constraint set is non-empty and contains more than one feasible point). Furthermore, so that we may have a solution to speak of, assume the problem is bounded with the finite optimum $f_0(x^*)$ for some optimizer $x^*$. # <br> # $$ # \begin{aligned} # \min_x &: f_0(x) # \\ # s.t. &: \begin{aligned} &f_i(x) \leq 0 \ \ i = 1, ...,m # \\ # &h_i(x) = 0 \ \ i = 1, ... 
,p # \end{aligned} # \end{aligned} # $$ # <br> # > Note: The $f_i$'s and the $h_i$'s in the constraints must necessarily be convex in order for their sublevel-sets, and hence the problem itself, to be convex. However, the equality constraints may be given as $Ax = b$ in some sources. These representations are practically almost equivalent. The $0$-th level-set of $Ax - b$, $\{ x : Ax = b\}$, is indeed a convex set. However, $h_i$'s in the equality constraints $h_i(x) = 0$ need not be linear for the $0$-th level-set, $\{ x : h_i(x) = 0 \}$, to be convex. For example, in $\mathbb{R}$, $x^2 = 0$ does represent a convex level-set. Note, however, that $x^2 = 0$ can be reduced to $x = 0$ which is, indeed, linear. To be precise, the notion of [quasi-linearity](https://en.wikipedia.org/wiki/Quasiconvex_function) is what's needed here. But, in practice, we simply *define* a general convex problem as having only linear equality constraints. Doing so assists in the analysis of problems and in the development of the computational methods that solve them. # Since optimizing an unconstrained problem is considerably easier than optimizing one that is constrained, we seek to augment the constrained problem into an equivalent unconstrained problem. # # The idea is to penalize infeasible $x$ using functions that express our *displeasure* for certain choices. 
# # At first we use the *infinitely-hard* penalty functions $\mathbb{1}_-$ and $\mathbb{1}_0$ which are defined as follows: # <br> # $$\mathbb{1}_-(u) = # \begin{cases} # \begin{aligned} # &0 &\textrm{if} \ u \leq 0 # \\ # &\infty &\textrm{if} \ u > 0 # \end{aligned} # \end{cases}$$ # <br> # $$\mathbb{1}_0(u) = # \begin{cases} # \begin{aligned} # &0 &\textrm{if} \ u = 0 # \\ # &\infty &\textrm{if} \ u \ne 0 # \end{aligned} # \end{cases}$$ # <br> # Then the equivalent unconstrained problem can be stated as: # <br> # $$\min_x: \mathcal{J}(x)$$ # <br> # where $\mathcal{J}(x) = f_0(x) + \sum_{i=1}^m \mathbb{1}_-(f_i(x)) + \sum_{i=1}^p \mathbb{1}_0(h_i(x))$. # # Equivalently, by naming the primal feasible set $\mathcal{X}$, we can express the objective $\mathcal{J}(x)$ as: # <br> # $$\mathcal{J}(x) = \begin{cases}\begin{aligned} # &f_0(x) \ \ x \in \mathcal{X} # \\ # &\infty \ \ \textrm{otherwise} # \end{aligned}\end{cases}$$ # <br> # Informally, if an $\hat x$ is chosen s.t *one or more* of the constraints are broken then the minimization incurs an infinitely positive penalty. Therefore, such a $\hat x$ will never be selected over any feasible choice, $x \in \mathcal{X}$, which gives a finite value of $f_0(x)$. Moreover, by optimality of $x^*$ in the original problem, we have $f_0(x) \leq f_0(x^*) \ \ \forall x$. So, the optimum of $\mathcal{J}$ will also be $f_0(x^*)$. # # That is: # # <br> # $$\min_x \mathcal{J}(x) = f_0(x^*) \tag{1}$$ # <br> # # Moreover, since the optimizer $x^*$ for the original problem is feasible, $\mathcal{J}(x^*) = f_0(x^*)$ by definition. It follows, by substitution into $(1)$, that: # # <br> # $$\mathcal{J}(x^*) = \min_x \mathcal{J}(x) \tag{2.1}$$ # <br> # # Or, equivalently: # # <br> # $$x^* = \arg \min_x \mathcal{J}(x) \tag{2.2}$$ # <br> # # $(1)$ says that it suffices to minimize the unconstrained objective $\mathcal{J}$ instead of the original problem since doing so yields $f_0(x^*)$, the optimum of the unconstrained problem. 
# $(2.1)$ and $(2.2)$, on the other hand, say that it suffices to find an optimizer $x^*$ of the unconstrained problem, since such a point will also be an optimizer of the constrained problem.
#
# To convince ourselves of this result, it helps to look at a simple example.
#
# ---
#
# Consider minimizing the quadratic form $f_0(x) = ||x||_2^2$ in $\mathbb{R}^2$ subject to the single circular inequality constraint $x_1^2 + x_2^2 \leq 1$. Below are the 3D plots of the constrained and the unconstrained problem.

# +
#hide_input
# Imports and configurations
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm

plt.rcParams['figure.autolayout'] = False
plt.style.use('dark_background')
figsize = (9.80, 4.90)
background = '#0a0a0a'


# Constrained objective function
def f(x, y):
    """Quadratic objective ||(x, y)||_2^2, evaluated elementwise on grids."""
    return x**2 + y**2


# Unconstrained augmentation of the objective
def j(x, y):
    """Objective plus a hard penalty outside the unit disk.

    The returned array takes the values of the objective on the feasible
    region and a constant penalty elsewhere.
    """
    # local renamed from `j` to avoid shadowing this function's own name
    out = f(x, y)
    mask = ~(x**2 + y**2 <= 1)  # Infeasible region
    out[mask] = 2  # Value on infeasible region. Use 2 instead of np.inf for visual purposes
    return out


# Creating the data
x_min, x_max = -2, 2
y_min, y_max = -2, 2
x, y = np.meshgrid(np.linspace(x_min, x_max, 1000), np.linspace(y_min, y_max, 1000))
z1 = np.ma.masked_where(~(x**2 + y**2 <= 1), f(x, y))
z2 = j(x, y)

# Creating the plot
fig = plt.figure(figsize=figsize, layout='tight')
fig.set_facecolor(background)

# First subplot
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.contour3D(x, y, z1, 50, cmap=cm.plasma)
ax.set_zlim3d(0, 2)
ax.set_facecolor(background)
ax.set_title(r'Constrained objective: $||x||_2^2 \ s.t. \ x_1^2 + x_2^2 \leq 1$', pad=5)
ax.set_xticks(np.arange(x_min, x_max, 1.0))
ax.set_yticks(np.arange(y_min, y_max, 1.0))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')

# Second subplot
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.contour3D(x, y, z2, 100, cmap=cm.plasma)
ax.set_zlim3d(0, 2)
ax.set_facecolor(background)
ax.set_title(r'Unconstrained objective: $\mathcal{J}(x) = ||x||_2^2 + \mathbb{1}_-(x_1^2 + x_2^2 - 1)$', pad=5)
ax.set_xticks(np.arange(x_min, x_max, 1.0))
ax.set_yticks(np.arange(y_min, y_max, 1.0))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')

# Display result
plt.show()
# -

# We can clearly see in the plots that the optimum and the optimizer are the same for both problems: namely, $f_0(x^*) = 0$ and $x^* = 0$.
#
# ---
#
# As we know, the local optima of unconstrained problems occur at their *stationary points* which can be easily identified using the *unconstrained optimality condition*.

# > **Unconstrained Optimality Condition:** &nbsp; If $x^*$ is an optimizer of the unconstrained objective $f_0(x)$ then $\nabla f_0(x^*) = 0$. That is $x^*$ is a ***stationary point*** of $f_0(x)$.

# Once such stationary points have been found, a global minimizer can be identified among them simply by evaluating the objective at each stationary point.
#
# However, we're immediately beset by a problem. We cannot find the gradient of $\mathcal{J}$ and set it to zero because the infinitely-hard penalty functions are discontinuous and non-differentiable. That is, $\nabla \mathcal{J}(x)$ simply does not exist.
#
# To sidestep this difficulty we use linear relaxations instead of $\mathbb{1}_-$ and $\mathbb{1}_0$.
# # ## The Lagrangian, Dual Variables, and the Dual Function # # The ***Lagrangian linear relaxation***, sometimes simply referred to as the ***Lagrangian***, is: # # <br> # $$\mathcal{L}(x,\lambda,\mu) = f_0(x) + \sum_{i=1}^m \lambda_i f_i(x) + \sum_{i=1}^p \mu_i h_i(x)$$ # $$\textrm{where} \ \lambda \geq 0$$ # <br> # # We call the $\lambda_i$'s the ***Lagrange multipliers*** corresponding to the inequality constraints, and the $\mu_i$'s those corresponding to the equality constraints. The vectors $\lambda$ and $\mu$, composed of these Lagrange multipliers, are called the ***Lagrange multiplier vectors*** or, for reasons that will soon become apparent, the ***dual variables***. # > Note: In some sources, the Lagrangian is simply stated as $\mathcal{L}(x,\lambda) = f_0(x) + \sum_{i=1}^n \lambda_i f_i(x)$. Indeed, by separating the equality constraints $h_i(x) = 0$ into $h_i(x) \leq 0$ and $-h_i(x) \leq 0$, we can transform a problem with equality constraints into one with only inequality constraints. So, this formulation of the Lagrangian is still general enough to account for problems with equality constraints. # ## A Lagrangian Lower-Bound # # Not only does the Lagrangian ($\mathcal{L}$) relax the unconstrained problem, it also plays a natural role in the formulation of the ***dual problem***. # # The first thing to note about the Lagrangian is that the coordinate-wise $\lambda \geq 0$ condition is crucial. This is because, in the event that an inequality constraint is violated, say $f_i(x) > 0$, the corresponding $\lambda_i$ must be non-negative in order to apply a positive penalty to the minimization. On the other hand, $\mu$ is free to assume any value since the equality constraints can be violated in either direction and both scenarios must be positively penalized. 
# # The second thing to note about the Lagrangian is that, even though it applies a positive penalty that scales linearly in the severity of the violation, this penalty is, nevertheless, not as severe as the infinite penalty applied in $\mathcal{J}$. Also, in the Lagrangian, we may actually be *rewarding* feasible choices of $x$ that have margin. That is, in the event that $f_i(x) < 0$, $\lambda_if_i(x)$ is a non-positive reward for the minimization problem. # # All of this is to say that $\mathcal{L}$ is a point-wise lower-bound on $\mathcal{J}$. That is, the following inequality holds: # # <br> # $$\mathcal{L}(x,\lambda,\mu) \leq J(x) \ \ \forall x, \lambda \geq 0, \mu \tag{3.1}$$ # <br> # # This fact is also obvious by plotting each of the $m + p$ linear penalties, superimposing them against the plots of the corresponding infinitely hard penalty functions, and noticing that in each case $\lambda_i f_i(x) \leq \mathbb{1}_-(f_i(x))$ and $\mu_i h_i(x) \leq \mathbb{1}_0(h_i(x))$. # # # Taking $\min$ w.r.t. $x$ of the LHS in $(3.1)$ we get: # # <br> # $$\min_x \mathcal{L}(x,\lambda,\mu) \leq J(x) \ \ \forall x, \lambda \geq 0, \mu$$ # <br> # # Furthermore, restricting $x$ to the primal feasible set $\mathcal{X}$ on which $J(x) = f_0(x)$, we obtain something interesting: # # <br> # $$\min_x \mathcal{L}(x,\lambda,\mu) \leq f_0(x) \ \ \forall x \in \mathcal{X}, \lambda \geq 0, \mu \tag{3.2}$$ # <br> # > Note: The Lagrangian may not attain its $\min$ w.r.t. $x$, in which case the LHS is simply $-\infty$. We shall see later, once we define the ***dual function*** and the ***duality gap***, that this corresponds to the dual function being $-\infty$ $\forall \lambda \geq0, \mu$ and the duality gap being $\infty$. In a sense, this is a useless lower bound. So, for now, we assume the interesting case in which the minimum *is* attained and thus $\inf_x \mathcal{L}(x, \lambda, \mu) = \min_x \mathcal{L}(x, \lambda, \mu)$. 
# Designating the original problem as the *primal*, we call $g(\lambda, \mu) := \min_x \mathcal{L}(x, \lambda, \mu)$ the ***dual function*** because it exhibits the aforementioned property of weak duality. That is, per $(3.2)$, any feasible value of $g(\lambda, \mu)$ is a lower-bound for any feasible value of the primal. # # Taking min of the other side, we have a more specific flavor of weak duality: # # <br> # $$g(\lambda,\mu) \leq \min_x f_0(x) \ \ \forall \lambda \geq 0, \mu$$ # <br> # # Or simply: # # <br> # $$g(\lambda,\mu) \leq f_0(x^*) \ \ \forall \lambda \geq 0, \mu \tag{3.3}$$ # <br> # # That is, any feasible value of the dual is a lower-bound for the primal optimum. # # Maximizing both sides of $(3.3)$ by noticing that the RHS is a constant, and by assuming the LHS attains its $\max$ we get an even more specific flavor of weak duality: # # <br> # $$\max_{\lambda \geq 0, \mu} g(\lambda,\mu) \leq f_0(x^*)$$ # <br> # # Or simply, assuming $\lambda^*$ and $\mu^*$ to be dual-optimal: # # <br> # $$g(\lambda^*, \mu^*) \leq f_0(x^*) \tag{3.4}$$ # <br> # # That is, the dual optimum is a lower-bound for the primal optimum. # # From here we move, quite naturally, to defining the *dual problem*. # ## The Lagrange Dual Problem # # It's natural, to ask what the *tightest* lower bound on the primal optimal value $f_0(x^*)$ is. This amounts to finding the values $\lambda^* \geq 0$, and $\mu^*$ for which $g(\lambda^*, \mu^*)$ is maximized. We call this the ***Lagrange dual problem*** or, simply, the ***dual problem***. # # It can be stated as: # <br> # $$ # \begin{aligned} # \max_{\lambda, \mu} &: g(\lambda, \mu) # \\ # s.t. &: \lambda \geq 0 # \end{aligned} # $$ # <br> # Looking at the above, it becomes immediately clear why we were motivated to call $\lambda$, and $\mu$ the *dual variables*: they are the variables of the dual problem. # # Weak Duality and Interpretations # # We now return to the general setting of constrained optimization. 
#
# We've already seen weak duality formulated as $(3.2)$, $(3.3)$, and $(3.4)$. But, there's yet another, more symmetric, formulation of weak duality.
#
# Suppose $x^*$ and $(\lambda^*, \mu^*)$ are primal-dual optimal. Since $g(\lambda^*, \mu^*)$ is the solution to the dual, and $g(\lambda, \mu) = \min_x \mathcal{L}(x, \lambda, \mu)$ we have:
#
# <br>
# $$g(\lambda^*, \mu^*) = \max_{\lambda \geq 0, \mu} \left\{ \min_x \mathcal{L}(x, \lambda, \mu) \right\} \tag{4.1}$$
# <br>
#
# Similarly, it can be shown that:
#
# <br>
# $$f_0(x^*) = \min_x \left\{ \max_{\lambda \geq 0, \mu} \mathcal{L}(x, \lambda, \mu) \right\} \tag{4.2}$$
# <br>
#
# To see this, note that for some $x$ fixed by the outer minimizer, maximizing the Lagrangian over $\lambda \geq 0$ and $\mu$ recovers $\mathcal{J}(x)$.
#
# If all of the inequality constraints are respected, that is $f_i(x) \leq 0$ $\forall i$, then, in order to maximize the Lagrangian, the best we can do is set $\lambda_i = 0$ $\forall i$. In case *any* inequality constraint is violated, that is $f_i(x) > 0$ for some $i$, the result of maximizing the Lagrangian can be made $\infty$ by choosing $\lambda_i \rightarrow \infty$ and $\lambda_j = 0$ $\forall j \ne i$.
#
# Using similar logic, if all equality constraints are respected then $h_i(x) = 0$ $\forall i$. In this case $\mu_i$ can be chosen to be any value. If, on the other hand, some equality constraint is violated then $h_i(x) \ne 0$ for some $i$. By choosing $\mu_i \rightarrow \pm \infty$, where the sign depends on the direction of the violation, the result can be made $\infty$.
#
# Thus we have shown that:
#
# <br>
# $$\begin{aligned}\max_{\lambda \geq 0, \mu} \mathcal{L}(x,\lambda,\mu) &= \begin{cases}\begin{aligned}
# &f_0(x) \ \ \textrm{if $x$ is feasible}
# \\
# &\infty \ \ \textrm{otherwise}
# \end{aligned}\end{cases} \\ &= \mathcal{J}(x)\end{aligned}$$
# <br>
#
# Now, since $x^*$ is primal optimal, we have $\min_x \mathcal{J}(x) = \mathcal{J}(x^*)$.
# Furthermore, since $x^*$ is primal-feasible, we have $\mathcal{J}(x^*) = f_0(x^*)$ which yields $(4.2)$ as promised.
#
# Thus, weak duality can be stated in the following symmetric form:
#
# <br>
# $$\max_{\lambda \geq 0, \mu} \left\{ \min_x \mathcal{L}(x, \lambda, \mu) \right\} \leq \min_x \left\{ \max_{\lambda \geq 0, \mu} \mathcal{L}(x, \lambda, \mu) \right\} \tag{3.5}$$
# <br>
# ## The Max-Min Inequality
#
# The inequality expressed as $(3.5)$ is, in fact, a general result in mathematics called the [*Max-Min Inequality*](https://en.wikipedia.org/wiki/Max%E2%80%93min_inequality). To summarize: the Max-Min Inequality makes no assumptions about the function, it's true for all functions of the form $f: X \times Y \rightarrow \mathbb{R}$ and asserts that:
#
# <br>
# $$\sup_{x\in X} \left\{ \inf_{y\in Y} f(x,y) \right\} \leq \inf_{y\in Y} \left\{ \sup_{x\in X} f(x,y) \right\}$$
# <br>
#
# Since no assumption is made on $f$, the inequality also holds for the Lagrangian, $\mathcal{L}$. And, since we're in the special case where the optimal values of the primal and the dual are assumed to exist, the functions do attain the respective optima. That is, we can replace $\sup$ and $\inf$ in the above inequality with $\max$ and $\min$ which obtains the symmetric formulation of weak duality as in $(3.5)$.
#
# We can now prove weak duality through a non-optimization lens by proving the Max-Min Inequality.
#
# For any $f$, and $x \in X$, $y \in Y$ we have:
#
# <br>
# $$f(x,y) \leq \sup_y f(x,y) \ \ \forall x$$
# <br>
#
# The right hand side is now only a function of $x$, so minimizing both sides w.r.t. $x$ yields:
#
# <br>
# $$ \inf_x f(x,y) \leq \inf_x \left\{ \sup_y f(x,y) \right\} \ \ \forall y$$
# <br>
#
# The right hand side is now a constant, so maximizing both sides w.r.t. $y$ results in the desired conclusion.
# # <br> # $$\sup_y \left\{ \inf_x f(x,y) \right\} \leq \inf_x \left\{ \sup_y f(x,y) \right\}$$ # <br> # > Note: The Max-Min Inequality proof should remind us of the steps taken to obtain $(3.2)$ through $(3.4)$ from $(3.1)$. In fact, $(3.1)$ is of form $f(x,y) \leq \sup_y f(x,y) \ \ \forall x$, since $J(x)$ is, as shown earlier, equivalent to $\max_{\lambda \geq 0, \mu} L(x, \lambda, \mu)$. # ### Game-Theoretic Interpretation # # The Max-Min Inequality is perhaps best understood intuitively as a game between two adversarial players (represented by the optimizers in this case). # # The LHS of the Max-Min Inequality can be interpreted as the following game. First, the outer maximizer, player $Y$, fixes its choice $y$. Then, the inner minimizer, player $X$, chooses $x_y = \arg \inf_x f(x,y)$ which depends on the outer's choice of $y$. Suppose $y^* = \arg \inf_y f(x,y)$ is what player $Y$'s choice would have been were it to act independently of the actions of player $X$. We can imagine a scenario in which the score $f(x_{y^*}, y^*)$ is less than the score $f(x_y, y)$ for some other choice of $y$. So, player $Y$ cannot do as well as it would've done independently, whereas player $X$ is free to do its best. Hence, player $X$, the second player, restricts the choices of player $Y$, the first player. # # If the goal is to score low then player $X$ has the advantage by playing second turn. Conversely, if the goal is to score high player $Y$ has the advantage by playing second turn. This is exactly what the Max-Min Inequality says in mathematical terms. # # Strong Duality and Interpretations # # Strong duality is the special case when weak duality, as it's stated in $(3.4)$, holds with strict equality. That is: # # <br> # $$g(\lambda^*, \mu^*) = f_0(x^*) \tag{5.1}$$ # <br> # > Note: The existence of a primal optimal $x^*$ is needed in order to speak of strong duality at all. To show this, suppose the primal is feasible but unbounded. 
Then $\not \exists x^*$ that is primal optimal. Suppose further that the dual is feasible. Then $\exists (\lambda, \mu)$ such that weak duality obtains $g(\lambda, \mu) \leq f(x) \ \ \forall x$. That is $g(\lambda, \mu)$ is a lower-bound of the primal objective. This contradicts the assumption of primal unboundedness. To avoid this contradiction, it must be the case that primal unboundedness implies dual infeasibility. However, if the dual is infeasible there can be no talk of strong duality since the dual optimal does not exist. # Alternatively, in its Max-Min characterization: # # <br> # $$\max_{\lambda \geq 0, \mu} \left\{ \min_x \mathcal{L} (x, \lambda, \mu) \right\} = \min_x \left\{ \max_{\lambda \geq 0, \mu} \mathcal{L} (x, \lambda, \mu) \right\} \tag{5.2}$$ # <br> # # Another common way to say a problem is strongly dual is to say its ***duality gap*** is zero. The duality gap is defined as the difference between the primal and dual optima, that is $f_0(x^*) - g(\lambda^*, \mu^*)$. This characterization of strong duality follows immediately from the first definition of strong duality as it's stated in $(5.1)$. Optimization problems that exhibit this property are called ***strongly dual***. # # As mentioned briefly in the introduction, strong duality gives applied scientists the ability to solve an equivalent, usually easier, dual problem instead of the primal one which may be difficult to solve. # As it happens, strong duality also obtains powerful ***optimality conditions*** which allow scientists to check if suspected optimal points are, indeed, optimal. We will soon make both of these claims more rigorous but, for now, it's enough to think of them simply as benefits of strong duality. Given these useful results it would certainly be helpful to know, in advance of solving the problem, whether or not it's strongly dual # # We shall see, through a direct proof, that all linear programs are strongly dual. 
When it comes to non-linear optimization, however, strong duality is not a general guarantee. The good news is that sufficient conditions for strong duality do exist and will be provided next. # ## Slater's Condition - Sufficient Condition for Strong Duality # # While the rare non-convex problem could exhibit the property, strong duality is mostly enjoyed by convex problems. However, not all convex problems are strongly dual. There are many results that establish conditions on the problem, beyond convexity and existence of a primal-optimal, under which strong duality holds. These conditions are called ***constraint qualifications***. In this section we will explore such conditions for convex problems and discuss them in the specific case of linear programs. # # One of these constraint qualification conditions is ***Slater's condition***. # > **Slater's Condition:** &nbsp; $\exists \ \hat x$ s.t. $f_i(\hat x) < 0$, and $h_i(\hat x) = 0$ $\forall i$. # Informally, Slater's condition says that the existence of a feasible point which has margin w.r.t. all the inequality constraints is needed in addition to convexity. In even simpler terms, the feasible region must have an interior point. # # The sufficient condition for strong duality in convex problems is then: # > **Sufficient Condition for Strong Duality:** &nbsp; Any convex optimization problem satisfying Slater's condition has zero duality gap. # The proof of this is beyond what we're trying to accomplish in this post. # # A weaker constraint qualification condition guarantees strong duality in the case of linear constraints. If $k$ of the $m$ inequality constraints are linear then the condition becomes: # # <br> # $$ # \begin{aligned}f_i(\hat x) &\leq 0, \ i = 1,...,k, \\ # f_i(\hat x) &< 0, \ i = k+1,...,m, \\ # h_i(\hat x) &= 0, \ i = 1,...,p # \end{aligned} # $$ # <br> # # In other words, the linear constraints need not have margin. 
# # Note that if all the constraints are linear, which is the case in linear programming, the above constraint qualification condition simply reduces to feasibility. # # So, while a sufficient condition of strong duality in non-linear convex programs is, both, the existence of a feasible interior point and a primal optimal, the situation is remarkably simpler in linear programs. Since a primal optimal for a linear program is also feasible, it satisfies the weaker constraint qualification condition. Thus, for a linear program to be strongly dual the existence of a primal optimal is sufficient. # ## The Max-Min Equality # # Just as weak duality is the Max-Min Inequality in disguise, strong duality is the [Minimax Theorem](https://en.wikipedia.org/wiki/Minimax_theorem) in disguise. The Minimax Theorem is the special case of the Max-Min Inequality in which the LHS and the RHS are strictly equal. It holds for any function $f: X \times Y \rightarrow \mathbb{R}$ that has some additional structure. Roughly speaking, when $f$ is saddle-shaped, convex in one variable and concave in the other, the Max-Min Inequality holds with strict equality. # # The following theorem, which is offered without proof, translates this result into the setting of optimization. # > **Saddle Point Theorem:** &nbsp; If $x^*$ and $(\lambda^*, \mu^*)$ are primal and dual optimal solutions for a convex problem which satisfies Slater's condition, they form a saddle point of the associated Lagrangian. Furthermore, if $(x^*, (\lambda^*, \mu^*))$ is a saddle point of a Lagrangian, then $x^*$ is primal optimal and $(\lambda^*, \mu^*)$ is dual optimal for the associated problem, and the ***duality gap*** is zero. # > Note: This theorem should *not* be taken as a ***certificate of strong duality***. If the Lagrangian is saddle-shaped then the associated problem is strongly dual, however the converse is not true. 
Since not all strongly dual problems are convex problems which satisfy Slater's condition, if a problem is strongly dual it is *not* guaranteed that its Lagrangian is saddle-shaped. # ### Game-Theoretic Interpretation # # In keeping with the game theoretic intuition developed in the section on weak duality, one can imagine a game in which the first player's optimal choice is independent of the second player's actions. In such a game, both players are free to play their best strategies and, consequently, the order of play is not important. # ## An Easier Dual Problem # # # Let's further qualify what we mean when we say strong duality gives an equivalent, usually easier, problem to solve. # # At the start of this post we considered a general convex program. However, everything we've discussed about Lagrangian duality applies to non-convex problems as well. Suppose the primal problem is non-convex. The task is that of finding the primal optimum: # # <br> # $$f_0(x^*) = \min_x \left\{ \max_{\lambda \geq 0, \mu} \mathcal{L} (x, \lambda, \mu) \right\}$$ # <br> # # But maximizing the Lagrangian over $\lambda \geq 0$ and $\mu$ for a fixed $x$, recovers $\mathcal{J}(x)$: a non-differentiable objective. So, we cannot use the unconstrained optimality condition in finding the stationary points of $\mathcal{J}(x)$ which is what's required in the next step. # # Meanwhile, the dual problem is that of finding the dual optimum: # # <br> # $$g(\lambda^*, \mu^*) = \max_{\lambda \geq 0, \mu} \left\{ \min_x \mathcal{L} (x, \lambda, \mu) \right\}$$ # <br> # # Minimizing the Lagrangian over $x$ for fixed $\lambda \geq 0$ and $\mu$ may still be a difficult problem but, at least, it lends itself to using the method of unconstrained optimization. Moreover, the resulting dual function $g(\lambda, \mu) = \min_x \mathcal{L}(x, \lambda, \mu)$ is a point-wise minimum of linear functions in $\lambda$ and $\mu$, so its always concave in those variables. 
Additionally, the constraint $\lambda \geq 0$ is a simple, convex (linear in fact), constraint. So, the dual problem is a convex optimization problem regardless of the convexity of the primal. # # Solving a convex dual problem is usually easier that solving a non-convex primal problem. However, even if the primal is a convex problem to begin with, the dual may still be easier to solve. The primal could have more variables than constraints in which case its dual has more constraints than variables. This is yet another way in which the dual can be an easier problem to solve than the primal. # # Theorems of the Alternative # # # Duality isn't just a tool for applied science, it has important theoretical uses. For instance, in proving ***Theorems of the Alternative***. These are theorems that describe exclusively disjoint scenarios that together comprise the entire outcome space. Formally, they are theorems of the form $A \implies \neg B \land \neg A \implies B$ where $A$, and $B$ are logical statements. # > Note: Theorems of equivalence (i.e. theorems of the form *'the following are equivalent - TFAE'*) can also be formulated as theorems of the alternative. To say that $A$ and $B$ are equivalent means $ A \iff B$. But this breaks down as $A \implies B \land B \implies A$. Letting $\hat B = \neg B$ we can rewrite the above as $A \implies \neg \hat B \land B \implies A$. But, by taking the contrapositive, $B \implies A$ becomes $\neg A \implies \neg B$, which is to say $\neg A \implies \hat B$. In summary, we have shown that $A \iff B$ is equivalent to $A \implies \neg \hat B \land \neg A \implies \hat B$. So, the class of theorems of the alternative is much broader than it appears and includes theorems of equivalence. # These theorems are usually proven by a ***Separation Argument***, but can also be proven, quite elegantly, through duality. 
# # A well-known instance of a Theorem of the Alternative is [Farkas' Lemma](https://en.wikipedia.org/wiki/Farkas%27_lemma) which underpins major results in the study of optimization. # ## Farkas' Lemma # # Farkas' Lemma simply states that a given vector $c$ is either a [conic combination](https://v-poghosyan.github.io/blog/optimization/applied%20mathematics/proofs/2022/01/23/Optimization-Review-of-Linear-Algebra-and-Geometry.html#Conic-Combinations-of-$n$-Points) of some vectors $a_i$'s (for $i \in I$), or it's entirely separated from their cone by some hyperplane. # # We state Farkas' Lemma without offering proof since it has such an obvious geometric interpretation. # > **Farkas' Lemma:** &nbsp; For any vector $c$ and $a_i \ \ (i \in I)$ either the first or the second statement holds: # &nbsp; # > * $\exists p \geq 0$ s.t. $c = \sum_{i \in I} a_ip_i$ # > * $\exists$ vector $d$ s.t. $d^Ta_i \geq 0 \ \ \forall i \in I$ but $d^Tc < 0$ # ## Proving a Theorem of the Alternative # # To see how we can prove a Theorem of the Alternative, it helps to state one. # > **Theorem:** &nbsp; Exactly one of the following two statements must hold for a given matrix $A$. # &nbsp; # > 1. $\exists x \ne 0$ s.t. $Ax = 0$ and $x \geq 0$ # > 2. $\exists p$ s.t. $p^TA > 0$ # ### Proof using a Separation Argument # # #### Prelude # # At the heart of separation arguments lies this simple fact. # > **Separating Hyperplane Theorem:** &nbsp; For any *convex* set $C$, if a point $\omega \notin C$ then there exists a hyperplane separating $\omega$ and $C$. # Farkas' Lemma, for instance, is proved by a separation argument that uses, as its convex set, the conic combination of the $a_i$'s. The conclusion is immediate since in Farkas' Lemma the first statement plainly says that a vector belongs to the convex set, and the second statement plainly says there exists a separating hyperplane between the two. # # This is the pattern all separation arguments must follow. 
However, in general, it may take a bit of work to define the problem-specific convex set and also to show that the two statements are *really* talking about belonging to this set, and separation from it. However, once these components are in place, the proof is complete. # # Using this idea, let's give a proof of the above theorem using a separation argument. # # #### Proof # # First order of business is to come up with a convex set. # # Let's take $C = \{ z : z = Ay, \sum_i y_i = 1, y \geq 0 \}$ to be the convex hull of the columns of $A$. # # The first statement in the theorem was that $\exists x \ne 0$ s.t. $Ax = 0$ and $x \geq 0$. # # Since $x \ne 0$ and $x \geq 0$ we can scale as $x$ as $y = \alpha x$ until $\sum_i y_i = 1$. # # So, the first statement is equivalent to saying the origin belongs to the convex hull $C$ (i.e. $0 \in C$) # # The second statement was that $\exists p$ s.t. $p^TA > 0$. This is equivalent to saying that all the columns of $A$ lie to one side of the separating hyperplane introduced by $p$. # # But all $z \in C$ are convex combinations of $A$'s columns. In particular since they're a convex combination they're also a conic combination, so all $z \in C$ also lie on the same side of the hyperplane. That is $p^Tz > 0 \ \ \forall z \in C$. # # But, of course, $p^T0 = 0$ (not $> 0$). So, according to the second statement, the origin is separated from $C$. # # This concludes the proof since the two statements must be mutually exclusive. # # ### Proof using Strong Duality # # To prove the theorem we need to show two things. First, we need to show $1 \implies \neg 2$, then we need to show $\neg 1 \implies 2$. # # The $1 \implies \neg 2$ direction is simple. # # Suppose $\exists x \ne 0$ s.t. $Ax = 0$ and $x \geq 0$. # # Then $\forall p \ \ (p^TA)x = p^T(Ax) = p^T0 = 0$ (not $> 0$). # # We tackle the $\neg 1 \implies 2$ direction using duality. 
# # The strategy is to construct an LP based on $\neg 1$ such that the feasibility of its dual implies $2$. # # We can express $\neg 1$ as '$\forall x \ne 0$, either $Ax \ne 0$ or $x < 0$.' Equivalently, '$x \ne 0 \implies Ax \ne 0$ or $x < 0$.' Taking the contrapositive, statement $1$ becomes '$Ax = 0$ and $x \geq 0 \implies x = 0$.' # # So, we form the LP as: # $$ # \begin{aligned} # &\max_x: \textbf{1}^Tx # \\ # &s.t.: \begin{aligned} &Ax = 0 # \\ # &x \geq 0 # \end{aligned} # \end{aligned} # $$ # Note that $x = 0$ is a feasible solution to the LP. Furthermore, assuming statement $1$ guarantees that $x = 0$ is the only feasible solution. Thus, the LP is feasible and bounded. # # By strong duality, its dual exists and is also feasible and bounded. # # The dual is: # $$ # \begin{aligned} # &\min_p: \textbf{0}^Tp # \\ # &s.t.: p^TA \geq \textbf{1} # \end{aligned} # $$ # Since the dual is feasible, $\exists p$ s.t. $p^TA \geq 1 > 0$ which demonstrates the truth of statement $2$ and, in doing so, completes the proof. # # Strong Duality - Optimality Conditions # # As mentioned before, strong duality also obtains powerful optimality conditions. These conditions are known as ***stationarity condition*** and ***complementary slackness***, and they are often bundled into the [*Karush–Kuhn–Tucker (KKT) Conditions*](https://en.wikipedia.org/wiki/Karush%E2%80%93Kuhn%E2%80%93Tucker_conditions) which will be provided shortly. # # ## Stationarity Condition # # In the section titled [An Easier Dual Problem](https://v-poghosyan.github.io/blog/optimization/applied%20mathematics/proofs/2022/02/07/Optimization-LP-Duality.html#An-Easier-Dual-Problem) we mentioned that the dual problem is that of finding the dual optimal value: # # <br> # $$g(\lambda^*, \mu^*) = \max_{\lambda \geq 0, \mu} \left\{ \min_x \mathcal{L} (x, \lambda, \mu) \right\}$$ # <br> # # If strong duality holds, this dual optimum agrees with the primal optimum. 
That is: # # <br> # $$g(\lambda^*, \mu^*) = f_0(x^*)$$ # <br> # # Turns out in case of strong duality there's even more to be said. As we saw earlier optimizing the unconstrained objective $\mathcal{J}(x)$ not only resulted in the primal optimum $f_0(x^*)$ for some optimal $x^*$ of the constrained problem, the very same point $x^*$ itself turned out to be an optimizer of $\mathcal{J}(x)$. Similarly, we can show that the primal optimum $x^*$ for some primal-dual optimal pair $(x^*, (\lambda^*, \mu^*))$ optimizes $\mathcal{L}(x, \lambda^*, \mu^*)$. In other words, the primal optimum $x^*$ is a stationary point of the Lagrangian at the dual optimum $(\lambda^*,\mu^*)$. # # That is: # # <br> # $$x^* = \arg \min_x \mathcal{L} (x, \lambda^*, \mu^*) \tag{6.1}$$ # <br> # # Or, equivalently: # # <br> # $$\min_x \mathcal{L}(x, \lambda^*, \mu^*) = \mathcal{L}(x^*, \lambda^*, \mu^*) \tag{6.2}$$ # <br> # # We can think of $(6.1)$ and $(6.2)$ as the analogs of $(2.1)$ and $(2.2)$ for the Lagrangian ($\mathcal{L}$). This is exactly what we've been working towards. Recall that the original motivation in augmenting the constrained problem into the unconstrained $\mathcal{J}$ was to find the former's optimizer using methods of unconstrained optimization on $\mathcal{J}$. Once found, $(2.1)$ or $(2.2)$ would guarantee that an optimizer of $\mathcal{J}$ was, itself, an optimizer of the original problem. Failing that, we relaxed $\mathcal{J}$ into $\mathcal{L}$ hoping we could still accomplish the same. $(6.1)$ and $(6.2)$ are the results which guarantee precisely that. They say that the optimizer $x^*$ of the original problem can be found by optimizing the unconstrained objective $\mathcal{L}$. And, since $\mathcal{L}$ is everywhere differentiable w.r.t. $x$, we can now proceed. # # In practice, however, $(6.1)$ and $(6.2)$ only give us a way to solve for a primal-optimal $x^*$ directly if a dual-optimal $(\lambda^*, \mu^*)$ is already known. 
That is, any time the dual problem is easier to solve than the primal. # # More generally, this fact gives us the next best thing. It gives us a way to check if a given pair $(x^*,(\lambda^*,\mu^*))$ is primal-dual optimal – an optimality condition known as *stationarity condition*. # > **Stationarity Condition:** &nbsp; Suppose $x^*$ and $(\lambda^*, \mu^*)$ are primal-dual optimal for a strongly dual problem. Then: # <br> # $$\nabla_x f_0(x^*) + \sum_i^m \lambda^*_i\nabla_xf_i(x^*) + \sum_{i=1}^p \mu^*_i\nabla_xh_i(x^*) = 0$$ # The stationary condition is obtained simply by an application of the unconstrained optimality condition to $\mathcal{L}(x, \lambda^*, \mu^*)$: # # <br> # $$\nabla_x \mathcal{L} (x^*, \lambda^*, \mu^*) = 0$$ # <br> # # Expanding the LHS gives: # # <br> # $$\nabla_x f_0(x^*) + \sum_i^m \lambda^*_i\nabla_xf_i(x^*) + \sum_{i=1}^p \mu^*_i\nabla_xh_i(x^*) = 0$$ # <br> # # For the sake of completeness, since we stated them without offering a proof, let's prove the equivalent claims $(6.1)$ and $(6.2)$ from which stationarity condition ultimately follows. # # #### Proof of Claims (6.1) and (6.2) # # Suppose $x^*$ and $(\lambda^*, \mu^*)$ are primal-dual optimal for a strongly dual problem. # # The following point-wise inequality holds in general since its LHS is a minimization over $x$ and its RHS is a maximization over $(\lambda, \mu)$ of the Lagrangian. # # <br> # $$g(\lambda, \mu) \leq \mathcal{L}(x, \lambda, \mu) \leq \mathcal{J}(x) \ \ \forall x, \lambda \geq 0, \mu$$ # <br> # # It is also, in particular, true for the primal-dual optimal pair. That is: # # <br> # $$g(\lambda^*, \mu^*) \leq \mathcal{L}(x^*, \lambda^*, \mu^*) \leq \mathcal{J}(x^*) \tag{7.1}$$ # <br> # # However, $\mathcal{J}(x^*) = f_0(x^*)$ and, by strong duality, $g(\lambda^*, \mu^*) = f_0(x^*)$. Hence, $g(\lambda^*, \mu^*) = \mathcal{J}(x^*)$ and $(7.1)$ is actually the equality. 
# # <br> # $$\mathcal{L}(x^*, \lambda^*, \mu^*) = g(\lambda^*, \mu^*) \tag{7.2}$$ # <br> # # Substituting the definition of the dual function for the RHS of $(7.2)$, we get: # # <br> # $$\mathcal{L}(x^*, \lambda^*, \mu^*) = \min_x \mathcal{L}(x, \lambda^*, \mu^*)$$ # <br> # # Which is exactly $(6.2)$ and, by equivalence, also $(6.1)$. # ## Complementary Slackness # # Strong duality also obtains another optimality condition known as *complementary slackness (CS)*. # > **Complementary Slackness (CS):** &nbsp; Suppose $x^*$ and $(\lambda^*, \mu^*)$ are primal-dual optimal for a strongly dual problem. Then: # <br> # $$\lambda^*_i f_i(x^*) = 0 \ \ \forall i$$ # Informally, if a primal constraint at an optimal $x^*$ is *loose*, that is $f_i(x^*) < 0$, then its corresponding dual variable $\lambda^*_i$ in the dual optimal $\lambda^*$ must be zero. Conversely, if the dual variable $\lambda_i^*$ is positive then the corresponding constraint must be *tight*. # > Note: If a primal constraint is *tight* at $x^*$, complementary slackness tells us nothing about its corresponding dual variable. # #### Proof of Complementary Slackness # # Suppose $x^*$ and $(\lambda^*, \mu^*)$ are primal-dual optimal for a strongly dual problem. # # Expanding the RHS we obtain: # <br> # $$ # \begin{aligned} # f_0(x^*) &= g(\lambda^*, \mu^*) \\ # &= \min_x \mathcal{L}(x, \lambda^*, \mu^*) \\ # &= \mathcal{L}(x^*, \lambda^*, \mu^*) \\ # &= f_0(x^*) + \sum_{i=1}^m \lambda_i^* f_i(x^*) + \sum_{i=1}^p \mu_i^* h_i(x^*) \\ # &\leq f_0(x^*) # \end{aligned} \tag{8.1} # $$ # <br> # The first equality holds by strong duality, the second holds by the definition of the dual function, the third equality holds by $(6.2)$, and the fourth is true by the expansion of $\mathcal{L}(x^*, \lambda^*, \mu^*)$. # # To see why the last inequality holds, note that: # # <br> # $$\sum_{i=1}^p \mu_i^* h_i(x^*) = 0$$ # <br> # # since, by feasibility of $x^*$, $h_i(x^*) = 0 \ \ \forall i$. 
Then again, by feasibility of $x^*$, we have: # # <br> # $$f_i(x^*) \leq 0 \ \ \forall i \tag{8.2}$$ # <br> # # Furthermore, by construction of the Lagrangian, $\lambda \geq 0$. So, together with $(8.2)$, we have: # # <br> # $$\sum_{i=1}^m \lambda^*_i f_i(x^*) \leq 0$$ # <br> # # But taken altogether $(8.1)$ says $f_0(x^*) \leq f_0(x^*)$ which can *only* hold with equality. # # Then it must be the case that $\sum_{i=1}^m \lambda^*_i f_i(x^*) = 0$ # # Being a sum of non-positive terms, $\sum_{i=1}^m \lambda^*_i f_i(x^*) = 0$ *if and only if* # # <br> # $$\lambda^*_i f_i(x^*) = 0 \ \ \forall i \tag{8.3}$$ # <br> # # which concludes the proof of complementary slackness. # ## Karush-Kuhn-Tucker (KKT) Conditions # # Complementary slackness and stationarity condition are often bundled into the KKT Conditions. # # In the absence of strong duality the KKT Conditions are necessary but insufficient for optimality. However, for problems which *are* strongly dual the KKT Conditions become a ***certificate of optimality***. That is, they are both necessary and sufficient. # > **KKT Conditions:** &nbsp; The primal-dual pair $(x^*, (\lambda^*, \mu^*))$ satisfies the ***KKT conditions*** if the following hold: # &nbsp; # > 1. $\nabla_x f_0(x^*) + \sum_{i=1}^m \lambda^*_i\nabla_xf_i(x^*) + \sum_{i=1}^p \mu^*_i\nabla_xh_i(x^*) = 0$ # > 2. $\lambda^*_if_i(x^*) = 0 \ \ \forall i$ # > 3. $f_i(x^*) \leq 0 \ \ \forall i$ # > 4. $h_i(x^*) = 0 \ \ \forall i$ # > 5. $\lambda^* \geq 0$ # We recognize *KKT-1* as the stationarity condition, and *KKT-2* as complementary slackness. *KKT-3* through *KKT-5* simply ensure primal-dual feasibility. # > Note: These conditions only apply to problems with differentiable objective and constraints. For the case in which one or more of the objective or constraints is non-differentiable, there is an easy generalization of the KKT conditions using sub-differentials. However, sub-differentials are beyond the scope of this post. 
# Primal-dual pairs which satisfy the KKT Conditions are called ***KKT pairs***. # # ### Generalization of Unconstrained Optimization # # # The KKT conditions represent a strict generalization of the unconstrained optimality condition for use in constrained problems. # # To see this, note that if there are no constraints then the KKT conditions simply reduce to the familiar unconstrained optimality condition: # # <br> # $$\nabla_x f_0(x^*) = 0$$ # <br> # # In order to discuss optimality in constrained problems, we must first define a ***feasible direction***. # > **Feasible Direction:** &nbsp; A unit vector $d$ is called a *feasible direction* at any $x$ if $x + \epsilon d$ remains feasible for $\epsilon > 0$ small enough. # We are now in a position to generalize the unconstrained optimality condition into a ***constrained optimality condition***. # # By using *Taylor expansion*, for small enough $\epsilon > 0$ and any feasible $d$, we can estimate $f_0(x^* + \epsilon d)$ by its linear approximation as: # # <br> # $$f_0(x^* + \epsilon d) = f_0(x^*) + \epsilon \nabla f_0(x^*)^Td$$ # <br> # # But since $x^*$ is optimal, we have: # # <br> # $$ # \begin{aligned} # f_0(x^*) &\leq f_0(x^* + \epsilon d) \\ # & = f_0(x^*) + \epsilon \nabla f_0(x^*)^Td # \end{aligned} # $$ # <br> # # Which necessitates that $\nabla f_0(x^*)^Td \geq 0$. Since $d$ was just an arbitrary feasible direction, this result must hold for *all* feasible directions. Hence, the constrained optimality condition can be given as: # > **Constrained Optimality Condition:** &nbsp; If $x^*$ is an optimizer of $f_0$ over some constraint set then, for any feasible direction $d$ at $x^*$, $\nabla f_0(x^*)^Td \geq 0$. # Note that $\nabla f_0(x^*)^Td$ is simply the ***directional derivative*** of $f_0$ in the direction $d$. So, in plain words, the constrained optimality condition says that the directional derivative of the objective function in any feasible direction at an optimizer should be non-negative. 
This ensures that moving in any feasible direction does not minimize the objective any further. # ### Certificate of Optimality # # As promised, the KKT Conditions together with strong duality obtain a certificate of optimality. # > **Certificate of Optimality:** &nbsp; If strong duality holds, then $x^*, (\lambda^*, \mu^*)$ are primal-dual optimal if and only if they are a KKT pair. # #### Proof of Certificate of Optimality # # We have already shown one direction of the certificate in the sections on [stationarity condition](https://v-poghosyan.github.io/blog/optimization/applied%20mathematics/proofs/2022/02/07/Optimization-Duality.html#Stationarity-Condition) and [complementary slackness](https://v-poghosyan.github.io/blog/optimization/applied%20mathematics/proofs/2022/02/07/Optimization-Duality.html#Complementary-Slackness), where we proved that being a primal-dual optimal pair in a strongly convex problem guarantees $(x^*, (\lambda^*, \mu^*))$ is also a KKT pair. # # Showing the other direction provides us with an interesting geometric viewpoint of the KKT conditions. Incidentally, Farkas' Lemma is the key theoretical result that underpins this proof. # Let's begin the proof. # # If a particular constraint is loose at $x^*$ then taking a small enough step in any direction from $x^*$ does not violate it. Formally, if $f_i(x^*) < 0$, then $f_i(x^* + \epsilon d) \leq 0$ $\forall d$ and for some $\epsilon >0$. So, loose constraints do not pose any restrictions on the set of feasible directions. # # However, if a constraint is tight at $x^*$, that is $f_i(x^*) = 0$, then we must be careful not to violate it. Suppose the set of indices of all the tight constraints at $x^*$ is given by $I_{x^*}$. 
For small enough $\epsilon > 0$, we can estimate $f_i(x^* + \epsilon d)$ by its linear Taylor expansion as: # # <br> # $$f_i(x^* + \epsilon d) = f_i(x^*) + \epsilon \nabla f_i(x^*)^Td \ \ \forall i \in I_{x^*}$$ # <br> # # For feasibility, we want $f_i(x^* + \epsilon d) \leq 0$. So, we require: # # <br> # $$f_i(x^*) + \epsilon \nabla f_i(x^*)^Td \leq 0 \ \ \forall i \in I_{x^*}$$ # <br> # # But since $f_i$ is tight at $x^*$, $f_i(x^*) = 0$, which simply leaves us with: # # <br> # $$\nabla f_i(x^*)^Td \leq 0 \ \ \forall i \in I_{x^*}$$ # <br> # # With the above restriction of $d$, the feasible directions can now be stated as: # # <br> # $$d \ \textrm{s.t.} \ \nabla f_i(x^*)^Td \leq 0 \ \ \forall i \in I_{x^*} \tag{9.1}$$ # <br> # # Or, equivalently: # # <br> # $$d \ \textrm{s.t.} \ - \nabla f_i(x^*)^Td \geq 0 \ \ \forall i \in I_{x^*} \tag{9.2}$$ # <br> # # But, since $x^*$ is optimal, by the constrained optimality condition we have: # # <br> # $$\nabla f_0(x^*)^Td \geq 0 \ \ \forall \ \textrm{feasible} \ d \tag{9.3}$$ # <br> # # That is, for all $d$ as in $(9.2)$. # # Put together, $(9.2)$ and $(9.3)$ say that $\not \exists \ d$ which defines a separating hyperplane between $\nabla f_0(x^*)$ and $-\nabla f_i(x^*)$ for all binding constraints at $x^*$. By Farkas' Lemma, this means that the only other alternative scenario must be true — it must be the case that $\nabla f_0(x^*)$ lies in the cone of the $-\nabla f_i(x^*)$'s. # # Formally, $\exists \ \lambda^* \geq 0$ s.t. # # <br> # $$\nabla f_0(x^*) + \sum_{i \in I_{x^*}} \lambda^*_i \nabla f_i(x^*) = 0 \tag{9.4}$$ # <br> # # Upon closer examination, $(9.4)$ is exactly *KKT-1*, *KKT-2*, and *KKT-5* all rolled into one condition. The remaining conditions, *KKT-3* and *KKT-4*, simply follow from the assumed feasibility of $x^*$. # # Thus, we have shown that if $x^*$ is primal-optimal, its KKT pair $(x^*, (\lambda^*, \mu^*))$ exists. 
Furthermore, as proved earlier, if strong duality holds then any KKT pair is primal-dual optimal. Hence, if strong duality holds, the $(\lambda^*, \mu^*)$ obtained through the above procedure is also dual-optimal. # # Strong Duality - Linear Programs # # Linear Programs, due to their simpler structure, lend themselves to a direct proof of strong duality. So, to wrap up this post, we show that LPs have strong duality through a direct proof. # # Strong duality for LPs can be stated as: # > **LP Strong Duality:** &nbsp; If the primal is feasible and bounded with optimal $x^*$ then the dual is also feasible and bounded. Furthermore, a dual optimal $p^*$ is s.t. $c^Tx^* = b^Tp^*$. # Where $x$ is the primal variable, $c^Tx$ is the primal objective, $p$ is the dual variable, and $b^Tp$ is the dual objective. # ## Proof of Strong Duality in LP's # # ### Prelude # # As in the general case, we construct a KKT pair through the use of Farkas' Lemma. Then, by a structural property of LPs, we notice that the dual and the primal optima agree. This concludes the proof of LP strong duality. # # ### Proof # # Suppose $x^*$ is primal-optimal. Let the set $I_{x^*} = \{ i : a_i^Tx^* = b_i\}$ be the set of the indices of the active constraints at $x^*$. Our goal is to construct a dual optimal solution $p^*$ s.t. $c^Tx^* = b^Tp^*$. # # Let $d$ be any vector that satisfies $d^Ta_i \geq 0 \ \ \forall i \in I_{x^*}$. That is, $d$ is a feasible direction w.r.t. to all the active constraints. # # By the assumption that $x^*$ is optimal, we have $c^Tx^* \leq c^T(x^* + \epsilon d) = c^Tx^* + \epsilon c^Td$. Thus, $c^Td = d^Tc \geq 0$ # > Note: $d^Tc$ is nothing but the *directional derivative* at the minimizer $x^*$. So, this also follows from the optimality of $x^*$ using the constrained optimality condition. # But, since $d$ is a vector s.t. $d^Ta_i \geq 0 \ \ \forall i \in I_{x^*}$ and $d^Tc \geq 0$, $d$ does *not* separate $c$ from the cone of the $a_i$'s. 
And, since $d$ was arbitrary, this puts us in the setting of Farkas' Lemma. Namely, there exist *no* vectors $d$ that separate $c$ from the cone. This means the alternative must be true — $c$ must a conic combination of the $a_i$'s that are active at the minimizer. In other words, $\exists p \geq 0$ s.t. $c = \sum_{i \in I_{x^*}} p_ia_i$. # # But $p$ has dimension equal to only the number of active constraints at $x^*$. To be a dual variable at all, it must have dimension equal to the number of all primal constraints. We extend $p$ to $p^*$ by setting all the entries that do not correspond to the active constraints at $x^*$ to be zero. # # That is $p^*_i = \begin{cases} p_i \ \ \textrm{if} \ \ i \in I_{x^*} \\ 0 \ \ \textrm{if} \ \ i \notin I_{x^*} \end{cases}$. # # Now $A^Tp^* = \sum_{i} p^*_ia_i = c$, so any feasibility condition in the dual, whether it be $A^Tp \leq c$, $A^Tp \geq c$, or $A^Tp = c$, is satisfied by $p^*$. # # Furthermore, the dual objective at $p^*$ agrees with the primal objective at $x^*$. # # $$b^Tp^* = \sum_{i} b_ip_i^* = \sum_{i \in I_{x^*}} b_ip_i^* + \sum_{i \notin I_{x^*}} b_ip_i^* = \sum_{i \in I_{x^*}} a_i^Tx^*p_i^* = (\sum_{i \in I_{x^*}} p_ia_i^T)x^* = c^Tx^* $$ # # This concludes the proof. # # Further Reading - Duality in Unconstrained Problems # # As mentioned briefly, in the case of certain types of unconstrained problems, the ***Fenchel-Legendre (FL) Transform*** is what gives rise to the dual. # # First, we define the FL transform which is also known as a ***convex conjugate*** for reasons that will soon become apparent. # > **FL Transform / Convex Conjugate:** &nbsp; The *FL Transform* or *Convex Conjugate* of a function $f: \mathbb{R}^n \rightarrow \mathbb{R}$ is: # $$f^*(y) = \sup_x \left\{y^Tx - f(x)\right\}$$ # We note some key properties of the FL Transform. # # ## FL Transform - a Convex Operation # # The FL Transform $f^*$ is always convex regardless of the convexity of $f$. 
# # That's because, for a fixed $x$, $y^Tx - f(x)$ is a linear function in $y$. So, $f^*$ is a point-wise supremum of linear functions, making it convex. # # ## The Case of Involution # # The double FL Transform $f^{**}$ does not always recover $f$. To see this fact note that, as an FL Transform of the *some* function (namely, $f^*$), $f^{**}$ is always convex. Therefore, $f^{**} \ne f$ if $f$ is non-convex. # # But convexity alone is not enough to guarantee involution. We need an additional condition on $f$, namely that its sub-level sets must be closed, to ensure $f^{**} = f$. # # ## Inverse Gradients # # If $f$ has closed sub-level sets and is convex then the gradients of $f$ and $f^*$ are inverses. That is, assuming both $f$ and $f^*$ are differentiable: # # <br> # $$y = \nabla f(x) \iff x = \nabla f^*(y)$$ # <br> # # Let's first prove the $\implies$ direction. # # Suppose $y = \nabla f(x)$. By $f$'s convexity: # # <br> # $$f(\hat x) \geq f(x) + y^T(\hat x - x) \ \ \forall \hat x$$ # <br> # # And so: # # <br> # $$y^T \hat x - f(\hat x) \leq y^T x - f(x) \ \ \forall \hat x$$ # <br> # # By taking supremum over $x$ and by noting that, since the sub-level sets are closed, the supremum is attained, we obtain: # # <br> # $$f^*(y) = y^T x - f(x)$$ # <br> # # The desired result follows by taking the gradient of both sides w.r.t. $y$. That is: # # <br> # $$\nabla f^*(y) = x$$ # <br> # # The $\impliedby$ direction is similar. We start from the assumption that $x = \nabla f^*(y)$ and get the desired result by using the involution property $f^{**} = f$. # # ## FL Duality # # As mentioned, the FL Transform has a natural role in duality. # # Suppose the unconstrained optimization problem is: # # <br> # $$\min_x : f(x) + h(Ax)$$ # <br> # # Where $f$ and $h$ are convex functions, and $A$ is a matrix representing a bounded linear transformation. # # We introduce a dummy variable $y$ and form the artificial constraint $y = Ax$. 
The problem becomes: # # <br> # $$\begin{aligned} # \min_{x,y} &: f(x) + h(y) \\ # s.t. &: Ax = y # \end{aligned}$$ # <br> # # Forming the Lagrangian gives us: # # <br> # $$\mathcal{L}(x,y,z) = f(x) + h(y) + z^T(Ax - y)$$ # <br> # # Then, the dual function is the following FL Transform: # # <br> # $$ # \begin{aligned} # g(z) &= \min_{x,y} \mathcal{L}(x,y,z) \\ # &= \min_{x,y} f(x) + h(y) + z^T(Ax - y) \\ # &= \min_{x,y} (A^Tz)^Tx + f(x) - z^Ty + h(y) \\ # &= \min_x \left\{ (A^Tz)^Tx + f(x) \right\} + \min_y \left\{ -z^Ty + h(y) \right\} \\ # &= \min_x \left\{ -\left((-A^Tz)^Tx - f(x)\right) \right\} + \min_y \left\{ -\left(z^Ty - h(y)\right) \right\} \\ # &= - \max_x \left\{ (-A^Tz)^Tx - f(x) \right\} - \max_y \left\{ z^Ty - h(y) \right\} \\ # &= - f^*(-A^Tz) - h^*(z) # \end{aligned} # $$ # <br> # # And, consequently, the dual problem is: # # <br> # $$\max_z: - f^*(-A^Tz) - h^*(z)$$ # <br> # # To convince ourselves of the utility of this dual, note that the dual is, indeed, an easier problem. This is because the negative of an FL Transform is always concave regardless of the convexity of $f$ and $h$. So, the dual problem is a maximization of a concave function which is, in general, an easy optimization problem.
_notebooks/2022-02-07-Optimization-Duality.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Load the MovieLens ratings and movie-title tables, then join them so that
# every rating row carries the movie's title.
import pandas as pd  # fixed: original read "importpandas as pd" (SyntaxError)

# u.data is tab-separated; keep only the first three columns
# (user_id, movie_id, rating) and drop the trailing timestamp column.
r_cols = ['user_id', 'movie_id', 'rating']
ratings = pd.read_csv(
    r'C:\Users\Mayank\Desktop\Python_ML_Training\Machine Learning\Day 8\ml23\u.data',
    sep='\t', names=r_cols, usecols=range(3), encoding="ISO-8859-1")

# u.item is pipe-separated; keep only the first two columns (movie_id, title).
m_cols = ['movie_id', 'title']
movies = pd.read_csv(
    r'C:\Users\Mayank\Desktop\Python_ML_Training\Machine Learning\Day 8\ml23\u.item',
    sep='|', names=m_cols, usecols=range(2), encoding="ISO-8859-1")

# Inner merge on the shared 'movie_id' column attaches titles to ratings.
ratings = pd.merge(movies, ratings)
# NOTE(review): removed the trailing garbled fragment
#   _id', 'rating'] ratings = pd.read_csv(r"")
# which was truncation residue, not valid Python.
Machine Learning/Day 8/similarmovies.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Imitation Learning for Portfolio Management from model.supervised.imitation_optimal_action import * from utils.data import create_optimal_imitation_dataset, create_imitation_dataset, read_stock_history, normalize from __future__ import print_function import random # %matplotlib inline # %load_ext autoreload # %autoreload 2 # + # dataset history, abbreviation = read_stock_history(filepath='utils/datasets/stocks_history_target.h5') history = history[:, :, :4] # 16 stocks are all involved. We choose first 3 years as training data and last 2 years as testing data. num_training_time = 1095 target_stocks = abbreviation target_history = np.empty(shape=(len(target_stocks), num_training_time, history.shape[2])) for i, stock in enumerate(target_stocks): target_history[i] = history[abbreviation.index(stock), :num_training_time, :] # test on 3 never seen stocks test_stocks = abbreviation test_history = np.empty(shape=(len(test_stocks), history.shape[1] - num_training_time, history.shape[2])) for i, stock in enumerate(test_stocks): test_history[i] = history[abbreviation.index(stock), num_training_time:, :] # - # ## Train optimal action given future observation # build optimal model given future model nb_classes = len(target_stocks) + 1 optimal_given_future_model = create_network_given_future(nb_classes) # run this cell to train optimal action given future model train_optimal_action_given_future_obs(optimal_given_future_model, target_history, target_stocks) # ## Test learning from optimal action using future data: sanity check (X_test, y_test), (_, _) = create_optimal_imitation_dataset(test_history) Y_test = np_utils.to_categorical(y_test, nb_classes) loss, acc = optimal_given_future_model.evaluate(X_test, Y_test) print('Testing result: loss - {}, accuracy - 
{}'.format(loss, acc))  # NOTE(review): tail of a truncated print(...) — its opening lies before this chunk

# ### Start to play in an environment with stocks we have never seen before

from environment.portfolio import PortfolioEnv

# Held-out environment: stocks the model never saw during training; one episode = 365 steps.
env = PortfolioEnv(test_history, test_stocks, steps=365)

# buy and sell only 1 stock
done = False
observation, info = env.reset()
ground_truth_obs = info['next_obs']
# Model input: close/open ratio of the *next* (ground-truth future) observation.
# Channel 3 is presumably "close" and channel 0 "open" — TODO confirm against the
# environment's feature layout.
close_open_ratio = np.transpose(ground_truth_obs[:, :, 3] / ground_truth_obs[:, :, 0])
close_open_ratio = normalize(close_open_ratio)
while not done:
    # One-hot action: put the whole portfolio on the predicted class.
    action = np.zeros((nb_classes,))
    current_action_index = optimal_given_future_model.predict_classes(close_open_ratio, verbose=False)
    action[current_action_index] = 1.0
    observation, reward, done, info = env.step(action)
    ground_truth_obs = info['next_obs']
    close_open_ratio = np.transpose(ground_truth_obs[:, :, 3] / ground_truth_obs[:, :, 0])
    close_open_ratio = normalize(close_open_ratio)
env.render()

# ### It turns out that using probability distribution sometimes yield higher return

# buy and sell according to the probability, not the label
done = False
observation, info = env.reset()
ground_truth_obs = info['next_obs']
close_open_ratio = np.transpose(ground_truth_obs[:, :, 3] / ground_truth_obs[:, :, 0])
close_open_ratio = normalize(close_open_ratio)
while not done:
    # Use the full predicted distribution as the portfolio weight vector.
    current_action = optimal_given_future_model.predict(close_open_ratio, verbose=False)
    current_action = np.squeeze(current_action, axis=0)
    observation, reward, done, info = env.step(current_action)
    ground_truth_obs = info['next_obs']
    close_open_ratio = np.transpose(ground_truth_obs[:, :, 3] / ground_truth_obs[:, :, 0])
    close_open_ratio = normalize(close_open_ratio)
env.render()

# ## Train optimal action given history observation

# we need to test different window length
window_length = 3

# create model
optimal_given_past_model = create_network_give_past(nb_classes, window_length)

# run this cell to train the model.
# For 3 stocks, it would take 200 epoches to converge to around 45% validation accuracy
# For 16 stocks, the validation accuracy is around 15%.
train_optimal_action_given_history_obs(optimal_given_past_model, target_history, target_stocks, window_length)

# ## Test optimal action given history observation

(X_test, y_test), (_, _) = create_imitation_dataset(test_history, window_length=window_length)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# increase a dimension of X_test for CNN
X_test = np.expand_dims(X_test, axis=-1)
loss, acc = optimal_given_past_model.evaluate(X_test, Y_test)
print('Testing result: loss - {}, accuracy - {}'.format(loss, acc))

env = PortfolioEnv(test_history, test_stocks, window_length=window_length, steps=365)

# buy and sell only 1 stock
done = False
observation, _ = env.reset()
# Here the input is the *current* windowed observation, not the future one.
close_open_ratio = observation[:, :, 3] / observation[:, :, 0]
close_open_ratio = normalize(close_open_ratio)
while not done:
    action = np.zeros((nb_classes,))
    # Add batch and channel axes so the input matches the CNN's 4-D input shape.
    close_open_ratio = np.expand_dims(close_open_ratio, axis=0)
    close_open_ratio = np.expand_dims(close_open_ratio, axis=-1)
    current_action_index = optimal_given_past_model.predict_classes(close_open_ratio, verbose=False)
    action[current_action_index] = 1.0
    observation, reward, done, _ = env.step(action)
    close_open_ratio = observation[:, :, 3] / observation[:, :, 0]
    close_open_ratio = normalize(close_open_ratio)
env.render()

# buy and sell according to the probability, not the label
done = False
observation, _ = env.reset()
close_open_ratio = observation[:, :, 3] / observation[:, :, 0]
while not done:
    close_open_ratio = np.expand_dims(close_open_ratio, axis=0)
    close_open_ratio = np.expand_dims(close_open_ratio, axis=-1)
    close_open_ratio = normalize(close_open_ratio)
    current_action = optimal_given_past_model.predict(close_open_ratio, verbose=False)
    current_action = np.squeeze(current_action, axis=0)
    observation, reward, done, _ = env.step(current_action)
    close_open_ratio = observation[:, :, 3] / observation[:, :, 0]
env.render()
src/imitation_learning_experiments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Save Pdfs Objects to S3 using Spark OCR

# ## Initialize spark session

# Credentials for the licensed Spark OCR distribution; left blank in the committed notebook.
secret = ""
license = ""
version = secret.split("-")[0]
spark_ocr_jar_path = "../../target/scala-2.11"

# + language="bash"
# if python -c 'import google.colab' &> /dev/null; then
#     echo "Run on Google Colab!"
#     echo "Install Open JDK"
#     apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
#     java -version
# fi

# +
import sys
import os

# On Colab, point JAVA_HOME at the JDK installed by the bash cell above.
if 'google.colab' in sys.modules:
    os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
    os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]

# +
# install from PYPI using secret
# #%pip install spark-ocr==$version\.spark24 --extra-index-url=https://pypi.johnsnowlabs.com/$secret --upgrade
# -

# ## Initialization of spark session

# +
from pyspark.sql import SparkSession
from pyspark import SparkConf
from sparkocr import start

if license:
    os.environ['JSL_OCR_LICENSE'] = license

# you can set AWS API Keys to env variables
# os.environ['AWS_ACCESS_KEY_ID'] = "your key"
# os.environ['AWS_SECRET_ACCESS_KEY'] = "your secret"

# set additinal dependensies for read data from S3
conf = SparkConf() \
    .set("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.3")
# or you can set AWS API Keys here
#     .set('spark.hadoop.fs.s3a.access.key', "your key" ) \
#     .set('spark.hadoop.fs.s3a.secret.key', "your secret")

spark = start(secret=secret, jar_path=spark_ocr_jar_path, extra_conf=conf)
spark
# -

# ## Imports

from pyspark.ml import PipelineModel
from sparkocr.enums import PageSegmentationMode
from sparkocr.transformers import *
from sparkocr.utils import display_image

# ## Define paths to pdf on S3

binary_pdf = "s3a://dev.johnsnowlabs.com/ocr/datasets/input/pdfs/"

# ## Read pdf objects

# Load the raw PDF bytes as a binary DataFrame; cached because the pipeline reads it repeatedly.
pdfs = spark.read.format("binaryFile").load(binary_pdf).cache()
pdfs.count()

# ## Define OCR pipeline

# +
# Transform pdf to image
pdf_to_image = PdfToImage() \
    .setInputCol("content") \
    .setOutputCol("image_raw") \
    .setKeepInput(True)

# Transform image to the binary color model
binarizer = ImageBinarizer() \
    .setInputCol("image_raw") \
    .setOutputCol("image") \
    .setThreshold(130)

# Run OCR for each region
ocr = ImageToText() \
    .setInputCol("image") \
    .setOutputCol("text") \
    .setIgnoreResolution(False) \
    .setPageSegMode(PageSegmentationMode.SPARSE_TEXT) \
    .setConfidenceThreshold(60)

# Render text with positions to Pdf document.
textToPdf = TextToPdf() \
    .setInputCol("positions") \
    .setInputImage("image") \
    .setInputText("text") \
    .setOutputCol("pdf") \
    .setInputContent("content")

# OCR pipeline
pipeline = PipelineModel(stages=[
    pdf_to_image,
    binarizer,
    ocr,
    textToPdf
])
# -

# ## Run OCR pipeline

results = pipeline.transform(pdfs)

# ## Store generated pdf object to S3

# NOTE(review): "binaryFormat" is a Spark OCR custom writer name — confirm it matches the
# installed sparkocr version (the *reader* above uses the built-in "binaryFile" source).
results.write \
    .format("binaryFormat") \
    .option("type", "pdf") \
    .option("field", "pdf") \
    .option("extension", "pdf") \
    .mode("overwrite") \
    .save("s3a://dev.johnsnowlabs.com/ocr/datasets/output/pdfs/")
jupyter/SparkOCRWritePdfToS3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import tensorflow as tf
tf.__version__

import numpy as np

# +
# declare input shape
# in_dim = shape=([None, None, 3])
raw_input = (32, 32, 3)
input = tf.keras.Input(raw_input)

# Block 1
x = tf.keras.layers.Conv2D(32, 3, strides=2, activation="relu")(input)
x = tf.keras.layers.MaxPooling2D(3)(x)
x = tf.keras.layers.BatchNormalization()(x)

# Block 2
x = tf.keras.layers.Conv2D(64, 3, activation="relu")(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dropout(0.3)(x)

# Now that we apply global max pooling.
gap = tf.keras.layers.GlobalMaxPooling2D()(x)

# Finally, we add a classification layer.
output = tf.keras.layers.Dense(10)(gap)

# bind all
func_model = tf.keras.Model(input, output)
# -

func_model.summary()


class ModelSubClassing(tf.keras.Model):
    # Same topology as func_model above, expressed via the subclassing API
    # (except GlobalAveragePooling2D here vs GlobalMaxPooling2D above).
    def __init__(self, num_classes):
        super(ModelSubClassing, self).__init__()
        # define all layers in init
        # Layer of Block 1
        self.conv1 = tf.keras.layers.Conv2D(32, 3, strides=2, activation="relu")
        self.max1 = tf.keras.layers.MaxPooling2D(3)
        self.bn1 = tf.keras.layers.BatchNormalization()

        # Layer of Block 2
        self.conv2 = tf.keras.layers.Conv2D(64, 3, activation="relu")
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.drop = tf.keras.layers.Dropout(0.3)

        # GAP, followed by Classifier
        self.gap = tf.keras.layers.GlobalAveragePooling2D()
        self.dense = tf.keras.layers.Dense(num_classes)

    def call(self, input_tensor, training=False):
        # forward pass: block 1
        x = self.conv1(input_tensor)
        x = self.max1(x)
        x = self.bn1(x)

        # forward pass: block 2
        x = self.conv2(x)
        x = self.bn2(x)

        # droput followed by gap and classifier
        x = self.drop(x)
        x = self.gap(x)
        return self.dense(x)

    def build_graph(self, raw_shape):
        # Wrap the subclassed model in a functional Model so .summary() shows shapes.
        x = tf.keras.layers.Input(shape=raw_shape)
        # NOTE(review): `Model` is only imported further down this notebook
        # (`from tensorflow.keras import Model`); calling this before that import raises NameError.
        return Model(inputs=[x], outputs=self.call(x))


dim = (32, 32, 3)
msc = ModelSubClassing(10)
# .build(raw_input)
msc.build_graph(dim).summary()

# +
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# x_train.shape, y_train.shape: (60000, 28, 28) (60000,)
# x_test.shape, y_test.shape : (10000, 28, 28) (10000,)

# train set / data — replicate the grayscale channel to 3 channels and scale to [0, 1]
x_train = np.expand_dims(x_train, axis=-1)
x_train = np.repeat(x_train, 3, axis=-1)
x_train = x_train.astype('float32') / 255
# train set / target
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)

# validation set / data
x_test = np.expand_dims(x_test, axis=-1)
x_test = np.repeat(x_test, 3, axis=-1)
x_test = x_test.astype('float32') / 255
# validation set / target
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
# -

print('\nModel Sub-Classing API')
sub_classing_model = ModelSubClassing(10)
# sub_classing_model.compile(
#     loss = tf.keras.losses.CategoricalCrossentropy(),
#     metrics = tf.keras.metrics.CategoricalAccuracy(),
#     optimizer = tf.keras.optimizers.Adam())
# # fit
# sub_classing_model.fit(x_train, y_train, batch_size=128, epochs=1);

# # Layer: ConvModule

# +
class ConvModule(tf.keras.layers.Layer):
    # Conv2D -> BatchNorm -> ReLU building block.
    def __init__(self, kernel_num, kernel_size, strides, padding='same'):
        super(ConvModule, self).__init__()
        # conv layer
        self.conv = tf.keras.layers.Conv2D(kernel_num,
                                           kernel_size=kernel_size,
                                           strides=strides,
                                           padding=padding)
        # batch norm layer
        self.bn = tf.keras.layers.BatchNormalization()

    def call(self, input_tensor, training=False):
        x = self.conv(input_tensor)
        x = self.bn(x, training=training)
        x = tf.nn.relu(x)
        return x

    def build_graph(self, raw_shape):
        x = tf.keras.layers.Input(shape=raw_shape)
        # NOTE(review): relies on `Model` being imported later in the notebook (see above).
        return Model(inputs=[x], outputs=self.call(x))
# -

cm = ConvModule(96, (3,3), (1,1)).build_graph(dim)
cm.summary()

# +
cm = ConvModule(96, (3,3), (1,1))
y = cm(tf.ones(shape=(2,32,32,3)))  # first call to the `cm` will create weights
print("weights:", len(cm.weights))
print("trainable weights:", len(cm.trainable_weights))
# -

# # Layer:
# InceptionModule   <- NOTE(review): tail of the split markdown header "# # Layer: InceptionModule"

class InceptionModule(tf.keras.layers.Layer):
    # Two parallel ConvModules (1x1 and 3x3) whose outputs are concatenated on the channel axis.
    def __init__(self, kernel_size1x1, kernel_size3x3):
        super(InceptionModule, self).__init__()
        # two conv modules: they will take same input tensor
        self.conv1 = ConvModule(kernel_size1x1, kernel_size=(1,1), strides=(1,1))
        self.conv2 = ConvModule(kernel_size3x3, kernel_size=(3,3), strides=(1,1))
        self.cat = tf.keras.layers.Concatenate()

    def call(self, input_tensor, training=False):
        x_1x1 = self.conv1(input_tensor)
        x_3x3 = self.conv2(input_tensor)
        x = self.cat([x_1x1, x_3x3])
        return x


# # Layer: DownSampleModule

class DownsampleModule(tf.keras.layers.Layer):
    # Strided conv branch + max-pool branch, concatenated — halves spatial size.
    def __init__(self, kernel_size):
        super(DownsampleModule, self).__init__()
        # conv layer
        self.conv3 = ConvModule(kernel_size, kernel_size=(3,3), strides=(2,2), padding="valid")
        # pooling layer
        self.pool = tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(2,2))
        self.cat = tf.keras.layers.Concatenate()

    def call(self, input_tensor, training=False):
        # forward pass
        conv_x = self.conv3(input_tensor, training=training)
        pool_x = self.pool(input_tensor)
        # merged
        return self.cat([conv_x, pool_x])


# # Model: +Layers

# +
from tensorflow.keras import Model

class MiniInception(tf.keras.Model):
    # Small Inception-style classifier assembled from the custom layers above.
    def __init__(self, num_classes=10):
        super(MiniInception, self).__init__()
        # the first conv module
        self.conv_block = ConvModule(96, (3,3), (1,1))

        # 2 inception module and 1 downsample module
        self.inception_block1 = InceptionModule(32, 32)
        self.inception_block2 = InceptionModule(32, 48)
        self.downsample_block1 = DownsampleModule(80)

        # 4 inception module and 1 downsample module
        self.inception_block3 = InceptionModule(112, 48)
        self.inception_block4 = InceptionModule(96, 64)
        self.inception_block5 = InceptionModule(80, 80)
        self.inception_block6 = InceptionModule(48, 96)
        self.downsample_block2 = DownsampleModule(96)

        # 2 inception module
        self.inception_block7 = InceptionModule(176, 160)
        self.inception_block8 = InceptionModule(176, 160)

        # average pooling
        self.avg_pool = tf.keras.layers.AveragePooling2D((7,7))

        # model tail
        self.flat = tf.keras.layers.Flatten()
        self.classfier = tf.keras.layers.Dense(num_classes, activation='softmax')
        self.test = True

    def call(self, input_tensor, training=False, **kwargs):
        # forward pass
        x = self.conv_block(input_tensor)
        x = self.inception_block1(x)
        x = self.inception_block2(x)
        x = self.downsample_block1(x)
        x = self.inception_block3(x)
        x = self.inception_block4(x)
        x = self.inception_block5(x)
        x = self.inception_block6(x)
        x = self.downsample_block2(x)
        x = self.inception_block7(x)
        x = self.inception_block8(x)
        x = self.avg_pool(x)
        x = self.flat(x)
        return self.classfier(x)

    def build(self, x):
        # NOTE(review): this overrides tf.keras.Model.build with different semantics
        # (it returns a functional Model instead of creating weights) — intentional here?
        x = tf.keras.layers.Input(shape=x.shape[1:])
        return Model(inputs=[x], outputs=self.call(x))
# -

x = tf.ones(shape=(0,*raw_input))
y = MiniInception().build(x)
cm.summary()

# NOTE(review): MiniInception defines `build`, not `build_graph` — this call raises AttributeError.
cm = MiniInception().build_graph(x)
cm.summary()

# +
raw_input = (32, 32, 3)

# init model object
cm = MiniInception()

# The first call to the `cm` will create the weights
y = cm(tf.ones(shape=(0,*raw_input)))

cm.summary()

# # print summary
# cm.build_graph(raw_input).summary()
# -

# NOTE(review): see above — `build_graph` does not exist on MiniInception.
cm.build_graph(raw_input).summary()

# +
# class Decoder(tf.keras.layers.Layer):
#     def __init__(self, units):
#         super(Decoder, self).__init__()
#         self.units = units
#     def build(self, _):
#         self.output_layer = Dense(units=self.units)
#     def call(self, X):
#         return self.output_layer(X)

# class Encoder(Layer):
#     def __init__(self, units):
#         super(Encoder, self).__init__()
#         self.output_layer = Dense(units=units, activation=tf.nn.relu)
#     def call(self, X):
#         return self.output_layer(X)

# class Decoder(Layer):
#     def __init__(self, units):
#         super(Decoder, self).__init__()
#         self.output_layer = Dense(units=units)
#     def call(self, X):
#         return self.output_layer(X)

# Function-style encoder/decoder.
# NOTE(review): Encoder returns the Dense *layer object* (never applied to x),
# while Decoder applies it — asymmetric, presumably a bug in this scratch code.
def Encoder(x, units):
    x = Dense(units, activation=tf.nn.relu)
    return x

def Decoder(x, units):
    x = Dense(units)(x)
    return x

class AutoEncoder(tf.keras.Model):
    def __init__(self, units):
        super(AutoEncoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.x_in = Input(input_shape)
        # NOTE(review): Encoder/Decoder here are the 2-argument functions above but are
        # called with keyword `units` only — missing the positional `x` argument.
        self.encoder = Encoder(units=self.units)
        self.decoder = Decoder(units=input_shape[-1])

    def call(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
# -

ae = AutoEncoder(32)
ae(tf.keras.Input((320, 320, 3)))
ae.summary()

from tensorflow.keras.layers import Layer, Dense, Input
from tensorflow.keras import Model

# +
class Encoder(Layer):
    # Dense + ReLU encoder layer (redefines the function above).
    def __init__(self, units):
        super(Encoder, self).__init__()
        self.output_layer = Dense(units=units, activation=tf.nn.relu)

    def call(self, X):
        return self.output_layer(X)

class Decoder(Layer):
    # Linear Dense decoder layer.
    def __init__(self, units):
        super(Decoder, self).__init__()
        self.output_layer = Dense(units=units)

    def call(self, X):
        return self.output_layer(X)

class AutoEncoder(Model):
    def __init__(self, units):
        super(AutoEncoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        # Decoder restores the input's last-dimension size.
        self.encoder = Encoder(units=self.units)
        self.decoder = Decoder(units=input_shape[-1])

    def call(self, X):
        Z = self.encoder(X)
        return self.decoder(Z)

    def encode(self, X):
        return self.encoder(X)

    def decode(self, Z):
        # NOTE(review): infinite recursion — calls self.decode instead of self.decoder.
        return self.decode(Z)
# +
# @AutoEncoder.testdec
# def darkcov(filters):
#     x = inputs = input
# -

ae = AutoEncoder(32)
# ae(tf.keras.Input((320, 320, 3)))
# NOTE(review): AutoEncoder defines no build_graph method — this raises AttributeError.
ae.build_graph((320, 320, 3)).summary()

# +
class darkcov(Model)  # NOTE(review): missing ':' — SyntaxError; this cell cannot run as-is.
    def __init__(self, units):
        super(darkcov, self).__init__()
        self.units = units

    def build(self, input_shape)  # NOTE(review): missing ':' — SyntaxError.
        # NOTE(review): `units` is undefined here — presumably self.units was intended.
        self.output_layer = Dense(units=units, activation=tf.nn.relu)

    def call(self, X):
        return self.output_layer(X)

class Darknet(Model):
    def __init__(self, units):
        super(Darknet, self).__init__()
        self.output_layer = Dense(units=units, activation=tf.nn.relu)

    def call(self, X):
        return self.output_layer(X)

class PredConvs(Layer):
    def __init__(self, units):
        # NOTE(review): wrong class in super() — copy/paste from Decoder.
        super(Decoder, self).__init__()
        self.output_layer = Dense(units=units)

    def call(self, X):
        return self.output_layer(X)

class Yolo(Model):
    def __init__(self, units):
        super(Yolo, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.encoder = Encoder(units=self.units)
        self.decoder = Decoder(units=input_shape[-1])

    def call(self, X):
        Z = self.encoder(X)
        return self.decoder(Z)

    def encode(self, X):
        return self.encoder(X)

    def decode(self, Z):
        # NOTE(review): same infinite-recursion bug as AutoEncoder.decode above.
        return self.decode(Z)
# -

class yolo(Model):
    def __init__(self):
        # NOTE(review): super() is given the wrong class (`darknet`, undefined) and no instance.
        super(darknet).__init__()

    def build(self, input_shape):
        # NOTE(review): `darknet` and `output` are undefined names.
        self.darknet = darknet()
        self.output0 = output()

# NOTE(review): `AutoEncoder1` is undefined — presumably AutoEncoder was meant.
ae = AutoEncoder1(32)
# ae.build((320, 320, 3))
ae(tf.keras.Input((320, 320, 3)))
ae.summary()

# +
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import (
    Add,
    Concatenate,
    Conv2D,
    Input,
    Lambda,
    LeakyReLU,
    MaxPool2D,
    UpSampling2D,
    ZeroPadding2D,
    BatchNormalization,
)
from tensorflow.keras.regularizers import l2

class DarknetCov(tf.keras.Model):
    """ 1 Basic Conv """
    def __init__(self, filters, size, strides=1, batch_norm=True, name="darknet_conv", **kwargs):
        super(DarknetCov, self).__init__()
        self.strides = strides
        if strides == 1:
            padding = 'same'
        else:
            self.zero_padding = ZeroPadding2D(((1, 0), (1, 0)))  # top left half-padding
            padding = 'valid'
        self.conv2d = Conv2D(filters, size, strides, padding, kernel_regularizer=l2(0.0005))
        self.batch_norm = batch_norm
        self.bn = BatchNormalization()
        self.leaky_relu = LeakyReLU(alpha=0.1)

    def call(self, x):
        if self.strides != 1:
            x = self.zero_padding(x)  # top left half-padding
        x = self.conv2d(x)
        if self.batch_norm:
            x = self.bn(x)
            x = self.leaky_relu(x)
        return x

class DarknetRes(tf.keras.Model):
    """ 2 DarknetConv + 1 AddLayer """
    def __init__(self, filters, blocks=1, name="darknet_residual", **kwargs):
        super(DarknetRes, self).__init__()
        self.resblocks = []
        for _ in range(blocks):
            self.resblocks += DarknetCov(filters//2, 1) ,
            self.resblocks += DarknetCov(filters, 3) ,
            self.resblocks += Add() ,

    def call(self, x):
        prev = x
        # NOTE(review): conv_0 / conv_1 / add are never defined — the layers were appended
        # to self.resblocks instead; calling this raises AttributeError.
        x = self.conv_0(x)
        x = self.conv_1(x)
        x = self.add([prev, x])
        return x

class Darknet(tf.keras.Model):
    def __init__(self, filters):
        super(Darknet, self).__init__()
        self.filters = filters

    def call(self, x):
        # NOTE(review): DarknetConv / DarknetBlock are undefined — the classes above are
        # named DarknetCov / DarknetRes and are constructed differently.
        x = DarknetConv(x, self.filters, 3)
        x = DarknetBlock(x, self.filters, 1)
        return x

    def build_graph(self, x_in):
        x = inputs = Input(shape=x_in.shape)
        return Model(inputs=x_in, outputs=self.call(x))
# -

dk = Darknet(64)
x = tf.ones(shape=(32,32,3))
dk(x)

class BigModel(Model):
    def __init__(
            self,
            input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
            name='yolo',
            **kwargs):
        # NOTE(review): `inputs` is undefined here.
        super(BigModel, self).__init__(inputs=inputs, )

    def get_config():  # NOTE(review): truncated stub — no body, no self; source ends here.
colab_archive/test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: study
#     language: python
#     name: study
# ---

# # FastSLAM2.0

import sys
sys.path.append('../scripts/')
from kf import * # this also import Mcl class
# %matplotlib widget


class EstimatedLandmark(Landmark): # add cov to Landmark.
    # A landmark whose position estimate carries a 2x2 covariance (per-particle EKF).
    def __init__(self):
        super().__init__(0,0)
        self.cov = None  # None until the landmark has been observed once

    def draw(self, ax, elems):
        # Nothing to draw before the first observation initialises the estimate.
        if self.cov is None:
            return

        center = ax.scatter(self.pos[0], self.pos[1], s=100, marker="*", label="landmarks", color="blue")
        elems.append(center)
        elems.append(ax.text(self.pos[0], self.pos[1], "id:"+str(self.id), fontsize=10))
        # 3-sigma uncertainty ellipse of the landmark estimate.
        ellipse = sigma_ellipse(self.pos, self.cov, 3)
        elems.append(ax.add_patch(ellipse))


class MapParticle(Particle):
    # FastSLAM particle: a robot pose hypothesis plus its own landmark map.
    def __init__(self, init_pose, weight, landmark_num): # same with FastSLAM
        super().__init__(init_pose, weight)
        self.map = Map() # defined in IdealRobot.py
        for i in range(landmark_num):
            self.map.append_landmark(EstimatedLandmark())

    def init_landmark_estimation(self, landmark, z, distance_dev_rate, direction_dev): # same with FastSLAM
        # First observation: place the landmark by inverting the measurement model,
        # and initialise its covariance from the linearised model.
        landmark.pos = (z[0] * np.array([np.cos(self.pose[2]+z[1]),
                                         np.sin(self.pose[2]+z[1])]).T
                        + self.pose[0:2]) # z=(x,y,phi), self.pose=(x,y,theta)
        H = -matH(self.pose, landmark.pos)[0:2,0:2] # in kf.py
        Q = matQ(distance_dev_rate*z[0], direction_dev)
        landmark.cov = np.linalg.inv(H.T.dot(np.linalg.inv(Q).dot(H)))

    def observation_update_landmark(self, landmark, z, distance_dev_rate, direction_dev):
        # EKF update of one landmark estimate given measurement z.
        est_z = IdealCamera.observation_function(self.pose, landmark.pos) #h(m^)
        if est_z[0] < 0.01:
            return  # too close — the linearisation is unreliable, skip the update

        # landmark update
        H = -matH(self.pose, landmark.pos)[0:2,0:2]
        Q = matQ(distance_dev_rate * est_z[0], direction_dev)
        K = landmark.cov.dot(H.T).dot(np.linalg.inv(Q + H.dot(landmark.cov).dot(H.T)))

        landmark.pos += K.dot(z - est_z) #(8.39)
        landmark.cov = (np.eye(2) - K.dot(H)).dot(landmark.cov) #(8.40)

        # weight update is implemented in motion_update2 for FastSLAM2
        # # trajectory(weight) update
        # Q_z = H.dot(landmark.cov).dot(H.T) + Q
        # self.weight *= multivariate_normal(mean=est_z, cov=Q_z).pdf(z)

    def observation_update(self, observation, distance_dev_rate, direction_dev): # same with FastSLAM
        for d in observation:
            z = d[0] #landmark's (l,phi)
            landmark = self.map.landmarks[d[1]]

            if landmark.cov is None:
                self.init_landmark_estimation(landmark, z, distance_dev_rate, direction_dev)
            else:
                self.observation_update_landmark(landmark, z, distance_dev_rate, direction_dev)

    def motion_update2(self, nu, omega, time, motion_noise_stds, observation, distance_dev_rate, direction_dev): # prediction + landmark correction
        # FastSLAM2.0 proposal: the motion prediction is corrected by the current
        # observations before sampling the new pose.
        # add observation, motion_noise_stds to motion_update()
        M = matM(nu, omega, time, motion_noise_stds) # covariance of u. 2x2
        A = matA(nu, omega, time, self.pose[2]) # map: u->x 2x2
        R_t = A.dot(M).dot(A.T) # covariance of x. 2x2
        x_hat = IdealRobot.state_transition(nu, omega, time, self.pose) # time means dt after self.pose was computed. dim=2 (8.58)

        # correction
        for obs in observation:
            # weight update
            zt_hat, Q_zt, H_xt = self.drawing_params(x_hat, self.map.landmarks[obs[1]], distance_dev_rate, direction_dev)
            Sigma_zt = H_xt.dot(R_t).dot(H_xt.T) + Q_zt # eq (8.77)
            self.weight *= multivariate_normal(mean=zt_hat, cov=Sigma_zt).pdf(obs[0]) #(8.79)

            # position correction as drawing.
            x_hat, R_t = self.gauss_for_drawing(x_hat, R_t, obs[0], self.map.landmarks[obs[1]], distance_dev_rate, direction_dev)

        # add dim3  (tiny jitter keeps R_t positive definite for sampling)
        R_t +=np.eye(3)*1.0e-10
        self.pose = multivariate_normal(mean=x_hat, cov=R_t).rvs()
        # note: Here draw means extract one particle from the distribution.

    def gauss_for_drawing(self, x_hat, R_t, z, landmark, distance_dev_rate, direction_dev):
        # One EKF-style correction of the proposal Gaussian by measurement z.
        zt_hat, Q_zt, H_xt = self.drawing_params(x_hat, landmark, distance_dev_rate, direction_dev)
        K = R_t.dot(H_xt.T).dot(np.linalg.inv(Q_zt + H_xt.dot(R_t).dot(H_xt.T))) #(8.68)
        mu_t = K.dot(z - zt_hat) + x_hat #(8.69)
        Sigma_t = (np.eye(3) - K.dot(H_xt)).dot(R_t) #(8.70)
        return mu_t, Sigma_t

    def drawing_params(self, x_hat, landmark, distance_dev_rate, direction_dev):
        # linearlize h(x,m)
        d_landmark = np.hypot(*(x_hat[0:2] - landmark.pos))
        Q_zt_hat = matQ(distance_dev_rate*d_landmark, direction_dev)
        ## compute the covariance of the sensor-value distribution from the particle pose and the map ##
        zt_hat = IdealCamera.observation_function(x_hat, landmark.pos)
        H_m = - matH(x_hat, landmark.pos)[0:2,0:2] #(8.62)
        H_xt = matH(x_hat, landmark.pos) #(8.63)
        Q_zt = H_m.dot(landmark.cov).dot(H_m.T) + Q_zt_hat #(8.65)
        return zt_hat, Q_zt, H_xt


class FastSlam2Agent(EstimationAgent): #EstimationAgent is in mcl.py, which is Agent+Estimator.
    def __init__(self, time_interval, nu, omega, estimator):
        super().__init__(time_interval, nu, omega, estimator)

    def decision(self, observation=None):
        # FastSLAM2 passes the observation into the motion update as well (proposal correction).
        self.estimator.motion_update(self.prev_nu, self.prev_omega, self.time_interval, observation) #add an observation.
        self.prev_nu, self.prev_omega = self.nu, self.omega
        self.estimator.observation_update(observation)
        return self.nu, self.omega


class FastSlam2(Mcl):
    def __init__(self, init_pose, num_particle, num_landmark,
                 motion_noise_stds={"nn":0.19, "no":0.001, "on":0.13, "oo":0.2},
                 distance_dev_rate=0.14, direction_dev=0.05): #same with FastSlam
        # NOTE(review): mutable dict as default argument — shared across instances; it is
        # only read here, but worth confirming upstream.
        super().__init__(None, init_pose, num_particle, motion_noise_stds, distance_dev_rate, direction_dev) # let envmap None.
        self.particles = [MapParticle(init_pose, 1.0/num_particle, num_landmark) for i in range(num_particle)]
        self.ml = self.particles[0] # temporally
        self.motion_noise_stds = motion_noise_stds # add to use in motion_update2()

    def draw(self, ax, elems): #same with FastSlam
        super().draw(ax, elems)
        self.ml.map.draw(ax, elems)

    def observation_update(self, observation): #same with FastSlam
        for p in self.particles:
            p.observation_update(observation, self.distance_dev_rate, self.direction_dev) #MapParticle's function
        self.set_ml() # defined in Mcl.
        self.resampling()

    def motion_update(self, nu, omega, time, observation):
        # Only landmarks that have been observed before can correct the proposal;
        # first-time observations fall back to the plain Mcl motion update.
        not_first_obs = []
        for obs in observation:
            if self.particles[0].map.landmarks[obs[1]].cov is not None: # check first particle's map covariance
                not_first_obs.append(obs)

        if len(not_first_obs) > 0:
            for p in self.particles:
                p.motion_update2(nu, omega, time, \
                                 self.motion_noise_stds, not_first_obs, \
                                 self.distance_dev_rate, self.direction_dev) # Mcl's one.
        else:
            for p in self.particles: # use Mcl's motion_update().
                p.motion_update(nu, omega, time, \
                                self.motion_noise_rate_pdf) # Mcl's one.


def trial(cambias=True):
    # Run one 30-second simulated SLAM episode; cambias toggles camera bias noise.
    time_interval = 0.1
    world = World(30, time_interval, debug = False)

    map = Map() # defined in ideal_robot.py
    for landmark in [(-4,2),(2,-3),(3,3)]:
        map.append_landmark((Landmark(*landmark)))
    world.append(map)

    init_pose = np.array([0,0,0]).T
    num_particle = 10
    fastslam = FastSlam2(init_pose, num_particle, len(map.landmarks)) # delete map.
    nu = 0.2
    omega = 10.0/180*math.pi
    agent = FastSlam2Agent(time_interval, nu, omega, fastslam)
    if cambias:
        robot = Robot(init_pose, sensor=Camera(map), agent=agent, color='red')
    else:
        robot = Robot(init_pose, sensor=Camera(map,distance_bias_rate_stddev=0,direction_bias_stddev=0), agent=agent, color='red')
    world.append(robot)

    world.draw()

trial()

trial(cambias=False)
8_slam_with_patriclefilter/fastslam2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.ensemble import VotingClassifier
import cloudpickle
import string
import re
import warnings
from matplotlib import pyplot as plt

warnings.filterwarnings("ignore")
pd.options.mode.chained_assignment = None  # default='warn'
# -

# ## Load Data

# Load Data
train = pd.read_csv('../input/Train.csv')
test = pd.read_csv('../input/Test.csv')

train.head()

# ## Modeling

# +
n_splits = 5
kf = StratifiedKFold(n_splits=n_splits, random_state=47, shuffle=True)

# upload pretrained
# NOTE(review): the vectorizer is a custom cloudpickled object exposing .predict
# (rather than sklearn's usual .transform) — confirm against the vectorizer's source.
vectorizer = cloudpickle.load(open('../vectorizer/vectorizer.pkl', 'rb'))
X_train = vectorizer.predict(train['Text'])
X_test = vectorizer.predict(test['Text'])
y_train = train['Label'].copy()

n_labels = train['Label'].unique().shape[0]
# Out-of-fold and test probability accumulators, one column per class.
y_oof = np.zeros([X_train.shape[0], n_labels])
y_test = np.zeros([X_test.shape[0], n_labels])

i = 0
metrics = list()
for tr_idx, val_idx in kf.split(X_train, y_train):
    X_tr, X_vl = X_train[tr_idx, :], X_train[val_idx, :]
    y_tr, y_vl = y_train.iloc[tr_idx], y_train.iloc[val_idx]

    # Soft-voting ensemble of an MLP and a probability-calibrated SVM.
    estimators = [('mlp', MLPClassifier(250, random_state=47)),
                  ('svm', SVC(C=1.0,probability=True,random_state=42))]
    model = VotingClassifier(estimators=estimators, voting='soft')
    model.fit(X_tr, y_tr)

    y_pred = model.predict(X_vl)
    y_oof[val_idx, :] = model.predict_proba(X_vl)
    metric = accuracy_score(y_vl, y_pred)
    print("fold #{} val_loss: {}".format(i, metric))
    i += 1
    # Average the test probabilities over the folds.
    y_test += model.predict_proba(X_test) / n_splits
    metrics.append(metric)

metrics = np.array(metrics).mean()
print(f'Full accuracy {metrics}')
# Full accuracy 0.6922
# -

# Map the argmax class indices back to the original label strings.
y_sub = np.argmax(y_test, 1)
y_sub = pd.DataFrame({'Label': y_sub})
class_ = model.classes_
y_sub['Label'] = y_sub['Label'].apply(lambda x: class_[x])

sub = test[['ID']]
sub['LABEL'] = y_sub['Label']
sub.head()

# Save submission
sub.to_csv(f'../sub/MLP_SVC_sub_{round(metrics,4)}.csv', index=False)
Competition-Solutions/Text/AI4D Malawi News Classification Challenge/Solution 1/Benchmarking/MLP_SVC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: desdeo-tools
#     language: python
#     name: desdeo-tools
# ---

# Demo of two achievement scalarizing functions (ASFs) on random 2-D objective data:
# each ASF maps every objective vector to a scalar relative to a reference point.

# %matplotlib notebook
from desdeo_tools.scalarization import StomASF, PointMethodASF
import matplotlib.pyplot as plt
import numpy as np

data = np.random.rand(10000, 2)          # 10000 random points in the unit square
ideal = np.asarray((0, 0))
nadir = np.asarray((1, 1))
reference_point = np.asarray((0.5, 0.9))

sca1 = StomASF(ideal=ideal)
sca2 = PointMethodASF(ideal=ideal, nadir=nadir)
sca = [sca1, sca2]

# Scalarized value of every data point under each ASF.
sca1data = sca1(data, reference_point=reference_point)
sca2data = sca2(data, reference_point=reference_point)

f, ax = plt.subplots()
# BUG FIX: the original coloured by the undefined name `scadata` (NameError).
# Colour by the point-method ASF values (swap to sca1data to inspect StomASF instead).
points = ax.scatter(x=data[:, 0], y=data[:, 1], c=sca2data)
ax.scatter(x=reference_point[0], y=reference_point[1], c='r')  # reference point in red
f.colorbar(points)

# BUG FIX: the original stacked the scalarizer *object* `sca1` with `sca2data`;
# stack the two value arrays side by side instead.
np.asarray((sca1data, sca2data)).T

# Same result computed generically over the list of scalarizers.
np.asarray([scalar(data, reference_point) for scalar in sca]).T
docs/notebooks/Scalarization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:jads-env] * # language: python # name: conda-env-jads-env-py # --- # + # Python packages import sys sys.path.append('../') from datetime import datetime import numpy as np import pandas as pd import pickle # Added (new in developing predict) from itertools import product from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split, cross_val_score, RepeatedKFold, GridSearchCV, cross_validate, KFold, cross_val_score from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, Lasso, LassoCV from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.compose import ColumnTransformer, make_column_transformer from sklearn.pipeline import Pipeline, make_pipeline from sklearn.svm import SVR from sklearn.impute import SimpleImputer from sklearn.neighbors import KNeighborsRegressor from sklearn.decomposition import PCA from sklearn.ensemble import RandomForestRegressor from xgboost import XGBRegressor, XGBClassifier # Custom functions import src.settings as settings import src.mapper_cols as mapper_cols from src.run_all.get_data import get_data, get_data_predict from src.run_all.preprocess import preprocess_data, preprocess_data_predict from src.utilities.utilities import get_latest_file, list_filenames # instellingen voor panda weergave aanpassen pd.set_option('display.max_rows', 500) # alle rijen tonen pd.set_option('display.max_columns', 500) # alle kolommen tonen pd.set_option('display.width', 1000) # kolombreedte pd.set_option("display.precision", 2) # precisie van de kolommen aanpassen pd.set_option('display.float_format', lambda x: '{:.15f}'.format(x)) # floats output tot 15 decimalen # - settings.preprocess_predict.keys() # # Load dataframe to extend features for future datapath = '../data/' filename = 
'df_get_data_WMO_WIJK_HUISHOUDENS_BEVOLKING_HEFFING_202104042111.parquet.gzip' df_get_data_WMO = pd.read_parquet(datapath + filename) # df_get_data = df_get_data_WMO.reset_index().copy() df_get_data = df_get_data_WMO.copy() df_get_data.head() # + # datapath = '../data/' # filename = 'df_preprocessed_202104042151_Boerenverstand_Maikel.parquet.gzip' # df_preprocessed = pd.read_parquet(datapath + filename) # + # df_preprocessed # + # print(list(df_preprocessed.reset_index().columns)) # - # # Main_predict # Settings periods = [2020,2021,2022] trained_model = get_latest_file(filename_str_contains='best_model_', datapath=datapath, filetype='pickle') df_prognoses = get_data_predict(save_all=False, personal_note="") df_prognoses.head(10) df = df_prognoses.reset_index().copy() df.head() lijst = list(df['interval'].unique()) print("Strategy 2: GroupInterpolateImputer to interpolate for columns:\n", "* With values in the future \n", "* With constant values \n", "* That are not used in the trained model") # + # datetime_now = datetime.now() # filename = 'df_get_data_predict_' + datetime.strftime(datetime_now, format='%Y%m%d%H%M') # - # + # df_prognoses.to_csv(datapath+filename+'.csv') # + # df_prognoses.to_parquet(datapath + filename + '.parquet.gzip', compression='gzip') # - df_get_data.empty ## Get data if df_get_data.empty: df_get_data_WMO = get_data(save=True) if df_prognoses.empty: df_prognoses = get_data_predict(periods=periods, save_all=True, personal_note="") ## Preprocess # Preprocess predict df_preprocessed_predict = preprocess_data_predict(df_get_data, df_prognoses, save_all=True, personal_note="") df_preprocessed_predict # Preprocess (general) df_preprocessed = preprocess_data(df=df_preprocessed_predict, save_all=False, personal_note='predict') df_preprocessed = df_preprocessed.drop(settings.Y_TARGET_COLS, axis=1) ## Predict y_preds = trained_model.predict(df_preprocessed) y_preds # ## Extend strategy # + # import src.settings as settings # from 
# NOTE(review): continuation of the commented-out import that starts at the end of the
# previous line ("# from"); kept as comment text.
# src.preprocess.preprocess import get_and_combine_cbs_tables, rename_and_subset_cols, \
#     get_region_period_spec_val_subtable, downcast_variables_dataframe

# load pickle file for predict  (translated from Dutch: "pickle file inladen voor predict")
loaded_model = get_latest_file(filename_str_contains='best_model_', datapath=datapath, filetype='pickle')
# -

# Manual re-implementation of get_latest_file: sort directory listing, take last match.
from os import listdir
from os.path import isfile, join
filename_str_contains='best_model_'
datapath='../data/'
onlyfiles = sorted([f for f in listdir(datapath) if isfile(join(datapath, f))])
# Get last file
filename = [s for s in onlyfiles if filename_str_contains in s][-1]
filename

from src.run_all.main_predict import predict_data

periods = [2020,2021,2022]

# %time df = predict_data(df_get_data=df_get_data, periods=periods, trained_model=loaded_model)

df
df_get_data.shape
loaded_model
2235+936

periods_2 = [2020,2021,2022]
[str(x) for x in periods_2]

# settings.preprocess['MISSING_BOUNDARY'] = 0.99
df_preprocessed = preprocess_data(df=df, save_all=False, personal_note='test')
settings.preprocess
df_preprocessed

# +
## Get data (for extending get data with future)
# Determine boundaries for get prognose data: round the last requested year UP to the
# next multiple of 5 (CBS prognosis tables are published in 5-year steps).
roundedto5periods = max(periods) + (5 - max(periods)) % 5
total_periods = list(range(min(periods), roundedto5periods+1, 1))

print("Get 'regio-indeling'")
df_regioindeling = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_REGIOINDELING'],
                                              double_trouble_colnames=settings.predict[
                                                  'DICT_DOUBLETROUBLECOLNAMES_REGIOINDELING'],
                                              url=settings.get_data['CBS_OPEN_URL'])
df_regioindeling = rename_and_subset_cols(df=df_regioindeling,
                                          dict_rename=settings.predict['DICT_COLS_RENAMED_REGIOINDELING'],
                                          list_cols=settings.predict['LIST_COLS_SUBSET_REGIOINDELING'])
df_regioindeling[settings.predict['LIST_STR_STRIP_COLS_REGIOINDELING']] = df_regioindeling[
    settings.predict['LIST_STR_STRIP_COLS_REGIOINDELING']].apply(lambda x: x.str.strip())

print("Get 'prognose huishoudens' tables")
df_huishouden_prognose = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_HUISHOUDEN'],
                                                    url=settings.get_data['CBS_OPEN_URL'])
df_huishouden_prognose['interval'] = df_huishouden_prognose['perioden']
df_huishouden_prognose = df_huishouden_prognose.rename(columns=settings.predict['DICT_COLS_RENAMED_HUISHOUDEN'])
df_huishouden_prognose = df_huishouden_prognose[df_huishouden_prognose['prognoseinterval'] == 'Prognose']
# NOTE(review): str.contains treats '(CR)' / '(PV)' as a regex capture group, so this
# matches the bare letters CR/PV anywhere in the name, not the literal parenthesised
# suffix — consider regex=False or escaping. Left unchanged here.
df_huishouden_prognose = df_huishouden_prognose[
    (df_huishouden_prognose['gemeentenaam'].str.contains('(CR)') == False) &
    (df_huishouden_prognose['gemeentenaam'].str.contains('(PV)') == False) &
    (df_huishouden_prognose['gemeentenaam'] != 'Nederland')].copy()
# CBS reports households in thousands; scale to absolute counts.
df_huishouden_prognose['particulierehuishoudens'] = df_huishouden_prognose['particulierehuishoudens'] * 1000
df_huishouden_prognose['particulierehuishoudens'] = df_huishouden_prognose[
    'particulierehuishoudens'].round().astype(int)
df_huishouden_prognose_pivot = pd.pivot_table(data=df_huishouden_prognose, values='particulierehuishoudens',
                                              index=['gemeentenaam', 'interval'],
                                              columns=['samenstellingvanhethuishouden'],
                                              aggfunc=np.sum).reset_index()
df_huishouden_prognose_pivot = df_huishouden_prognose_pivot[
    df_huishouden_prognose_pivot['interval'].astype(int) <= roundedto5periods]
df_huishouden_prognose_pivot = rename_and_subset_cols(df=df_huishouden_prognose_pivot,
                                                      dict_rename=settings.predict[
                                                          'DICT_COLS_RENAMED_HUISHOUDEN_PIVOT'],
                                                      list_cols=settings.predict[
                                                          'LIST_COLS_SUBSET_HUISHOUDING_PIVOT'])

print("Get 'prognose bevolking' tables")
df_population_prognose = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_BEVOLKING'],
                                                    url=settings.get_data['CBS_OPEN_URL'])
df_population_prognose = rename_and_subset_cols(df=df_population_prognose,
                                                dict_rename=settings.predict['DICT_COLS_RENAMED_BEVOLKING'],
                                                list_cols=settings.predict['LIST_COLS_SUBSET_BEVOLKING'])
# 'perioden' looks like e.g. "... 2025"; keep only the trailing year token as interval.
df_population_prognose['interval'] = df_population_prognose['perioden'].apply(lambda x: x.split(' ')[-1])
df_population_prognose = df_population_prognose[
    (df_population_prognose['gemeentenaam'].str.contains('(CR)') == False) &
    (df_population_prognose['gemeentenaam'].str.contains('(PV)') == False) &
    (df_population_prognose['gemeentenaam'] != 'Nederland')].copy()
df_population_prognose = df_population_prognose[df_population_prognose['interval'].astype(int) <= roundedto5periods]
# Inhabitants also published in thousands.
df_population_prognose['aantalinwoners'] = df_population_prognose['aantalinwoners'] * 1000
df_population_prognose['aantalinwoners'] = df_population_prognose['aantalinwoners'].round().astype(int)
df_population_prognose = df_population_prognose.drop(['perioden'], axis=1)

# Merge all dataframes
df_prognoses = pd.merge(df_regioindeling, df_huishouden_prognose_pivot, how='left',
                        left_on=['gemeentenaam'], right_on=['gemeentenaam'])
df_prognoses = pd.merge(df_prognoses, df_population_prognose, how='left',
                        left_on=['gemeentenaam', 'interval'], right_on=['gemeentenaam', 'interval'])

# Concat with original 'get data' dataframe (incl. drop multiplicacities that don't occur in original dataset)
list_unchanged_multiplicacities = df_get_data[df_get_data['interval'] == df_get_data['interval'].max()][
    'codering_regio'].unique()
df_prognoses = df_prognoses[df_prognoses['codering_regio'].isin(list_unchanged_multiplicacities)]
df_future = pd.concat([df_get_data, df_prognoses], axis=0)
df_future = df_future.sort_values(['codering_regio', 'interval']).reset_index().drop(['index'], axis=1)
# -

df_future

# +
# list_cols_prognoses
# -

## Extend dataframe for blancs
# Determine columns for each imputing strategy:
#  - string columns (ffill), columns covered by the prognosis tables (group imputer),
#  - remaining trained-model columns (relational imputer).
list_cols_prognoses = df_prognoses.columns
# list_cols_prognoses_str = [x for x in list(df_prognoses.loc[:, df_prognoses.dtypes == object].columns) if x!='codering_regio']
list_cols_prognoses_num = list(df_prognoses.loc[:, df_prognoses.dtypes != object].columns)
list_all_columns = list(df_future.columns)
list_cols_str = list(df_future.loc[:, df_future.dtypes == object].columns)
list_cols_str = list(set(list_cols_str) - set(list_cols_prognoses))
list_cols_trained_model = settings.predict['LIST_COLS_TRAINED_MODEL']
# Strip the 'relative_' prefix so model features map back to raw column names.
list_cols_trained_model = list(set([x.replace('relative_', '') for x in list_cols_trained_model]))
list_cols_relate_imputer = list(
    set(list_cols_trained_model) - set(settings.predict['LIST_COLS_TRAINED_MODEL_INVARIABLY']) - set(
        list_cols_prognoses))
list_cols_group_imputer = list(set(list_all_columns)-set(list_cols_str)-set(list_cols_relate_imputer))

df_future_cop = df_future.copy()

# ffill for string columns
df_future_cop.loc[:, list_cols_str] = df_future_cop.loc[:, list_cols_str].ffill()

from src.utilities.transformers import ColumnSelector, GroupInterpolateImputer, RelativeColumnScaler, \
    CustomScaler, CustomImputer

# Group imputer for available future / invariably columns / columns not used in trained model
GII = GroupInterpolateImputer(groupcols=settings.predict['GROUP_INTERPOLATE_IMPUTER_GROUPCOLS'],
                              interpolate_method=settings.predict['GROUP_INTERPOLATE_IMPUTER_METHOD'],
                              cols=list_cols_group_imputer)
df_future_cop = GII.fit_transform(df_future_cop)
df_future_cop

# Relational imputer for other columns in trained model
list_cols_relate_imputer

# +
# Base-year ratios: each relate-imputer column as a fraction of inhabitants in 2019.
base_col = 'aantalinwoners'
future_years = ['2020', '2021', '2022', '2023', '2024', '2025']
all_relate_cols_necessary = settings.predict['LIST_COLS_GROUPER_RELATE_IMPUTER']+list_cols_relate_imputer+[base_col]
df_base_year = df_future_cop[df_future_cop['interval']=='2019'][all_relate_cols_necessary]
# -

df_base_year.loc[:, list_cols_relate_imputer] = df_base_year.loc[:, list_cols_relate_imputer].div(df_base_year['aantalinwoners'], axis=0)
df_base_year
# Keep only regions that still exist at the prognosis horizon (2025).
df_base_year = df_base_year[df_base_year['codering_regio'].isin(df_future_cop[df_future_cop['interval']=='2025'].codering_regio.unique())]

# +
# df_base_year.set_index('codering_regio')[col]
# +
# df_future_2 = df_future_cop.copy()
# df_future_2 = df_future_2.set_index('codering_regio')
# +
# df_future_2[df_future_2['interval']=='2021'][base_col]
# +
# NOTE(review): the first line below was commented out in the original (its '#' sits at the
# end of the previous mangled line); kept as a comment.
# df_future_2[df_future_2['interval']=='2021'].loc[:,col] = df_future_2[df_future_2['interval']=='2021'].loc[:,base_col] * df_base_year.set_index('codering_regio')[col]
# -

# df_future_2[df_future_2['interval']=='2021'].loc[:,col]

# NOTE(review): 'year' and 'col' are free variables left over from earlier loop runs —
# these scratch expressions only work after the loop below has executed at least once.
df_future_2[df_future_2['interval']==year].loc[:,col]
df_future_2[df_future_2['interval']==year].loc[:,base_col]
df_base_year.set_index('codering_regio')[col]

df_future_cop[df_future_cop['interval'].isin(future_years)].loc[:,['codering_regio']+list_cols_relate_imputer+[base_col]]

# +
# Relational impute: overwrite each relate column with inhabitants x base-year ratio,
# aligned on the codering_regio index.
df_future_2 = df_future_cop.copy()
df_future_2 = df_future_2.set_index('codering_regio')
for col in list_cols_relate_imputer:
    df_future_2.loc[:,col] = df_future_2.loc[:,base_col]
#     for year in future_years:
    base_col_series = df_future_2[df_future_2['interval']==year].loc[:,base_col]
    perc_col_series = df_base_year.set_index('codering_regio')[col]
#         df_future_2[df_future_2['interval']==year].loc[:,col] = base_col_series.multiply(perc_col_series)
    df_future_2.loc[:,col] = df_future_2.loc[:,col] * perc_col_series
#     print(base_col_series.multiply(perc_col_series))
# -

# Hand check of one region/column: ratio * inhabitants.
0.507697108383607*9528.333333333333940

df_future_2[~df_future_2['interval'].isin(future_years)].loc[:,list_cols_relate_imputer]

# +
# df_future_cop[df_future_cop['interval'].isin(future_years)].loc[:,col]
# -

# Toy fixtures to verify the relational-impute arithmetic by hand:
# df_hist_perc holds 2019 ratios, df_future the frame with NaN future values,
# df_uitkomst the expected outcome.
df_hist_perc = pd.DataFrame({'code_regio': ['AB01', 'AB02', 'AB03'],
                             'interval': ['2019', '2019', '2019'],
                             'allenstaande_vrouwen': [0.4, 0.15, 0.2],
                             'alleenstaande_mannen': [0.3, 0.1, 0.3]})
df_future = pd.DataFrame({'code_regio': ['AB01', 'AB01','AB01','AB02','AB02','AB02', 'AB03','AB03','AB03'],
                          'interval': ['2019', '2020', '2021','2019', '2020', '2021', '2019', '2020', '2021'],
                          'allenstaande_vrouwen': [4, np.nan, np.nan,15, np.nan, np.nan,5, np.nan, np.nan],
                          'alleenstaande_mannen': [3, np.nan, np.nan,11.5, np.nan, np.nan,15, np.nan, np.nan],
                          'aantalinwoners': [10,20,30, 100,115,130, 25,50,75]})
df_uitkomst = pd.DataFrame({'code_regio': ['AB01', 'AB01','AB01','AB02','AB02','AB02', 'AB03','AB03','AB03'],
                            'interval': ['2020', '2021', '2022','2020', '2021', '2022','2020', '2021', '2022'],
                            'allenstaande_vrouwen': [4, 8, 12, 15,17.25,19.5, 5,10,15],
                            'alleenstaande_mannen': [3,6,9, 10,11.5,13, 7.5,15,22.5],
                            'aantalinwoners': [10,20,30, 100,115,130, 25,50,75]})

df_hist_perc
df_future
df_uitkomst

# +
# df_hist_perc = df_base_year.copy()
# df_future = df_future_cop[df_future_cop['interval'].isin(future_years)].copy()
# -

# Apply the same impute recipe to the toy frames and eyeball against df_uitkomst.
df_uitkomst_test = df_future.copy()
df_uitkomst_test = df_uitkomst_test.set_index('code_regio')
for col in ['allenstaande_vrouwen', 'alleenstaande_mannen']:
# for col in list_cols_relate_imputer:
    df_uitkomst_test.loc[:, col] = df_uitkomst_test['aantalinwoners'] * df_hist_perc.set_index('code_regio')[col]
#     df_uitkomst_test.loc[:, col] = df_uitkomst_test[base_col] * df_hist_perc.set_index('codering_regio')[col]
df_uitkomst_test

df_hist_perc.set_index('code_regio')['alleenstaande_mannen']

list(df_prognoses.loc[:, df_prognoses.dtypes == object].columns)
list_cols_prognoses_num

df_future_cop[df_future_cop['interval'].isin(['2017', '2018', '2019'])][settings.predict['LIST_COLS_GROUPER_RELATE_IMPUTER']+list_cols_prognoses_num+list_cols_relate_imputer]
df_future_cop[df_future_cop['interval'].isin(['2017', '2018', '2019'])][settings.predict['LIST_COLS_GROUPER_RELATE_IMPUTER']+list_cols_prognoses_num+list_cols_relate_imputer].dtypes

list_cols_relate

# +
# Variance exploration over the historical years for the relate columns.
list_past_period = ['2017', '2018', '2019']
list_cols_relate = settings.predict['LIST_COLS_GROUPER_RELATE_IMPUTER']+list_cols_relate_imputer
df_var = df_future_cop[df_future_cop['interval'].isin(list_past_period)][list_cols_relate+['aantalinwoners']].copy()
# for basecol in list_cols_prognoses_num:
#     print(basecol)
#     df_var.loc[:, list_cols_relate_imputer] = df_var.loc[:, list_cols_relate_imputer] / df_var[basecol]
# df_var.loc[:, list_cols_relate_imputer] = df_var.loc[:, list_cols_relate_imputer].div(df_var['aantalinwoners'], axis=0)
# df_var_mean =
df_var.groupby(['codering_regio']).mean().drop(['aantalinwoners'], axis=1) # - df_var df_var['aantalinwoners'] df_var[df_var['codering_regio'].isin(['GM0085', 'GM0017'])] df_future_cop[df_future_cop['codering_regio'].isin(['GM0085', 'GM0017'])][['alleenstaande_mannen', 'alleenstaande_vrouwen', 'aantalinwoners', 'gemeentenaam']] import statistics # statistics.pvariance df_var.loc[3] df_var = df_var.drop(['interval'], axis=1) * 1 df_var = df_var.groupby(['codering_regio']) gb gb.apply(lambda grp: statistics.pvariance(grp)) list(df_prognoses.loc[:, df_prognoses.dtypes != object].columns) df_future[df_future['interval'].isin(['2017', '2018', '2019'])] df_future[df_future['interval'].isin(['2020', '2021', '2022', '2023'])] import src.settings as settings from src.preprocess.preprocess import get_and_combine_cbs_tables, rename_and_subset_cols, \ get_region_period_spec_val_subtable, downcast_variables_dataframe periods = [2020, 2021, 2022] round(max(periods), 5) roundedto5periods = max(periods) + (5 - max(periods)) % 5 total_periods = list(range(min(periods), roundedto5periods+1, 1)) total_periods print("Get 'progonse bevolking' tables") df_population_prognose = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_BEVOLKING'], url=settings.get_data['CBS_OPEN_URL']) df_population_prognose = rename_and_subset_cols(df=df_population_prognose, dict_rename=settings.predict['DICT_COLS_RENAMED_BEVOLKING'], list_cols=settings.predict['LIST_COLS_SUBSET_BEVOLKING']) df_population_prognose['interval'] = df_population_prognose['perioden'].apply(lambda x: x.split(' ')[-1]) df_population_prognose = df_population_prognose[(df_population_prognose['gemeentenaam'].str.contains('(CR)')==False) & (df_population_prognose['gemeentenaam'].str.contains('(PV)')==False) & (df_population_prognose['gemeentenaam']!='Nederland')].copy() df_population_prognose = df_population_prognose[df_population_prognose['interval'].astype(int)<=roundedto5periods] 
# NOTE(review): this earlier iteration rounds inhabitants/households WITHOUT the x1000
# scaling that the later consolidated versions apply — presumably superseded; verify.
df_population_prognose['aantalinwoners'] = df_population_prognose['aantalinwoners'].round().astype(int)
df_population_prognose

print("Get 'prognose huishoudens' tables")
df_huishouden_prognose = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_HUISHOUDEN'],
                                                    url=settings.get_data['CBS_OPEN_URL'])
df_huishouden_prognose['interval'] = df_huishouden_prognose['perioden']
df_huishouden_prognose = df_huishouden_prognose.rename(columns=settings.predict['DICT_COLS_RENAMED_HUISHOUDEN'])
df_huishouden_prognose = df_huishouden_prognose[df_huishouden_prognose['prognoseinterval']=='Prognose']
df_huishouden_prognose = df_huishouden_prognose[(df_huishouden_prognose['gemeentenaam'].str.contains('(CR)')==False) &
                                                (df_huishouden_prognose['gemeentenaam'].str.contains('(PV)')==False) &
                                                (df_huishouden_prognose['gemeentenaam']!='Nederland')].copy()
df_huishouden_prognose['particulierehuishoudens'] = df_huishouden_prognose['particulierehuishoudens'].round().astype(int)
# Pivot household composition categories into columns per (gemeente, interval).
df_huishouden_prognose_pivot = pd.pivot_table(data=df_huishouden_prognose, values='particulierehuishoudens',
                                              index=['gemeentenaam', 'interval'],
                                              columns=['samenstellingvanhethuishouden'],
                                              aggfunc=np.sum).reset_index()
df_huishouden_prognose_pivot = df_huishouden_prognose_pivot[df_huishouden_prognose_pivot['interval'].astype(int) <= roundedto5periods]
df_huishouden_prognose_pivot = rename_and_subset_cols(df=df_huishouden_prognose_pivot,
                                                      dict_rename=settings.predict['DICT_COLS_RENAMED_HUISHOUDEN_PIVOT'],
                                                      list_cols=settings.predict['LIST_COLS_SUBSET_HUISHOUDING_PIVOT'])
df_huishouden_prognose_pivot

print("Get 'regio-indeling'")
df_regioindeling = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_REGIOINDELING'],
                                              double_trouble_colnames=settings.predict[
                                                  'DICT_DOUBLETROUBLECOLNAMES_REGIOINDELING'],
                                              url=settings.get_data['CBS_OPEN_URL'])
df_regioindeling = rename_and_subset_cols(df=df_regioindeling,
                                          dict_rename=settings.predict['DICT_COLS_RENAMED_REGIOINDELING'],
                                          list_cols=settings.predict['LIST_COLS_SUBSET_REGIOINDELING'])
df_regioindeling[settings.predict['LIST_STR_STRIP_COLS_REGIOINDELING']] = df_regioindeling[
    settings.predict['LIST_STR_STRIP_COLS_REGIOINDELING']].apply(lambda x: x.str.strip())
df_regioindeling

# +
# Merge all dataframes
df_prognoses = pd.merge(df_regioindeling, df_huishouden_prognose_pivot, how='left',
                        left_on=['gemeentenaam'], right_on=['gemeentenaam'])
df_prognoses = pd.merge(df_prognoses, df_population_prognose, how='left',
                        left_on=['gemeentenaam', 'interval'], right_on=['gemeentenaam', 'interval'])
# -

df_prognoses

# Raw cbsodata exploration to derive the column-rename mapping for the regioindeling table.
import cbsodata
dict_tables=settings.predict['DICT_TABLES_REGIOINDELING']
url=settings.get_data['CBS_OPEN_URL']
print(f"Number of tables to collect: {len(dict_tables)}")
df = pd.DataFrame()
for interval, table in dict_tables.items():
    print(f"Pythonic iteration {interval} for table {table}")
    df_sub = pd.DataFrame(cbsodata.get_data(table, catalog_url=url))

{i:i for i in df_sub.columns}

# Draft of DICT_DOUBLETROUBLECOLNAMES_REGIOINDELING: numbered CBS columns -> suffixed names.
{'Code_1': 'Code_1gemeente', 'Naam_2': 'Naam_2gemeente', 'SorteringNaam_3': 'SorteringNaam_3gemeente',
 'Code_4': 'Code_4arbeidsmarktregio', 'Naam_5': 'Naam_5arbeidsmarktregio',
 'Code_6': 'Code_6arrondissementenrechtsgebieden', 'Naam_7': 'Naam_7arrondissementenrechtsgebieden',
 'Code_8': 'Code_8corop', 'Naam_9': 'Naam_9corop', 'Code_10': 'Code_10coropsub', 'Naam_11': 'Naam_11coropsub',
 'Code_12': 'Code_12coropplus', 'Naam_13': 'Naam_13coropplus', 'Code_14': 'Code_14ggdregio',
 'Naam_15': 'Naam_15ggdregio', 'Code_16': 'Code_16jeugdzorgregio', 'Naam_17': 'Naam_17jeugdzorgregio',
 'Code_18': 'Code_18kvk', 'Naam_19': 'Naam_19jkvk', 'Code_20': 'Code_20landbouwgebieden',
 'Naam_21': 'Naam_21landbouwgebieden', 'Code_22': 'Code_22landbouwgebiedengroepen',
 'Naam_23': 'Naam_23landbouwgebiedengroepen', 'Code_24': 'Code_24landsdelen', 'Naam_25': 'Naam_25landsdelen',
 'Code_26': 'Code_26nutseen', 'Naam_27': 'Naam_27nutseen', 'Code_28': 'Code_28nutstwee',
 'Naam_29': 'Naam_29nutstwee', 'Code_30': 'Code_30nutsdrie', 'Naam_31': 'Naam_31nutsdrie',
 'Code_32': 'Code_32provincies', 'Naam_33': 'Naam_33provincies', 'Code_34': 'Code_34regionaleeenheden',
 'Naam_35': 'Naam_35regionaleeenheden', 'Code_36': 'Code_36regionaleenergiestrategieregios',
 'Naam_37': 'Naam_37regionaleenergiestrategieregios', 'Code_38': 'Code_38regionalemeldencoordinatiepunten',
 'Naam_39': 'Naam_39regionalemeldencoordinatiepunten', 'Code_40': 'Code_40regioplusarbeidsmarktregios',
 'Naam_41': 'Naam_41regioplusarbeidsmarktregios', 'Code_42': 'Code_42ressortenrechtsgebieden',
 'Naam_43': 'Naam_43ressortenrechtsgebieden', 'Code_44': 'Code_44subresregios', 'Naam_45': 'Naam_45subresregios',
 'Code_46': 'Code_46toeristengebieden', 'Naam_47': 'Naam_47toeristengebieden',
 'Code_48': 'Code_48veiligheidsregios', 'Naam_49': 'Naam_49veiligheidsregios',
 'Code_50': 'Code_50zorgkantoorregios', 'Naam_51': 'Naam_51zorgkantoorregios',
 'Code_52': 'Code_52gemeentegrootte', 'Omschrijving_53': 'Omschrijving_53gemeentegrootte',
 'Code_54': 'Code_54stedelijksheidsklase', 'Omschrijving_55': 'Omschrijving_55stedelijkheidsklasse',
 'Inwonertal_56': 'Inwonertal_56', 'Omgevingsadressendichtheid_57': 'Omgevingsadressendichtheid_57'}

# NOTE(review): the literal below is BROKEN scratch (mixes set and dict items, and
# 'SorteringNaam_3': has no value) — it is a SyntaxError as written. Preserved verbatim.
{'ID', 'RegioS', 'Code_1':'codegemeente', 'Naam_2':'naamgemeente', 'SorteringNaam_3': , 'Code_4', 'Naam_5',
 'Code_6', 'Naam_7', 'Code_8', 'Naam_9', 'Code_10', 'Naam_11', 'Code_12', 'Naam_13', 'Code_14', 'Naam_15',
 'Code_16', 'Naam_17', 'Code_18', 'Naam_19', 'Code_20', 'Naam_21', 'Code_22', 'Naam_23', 'Code_24', 'Naam_25',
 'Code_26', 'Naam_27', 'Code_28', 'Naam_29', 'Code_30', 'Naam_31', 'Code_32', 'Naam_33', 'Code_34', 'Naam_35',
 'Code_36', 'Naam_37', 'Code_38', 'Naam_39', 'Code_40', 'Naam_41', 'Code_42', 'Naam_43', 'Code_44', 'Naam_45',
 'Code_46', 'Naam_47', 'Code_48', 'Naam_49', 'Code_50', 'Naam_51', 'Code_52', 'Omschrijving_53', 'Code_54',
 'Omschrijving_55', 'Inwonertal_56', 'Omgevingsadressendichtheid_57'}

# +
# NOTE(review): body of get_and_combine_cbs_tables pasted at top level — the bare
# `return df` below is a SyntaxError outside a function. Preserved verbatim.
print(f"Number of tables to collect: {len(dict_tables)}")
df = pd.DataFrame()
for interval, table in dict_tables.items():
    print(f"Pythonic iteration {interval} for table {table}")
    try:
        df_sub = pd.DataFrame(cbsodata.get_data(table, catalog_url=url))
        if double_trouble_colnames:
            df_sub = df_sub.rename(columns=double_trouble_colnames)
        # Strip trailing digits/underscores and lowercase to normalise CBS column names.
        cols_wijk_stripped = [i.rstrip('0123456789').replace("_", "").lower() for i in list(df_sub.columns)]
        dict_wijk_cols_renamed = {key: value for key, value in zip(iter(df_sub.columns), iter(cols_wijk_stripped))}
        df_sub = df_sub.rename(columns=dict_wijk_cols_renamed)
        df_sub['interval'] = interval
#         print(list(df_sub.columns))
    except Exception:
        df_sub = pd.DataFrame()
        pass
    df = pd.concat([df, df_sub], sort=True)
#     print(list(df.columns))
return df
# -

df_huishouden_prognose_pivot = pd.pivot_table(data=df_huishouden_prognose, values='particulierehuishoudens',
                                              index=['regioindeling', 'interval'],
                                              columns=['samenstellingvanhethuishouden'],
                                              aggfunc=np.sum).reset_index()
df_huishouden_prognose_pivot

df_huishouden_prognose = df_huishouden_prognose[(df_huishouden_prognose['regioindeling'].str.contains('(CR)')==False) &
                                                (df_huishouden_prognose['regioindeling'].str.contains('(PV)')==False) &
                                                (df_huishouden_prognose['regioindeling']!='Nederland')].copy()

# NOTE(review): broken scratch — 'this'|'that' is not regex alternation (it ORs two
# strings) and `and` does not work element-wise on Series. Preserved verbatim.
df[df["col1"].str.contains('this'|'that')==False and df["col2"].str.contains('foo'|'bar')==True]

df_population_prognose = rename_and_subset_cols(df=df_population_prognose,
                                                dict_rename=settings.get_data['DICT_COLS_RENAMED_WMO'],
                                                list_cols=settings.get_data['LIST_COLS_SUBSET_WMO'])
df_population_prognose['interval'] = df_population_prognose['perioden'].apply(lambda x: x.split(' ')[-1])

# # Extend dataframe with future
# Build the (region x future-year) scaffold via a cartesian product, then stack under history.
list_unique_regions = list(df[df['interval']==df['interval'].max()]['codering_regio'].unique())
list_future_years = ['2020', '2021', '2022']
df_future = pd.DataFrame(list(product(list_unique_regions, list_future_years)), columns=['codering_regio', 'interval'])
df_extended = pd.concat([df, df_future])
df_extended['interval'] = df_extended['interval'].astype(int)
df_extended = df_extended.sort_values(['codering_regio', 'interval']).reset_index().drop(['index'], axis=1)
df_extended

# # Strategy one: Use GroupInterpolateImputer
# +
from sklearn import preprocessing
from sklearn.pipeline import make_pipeline
import src.settings as settings
from src.preprocess.preprocess import make_df_missing
from src.utilities.transformers import ColumnSelector, GroupInterpolateImputer, RelativeColumnScaler, \
    CustomScaler, CustomImputer
# +
df_preprocess = df_extended.reset_index().copy()

# Determine columns with to much missing values
df_missing = make_df_missing(df_preprocess)
list_drop_missing_cols = list(
    df_missing[df_missing['perc_missing'] > 0.99].index)

# Determine columns which are not numeric but objects
list_drop_object_cols = list(df_preprocess.loc[:, df_preprocess.dtypes == object].columns)

# Determine list of columns for first ColumnSelector
drop_cols_total = list(set(list_drop_missing_cols + list_drop_object_cols))
drop_cols_total = [c for c in drop_cols_total if c not in settings.preprocess['ORIGINAL_INDEX']]
list_column_selector_1 = [c for c in list(df_preprocess.columns) if c not in drop_cols_total]

# Make Pipeline and fit transform df_preprocess
pl_preprocess = make_pipeline(
    ColumnSelector(cols=list_column_selector_1),
    GroupInterpolateImputer(groupcols=settings.preprocess['GROUP_INTERPOLATE_IMPUTER_GROUPCOLS'],
                            interpolate_method='values',
                            cols=settings.preprocess['GROUP_INTERPOLATE_IMPUTER_COLS']))
# -

df_preprocessed = pl_preprocess.fit_transform(df_preprocess)
df_preprocessed[df_preprocessed['codering_regio']=='GM0197']

# +
# # Create sample set for developing visualisation
# df_test_set_for_visualise = df_preprocessed[['codering_regio', 'interval', 'wmoclientenper1000inwoners']].copy()
# mu, sigma = 0, 0.1
# noise = np.random.normal(mu, sigma, len(df_test_set_for_visualise))
# df_test_set_for_visualise['wmoclientenper1000inwoners'] = df_test_set_for_visualise['wmoclientenper1000inwoners'] + noise
# NOTE(review): this first line was commented out in the original (its '#' sits at the end
# of the previous mangled line); kept as a comment.
# df_test_set_for_visualise.to_csv('../data/sampleset_y_predict.csv')
# -

pd.DataFrame.interpolate.__code__.co_varnames

# # Strategy 2: ffill + percentage
# Including making method to determine percentage:
#
# * Population growth percentage per type of region? Whole country?
# * Fixed?
# * Certain age?

# # Strategy 3: Prognose CBS
# Possible tables:  (translated from Dutch "Mogelijke tabellen")
# * 84525NED -> not all municipalities
# * 84528NED -> only every 5 years
# * 84526NED -> households

import cbsodata

table = '84526NED'
url = settings.get_data['CBS_OPEN_URL']
df_prognose_bevolking = pd.DataFrame(cbsodata.get_data(table, catalog_url=url))

table = '84528NED'
url = settings.get_data['CBS_OPEN_URL']
df_prognose_bevolking2 = pd.DataFrame(cbsodata.get_data(table, catalog_url=url))

df_prognose_bevolking.head()
df_prognose_bevolking2.head()
df_prognose_bevolking.SamenstellingVanHetHuishouden.unique()
df_prognose_bevolking.RegioIndeling2018.unique()

# # Load model, select columns

# +
def predict_data(df_get_data, periods, trained_model, save_all=False, personal_note=""):
    """Extend the historical WMO frame with CBS prognosis data, impute the gaps and
    score the trained model on the requested future periods.

    Parameters
    ----------
    df_get_data : pd.DataFrame
        Historical frame from get_data(); must contain 'interval' and 'codering_regio'.
    periods : list[int]
        Years to predict (e.g. [2020, 2021, 2022]).
    trained_model : fitted estimator with a .predict() method.
    save_all, personal_note : accepted but not used in the body below.

    Returns
    -------
    Predictions from trained_model.predict on the preprocessed future frame.
    """
    ## Get data (for extending get data with future)
    # Determine boundaries for get prognose data: round up to the next multiple of 5,
    # since CBS prognosis tables run in 5-year steps.
    roundedto5periods = max(periods) + (5 - max(periods)) % 5
    total_periods = list(range(min(periods), roundedto5periods+1, 1))
    total_periods_str = [str(x) for x in total_periods]
    print("Get 'regio-indeling'")
    df_regioindeling = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_REGIOINDELING'],
                                                  double_trouble_colnames=settings.predict[
                                                      'DICT_DOUBLETROUBLECOLNAMES_REGIOINDELING'],
                                                  url=settings.get_data['CBS_OPEN_URL'])
    df_regioindeling = rename_and_subset_cols(df=df_regioindeling,
                                              dict_rename=settings.predict['DICT_COLS_RENAMED_REGIOINDELING'],
                                              list_cols=settings.predict['LIST_COLS_SUBSET_REGIOINDELING'])
    df_regioindeling[settings.predict['LIST_STR_STRIP_COLS_REGIOINDELING']] = df_regioindeling[
        settings.predict['LIST_STR_STRIP_COLS_REGIOINDELING']].apply(lambda x: x.str.strip())
    print("Get 'prognose huishoudens' tables")
    df_huishouden_prognose = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_HUISHOUDEN'],
                                                        url=settings.get_data['CBS_OPEN_URL'])
    df_huishouden_prognose['interval'] = df_huishouden_prognose['perioden']
    df_huishouden_prognose = df_huishouden_prognose.rename(columns=settings.predict['DICT_COLS_RENAMED_HUISHOUDEN'])
    df_huishouden_prognose = df_huishouden_prognose[df_huishouden_prognose['prognoseinterval'] == 'Prognose']
    # NOTE(review): '(CR)'/'(PV)' are treated as regex capture groups by str.contains —
    # this matches bare CR/PV anywhere in the name; consider regex=False. Left as-is.
    df_huishouden_prognose = df_huishouden_prognose[
        (df_huishouden_prognose['gemeentenaam'].str.contains('(CR)') == False) &
        (df_huishouden_prognose['gemeentenaam'].str.contains('(PV)') == False) &
        (df_huishouden_prognose['gemeentenaam'] != 'Nederland')].copy()
    # CBS publishes households in thousands; scale to absolute counts.
    df_huishouden_prognose['particulierehuishoudens'] = df_huishouden_prognose['particulierehuishoudens'] * 1000
    df_huishouden_prognose['particulierehuishoudens'] = df_huishouden_prognose[
        'particulierehuishoudens'].round().astype(int)
    df_huishouden_prognose_pivot = pd.pivot_table(data=df_huishouden_prognose, values='particulierehuishoudens',
                                                  index=['gemeentenaam', 'interval'],
                                                  columns=['samenstellingvanhethuishouden'],
                                                  aggfunc=np.sum).reset_index()
    df_huishouden_prognose_pivot = df_huishouden_prognose_pivot[
        df_huishouden_prognose_pivot['interval'].astype(int) <= roundedto5periods]
    df_huishouden_prognose_pivot = rename_and_subset_cols(df=df_huishouden_prognose_pivot,
                                                          dict_rename=settings.predict[
                                                              'DICT_COLS_RENAMED_HUISHOUDEN_PIVOT'],
                                                          list_cols=settings.predict[
                                                              'LIST_COLS_SUBSET_HUISHOUDING_PIVOT'])
    print("Get 'prognose bevolking' tables")
    df_population_prognose = get_and_combine_cbs_tables(dict_tables=settings.predict['DICT_TABLES_BEVOLKING'],
                                                        url=settings.get_data['CBS_OPEN_URL'])
    df_population_prognose = rename_and_subset_cols(df=df_population_prognose,
                                                    dict_rename=settings.predict['DICT_COLS_RENAMED_BEVOLKING'],
                                                    list_cols=settings.predict['LIST_COLS_SUBSET_BEVOLKING'])
    # 'perioden' ends with the year; keep only that token.
    df_population_prognose['interval'] = df_population_prognose['perioden'].apply(lambda x: x.split(' ')[-1])
    df_population_prognose = df_population_prognose[
        (df_population_prognose['gemeentenaam'].str.contains('(CR)') == False) &
        (df_population_prognose['gemeentenaam'].str.contains('(PV)') == False) &
        (df_population_prognose['gemeentenaam'] != 'Nederland')].copy()
    df_population_prognose = df_population_prognose[df_population_prognose['interval'].astype(int) <= roundedto5periods]
    # Inhabitants are also published in thousands.
    df_population_prognose['aantalinwoners'] = df_population_prognose['aantalinwoners'] * 1000
    df_population_prognose['aantalinwoners'] = df_population_prognose['aantalinwoners'].round().astype(int)
    df_population_prognose = df_population_prognose.drop(['perioden'], axis=1)
    # Merge all dataframes
    df_prognoses = pd.merge(df_regioindeling, df_huishouden_prognose_pivot, how='left',
                            left_on=['gemeentenaam'], right_on=['gemeentenaam'])
    df_prognoses = pd.merge(df_prognoses, df_population_prognose, how='left',
                            left_on=['gemeentenaam', 'interval'], right_on=['gemeentenaam', 'interval'])
    print(f"Shape of df_prognoses = {df_prognoses.shape}")
    # Concat with original 'get data' dataframe (incl. drop multiplicacities that don't occur in original dataset)
    list_unchanged_multiplicacities = df_get_data[df_get_data['interval'] == df_get_data['interval'].max()][
        'codering_regio'].unique()
    df_prognoses = df_prognoses[df_prognoses['codering_regio'].isin(list_unchanged_multiplicacities)]
    print(f"Shape of df_prognoses = {df_prognoses.shape}")
    df_future = pd.concat([df_get_data, df_prognoses], axis=0)
    df_future = df_future.sort_values(['codering_regio', 'interval']).reset_index().drop(['index'], axis=1)
    print(f"Shape of df_future = {df_future.shape}")

    ## Extend dataframe for blancs
    print("Start extending blancs in DataFrame with future values")
    # Determine columns for each imputing strategy: ffill (strings), group interpolation
    # (prognosis-covered / invariable / unused columns), relational impute (the rest).
    list_cols_prognoses = df_prognoses.columns
    # list_cols_prognoses_str = [x for x in list(df_prognoses.loc[:, df_prognoses.dtypes == object].columns) if x!='codering_regio']
    list_cols_prognoses_num = list(df_prognoses.loc[:, df_prognoses.dtypes != object].columns)
    list_all_columns = list(df_future.columns)
    list_cols_str = list(df_future.loc[:, df_future.dtypes == object].columns)
    list_cols_str = list(set(list_cols_str) - set(list_cols_prognoses))
    list_cols_trained_model = settings.predict['LIST_COLS_TRAINED_MODEL']
    list_cols_trained_model = list(set([x.replace('relative_', '') for x in list_cols_trained_model]))
    list_cols_relate_imputer = list(
        set(list_cols_trained_model) - set(settings.predict['LIST_COLS_TRAINED_MODEL_INVARIABLY']) - set(
            list_cols_prognoses))
    list_cols_group_imputer = list(set(list_all_columns) - set(list_cols_str) - set(list_cols_relate_imputer))
    # ffill for string columns
    print("ffill for string columns")
    df_future.loc[:, list_cols_str] = df_future.loc[:, list_cols_str].ffill()
    print(f"Shape of df_future = {df_future.shape}")
    # Group imputer for available future / invariably columns / columns not used in trained model
    print("Group imputer for available future / invariably columns / columns not used in trained model")
    GII = GroupInterpolateImputer(groupcols=settings.predict['GROUP_INTERPOLATE_IMPUTER_GROUPCOLS'],
                                  interpolate_method=settings.predict['GROUP_INTERPOLATE_IMPUTER_METHOD'],
                                  cols=list_cols_group_imputer)
    df_future = GII.fit_transform(df_future)
    print(f"Shape of df_future = {df_future.shape}")
    # Relational imputer for other columns in trained model:
    # impute each column as (2019 ratio to inhabitants) x (inhabitants in that year).
    print("Relational imputer for other columns in trained model")
    base_col = 'aantalinwoners'
    # future_years = ['2020', '2021', '2022', '2023', '2024', '2025']
    all_relate_cols_necessary = settings.predict['LIST_COLS_GROUPER_RELATE_IMPUTER'] + list_cols_relate_imputer + [
        base_col]
    df_base_year = df_future[df_future['interval'] == '2019'][all_relate_cols_necessary]
    df_base_year.loc[:, list_cols_relate_imputer] = df_base_year.loc[:, list_cols_relate_imputer].div(
        df_base_year[base_col], axis=0)
    # NOTE(review): total_periods[-1] is an int while 'interval' holds strings elsewhere
    # (the scratch version compared against '2025') — this filter may match nothing;
    # verify interval dtype at this point.
    df_base_year = df_base_year[df_base_year['codering_regio'].isin(
        df_future[df_future['interval'] == total_periods[-1]].codering_regio.unique())]
    df_future = df_future.set_index('codering_regio')
    for col in list_cols_relate_imputer:
        # Seed with inhabitants, then scale by the region's base-year ratio (index-aligned).
        df_future.loc[:, col] = df_future.loc[:, base_col]
        df_future.loc[:, col] = df_future.loc[:, col] * df_base_year.set_index('codering_regio')[col]
    print(f"Shape of df_future = {df_future.shape}")
    df_future = df_future[df_future['interval'].isin(total_periods_str)].reset_index()
    df_future = df_future.set_index(['codering_regio', 'interval'])
    print(f"Shape of df_future = {df_future.shape}")

    ## Preprocess
    df_preprocessed = preprocess_data(df=df_future, save_all=False, personal_note='predict')
    df_preprocessed = df_preprocessed.drop(settings.Y_TARGET_COLS, axis=1)

    ## Predict
    y_preds = trained_model.predict(df_preprocessed)

    # Save
    # # ?

    return y_preds
notebooks/Develop_predict.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fake-review classification notebook: word-frequency exploration, a hand-rolled
# WordNet-similarity KNN, and sklearn KNeighborsClassifier baselines.
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score, roc_auc_score, average_precision_score
import pickle
# -

train_data = pd.read_csv('train.csv')
val_data = pd.read_csv('dev.csv')

# label == 1 -> genuine review, label == 0 -> fake (as used by this split).
notfake = train_data[train_data['label']== 1]
fake = train_data[train_data['label'] == 0]
true_word_list = []

# +
# Most-common-word comparison between genuine and fake reviews.
for i in range(0,notfake['review'].count()):
    true_word_temp = notfake['review'].iloc[i].split()
    true_word_list.append(true_word_temp)

from itertools import chain
list1 = list(chain.from_iterable(true_word_list))

from collections import Counter
true_mc = Counter(list1).most_common()

fake_word_list= fake['review'].str.split(expand=True).stack()

from collections import Counter
fake_mc = Counter(fake_word_list).most_common()
# -

df_true = pd.DataFrame(true_mc)
df_false = pd.DataFrame(fake_mc)
# Top-23 words of each class; df_common = words common to both top lists.
df_false1 = df_false[0:23]
df_true1 = df_true[0:23]
df_common = list(set(df_true1.iloc[:,0]) & set(df_false1.iloc[:,0]))
df_common
notfake

# https://towardsdatascience.com/text-classification-using-k-nearest-neighbors-46fa8a77acc5

# +
import nltk
from nltk.corpus import wordnet as wn
from nltk.corpus import genesis
nltk.download('genesis')
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
genesis_ic = wn.ic(genesis, False, 0.0)

import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.stem import SnowballStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.corpus import stopwords
from sklearn.metrics import roc_auc_score
# -

class KNN_NLC_Classifer():
    # 1-NN text classifier based on pairwise document similarity.
    def __init__(self, k=1, distance_type = 'path'):
        self.k = k
        self.distance_type = distance_type

    # This function is used for training
    def fit(self, x_train, y_train):
        self.x_train = x_train
        self.y_train = y_train

    # This function runs the K(1) nearest neighbour algorithm and
    # returns the label with closest match.
    # NOTE(review): self.document_similarity is never defined on this class in this
    # chunk — the similarity helpers below are module-level defs taking `self`;
    # presumably they were meant to be methods. As written, predict raises
    # AttributeError. Also O(len(x_test) * len(x_train)) similarity calls.
    def predict(self, x_test):
        self.x_test = x_test
        y_predict = []
        for i in range(len(x_test)):
            max_sim = 0
            max_index = 0
            for j in range(self.x_train.shape[0]):
                temp = self.document_similarity(x_test[i], self.x_train[j])
                if temp > max_sim:
                    max_sim = temp
                    max_index = j
            y_predict.append(self.y_train[max_index])
        return y_predict

# +
def convert_tag(self, tag):
    """Convert the tag given by nltk.pos_tag to the tag used by wordnet.synsets"""
    # Only noun/adjective/adverb/verb tags map to WordNet POS; anything else -> None.
    tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
    try:
        return tag_dict[tag[0]]
    except KeyError:
        return None

def doc_to_synsets(self, doc):
    """
    Returns a list of synsets in document.
    Tokenizes and tags the words in the document doc.
    Then finds the first synset for each word/tag combination.
    If a synset is not found for that combination it is skipped.

    Args:
        doc: string to be converted

    Returns:
        list of synsets
    """
    tokens = word_tokenize(doc+' ')
    l = []
    # Single-token docs get a padded tag call; pos_tag returns (word, tag) pairs,
    # so tag[1] below is the POS string.
    tags = nltk.pos_tag([tokens[0] + ' ']) if len(tokens) == 1 else nltk.pos_tag(tokens)
    for token, tag in zip(tokens, tags):
        syntag = self.convert_tag(tag[1])
        syns = wn.synsets(token, syntag)
        if (len(syns) > 0):
            l.append(syns[0])
    return l
# +
def similarity_score(self, s1, s2, distance_type = 'path'):
    """
    Calculate the normalized similarity score of s1 onto s2
    For each synset in s1, finds the synset in s2 with the largest similarity value.
    Sum of all of the largest similarity values and normalize this value by dividing it by the
    number of largest similarity values found.

    Args:
        s1, s2: list of synsets from doc_to_synsets

    Returns:
        normalized similarity score of s1 onto s2
    """
    s1_largest_scores = []
    for i, s1_synset in enumerate(s1, 0):
        max_score = 0
        for s2_synset in s2:
            if distance_type == 'path':
                score = s1_synset.path_similarity(s2_synset, simulate_root = False)
            else:
                score = s1_synset.wup_similarity(s2_synset)
            if score != None:
                if score > max_score:
                    max_score = score
        if max_score != 0:
            s1_largest_scores.append(max_score)
    # NOTE(review): np.mean([]) is nan with a RuntimeWarning when no synset pair
    # scored > 0 — callers comparing `temp > max_sim` will silently skip nan.
    mean_score = np.mean(s1_largest_scores)
    return mean_score
# -

# NOTE(review): train_path / val_path are not defined anywhere in this chunk —
# presumably 'train.csv' / 'dev.csv' as above; verify.
train = pd.read_csv(train_path)
validation = pd.read_csv(val_path)

# +
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer

# Binary bag-of-words and binary tf-idf representations, fit on train only.
cnt_vectorizer = CountVectorizer(stop_words='english', binary=True)
tfidf_vectorizer = TfidfVectorizer(stop_words='english', binary=True)
cnt_vectorizer.fit(train['review'])
tfidf_vectorizer.fit(train['review'])
cnt_X_train = cnt_vectorizer.transform(train['review'])
tfidf_X_train = tfidf_vectorizer.transform(train['review'])
cnt_X_dev = cnt_vectorizer.transform(validation['review'])
tfidf_X_dev = tfidf_vectorizer.transform(validation['review'])
Y_train = train['label']
Y_dev = validation['label']
# -

# Baselines: sklearn KNN on the count features with k = 5, 10, 15.
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(cnt_X_train, Y_train)
y_pred = classifier.predict(cnt_X_train)
y_pred
y_new_pred = classifier.predict(cnt_X_dev)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(Y_dev, y_new_pred))
print(classification_report(Y_dev, y_new_pred))
classifier10 = KNeighborsClassifier(n_neighbors=10)
classifier10.fit(cnt_X_train, Y_train)
y_new10_pred = classifier10.predict(cnt_X_dev)
print(confusion_matrix(Y_dev, y_new10_pred))
print(classification_report(Y_dev, y_new10_pred))
classifier15 = KNeighborsClassifier(n_neighbors=15)
classifier15.fit(cnt_X_train, Y_train)
y_new15_pred = classifier15.predict(cnt_X_dev)
print(confusion_matrix(Y_dev, y_new15_pred))
print(classification_report(Y_dev, y_new15_pred))


def _evaluate_knn(n_neighbors):
    """Fit a KNN on the count features and print its dev-set diagnostics.

    Replaces four byte-identical copies of the same create/fit/predict/print
    boilerplate.  Returns the fitted classifier and its dev-set predictions
    so the module-level names used further down keep working.
    """
    clf = KNeighborsClassifier(n_neighbors=n_neighbors)
    clf.fit(cnt_X_train, Y_train)
    preds = clf.predict(cnt_X_dev)
    print(confusion_matrix(Y_dev, preds))
    print(classification_report(Y_dev, preds))
    return clf, preds


classifier20, y_new20_pred = _evaluate_knn(20)
classifier11, y_new11_pred = _evaluate_knn(11)
classifier13, y_new13_pred = _evaluate_knn(13)
classifier14, y_new14_pred = _evaluate_knn(14)

# +
# copied from amelia
import json

base = '../data/processed/dev/'


def writeJsonFile(fname, data, base=base):
    """Serialise *data* as JSON to <base><fname>.json."""
    with open(base + fname + '.json', 'w') as outfile:
        json.dump(data, outfile)
    print('Successfully written to {}'.format(fname))


def readJsonFile(fname, base=base):
    """Load and return the JSON payload stored at <base><fname>.json."""
    with open(base + fname + '.json', 'r') as f:
        data = json.load(f)
    return data
# -

# copied from amelia
# Reload previously-recorded sweep results into a dict of lists; the literal
# below only documents the expected columns and is immediately overwritten.
knn_model_params = {'neighbor': [], 'train_accuracy': [], 'test_accuracy': [],
                    'test_auc': [], 'test_ap': []}
results_path = '../../data/processed/knn_unbalanced_results_drh382.csv'
x = pd.read_csv(results_path)
knn_model_params = x.to_dict(orient='list')

# +
from sklearn.metrics import accuracy_score, roc_auc_score, average_precision_score


# FIX: ClassifierMetrics was originally defined *after* the SVM sweep that
# calls it; moved above so the call resolves in notebook execution order.
def ClassifierMetrics(X_train, Y_train, X_test, Y_test, fitted_model):
    """
    X_train: training set features
    Y_train: training set labels
    X_test: dev/test set features
    Y_test: dev/test set labels
    fitted_model: an already-fitted sklearn classifier exposing
        predict_proba / predict / score
    """
    # Y_score = fitted_model.predict_proba(X_test)
    # If your classifier doesn't output decision_function, use predict_proba.
    # Make sure it is taking the prob of the '1' class
    Y_score = fitted_model.predict_proba(X_test)[:, 1]
    Y_pred = fitted_model.predict(X_test)
    metrics = {'train_accuracy': fitted_model.score(X_train, Y_train),
               'test_accuracy': fitted_model.score(X_test, Y_test),
               'test_auc_pred': roc_auc_score(Y_test, Y_pred),
               'test_auc_score': roc_auc_score(Y_test, Y_score),
               'test_ap_pred': average_precision_score(Y_test, Y_pred),
               'test_ap_score': average_precision_score(Y_test, Y_score)}
    return metrics
# -

# copied from amelia
# NOTE(review): this SVM sweep was pasted from another notebook and refers
# to names never defined in this file (kernels, cs, Timer, svm_model_params,
# fitted_svm) -- it raises NameError as-is.  Kept for reference; define
# those names (or delete the cell) before running.
k = kernels[0]
print('Now trying: ' + k + ' kernels...')
for c in cs[2:]:
    t = Timer()
    t.start()
    svm_model_params['kernel'].append(k)
    svm_model_params['C'].append(c)
    params = {'kernel': k, 'C': c, 'gamma': 'scale', 'random_state': 519}
    fitted_model = fitted_svm(params)
    metrics = ClassifierMetrics(tfidf_X_train, Y_train, tfidf_X_dev,
                                Y_dev, fitted_model)
    for m_k in metrics.keys():
        svm_model_params[m_k].append(metrics[m_k])
    elapsed = t.stop()
    svm_pd = pd.DataFrame(svm_model_params)
    results_path = '../../data/processed/svm_svc_tfidf_unbalanced_results_ac4119.csv'
    svm_pd.to_csv(results_path, sep=',')
    print('Elapsed time:', elapsed)

# NOTE(review): a second, byte-identical copy of writeJsonFile/readJsonFile
# (and of `import json` / `base`) appeared here in the original; the
# duplicate definitions have been removed.

# +
# Add more of your fitted models to my_models if there are any
# assuming that the suffix for each model is the n_neighbors
# and n_neighbors is the only parameter you adjusted.
my_models = [(10, classifier10), (11, classifier11), (13, classifier13), (14, classifier14), (15, classifier15), (20, classifier20), (5, classifier)] # - all_attempts = [] for (n, fitted_model) in my_models: params = {'n_neighbors': n} metrics = ClassifierMetrics (cnt_X_train, Y_train, cnt_X_dev, Y_dev, fitted_model) model_attempt_details = {'params': params, 'metrics': metrics} all_attempts.append(model_attempt_details) # File name of the model attempts/results base = '/Users/robin/Documents/GitHub/fake-review-detection-project/data/processed/dev/' fname = 'all_attempts_drh382' writeJsonFile(fname, all_attempts,base) classifier30 = KNeighborsClassifier(n_neighbors=30) classifier30.fit(cnt_X_train, Y_train) classifier50 = KNeighborsClassifier(n_neighbors=50) classifier50.fit(cnt_X_train, Y_train) second_try = [(5, smoteKNN5)] second_attempts = [] for (n, fitted_model) in second_try: params = {'n_neighbors': n} metrics = ClassifierMetrics (cnt_X_train, Y_train, cnt_X_dev, Y_dev, fitted_model) model_attempt_details = {'params': params, 'metrics': metrics} all_attempts.append(model_attempt_details) from imblearn.over_sampling import SMOTE ros = SMOTE(random_state=0) ros_train, ros_y = ros.fit_resample(cnt_X_train, Y_train) smoteKNN5 = KNeighborsClassifier(n_neighbors=5) smoteKNN5.fit(cnt_X_train, Y_train) # + smote_try = # - backup_attempts = all_attempts backup_attempts # !PWD all_attempts cnt_X_train cnt_X_train cnt_X_train
notebooks/drh382/dee_KNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- ''' URL BASE = 'https://api.coingecko.com/api/v3/coins/' (works ) ''' # + base = 'https://api.coingecko.com/api/v3/' url = base + 'coins/bitcoin' page = requests.get(url) # + coin_names = ['bitcoin', 'ehtereum', 'monero'] for coin in coin_names: base = 'https://api.coingecko.com/api/v3/coins/' url = base + coin page = requests.get(url) # -
Week_03_Binary/Downloading and codes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://colab.research.google.com/github/davemlz/eemont/blob/master/tutorials/020-Overloaded-Operators-Number.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="jZEthLln92Ep" # # Overloaded Operators for the ee.Number Object Class # + [markdown] id="dNa470OZ8Oec" # - GitHub Repo: [https://github.com/davemlz/eemont](https://github.com/davemlz/eemont) # - PyPI link: [https://pypi.org/project/eemont/](https://pypi.org/project/eemont/) # - Conda-forge: [https://anaconda.org/conda-forge/eemont](https://anaconda.org/conda-forge/eemont) # - Documentation: [https://eemont.readthedocs.io/](https://eemont.readthedocs.io/) # - More tutorials: [https://github.com/davemlz/eemont/tree/master/tutorials](https://github.com/davemlz/eemont/tree/master/tutorials) # + [markdown] id="CD7h0hbi92Er" # ## Let's start! # + [markdown] id="E0rc6Cya92Es" # If required, please uncomment: # + id="NYzyvKtk92Es" # #!pip install eemont # #!pip install geemap # + [markdown] id="x3Rm3qt_92Et" # Import the required packges. # + id="H0C9S_Hh92Et" import ee, eemont, geemap # + [markdown] id="k1sdX2p592Eu" # Authenticate and Initialize Earth Engine and geemap. # + id="7QDXqVwy8Oef" Map = geemap.Map() # - # Let's define some ee.Number objects as NIR and Red values for NDVI computation: R1 = ee.Number(0.20) R2 = ee.Number(0.12) N1 = ee.Number(0.64) N2 = ee.Number(0.58) # ## Overloaded Operators # `eemont` has overloaded the binary operators, rich comparisons and unary operators in the following list for the `ee.Number` class: # # (+, -, \*\, /, //, %, \**\, <<, >>, &, |, <, <=, ==, !=, >, >=, -, ~) # # Therefore, you can now use them for number operations! 
NDVI1 = (N1 - R1)/(N1 + R1) NDVI2 = (N2 - R2)/(N2 + R2) # The result is stored as an ee.Number class. Let's check the first one: NDVI1.getInfo() # Now, let's see the second result: NDVI2.getInfo()
tutorials/020-Overloaded-Operators-Number.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# +
knitr::opts_chunk$set(echo = TRUE)

require(tidyverse)
require(data.table)
require(reshape2)

# Install additional packages
install.packages("ggpubr")
install.packages("here")
install.packages("cowplot")
install.packages("R.utils")

require(ggpubr)
require(here)
require(cowplot)
require(R.utils)
# -

# +
# Project generated
knitr::opts_chunk$set(echo = TRUE)

dd_genes <- fread(here('data/discordant_consensus_novel_DD_associated_genes_kaplanis2020.txt'),header=FALSE,data.table=TRUE)[,V1]
hotspot_genes <- fread(here('data/hotspot_genes.txt'),header=FALSE,data.table=TRUE)[,V1]
novel_hotspot_genes <- fread(here('data/proposed_novel_hotspot_genes.txt'),header=FALSE,data.table=TRUE)[,V1]
ion_channel_genes <- fread(here('data/All_genes_with_PF00520_domain.txt'),header=FALSE,data.table=TRUE)[,V1]
# FIX: header=/data.table= were accidentally passed to here() instead of
# fread() (misplaced parenthesis), so fread() fell back to guessing.
hotspot_genetic_positions <- fread(here('data/hotspot_to_genomic_positions_PF00520.txt'),header=TRUE,data.table=TRUE)

# External datasets
gene_tpm <- fread(here('data/external/GTEx_Analysis_2017-06-05_v8_RNASeQCv1.1.9_gene_median_tpm.gct.gz'),header=TRUE,data.table=TRUE)
constraint_in <- fread(here('data/external/gnomad.v2.1.1.lof_metrics.by_gene.txt'),header=TRUE,data.table=TRUE)
mpc_in <- fread(here('data/external/mpc_missense_constraint_multiple_regions_samocha17.txt'),header=TRUE,data.table=TRUE)
ddg2p_in <- fread(here('data/external/DDG2P_22-4-2021.csv'),header=TRUE,data.table=TRUE)
omim_in <- fread(here('data/external/genemap2.txt'),header=TRUE,data.table=TRUE)
# -

# ### Expression input data and counts

# +
# Make the four gene sets mutually exclusive: proposed novel hotspot genes
# are removed from the hotspot list, and hotspot genes from the DD list.
hotspot_genes <- hotspot_genes[!hotspot_genes %in% novel_hotspot_genes]
dd_genes <- dd_genes[!dd_genes %in% hotspot_genes]

# Parse gene TPM file: strip the Ensembl version suffix, melt to long
# format, and label every gene with its class and PF00520 status.
gene_tpm <- gene_tpm[,Gene_ID:=gsub("\\..*","",Name)]
gene_tpm <- data.table(melt(gene_tpm,id.vars = c('Description','Name','Gene_ID')))
colnames(gene_tpm) <- c('hgnc','ensembl_versioned','ensembl','tissue','tpm')
gene_tpm <- gene_tpm[,class:=ifelse(hgnc %in% novel_hotspot_genes,'Proposed Novel Hotspot Genes',
                             ifelse(hgnc %in% hotspot_genes,'Hotspot Genes',
                             ifelse(hgnc %in% dd_genes,'NDD-Associated Genes','Control Genes')))]
gene_tpm <- gene_tpm[,contains_ion_channel:=ifelse(hgnc %in% ion_channel_genes,TRUE,FALSE)]

head(gene_tpm)

gene_tpm_tab <- gene_tpm %>%
  group_by(class) %>%
  dplyr::select('hgnc','class') %>%
  unique() %>%
  count()
colnames(gene_tpm_tab) <- c('Class','Count')

#Table of counts by class (Control Genes, DD Consensus Genes, Hotspot Genes, Proposed Novel Hotspot Genes)
knitr::kable(gene_tpm_tab)
# -

# ### Figure 5A: Tissue expression of hotspot genes compared to control and other NDD genes

# +
#Set seed
set.seed(1255)

#Randomly sample NDD-associated and control Genes
sampleGenes <- function( iterations, sampling_size ) {
  out <- vector('list',length=iterations)
  for( i in 1:iterations ) {
    sampled_dat <- gene_tpm %>%
      filter(class!='Hotspot Genes' & class!='Proposed Novel Hotspot Genes') %>%
      group_by(tissue,class) %>%
      sample_n(sampling_size,replace=TRUE) %>%
      mutate(i=factor(i))
    out[[i]] <- data.table(sampled_dat)
  }
  out_dt <- do.call(rbind,out)
  return(out_dt)
}

hotspot_tpm <- gene_tpm %>% filter(class=='Hotspot Genes') %>% mutate(i=0)

#Bind sampled genes to hotspot genes; calculate the proportion expressed, mean, and standard deviation across sets
fig_5a <- rbind(sampleGenes(1000,19),hotspot_tpm) %>%
  group_by(tissue,class,i) %>%
  summarise( prop_expressed = sum(tpm > 1, na.rm=T) / n(), .groups = 'drop_last' ) %>%
  ungroup() %>%
  group_by(tissue,class) %>%
  summarise( mean = mean(prop_expressed),
             min = mean - sd(prop_expressed),
             max = mean + sd(prop_expressed),
             .groups = 'keep' ) %>%
  data.table()

#Figure 5A
a_fill <- c('Control Genes' = 'skyblue3', 'NDD-Associated Genes' = 'darkgreen', 'Hotspot Genes' = 'darkgoldenrod')
# FIX: the hotspot overlay referenced fig_a, an object that does not exist;
# it should draw from fig_5a like the rest of the plot.
a <- ggplot(fig_5a[class!='Proposed Novel Hotspot Genes' & class!='Hotspot Genes'],
            aes(x=tissue,y=mean,colour=class,ymin=min,ymax=max)) +
  geom_point(alpha=0.7,size=3,shape=15) +
  geom_linerange(alpha=0.5,size=3) +
  geom_point(data=fig_5a[class=='Hotspot Genes'],aes(x=tissue,y=mean,colour=class),size=5) +
  theme_pubr() +
  theme(axis.text = element_text(size=14),
        axis.title = element_text(size=16),
        axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
        legend.title = element_blank(),
        legend.text = element_text(size=14)) +
  scale_colour_manual(values=a_fill,
                      guide=guide_legend(override.aes = list(linetype = c(1,0,1),
                                                             shape = c(15,19,15),
                                                             size = c(3,5,3)))) +
  ylim(0,1) +
  xlab('') +
  ylab('Proportion expressed genes (TPM > 1)')

plot_grid(a,labels='AUTO',label_size=16)
ggsave('results/main/Figure_5A.svg',plot=last_plot(),device='svg',dpi=300,width=14,height=10)
# -

# ### Figure 5B: TPM distribution in brain and other tissues varies across gene sets

# +
#Compute median TPM in brain and other tissues
# NOTE(review): brain_tissues is not defined anywhere in this file --
# presumably a character vector of GTEx brain tissue names; confirm where
# it is meant to come from before running this chunk.
median_gene_tpm <- gene_tpm %>%
  mutate(tissue_group = ifelse(tissue %in% brain_tissues,'Brain','Other Tissues')) %>%
  group_by(ensembl_versioned,tissue_group) %>%
  mutate(median_tpm_tissuegroup = median(tpm)) %>%
  dplyr::select(-c('tpm','tissue')) %>%
  unique() %>%
  reshape2::dcast(formula=hgnc + class + ensembl_versioned + contains_ion_channel ~ tissue_group,
                  value.var='median_tpm_tissuegroup') %>%
  mutate(class = ifelse(hgnc %in% ion_channel_genes & class=='NDD-Associated Genes','PF00520-Containing NDD-Associated Genes',
                 ifelse(hgnc %in% ion_channel_genes & class=='Control Genes','PF00520-Containing Control Genes',class))) %>%
  data.table()

#Figure 5B
labels_b = c('NDD-Associated Genes' = 'NDD-Associated Genes',
             'PF00520-Containing NDD-Associated Genes' = 'PF00520 Domain-Containing\nNDD-Associated Genes',
             'Hotspot Genes' = 'Hotspot Genes',
             'Proposed Novel Hotspot Genes' = 'Proposed Novel Hotspot Genes')
b <- ggplot(median_gene_tpm,aes(x=log10(`Other Tissues`),
                                y=log10(Brain),
                                colour=factor(class,levels=c('Control Genes',
                                                             'NDD-Associated Genes',
                                                             'PF00520-Containing NDD-Associated Genes',
                                                             'Hotspot Genes',
                                                             'Proposed Novel Hotspot Genes')))) +
  geom_abline(slope=1,intercept=0,linetype='dashed',colour='black') +
  geom_density2d(data=median_gene_tpm[class=='Control Genes' & Brain!=0 & `Other Tissues`!=0],alpha=0.7) +
  geom_density2d(data=median_gene_tpm[class=='NDD-Associated Genes' & Brain!=0 & `Other Tissues`!=0],alpha=0.7) +
  geom_point(data=median_gene_tpm[class=='PF00520-Containing NDD-Associated Genes'],size=4) +
  geom_point(data=median_gene_tpm[class=='Hotspot Genes'],size=4) +
  geom_text(data=median_gene_tpm[class=='Proposed Novel Hotspot Genes'],
            aes(label=hgnc),size=5,fontface='italic',show.legend=FALSE) +
  theme_pubr() +
  theme(axis.text = element_text(size=14),
        axis.title = element_text(size=16),
        legend.title = element_blank(),
        legend.text = element_text(size=14),
        legend.position = c(0.75,0.13)) +
  # NOTE(review): by_row is not a documented guide_legend() argument
  # (byrow is) -- confirm intent; left unchanged here.
  scale_colour_manual(labels=labels_b,values=c('skyblue3', 'darkgoldenrod', 'darkgreen', 'grey40', 'tomato4'),
                      guide=guide_legend(override.aes = list(linetype=c(1,0,1,0,0),
                                                             shape=c(NA,19,NA,19,65)),
                                         nrow=5, by_row=TRUE)) +
  ylab('Log10 Median Expression in Brain Tissues') +
  xlab('Log10 Median Expression in Other Tissues') +
  coord_cartesian(xlim=c(-3,3),ylim=c(-3,3))

plot_grid(b,labels='B',label_size=16)
ggsave('results/main/Figure_5B.svg',plot=last_plot(),device='svg',dpi=300,width=14,height=10)

plot_grid(a,b,labels='AUTO',label_size=16,nrow=1,rel_widths = c(1.7,1),align='hv',axis='t')
ggsave('results/main/Figure_5.png',plot=last_plot(),device='png',dpi=300,width=20,height=9)
ggsave('results/main/Figure_5.svg',plot=last_plot(),device='svg',dpi=300,width=20,height=9)
# -

# ### Figure 4A: Constraint in hotspot and proposed novel hotspot genes

# +
#Get relevant constraint information
probabilistic_constraint <- c('pLI','pRec','pNull')
upper_bound_fraction <- c('oe_lof_upper','oe_mis_upper','oe_syn_upper')
observed_expected <- c('oe_lof','oe_mis','oe_syn','oe_lof_lower','oe_mis_lower','oe_syn_lower',upper_bound_fraction)
z_scores <- c('lof_z','mis_z','syn_z')
all_constraint <- unique(c(probabilistic_constraint,upper_bound_fraction,observed_expected,z_scores))

# NOTE: the original passed var.name= to melt(); that is not a melt()
# argument, so the molten column kept reshape2's default name 'variable',
# which the downstream filters depend on.  The bogus argument is dropped.
constraint <- constraint_in %>%
  subset(select=c('gene',all_constraint)) %>%
  reshape2::melt(id.vars = 'gene', value.name = 'value') %>%
  filter(!is.na(value)) %>%
  mutate(class=ifelse(gene %in% novel_hotspot_genes,'Proposed Novel Hotspot Genes',
               ifelse(gene %in% hotspot_genes,'Hotspot Genes',
               ifelse(gene %in% dd_genes,'NDD-Associated Genes','Control Genes'))))

fig_4a <- constraint %>%
  filter(variable %in% observed_expected) %>%
  group_by(class,variable) %>%
  mutate(variable=gsub('oe_','',variable)) %>%
  mutate(type=substr(variable,1,3)) %>%
  mutate(bound=ifelse(grepl('upper',variable),'upper',
               ifelse(grepl('lower',variable),'lower',''))) %>%
  group_by(class,type,bound) %>%
  summarise( mean = mean(value), .groups = 'drop_last' ) %>%
  mutate(bound=ifelse(bound=='','oe',bound)) %>%
  reshape2::dcast(formula=class + type ~ bound, value.var='mean')

#Figure 4A
a_labels = c('Loss of function','Missense','Synonymous')
a_levels = c('Control Genes','NDD-Associated Genes','Hotspot Genes','Proposed Novel Hotspot Genes')
a_x_labels = c('Control Genes\nn = 18648','NDD-Associated\nGenes\nn = 985','Hotspot Genes\nn = 19','Proposed Novel\nHotspot Genes\nn = 6')
a <- ggplot(fig_4a,aes(x=factor(class,levels=a_levels),y=oe,colour=type,ymin=lower,ymax=upper)) +
  geom_pointrange(position=position_dodge(width=1),size=1.6) +
  geom_hline(yintercept=1,linetype='dashed',colour='black') +
  theme_pubr() +
  theme(axis.text = element_text(size=14),
        axis.title = element_text(size=16),
        legend.title = element_blank(),
        legend.text = element_text(size=14)) +
  scale_x_discrete(labels=a_x_labels) +
  scale_colour_manual(labels=a_labels,
                      values=c('tomato4','darkorange3','deeppink4')) +
  ylab('Observed/expected counts') +
  xlab('')

# FIX: this chunk builds Figure 4A but the original saved it over
# results/main/Figure_5A.svg, clobbering the Figure 5A output.
ggsave('results/main/Figure_4A.svg',plot=last_plot(),device='svg',dpi=300,width=10,height=8)
# -

# ### Figure 4B: Mutation hotspots occur in missense constrained regions within genes

# +
#Get regional missense constraint at hotspot positions
# Normalise start/end so mpc_starts <= mpc_ends, and fix decimal commas.
mpc <- mpc_in[,mpc_starts:=ifelse(genomic_start<genomic_end,genomic_start,genomic_end)]
mpc <- mpc[,mpc_ends:=ifelse(genomic_start<genomic_end,genomic_end,genomic_start)]
mpc <- mpc[,obs_exp:=as.numeric(sub(',','.',obs_exp))]
mpc <- mpc[,gene_name:=gene]
mpc <- mpc[,gencode_transcript:=transcript]
mpc <- mpc[,c('gene_name','gencode_transcript','mpc_starts','mpc_ends','obs_exp','region_name')]

# Overlap each hotspot (codon -> min/max of its three base positions) with
# the regional-constraint intervals for the same gene/transcript.
getRegionalConstraint <- function( gene, transcript ) {
  mpc_temp <- mpc[gene_name==gene & gencode_transcript==transcript]
  temp_hotspots <- hotspot_genetic_positions[hgnc==gene & gencode_transcription_id==transcript]
  temp_hotspots <- temp_hotspots[,start_pos:=min(chromosome_position_base_pair_one,chromosome_position_base_pair_two,chromosome_position_base_pair_three),by=1:nrow(temp_hotspots)]
  temp_hotspots <- temp_hotspots[,end_pos:=max(chromosome_position_base_pair_one,chromosome_position_base_pair_two,chromosome_position_base_pair_three),by=1:nrow(temp_hotspots)]
  setkey(mpc_temp,mpc_starts,mpc_ends)
  setkey(temp_hotspots,start_pos,end_pos)
  overlaps <- foverlaps(temp_hotspots, mpc_temp, nomatch = 0)
  overlaps <- overlaps[,c('gene_name','gencode_transcript','obs_exp','region_name','mpc_starts','mpc_ends','start_pos','end_pos','hotspot')]
  return(overlaps)
}

genes <- unique(hotspot_genetic_positions[,hgnc])
regionalconstraint_out <- vector('list',length=nrow(hotspot_genetic_positions))
n = 1
for( gene in genes ) {
  transcripts <- unique(hotspot_genetic_positions[hgnc==gene,gencode_transcription_id])
  for( transcript in transcripts ) {
    regionalconstraint_out[[n]] <- getRegionalConstraint(gene, transcript)
    n = n + 1
  }
}
regional_constraint <- do.call(rbind,regionalconstraint_out)
regional_constraint <- regional_constraint[,class:=ifelse(gene_name %in% hotspot_genes,'Hotspot Genes',
                                                   ifelse(gene_name %in% novel_hotspot_genes,'Proposed Novel Hotspot Genes',
                                                   ifelse(gene_name %in% dd_genes,'NDD-Associated Genes','Control Genes')))]
colnames(regional_constraint) <- c('gene','gencode_transcript','obs_exp','region_name','mpc_starts','mpc_ends','hotspot_starts','hotspot_ends','hotspot','class')

#Get global missense constraint
global_oe_mis <- constraint %>% filter(variable=='oe_mis')

#Add global missense constraint to regional constraint infile and parse
fig_4b <- regional_constraint %>%
  inner_join(global_oe_mis,by='gene') %>%
  dplyr::select(obs_exp,value,gene,class.x,hotspot) %>%
  mutate(obs_exp=ifelse(obs_exp>1,1,obs_exp),
         value=ifelse(value>1,1,value)) %>%
  unique() %>%
  data.table()

#Figure 4B
b_labels <- c('Control Genes' = 'PF00520 Domain-Containing\nControl Genes\nn = 28',
              'Hotspot Genes' = 'Hotspot Genes\nn = 16',
              'NDD-Associated Genes' = 'PF00520 Domain-Containing\nNDD-Associated Genes\nn = 7')
b <- ggplot(fig_4b[class.x!='Proposed Novel Hotspot Genes'],aes(x=class.x,y=obs_exp,fill=class.x)) +
  geom_boxplot(alpha=0.8) +
  theme_pubr() +
  theme(legend.position = 'none',
        axis.text = element_text(size=14),
        axis.title = element_text(size=16),
        legend.title = element_blank(),
        legend.text = element_text(size=14)) +
  ggsignif::geom_signif(
    comparisons = list(c('NDD-Associated Genes','Hotspot Genes'),
                       c('Control Genes','Hotspot Genes')),
    y_position = c(1.2,1.3), colour='black', test = 'wilcox.test') +
  scale_x_discrete(labels=b_labels) +
  scale_fill_manual(values=c('skyblue3','darkgoldenrod','darkgreen')) +
  ylab('Observed/expected regional missense constraint\nat PF00520 domains') +
  xlab('')

plot_grid(b,labels='B',label_size=16)
ggsave('results/main/Figure_4B.svg',plot=last_plot(),device='svg',dpi=300,width=10,height=8)

plot_grid(a,b,labels='AUTO',label_size=16,nrow=1,rel_widths = c(1.1,1),align='hv',axis='bt')
ggsave('results/main/Figure_4.svg',plot=last_plot(),device='svg',dpi=300,width=18,height=8) ggsave('results/main/Figure_4.png',plot=last_plot(),device='png',dpi=300,width=18,height=8) # - # ### Supplementary data # # # + #Supplementary Data S9: Mutational constraint in hotspot and proposed novel hotspot genes supp_data_s9 <- constraint_in %>% subset(select=c('gene',all_constraint)) %>% drop_na() %>% mutate(class=ifelse(gene %in% novel_hotspot_genes,'Proposed Novel Hotspot Genes', ifelse(gene %in% hotspot_genes,'Hotspot Genes', ifelse(gene %in% dd_genes,'NDD-Associated Genes','Control Genes')))) %>% filter(class=='Hotspot Genes' | class=='Proposed Novel Hotspot Genes') %>% arrange(class,pLI) %>% data.table() write.table(supp_data_s9,'results/supp/Supplementary_Data_S9_Constraint.txt',col.names=TRUE, row.names=FALSE,quote=FALSE,sep='\t') #Supplementary Data S10: Proportion of hotspot genes expressed across tissues prop_expressed <- gene_tpm %>% group_by(tissue,class) %>% summarise( num_expressed = sum(tpm > 1, na.rm=T), num_unexpressed = sum(tpm <= 1, na.rm=T), .groups = 'drop_last' ) %>% filter(class!='Proposed Novel Hotspot Genes') %>% data.table() getFisher <- function( class1, class2, tissue_type ) { num_expressed_1 <- unique(prop_expressed[class==class1 & tissue==tissue_type,num_expressed]) num_expressed_2 <- unique(prop_expressed[class==class2 & tissue==tissue_type,num_expressed]) num_unexpressed_1 <- unique(prop_expressed[class==class1 & tissue==tissue_type,num_unexpressed]) num_unexpressed_2 <- unique(prop_expressed[class==class2 & tissue==tissue_type,num_unexpressed]) class1_prop <- c(num_expressed_1,num_unexpressed_1) class2_prop <- c(num_expressed_2,num_unexpressed_2) p <- fisher.test(cbind(class1_prop,class2_prop))$p.value return(p) } prop_expressed <- prop_expressed[,Fisher_p_controls:=getFisher('Hotspot Genes','Control Genes',tissue),by=tissue] prop_expressed <- prop_expressed[,Fisher_p_NDD:=getFisher('Hotspot Genes','NDD-Associated 
Genes',tissue),by=tissue] prop_expressed <- prop_expressed[,Fisher_p_controls_bonferroni:=Fisher_p_controls*54] #Bonferroni correct over 54 GTEx tissues prop_expressed <- prop_expressed[,Fisher_p_NDD_bonferroni:=Fisher_p_NDD*54] #Bonferroni correct over 54 GTEx tissues supp_data_s10 <- prop_expressed %>% mutate(Fisher_p_controls_bonferroni=ifelse(Fisher_p_controls_bonferroni>1,1,Fisher_p_controls_bonferroni), Fisher_p_NDD_bonferroni=ifelse(Fisher_p_NDD_bonferroni>1,1,Fisher_p_NDD_bonferroni)) %>% dplyr::select(class,tissue,num_expressed,num_unexpressed,Fisher_p_controls,Fisher_p_NDD,Fisher_p_controls_bonferroni,Fisher_p_NDD_bonferroni) %>% data.table() colnames(supp_data_s10) <- c('Class','GTEx_Tissue','Num_Expressed Genes','Num_Unexpressed_Genes', 'Fisher_P_Controls','Fisher_P_NDD','Fisher_P_Controls_Bonferroni', 'Fisher_P_NDD_Bonferroni') write.table(supp_data_s10,'results/supp/Supplementary_Data_S10_Proportion_hotspot_genes_expressed_across_tissues_all.txt', col.names=TRUE,row.names=FALSE,quote=FALSE,sep="\t") #Supplementary data S11: Proportion of hotspot genes expressed across tissues, PF00520 domain-containing genes prop_expressed <- gene_tpm %>% filter(contains_ion_channel==TRUE) %>% group_by(tissue,class) %>% summarise( num_expressed = sum(tpm > 1, na.rm=T), num_unexpressed = sum(tpm <= 1, na.rm=T), .groups = 'drop_last' ) %>% filter(class!='Proposed Novel Hotspot Genes') %>% data.table() prop_expressed <- prop_expressed[,Fisher_p_controls:=getFisher('Hotspot Genes','Control Genes',tissue),by=tissue] prop_expressed <- prop_expressed[,Fisher_p_NDD:=getFisher('Hotspot Genes','NDD-Associated Genes',tissue),by=tissue] prop_expressed <- prop_expressed[,Fisher_p_controls_bonferroni:=Fisher_p_controls*54] #Bonferroni correct over 54 GTEx tissues prop_expressed <- prop_expressed[,Fisher_p_NDD_bonferroni:=Fisher_p_NDD*54] #Bonferroni correct over 54 GTEx tissues supp_data_s11 <- prop_expressed %>% 
mutate(Fisher_p_controls_bonferroni=ifelse(Fisher_p_controls_bonferroni>1,1,Fisher_p_controls_bonferroni), Fisher_p_NDD_bonferroni=ifelse(Fisher_p_NDD_bonferroni>1,1,Fisher_p_NDD_bonferroni)) %>% dplyr::select(class,tissue,num_expressed,num_unexpressed,Fisher_p_controls,Fisher_p_NDD,Fisher_p_controls_bonferroni,Fisher_p_NDD_bonferroni) %>% data.table() colnames(supp_data_s11) <- c('Class','GTEx_Tissue','Num_Expressed Genes','Num_Unexpressed_Genes', 'Fisher_P_Controls','Fisher_P_NDD','Fisher_P_Controls_Bonferroni', 'Fisher_P_NDD_Bonferroni') write.table(supp_data_s11,'results/supp/Supplementary_Data_S11_Proportion_hotspot_genes_expressed_across_tissues_PF00520.txt', col.names=TRUE,row.names=FALSE,quote=FALSE,sep="\t") #Supplementary data S12: PDFs for assignment of proposed novel hotspot genes to hotspot gene or NDD-associated/control gene distributions median_gene_tpm_dist <- median_gene_tpm %>% filter(class=='Hotspot Genes' | class=='Control Genes' | contains_ion_channel==FALSE) %>% data.table() median_gene_tpm_test <- median_gene_tpm %>% filter(class=='Proposed Novel Hotspot Genes' | (class=='NDD-Associated Genes' & contains_ion_channel==TRUE)) %>% reshape2::melt(measure.vars = c('Brain','Other Tissues')) %>% data.table() getClassification <- function( comparison_class, tissue_type ) { gene_vec <- median_gene_tpm_test[,hgnc] class_vec <- median_gene_tpm_test[,class] tissue_vec <- median_gene_tpm_test[,variable] mean_hotspot <- mean(log10(median_gene_tpm_dist[class=='Hotspot Genes' & get(tissue_type)!=0,get(tissue_type)])) sd_hotspot <- sd(log10(median_gene_tpm_dist[class=='Hotspot Genes' & get(tissue_type)!=0,get(tissue_type)])) mean_other <- mean(log10(median_gene_tpm_dist[class==comparison_class & get(tissue_type)!=0,get(tissue_type)])) sd_other <- sd(log10(median_gene_tpm_dist[class==comparison_class & get(tissue_type)!=0,get(tissue_type)])) probabilitydensity_hotspot <- dnorm(log10(median_gene_tpm_test[,value]), mean = mean_hotspot, sd = sd_hotspot) 
probabilitydensity_other <- dnorm(log10(median_gene_tpm_test[,value]), mean = mean_other, sd = sd_other) classification <- ifelse(probabilitydensity_hotspot > probabilitydensity_other,'Hotspot Distribution',paste0(comparison_class,' Distribution')) classification_out <- data.table(Gene = gene_vec, Class = class_vec, Comparison_Tissue = tissue_vec, Distribution_Used = comparison_class, PD_Hotspot = probabilitydensity_hotspot, PD_Other = probabilitydensity_other, Classification = classification) classification_out <- classification_out[Comparison_Tissue==tissue_type] return(classification_out) } supp_data_s12 <- rbind(getClassification('Control Genes','Brain'), getClassification('NDD-Associated Genes','Other Tissues')) %>% plyr::arrange(Comparison_Tissue,Class,Classification) %>% data.table() knitr::kable(supp_data_s12) write.table(supp_data_s12,'results/supp/Supplementary_Data_S12_PDFs_for_proposed_novel_hotspot_classification.txt', col.names=TRUE,row.names=FALSE,quote=FALSE,sep="\t") #Supplementary data S14: Gene sets used in analysis ddg2p_genes <- ddg2p_in %>% pull(`gene symbol`) %>% unique() omim_genes <- omim_in %>% separate_rows(`Gene Symbols`,sep=', ') %>% filter(Phenotypes!='') %>% pull(`Gene Symbols`) %>% unique() constraint_genes <- constraint %>% pull(gene) %>% unique() supp_data_s14 <- gene_tpm %>% mutate(pf00520 = contains_ion_channel) %>% dplyr::select(hgnc,ensembl_versioned,class,pf00520) %>% mutate(ddg2p=ifelse(hgnc %in% ddg2p_genes,TRUE,FALSE), omim=ifelse(hgnc %in% omim_genes,TRUE,FALSE), constraint=ifelse(hgnc %in% constraint_genes,TRUE,FALSE)) %>% unique() %>% data.table() write.table(supp_data_s14,'results/supp/Supplementary_Data_S14_Gene_sets_all.txt', col.names=TRUE,row.names=FALSE,quote=FALSE,sep="\t") #Supplementary data S15: PF00520 domain-containing genes used in analysis supp_data_s15 <- supp_data_s14 %>% filter(pf00520==TRUE) %>% data.table() write.table(supp_data_s15,'results/supp/Supplementary_Data_S15_Gene_sets_PF00520.txt', 
col.names=TRUE,row.names=FALSE,quote=FALSE,sep="\t") # - # ### Supplementary tables # # # + #Supplementary table 4: Hotspot genes are enriched for gain-of-function mutation consequences in DDG2P nonhap_list <- c('dominant negative','gain of function','activating','increased gene dosage') hotspot_ddg2p_activating_genes <- length(unique(ddg2p_in[`gene symbol` %in% hotspot_genes & `mutation consequence` %in% nonhap_list,`gene symbol`])) hotspot_ddg2p_genes <- length(unique(ddg2p_in[`gene symbol` %in% hotspot_genes,`gene symbol`])) - hotspot_ddg2p_activating_genes ddg2p_activating_genes <- length(unique(ddg2p_in[`mutation consequence` %in% nonhap_list,`gene symbol`])) - hotspot_ddg2p_activating_genes ddg2p_genes <- length(unique(ddg2p_in[,`gene symbol`])) - hotspot_ddg2p_genes - ddg2p_activating_genes supp_table_4 <- rbind(c(hotspot_ddg2p_activating_genes,hotspot_ddg2p_genes), c(ddg2p_activating_genes,ddg2p_genes)) colnames(supp_table_4) <- c('Gain of Function Mutation Consequence','Other Mutation Consequence') row.names(supp_table_4) <- c('Hotspot Genes in DDG2P','Other DDG2P Genes') knitr::kable(supp_table_4) fisher.test(supp_table_4) #Supplementary table 5: NDD-associated genes have higher levels of constitutive expression than control genes constitutively_expressed_counts <- gene_tpm %>% group_by(ensembl_versioned) %>% mutate(expressed = ifelse(all(tpm < 1),'Unexpressed','Expressed')) %>% mutate(constitutively_expressed = ifelse(all(tpm >= 1),'Constitutively Expressed','Not Constitutively Expressed')) %>% ungroup() %>% dplyr::select(ensembl_versioned,class,expressed,constitutively_expressed) %>% filter(class=='Control Genes' | class=='NDD-Associated Genes') %>% unique() %>% count(class,expressed,constitutively_expressed) %>% group_by(class,constitutively_expressed) %>% summarise(n = sum(n), .groups = 'keep') %>% data.table() supp_table_5 <- rbind(constitutively_expressed_counts[class=='Control Genes',n], constitutively_expressed_counts[class=='NDD-Associated 
Genes',n]) colnames(supp_table_5) <- c('Constitutively Expressed','Not Constitutively Expressed') row.names(supp_table_5) <- c('Control Genes','NDD-Associated Genes') knitr::kable(supp_table_5) fisher.test(supp_table_5) # - # ### Supplementary figures # # # + #Supplementary figure 1: A significant proportion of hotspot genes have evidence of regional missense constraint compared to control and NDD-associated genes total_genes_rmc = length(unique(mpc_in[,gene])) dd_consensus_with_rmc = length(unique(mpc_in[gene %in% dd_genes,gene])) hotspot_with_rmc = length(unique(mpc_in[gene %in% hotspot_genes,gene])) novel_hotspot_with_rmc = length(unique(mpc_in[gene %in% novel_hotspot_genes,gene])) control_genes_with_rmc = total_genes_rmc - (dd_consensus_with_rmc + hotspot_with_rmc + novel_hotspot_with_rmc) hotspot_counts <- c(hotspot_with_rmc,length(hotspot_genes)) dd_consensus_counts <- c(dd_consensus_with_rmc,length(dd_genes)) control_counts = c(control_genes_with_rmc,length(unique(gene_tpm[class=='Control Genes',hgnc]))) supp_fig1 <- data.table(rmc_counts=c(novel_hotspot_with_rmc,hotspot_with_rmc,dd_consensus_with_rmc,control_genes_with_rmc), total_counts=c(length(novel_hotspot_genes),length(hotspot_genes),length(dd_genes),length(unique(gene_tpm[class=='Control Genes',hgnc]))), class=c('Proposed Novel Hotspot Genes','Hotspot Genes','NDD-Associated Genes','Control Genes')) supp_fig1 <- supp_fig1 %>% mutate(prop_rmc = rmc_counts/total_counts, prop_other = 1 - prop_rmc) %>% reshape2::melt(id.vars='class') %>% data.table() supp_fig1_x_labels = c('Control Genes' = 'Control Genes', 'Hotspot Genes' = 'Hotspot Genes', 'NDD-Associated Genes' = 'NDD-Associated Genes', 'Proposed Novel Hotspot Genes' = 'Proposed Novel\nHotspot Genes') supp_fig1_labels = c('Genes without Evidence of Regional Missense Constraint','Genes with Evidence of Regional Missense Constraint') supp_fig1_fill = c('darkgrey','tomato4') ggplot(supp_c[variable=='prop_rmc' | variable=='prop_other'], 
aes(x=class,y=value,fill=factor(variable,levels=c('prop_other','prop_rmc')))) + geom_col() + theme_pubr() + theme(legend.position = 'top', axis.text = element_text(size=14), axis.title = element_text(size=14), legend.title = element_blank(), legend.text = element_text(size=14)) + scale_fill_manual(values=supp_fig1_fill,labels=supp_fig1_labels) + scale_x_discrete(labels=supp_fig1_x_labels) + xlab('') + ylab('Proportion of Genes') ggsave('results/supp/Supplementary_Figure_1.svg',plot=last_plot(),device='svg',dpi=300,width=10,height=8) ggsave('results/supp/Supplementary_Figure_1.png',plot=last_plot(),device='png',dpi=300,width=10,height=8) fisher.test(rbind(hotspot_counts,control_counts)) fisher.test(rbind(hotspot_counts,dd_consensus_counts)) fisher.test(rbind(dd_consensus_counts,control_counts)) #Supplementary figure 2: Most mutation hotspots are more constrained for missense variation than their global missense constraint score would suggest ggplot(fig_4b,aes(x=value,y=obs_exp,colour=class.x)) + geom_point(size=7) + geom_abline(slope=1,intercept=0,colour='black',linetype='dashed') + theme_pubr() + theme(axis.text = element_text(size=14), axis.title = element_text(size=16), legend.position = 'top', legend.text = element_text(size=14), legend.title = element_blank()) + scale_colour_manual(values=c('skyblue3','darkgoldenrod','darkgreen','tomato4')) + xlim(0,1) + ylim(0,1) + xlab('Global missense constraint') + ylab('Regional missense constraint at hotspots') ggsave('results/supp/Supplementary_Figure_2.svg',plot=last_plot(),device='svg',dpi=300,width=10,height=8) ggsave('results/supp/Supplementary_Figure_2.png',plot=last_plot(),device='png',dpi=300,width=10,height=8) #Supplementary figure 3: A higher proportion of hotspot genes are expressed in brain than NDD-associated or control genes median_gene_tpm <- gene_tpm %>% mutate(tissue_group = ifelse(tissue %in% brain_tissues,'Brain','Other Tissues')) %>% group_by(ensembl_versioned,tissue_group) %>% 
mutate(median_tpm_tissuegroup = median(tpm)) %>% dplyr::select(-c('tpm','tissue')) %>% unique() %>% reshape2::dcast(formula=hgnc + class + ensembl_versioned + contains_ion_channel ~ tissue_group, value.var='median_tpm_tissuegroup') %>% data.table() supp_fig3 <- median_gene_tpm %>% group_by(class) %>% summarise( num_brain = sum(Brain > `Other Tissues`, na.rm=T), num_other_tissues = sum(`Other Tissues` > Brain, na.rm=T), total = n(), num_unexpressed = total - (num_brain + num_other_tissues), prop_brain = sum(Brain > `Other Tissues`, na.rm=T) / n(), prop_other = sum(`Other Tissues` > Brain, na.rm=T) / n(), prop_unexpressed = sum(Brain == `Other Tissues`, na.rm=T) / n(), .groups='drop_last' ) %>% reshape2::melt(id.vars=c('class')) %>% data.table() supp_fig3_x_labels = c('Control Genes','NDD-Associated Genes','Hotspot Genes','Proposed Novel\nHotspot Genes') supp_fig3_fill = c('prop_unexpressed' = 'darkgrey', 'prop_other' = 'steelblue3', 'prop_brain' = 'steelblue4') supp_fig3_levels = c('prop_unexpressed','prop_other','prop_brain') supp_fig3_class_levels = c('Control Genes','NDD-Associated Genes','Hotspot Genes','Proposed Novel Hotspot Genes') supp_fig3_fill_labels = c('prop_unexpressed' = 'Unexpressed', 'prop_other' = 'Other Tissues TPM > Brain TPM', 'prop_brain' = 'Brain TPM > Other Tissues TPM') ggplot(supp_fig3[variable %in% c('prop_brain','prop_other','prop_unexpressed')], aes(x=factor(class,levels=supp_fig3_class_levels), y=value,fill=factor(variable,levels=supp_fig3_levels))) + geom_col(colour='black',alpha=0.8) + theme_pubr() + theme(legend.position = 'top', axis.text = element_text(size=14), axis.title = element_text(size=14), legend.title = element_blank(), legend.text = element_text(size=14)) + scale_fill_manual(labels=supp_fig3_fill_labels,values=supp_fig3_fill) + scale_x_discrete(labels=supp_fig3_x_labels) + xlab('') + ylab('Proportion of Genes') ggsave('results/supp/Supplementary_Figure_3.svg',plot=last_plot(),device='svg',dpi=300,width=10,height=8) 
# PNG copy of Supplementary Figure 3 (SVG saved in the previous cell).
ggsave('results/supp/Supplementary_Figure_3.png', plot = last_plot(), device = 'png', dpi = 300, width = 10, height = 8)

# Brain-vs-other-tissue gene counts per class, as 2-vectors for Fisher's exact tests.
hotspot_counts <- c(supp_fig3[class == 'Hotspot Genes' & variable == 'num_brain', value],
                    supp_fig3[class == 'Hotspot Genes' & variable == 'num_other_tissues', value])
dd_counts <- c(supp_fig3[class == 'NDD-Associated Genes' & variable == 'num_brain', value],
               supp_fig3[class == 'NDD-Associated Genes' & variable == 'num_other_tissues', value])
control_counts <- c(supp_fig3[class == 'Control Genes' & variable == 'num_brain', value],
                    supp_fig3[class == 'Control Genes' & variable == 'num_other_tissues', value])
fisher.test(rbind(hotspot_counts, control_counts))
fisher.test(rbind(hotspot_counts, dd_counts))

# Supplementary figure 4: Proportion of hotspot genes expressed across tissues compared to
# PF00520 domain-containing NDD-associated genes and PF00520 domain-containing control genes.
# Per tissue and class: fraction of genes with TPM > 1.
supp_fig4 <- gene_tpm %>%
    filter(contains_ion_channel == TRUE, class != 'Proposed Novel Hotspot Genes') %>%
    group_by(tissue, class) %>%
    summarise(prop_expressed = sum(tpm > 1, na.rm = TRUE) / n(), .groups = 'keep') %>%
    data.table()

supp_fig4_colour <- c('Control Genes' = 'skyblue3',
                      'NDD-Associated Genes' = 'darkgreen',
                      'Hotspot Genes' = 'darkgoldenrod')
supp_fig4_labels <- c('Control Genes' = 'PF00520 Domain-Containing Control Genes',
                      'NDD-Associated Genes' = 'PF00520 Domain-Containing NDD-Associated Genes',
                      'Hotspot Genes' = 'Hotspot Genes')

ggplot(supp_fig4, aes(x = tissue, y = prop_expressed, colour = class)) +
    geom_point(size = 4) +
    theme_pubr() +
    theme(axis.text = element_text(size = 14),
          axis.title = element_text(size = 16),
          axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1),
          legend.title = element_blank(),
          legend.text = element_text(size = 14)) +
    scale_colour_manual(values = supp_fig4_colour, labels = supp_fig4_labels) +
    ylim(0, 1) +
    xlab('') +
    ylab('Proportion expressed genes (TPM > 1)')
ggsave('results/supp/Supplementary_Figure_4.svg', plot = last_plot(), device = 'svg', dpi = 300, width = 14, height = 10)
# PNG copy of Supplementary Figure 4.
ggsave('results/supp/Supplementary_Figure_4.png', plot = last_plot(), device = 'png', dpi = 300, width = 14, height = 10)

# Supplementary figure 5: A higher proportion of hotspot genes are expressed in brain
# than PF00520 domain-containing control genes.
# Restrict to PF00520 domain-containing genes, then count/compute proportions of genes
# with higher median TPM in brain vs other tissues, and melt to long format for plotting.
supp_fig5 <- median_gene_tpm %>%
    filter(contains_ion_channel == TRUE) %>%
    group_by(class) %>%
    summarise(num_brain = sum(Brain > `Other Tissues`, na.rm = TRUE),
              num_other_tissues = sum(`Other Tissues` > Brain, na.rm = TRUE),
              total = n(),
              num_unexpressed = total - (num_brain + num_other_tissues),
              prop_brain = sum(Brain > `Other Tissues`, na.rm = TRUE) / n(),
              prop_other = sum(`Other Tissues` > Brain, na.rm = TRUE) / n(),
              # NOTE(review): prop_unexpressed counts Brain == `Other Tissues`, while
              # num_unexpressed is total minus the two strict comparisons; these can
              # disagree when either median is NA -- confirm this is intended.
              prop_unexpressed = sum(Brain == `Other Tissues`, na.rm = TRUE) / n(),
              .groups = 'drop_last') %>%
    reshape2::melt(id.vars = c('class')) %>%
    data.table()

# Axis/legend cosmetics for the stacked-proportion plot.
supp_fig5_x_labels = c('PF00520 Domain-\nContaining\nControl Genes',
                       'PF00520 Domain-\nContaining\nNDD-Associated Genes',
                       'Hotspot Genes', 'Proposed Novel\nHotspot Genes')
supp_fig5_fill = c('prop_unexpressed' = 'darkgrey',
                   'prop_other' = 'steelblue3',
                   'prop_brain' = 'steelblue4')
supp_fig5_levels = c('prop_unexpressed', 'prop_other', 'prop_brain')
supp_fig5_class_levels = c('Control Genes', 'NDD-Associated Genes', 'Hotspot Genes', 'Proposed Novel Hotspot Genes')
supp_fig5_fill_labels = c('prop_unexpressed' = 'Unexpressed',
                          'prop_other' = 'Other Tissues TPM > Brain TPM',
                          'prop_brain' = 'Brain TPM > Other Tissues TPM')

ggplot(supp_fig5[variable %in% c('prop_brain', 'prop_other', 'prop_unexpressed')],
       aes(x = factor(class, levels = supp_fig5_class_levels),
           y = value,
           fill = factor(variable, levels = supp_fig5_levels))) +
    geom_col(colour = 'black', alpha = 0.8) +
    theme_pubr() +
    theme(legend.position = 'top',
          axis.text = element_text(size = 14),
          axis.title = element_text(size = 14),
          legend.title = element_blank(),
          legend.text = element_text(size = 14)) +
    scale_fill_manual(labels = supp_fig5_fill_labels, values = supp_fig5_fill) +
    scale_x_discrete(labels = supp_fig5_x_labels) +
    xlab('') +
    ylab('Proportion of Genes')
# Save Supplementary Figure 5 in both vector and raster form.
ggsave('results/supp/Supplementary_Figure_5.svg', plot = last_plot(), device = 'svg', dpi = 300, width = 10, height = 8)
ggsave('results/supp/Supplementary_Figure_5.png', plot = last_plot(), device = 'png', dpi = 300, width = 10, height = 8)

# PF00520-restricted brain-vs-other counts, for Fisher's exact tests.
hotspot_counts_PF00520 <- c(supp_fig5[class == 'Hotspot Genes' & variable == 'num_brain', value],
                            supp_fig5[class == 'Hotspot Genes' & variable == 'num_other_tissues', value])
dd_counts_PF00520 <- c(supp_fig5[class == 'NDD-Associated Genes' & variable == 'num_brain', value],
                       supp_fig5[class == 'NDD-Associated Genes' & variable == 'num_other_tissues', value])
control_counts_PF00520 <- c(supp_fig5[class == 'Control Genes' & variable == 'num_brain', value],
                            supp_fig5[class == 'Control Genes' & variable == 'num_other_tissues', value])
fisher.test(rbind(hotspot_counts_PF00520, control_counts_PF00520))
fisher.test(rbind(hotspot_counts_PF00520, dd_counts_PF00520))

# Supplementary figure 6: TPM differences between hotspot, NDD-associated, and control
# genes in brain (panel A) and other tissues (panel B).
# Shared ordering and pairwise comparisons for both boxplot panels.
supp_fig6_class_levels <- c('Control Genes', 'NDD-Associated Genes', 'Hotspot Genes')
supp_fig6_comparisons <- list(c('NDD-Associated Genes', 'Hotspot Genes'),
                              c('Control Genes', 'NDD-Associated Genes'),
                              c('Control Genes', 'Hotspot Genes'))

a <- ggplot(median_gene_tpm[class != 'Proposed Novel Hotspot Genes'],
            aes(x = factor(class, levels = supp_fig6_class_levels), y = log10(Brain), fill = class)) +
    geom_boxplot(outlier.colour = NA, alpha = 0.8) +
    theme_pubr() +
    theme(axis.text = element_text(size = 14),
          axis.title = element_text(size = 16),
          legend.title = element_blank(),
          legend.text = element_text(size = 14)) +
    scale_fill_manual(values = c('skyblue3', 'darkgreen', 'darkgoldenrod')) +
    ggsignif::geom_signif(comparisons = supp_fig6_comparisons,
                          map_signif_level = FALSE,
                          y_position = c(3.5, 3.8, 4.2),
                          colour = 'black') +
    xlab('') +
    ylab('Log10 Brain TPM')

b <- ggplot(median_gene_tpm[class != 'Proposed Novel Hotspot Genes'],
            aes(x = factor(class, levels = supp_fig6_class_levels), y = log10(`Other Tissues`), fill = class)) +
    geom_boxplot(outlier.colour = NA, alpha = 0.8) +
    theme_pubr() +
    theme(axis.text = element_text(size = 14),
          axis.title = element_text(size = 16),
          legend.title = element_blank(),
          legend.text = element_text(size = 14)) +
    scale_fill_manual(values = c('skyblue3', 'darkgreen', 'darkgoldenrod')) +
    ggsignif::geom_signif(comparisons = supp_fig6_comparisons,
                          map_signif_level = FALSE,
                          y_position = c(3.5, 3.8, 4.2),
                          colour = 'black') +
    xlab('') +
    ylab('Log10 Other Tissues TPM')

plot_grid(a, b, labels = 'AUTO', label_size = 16, nrow = 1)
ggsave('results/supp/Supplementary_Figure_6.svg', plot = last_plot(), device = 'svg', dpi = 300, width = 14, height = 8)
ggsave('results/supp/Supplementary_Figure_6.png', plot = last_plot(), device = 'png', dpi = 300, width = 14, height = 8)
MDHS_notebooks/hotspotExpressionConstraint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# !pip install pyspark
from pyspark.sql import SparkSession

# Start (or reuse) a local Spark session; local[*] uses all available cores.
spark = SparkSession.builder\
    .appName('FirstDemo')\
    .master("local[*]")\
    .getOrCreate()
spark

# Creation of DataFrame in PySpark:
# The following are some ways to create a dataframe in pyspark with spark sql:
# from a list of values, from a list of tuples, or from a dictionary.
# The other ways to create a dataframe are from a spark RDD and from a pandas dataframe,
# but the dataframe generated from a pandas dataframe only works with the Python API.
# Dataframe is the core object for storing data which consists of rows and columns.

# +
# Creation of dataframe in pandas:
# In pandas, a DF is a data structure that contains two-dimensional data.
# - from a csv or excel file
# - from a list: a list is a collection data type, which is mutable and allows duplicate
#   values; a list is represented by a collection of data points in square brackets
# - from a dictionary: keys representing the column names and rows as values
# - from numpy arrays: a dataframe is always a two-dimensional structure, so we can
#   create a DF from 2-dimensional numpy arrays
# -

# Create the dataframe from raw "state, location, price" CSV-style strings.
header = ["state", "location", "sell_price"]
raw_rows = ["London, East, 200.00", "Nebraska, West, 400.11", "Iowa, North, 23.21",
            "Ohio, West, 34.11", "NY, South, 45.22", "Boston, East, 34.11"]


def _parse_row(row):
    """Split a 'state, location, price' string into a (str, str, float) tuple.

    BUGFIX: strip each field after splitting on "," -- previously the location
    came through with a leading space (e.g. ' East' instead of 'East').
    """
    state, location, price = (field.strip() for field in row.split(","))
    return (state, location, float(price))


data = [_parse_row(row) for row in raw_rows]
df = spark.createDataFrame(data, header)
df
Create DataFrame in Pyspark.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Gram-Schmidt--The Movie

from IPython.display import FileLink

# Build a clickable link to the local movie file; evaluating it as the final
# expression renders the link in the notebook output.
movie_link = FileLink("gram-schmidt.webm")
movie_link

# Click the link above to play the movie. You can also find it on the Wikipedia page for Gram-Schmidt
#
# https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process#/media/File:Gram-Schmidt_orthonormalization_process.gif
demos/linear_least_squares/Gram-Schmidt--The Movie.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Local Outlier Factor
# -

import pandas as pd
import time
from sklearn.neighbors import LocalOutlierFactor
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import pairwise
import sklearn.model_selection

# Time the CSV load of the 1M-flow sample.
start = time.time()
df = pd.read_csv('1mill_sample_anomaly.csv')
end = time.time()
print(end - start)

#df2 = df.sample(n=1000000)
# Numeric flow features used for outlier detection.
# BUGFIX: .copy() so that adding the 'Outlier' column below mutates an
# independent frame instead of a slice of df (SettingWithCopyWarning).
feature_cols = ['Duration', 'SrcPackets', 'DstPackets', 'SrcBytes', 'DstBytes']
dfNum = df[feature_cols].copy()

# small n, high variance. N=length, then find most probable class
# algorithm='auto' chooses the best neighbor-search structure based on the data passed into fit.
start = time.time()
lof = LocalOutlierFactor(n_neighbors=20, algorithm='auto', contamination=0.05)
labels = lof.fit_predict(dfNum)  # +1 = inlier, -1 = outlier
end = time.time()
print(end - start)

# +
# Intra-cluster distance: ratio of the mean squared distance to the centroid for
# all points vs. benign-only points (>1 means removing outliers tightened the cluster).
start = time.time()
dfNum['Outlier'] = labels
benign = dfNum[dfNum['Outlier'] == 1]
anomalies = dfNum[dfNum['Outlier'] == -1]

# BUGFIX: compute centroids/spreads over the feature columns only -- previously the
# 'Outlier' label column itself leaked into the distance computation.
benign_centroid = benign[feature_cols].mean().values
full_centroid = dfNum[feature_cols].mean().values

full_spread = 0
for index, row in dfNum[feature_cols].iterrows():
    d = np.sum(np.square(full_centroid - row.values))
    full_spread = full_spread + d

benign_spread = 0
for index, row in benign[feature_cols].iterrows():
    d = np.sum(np.square(benign_centroid - row.values))
    benign_spread = benign_spread + d

ratio = (full_spread / dfNum.shape[0]) / (benign_spread / benign.shape[0])
print(ratio)
end = time.time()
print(end - start)
# -

# Adding column to dataframe and resaving ensembling csv.
# NOTE(review): the column name says 0.01 but contamination above is 0.05 -- the
# name looks stale; confirm downstream consumers of this CSV before renaming.
df['LOF_0.01_Labels'] = labels
df.to_csv('1mill_anomaly_ensemble_df.csv')

# Removing 'Port' from the port names and grouping ports into 3 IANA categories.
df['DstPort'] = df['DstPort'].str.replace('Port', '')
df['DstPort'] = df['DstPort'].astype(int)
df['DstPortCategory'] = np.where(df['DstPort'] <= 1023, 'System Ports', 'User Ports')
# BUGFIX: .loc instead of chained indexing, which can silently write to a copy.
df.loc[df['DstPort'] >= 49152, 'DstPortCategory'] = 'Dynamic and/or Private Ports'

# Chi-squared test: do anomalous and benign five-tuples have significantly
# different port-category distributions?
# BUGFIX: this assignment was commented out, but df['Outlier'] is used below.
df['Outlier'] = labels
cont_tab = pd.crosstab(index=df['Outlier'], columns=df['DstPortCategory'])
print(cont_tab)
from scipy import stats
stats.chi2_contingency(cont_tab)

# +
# Bar chart of the top destination ports among anomalous five-tuples.
anomalies2 = df[df['Outlier'] == -1]
freq_series = pd.Series(anomalies2['DstPort'].value_counts().head(5).values)
x_labels = pd.Series(anomalies2['DstPort'].value_counts().head(5).index)
plt.figure()  # fresh figure so bars don't overlay a previous plot when run as a script
ax = freq_series.plot(kind='bar')
ax.set_title('Anomalous Port Distribution (Local Outlier Factor)')
ax.set_xlabel('Port Number')
ax.set_ylabel('Frequency')
ax.set_xticklabels(x_labels)
plt.xticks(rotation=360)
rects = ax.patches

# Percentage labels above each bar.
# BUGFIX: head(5) to match the 5 bars plotted (was head(6)); the label list is
# named bar_labels -- previously it was assigned to `labels`, clobbering the
# LOF output that every later cell depends on.
percs = list(anomalies2['DstPort'].value_counts().head(5) * 100 / len(anomalies2))
percs = [round(perc, 1) for perc in percs]
bar_labels = [str(perc) + '%' for perc in percs]

for rect, label in zip(rects, bar_labels):
    height = rect.get_height()
    ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label,
            ha='center', va='bottom')

plt.savefig('LOF_anomalous_portDist_0.01contam.png')

# +
# Bar chart of the top destination ports among NON-anomalous five-tuples.
anomalies2 = df[df['Outlier'] == 1]
freq_series = pd.Series(anomalies2['DstPort'].value_counts().head(5).values)
x_labels = pd.Series(anomalies2['DstPort'].value_counts().head(5).index)
plt.figure()
ax = freq_series.plot(kind='bar')
ax.set_title('Non-Anomalous Port Distribution (Local Outlier Factor)')
ax.set_xlabel('Port Number')
ax.set_ylabel('Frequency')
ax.set_xticklabels(x_labels)
plt.xticks(rotation=360)
rects = ax.patches

# Percentage labels above each bar (head(5) to match the 5 bars, as above).
percs = list(anomalies2['DstPort'].value_counts().head(5) * 100 / len(anomalies2))
percs = [round(perc, 1) for perc in percs]
bar_labels = [str(perc) + '%' for perc in percs]

for rect, label in zip(rects, bar_labels):
    height = rect.get_height()
    ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label,
            ha='center', va='bottom')

plt.savefig('LOF_nonanomalous_portDist_0.01contam.png')
# -

# Chi-squared test: do anomalous and benign five-tuples have significantly
# different protocol distributions?
df['Outlier'] = labels
cont_tab2 = pd.crosstab(index=df['Outlier'], columns=df['Protocol'])
print(cont_tab2)
from scipy import stats
stats.chi2_contingency(cont_tab2)

# +
# Bar chart of protocols among anomalous five-tuples.
anomalies2 = df[df['Outlier'] == -1]
freq_series = pd.Series(anomalies2['Protocol'].value_counts().values)
x_labels = pd.Series(anomalies2['Protocol'].value_counts().index)
plt.figure()
ax = freq_series.plot(kind='bar')
ax.set_title('Anomalous Protocol Distribution (Local Outlier Factor)')
ax.set_xlabel('Protocol')
ax.set_ylabel('Frequency')
ax.set_xticklabels(x_labels)
plt.xticks(rotation=360)
rects = ax.patches

percs = list(anomalies2['Protocol'].value_counts() * 100 / len(anomalies2))
percs = [round(perc, 1) for perc in percs]
bar_labels = [str(perc) + '%' for perc in percs]

for rect, label in zip(rects, bar_labels):
    height = rect.get_height()
    ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label,
            ha='center', va='bottom')

plt.savefig('LOF_anomalous_protocolDist_0.01contam.png')

# +
# Bar chart of protocols among non-anomalous five-tuples.
benign2 = df[df['Outlier'] == 1]
freq_series = pd.Series(benign2['Protocol'].value_counts().values)
x_labels = pd.Series(benign2['Protocol'].value_counts().index)
plt.figure()
ax = freq_series.plot(kind='bar')
ax.set_title('Non-Anomalous Protocol Distribution (Local Outlier Factor)')
ax.set_xlabel('Protocol')
ax.set_ylabel('Frequency')
ax.set_xticklabels(x_labels)
plt.xticks(rotation=360)
rects = ax.patches

percs = list(benign2['Protocol'].value_counts() * 100 / len(benign2))
percs = [round(perc, 1) for perc in percs]
bar_labels = [str(perc) + '%' for perc in percs]

for rect, label in zip(rects, bar_labels):
    height = rect.get_height()
    ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label,
            ha='center', va='bottom')

plt.savefig('LOF_benign_protocolDist_0.01contam.png')
# -

from sklearn import preprocessing
from sklearn.decomposition import PCA

# Plotting PCA to visualize anomalies.
# Standardize the features before PCA (zero mean, unit variance per column).
start = time.time()
pre = preprocessing.scale(dfNum[feature_cols])
pre = pd.DataFrame.from_records(pre, columns=feature_cols)
print(pre.head())
end = time.time()
print(end - start)

dfNum['Outlier'] = labels

# 2-D PCA scatter of the scaled features, coloured by outlier label.
start = time.time()
pcaDF = pre[feature_cols]
pca = PCA(n_components=2).fit_transform(pcaDF)
pcdf = pd.DataFrame(data=pca, columns=['PC1', 'PC2'])
pcdf.plot(x="PC1", y="PC2", kind='scatter', title='PCA Plot',
          c=dfNum['Outlier'], legend=True, colormap='Accent', alpha=0.05)
plt.savefig('1mill_PCA_LOF.pdf')
plt.show()
end = time.time()
print(end - start)
Catherine/ICMLA_Files/LocalOutlierFactor(1).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D5_DimensionalityReduction/W1D5_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Tutorial 4: Nonlinear Dimensionality Reduction # **Week 1, Day 5: Dimensionality Reduction** # # **By Neuromatch Academy** # # __Content creators:__ <NAME>, <NAME> # # __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # + [markdown] colab_type="text" # **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** # # <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> # - # --- # # Tutorial Objectives # # In this notebook we'll explore how dimensionality reduction can be useful for visualizing and inferring structure in your data. To do this, we will compare PCA with t-SNE, a nonlinear dimensionality reduction method. # # Overview: # - Visualize MNIST in 2D using PCA. # - Visualize MNIST in 2D using t-SNE. 
# + cellView="form" # @title Video 1: PCA Applications from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="2Zb93aOWioM", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # --- # # Setup # Run these cells to get the tutorial started. # Imports import numpy as np import matplotlib.pyplot as plt # + cellView="form" #@title Figure Settings import ipywidgets as widgets # interactive display # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") # + cellView="form" #@title Helper functions def visualize_components(component1, component2, labels, show=True): """ Plots a 2D representation of the data for visualization with categories labelled as different colors. Args: component1 (numpy array of floats) : Vector of component 1 scores component2 (numpy array of floats) : Vector of component 2 scores labels (numpy array of floats) : Vector corresponding to categories of samples Returns: Nothing. 
""" plt.figure() cmap = plt.cm.get_cmap('tab10') plt.scatter(x=component1, y=component2, c=labels, cmap=cmap) plt.xlabel('Component 1') plt.ylabel('Component 2') plt.colorbar(ticks=range(10)) plt.clim(-0.5, 9.5) if show: plt.show() # - # --- # # Section 1: Visualize MNIST in 2D using PCA # # In this exercise, we'll visualize the first few components of the MNIST dataset to look for evidence of structure in the data. But in this tutorial, we will also be interested in the label of each image (i.e., which numeral it is from 0 to 9). Start by running the following cell to reload the MNIST dataset (this takes a few seconds). from sklearn.datasets import fetch_openml mnist = fetch_openml(name='mnist_784', as_frame = False) X = mnist.data labels = [int(k) for k in mnist.target] labels = np.array(labels) # To perform PCA, we now will use the method implemented in sklearn. Run the following cell to set the parameters of PCA - we will only look at the top 2 components because we will be visualizing the data in 2D. from sklearn.decomposition import PCA pca_model = PCA(n_components=2) # Initializes PCA pca_model.fit(X) # Performs PCA # ## Exercise 1: Visualization of MNIST in 2D using PCA # # Fill in the code below to perform PCA and visualize the top two components. For better visualization, take only the first 2,000 samples of the data (this will also make t-SNE much faster in the following section of the tutorial so don't skip this step!) # # **Suggestions:** # - Truncate the data matrix at 2,000 samples. You will also need to truncate the array of labels. # - Perform PCA on the truncated data. # - Use the function `visualize_components` to plot the labelled data. help(visualize_components) help(pca_model.transform) # + ################################################# ## TODO for students: take only 2,000 samples and perform PCA ################################################# # Take only the first 2000 samples with the corresponding labels # X, labels = ... 
# Perform PCA # scores = pca_model.transform(X) # Plot the data and reconstruction # visualize_components(...) # + # to_remove solution # Take only the first 2000 samples with the corresponding labels X, labels = X[:2000, :], labels[:2000] # Perform PCA scores = pca_model.transform(X) # Plot the data and reconstruction with plt.xkcd(): visualize_components(scores[:, 0], scores[:, 1], labels) # - # ## Think! # - What do you see? Are different samples corresponding to the same numeral clustered together? Is there much overlap? # - Do some pairs of numerals appear to be more distinguishable than others? # --- # # Section 2: Visualize MNIST in 2D using t-SNE # # + cellView="form" # @title Video 2: Nonlinear Methods from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="5Xpb0YaN5Ms", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # Next we will analyze the same data using t-SNE, a nonlinear dimensionality reduction method that is useful for visualizing high dimensional data in 2D or 3D. Run the cell below to get started. from sklearn.manifold import TSNE tsne_model = TSNE(n_components=2, perplexity=30, random_state=2020) # ## Exercise 2: Apply t-SNE on MNIST # First, we'll run t-SNE on the data to explore whether we can see more structure. 
The cell above defined the parameters that we will use to find our embedding (i.e, the low-dimensional representation of the data) and stored them in `model`. To run t-SNE on our data, use the function `model.fit_transform`. # # **Suggestions:** # - Run t-SNE using the function `model.fit_transform`. # - Plot the result data using `visualize_components`. help(tsne_model.fit_transform) # + ################################################# ## TODO for students: perform tSNE and visualize the data ################################################# # perform t-SNE embed = ... # Visualize the data # visualize_components(..., ..., labels) # + # to_remove solution # perform t-SNE embed = tsne_model.fit_transform(X) # Visualize the data with plt.xkcd(): visualize_components(embed[:, 0], embed[:, 1], labels) # - # ## Exercise 3: Run t-SNE with different perplexities # # Unlike PCA, t-SNE has a free parameter (the perplexity) that roughly determines how global vs. local information is weighted. Here we'll take a look at how the perplexity affects our interpretation of the results. # # **Steps:** # - Rerun t-SNE (don't forget to re-initialize using the function `TSNE` as above) with a perplexity of 50, 5 and 2. # + def explore_perplexity(values): """ Plots a 2D representation of the data for visualization with categories labelled as different colors using different perplexities. Args: values (list of floats) : list with perplexities to be visualized Returns: Nothing. """ for perp in values: ################################################# ## TO DO for students: Insert your code here to redefine the t-SNE "model" ## while setting the perplexity perform t-SNE on the data and plot the ## results for perplexity = 50, 5, and 2 (set random_state to 2020 # Comment these lines when you complete the function raise NotImplementedError("Student Exercise! Explore t-SNE with different perplexity") ################################################# # perform t-SNE tsne_model = ... 
embed = tsne_model.fit_transform(X) visualize_components(embed[:, 0], embed[:, 1], labels, show=False) plt.title(f"perplexity: {perp}") # Uncomment when you complete the function # values = [50, 5, 2] # explore_perplexity(values) # + # to_remove solution def explore_perplexity(values): """ Plots a 2D representation of the data for visualization with categories labelled as different colors using different perplexities. Args: values (list of floats) : list with perplexities to be visualized Returns: Nothing. """ for perp in values: # perform t-SNE tsne_model = TSNE(n_components=2, perplexity=perp, random_state=2020) embed = tsne_model.fit_transform(X) visualize_components(embed[:, 0], embed[:, 1], labels, show=False) plt.title(f"perplexity: {perp}") plt.show() # Uncomment when you complete the function values = [50, 5, 2] with plt.xkcd(): explore_perplexity(values) # - # ## Think! # # - What changes compared to your previous results using perplexity equal to 50? Do you see any clusters that have a different structure than before? # - What changes in the embedding structure for perplexity equal to 5 or 2? # --- # # Summary # * We learned the difference between linear and nonlinear dimensionality reduction. While nonlinear methods can be more powerful, they can also be sensitive to noise. In contrast, linear methods are useful for their simplicity and robustness. # * We compared PCA and t-SNE for data visualization. Using t-SNE, we could visualize clusters in the data corresponding to different digits. While PCA was able to separate some clusters (e.g., 0 vs 1), it performed poorly overall. # * However, the results of t-SNE can change depending on the choice of perplexity. To learn more, we recommend this [Distill paper](https://distill.pub/2016/misread-tsne/). #
tutorials/W1D5_DimensionalityReduction/W1D5_Tutorial4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/icehelmetminer/DISASTERS/blob/main/supplychain.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="bfouzLjuuFyb"
# Consolidated imports (pandas was previously imported twice; unused
# `time`, `math` and the bare `import icecream` were dropped).
import datetime as dt
import statistics

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from icecream import ic
from tqdm import tqdm

# + id="bWfYOnWCuige"
# Round-trip the Excel workbook through CSV.  Note: the Excel read forces
# dtype=str, but re-reading the CSV lets pandas re-infer numeric columns.
data_xls = pd.read_excel('/content/savefile_workingproject.xlsx', 'Sheet1',
                         dtype=str, index_col=None)
data_xls.to_csv('csvfile.csv', encoding='utf-8', index=False)
dataset = pd.read_csv('csvfile.csv')

# + id="u6djezq8uoqB"
dataset.head()

# + id="LjtNyjlTuu-t"
# Parse shipment dates and derive how long each unit was out in the field.
dataset.shipped = pd.to_datetime(dataset.shipped)
dataset.returned = pd.to_datetime(dataset.returned)
dataset['duration'] = dataset.returned - dataset.shipped
dataset

# + id="1RnixWhpwG-F"
# Overall project window spanned by the shipment data.
p_start = dataset.shipped.min()
p_end = dataset.returned.max()
p_duration = (p_end - p_start).days + 1
print('project duration :', p_duration)
print('project start date :', p_start)
print('project end date :', p_end)

# + id="iKz3AFzcW4fr"
dataset.sort_values(by='shipped', ascending=True, na_position='first')

# + id="onz-_4qkXMjx"
dataset.head(2)


# + id="51kQZ-F_DUx1"
def initializer(length):
    """Return a list of `length` empty strings (placeholder column values)."""
    return [''] * length


# + id="Ykya3pwpSpJn"
def initializer2(length):
    """Return [0, 1, ..., length-1] (an explicit positional index column)."""
    return list(range(length))


# + id="Au2xu1GmYZFA"
# Build a per-unit summary table: for every serial number, collect the row
# indices of all transactions that share both this unit and its buyer.
# Scalar writes below use DataFrame.at, so the previous blanket suppression
# of SettingWithCopyWarning is no longer needed.
the_container_store = pd.DataFrame()

dataset['index'] = initializer2(len(dataset['serial']))

the_container_store['unit'] = initializer(len(dataset['serial']))
the_container_store['number_of_customer_buys'] = initializer(len(dataset['serial']))
the_container_store['customer_buys'] = initializer(len(dataset['serial']))
the_container_store['customer_id'] = initializer(len(dataset['serial']))
the_container_store['customer_bitems'] = initializer(len(dataset['serial']))  # never filled; kept for schema parity
the_container_store['customer_possession_time'] = initializer(len(dataset['serial']))

# Keep only the columns the loop below reads.
dataset = dataset[['index', 'serial', 'customer_id', 'customer',
                   'customer_possession_time']]
items = dataset['serial']

counter = 0
for item in tqdm(items):
    the_container_store.at[counter, 'unit'] = item
    buyer = dataset['customer'][counter]

    # Rows where this buyer appears / rows where this unit appears,
    # intersected to get the transactions linking the two.
    buys = dataset.loc[dataset['customer'] == buyer]
    bitems = dataset.loc[dataset['serial'] == item]
    common_indices = list(np.intersect1d(np.array(buys.index.values),
                                         np.array(bitems.index.values)))

    returnrow = []
    for ind in common_indices:
        row_inner = dataset.loc[dataset['index'] == ind].values.tolist()[0]
        # First field of the row is its positional 'index' value.
        # (Removed: an unused `return_time = len(row_inner)/sum(row_inner)`
        # whose value was never read — and summing a mixed str/int row
        # would raise TypeError.  Also removed a no-op `dataset.rename(...)`
        # whose result was discarded.)
        returnrow.append(row_inner[0])

    the_container_store.at[counter, 'customer_id'] = buyer
    the_container_store.at[counter, 'customer_buys'] = returnrow
    the_container_store.at[counter, 'number_of_customer_buys'] = len(returnrow)
    # NOTE(review): this averages the *row indices* collected above, not the
    # 'customer_possession_time' column — preserved from the original, but it
    # looks like a bug; confirm the intended statistic with the author.
    the_container_store.at[counter, 'customer_possession_time'] = statistics.mean(returnrow)
    counter += 1

the_container_store.head()
the_container_store.to_csv("quicksave.csv")
ic()

# + id="57QulGSCFo-8"
the_container_store.head(20)
items

# + id="H13jGuymFr3V"
supplychain.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:py37]
#     language: python
#     name: conda-env-py37-py
# ---

# +
# Consolidated imports (pandas was previously imported twice).
import datetime

import numpy as np
import pandas as pd
from erddapy import ERDDAP

# +
# ---- Home-station ThingSpeak channel: last 30 days ----
server_url = 'http://downdraft.pmel.noaa.gov:8080/erddap'

d = ERDDAP(server=server_url,
           protocol='tabledap',
           response='csv',
           )
d.dataset_id = 'channel_843357_thingspeak'
# All variables carrying a standard_name attribute (the one-element
# list-comprehension wrapper around this call was redundant).
d.variables = d.get_var_by_attr(dataset_id=d.dataset_id,
                                standard_name=lambda v: v is not None)
d.constraints = {
    'time>=': datetime.datetime.today() - datetime.timedelta(days=30),
    'time<=': datetime.datetime.utcnow(),
}
d.get_download_url()

df = d.to_pandas(
    skiprows=(1,)  # units information can be dropped.
).dropna()
df.sort_index(inplace=True)
df.columns = [c.split()[0] for c in df.columns]  # strip unit suffixes from names
df.tail()
# -

# Epoch-millisecond index for the JS time-series viewer.
df['UnixTime_GMT'] = [int(1000 * datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ').timestamp())
                      for x in df.time]
df.set_index('UnixTime_GMT', inplace=True)
df.drop(columns=['time'], inplace=True)

# +
outputpath = '/Users/bell/Sites/web/timeseries_viewer/home_station/test_station/'

df.to_csv(outputpath + 'csv/airTH_unix.csv', columns=['Temp', 'RH_Percent'])
df.to_csv(outputpath + 'csv/airTP_unix.csv', columns=['BaroTemp', 'SLP'])
# -

print("Last run {0}".format(datetime.datetime.utcnow()))
with open(outputpath + 'txt/lastUpdated.txt', 'w') as myfile:
    myfile.write(str(datetime.datetime.utcnow()))

# ## 19BSITAEPR-2A
# ### 19bsitaepr2a met

# +
server_url = 'http://downdraft.pmel.noaa.gov:8080/erddap'

d = ERDDAP(server=server_url,
           protocol='tabledap',
           response='csv',
           )
d.dataset_id = 'erddap_19bsitaepr2a_met'
d.variables = d.get_var_by_attr(dataset_id=d.dataset_id,
                                standard_name=lambda v: v is not None)
d.constraints = {
    'time>=': datetime.datetime.today() - datetime.timedelta(days=366),
    'time<=': datetime.datetime.utcnow(),
}
d.get_download_url()

df = d.to_pandas(
    skiprows=(1,)  # units information can be dropped.
).dropna()
df.sort_index(inplace=True)
df.columns = [c.split()[0] for c in df.columns]
df.tail()
# -

df.head()

# +
# Derive wind speed and direction from the u/v components.
df['wind_speed (m s-1)'] = np.sqrt(df['northward_wind']**2 + df['eastward_wind']**2)
df['wind_from_direction (degrees true)'] = 270 - np.rad2deg(
    np.arctan2(df['northward_wind'], df['eastward_wind']))
df['wind_from_direction (degrees true)'][df['wind_from_direction (degrees true)'] > 360] = \
    df['wind_from_direction (degrees true)'][df['wind_from_direction (degrees true)'] > 360] - 360

# Quick QC: physically impossible air temperatures become NaN.
df['air_temperature'][df['air_temperature'] > 50] = np.nan

###
df['UnixTime_GMT'] = [int(1000 * datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ').timestamp())
                      for x in df.time]
df.set_index('UnixTime_GMT', inplace=True)
df.drop(columns=['time'], inplace=True)
# -

df.to_csv('airTH_unix.csv', columns=['air_temperature', 'relative_humidity'])
df.to_csv('airTP_unix.csv', columns=['air_temperature', 'air_pressure'])
df.to_csv('airWSWD_unix.csv', columns=['wind_from_direction (degrees true)', 'wind_speed (m s-1)'])

print("Last run {0}".format(datetime.datetime.utcnow()))
with open('lastUpdated.txt', 'w') as myfile:
    myfile.write(str(datetime.datetime.utcnow()))

# ### 19BSITAEPR-2A Prawler

# +
server_url = 'http://downdraft.pmel.noaa.gov:8080/erddap'

d = ERDDAP(server=server_url,
           protocol='tabledap',
           response='csv',
           )
d.dataset_id = 'erddap_19bsitaepr2a_prawler'
d.variables = [
    'profile_id',
    'Temperature',
    'depth',
    'time',
]

# +
d.constraints = {
    'time>=': datetime.datetime.today() - datetime.timedelta(days=366),
    'time<=': datetime.datetime.utcnow(),
}
d.get_download_url()

df_sst = d.to_pandas(
    skiprows=(1,)  # units information can be dropped.
).dropna()
df_sst.sort_index(inplace=True)
# BUGFIX: column names were previously derived from the *met* frame
# (`enumerate(df.columns)`), which misnames — or fails on — the prawler frame.
df_sst.columns = [c.split()[0] for c in df_sst.columns]
df_sst.tail()
# -

# Near-surface temperature per profile: keep casts that span more than 5 m
# of depth and reach shallower than 5 m; use the median of the top samples.
dfint = df_sst.groupby('profile_id')
sst, sst_time = [], []
for i, cast in enumerate(dfint.groups):
    if (dfint.get_group(cast)['depth'].std() > 5) and (dfint.get_group(cast)['depth'].min() < 5):
        sst_time = sst_time + [(dfint.get_group(cast)['time'].values)[0]]
        sst = sst + [(dfint.get_group(cast)['Temperature'][0:5]).median()]

# +
df_sst_10min = pd.DataFrame(np.array([sst_time, sst]).T, columns=('sst_time', 'sst'))
df_sst_10min.set_index(pd.to_datetime(df_sst_10min['sst_time']), inplace=True)
df_sst_10min.drop(columns=['sst_time'], inplace=True)
df_sst_10min["sst"] = pd.to_numeric(df_sst_10min["sst"])
# BUGFIX: .info() was previously called *before* df_sst_10min existed
# (NameError when the notebook runs top to bottom); moved after creation.
df_sst_10min.info()

df_sst_10minr = df_sst_10min.resample('10T').mean()
df_sst_10minr['UnixTime_GMT'] = [int(1000 * datetime.datetime.strptime(str(x), '%Y-%m-%d %H:%M:%S+00:00').timestamp())
                                 for x in df_sst_10minr.index]
df_sst_10minr.set_index('UnixTime_GMT', inplace=True)
# -

result = df_sst_10minr.join(df, sort=False)
result.to_csv('airT_SST_unix.csv', columns=['air_temperature', 'sst'])

# ### 18BSITAEPR-2A

# +
server_url = 'http://downdraft.pmel.noaa.gov:8080/erddap'

d = ERDDAP(server=server_url,
           protocol='tabledap',
           response='csv',
           )
d.dataset_id = 'erddap_18bsitaepr2a_met'
d.variables = d.get_var_by_attr(dataset_id=d.dataset_id,
                                standard_name=lambda v: v is not None)
d.constraints = {
    'time>=': '2018-01-01',
    'time<=': '2018-12-31',
}
d.get_download_url()

df = d.to_pandas(
    skiprows=(1,)  # units information can be dropped.
).dropna()
df.sort_index(inplace=True)
df.columns = [c.split()[0] for c in df.columns]

# Wind speed/direction and QC — same recipe as above plus a wind-speed cap.
df['wind_speed (m s-1)'] = np.sqrt(df['northward_wind']**2 + df['eastward_wind']**2)
df['wind_from_direction (degrees true)'] = 270 - np.rad2deg(
    np.arctan2(df['northward_wind'], df['eastward_wind']))
df['wind_from_direction (degrees true)'][df['wind_from_direction (degrees true)'] > 360] = \
    df['wind_from_direction (degrees true)'][df['wind_from_direction (degrees true)'] > 360] - 360

df['air_temperature'][df['air_temperature'] > 50] = np.nan
df['wind_speed (m s-1)'][df['wind_speed (m s-1)'] > 100] = np.nan

###
df['UnixTime_GMT'] = [int(1000 * datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ').timestamp())
                      for x in df.time]
df.set_index('UnixTime_GMT', inplace=True)
df.drop(columns=['time'], inplace=True)

df.to_csv('airTH_unix.csv', columns=['air_temperature', 'relative_humidity'])
df.to_csv('airTP_unix.csv', columns=['air_temperature', 'air_pressure'])
df.to_csv('airWSWD_unix.csv', columns=['wind_from_direction (degrees true)', 'wind_speed (m s-1)'])

print("Last run {0}".format(datetime.datetime.utcnow()))
with open('lastUpdated.txt', 'w') as myfile:
    myfile.write(str(datetime.datetime.utcnow()))
# -

# ### 19CKITAEPR-2A (FCOM)

# +
server_url = 'http://heron:8080/erddap'

d = ERDDAP(server=server_url,
           protocol='tabledap',
           response='csv',
           )
d.dataset_id = 'TELOF001_PRAWC_F001'
d.variables = d.get_var_by_attr(dataset_id=d.dataset_id,
                                standard_name=lambda v: v is not None)
d.constraints = {
}
d.get_download_url()

df = d.to_pandas(
    skiprows=(1,)  # units information can be dropped.
).dropna()
df.sort_index(inplace=True)
df.columns = [c.split()[0] for c in df.columns]
# -
swbell/ThingSpeak_Wx/Erddap2_webserver_thingspeak.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# # Section 5: Quality and track record
#
# * What are the levels of quality (impact) of Covid AI research papers?
# * What are the levels of experience of AI researchers focusing on Covid?
# * How does the above differ between COVID research clusters?

# ## Preamble

# %run ../notebook_preamble.ipy

import random

import altair as alt

from toolz.curried import *
from ast import literal_eval
from scipy.stats import ttest_ind, mannwhitneyu

from ai_covid_19.utils.utils import *


# +
def citation_distr(_vector, bins=[0, 1, 2, 3, 5, 10, 20, 100, 1000]):
    '''Bin citation counts into intervals and return normalised shares.

    Args:
        _vector: distribution of citations (a pd.Series of counts)
        bins (list): interval edges for binning

    Returns:
        pd.Series of frequencies (summing to 1) indexed by 'lo_hi' labels
        such as '0_1' for the half-open interval [0, 1).
    '''
    bins_ = bins.copy()  # don't mutate the shared default list
    cut = pd.cut(_vector, bins=bins_, right=False, include_lowest=True)
    out = cut.value_counts(normalize=True)
    # Turn interval labels like '[0, 1)' into compact '0_1' keys.
    out.index = ['_'.join([x.strip() for x in re.sub('\[|\)', '', str(inter)).split(',')])
                 for inter in out.index]
    return out


def get_author_profile(x):
    '''Extract an author's track record from her publications.

    Args:
        x (pd.DataFrame): publications the author has been involved in;
            must have 'citation_count' and 'year' columns.

    Returns:
        pd.Series with the number of papers, her citation median and mean,
        and her experience (years since her first paper, relative to 2020).
    '''
    stats = [len(x),
             x['citation_count'].median(),
             x['citation_count'].mean(),
             2020 - x['year'].min()]
    return pd.Series(stats, index=['paper_n', 'citation_median', 'citation_mean', 'experience'])


def make_mean_comp(table, var_name, table_name):
    '''Reshape a table of group means into long format for comparison charts.

    Args:
        table (pd.DataFrame): means indexed by the grouping variable
        var_name (str): name of the grouping variable (becomes 'category')
        table_name (str): label recorded in the 'table' column

    Returns:
        Long-format pd.DataFrame with category / statistic / value columns.
    '''
    t_l = table.reset_index(drop=False).melt(id_vars=var_name)
    t_l.rename(columns={var_name: 'category', 'variable': 'statistic'}, inplace=True)
    t_l['variable'] = [f"{var_name}: {b}" for b in t_l['category']]
    t_l['table'] = table_name
    return t_l


def get_tests_table(table, variable_name, test=ttest_ind):
    '''P-values comparing each statistic between two groups of authors.

    Splits `table` on the boolean column `variable_name` and applies `test`
    (default: independent-samples t-test) to every statistic in the
    notebook-global `stat_names` list.

    Args:
        table (pd.DataFrame): author profiles with boolean group column
        variable_name (str): boolean column defining the two groups
        test (callable): scipy-style two-sample test returning (stat, pvalue)

    Returns:
        dict mapping statistic name -> p-value.
    '''
    res = {}
    # NOTE: relies on `stat_names`, defined later in the analysis section.
    for x in stat_names:
        t_1 = test(table.loc[table[variable_name] == True][x],
                   table.loc[table[variable_name] == False][x])
        res[x] = t_1[1]
    return res
# -

# ## 1. Read data

# All arXiv data
rxiv = pd.read_csv(f"{data_path}/processed/rxiv_metadata.csv",
                   dtype={'id': str, 'is_ai': bool, 'is_covid': bool}).pipe(preview)

# Create the covid df
cov = rxiv.query("is_covid == True").reset_index(drop=True).pipe(preview)

# Create a paper-cluster membership lookup
cluster_lookup = pd.read_csv(
    f"{data_path}/processed/covid_semantic.csv", dtype={'article_id': str}).drop_duplicates(
    'article_id').set_index('article_id')['cluster'].to_dict()

# ## 2. Data analysis

# ### 1. Covid vs non covid citations
#
# How do the levels of citations for Covid and non-Covid research compare?
#
# #### Cited / non-cited comparison

# +
rxiv_2020 = rxiv.query('year == 2020')
rxiv_2020['cluster'] = rxiv['id'].map(cluster_lookup)
# -

rxiv_2020.groupby(['is_covid', 'is_ai'])['citation_count'].mean().reset_index(drop=False).pivot_table(
    index='is_covid', columns='is_ai')

# +
cit_groups = rxiv_2020.groupby(
    ['is_covid', 'is_ai', 'article_source'])['citation_count'].mean().reset_index()

alt.Chart(cit_groups).mark_bar().encode(x='is_covid:N', y='citation_count',
                                        column='is_ai:N',
                                        row='article_source').properties(height=100, width=50)
# -

# Comparison of papers with at least one citation: AI vs non AI by article source
rxiv_2020.assign(
    has_cit=lambda x: x['citation_count'] > 0).groupby(
    ['article_source', 'is_covid'])['has_cit'].mean().reset_index(name='share').pivot_table(
    index='article_source', columns='is_covid', values='share').assign(rep=lambda x: x[1] / x[0])

# #### Full distribution of citations by source

# Sort-friendly labels for the interval keys produced by citation_distr.
int_lookup = {'0_1': '0: 0-1',
              '100_1000': '7: >100',
              '10_20': '5: 10-20',
              '1_2': '1: 1-2',
              '20_100': '6: 20-100',
              '2_3': '2: 2-3',
              '3_5': '3: 3-5',
              '5_10': '4: 5-10'}

# +
rxiv_cit_distrs = rxiv_2020.groupby(
    ['is_covid', 'is_ai', 'article_source'])['citation_count'].apply(lambda x: citation_distr(x)).reset_index(
    drop=False).pipe(preview)
rxiv_cit_distrs['int_sorted'] = rxiv_cit_distrs['level_3'].map(int_lookup) # + #Clean up the variable names and categories rxiv_cit_distrs['is_covid'],rxiv_cit_distrs['is_ai'],rxiv_cit_distrs['article_source'] = [ func(rxiv_cit_distrs[var]) for func,var in zip( [convert_covid,convert_ai,convert_source],['is_covid','is_ai','article_source'])] rxiv_cit_distrs['citation_count'] = 100*rxiv_cit_distrs['citation_count'] rxiv_cit_distrs['% of papers'] = make_pc(rxiv_cit_distrs['citation_count']) # + #Chart b = (alt .Chart(rxiv_cit_distrs) .mark_bar(stroke='black',strokeWidth=0.5) .encode(x=alt.X('is_covid:N',title=''), y=alt.Y('citation_count',title='% of papers'), color=alt.Color( 'int_sorted:O',scale=alt.Scale(scheme='orangered'),title=['Number of','citations']), column=alt.Column('article_source',title='Source'), row=alt.Row('is_ai:N',title=''), tooltip=['% of papers'])) b = b.properties(height=77,width=100).resolve_scale(color='shared') b.save(f"{fig_path}/fig_10.html") b # - # #### Citations by cluster # + #Focus on covid papers for which we have cluster information rxiv['cluster'] = rxiv['id'].map(cluster_lookup) cov = rxiv.query('is_covid==True').reset_index(drop=True).dropna(axis=0,subset=['cluster']) #List of top 12 clusters in terms of AI publications top_ai_clusters = list(cov.query('is_ai==1')['cluster'].value_counts().sort_values(ascending=False)[:12].index) # + #Get citations for papers in different clusters cit_sorted = clean_cluster([x for x in cov.groupby('cluster')['citation_count'].mean().sort_values(ascending=False).index if x in top_ai_clusters]) #Clean variable names cov['cluster'] = clean_cluster(cov['cluster']) top_clust_cov = cov.loc[[x in cit_sorted for x in cov['cluster']]] top_clust_cov['rank'] = top_clust_cov['cluster'].map({c:n for n,c in enumerate(cit_sorted)}) # - top_clust_cov['cluster'] = clean_cluster(top_clust_cov['cluster']) top_clust_cov['is_ai'] = convert_ai(top_clust_cov['is_ai']) # + #Calculate citation means citation_means = 
top_clust_cov.groupby(['is_ai','cluster'])['citation_count'].mean().apply( lambda x: np.round(x,2)).reset_index(name='Citation mean') #Merge with the cluster info top_clust_cov_2 = pd.merge(top_clust_cov,citation_means, left_on=['is_ai','cluster'],right_on=['is_ai','cluster']) # + out = [] for n,c in enumerate(cit_sorted): l1 = (alt.Chart(top_clust_cov_2) .transform_filter(alt.datum.cluster==c) .mark_point(opacity=0.5,stroke='black',strokeWidth=2,filled=True) .encode(x=alt.X('is_ai:N',title=''), y=alt.Y('citation_count:Q',title=['Number','of citations']), size=alt.Size('count()', scale=alt.Scale(range=[0,100],type='log'), title=['Number', 'of publications']), color=alt.Color('is_ai:N',title='Category'), tooltip=['Citation mean:Q'])) l2 = (alt.Chart(top_clust_cov_2) .transform_filter(alt.datum.cluster==c) .mark_line(strokeWidth=1,strokeDash=[1,1]) .encode(x='is_ai:N',y='citation_count:Q',detail='is_ai:N',color='is_ai:N')) l2 = (alt.Chart(top_clust_cov_2) .transform_filter(alt.datum.cluster==c) .mark_tick(strokeWidth=1,opacity=0.7) .encode(x='is_ai:N',y='citation_count:Q', color='is_ai:N',tooltip=['Citation mean:Q'])) ch = (l1+l2).properties(height=100,width=150,title=c) out.append(ch) out_threes = [out[:3],out[3:6],out[6:9],out[9:12]] #Arrange things so they fit in a page r = [] for p in out_threes: r.append(alt.hconcat(*p)) fin_fig = alt.vconcat(*r) fin_fig.save(f"{fig_path}/fig_11.html") fin_fig # + #Calculate citation means by group: How often are AI means above or below non-A) citation_means = rxiv_2020.query('is_covid==True').groupby(['cluster','is_ai'])[ 'citation_count'].mean().reset_index(name='statistic').pivot_table(index='cluster',columns='is_ai', values='statistic').loc[ top_ai_clusters[:10]].sort_values(1,ascending=False) citation_means['status'] = [row[1]>=row[0] for _id,row in citation_means.iterrows()] citation_means['status'].mean() # - # ### 2. 
Track record of authors # # Here we compare the publication records of authors focusing on different COVID-19 topics and in different categories (eg AI vs non AI) # + #Focus on papers with authors rxiv_auth = rxiv.dropna(axis=0,subset=['mag_authors']) #Extract author ids from author credentials dict #First we need to parse the mag_authors json rxiv_auth['mag_authors'] = rxiv_auth['mag_authors'].apply(literal_eval) rxiv_auth['author_id'] = [[x['author_id'] for x in p] for p in rxiv_auth['mag_authors']] # - #Create the lookup between papers and authors paper_author_lookup = rxiv_auth[['id','author_id']].explode('author_id').pipe(preview) # + #Find authors with at least one covid paper covid_paper_ids = set(cov['id']) ai_paper_ids = set(rxiv.query('is_ai == 1')['id']) covid_ai_paper_ids = set(cov.query('is_ai == 1')['id']) #Get lists of authors with at least one covid, ai, covid ai paper cov_paper_auths,ai_paper_auths,covid_ai_paper_auths = [set( paper_author_lookup.loc[[x in ids for x in paper_author_lookup['id']]]['author_id']) for ids in [covid_paper_ids,ai_paper_ids, covid_ai_paper_ids]] # - #Merge with xiv and focus on covid authors trajectories = rxiv.merge(paper_author_lookup,left_on='id',right_on='id').dropna( axis=0,subset=['author_id']) # #### Covid and non-Covid authors. 
# + #Focus on non Covid publications in recent years trajectories_non_covid = trajectories.loc[[x not in covid_paper_ids for x in trajectories['id']]] trajectories_recent = trajectories_non_covid.query("(year == 2018) | (year == 2019)") author_cluster = trajectories.groupby('cluster')['author_id'].apply(lambda x: set(list(x))) #Extract researcher profiles (focusing on those with publications in 2019) #Note - this takes some time to run author_profile = trajectories_recent.groupby('author_id').apply(lambda x: get_author_profile(x)) #Label authors with whether they have one covid, one ai or one covid_ai paper author_profile['has_cov'], author_profile['has_ai'],author_profile['has_covid_ai'] = [ author_profile.index.isin(group) for group in [cov_paper_auths,ai_paper_auths,covid_ai_paper_auths]] #Label them wih the clusters where they have published author_profile['cluster'] = [[cid for cid,cgr in author_cluster.iteritems() if auth in cgr] for auth in author_profile.index] # + #Here we compare the statistics for papers in various categories stat_names = ['paper_n','citation_mean','citation_median','experience'] #Create a bunch of tables that compare mean citations for ait cov_comp = author_profile.groupby('has_cov')[stat_names].mean() ai_comp = author_profile.query('has_ai == True').groupby('has_covid_ai')[stat_names].mean() cov_ai_comp = author_profile.query('has_cov == True').groupby('has_covid_ai')[stat_names].mean() tables = [cov_comp,ai_comp,cov_ai_comp] var_names = ['has_cov','has_covid_ai','has_covid_ai'] table_names = ['all_papers','all_ai_papers','all_covid_papers'] all_tables = pd.concat([make_mean_comp(t,var,name) for t,var,name in zip(tables,var_names,table_names)]) all_tables.head() # + #Here we test statistical significance of differences in means between the variables test_df = pd.DataFrame([get_tests_table(table,variable_name,mannwhitneyu) for table,variable_name in zip([author_profile, author_profile.query('has_ai == True'), 
author_profile.query('has_cov == True')], ['has_cov','has_covid_ai','has_covid_ai'])], index=['all_papers','all_ai_papers','all_covid_papers']) #Turn into a long df so we can merge with the means table test_df_long = test_df.reset_index(drop=False).melt(id_vars='index', var_name='statistic', value_name='significant') test_df_long['significant'] = test_df_long['significant']<0.05 all_tables_tests = pd.merge(all_tables,test_df_long,left_on=['statistic','table'], right_on=['statistic','index']) # + #Tidy up variable names for the chart var_names = ['statistic','variable','table','index','significant'] #Lookups between variables stat_lookup = {'paper_n':'Number of papers','citation_mean':'Citation (mean)', 'citation_median':'Citation (median)','experience':'Experience'} var_lookup = {'has_cov: False':'Not COVID-19','has_cov: True':'COVID-19', 'has_covid_ai: True': 'COVID-19 and AI','has_covid_ai: False': 'COVID-19 and not AI'} table_lookup = {'all_papers':'All research','all_ai_papers':'AI research', 'all_covid_papers':'COVID-19 research'} significant = {True:'Significant',False:'Insignificant'} #Convert variables using the lookups for v,l in zip(var_names,[stat_lookup,var_lookup,table_lookup,table_lookup,significant]): all_tables_tests[v] = convert_var(all_tables_tests[v],l) #Create a rounded variable for tooltops all_tables_tests['value_label'] = [str(np.round(x,2)) for x in all_tables_tests['value']] #We are not interested in the experience variable (we are focusing on authors with recent publications) all_tables_tests = all_tables_tests.query("statistic != 'Experience'") # + #Make chart mean_comp = (alt.Chart(all_tables_tests) .mark_bar(height=15,stroke='black') .encode( y=alt.Y('variable:N',title=''), x=alt.X('value:Q',title='Score'), color=alt.Color('significant', scale=alt.Scale(range=['lightpink','steelblue']),title='Significance'), column=alt.Column('statistic:N'), row=alt.Row('table:N', sort=['All research','AI research','COVID-19 reesearch'], 
title='Corpus'), tooltip=['value_label']) .resolve_scale(y='independent',x='shared').properties(height=70,width=70)) mean_comp.save(f"{fig_path}/fig_13.html") mean_comp # - # #### AI and non-AI authors between clusters # # In this case we want to consider the full trajectory of researchers working in Covid, not just the most recent two years, but excluding Covid papers #Focus on Covid authors in recent years trajectories_covid_authors = trajectories_non_covid.loc[trajectories.author_id.isin(cov_paper_auths)] trajectories_covid_authors = trajectories_covid_authors.query('(year > 2017) & (year <2020)') #Extract author profile author_profile_cov = trajectories_covid_authors.groupby('author_id').apply(lambda x: get_author_profile(x)) #Label authors with whether they have one covid, one ai or one covid_ai paper author_profile_cov['has_covid_ai'] = author_profile_cov.index.isin(covid_ai_paper_auths) #Label them wih the clusters where they have published author_profile_cov['cluster'] = [[cid for cid,cgr in author_cluster.iteritems() if auth in cgr] for auth in author_profile_cov.index] # + #Author profile in cluster #Explode the dataframe by the author cluster author_profile_exploded = author_profile_cov.explode('cluster') #Calculate means for each cluster prof_clust = author_profile_exploded.groupby( ['cluster','has_covid_ai'])[stat_names].mean().loc[top_ai_clusters[:10]].reset_index(drop=False) # + #Calculate statistical significance of differences cluster_test_df = pd.DataFrame([get_tests_table(author_profile_exploded.loc[author_profile_exploded['cluster']==x], 'has_covid_ai',mannwhitneyu) for x in top_ai_clusters[:10]],index=top_ai_clusters[:10]) cluster_test_long = cluster_test_df.reset_index(drop=False).melt(id_vars='index',var_name='statistic', value_name='significance') cluster_test_long['significance_thres'] = ['p < 0.01' if x<0.01 else 'p < 0.05' if x<0.05 else 'p < 0.1' if x<0.1 else 'p > 0.1' for x in cluster_test_long['significance']] # + #Make charts charts 
= [] for v in ['paper_n','citation_mean','citation_median']: d = prof_clust[['cluster','has_covid_ai',v]] d['has_covid_ai'] = convert_ai(d['has_covid_ai']) s = cluster_test_long.loc[cluster_test_long['statistic']==v].set_index( 'index')['significance_thres'].to_dict() d['significance_thres'] = d['cluster'].map(s) d['cluster'] = clean_cluster(d['cluster']) d[f'Mean {stat_lookup[v]}'] = [str(np.round(x,2)) for x in d[v]] c = (alt.Chart(d) .mark_bar(height=10,stroke='black',strokeWidth=1,strokeOpacity=1) .encode(y=alt.Y('has_covid_ai',title=None), x=alt.X(v,title=stat_lookup[v]), color=alt.Color('has_covid_ai',title='Category'), opacity=alt.Opacity('significance_thres:N',scale=alt.Scale(range=[0.3,1]), title='Significance', sort=['p > 0.1','p < 0.1','p < 0.05','p < 0.01']), row=alt.Row('cluster', sort=alt.EncodingSortField(v,'max',order='descending')), tooltip=[f'Mean {stat_lookup[v]}'])) charts.append((c).properties(height=40,width=40,title=stat_lookup[v])) cluster_comp = alt.hconcat(*charts).configure_axis(grid=True) cluster_comp.save(f"{fig_path}/fig_14.html") cluster_comp # - for x in stat_names: piv = prof_clust.pivot_table(index='cluster',columns='has_covid_ai',values=x) print(x) print(np.mean(piv[False]>piv[True])) print('\n')
notebooks/ai_covid-19_report/paper_section_5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
import html
from bs4 import BeautifulSoup
from urllib.parse import parse_qs, quote, urlparse


class BypassedSearch(object):
    """Scrape YouTube search results with headless Chrome and return a video id."""

    def __init__(self):
        # Headless + incognito: no visible window and no profile state reused.
        options = Options()
        options.add_argument('--headless')
        options.add_argument('--incognito')
        self.driver = webdriver.Chrome(options=options)

    def searchUsingScraper(self, name):
        """Search YouTube for *name* and return a video id.

        Returns the id of the first exact (HTML-unescaped, case-insensitive)
        title match, falling back to the first partial match; returns an error
        string when the URL could not be built or no video was found.
        """
        trackName = quote(name)
        if trackName:
            url = 'https://www.youtube.com/results?search_query='
            generatedURL = url + trackName
            self.driver.get(generatedURL)
            time.sleep(3)  # crude wait for the JS-rendered result list to load
            # FIX: find_element_by_xpath() was removed in Selenium 4; use the
            # find_element(By.XPATH, ...) API instead.
            divs = self.driver.find_element(
                By.XPATH,
                "//body/ytd-app/div[@id='content']/ytd-page-manager[@id='page-manager']/ytd-search[@class='style-scope ytd-page-manager']/div[@id='container']/ytd-two-column-search-results-renderer[@class='style-scope ytd-search']/div[@id='primary']/ytd-section-list-renderer[@class='style-scope ytd-two-column-search-results-renderer']/div[@id='contents']/ytd-item-section-renderer[@class='style-scope ytd-section-list-renderer']/div[3]")
            content = self.driver.execute_script("return arguments[0].innerHTML;", divs)
        else:
            print("URL not Generated")
            return "Failed To Generate URL"
        page = BeautifulSoup(content, 'html5lib')
        atags = page.findAll('a', attrs={'id': 'video-title'})
        videoLibrary = []
        for eachTag in atags:
            href = eachTag.get('href')
            # Skip empty hrefs and playlist links.
            if href and '&list=' not in href:
                # FIX: the old href.split("=")[1] parsing breaks as soon as the
                # URL carries more than one query parameter; parse the query
                # string properly instead. Channel links have no ?v= parameter,
                # so this also filters channels out of the results.
                video_id = parse_qs(urlparse(href).query).get('v')
                if video_id:
                    videoLibrary.append({'Name': eachTag.get('title'),
                                         'VideoUrl': video_id[0]})
        actualMatch = []
        relativeMatch = []
        for each in videoLibrary:
            # Title matching: exact match beats partial match.
            if html.unescape(each['Name'].lower()) == name.lower():
                actualMatch.append(each['VideoUrl'])
            else:
                relativeMatch.append(each['VideoUrl'])
        if actualMatch:
            return actualMatch[0]  # return videoId from here
        elif relativeMatch:
            return relativeMatch[0]  # return videoId from here
        else:
            return "Video Not Found"


byps = BypassedSearch()
ur = byps.searchUsingScraper('Tale of Us - Vinewood Blues')
print(ur)
# -
src/utils/youtube_search_engine_v2 (need to update previous code).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="8ObSVCw3vmxf"
# Install dgl from inside the notebook so the import below works on a fresh runtime.
import subprocess
subprocess.check_call(["python3", '-m', 'pip', 'install', 'dgl'])
import dgl

# + id="PQTu2JOZzi75"
# !pip install dgl-cu110

# + id="J4gNFHow3Cc8"
# !pip uninstall dgl

# + id="1G1IfOl6zKaZ"
# !nvcc --version

# + id="Xo7aBRl6qmQf"
import os
import dgl.data
from dgl.data import DGLDataset
import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import tqdm
from sklearn.linear_model import LinearRegression

# + id="eCbOLWNXqtMF"
os.chdir("/content/drive/MyDrive/Winter_Research")

# + [markdown] id="JNAhCh9S0gHx"
# ## Make the Dataset

# + [markdown] id="TRgmoXrx4FZy"
# #### Load into RAM

# + id="tru5nekG4EAc"
# Traffic counts indexed by date; images are matched to rows by filename date.
master_df = pd.read_csv("Sentinel2_Traffic/Traffic_Data/5_state_traffic.csv")
master_df = master_df.set_index("Unnamed: 0")

CA_x, CA_y = [], []
KS_x, KS_y = [], []
MT_x, MT_y = [], []
TX_x, TX_y = [], []
OH_x, OH_y = [], []

# state code -> [images, labels, ROI directory name]
states = {"CA": [CA_x, CA_y, "Roi_1"],
          "KS": [KS_x, KS_y, "Roi_2"],
          "MT": [MT_x, MT_y, "Roi_3"],
          "TX": [TX_x, TX_y, "Roi_4"],
          "OH": [OH_x, OH_y, "Roi_5"]}

# for st in ["CA", "KS", "MT", "TX", "OH"]:
for st in ["CA"]:
    path_check = "Rois/" + states[st][2] + "/greedy_a/"
    path = "Rois/" + states[st][2] + "/sent_cloud_90p_raw/"
    imgs_check = os.listdir(path_check)
    imgs = os.listdir(path)
    for img, img_check in zip(imgs, imgs_check):
        date = img.split('.')[0]
        # 5 columns per pixel: 3 band values + lat + lon (see RoadDataset below).
        photo = np.loadtxt(path + img).reshape(-1, 5)
        photo_check = np.loadtxt(path_check + img_check).reshape(-1, 7, 3)
        # Keep only cloud-free images that have a traffic label for that date.
        if photo_check[pd.isnull(photo_check)].shape[0] == 0:
            if date in list(master_df.index):
                if not pd.isna(master_df.loc[date][st]):
                    states[st][0].append(photo)
                    states[st][1].append(master_df.loc[date][st])

# + id="5vcGTuzU9e_-"
device = torch.cuda.current_device()

# + id="YwaoAPNZCL5_"
def gen_around(x, y):
    """Return (x, y) plus its 8 neighbours on a 10-unit pixel grid (queen adjacency)."""
    return [(x, y),
            (x, y + 10), (x, y - 10), (x + 10, y), (x - 10, y),
            (x + 10, y + 10), (x + 10, y - 10), (x - 10, y + 10), (x - 10, y - 10)]


def gen_around_strict(x, y):
    """Return (x, y) plus its 4 axis-aligned neighbours (rook adjacency)."""
    return [(x, y), (x, y + 10), (x, y - 10), (x + 10, y), (x - 10, y)]


def neighbors(road, coords, x, y, diagonal=True):
    """Node indices of the grid neighbours of (x, y) that actually exist in *coords*."""
    cand = gen_around(x, y) if diagonal else gen_around_strict(x, y)
    neigh = []
    for px, py in cand:
        if px in coords and py in coords[px]:
            neigh.append(coords[px][py]['idx'])
    return neigh


def src_dst(road, coords, diagonal=True):
    """Build parallel (src, dst) edge lists connecting each pixel to its grid neighbours.

    *road* is the (n_pixels, 5) array; *coords* maps lon -> lat -> {'idx', 'val'}.
    """
    src, dst = [], []
    for row in range(road.shape[0]):
        x = road[row][4]
        y = road[row][3]
        idx = coords[x][y]['idx']
        val = coords[x][y]['val']
        # Sanity check: the lookup table must point back at this row's band values.
        assert val[0] == road[row][:3][0]
        for c in neighbors(road, coords, x, y, diagonal):
            src.append(idx)
            dst.append(c)
    return src, dst

# + id="JvAgaV_xZdyG"
# FIX: `gdl.data.save_graphs` was a typo (NameError) with the arguments reversed.
# dgl.save_graphs takes the filename first and a list of DGLGraphs second.
dgl.save_graphs("Rois", Road_Graphs.graphs)

# + id="wkkwwUqtXT3H"
type(Road_Graphs)

# + id="0uaXVQDu0qcy"
class RoadDataset(DGLDataset):
    """DGL dataset of per-image road-pixel graphs labelled with daily traffic counts."""

    def __init__(self, states):
        self.states = states
        super().__init__(name='road_graphs')

    def process(self):
        self.graphs = []
        self.labels = []
        self.state = []
        # for st in self.states.keys():
        for st in ["CA"]:
            print(st)
            for i in range(len(self.states[st][0])):
                print(i)
                # FIX: read via self.states, not the global `states` the
                # original accidentally closed over.
                img = self.states[st][0][i]
                coords = {}
                vals = []
                for j in range(img.shape[0]):
                    lon = img[j][4].astype(int)
                    lat = img[j][3].astype(int)
                    val = img[j][:3]
                    vals.append(val)
                    if lon not in coords:
                        coords[lon] = {}
                    coords[lon][lat] = {'idx': j, 'val': val}
                src, dst = src_dst(img, coords)
                graph = dgl.graph((src, dst), num_nodes=img.shape[0])
                graph.ndata['feat'] = torch.from_numpy(np.array(vals))
                self.graphs.append(graph)
                self.labels.append(self.states[st][1][i])
                self.state.append(st)

    def __getitem__(self, i):
        return self.graphs[i], self.labels[i]

    def __len__(self):
        return len(self.graphs)

# + id="FRawP-N7wUim"
Road_Graphs = RoadDataset(states)

# + id="SZPiZXWzrtsh"
# Generate a synthetic dataset with 10000 graphs, ranging from 10 to 500 nodes.
# dataset = dgl.data.GINDataset('PROTEINS', self_loop=True)
dataset = Road_Graphs

# + id="3unrPE2qwrsM"
print('Node feature dimensionality:', 3)  # Road_Graphs.dim_nfeats
# FIX: RoadDataset has no `gclasses` (that is a GINDataset attribute), so the
# original line raised AttributeError; guard the lookup for this diagnostic.
print('Number of graph categories:', getattr(dataset, 'gclasses', 'n/a (regression)'))

# + id="_OgB-inrdghN"
dataset = Road_Graphs

# + [markdown] id="I0YHApYN0jWP"
# ## Train the Model

# + id="kbVTkI3bwvT3"
from dgl.dataloading import GraphDataLoader
from torch.utils.data.sampler import SubsetRandomSampler

# + id="V7qBnByRxH3O"
# 80/20 split by index; SubsetRandomSampler shuffles within each subset.
num_examples = len(dataset)
num_train = int(num_examples * 0.8)

train_sampler = SubsetRandomSampler(torch.arange(num_train))
test_sampler = SubsetRandomSampler(torch.arange(num_train, num_examples))

train_dataloader = GraphDataLoader(
    dataset, sampler=train_sampler, batch_size=5, drop_last=False)
test_dataloader = GraphDataLoader(
    dataset, sampler=test_sampler, batch_size=5, drop_last=False)

# + id="3XfChGC6xKWr"
it = iter(train_dataloader)
batch = next(it)
print(batch)

# + id="joeec78KxMec"
batched_graph, labels = batch
print('Number of nodes for each graph element in the batch:', batched_graph.batch_num_nodes())
print('Number of edges for each graph element in the batch:', batched_graph.batch_num_edges())

# Recover the original graph elements from the minibatch
graphs = dgl.unbatch(batched_graph)
print('The original graphs in the minibatch:')
print(graphs)

# + id="TIU9IeqIyhd9"
from dgl.nn import GraphConv, DenseGraphConv


class GCN(nn.Module):
    """Three-layer graph-convolution regressor with graph-level mean pooling."""

    def __init__(self, in_feats, h1_feats, h2_feats, num_classes):
        super(GCN, self).__init__()
        self.conv1 = GraphConv(in_feats, h1_feats)
        self.conv2 = GraphConv(h1_feats, h2_feats)
        self.conv3 = GraphConv(h2_feats, num_classes)

    def forward(self, g, in_feat):
        h = F.relu(self.conv1(g, in_feat))
        h = F.relu(self.conv2(g, h))
        h = self.conv3(g, h)
        g.ndata['h'] = h
        # Mean over nodes yields one prediction per graph in the batch.
        return dgl.mean_nodes(g, 'h')

# + id="PmF1t0AbxxJ7"
# device = torch.cuda.current_device()

# + id="gMeuMXKZypn2"
# Create the model with given dimensions
model = GCN(3, 100, 100, 1)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

for epoch in range(10):
    loss_tot = 0.0
    batches = 0
    for batched_graph, labels in train_dataloader:
        pred = model(batched_graph, batched_graph.ndata['feat'].float())
        loss = criterion(pred, labels.reshape(labels.shape[0], 1).float())
        loss_tot += loss.item()
        batches += 1
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(loss_tot / batches)

# + id="X9utp4Ey1XsY"
x = []
y = []
# FIX: `loss` previously carried over the last *training* loss tensor and was
# accumulated on top of it, inflating the reported test loss.
loss = 0.0
num_tests = 0
for batched_graph, labels in test_dataloader:
    pred = model(batched_graph, batched_graph.ndata['feat'].float())
    loss += criterion(pred, labels.reshape(labels.shape[0], 1).float()).item()
    # Renamed comprehension variables so they no longer shadow the x/y lists.
    x.extend(p[0] for p in pred.detach().numpy().tolist())
    y.extend(l[0] for l in labels.reshape(labels.shape[0], 1).detach().numpy().tolist())
    num_tests += 1
print('Test loss:', loss / num_tests)

# + id="x605KQ2mj2Eo"
batched_graph

# + id="PugjXSMJj6iw"
labels

# + id="ZPnl5jOhhpdt"
[p[0] for p in pred.detach().numpy().tolist()]

# + id="xoWxWKuehdRv"
import matplotlib.pyplot as plt
plt.scatter(x, y)

# + id="I-3ubHl_1LHW"
x_temp = y
y_temp = x

# + id="J6lzJzSm1MCJ"
# Drop the first label larger than 1200 (a single extreme outlier) together
# with its paired prediction; only one pair is removed, as in the original.
for i in range(len(y_temp)):
    if y_temp[i] > 1200:
        print(len(x_temp))
        x_temp.pop(i)
        y_temp.pop(i)
        break

# + id="OkwUWvEDiee0"
# print(y_temp)
x_plot = np.array(y_temp)
y_plot = np.array(x_temp)
new_x = x_plot.reshape(-1, 1)
new_y = y_plot
fit = LinearRegression().fit(new_x, new_y)
score = fit.score(new_x, new_y)
plt.xlabel("Prediction")
plt.ylabel("Actual Traffic")
print(score)
plt.scatter(new_x, new_y)
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = x_vals  # y = x reference line
plt.plot(x_vals, y_vals, '--')
pre_y = fit.predict(new_x)
plt.plot(new_x, pre_y)
plt.plot(x_vals, y_vals, '--')
# plt.savefig("Just_GCN_Cali_fit.png")
plt.show()

# + id="Bd8bGEthjF1G"
y

# + id="4tmDsY2uyvVX"
labels

# + id="rEdW2WZmzto4"
Notebooks/GCN_Implement.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 03.1: Visualizing Variables # # This lab is presented with some revisions from [<NAME> at Cal Poly](https://web.calpoly.edu/~dsun09/index.html) and his [Data301 Course](http://users.csc.calpoly.edu/~dsun09/data301/lectures.html) # # ### When you have filled out all the questions, submit via [Tulane Canvas](https://tulane.instructure.com/) # The classic expression, "A picture is worth a thousand words," applies to data science. A graphic usually conveys more information (and more efficiently) than the summary statistics we examined in the previous section. In this section, you will learn how to produce simple graphics for quantitative and categorical variables. # ## Graphics in the Jupyter Notebook # # By default, Python outputs graphics to a new window. But what if we want a record of the graphic? We could save the graphic to an image file, but then the graphic and the code that generated it would live in different files. If these files were to ever get separated, then it may be difficult to regenerate the graphic. In other words, the standard Python workflow is not **reproducible**. # # Jupyter notebooks support a reproducible workflow, by allowing graphics to be embedded directly in a notebook. Now, the graphic and the code that generated it live in the same file, adjacent to one another. To make graphics show up in the Jupyter notebook, we have to specify that `matplotlib` (the main graphics library in Python) should output the graphic to the "inline" backend, as opposed to, for example, a backend that makes the graphic appear in a new window. To specify a backend for `matplotlib`, we run a so-called **magic command** (or just **magic**, for short). Magic commands modify the behavior of a notebook or an individual cell. 
For example, the `%timeit` magic, which we will use later in this book, times how long it takes to run a line of code. You can recognize magics because they are preceded by `%` or `%%`. For a full list of magics, consult [the documentation](https://ipython.readthedocs.io/en/stable/interactive/magics.html). # # The `%matplotlib` magic below allows you to specify a backend. In general, if you plan to create graphics in the Jupyter notebook, then the following magic should be the first line in your notebook. # %matplotlib inline # ## Visualizing Quantitative Variables # # Graphics can help us understand how the values of a quantitative variable are distributed. We will study two types of visualizations for quantitative variables: histograms and densities. # # ### Histograms # # The standard visualization for a single quantitative variable is the **histogram**. A histogram sorts the values into bins and uses bars to represent the number of values in each bin. # # To make a histogram, we call the `.plot.hist()` method of the selected variable. All of the plotting functions in `pandas` are preceded by `.plot`. import pandas as pd df = pd.read_csv("../data/titanic.csv") display(df[0:5]) df.fare.plot.hist() # It seems that we did not get enough resolution to really determine what is going on at the lower end of the scale. Let's request more bins. df.fare.plot.hist(bins=50) # From this graph, we see a concentration of values around 10-30 (which we previously identified as the "center") and a spread of about 30-50 (which we previously identified as the "spread"). We also see the outlier who paid more than £500. We also see features that were not obvious before: the skewed shape of the distribution, the gap between £300 and £500, and so on. This single picture has managed to convey more information than a dozen summary statistics. # We might want to plot more than one histogram on the same graphic to make for easy comparison. 
To do this, we simply make multiple calls to plotting functions within the same cell. For example, if we wanted to compare the distributions of the number of siblings/spouses and the number of parents/children that accompanied passengers, we could call `.plot.hist()` twice. df.sibsp.plot.hist() df.parch.plot.hist() # Notice that `pandas` automatically plotted the two histograms using different colors. There are two problems with this plot. First, we don't know which color corresponds to which variable. Second, we cannot see the blue histogram underneath the orange histogram because the colors are opaque. # # To solve the first problem, we add a legend for each variable by specifying `legend=True`. To solve the second problem, we set the transparency `alpha`, which is a number between 0 and 1, with 0 being perfectly transparent and 1 being completely opaque. Try varying `alpha` to get a feel for what it does. df.sibsp.plot.hist(legend=True, alpha=.5) df.parch.plot.hist(legend=True, alpha=.5) # The parents/children histogram is higher at 0 and 2, but the sibling/spouse histogram is higher at 1. This makes sense because # # 1. There were many childless couples on the Titanic (i.e., 0 children). # 2. You can only have 1 spouse, but many children travel with 2 parents. # ### Densities # # Another way to visualize the distribution of a quantitative variable is by plotting its **density**. A density plot turns the jagged histogram into a smooth curve, allowing the user to focus on the general shape of the distribution. df.fare.plot.density() # The x-axis is too wide. (You cannot have negative fares.) We can set the limits of the x-axis manually using the `xlim` argument. df.fare.plot.density(xlim=(0, 600)) # Let's superimpose this density curve on top of the histogram, by making two calls to plotting functions: df.fare.plot.hist(bins=50) df.fare.plot.density(xlim=(0, 600)) # If you squint at this plot, you will see an orange line at the bottom of the plot. 
This is supposed to be the density. But why does it appear as a flat line? The y-axis offers a hint. When we made the density plot earlier, the y-axis extended from 0 to about 0.02. Now the y-axis extends all the way to 500. On such a scale, a curve that fluctuates between 0 and 0.02 will appear to be a flat line! # # The problem is that the histogram and the density are currently on different scales. By default, histograms display counts, while densities are defined so that the total area under the curve is 1. To be able to display a histogram and density on the same graph, we have to normalize the histogram so that the total area of the bars is 1. We can do this by setting the option `density=True`. df.fare.plot.hist(bins=50, density=True) df.fare.plot.density(xlim=(0, 600)) # Now we can clearly see how the density smooths the histogram. It does a pretty good job for the most part, but it "oversmooths" near 0, missing the spike. # You might wonder what options are available besides `legend`, `alpha`, `bins`, `density`, and `xlim`. Let's look at the documentation for `.plot.hist()`. # + # df.fare.plot.hist? # - # The documentation for `.plot.hist()` tells us about one option, `bins`, but then refers us to `.plot()` for additional options. If you peek inside `.plot()`, you will find the `legend`, `density`, and `xlim` options, as well as many others. # + # df.fare.plot? # - # Try these out to get a feel for what they do. For example, can you figure out how to make the density curve red in the above graph? Or how to change the size of the figure? # # You might be surprised that `alpha` is not listed among the options above. This is because `alpha` is an option of the `matplotlib` plotting method that `pandas` calls. (Notice that the last argument of `.plot()` is `**kwds`, for "options to pass to matplotlib plotting method".) # ## Visualizing Categorical Variables # # To visualize a categorical variable, we simply need to represent the frequency of each category. 
We will focus on two types of visualizations for categorical variables: bar graphs and pie charts. # # ### Bar Graphs # # In a **bar graph**, each bar represents a category, and the height of the bar represents the frequency of that category. # # `pandas` does not produce a bar graph directly from a categorical variable. In order to make a bar graph, the variable must be in the form of a `Series` whose index is the categories and whose values are the frequencies. This is exactly what we get when we call `.value_counts()` on a categorical variable. # # So making a bar graph requires two steps: # 1. Get the frequency of each category using `.value_counts()`. # 2. Call `.plot.bar()` on these frequencies. # # For example, here is a bar graph showing how many passengers are in each class. pclass_counts = df.pclass.value_counts() pclass_counts.plot.bar() # The bar graph makes it clear how many more 3rd class passengers there were than 1st and 2nd class passengers. Bar graphs make it easy to compare categories to each other. # Although bar graphs superficially look similar to histograms (they both have bars), they are not the same. The x-axis on a histogram is a numeric scale, but the x-axis on a bar graph is not, since a bar graph is used to represent categorical variables and the categories are often unordered. For this reason, the bars in a bar graph are typically separated by gaps, to emphasize that the bars do not fall on any particular scale. By contrast, a histogram will not have gaps between the bars, unless there is actually a gap in the data. # The bars in a bar graph are also not guaranteed to be ordered in any particular way. `pandas` will plot the bars in the same order as they appeared in the `Series`. Since `.value_counts()` sorts the categories by frequency, the bar graph was sorted in the same order. # In the case of the passenger class, we probably want the bars in the order 1st, 2nd, 3rd. 
# To do this, we can sort the index of the value counts before passing them to `.plot.bar()`.

pclass_counts.sort_index(inplace=True)
pclass_counts.plot.bar()
pclass_counts

# Sometimes, a bar graph is easier to read if the bars are horizontal instead of vertical. For example, when the labels are long, arranging the bars horizontally allows the labels to be printed left-to-right instead of bottom-to-top. To make horizontal bars, just add an `h` to the end of `.plot.bar`:
Try to make sure your graphic has the days of the week in the right order, starting from Sunday (this is a bit tricky and there are several ways to do this....) order = ["Sun", "Mon", "Tues", "Wed", "Thur", "Fri", "Sat"] party_count = tips.day.value_counts() party_count.loc[order].plot.bar() party_count # i included Mon-Wed because it felt wrong just skipping them # **Exercise 3.** The `.plot.box()` command makes a box plot. A box plot is another way to visualize the distribution of a quantitative variable. Make a box plot of the total bills. Which summary statistics can you read off from this graphic? (You may want to refresh your memory about [how box plots are made](http://www.physics.csbsju.edu/stats/box2.html) or read the [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html).) tips.total_bill.plot.box() # Based on where the box is, I can see that the median spent on the bill is roughly $17, the first quartile is around $12, # the third quartile is around $25, and there are several suspected outliers past the inner fence ranging from $40 to $50. # **Exercise 4.** What happens when you call `.plot.pie()` on a quantitative variable, like `tip`? Why does the pie chart look this way? Do you think pie charts are appropriate for quantitative variables? tips.tip.plot.pie() # it looks terrible. The pie chart looks this way because every slice of the pie is a different "category", which for the # tip variable is too broad. Instead of having a selection to choose from, it has to plot every unique tip it sees. It is not # a great idea to use pie charts for quantitative variables, it seems. # **Exercise 5.** Make a graphic that shows the distribution of party sizes served by the waiter. tips.rename(columns={"size":"party"}, inplace=True) tips.party.plot.hist() # I would have chosen a different representation, but this seemed to make the distribution the clearest. 
# ### When you have filled out all the questions, submit via [Tulane Canvas](https://tulane.instructure.com/)
_labs/Lab03/Lab03.1-VisualizingVariables.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### create figure for hierarchy example
# 1. get latent distances vs sequential distances
# 2. get MI of markov model
# 3. get MI of recursively generated sequence - store the average latent distance for each of the sequential distances
# 4. Plot latent distance by MI
# 5. plot sequential distance by MI

import numpy as np
import pandas as pd
from scipy import random as sr
import matplotlib.pyplot as plt

from childes_mi.utils.paths import DATA_DIR, FIGURE_DIR, ensure_dir
from childes_mi.utils.general import flatten, save_fig


# +
def gen_balanced_matrix(na=5, ps=[0.7, 0.2, 0.1]):
    """Generate a balanced transition matrix in which every state can reach
    every other state, for hierarchical and Markov models.

    Arguments:
        na -- number of states (alphabet size)
        ps -- probabilities to distribute across each row

    Returns:
        an (na, na) column-normalized probability matrix, or the string
        "Generation Failed" if no balanced assignment is found within
        1000 random attempts.
    """
    for r in range(1000):
        breakme = False
        probs = np.zeros((na, na))
        for p in ps:
            for i in np.arange(na):
                # candidate columns: still empty in this row, and not yet
                # holding probability p in any row
                ixloc = np.where(
                    (probs[i, :] == 0) & (np.sum(probs != p, axis=0) == na)
                )[0]
                if len(ixloc) > 0:
                    probs[i, np.random.permutation(ixloc)[0]] = p
                else:
                    # the initialization didn't work
                    breakme = True
        if breakme:
            continue
        probs = probs / np.sum(probs, axis=0)
        return probs
    return "Generation Failed"


def gen_seq_hierarchical(alphabet, probs, depth, n_subsamples):
    """generates a sequence via the Lin Tegmark recursive model

    Arguments:
        alphabet {[type]} -- [alphabet of states]
        probs {[type]} -- [probability matrix for recursive subsampling]
        depth {[type]} -- [how many times to recursively subsample]
        n_subsamples {[type]} -- [the number of new elements to recursively
            replace old elements with]

    Returns:
        sequence [type] -- [sequence of elements]
    """
    # seed element drawn with probability proportional to row mass
    sequence = np.random.choice(
        alphabet, p=np.sum(probs, axis=1) / np.sum(probs), size=1
    )
    if type(depth) == list:
        depth = np.random.choice(depth)
    depth_list = range(depth)
    for i in depth_list:
        q = np.random.choice(n_subsamples)
        sequence = subsample_sequence(sequence, probs, q, alphabet)
    return sequence


def subsample_sequence(sequence, probs, q, alphabet):
    """subsamples a sequence given a probability matrix

    given a sequence, resamples each element in that sequences given a
    probability matrix of sequence element to new elements

    Arguments:
        sequence {[type]} -- input sequence
        probs {[type]} -- the probability matrix
        q {[type]} -- the number of items to subsample
    """
    return [
        item
        for sublist in [
            np.random.choice(alphabet, p=probs[:, i], size=q) for i in sequence
        ]
        for item in sublist
    ]
# -

# how many branches to sample in hierarchical
n_subsamples = [2]
# how many subsamples to perform
depth = 20
# alphabet size
a_n = 2
alphabet = np.arange(a_n)
# how many sequences to use
nseq = 1

print('seq len ', (np.mean(n_subsamples) ** depth))

# generate probbility matrix
probs = gen_balanced_matrix(na=a_n, ps=[.9, .1])

probs

# distances of MI to sample at
distances = np.arange(1, 101)

sequences = gen_seq_hierarchical(alphabet, probs, depth, n_subsamples=n_subsamples)

len(sequences)

sequences[:10]

from childes_mi.information_theory import mutual_information as mi

(MI, MI_var), (shuff_MI, shuff_MI_var) = mi.sequential_mutual_information(
    [sequences], distances=distances, n_jobs=-1
)

MI_DF = pd.DataFrame(
    [[MI, MI_var, shuff_MI, shuff_MI_var, distances]],
    columns=["MI", "MI_var", "shuff_MI", "shuff_MI_var", "distances"],
)

row = MI_DF.iloc[0]

# +
fig, ax = plt.subplots(figsize=(10, 5))
# shuffled MI is subtracted as a bias correction
MI = row.MI - row.shuff_MI
MI_var = row.MI_var
ax.scatter(distances, MI)
ax.fill_between(distances, MI - MI_var, MI + MI_var, alpha=0.25, color='k')
ax.set_yscale('log')
ax.set_xscale('log')
# -

latent_vs_sequential_distance = pd.read_pickle('latent_vs_sequential_distance.pickle')

latent_vs_sequential_distance[:3]

mean_latent_distances = latent_vs_sequential_distance.loc[distances].latent_distances['mean'].values

nplot = 100

np.max(mean_latent_distances[:nplot])

from matplotlib.ticker import StrMethodFormatter, NullFormatter

# +
fig, ax = plt.subplots(figsize=(5, 5))
MI = row.MI - row.shuff_MI
MI_var = row.MI_var
ax.scatter(mean_latent_distances[:nplot], MI[:nplot], color='k')
ax.fill_between(mean_latent_distances[:nplot], MI[:nplot] - MI_var[:nplot],
                MI[:nplot] + MI_var[:nplot], alpha=0, color='k')
#ax.plot(mean_latent_distances[:nplot], MI[:nplot], alpha = 1, color= 'k', lw=5)
ax.set_yscale('log')
ax.set_xscale('log')
#ax.set_xlim([1,50])
ax.set_xlabel('Sequential distance', fontsize=18)
ax.set_ylabel('Mutual Information (bits)', fontsize=18)
for axis in ['top', 'bottom', 'left', 'right']:
    ax.spines[axis].set_linewidth(3)
    ax.spines[axis].set_color('k')
ax.grid(False)
ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
ax.tick_params(which='major', length=10, width=3)
ax.tick_params(which='minor', length=5, width=2)
ax.set_xticks([4, 8, 16])
ax.set_xticklabels(['4', '8', '16'])
ax.xaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
ax.xaxis.set_minor_formatter(NullFormatter())

# +
fig, ax = plt.subplots(figsize=(5, 5))
MI = row.MI - row.shuff_MI
MI_var = row.MI_var
nplot = 50
ax.scatter(distances[:nplot], MI[:nplot], color='k')
ax.fill_between(distances[:nplot], MI[:nplot] - MI_var[:nplot],
                MI[:nplot] + MI_var[:nplot], alpha=0, color='k')
#ax.plot(mean_latent_distances[:nplot], MI[:nplot], alpha = 1, color= 'k', lw=5)
ax.set_yscale('log')
ax.set_xscale('log')
#ax.set_xlim([1,50])
ax.set_xlabel('Sequential distance', fontsize=18)
ax.set_ylabel('Mutual Information (bits)', fontsize=18)
for axis in ['top', 'bottom', 'left', 'right']:
    ax.spines[axis].set_linewidth(3)
    ax.spines[axis].set_color('k')
ax.grid(False)
ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
ax.tick_params(which='major', length=10, width=3)
ax.tick_params(which='minor', length=5, width=2)
ax.set_xticks([1, 10, 50])
ax.set_xticklabels(['1', '10', '50'])
ax.xaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
ax.xaxis.set_minor_formatter(NullFormatter())
# -

# #### fit decay

# +
import lmfit


def residuals(y_true, y_model, x, logscaled=False):
    # Absolute residuals; the log-scaled variant compares in log space and
    # down-weights larger x by 1 / log(1 + x).
    if logscaled:
        return np.abs(np.log(y_true) - np.log(y_model)) * (1 / (np.log(1 + x)))
    else:
        return np.abs(y_true - y_model)


def model_res(p, x, y, fit, model):
    # Residual function handed to lmfit.Minimizer; any fit other than "lin"
    # uses log-scaled residuals.
    if fit == "lin":
        return residuals(y, model(p, x), x)
    else:
        return residuals(y, model(p, x), x, logscaled=True)


# fitting model
def fit_model_iter(model, n_iter=10, **kwargs):
    """re-fit model n_iter times and choose the best fit

    chooses method based upon best-fit
    """
    models = []
    AICs = []
    for iter in np.arange(n_iter):
        results_model = model.minimize(**kwargs)
        models.append(results_model)
        AICs.append(results_model.aic)
    return models[np.argmin(AICs)]


def get_y(model, results, x):
    # Evaluate a decay model at x with the fitted parameter values.
    return model({i: results.params[i].value for i in results.params}, x)


def exp_decay(p, x):
    return p["e_init"] * np.exp(-x * p["e_decay_const"]) + p["intercept"]


# decay types
def powerlaw_decay(p, x):
    return p["p_init"] * x ** (p["p_decay_const"]) + p["intercept"]


p_exp = lmfit.Parameters()
p_exp.add_many(
    ("e_init", 0.5, True, 1e-10),
    ("e_decay_const", 0.1, True, 1e-10),
    ("intercept", 1e-5, True, 1e-10),
)

p_power = lmfit.Parameters()
p_power.add_many(
    ("p_init", 0.5, True, 1e-10),
    ("p_decay_const", -0.5, True, -np.inf, -1e-10),
    ("intercept", 1e-5, True, 1e-10),
)
# -

# #### Fit exponential

fit = 'log'
n_iter = 1
method = ["nelder", "leastsq", "least-squares"]
d = mean_latent_distances[:nplot]
sig = MI[:nplot]

# +
results_exp_min = lmfit.Minimizer(
    model_res, p_exp, fcn_args=(d, sig, fit, exp_decay), nan_policy="omit"
)
# try each optimizer and keep the fit with the lowest AIC
results_exp = [
    fit_model_iter(results_exp_min, n_iter=n_iter, **{"method": meth})
    for meth in method
]
results_exp = results_exp[np.argmin([i.aic for i in results_exp])]
results_exp
y_exp = get_y(exp_decay, results_exp, d)

# +
fig, ax = plt.subplots(figsize=(5, 5))
MI = row.MI - row.shuff_MI
MI_var = row.MI_var
nplot = 50
ax.scatter(mean_latent_distances[:nplot], MI[:nplot], color='k')
ax.fill_between(mean_latent_distances[:nplot], MI[:nplot] - MI_var[:nplot],
                MI[:nplot] + MI_var[:nplot], alpha=0, color='k')
ax.plot(mean_latent_distances[:nplot], y_exp, alpha=0.5, color='k', lw=5)
ax.set_yscale('log')
ax.set_xscale('log')
#ax.set_xlim([1,50])
ax.set_xlabel('Latent distance', fontsize=18)
ax.set_ylabel('Mutual Information (bits)', fontsize=18)
for axis in ['top', 'bottom', 'left', 'right']:
    ax.spines[axis].set_linewidth(3)
    ax.spines[axis].set_color('k')
ax.grid(False)
ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
ax.tick_params(which='major', length=10, width=3)
ax.tick_params(which='minor', length=5, width=2)
ax.set_xticks([4, 8, 16])
ax.set_xticklabels(['4', '8', '16'])
ax.xaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
ax.xaxis.set_minor_formatter(NullFormatter())

ensure_dir(FIGURE_DIR / 'model_fig')
save_fig(FIGURE_DIR / 'model_fig' / 'latent-decay')

# +
fig, ax = plt.subplots(figsize=(5, 5))
MI = row.MI - row.shuff_MI
MI_var = row.MI_var
nplot = 50
ax.scatter(mean_latent_distances[:nplot], MI[:nplot], color='k')
ax.fill_between(mean_latent_distances[:nplot], MI[:nplot] - MI_var[:nplot],
                MI[:nplot] + MI_var[:nplot], alpha=0, color='k')
ax.plot(mean_latent_distances[:nplot], y_exp, alpha=0.5, color='k', lw=5)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('Latent distance', fontsize=18)
ax.set_ylabel('Mutual Information (bits)', fontsize=18)
for axis in ['top', 'bottom', 'left', 'right']:
    ax.spines[axis].set_linewidth(3)
    ax.spines[axis].set_color('k')
ax.grid(False)
ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
ax.tick_params(which='major', length=10, width=3)
ax.tick_params(which='minor', length=5, width=2)
ax.set_xticks([4, 8, 16])
ax.set_xticklabels(['4', '8', '16'])
ax.set_xlim([1, 50])
ax.xaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
ax.xaxis.set_minor_formatter(NullFormatter())
# -

# #### Fit powerlaw

fit = 'log'
n_iter = 1
method = ["nelder", "leastsq", "least-squares"]
d = distances[:nplot]
sig = MI[:nplot]

# +
results_power_min = lmfit.Minimizer(
    model_res, p_power, fcn_args=(d, sig, fit, powerlaw_decay), nan_policy="omit",
)
results_power = [
    fit_model_iter(results_power_min, n_iter=n_iter, **{"method": meth})
    for meth in method
]
results_power = results_power[np.argmin([i.aic for i in results_power])]
y_power = get_y(powerlaw_decay, results_power, d)

# +
fig, ax = plt.subplots(figsize=(5, 5))
MI = row.MI - row.shuff_MI
MI_var = row.MI_var
nplot = 50
ax.scatter(d[:nplot], MI[:nplot], color='k')
ax.fill_between(d[:nplot], MI[:nplot] - MI_var[:nplot],
                MI[:nplot] + MI_var[:nplot], alpha=0, color='k')
ax.plot(d[:nplot], y_power, alpha=0.5, color='k', lw=5)
ax.set_yscale('log')
ax.set_xscale('log')
#ax.set_xlim([1,50])
ax.set_xlabel('Sequential distance', fontsize=18)
ax.set_ylabel('Mutual Information (bits)', fontsize=18)
for axis in ['top', 'bottom', 'left', 'right']:
    ax.spines[axis].set_linewidth(3)
    ax.spines[axis].set_color('k')
ax.grid(False)
ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
ax.tick_params(which='major', length=10, width=3)
ax.tick_params(which='minor', length=5, width=2)
ax.set_xticks([1, 10, 50])
# fixed: labels previously read ['4', '8', '50'], which did not match the ticks
ax.set_xticklabels(['1', '10', '50'])
ax.set_xlim([0.95, 50])
ax.xaxis.set_major_formatter(StrMethodFormatter('{x:.0f}'))
ax.xaxis.set_minor_formatter(NullFormatter())

ensure_dir(FIGURE_DIR / 'model_fig')
save_fig(FIGURE_DIR / 'model_fig' / 'powerlaw-decay')
# -
notebooks/hierarchy-model/hierarchical model MI.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Generating data
#
# The purpose of this notebook is to generate and save some common datasets for training/testing. This way, results can easily be compared across different models.

# +
import numpy as np
from itertools import product
import random
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
# -

# ## Directories

code_dir = Path('../Code')
data_dir = code_dir / Path('data')
random_dir = data_dir / Path('random')
random_zero_padded_dir = data_dir / Path('random_zero_padded')
sum_strat_dir = data_dir / Path('sum_strat')
uniform_sum_dir = data_dir / Path('uniform_sum')

# ## Generation functions
#
# Functions copy-pasted from my code.

# ### Utils functions

# +
def one_hot(n, max_value):
    # One-hots a positive integer n where n < max_value (n indexes into a
    # vector of length max_value)
    one_hot_n = np.zeros(max_value)
    one_hot_n[n] = 1
    return one_hot_n


def undo_one_hot(v):
    # If an integer is one-hot encoded using the one_hot function above,
    # return the integer n
    return np.argmax(v)


def one_hot_matrix(M, max_value):
    # Given a matrix M of size (n_samples, n_ints) return the matrix
    # one-hotted. The return matrix is of size (n_samples, n_ints, max_value)
    n_samples, seq_length = M.shape
    M_oh = np.array([one_hot(r, max_value) for r in np.array(M).flatten()]).reshape(
        (n_samples, seq_length, max_value))
    # In case this is a target vector, we don't want to include an unnecessary axis
    return np.squeeze(M_oh)


def undo_one_hot_matrix(M, decoder_map):
    # Given a matrix M of size (n_samples, timesteps, vocab_size) coming from
    # one_hot_matrix, return the list of decoded strings that were encoded.
    decoded_list = []
    for i in range(M.shape[0]):
        decoded = ''
        sample = M[i]
        for ts in range(sample.shape[0]):
            decoded += decoder_map[undo_one_hot(sample[ts])]
        decoded_list.append(decoded)
    return decoded_list


def char_to_int_map(max_value=9, min_value=0):
    # Integer encoding for the addition vocabulary: digits, '+', start ('\t'),
    # end ('\n'), and padding (' ').
    char_to_int = {str(n): n for n in range(min_value, max_value + 1)}
    n_terms = max_value - min_value + 1
    char_to_int['+'] = n_terms
    char_to_int['\t'] = n_terms + 1
    char_to_int['\n'] = n_terms + 2
    char_to_int[' '] = n_terms + 3
    return char_to_int


def input_seq_length(n_terms, n_digits):
    # Given an addition sequence with n_terms terms each with n_digits, return
    # how many characters the (non-padded) resulting input string can be
    # (maximum possible length)
    # n_digits for each term, and n_terms - 1 "plus signs", along with an
    # end-of-string character \n and a start-of-string character \t
    return n_terms * n_digits + (n_terms - 1) + 1


def target_seq_length(n_terms, n_digits):
    # Given an addition sequence with n_terms terms each with n_digits, return
    # how many characters the (non-padded) resulting output string can be
    # (maximum possible length)
    # All terms except the final +2 come from simple algebra computing the max
    # number of digits possible. The final +1 comes from the start-of-sequence
    # character \t that is prepended to all target sequences.
    # The inital +1 comes from the \n appended
    return 1 + n_digits + 1 + int(np.floor(np.log10(n_terms))) + 1


def reverse_dict(d):
    return {v: k for k, v in d.items()}


int_to_char = reverse_dict(char_to_int_map())


def decode_sample(x, decoder_map, one_hot=False):
    # Given an array with integer encoding (or optionally one-hot encoding),
    # decode it into a string.
    # NOTE(review): for a (timesteps, vocab) one-hot sample, undo_one_hot
    # returns a single flattened argmax, not one index per timestep — confirm
    # this branch is ever used with one_hot=True.
    if one_hot:
        x = undo_one_hot(x)
    return ''.join([decoder_map[s] for s in x])


def decode_matrix(X, decoder_map, one_hot=False):
    # Given a matrix with integer encoding (or optionally one-hot encoding),
    # decode it into a list of strings
    if one_hot:
        # fixed: undo_one_hot_matrix requires decoder_map (the call previously
        # omitted it and raised TypeError); it already returns the fully
        # decoded strings, so return them directly.
        return undo_one_hot_matrix(X, decoder_map)
    decoded_strs = [decode_sample(x, decoder_map) for x in X]
    return decoded_strs


def zero_pad(X, y=None):
    # Generate all possible insertions of a zero to keep the sum the same
    # while allowing to generalize to more terms. Assumes X is a numpy array
    # of strings. If you want to update a y array so that shapes are the same,
    # pass it as well.
    # NOTE(review): implementation unfinished in the original notebook — the
    # loop body only splits each sample and nothing is returned. Unused by the
    # cells below; complete before relying on it.
    x_padded = []
    for x in X:
        x_list = x.split('+')
# -

# ### Helper functions

# +
def _generate_sample(n_terms, n_digits, allow_less_terms=False, zero_pad_to=None):
    # Generate a sample of the form "number_1+number_2+...+number_{n_terms}=answer"
    # NOTE(review): np.random.randint's upper bound is exclusive, so
    # 10 ** n_digits - 1 (e.g. 99) is never drawn — confirm whether the max
    # value was meant to be included (generate_all_samples does include it).
    x = []
    if allow_less_terms:
        for _ in range(np.random.randint(2, n_terms + 1)):
            x.append(np.random.randint(10 ** n_digits - 1))
    else:
        for _ in range(n_terms):
            x.append(np.random.randint(10 ** n_digits - 1))
    if zero_pad_to is not None:
        assert isinstance(zero_pad_to, int) and zero_pad_to > n_terms, 'zero_pad_to must be an integer greater than n_terms'
        # zero_pad_sample is defined elsewhere in the project
        x = zero_pad_sample(x, zero_pad_to)
    y = np.sum(x)
    x_str = '+'.join(str(n) for n in x)
    y_str = str(y)
    return x_str.strip(), y_str.strip()


def _generate_sample_from_y(n_terms, n_digits, y, zero_pad_to=None):
    # Generates a sample which sums to y (used to uniformly distribute the sums)
    x = []
    while len(x) < n_terms - 1:
        # Don't allow it to pick a number causing sum(x) to exceed y, but also
        # subject to the restriction of n_digits.
        # Also, don't allow it to pick such a small number that it would be
        # impossible for the remaining terms to be chosen to sum to y (for
        # example, if y = 150 and n_terms = 2, n_digits = 2, we can't pick 49,
        # or else you would need 101 to sum to y.
        y_upper_bound = y - np.sum(x)
        n_digits_upper_bound = 10 ** n_digits - 1
        upper_bound = min([y_upper_bound, n_digits_upper_bound])
        lower_bound = (y - np.sum(x) - (10 ** n_digits - 1) * (n_terms - len(x) - 1))
        lower_bound = max([0, lower_bound])
        if upper_bound > 0:
            x.append(np.random.randint(lower_bound, upper_bound + 1))
        else:
            x.append(0)
    x.append(y - np.sum(x))
    random.shuffle(x)
    if zero_pad_to is not None:
        assert isinstance(zero_pad_to, int) and zero_pad_to > n_terms, 'zero_pad_to must be an integer greater than n_terms'
        x = zero_pad_sample(x, zero_pad_to)
    x_str = '+'.join(str(n) for n in x)
    y_str = str(y)
    return x_str.strip(), y_str.strip()


def _format_sample(x_str, y_str, n_terms, n_digits, int_encoder=None, reverse=False):
    # Format a sample of the form "number_1+number_2+...+number_{n_terms}=answer".
    # Each number_i has n_digits digits.
    # If a dictionary is passed for int_encoder then use the it to convert
    # characters to integers (so for instance convert '3' to 3 or '+' to 12)
    if reverse:
        x_str = x_str[::-1]
    # Prepend an end-of-sequence character \n and for the target append a
    # start-of-sequence character \t
    x_str = x_str + '\n'
    y_str = '\t' + y_str + '\n'
    # Pad x so that is always has the same length.
    max_input_digits = input_seq_length(n_terms, n_digits)
    x_str = x_str.ljust(max_input_digits)
    max_target_digits = target_seq_length(n_terms, n_digits)
    y_str = y_str.ljust(max_target_digits)
    if int_encoder is not None:
        assert isinstance(int_encoder, dict), 'int_encoder must be a dictionary mapping characters to integers'
        x_list = [int_encoder[c] for c in x_str]
        y_list = [int_encoder[c] for c in y_str]
    return x_list, y_list


def _generate_samples(n_samples, n_terms=2, n_digits=2, int_encoder=None, one_hot=False,
                      reverse=False, allow_less_terms=False, zero_pad_to=None):
    # Generate n_samples examples of addition problems as defined in
    # _generate_sample above
    X = []
    y = []
    for _ in range(n_samples):
        x_str, y_str = _generate_sample(n_terms, n_digits,
                                        allow_less_terms=allow_less_terms,
                                        zero_pad_to=zero_pad_to)
        x_sample, y_sample = _format_sample(x_str, y_str, n_terms, n_digits,
                                            int_encoder, reverse)
        X.append(x_sample)
        y.append(y_sample)
    X = np.array(X)
    y = np.array(y)
    if one_hot:
        X = one_hot_matrix(X, len(int_encoder))
        y = one_hot_matrix(y, len(int_encoder))
    return X, y


def _generate_uniform_samples(n_samples, n_terms=2, n_digits=2, int_encoder=None,
                              one_hot=False, reverse=False, zero_pad_to=None):
    # Generate samples uniformly w.r.t. the sum
    max_sum = (10 ** n_digits - 1) * n_terms
    possible_sums = range(max_sum + 1)
    X = []
    y = []
    for _ in range(n_samples):
        x_str, y_str = _generate_sample_from_y(n_terms, n_digits,
                                               np.random.choice(possible_sums),
                                               zero_pad_to=zero_pad_to)
        x_sample, y_sample = _format_sample(x_str, y_str, n_terms, n_digits,
                                            int_encoder, reverse)
        # (removed a leftover debugging assert that hard-coded
        # len(x_sample) == 6, which only holds for one n_terms/n_digits
        # configuration)
        X.append(x_sample)
        y.append(y_sample)
    X = np.array(X)
    y = np.array(y)
    if one_hot:
        X = one_hot_matrix(X, len(int_encoder))
        y = one_hot_matrix(y, len(int_encoder))
    return X, y


# +
def generate_samples(n_samples, n_terms=2, n_digits=2, int_encoder=None, one_hot=False,
                     reverse=False, allow_less_terms=False, uniform=False):
    # Generate n_samples series with conditions n_terms and n_digits
    if uniform:
        X, y = _generate_uniform_samples(n_samples, n_terms, n_digits, int_encoder,
                                         one_hot, reverse)
    else:
        X, y = _generate_samples(n_samples, n_terms, n_digits, int_encoder,
                                 one_hot, reverse, allow_less_terms)
    return np.array(X), np.array(y)


def generate_all_samples(n_terms=2, n_digits=2, int_encoder=None, one_hot=False, reverse=False):
    # Generate ALL possible integer addition problems with conditions n_terms
    # and n_digits
    X = []
    y = []
    x_all = range(10 ** n_digits)
    x_cartesian = list(product(x_all, repeat=n_terms))
    for x in x_cartesian:
        x_str = '+'.join([str(a) for a in x])
        y_str = str(sum(x))
        x_str = x_str.strip()
        y_str = y_str.strip()
        x_sample, y_sample = _format_sample(x_str, y_str, n_terms, n_digits,
                                            int_encoder, reverse)
        X.append(x_sample)
        y.append(y_sample)
    assert len(X) == 10 ** (n_digits * n_terms), "You didn't generate all possible problems..."
    X = np.array(X)
    y = np.array(y)
    if one_hot:
        X = one_hot_matrix(X, len(int_encoder))
        y = one_hot_matrix(y, len(int_encoder))
    return X, y
# -

# ## All samples

# ### Helper functions

def create_df(X, dataset, one_hot=False):
    assert dataset.lower() in ['train', 'test', 'validation'], 'Dataset must be one of "train", "test", "validation"'
    # Note that y is not needed, since we can infer the sum from X
    # Split into columns for each term
    string_col = decode_matrix(X, int_to_char, one_hot=one_hot)
    df = pd.DataFrame({'string': string_col})
    summands_df = df['string'].str.split('+', expand=True)
    summands_df.columns = [f'term_{i}' for i in range(summands_df.shape[1])]
    summands_df[summands_df.columns[-1]] = summands_df[summands_df.columns[-1]].str.replace('\n', '')
    df = df.join(summands_df)
    # Clean up the columns
    cols = list(df.columns)
    cols.remove('string')
    for c in cols:
        df[c] = df[c].str.strip('\n')
        df[c] = df[c].str.strip()
        df[c] = df[c].astype(int)
    df['sum'] = sum([df[c] for c in cols])
    if dataset.lower() == 'train':
        df['set'] = 'Train'
    elif dataset.lower() == 'test':
        df['set'] = 'Test'
    elif dataset.lower() == 'validation':
        df['set'] = 'Validation'
    return df


def save_full_df(X_train, X_test, path, fname):
    # Combine train/test splits into one labelled DataFrame and write it to CSV.
    df_train = create_df(X_train, dataset='train')
    df_test = create_df(X_test, dataset='test')
    df = pd.concat([df_train, df_test])
    if not isinstance(path, Path):
        path = Path(path)
    if not isinstance(fname, Path):
        fname = Path(fname)
    df.to_csv(path / fname, index=False)


def train_test_from_idx(X, y, train_idx, test_idx=None, save=True, path=None, return_arrays=False):
    # Split X/y by explicit index arrays; the test indices default to the
    # complement of the train indices.
    if test_idx is None:
        test_idx = list(set(range(X.shape[0])) - set(train_idx))
    X_train = X[train_idx]
    y_train = y[train_idx]
    X_test = X[test_idx]
    y_test = y[test_idx]
    if save and (path is not None):
        np.save(path / Path('X_train.npy'), X_train)
        np.save(path / Path('X_test.npy'), X_test)
        np.save(path / Path('y_train.npy'), y_train)
        np.save(path / Path('y_test.npy'), y_test)
    if return_arrays:
        return X_train, X_test, y_train, y_test


# ### 2 terms, 2 digits

# +
n_terms = 3
n_digits = 2

model_dir = Path(f'{n_terms}term_{n_digits}digs')
random_dir = random_dir / model_dir
uniform_sum_dir = uniform_sum_dir / model_dir
sum_strat_dir = sum_strat_dir / model_dir
# -

X, y = generate_all_samples(n_terms, n_digits, char_to_int_map(), one_hot=False)

X.shape

y.shape

df = create_df(X, dataset='train')

df.head()

# ## Split randomly
#
# Randomly split the data into training and testing (70% train, 30% test).

X_2_2_train, X_2_2_test, y_2_2_train, y_2_2_test = train_test_split(X, y, test_size=0.3)

np.save(random_dir / Path('X_train.npy'), X_2_2_train)
np.save(random_dir / Path('X_test.npy'), X_2_2_test)
np.save(random_dir / Path('y_train.npy'), y_2_2_train)
np.save(random_dir / Path('y_test.npy'), y_2_2_test)

save_full_df(X_2_2_train, X_2_2_test, random_dir, 'df.csv')

# ## Stratify by sum
#
# Stratify the data by the sum, so that a fixed percentage of series summing to each value is kept.

df_sample = df.sample(frac=0.7, weights='sum', random_state=1)

df_sample.head()

train_idx = np.array(df_sample.index)
test_idx = np.array(list(set(df.index) - set(train_idx)))

X_train, X_test, _, _ = train_test_from_idx(X, y, train_idx, test_idx,
                                            path=sum_strat_dir, return_arrays=True)

save_full_df(X_train, X_test, sum_strat_dir, 'df.csv')

# ## Uniform by sum
#
# Sample (with replacement) so that each sum appears an equal number of times. From there, randomly sample some data for the test set.

df['sum'].value_counts().max()

df_over = df.groupby('sum').sample(100, replace=True)

# TODO(review): df_over is computed above but never used — this split is taken
# from the original (non-oversampled) X/y, so the "uniform by sum" dataset is
# not actually uniform. Confirm intent before relying on these files.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

save_full_df(X_train, X_test, uniform_sum_dir, 'df.csv')

np.save(uniform_sum_dir / Path('X_train.npy'), X_train)
np.save(uniform_sum_dir / Path('X_test.npy'), X_test)
np.save(uniform_sum_dir / Path('y_train.npy'), y_train)
np.save(uniform_sum_dir / Path('y_test.npy'), y_test)
Notebooks/Generating data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: unseen
#     language: python
#     name: unseen
# ---

# ## Event definition
#
# ### Time selection
# For the UK, the event of interest is UK February average precipitation. Since we download monthly averages, we do not have to do any preprocessing along the time dimension here. For the Siberian heatwave, we are interested in the March-May average. Therefore we need to take the seasonal average of the monthly timeseries.

# ### Spatial selection
# From grid to country-averaged timeseries.
#
# In this notebook we explore how to best extract areal averaged precipitation and test this for UK precipitation within SEAS5 and EOBS, as part of our UNSEEN-open [workflow](../Workflow.ipynb). The code is inspired on <NAME>'s [blog](http://www.matteodefelice.name/post/aggregating-gridded-data/) -- credits to him!
#
# We create a mask for all 241 countries within [Regionmask](https://regionmask.readthedocs.io/en/stable/#), that has predefined countries from [Natural Earth datasets](http://www.naturalearthdata.com) (shapefiles). We use the mask to go from gridded precipitation to country-averaged timeseries. We start with UK, number 31 within the country mask.

# #### Import packages
#
# We need the packages regionmask for masking and xesmf for regridding. I cannot install xesmf into the UNSEEN-open environment without breaking my environment, so in this notebook I use a separate 'upscale' environment, as suggested by this [issue](https://github.com/JiaweiZhuang/xESMF/issues/47#issuecomment-582421822). I use the packages esmpy=7.1.0 xesmf=0.2.1 regionmask cartopy matplotlib xarray numpy netcdf4.

##This is so variables get printed within jupyter
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

# +
##import packages
import os
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import matplotlib.ticker as mticker
import regionmask  # Masking
import xesmf as xe  # Regridding
# -

##We want the working directory to be the UNSEEN-open directory
pwd = os.getcwd()  ##current working directory is UNSEEN-open/Notebooks/1.Download
pwd  #print the present working directory
os.chdir(pwd + '/../../')  # Change the working directory to UNSEEN-open
os.getcwd()  #print the working directory

# #### Load SEAS5 and EOBS
#
# From CDS, we retrieve SEAS5 in notebook [1.2 Retrieve](1.Download/1.2Retrieve.ipynb) and concatenate the retrieved files in notebook [1.3 Merge](1.Download/1.3Merge.ipynb). We create a netcdf file containing the dimensions lat, lon, time (35 years), number (25 ensembles) and leadtime (5 initialization months).

SEAS5 = xr.open_dataset('../UK_example/SEAS5/SEAS5.nc')

SEAS5

# And load EOBS netcdf with only February precipitation, resulting in 71 values, one for each year within 1950 - 2020 over the European domain (25N-75N x 40W-75E).

EOBS = xr.open_dataset('../UK_example/EOBS/EOBS.nc')

EOBS

# #### Masking
# Here we load the countries and create a mask for SEAS5 and for EOBS.

# Regionmask has predefined countries from [Natural Earth datasets](http://www.naturalearthdata.com) (shapefiles).

countries = regionmask.defined_regions.natural_earth.countries_50

countries

# Now we create the mask for the SEAS5 grid. Only one timestep is needed to create the mask. This mask will lateron be used to mask all the timesteps.

SEAS5_mask = countries.mask(SEAS5.sel(leadtime=2, number=0, time='1982'),
                            lon_name='longitude', lat_name='latitude')

# And create a plot to illustrate what the mask looks like. The mask just indicates for each gridcell what country the gridcell belongs to.

SEAS5_mask

SEAS5_mask.plot()

# #### Extract spatial average
# And now we can extract the UK averaged precipitation within SEAS5 by using the mask index of the UK: `where(SEAS5_mask == UK_index)`.

# So we need to find the index of one of the 241 abbreviations. In this case for the UK use 'GB'. Additionally, if you can't find a country, use `countries.regions` to get the full names of the countries.

countries.abbrevs.index('GB')

# To select the UK average, we select SEAS5 precipitation (tprate), select the gridcells that are within the UK and take the mean over those gridcells. This results in a dataset of February precipitation for 35 years (1981-2016), with 5 leadtimes and 25 ensemble members.

SEAS5_UK = (SEAS5['tprate']
            .where(SEAS5_mask == 31)
            .mean(dim=['latitude', 'longitude']))

SEAS5_UK

# However, xarray does not take into account the area of the gridcells in taking the average. Therefore, we have to calculate the [area-weighted mean](http://xarray.pydata.org/en/stable/examples/area_weighted_temperature.html) of the gridcells. To calculate the area of each gridcell, I use cdo `cdo gridarea infile outfile`. Here I load the generated file:

Gridarea_SEAS5 = xr.open_dataset('../UK_example/Gridarea_SEAS5.nc')

Gridarea_SEAS5['cell_area'].plot()

SEAS5_UK_weighted = (SEAS5['tprate']
                     .where(SEAS5_mask == 31)
                     .weighted(Gridarea_SEAS5['cell_area'])
                     .mean(dim=['latitude', 'longitude'])
                     )

SEAS5_UK_weighted

# What is the difference between the weighted and non-weighted average?
#
# I plot the UK average for ensemble member 0 and leadtime 2

SEAS5_UK.sel(leadtime=2, number=0).plot()
SEAS5_UK_weighted.sel(leadtime=2, number=0).plot()

# And a scatter plot of all ensemble members, leadtimes and years also shows little influence

plt.scatter(SEAS5_UK.values.flatten(), SEAS5_UK_weighted.values.flatten())

# ### EOBS
# Same for EOBS. Because this is a larger domain on higher resolution, there are more countries and they look more realistic.

EOBS_mask = countries.mask(EOBS.sel(time='1982'), lon_name='longitude', lat_name='latitude')

EOBS_mask.plot()

EOBS_mask

Gridarea_EOBS = xr.open_dataset('../UK_example/Gridarea_EOBS.nc')

Gridarea_EOBS['cell_area'].plot()

EOBS_UK_weighted = (EOBS['rr']
                    .where(EOBS_mask == 31)
                    .weighted(Gridarea_EOBS['cell_area'])
                    .mean(dim=['latitude', 'longitude'])
                    )

EOBS_UK_weighted

EOBS_UK_weighted.plot()

# #### Save the UK weighted average datasets
#

SEAS5_UK_weighted.to_netcdf('Data/SEAS5_UK_weighted.nc')
EOBS_UK_weighted.to_netcdf('Data/EOBS_UK_weighted.nc')  ## save as netcdf
EOBS_UK_weighted.to_pandas().to_csv('Data/EOBS_UK_weighted.csv')  ## and save as csv.

SEAS5_UK_weighted.close()
EOBS_UK_weighted.close()

# #### Illustrate the SEAS5 and EOBS masks for the UK
#
# Here I plot the masked mean SEAS5 and EOBS precipitation. EOBS is averaged over 71 years, SEAS5 is averaged over years, leadtime and ensemble members.

# +
fig, axs = plt.subplots(1, 2, subplot_kw={'projection': ccrs.OSGB()})

SEAS5['tprate'].where(SEAS5_mask == 31).mean(
    dim=['time', 'leadtime', 'number']).plot(
    transform=ccrs.PlateCarree(), vmin=0, vmax=8, cmap=plt.cm.Blues, ax=axs[0])

EOBS['rr'].where(EOBS_mask == 31).mean(dim='time').plot(
    transform=ccrs.PlateCarree(), vmin=0, vmax=8, cmap=plt.cm.Blues, ax=axs[1])

for ax in axs.flat:
    ax.coastlines(resolution='10m')

axs[0].set_title('SEAS5')
axs[1].set_title('EOBS')
# -

# #### Illustrate the SEAS5 and EOBS UK average
#
# And the area-weighted average UK precipitation for SEAS5 and EOBS I plot here. For SEAS5 I plot the range, both min/max and the 2.5/97.5 % percentile of all ensemble members and leadtimes for each year.

# +
ax = plt.axes()
Quantiles = SEAS5_UK_weighted.quantile([0, 2.5 / 100, 0.5, 97.5 / 100, 1],
                                       dim=['number', 'leadtime'])
ax.plot(Quantiles.time, Quantiles.sel(quantile=0.5), color='orange', label='SEAS5 median')
ax.fill_between(Quantiles.time.values, Quantiles.sel(quantile=0.025),
                Quantiles.sel(quantile=0.975), color='orange', alpha=0.2,
                label='95% / min max')
ax.fill_between(Quantiles.time.values, Quantiles.sel(quantile=0),
                Quantiles.sel(quantile=1), color='orange', alpha=0.2)
EOBS_UK_weighted.plot(ax=ax, x='time', label='E-OBS')
# Quantiles_EOBS = EOBS['rr'].where(EOBS_mask == 143).mean(dim = ['latitude','longitude']).quantile([2.5/100, 0.5, 97.5/100], dim=['time'])#.plot()
# ax.plot(EOBS.time, np.repeat(Quantiles_EOBS.sel(quantile=0.5).values,71), color='blue',linestyle = '--',linewidth = 1)
# ax.plot(EOBS.time, np.repeat(Quantiles_EOBS.sel(quantile=2.5/100).values,71), color='blue',linestyle = '--',linewidth = 1)
# ax.plot(EOBS.time, np.repeat(Quantiles_EOBS.sel(quantile=97.5/100).values,71), color='blue',linestyle = '--',linewidth = 1)
plt.legend(loc='lower left', ncol=2)  #loc = (0.1, 0) upper left
# -

# ### Potential improvements
#
# There are many different sources and methods available for extracting areal-averages from shapefiles. Here I have used shapely / masking in xarray. Something that lacks with this method is the weighted extraction from a shapefile, that is more precise on the boundaries. In R, raster:extract can use the percentage of the area that falls within the country for each grid cell to use as weight in averaging. For more information on this method, see the EGU 2018 course: https://github.com/hydrosoc/rhydro_EGU18/blob/master/netCDF.pdf. For SEAS5, with its coarse resolution, this might make a difference. However, for it's speed and reproducibility, we have chosen to stick to xarray.
#
# We have used xarray where you can apply weights yourself to a dataset and then calculate the weighted mean. Sources I have used: [xarray weighted reductions](http://xarray.pydata.org/en/stable/computation.html#weighted-array-reductions) [Matteo's blog](http://www.matteodefelice.name/post/aggregating-gridded-data/) [regionmask package](https://regionmask.readthedocs.io/en/stable/#) [Arctic weighted average example](https://nordicesmhub.github.io/NEGI-Abisko-2019/training/Example_model_global_arctic_average.html) and [area weighted temperature example](http://xarray.pydata.org/en/stable/examples/area_weighted_temperature.html).
#
# Instead of xarray, the areal weighing can be done in iris: https://www.wemcouncil.org/wp/wemc-tech-blog-4-5-calculating-nuts2-regional-averages-with-land-sea-mask/. It still doesn't solve the problem with country borders. Since I don't know iris and have been working in xarray, I have not further explored that method.
#
# Two options we could still explore to improve the extraction:
#
# 1. Do the extraction in R and compare it to the current xarray method.
# 2. Upscale EOBS to the SEAS5 grid and then mask: We could regrid EOBS to the SEAS5 grid and then select the same gridcells for both datasets. The country outline would not be perfect, but the masks would be the same so the comparison would be fair. I think this method is most commonly applied in climate science, where you regrid all your xx datasets to the same grid so you can compare them. Pro: pragmatic. Con: not a perfect country definition. I explore this in [1.5Upscale]

# Source: This pretty [awesome colab notebook](https://colab.research.google.<KEY>=B8PyoC1vXvd-) on seasonal forecasting regrids seasonal forecasts and reanalysis on the same grid before calculating skill scores
#
doc/_build/.doctrees/nbsphinx/Notebooks/2.Preprocess/2.2Mask.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

# +
import os

# Limit which GPUs are visible to the ML framework; must be set before the
# detector/model is created.
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

import numpy as np
from PIL import Image, ImageDraw
import cv2
import time

from face_detector import FaceDetector
# -

MODEL_PATH = 'model.pb'
face_detector = FaceDetector(MODEL_PATH, gpu_memory_fraction=0.25, visible_device_list='0')

# # Get an image

# +
path = '/home/gpu2/hdd/dan/WIDER/WIDER_train/images/48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_972.jpg'
image_array = cv2.imread(path)
# OpenCV loads BGR; convert to RGB for PIL and the detector.
image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image_array)
image
# -

# # Show detections


def draw_boxes_on_image(image, boxes, scores):
    """Return a copy of `image` with detection boxes and scores drawn on it.

    Parameters
    ----------
    image : PIL.Image
    boxes : iterable of (ymin, xmin, ymax, xmax) pixel coordinates
    scores : iterable of detection confidences, parallel to `boxes`
    """
    image_copy = image.copy()
    draw = ImageDraw.Draw(image_copy, 'RGBA')
    for b, s in zip(boxes, scores):
        ymin, xmin, ymax, xmax = b
        fill = (255, 0, 0, 45)   # translucent red
        outline = 'red'
        draw.rectangle(
            [(xmin, ymin), (xmax, ymax)],
            fill=fill, outline=outline
        )
        draw.text((xmin, ymin), text='{:.3f}'.format(s))
    return image_copy


boxes, scores = face_detector(image_array, score_threshold=0.3)
draw_boxes_on_image(Image.fromarray(image_array), boxes, scores)

# # Measure speed

# +
# Time repeated detections; the first 10 runs are discarded as warm-up.
times = []
for _ in range(110):
    start = time.perf_counter()
    boxes, scores = face_detector(image_array, score_threshold=0.25)
    times.append(time.perf_counter() - start)

times = np.array(times)
times = times[10:]
print(times.mean(), times.std())
.ipynb_checkpoints/try_detector-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# # Kriging example
#
# Since the global data base we used cannot be shared, we demonstrate using freely
# available data from Assumpcao et al. (2013) for South America, how the codes
# can be used.
#
# For simplicity's sake we did not use two different categories here, but focused
# on the continental area instead, by simply discarding all points where the Moho
# depth is less than 30 km.

# +
# NOTE: this notebook targets a Python 2 kernel (see kernelspec above); the
# `print` statement in cluster_map() below is Python 2 syntax.
import numpy as np
import matplotlib.pyplot as plt
import clean_kriging
import sklearn.cluster as cluster
from func_dump import get_pairwise_geo_distance
import logging
logging.basicConfig(level=logging.DEBUG)


def test_cluster_size(point_data,max_size,do_plot=False,chosen_range=None,
                      perc_levels=20):
    """Test effect of number of clusters on cluster radius and size.

    For every cluster count from 5 up to max_size-1, the points are grouped by
    complete-linkage agglomerative clustering on their pairwise geographic
    distance, and the per-cluster radii (half the maximum intra-cluster
    distance) are summarized.

    Parameters
    ----------
    point_data : array with longitude in column 0 and latitude in column 1
    max_size : largest cluster count (exclusive) to test
    do_plot : if True, plot min/max/median radius plus percentile bands
    chosen_range : target median radius in km; if given, return the cluster
        count whose median radius is closest to it
    perc_levels : number of percentile bands to compute
    """
    cluster_sizes = range(5,max_size,1)
    radius_1 = np.zeros((len(cluster_sizes),3))
    # NOTE(review): cluster_N is allocated but never filled or returned here.
    cluster_N = np.zeros((len(cluster_sizes),3))
    percentages = np.zeros((len(cluster_sizes),perc_levels+1))
    X = point_data
    Xsel = X
    # Pairwise geographic distances between all points (degrees).
    pd = get_pairwise_geo_distance(Xsel[:,0],Xsel[:,1])

    for k,n_clusters in enumerate(cluster_sizes):
        model = cluster.AgglomerativeClustering(linkage='complete',affinity='precomputed',n_clusters=n_clusters)
        model.fit(pd)
        radius = np.zeros((n_clusters))
        cluster_members = np.zeros((n_clusters))
        for i,c in enumerate(np.unique(model.labels_)):
            ix = np.where(model.labels_==c)[0]
            # Radius = half the largest distance between any two cluster members.
            radius[i] = 0.5*pd[np.ix_(ix,ix)].max()
            cluster_members[i] = np.sum(model.labels_==c)
        r1i,r1a,r1s = (radius.min(),radius.max(),radius.std())
        radius_1[k,0] = r1i
        radius_1[k,1] = r1a
        radius_1[k,2] = np.median(radius)
        percentages[k,:] = np.percentile(radius,np.linspace(0,100,perc_levels+1))
    # Convert from degrees to kilometers (factor ~110 km/degree).
    radius_1 = radius_1*110.0
    percentages = percentages*110.0
    if do_plot:
        plt.plot(cluster_sizes,radius_1)
        for i in range(perc_levels):
            # Fade the percentile bands towards the extremes.
            if i<perc_levels/2:
                alpha = (i+1)*2.0/perc_levels
            else:
                alpha = (perc_levels-i)*2.0/perc_levels
            plt.fill_between(cluster_sizes,percentages[:,i],percentages[:,i+1],
                             alpha=alpha,facecolor='green',edgecolor='none')
    if not chosen_range is None:
        return cluster_sizes[np.argmin(np.abs(radius_1[:,2]-chosen_range))]


def cluster_map(krigor):
    """Visualize the spatial distribution of the krigor's clusters.

    Each data point is connected by a line to its cluster center; centers are
    drawn as squares sized by the number of members.
    """
    fig = plt.figure(figsize=(7,11))
    Xsel = krigor.X
    model = krigor.cluster_results[0]
    n_clusters = model.n_clusters
    cmap = plt.cm.get_cmap("jet",n_clusters)
    clu = model.cluster_centers_
    pointsize = np.sqrt(np.bincount(model.labels_))
    for i in range(len(Xsel)):
        j = model.labels_[i]
        # Skip connector lines that would wrap around the +/-180 degree meridian.
        if (Xsel[i,0]*clu[j,0])<0 and np.abs(np.abs(clu[j,0])-180.0) < 10.0:
            continue
        plt.plot((Xsel[i,0],clu[j,0]),(Xsel[i,1],clu[j,1]),color=cmap(model.labels_[i]),alpha=0.5)
    print clu.shape,n_clusters,pointsize.shape
    plt.scatter(clu[:,0],clu[:,1],7.5*pointsize,np.linspace(0,n_clusters,n_clusters),'s',
                alpha=1.0,cmap=cmap,edgecolor='r',linewidth=1.5)
    plt.scatter(Xsel[:,0],Xsel[:,1],2,model.labels_,cmap=cmap,alpha=1.0,edgecolor='k')
    plt.axis('equal')
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    #plt.xlim([-90,-20])
# -

# ## Data input
# We load the file shipped together with this example. See the inside of the files for references to the sources.

# +
point_data = np.loadtxt("Seismic_Moho_Assumpcao.txt",delimiter=",")
# Flip sign and scale by 1/1000 — presumably the file stores depth as
# negative meters; after this, column 2 is depth in km, positive down.
# TODO confirm against the data file header.
point_data[:,2] = -0.001*point_data[:,2]
# Keep only continental points (Moho deeper than 30 km, see intro above).
point_data = point_data[point_data[:,2]>30.0,:]
# 1-degree target grid spanning the data extent.
lon = np.arange(np.round(point_data[:,0].min()),np.round(point_data[:,0].max()+1),1)
lat = np.arange(np.round(point_data[:,1].min()),np.round(point_data[:,1].max()+1),1)
lonGrid,latGrid = np.meshgrid(lon,lat)
# -

# ## Prior specification
# We want to use inverse gamma priors for nugget, sill and range. The inverse gamma distribution is defined in terms of the parameters $\alpha$ and $\beta$, which we derive here from a specified mean and variance.
# # $$\mu = \mathrm{Mean} = \frac{\beta}{\alpha-1} \quad \text{and}\quad \sigma^2= \mathrm{var} = \frac{\beta^2}{(\alpha-1)^2(\alpha-2)}$$ # # Thus, # # $$ # \alpha = 2 + \frac{\mu^2}{\sigma^2} \quad \text{and}\quad\beta = \frac{\mu^3}{\sigma^2} + \mu # $$ # # The variable `moments` contains mean and variance for all nugget, sill and range. The last dimension of `moments` would be used, if there are different categories (i.e. ocean vs. continent), but in this example this is not required. moments = np.zeros((3,2,1)) moments[:,:,0] = np.array(((1.0,3.0**2),(40.0,40.0**2),(10.0,10.0**2))) beta = moments[:,0,:]**3/moments[:,1,:]+moments[:,0,:] alpha = 2 + moments[:,0,:]**2 / moments[:,1,:] # ## Clustering # All important routines are contained in objects of the class `MLEKrigor`. Such an object is created by passing it longitude,latitude,value and category. In this example, all category values are simply zero. Any clustering algorithm from the scikit-learn package can be used. Any options contained in the dictionary `clusterOption` will be passed to the constructor. # # After clustering, the covariance parameters for all clusters are determined (`krigor._fit_all_clusters`). 
# +
cat = np.ones((point_data.shape[0]),dtype=int)  # single category for all points
krigor = clean_kriging.MLEKrigor(point_data[:,0],point_data[:,1],point_data[:,2],cat)

clusterOptions=[{'linkage':'complete','affinity':'precomputed','n_clusters':16}]
krigor._cluster_points(cluster.AgglomerativeClustering,options=clusterOptions,use_pd=True)
krigor._detect_dupes()
# Fit covariance parameters per cluster with the inverse-gamma priors
# (alpha/beta derived from `moments` above).
krigor._fit_all_clusters(minNugget=0.5,minSill=1.0,
                         hyperpars=np.dstack((alpha,beta)),prior="inv_gamma",maxRange=None)

# Settings reused by the outlier-detection / kriging steps below.
krigDict = {"threshold":1,"lambda_w":1.0,"minSill":1.0,
            "minNugget":0.5,
            "maxAbsError":4.0,"maxRelError":2.0,"badPoints":None,
            "hyperPars":np.dstack((alpha,beta)),"prior":"inv_gamma",
            "blocks":10}
# -

cluster_map(krigor)

# In this map, the individual points are connected with lines to their respective cluster center

# ## Outlier detection
# This is the most time-consuming step. The routine `jacknife` performs the hold-one-out cross validation to detect possible outliers. Two criteria are used to determine if a point is an outlier.
#
# 1. The **absolute** prediction error needs to be 4 km or more.
# 2. The prediction error is twice as high as the estimated error.
#
# This is controlled by the variables `maxAbsErr` and `maxRelErr` passed to the function `jacknife`. The third parameter ($\lambda_w$) controls how the covariance parameters are interpolated.
#
# There are two rounds of outlier detection (see main text for explanation).

# +
# Round 1: cross-validate, keep only unflagged points, refit the clusters.
sigma1,new_chosen = krigor.jacknife(4.0,2.0,100.0)
krigor.chosen_points = new_chosen.copy()
krigor._fit_all_clusters(minNugget=0.5,minSill=1.0,
                         hyperpars=krigDict["hyperPars"],prior="inv_gamma",maxRange=None)
# Round 2: repeat on the cleaned data set.
sigma2,new_new_chosen = krigor.jacknife(4.0,2.0,100.0)
krigor.chosen_points = new_new_chosen.copy()
krigor._fit_all_clusters(minNugget=0.5,minSill=1.0,
                         hyperpars=krigDict["hyperPars"],prior="inv_gamma",maxRange=None)
# -

# ## Interpolation
# To run the actual interpolation, the `predict` method of the `MLEKrigor` is used. It takes longitude, latitude and category as main input. In addition, $\lambda_w$ needs to be specified. This mainly affects the obtained uncertainties. If desired, the full covariance matrix can also be calculated, but due to memory constraints, by default only the variance (main diagonal) is computed.
#
# Note that `predict` does not respect the shape of the input points and the output needs to be reshaped. Furthermore, the **variance** of the error is returned (to be compatible with the full covariance case) not the standard deviation!

# +
cat_grid = np.ones(lonGrid.shape,dtype=int)
# predict() works on flat coordinate arrays; reshape back to the grid after.
pred,krigvar,predPars = krigor.predict(lonGrid.flatten(),latGrid.flatten(),cat_grid.flatten(),
                                       lambda_w=100.0,get_covar=False)
pred = pred.reshape(lonGrid.shape)
krigvar = krigvar.reshape(lonGrid.shape)

# +
plt.figure()
plt.contourf(lonGrid,latGrid,pred)
cbar = plt.colorbar()
cbar.set_label('Moho depth [km]')
plt.axis('equal')
plt.xlabel('Longitude')
plt.ylabel('Latitude')

plt.figure()
# Plot the standard deviation (sqrt of the returned variance).
plt.contourf(lonGrid,latGrid,np.sqrt(krigvar))
cbar = plt.colorbar()
cbar.set_label('Moho uncertainty [km]')
plt.axis('equal')
# -

# Note that in this case, one should not interpret results in the oceanic domain, since they were excluded from the interpolation.
.ipynb_checkpoints/example-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import numpy as np from metpy.cbook import get_test_data from metpy.calc import get_wind_components, lcl, dry_lapse, parcel_profile from metpy.plots import SkewT from metpy.units import units, concatenate # - # Parse the data p, T, Td, direc, spd = np.loadtxt(get_test_data('may3_sounding.txt'), usecols=(0, 2, 3, 6, 7), unpack=True) # Add units to the data arrays p = p * units.mbar T = T * units.degC Td = Td * units.degC spd = spd * units.knot direc = direc * units.deg # Convert wind speed and direction to components u, v = get_wind_components(spd, direc) # + # Create a new figure. The dimensions here give a good aspect ratio fig = plt.figure(figsize=(9, 9)) skew = SkewT(fig, rotation=45) # Plot the data using normal plotting functions, in this case using # log scaling in Y, as dictated by the typical meteorological plot skew.plot(p, T, 'r') skew.plot(p, Td, 'g') skew.plot_barbs(p, u, v) skew.ax.set_ylim(1000, 100) skew.ax.set_xlim(-40, 60) # Calculate LCL height and plot as black dot l = lcl(p[0], T[0], Td[0]) lcl_temp = dry_lapse(concatenate((p[0], l)), T[0])[-1].to('degC') skew.plot(l, lcl_temp, 'ko', markerfacecolor='black') # Calculate full parcel profile and add to plot as black line prof = parcel_profile(p, T[0], Td[0]).to('degC') skew.plot(p, prof, 'k', linewidth=2) # Example of coloring area between profiles skew.ax.fill_betweenx(p, T, prof, where=T>=prof, facecolor='blue', alpha=0.4) skew.ax.fill_betweenx(p, T, prof, where=T<prof, facecolor='red', alpha=0.4) # An example of a slanted line at constant T -- in this case the 0 # isotherm l = skew.ax.axvline(0, color='c', linestyle='--', linewidth=2) # Add the relevant special lines skew.plot_dry_adiabats() skew.plot_moist_adiabats() skew.plot_mixing_lines() 
# Show the plot plt.show()
examples/notebooks/Advanced_Sounding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.7 ('base')
#     language: python
#     name: python3
# ---

# Exploratory look at daily city temperatures: load the data set, drop
# sentinel bad readings, and plot seasonal patterns for Israel plus
# per-country monthly statistics.

import pandas as pd

df = pd.read_csv("../datasets/city_temperature.csv", parse_dates=['Date'])
df.head()

df.shape

# Drop sentinel/bad readings (large negative temperatures). The original
# cell applied this identical filter twice; once is sufficient.
df = df[df.Temp > -60]

# 'Date' is already parsed by read_csv(parse_dates=...), so the redundant
# .astype("datetime64") cast is dropped.
df["Dayofyear"] = df["Date"].dt.dayofyear

df_israel = df[df["Country"] == "Israel"]
df_israel.plot(kind='scatter', x='Dayofyear', y='Temp', c="Year", colormap='Paired')

import numpy as np

# Monthly mean/std of temperature in Israel (first rows only).
x = df_israel[['Temp', 'Month']].groupby(['Month']).agg(['mean', 'std']).head()

# Monthly climatology for Israel with std error bars.
df_israel.groupby('Month').Temp.mean().plot(kind="bar", yerr=df_israel.groupby('Month').Temp.std())

df.groupby(['Country', 'Month']).Temp.agg(['std', 'mean']).plot.line()

x = df.groupby(['Country', 'Month']).mean()
# NOTE(review): this assigns a whole DataFrame of means into one column;
# presumably only the Temp column was intended — confirm.
x["mean_temp"] = df.groupby(['Country', 'Month']).mean()

df.groupby(['Country', 'Month']).plot(kind='line', x='Dayofyear', y='Temp', c="Country", colormap='Paired')

# NOTE(review): the means are grouped by ['Country', 'Month'] but the error
# bars by 'Month' only — the indexes do not align; verify intended grouping.
df.groupby(['Country', 'Month']).Temp.mean().plot(kind="bar", yerr=df.groupby('Month').Temp.std())
exercises/city.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Discrimination by Regression

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


def safelog(x):
    """Logarithm guarded against log(0); currently unused in this notebook."""
    return(np.log(x + 1e-100))
# -

# ## Read Data

# +
# 5 letter classes x 39 samples each, flattened 16x20 binary images (320 px).
images = np.genfromtxt('hw02_data_set_images.csv',delimiter=',').reshape(5, 39, 320)
labels = np.char.strip(np.genfromtxt('hw02_data_set_labels.csv',delimiter=',',dtype=str),'"').reshape(5,39)
# Map letters A..E to class ids 1..5.
for index, key in enumerate(['A', 'B', 'C', 'D', 'E']):
    labels[labels == key] = index + 1
labels = labels.astype(int)

# +
# First 25 samples per class for training, remaining 14 per class for testing.
training_images = images[:,:25,:].reshape(125, 320)
training_l = labels[:,:25].reshape(125,)
test_images = images[:,25:,:].reshape(70, 320)
test_l = labels[:,25:].reshape(70,)

plt.imshow(training_images[100,:].reshape(16,20).T)
plt.show()

K = np.max(training_l)        # number of classes
N_train = training_l.shape[0]
N_test = test_l.shape[0]

# One-hot encode the class labels.
training_labels = np.zeros((N_train, K)).astype(int)
training_labels[range(N_train), training_l - 1] = 1
test_labels = np.zeros((N_test, K)).astype(int)
test_labels[range(N_test), test_l - 1] = 1
# -


def sigmoid(X, w, w0):
    """Elementwise sigmoid of the affine scores X @ w + w0."""
    return(1 / (1 + np.exp(-(np.matmul(X, w) + w0))))


# +
def gradient_W(X, y_truth, y_predicted):
    """Gradient of the squared error w.r.t. the weight matrix W.

    NOTE(review): the trailing division by 3 scales the gradient (an extra
    learning-rate factor); kept as in the original — confirm intent.
    """
    return(np.asarray([-np.sum(np.repeat((y_truth[:,c] - y_predicted[:,c])[:, None], X.shape[1], axis = 1) * X, axis = 0) for c in range(K)]).transpose())/3


def gradient_w0(Y_truth, Y_predicted):
    """Gradient of the squared error w.r.t. the bias vector w0."""
    return(-np.sum(Y_truth - Y_predicted, axis = 0))
# -

# set learning parameters
eta = 0.01       # learning rate
epsilon = 1e-3   # convergence threshold on the parameter change

# Random initialization of weights and biases.
W = np.random.uniform(low = -0.01, high = 0.01, size = (training_images.shape[-1], K))
w0 = np.random.uniform(low = -0.01, high = 0.01, size = (1, K))

# $\textrm{Error} = -\sum\limits_{i = 1}^{N} \sum\limits_{c = 1}^{K} \left[ y_{ic}\log(\widehat{y}_{ic}) \right]$

# Gradient descent until the parameter update is smaller than epsilon.
# (A per-iteration debug print of the training-set shape was removed.)
iteration = 1
objective_values = []
while 1:
    Y_predicted = sigmoid(training_images, W, w0)

    objective_values = np.append(objective_values, np.sum(0.5*((training_labels - Y_predicted)**2)))

    W_old = W
    w0_old = w0

    W = W - eta * gradient_W(training_images, training_labels, Y_predicted)
    w0 = w0 - eta * gradient_w0(training_labels, Y_predicted)

    # Bug fix: the w0 term previously squared the *sum* of the differences
    # (np.sum(d)**2) instead of summing the squared differences like the W
    # term; both terms now accumulate sums of squares.
    if np.sqrt(np.sum((w0 - w0_old)**2) + np.sum((W - W_old)**2)) < epsilon:
        break

    iteration = iteration + 1

plt.figure(figsize = (10, 6))
plt.plot(range(1, iteration + 1), objective_values, "k-")
plt.xlabel("Iteration")
plt.ylabel("Error")
plt.show()

# +
# Confusion matrix on the training set.
y_predicted = np.argmax(Y_predicted, axis = 1) + 1
confusion_matrix = pd.crosstab(y_predicted, np.sum(training_labels * np.array([1,2,3,4,5]), axis=1), rownames = ['y_predicted'], colnames = ['y_train'])
print(confusion_matrix)
print("\n====================")
print("\n====================")
# -

Y_predicted_test = sigmoid(test_images, W, w0)

# +
# Confusion matrix on the test set.
y_predicted_test = np.argmax(Y_predicted_test, axis = 1) + 1
confusion_matrix = pd.crosstab(y_predicted_test, np.sum(test_labels * np.array([1,2,3,4,5]), axis=1), rownames = ['y_predicted'], colnames = ['y_test'])
print(confusion_matrix)
# -
2 - Discrimination by Regression/hw2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''base'': conda)'
#     language: python
#     name: python3
# ---

# # Exercise 1
# How can you improve the syntax used in the following code cells?
#
# Think about conventions, and code readability.

# (The cells below are intentionally left in their original style —
# improving them is the exercise.)

x = 4 + 1
print(x)

x = x + 1
print(x)

x += 1
print(x)

for i in range(10):
    if i % 3 == 0 :#checks to see if i is a multiple of 3
        print(i, 'is a multiple of 3!')

((2*8)+8)/(4-9+2*1-1)

# # Exercise 2

# Create a variable `savings` and give it value 200

# Your answer goes here
savings = 200  # fixed: the instruction above asks for 200, not 20

# Create a variable `factor` and assign it value 1.1

# Your answer goes here
factor = 1.1

# Calculate `balance`:

balance = savings * factor ** 5

# Now print a statement that says something like "Your account balance after 5 years would be: $xxx" and use the value of `balance` to populate xxx:

# Your answer goes here
# NOTE: assigning to a variable named `sep` has no effect on print();
# `sep=''` must be passed as a keyword argument (see the next instruction).
sep = ''
print("Your account balance after 5 years would be: $", balance )

# Let's make two modifications to this:
# * use `sep = ''` in print function to avoid the extra space between dollar sign and balance value.
# * use `round()` function to round to two decimals
#
# Note: Use `?round` in a new cell to get help from the help function.

# Your answer goes here

# # Exercise 3
# The value of pi is given below in a string/character format:

pi_char = "3.1415926535"
type(pi_char)

# Google and find a function that would convert it to float so we can use it in math equations, call this new converted value `pi`

# Your answer goes here

# Using this variable calculate the area of a circle with radius `r = 2` and print a rounded value for area (up to 3 decimals):

# Your answer goes here

# # Exercise 4
# What is the quotient and remainder of 67432/98?

# Your answer goes here

# # Exercise 5
# Write a statement to check whether `a` is a multiple of 5 and within the range of 100-150 or 0-50. Check your statement with the following values and it should give you the following values:
#
# 55 --> False
#
# 110 --> True
#
# -25 --> False
#
# 150 --> False
#
# 100 --> True

# Your answer goes here
01-Introduction/Exercise-02-Python-Syntax-and-Semantics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + tags=[]
import pickle
from tqdm import tqdm
import numpy as np
# -

# # merge two or more existing pickle data sets

# + tags=[]
# data_path = '/Users/jplasser/Documents/AI Master/WS2021/MastersThesis/code.nosync/CNEP/src/data/mimic3/'
data_path = '../data/mimic3/'

merge_datasets = ['train','val'] # , 'test']
target_dataset = 'extended'

dataset = merge_datasets[0]
filename_base = "_data_unique_embed_BERT_chunked_meanpooler_prepro"
template = f'{data_path}new_{dataset}{filename_base}.pickle'

# Load the first data set, then append every further data set to it key by
# key: numpy arrays are concatenated along axis 0, lists are extended.
# Files are opened with `with` so the handles are closed deterministically
# (the original `pickle.load(open(...))` pattern leaked them).
with open(template, 'rb') as f:
    data = pickle.load(f)

for dataset in merge_datasets[1:]:
    template = f'{data_path}new_{dataset}{filename_base}.pickle'
    with open(template, 'rb') as f:
        data_ = pickle.load(f)
    for k in data.keys():
        if isinstance(data[k], np.ndarray):
            data[k] = np.concatenate((data[k], data_[k]), axis=0)
        else:
            data[k].extend(data_[k])

# All merged fields must end up with the same number of samples.
assert len(set([d.shape[0] if isinstance(d, np.ndarray) else len(d) for d in data.values()])) == 1

dataset = target_dataset
template = f'{data_path}new_{dataset}{filename_base}.pickle'
with open(template, 'wb') as f:
    pickle.dump(data, f)

# + tags=[]
data['embeds'].shape
# -

# # split into train and test set

# + tags=[]
from sklearn.model_selection import train_test_split
import pandas as pd

# + tags=[]
test_size = 0.01
seed = 4711

X = data['inputs']
E = data['embeds']
Ee = data['embeds_events']
y = data['labels']

# + tags=[]
data.keys()

# + tags=[]
X_train, X_test, E_train, E_test, Ee_train, Ee_test, y_train, y_test = train_test_split(X, E, Ee, y, test_size=test_size, random_state=seed)

# + tags=[]
X_train.shape, E_train.shape, Ee_train.shape, y_train.shape, X_test.shape, E_test.shape, Ee_test.shape, y_test.shape

# + tags=[]
data_train = {'labels':y_train, 'inputs': X_train, 'embeds': E_train, 'embeds_events': Ee_train}
data_test = {'labels':y_test, 'inputs': X_test, 'embeds': E_test, 'embeds_events': Ee_test}

# + tags=[]
dataset = f'train_split_{1-test_size}'
template = f'{data_path}new_{dataset}_data_unique_embed_CNEP.pickle'
with open(template, 'wb') as f:
    pickle.dump(data_train, f)

dataset = f'test_split_{test_size}'
template = f'{data_path}new_{dataset}_data_unique_embed_CNEP.pickle'
with open(template, 'wb') as f:
    pickle.dump(data_test, f)
# -
src/notebooks/mimic_merge_datasets_to_train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# COVID-19 dashboard prototype: load cumulative case data, standardize it,
# and serve comparison/per-country charts through a Dash app.
# %reload_ext autoreload
# %autoreload 2

import warnings
warnings.filterwarnings('ignore')

import numpy as np
import pandas as pd
pd.options.display.max_columns = 999
pd.options.display.max_rows = 999
pd.options.display.max_colwidth = 100

import geopandas as gpd
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot, offline
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt

from os import listdir
import unicodedata

from paths import *
from scripts.vis_graphs import remove_acentos
from scripts.vis_graphs import normalize_cols
from scripts import manipulation
from scripts.io import read_sheets
from scripts import scrap_data
from scripts import vis_graphs

from datetime import datetime
today = datetime.today().strftime('%Y-%m-%d')

import yaml

# +
final_data = pd.read_csv('../data/cumulative_data/covid_last.csv')
# new_data, df_final = scrap_data.load_data()

### Standardize the data (original comment: "PADRONIZA DOS DADOS")
codes = pd.read_csv('../data/country_codes.csv')
# codes = codes[['CountryCode','CountryName']].drop_duplicates()
# codes.columns = normalize(codes.columns)

df = final_data.copy()
df.columns = normalize_cols(df.columns)
df = pd.merge(df,codes,on='countryname', how='left')

country_rename = {'US':'United States', 'UK':'United Kingdom', "Brazil":"Brasil"}
df['countryname'] = df['countryname'].replace(country_rename)

df_pop = pd.read_csv('../data/world_population.csv')
df = pd.merge(df,df_pop,on='countryname', how='left')

# Keep only rows that have both a population figure and a country code.
mask = ((df['population'].notnull()) & (df['countrycode'].notnull()))
df = df[mask]

df = manipulation.create_all_country_total_data(df)

# Dropdown options, ordered by confirmed cases.
# NOTE(review): "avaliable" is a misspelling of "available"; renaming would
# touch every reference below, so it is only flagged here.
names_ids = df.sort_values(by='confirmed', ascending=False)[['countryname','countrycode']].drop_duplicates(keep='first')
avaliable_geoids = names_ids['countrycode'].unique()
avaliable_countrynames = names_ids['countryname'].unique()

# +
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

import pandas as pd

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

# +
# Multi-select dropdown for the country-comparison charts.
dropdown1 = dcc.Dropdown(
    id='selected_country',
    options=[{'label':i, 'value':j} for i,j in zip(avaliable_countrynames,avaliable_geoids)],
    value=['US','IT','ES','DE', 'CN','FR','UK','BR'],
    multi=True
)

# Single-select dropdown for the per-country chart.
dropdown2 = dcc.Dropdown(
    id='selected_country2',
    options=[{'label':i, 'value':j} for i,j in zip(avaliable_countrynames,avaliable_geoids)],
    value='BR'
)

app.layout = html.Div([
    html.Div([
        dropdown1
    ], className='row'),
    html.Div([
        html.Div([
            dcc.Graph(id='country-graphic11'),
        ], className='six columns',
            # style={'width': '50%', 'display': 'inline'},
        ),
        html.Div([
            dcc.Graph(id='country-graphic12'),
        ], className='six columns',
            # style={'width': '50%', 'display': 'inline'},
        ),
    ], className='row'),
    html.Div([
        dropdown2
    ], className='row'),
    html.Div([
        dcc.Graph(id='country-graphic2')
    ], className='row'),
    html.Div([
        html.Iframe(src = "https://storage.cloud.google.com/sv-covid19/maps/cidades_estados.html?hl=pt-br", height=820, width=980)
    ], className='row',
        # NOTE(review): 'aling' looks like a typo for 'align'; as written this
        # style key has no effect — confirm and fix in a follow-up change.
        style={'aling':'center'},
    ),
])

# app.css.append_css({
#     'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'
# })


@app.callback(
    [Output(component_id = 'country-graphic11', component_property = 'figure'),
     Output(component_id = 'country-graphic12', component_property = 'figure')],
    [Input(component_id = 'selected_country', component_property= 'value')]
)
def update_graph(geoids):
    # Rebuild the cases and deaths comparison charts for the selected countries.
    themes = yaml.load(open('../themes/custom_colorscales.yaml', 'r'), Loader=yaml.FullLoader)
    fig11 = vis_graphs.total_casos(df = manipulation.create_all_country_total_data(df),
                                   mask_countrys = df['countrycode'].isin(geoids),
                                   themes=themes['all_world_dash'],
                                   var='cases',
                                   date=today,
                                   save=False)
    fig12 = vis_graphs.total_casos(df = manipulation.create_all_country_total_data(df),
                                   mask_countrys = df['countrycode'].isin(geoids),
                                   themes=themes['all_world_dash'],
                                   var='deaths',
                                   date=today,
                                   save=False)
    return fig11, fig12


@app.callback(
    Output(component_id = 'country-graphic2', component_property = 'figure'),
    [Input(component_id = 'selected_country2', component_property= 'value')]
)
def update_graph2(geoid):
    # Rebuild the single-country chart.
    themes = yaml.load(open('../themes/custom_colorscales.yaml', 'r'), Loader=yaml.FullLoader)
    fig = vis_graphs.total_by_country_dash(df = manipulation.create_single_country_data(df),
                                           geoid = geoid,
                                           themes=themes['by_country'],
                                           data=today,
                                           save=False)
    return fig


if __name__ == '__main__':
    app.run_server(debug=False)
# -

# +
# NOTE(review): this second cell rebuilds `app` from scratch and appears to be
# an earlier/scratch version of the dashboard above.
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
themes = yaml.load(open('../themes/custom_colorscales.yaml', 'r'), Loader=yaml.FullLoader)

import pandas as pd

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

app.layout = html.Div([
    html.Div([
        dcc.Dropdown(
            id='selected_country',
            options=[{'label':i, 'value':j} for i,j in zip(avaliable_countrynames,avaliable_geoids)],
            value='BR'
        )
    ]),
    dcc.Graph(id='country-graphic'),
])


@app.callback(
    Output(component_id = 'country-graphic', component_property = 'figure'),
    [Input(component_id = 'selected_country', component_property= 'value')]
)
def update_graph(geoid):
    # final = df[df['countryname'] == selected_country]
    # NOTE(review): `fig` is never assigned in this function, so the callback
    # raises NameError when triggered — the body was never finished.
    return fig


if __name__ == '__main__':
    app.run_server(debug=False)
# -
notebooks/coronaDash.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # <img width="500" src="https://azurecomcdn.azureedge.net/cvt-18f087887a905ed3ae5310bee894aa53fc03cfffadc5dc9902bfe3469d832fec/less/images/section/azure-maps.png" /> # # # Azure Maps Geospatial Services # # [Microsoft Azure Maps ](https://azure.microsoft.com/en-us/services/azure-maps/) provides developers from all industries with powerful geospatial capabilities. Those geospatial capabilities are packed with the freshest mapping data. Azure Maps is available for web, mobile (iOS and Android), Microsoft Power BI, Microsoft Power Apps and Microsoft Synapse. Azure Maps is an Open API compliant set of REST APIs. The following are only a high-level overview of the services which Azure Maps offers - Maps, Search, Routing, Traffic, Weather, Time Zones, Geolocation, Geofencing, Map Data, Creator, and Spatial Operations. # # ## Usage # # ### Geocode addresses # [**Address Geocoding**](https://docs.microsoft.com/en-us/rest/api/maps/search/post-search-address-batch) The Search Address Batch API sends batches of queries to Search Address API using just a single API call. This API geocodes text addresses or partial addresses and the geocoding search index will be queried for everything above the street level data. **Note** that the geocoder is very tolerant of typos and incomplete addresses. It will also handle everything from exact street addresses or street or intersections as well as higher level geographies such as city centers, counties, states etc. # # ### Reverse Geocode Coordinates # [**Reverse Geocoding**](https://docs.microsoft.com/en-us/rest/api/maps/search/post-search-address-reverse-batch) The Search Address Reverse Batch API sends batches of queries to Search Address Reverse API using just a single API call. This API takes in location coordinates and translates them into human readable street addresses. 
Most often this is needed in tracking applications where you receive a GPS feed from the device or asset and wish to know what address where the coordinate is located. # # ### Get Point In Polygon # [**Get Point in Polygon**](https://docs.microsoft.com/en-us/rest/api/maps/spatial/get-point-in-polygon) This API returns a boolean value indicating whether a point is inside a set of polygons. The set of polygons can we pre-created by using the [**Data Upload API**](https://docs.microsoft.com/en-us/rest/api/maps/data/upload-preview) referenced by a unique udid. # ## Prerequisites # # 1. Sign into the [Azure Portal](https://portal.azure.com) and create an Azure Maps account by following these [instructions](https://docs.microsoft.com/en-us/azure/azure-maps/how-to-manage-account-keys#create-a-new-account). # 1. Once the Maps account is created, provision a Maps Creator Resource by following these [instructions](https://docs.microsoft.com/en-us/azure/azure-maps/how-to-manage-creator#create-creator-resource). Creator is a [geographically scoped service](https://docs.microsoft.com/en-us/azure/azure-maps/creator-geographic-scope). Pick appropriate location while provisioning the creator resource. # 1. Follow these [instructions](https://docs.microsoft.com/en-us/azure/cognitive-services/big-data/getting-started#create-an-apache-spark-cluster) to set up your Azure Databricks environment and install SynapseML. # 1. After you create a new notebook in Azure Databricks, copy the **Shared code** below and paste into a new cell in your notebook. # 1. Choose a service sample, below, and copy paste it into a second new cell in your notebook. # 1. Replace the `AZUREMAPS_API_KEY` placeholders with your own [Maps account key](https://docs.microsoft.com/en-us/azure/azure-maps/how-to-manage-authentication#view-authentication-details). # 1. Choose the run button (triangle icon) in the upper right corner of the cell, then select **Run Cell**. # 1. View results in a table below the cell. 
# ## Shared code # # To get started, we'll need to add this code to the project: # + from pyspark.sql.functions import udf, col from pyspark.sql.types import StructType,StructField, DoubleType from pyspark.sql.functions import lit from pyspark.ml import PipelineModel from pyspark.sql.functions import col import os import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry # Configure more resiliant requests to stop flakiness retry_strategy = Retry( total=3, status_forcelist=[429, 500, 502, 503, 504], method_whitelist=["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"] ) adapter = HTTPAdapter(max_retries=retry_strategy) http = requests.Session() http.mount("https://", adapter) http.mount("http://", adapter) # - if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": from pyspark.sql import SparkSession spark = SparkSession.builder.getOrCreate() from notebookutils.mssparkutils.credentials import getSecret os.environ['AZURE_MAPS_KEY'] = getSecret( "mmlspark-keys", "azuremaps-api-key") # + from synapse.ml.cognitive import * from synapse.ml.geospatial import * # An Azure Maps account key azureMapsKey = os.environ["AZURE_MAPS_KEY"] # - # ## Geocoding sample # # The azure maps geocoder sends batches of queries to the [Search Address API](https://docs.microsoft.com/en-us/rest/api/maps/search/getsearchaddress). The API limits the batch size to 10000 queries per request. 
# + from synapse.ml.stages import FixedMiniBatchTransformer, FlattenBatch df = spark.createDataFrame([ ("One, Microsoft Way, Redmond",), ("400 Broad St, Seattle",), ("350 5th Ave, New York",), ("Pike Pl, Seattle",), ("Champ de Mars, 5 Avenue Anatole France, 75007 Paris",) ], ["address",]) def extract_location_fields(df): # Use this function to select only lat/lon columns into the dataframe return df.select(col("*"), col("output.response.results").getItem(0).getField("position").getField("lat").alias("Latitude"), col("output.response.results").getItem(0).getField("position").getField("lon").alias("Longitude") ).drop("output") # Run the Azure Maps geocoder to enhance the data with location data geocoder = (AddressGeocoder() .setSubscriptionKey(azureMapsKey) .setAddressCol("address") .setOutputCol("output")) # Show the results of your text query in a table format display(extract_location_fields(geocoder.transform(FixedMiniBatchTransformer().setBatchSize(10).transform(df)))) # - # ## Reverse Geocoding sample # # The azure maps reverse geocoder sends batches of queries to the [Search Address Reverse API](https://docs.microsoft.com/en-us/rest/api/maps/search/get-search-address-reverse) using just a single API call. 
# The API allows the caller to batch up to 10,000 queries per request.

# +
# Create a dataframe that's tied to its column names
df = spark.createDataFrame(
    (
        (48.858561, 2.294911),
        (47.639765, -122.127896),
        (47.621028, -122.348170),
        (47.734012, -122.102737),
    ),
    StructType([StructField("lat", DoubleType()), StructField("lon", DoubleType())]))

# Run the Azure Maps reverse geocoder to enhance the data with address data
rev_geocoder = (ReverseAddressGeocoder()
    .setSubscriptionKey(azureMapsKey)
    .setLatitudeCol("lat")
    .setLongitudeCol("lon")
    .setOutputCol("output"))

# Show the results of your query in a table format.
# NOTE(fix): the two result columns were previously aliased "In Polygon" and
# "Intersecting Polygons" — copy-pasted from the point-in-polygon sample below.
# They actually hold the reverse-geocoded street address and country.
display(rev_geocoder.transform(FixedMiniBatchTransformer().setBatchSize(10).transform(df)).select(col("*"),
    col("output.response.addresses").getItem(0).getField("address").getField("freeformAddress").alias("Address"),
    col("output.response.addresses").getItem(0).getField("address").getField("country").alias("Country")
).drop("output"))
# -

# ## Check Point In Polygon sample
#
# This API returns a boolean value indicating whether a point is inside a set of polygons. The polygon can be added to your creator account using the [**Data Upload API**](https://docs.microsoft.com/en-us/rest/api/maps/data/upload-preview). The API then returns a unique udid to reference the polygon.

# ### Setup geojson Polygons in your azure maps creator account
#
# Based on where the creator resource was provisioned, we need to prefix the appropriate geography code to the azure maps URL. In this example, the assumption is that the creator resource was provisioned in `East US 2` Location and hence we pick `us` as our geo prefix.

# +
import time
import json

# Choose a geography, you want your data to reside in.
# Allowed values # us => North American datacenters # eu -> European datacenters url_geo_prefix = 'us' # Upload a geojson with polygons in them r= http.post(f'https://{url_geo_prefix}.atlas.microsoft.com/mapData/upload?api-version=1.0&dataFormat=geojson&subscription-key={azureMapsKey}', json= { "type": "FeatureCollection", "features": [ { "type": "Feature", "properties": { "geometryId": "test_geometry" }, "geometry": { "type": "Polygon", "coordinates":[ [ [ -122.14290618896484, 47.67856488312544 ], [ -122.03956604003906, 47.67856488312544 ], [ -122.03956604003906, 47.7483271435476 ], [ -122.14290618896484, 47.7483271435476 ], [ -122.14290618896484, 47.67856488312544 ] ] ] } } ] }) long_running_operation = r.headers.get('location') time.sleep(30) # Sometimes this may take upto 30 seconds print(f"Status Code: {r.status_code}, Long Running Operation: {long_running_operation}") # This Operation completes in approximately 5 ~ 15 seconds user_data_id_resource_url = json.loads(http.get(f'{long_running_operation}&subscription-key={azureMapsKey}').content)['resourceLocation'] user_data_id = json.loads(http.get(f'{user_data_id_resource_url}&subscription-key={azureMapsKey}').content)['udid'] # - # ### Use the function to check if point is in polygon # + # Create a dataframe that's tied to it's column names df = spark.createDataFrame((( (48.858561, 2.294911), (47.639765, -122.127896), (47.621028, -122.348170), (47.734012, -122.102737) )), StructType([StructField("lat", DoubleType()), StructField("lon", DoubleType())])) # Run the Azure Maps geocoder to enhance the data with location data check_point_in_polygon = (CheckPointInPolygon() .setSubscriptionKey(azureMapsKey) .setGeography(url_geo_prefix) .setUserDataIdentifier(user_data_id) .setLatitudeCol("lat") .setLongitudeCol("lon") .setOutputCol("output")) # Show the results of your text query in a table format display(check_point_in_polygon.transform(df).select(col("*"), col("output.result.pointInPolygons").alias("In Polygon"), 
col("output.result.intersectingGeometries").alias("Intersecting Polygons") ).drop("output")) # - # ### Cleanup res = http.delete(f"https://{url_geo_prefix}.atlas.microsoft.com/mapData/{user_data_id}?api-version=1.0&subscription-key={azureMapsKey}")
notebooks/features/geospatial_services/GeospatialServices - Overview.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [LEGALST-123] Lab 22: Word2Vec & PCA # # In this lab, we will implement a Word2Vec algorithm on a sample of the UN General Debates dataset and conduct Principle Component Analysis of the resulting word embeddings. # # *Estimated Time: 45 minutes* # # ### Table of Contents # # [Overview](#section context)<br> # # [The Data](#section data)<br> # # 0- [Pre-Processing](#section 0)<br> # # 1 - [Word2Vec](#section 1)<br> # # &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1 - [Training](#subsection 1) # # &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 2 - [Embeddings](#subsection 2) # # 2- [PCA of Word Embeddings](#section 2)<br> # # &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1 - [Plot Word Vectors Using PCA](#subsection 3) # # # __Dependencies:__ # + import pandas as pd import numpy as np # !pip install nltk import nltk # !pip install gensim import gensim import string import matplotlib.pyplot as plt # %matplotlib inline from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA # - # ## Overview <a id='context'></a> # # In this lab, we will be turning individual words in the data set into vectors, called "Word Embeddings". Word embedding generally attempts to identify semantic relationships between words by observing them in the context that the word appears. Word2Vec is the most prominent word embedding algorithm - this is what we will have practice using in today's lab. # # Imagine that each word in a novel has its meaning determined by the ones that surround it in a limited window. 
For example, in <NAME>'s first sentence, “me” is paired on either side by “Call” and “Ishmael.” After observing the windows around every word in the novel (or many novels), the computer will notice a pattern in which “me” falls between similar pairs of words to “her,” “him,” or “them.” Of course, the computer had gone through a similar process over the words “Call” and “Ishmael,” for which “me” is reciprocally part of their contexts. This chaining of signifiers to one another mirrors some of humanists' most sophisticated interpretative frameworks of language. # # The two main flavors of Word2Vec are CBOW (Continuous Bag of Words) and Skip-Gram, which can be distinguished partly by their input and output during training. Skip-Gram takes a word of interest as its input (e.g. "me") and tries to learn how to predict its context words ("Call","Ishmael"). CBOW does the opposite, taking the context words ("Call","Ishmael") as a single input and tries to predict the word of interest ("me"). # # In general, CBOW is is faster and does well with frequent words, while Skip-Gram potentially represents rare words better. # # ### Word2Vec Features # <ul> # <li>`size`: Number of dimensions for word embedding model</li> # <li>`window`: Number of context words to observe in each direction</li> # <li>`min_count`: Minimum frequency for words included in model</li> # <li>`sg` (Skip-Gram): '0' indicates CBOW model; '1' indicates Skip-Gram</li> # <li>`alpha`: Learning rate (initial); prevents model from over-correcting, enables finer tuning</li> # <li>`iterations`: Number of passes through dataset</li> # <li>`batch_words`: Number of words to sample from data during each pass</li> # </ul> # # For more detailed background on Word2Vec's mechanics, I suggest this <a href="https://www.tensorflow.org/versions/r0.8/tutorials/word2vec/index.html">brief tutorial</a> by Google, especially the sections "Motivation," "Skip-Gram Model," and "Visualizing." 
# # We will be using the default value for most of our parameters. # ## The Data <a id='data'></a> # # We will be working with the UN General Debates data set that contains data from years __2013 and up__. Run the following code block to load the `un-general-debates-2013up` csv into the notebook as `un`: un = pd.read_csv('data/un-general-debates-2013up.csv') un.head() # --- # # # Pre-Processing <a id='section 0'></a> # # Word2Vec learns about the relationships among words by observing them in context. We'll need to tokenize the words in a sample of texts from the dataframe while retaining sentence boundaries. # # Just as we did in the Pre-Processing lab about a month ago (3/1), we'll use the Natural Language Toolkit (NLTK) to tokenize a our sample text. Note that each text of each row in the `un` dataframe is in the form of a single string. Because we want to work with a larger sample than just one instance of a text, we will first __combine the first 300 strings of texts in the data frame__ to create one long string of text, which we will simply call `text`. #Use a for loop to combine 300 strings of texts in the `un` dataframe into one string. text = '' for ...: ... #SOLUTION # Next, get the individual sentences from `text`. Show the first three sentences. # # *Hint*: the `sent_tokenize` method of nltk will be useful here. # create sentence tokens sents = ... ... #SOLUTION # Lowercase all words in each sentence, and make sure we only keep nonempty sentences: # # *Hint*: You need only one list comprehension to accomplish this. #filter out empty sentences nonempty_sents = ... #SOLUTION # Now for each sentence in `nonempty_sents`, return a list of words without punctuation or stopwords. Set this to `tokens`. This should result in a list of lists, each list representing a sentence that has been tokenized. Show the first three tokenized sentences. # # *Hint*: In the Pre-Processing Lab, we defined the function `rem_punc_stop` that does exactly this. 
How might we apply this to every sentence of `text`? # + #redefine rem_punc_stop here: def rem_punc_stop(text): from string import punctuation from nltk.corpus import stopwords stop_words = stopwords.words("english") #a list of English stop words for ...: text = ... #get rid of all punctuation marks toks = ... #create a list of tokens from resulting punctuation-less text toks_reduced = ... #filter out stopwords from the list of tokens return ... # + #SOLUTION # - # Now that you have re-defined rem_punc_stop, you are ready to use this function to tokenize every sentence cleanly using the following code. This would create a list `tokens` whose elements are lists of tokens, which each list representing a single sentence. However, due to the limitations of DataHub, running this code will very likley crash your Kernel. This will not become a problem once you begin working locally (i.e. working on notebooks on your own computer as opposed to DataHub) as you will have more memory and computing power to work with. # + ###**** DO NOT RUN THIS CODE BLOCK ***### ### tokens = [rem_punc_stop(sentence) for sentence in nonempty_sents] ### # - # Luckily, we have created a `Tokens.csv` for you that contains a table with a `text` column containing cleanly tokenized sentences. Loadthis csv, and run the follwing code block to get the desired list format. #load Tokens.csv tokens_tbl = #SOLUTION #RUN THIS CELL! tokens = [eval(tokens_tbl['texts'][i]) for i in range(len(tokens_tbl))] tokens[:3] # --- # # # Word2Vec<a id='section 1'></a> # # # ## Training <a id='subsection 1'></a> # # Phew! Now that we have pre-processed our text, we can use the `gensim` library to construct our word embeddings. We will use the Continous Bag of Words model (CBOW), which predicts target words from its neighboring context words to learn word embeddings from raw text. # # Read through the documentation of the Word2Vec method in gensim to understand how to implement the Word2Vec model. 
Then fill in the blanks so that: we use a __Continuout Bag of Words__ model to create word embeddings of __size 100__ for words that appear in `text` __5 or more times__. Set the learning rate to .025, number of iterations to 5, and sample 10000 words from the data during each pass. # + #Run this code for documentation, or refer to the list above for parameter definitions # gensim.models.Word2Vec? # - #Fill in the missing parameter values model = ... #SOLUTION # ## Embeddings <a id='subsection 2'></a> # # We can return the actual high-dimensional vector by simply indexing the model with the word as the key: #Run this cell print(model.wv['assembly']) # Use the following empty cells to look at what the word embeddings look like for words you think may appear in the `text`! Keep in mind that even if a word shows up in `text` as seen above, a word vector will not be created unless it satisfies all conditions we inputted into the model above. Try words like `president` and `conference` to start! If you're curious, the cell directly below will return a list of words that have been turned into word vectors by the model above: #Run this cell words = list(model.wv.key_to_index) print(words) ... ... ... # `gensim` comes with some handy methods to analyze word relationships. `similarity` will give us a number from 0-1 based on how similar two words are. If this sounds like cosine similarity for words, you'd be right! It just takes the cosine similarity of the high dimensional vectors we input. # # In the following cell, find the similarity between the words `president` and `leadership`: model.wv.similarity(...,...) #SOLUTION # Now find the similarity between the words `different` and `leadership`. model.wv.similarity('different', 'leadership') # You should notice that the the second smilarity score is significantly lower than the first. Does this make sense? 
# # Find the similarity score between other words that may have very strong or very weak relationships: # + #Similarity 1 # + #Similarity 2 # - # We can also find cosine distance between two clusters of word vectors. Each cluster is measured as the mean of its words: #Similarity between the president/leadership cluster and the confdient/experience cluster model.wv.n_similarity(['president','leadership'],['confident','experience']) # We can find words that don't belong with `doesnt_match`. It finds the mean vector of the words in the `list`, and identifies the furthest away. Out of the three words in the list `['president', 'violent', 'leadership']`, which is the furthest vector from the mean? #Fill in the blanks model.wv.doesnt_match([..., ..., ...]) # *YOUR ANSWER* #SOLUTION # The most famous implementation of this vector math is semantics. What happens if we take: # # $$leadership - president + assembly = $$ model.wv.most_similar(positive=['leadership', 'assembly'], negative=['president']) # __Question__: What does this equation mean, and what do these output vectors mean? # *YOUR ANSWER HERE* # __Answer__: # The way that this works is that, by adding and subtracting attributes of each word vector, the equation outputs a vector with a new value for each of the (in this case 100) attributes. The model then outputs words in the corpus that most closely matches the attributes of this outputted vector. The most famous exampls is: # # $$ King - Man + Woman = ...$$ # # __Question__: What do you think this would output, and why? # Your answer here: # __ANSWER__: Queen. We take the 'manliness' from `KING` and replace it with 'woman', so the word vector King, while retaining its high royalty attribute, no longer has a strong manliness attribute and now has a strong feminine attribute. # # Principle Component Analysis <a id='section 2'></a> # # Next we will explore the word embeddings of our `text` visually with PCA (remember the EDA lab from 3/22?). 
# # We can retrieve __all__ of the vectors from a trained model as follows: X = model.wv[model.wv.index_to_key] # As we did in the EDA lab, we want to standardize X so that all features have the same scale. Do this by creating a StandardScaler(), then run its fit_transform method on X. You should recognize the syntax the EDA lab. # + #scale the data X_std = ... #look at the covariance matrix ... # + #SOLUTION # scale the data # look at the covariance matrix # - # We can then train a projection method on the vectors, such as those methods offered in scikit-learn, then use matplotlib to plot the projection as a scatter plot which we will do next. # # ### Plot Word Vectors Using PCA <a id='subsection 3'></a> # # Recall that we can create a 2-dimensional PCA model of the word vectors using the scikit-learn PCA class. Construct a PCA objectusing the `PCA()` class of the scikit-learn library (setting n_components=2 so we can graph it in two dimensions) and use its fit_transform method on your standardized X to get Y_pca: the principal components. # + # make a PCA pca = ... #set n_components to 2 to graph in 2-D # fit the standardized data Y_pca = pca... # + #SOLUTION #make a PCA # fit the standardized data # - # The resulting projection can be plotted using matplotlib, pulling out the two dimensions as x and y coordinates. Create a scatter plot of the standardized word embeddings, setting the __size of each scatter point to 5__ to avoid overcrowding. # + #Create a scatter plot here # - #SOLUTION plt.scatter(Y_pca[:, 0], Y_pca[:, 1], s= 5); # __Question__: What do each point represent? What do the x and y axes represent? # *YOUR ANSWER HERE:* # __ANSWER__: Eat point represents a word. Explanation for axes are below as a part of the lab: # You might at this point still be confused on what the x- and y- axes represent. 
# Because PCA selects and combines features according to what best describes and models the desired variable, the x and y axes actually don't have an intuitive meaning on a human level. PCA's job is to reduce the dimension of the features, and in this case it manipulated the 100 features each word vector had to just 2 that best described the words we modeled on. So, don't worry too much about what the coordinates of each word represents - we just want you to have a general visual understanding of word vectors and how they may be related to one another on a graph.
#
# On that note, run the following cell. This will label each vector with its respective word. Can you figure out, in general, what the code is doing?

# +
import random

# Sample 15 random vocabulary words and project their embeddings to 2-D.
rando = random.sample(list(model.wv.index_to_key), 15)
X1 = model.wv[rando]
pca1 = PCA(n_components=2)
# FIX: fit the new 2-component PCA (`pca1`) on the 15 sampled vectors.
# The original called `pca.fit_transform(X1)`, silently refitting the earlier
# full-vocabulary PCA object instead of the one created just above.
result = pca1.fit_transform(X1)

# create a scatter plot of the projection, labeling each point with its word
plt.scatter(result[:, 0], result[:, 1])
for i, word in enumerate(rando):
    plt.annotate(word, xy=(result[i, 0], result[i, 1]))
plt.show()
# -

# *YOUR ANSWER HERE*

# __ANSWER:__
#
# We are randomly selecting 15 words from the vectors outputted by the model. Then we are using PCA to reduce the dimension to 2 features as we did before. Then we are using matplotlib to create a scatter plot and, for each coordinate of the word, assigning the word label to the coordinate!

# Great job! You've now completed the lab on word embeddings and visualizing embeddings with PCA!
#
# ---
#
# ## Bibliography
#
# - <NAME>. (2017, October 6). How to Develop Word Embeddings in Python with Gensim. https://machinelearningmastery.com/develop-word-embeddings-python-gensim/
#
# - TensorFlow. (2018, March 29). Vector Representations of Words.
https://www.tensorflow.org/tutorials/word2vec # - `rem_punc_stop` function borrowed from Tian Qin's notebook on pre-processig: https://github.com/ds-modules/LEGALST-190/blob/master/labs/3-1/3-1_preprocessing_text_student_version.ipynb # - PCA section adapted from materials by <NAME>: https://github.com/ds-modules/LEGALST-190/blob/master/labs/3-22/3-22_EDA.ipynb # - Word2Vec introduction & examples adapted from materials by <NAME>: https://github.com/henchc/textxd-2017/blob/master/08-Word-Embeddings.ipynb # # # # Notebook developed by: <NAME> # # Data Science Modules: http://data.berkeley.edu/education/modules
labs/22_Word Embedding/22_Word_Embedding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Local Linear Regression # Spring 2019 AME-70790 Final Project # # <NAME> (<EMAIL>) # # Reference: <NAME>., & <NAME>. (1994). Kernel smoothing. Chapman and Hall/CRC. # ___ # Another application of kernel smoothers is *kernel regression* which is a non-parametric regression method that shares many of the same ideas as kernel density estimation. # One of the main advantages of kernel regression methods is their underlying simplicity compared to other non-parametric methods such as spline functions or wavelets. # We will specifically focus on a subset of kernel regression methods called *local polynomial estimators*. # #### Local Polynomial Estimators # Let us consider an arbitrary set of $n$ training points $\left\{x_{i},y_{i}\right\}_{i=1}^{n}$. # To formulate the local polynomial estimator we start by first looking at a standard least squares regression model with a polynomial basis function: # $$\begin{gather} # \mathcal{L} = \sum_{i=1}^{n}\left(y_{i}-\textbf{w}^{T}\textbf{X}\right)^{2},\\ # \textbf{w} = \left[ w_{1}, w_{2}, ... , w_{p}\right]^{T}, \quad \textbf{X}=\left[\begin{matrix} 1 & x_{1} & x_{1}^{2} &... & x_{1}^{p} \\ 1 & x_{2} & x_{2}^{2} &... & x_{2}^{p} \\ ... & & & & ... \\ 1 & x_{n} & x_{n}^{2} &... & x_{n}^{p} \\\end{matrix}\right]. # \end{gather}$$ # In the standard least square approach we would take the derivative of the loss with respect to the weights and set it equal to zero. # However, we will take a slightly different approach, consider the parameterization $m_{w}(x)=\textbf{w}^{T}\textbf{X}$ where the function $m_{w}$ now represents the polynomial that we with to fit. 
# Now rather than considering the polynomial directly, we approximate it using Taylor expansion to the $p$-th order: # $$\begin{gather} # \mathcal{L} = \sum_{i=1}^{n}\left(y_{i}-m_{w}(x_{i})\right)^{2},\\ # m_{w}(x_{i})\approx m(x)+m'(x)(x_{i}-x)+\frac{m''(x)}{2}(x_{i}-x)^{2}+...+\frac{m^{(p)}(x)}{p!}(x_{i}-x)^{p},\\ # \mathcal{L} = \sum_{i=1}^{n}\left(y_{i}-\sum_{j=0}^{p}\frac{m^{(j)}(x)}{j!}(x_{i}-x)^{j}\right)^{2}= \sum_{i=1}^{n}\left(y_{i}-\sum_{j=0}^{p}\beta_{j}(x_{i}-x)^{j}\right)^{2}, # \end{gather}$$ # where we have replaced the coefficients of the Taylor expansion with $\beta_{j}$. # The last addition is the addition of a kernel operator such the contributions of each datum $(x_{i},y_{i})$ are based on its spacial location to the point of interest: # $$\mathcal{L} = \sum_{i=1}^{n}\left(y_{i}-\sum_{j=0}^{p}\beta_{j}(x_{i}-x)^{j}\right)^{2}K_{h}(x_{i}-x).$$ # We can reformulate the loss in matrix form and solve similar to standard least squares regression: # $$\begin{align} # \mathcal{L}&=(\textbf{Y}-\boldsymbol{\beta}^{T}\textbf{X}_{x})^{2}\textbf{W}_{x}\\ # \frac{\partial \mathcal{L}}{\partial\boldsymbol{\beta}}&=2\textbf{X}_{x}^{T}\textbf{W}_{x}\textbf{X}_{x}\boldsymbol{\beta} - 2\textbf{X}_{x}^{T}\textbf{W}_{x}\textbf{Y}\\ # \hat{\boldsymbol{\beta}}(x) &= \left(\textbf{X}_{x}^{T}\textbf{W}_{x}\textbf{X}_{x}\right)^{-1}\textbf{X}_{x}^{T}\textbf{W}_{x}\textbf{Y}, # \end{align}$$ # assuming $\textbf{X}^{T}\textbf{W}\textbf{X}$ is invertible. Each matrix is defined as follows: # $$\textbf{Y}=\left[y_{1},y_{2},...,y_{n}\right]^{T}, \quad # \textbf{X}_{x}=\left[\begin{matrix} 1 & x_{1}-x &... & x_{1}^{p}-x \\ 1 & x_{2}-x & ... & x_{2}^{p}-x \\ ... & & & ... \\ 1 & x_{n}-x &... 
& (x_{n}-x)^{p} \\\end{matrix}\right], \quad
# \textbf{W}_{x}=\textrm{diag}\left\{K_{h}(x_{1}-x), ..., K_{h}(x_{n}-x)\right\}.$$
# Since we have defined the Taylor expansion centered around point $x$, the prediction at point $x$ is then simply the intercept coefficient:
# $$\hat{\beta}_{0} = \hat{m}(x) = \textbf{e}^{T}\left(\textbf{X}_{x}^{T}\textbf{W}_{x}\textbf{X}_{x}\right)^{-1}\textbf{X}_{x}^{T}\textbf{W}_{x}\textbf{Y},$$
# where $\textbf{e}^{T}$ is a $[p+1]$ vector with the first element equal to 1 and the rest zero.
# Note that unlike linear regression, this expression must be evaluated at every predictive point.
# This makes the computational cost of local polynomial estimators much more expensive.

# #### Local Linear Regression

# With the general formulation of the local polynomial estimator defined with its solution, let us start by taking a look at *local linear regression* which can be found from the equations above by setting $p=1$:
# $$\mathcal{L} = \sum_{i=1}^{n}\left(y_{i}-\beta_{0} - \beta_{1}(x_{i}-x)\right)^{2}K_{h}(x_{i}-x).$$
# For the kernel function, we will continue to use the normal distribution $K_{h}=\mathcal{N}(0,h^{2})$.
# Now for our regression problem we will consider the following function:
# $$m(x)=2\exp\left\{-x^{2}/(0.3)^{2}\right\}+3\exp\left\{-(x-1)^{2}/(0.7)^{2}\right\}, \quad x\in[0,1].$$
# When generating our training data, we evenly sample points between [0,1] and add additional output-wise noise to each observation:
# $$y_{i}=m(x_{i})+0.075\epsilon_{i},\quad \epsilon_{i}\sim\mathcal{N}(0,1).$$

import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt

# +
plt.close("all")
np.random.seed(123)

ntrain = 100  # Number of training points
h = 0.05      # Kernel bandwidth

# Generate training data: y_i = m(x_i) + 0.075 * eps_i on an even grid in [0, 1]
x_train = np.linspace(0, 1, ntrain)
Y = 2*np.exp(-x_train**2/(0.3**2)) + 3*np.exp(-(x_train-1)**2/(0.7**2)) \
    + 0.075*np.random.randn(ntrain)

# Predict: solve the kernel-weighted least-squares problem centered at each x0.
# betas[i] holds (beta0, beta1) of the local linear fit at x_pred[i]; the
# local prediction m_hat(x0) is the intercept betas[i, 0].
n_pred = 50
x_pred = np.linspace(0, 1, n_pred)
betas = np.zeros((n_pred, 2))
for i, x0 in enumerate(x_pred):
    # Design matrix [1, x_i - x0] and Gaussian kernel weights K_h(x_i - x0)
    X = np.stack([np.ones(ntrain), x_train - x0], axis=1)
    W = np.diag(norm.pdf(x_train - x0, loc=0, scale=h))
    # Solve the normal equations (X^T W X) beta = X^T W Y directly instead of
    # forming the explicit inverse -- numerically more stable and cheaper
    # than np.linalg.inv(...).dot(...).
    XtW = X.T.dot(W)
    betas[i] = np.linalg.solve(XtW.dot(X), XtW.dot(Y))
# -

# With the beta coefficients calculated we have the predictions of the model.
# As one can imagine, when the number of points one needs to predict at increases significantly this local linear regression model will be unusable.
# + fig = plt.figure(figsize=(10,5)) ax = [] ax.append(plt.subplot2grid((1, 2), (0, 0))) ax.append(plt.subplot2grid((1, 2), (0, 1))) # Plot target x_target = np.linspace(0,1,200) y_target = 2*np.exp(-x_target**2/(0.3**2)) + 3*np.exp(-(x_target-1)**2/(0.7**2)) ax[0].plot(x_target, y_target, '-', color='k', label='Target') # Plot training data ax[0].scatter(x_train, Y, c='k', s=15.0, marker='x', linewidth=1.0, label='Training Data', alpha=0.5) # Plot local linear kernel estimate # Recall that the prediction at each point is simply the intercept value beta0 ax[0].plot(x_pred, betas[:,0], '--', color='r', label='Prediction') ax[0].set_xlabel('x') ax[0].set_ylabel('y') ax[0].legend() # Plot several local linear fits p1, = ax[1].plot(x_target, y_target, '-', color='k', label='Target') idxs = [int(n_pred/5), int(n_pred/2), int(4*n_pred/5)] for i, idx in enumerate(idxs): x_cen = x_pred[idx] x_loc = x_pred[idx-int(n_pred/10):idx+int(n_pred/10)] p2, = ax[1].plot(x_loc, betas[idx,0]+betas[idx,1]*(x_loc - x_cen), '-.', color='b') p3, = ax[1].plot(x_target, 0.025*norm.pdf(x_target-x_cen, loc=0, scale=h)+np.min(Y), '--', color='k', linewidth=0.5) ax[1].plot([x_cen,x_cen], [np.min(Y), betas[idx,0]], '--', color='k', linewidth=0.5) # center line ax[1].set_xlabel('x') ax[1].set_ylabel('y') ax[1].legend((p1,p2,p3),('Target', 'Linear Fits', 'Kernel Weight')) # Save and show figure plt.savefig('figs/05_local_linear_regression.pdf') plt.savefig('figs/05_local_linear_regression.png') plt.show() # - # On the left we can see the prediction of the local linear regression which does a very nice job at estimating the function. On the right three local linear fits which illustrates how the local linear regression predicts by fitting a linear model centered at each prediction point. # Below are the kernel weights which illustrate the range of training points that have influence on the prediction. 
# #### Bandwidth Sensitivity

# Similar to kernel density estimation, the bandwidth of the kernel can be very influential over the prediction quality.

band_widths = [0.005, 0.05, 0.2]

# Predict: recompute the local linear fit at every point, once per bandwidth.
n_pred = 50
x_pred = np.linspace(0, 1, n_pred)
betas = np.zeros((len(band_widths), n_pred, 2))
for i, h in enumerate(band_widths):
    for j, x0 in enumerate(x_pred):
        # Kernel-weighted least-squares fit centered at x0.
        X = np.stack([np.ones(ntrain), x_train - x0], axis=1)
        W = np.diag(norm.pdf(x_train - x0, loc=0, scale=h))
        # Solve the normal equations directly rather than forming the
        # explicit inverse (more stable and cheaper than np.linalg.inv).
        XtW = X.T.dot(W)
        betas[i, j] = np.linalg.solve(XtW.dot(X), XtW.dot(Y))

# +
fig = plt.figure(figsize=(15, 4))
ax = []
ax.append(plt.subplot2grid((1, 3), (0, 0)))
ax.append(plt.subplot2grid((1, 3), (0, 1)))
ax.append(plt.subplot2grid((1, 3), (0, 2)))

for i, h in enumerate(band_widths):
    # Local linear prediction (the intercept beta0 at each x0).
    # NOTE(fix): this label previously read 'KDE', but the curve is the local
    # linear regression estimate, not a kernel density estimate.
    ax[i].plot(x_pred, betas[i, :, 0], '--', c='r', label='Prediction')
    # Target
    ax[i].plot(x_target, y_target, '--', color='k', label='Target')
    ax[i].set_title('Bandwidth: {:.02f}'.format(h))
    ax[i].set_xlabel('x')
    ax[i].set_ylabel('y')

# Save and show figure
plt.savefig('figs/05_local_linear_bandwidth.pdf')
plt.savefig('figs/05_local_linear_bandwidth.png')
plt.show()
# -

# Here we see precisely the trends seen for kernel density estimation.
# If the bandwidth is too low the prediction is noisy and if it is too high the prediction is over smoothed.
05_local_linear_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="UMo_ZEt2Zbs5"
# Downloading dataset from kaggle through API key

# + id="lE5SlEl50MHP"
from google.colab import drive

drive.mount('/content/gdrive')

# + id="xZOZjdoF0Sfl"
import os

# Point the Kaggle CLI at the API-key file stored on Google Drive.
os.environ['KAGGLE_CONFIG_DIR'] = "/content/gdrive/My Drive/Resume_Screening/Kaggle"

# + id="-ynCDDER0izP"
# %cd /content/gdrive/MyDrive/Resume_Screening/Kaggle

# + id="MTLG6DrL3Z-I"
# !ls

# + [markdown] id="naD7uKJIuXX9"
# Importing Libraries

# + id="yoTvRDd2zlIT"
import numpy as np
import pandas as pd

# + [markdown] id="pUcpwlNRudim"
# # **Dataset**

# + id="upFN-tzJzt1d"
data = pd.read_csv('UpdatedResumeDataSet.csv', encoding='utf-8')
data['Cleaned_Resume'] = ''
data

# + id="6umJmUQ1z30I"
print("Resume Categories : ")
print(data['Category'].value_counts())

# + id="2kuNw3SGusYg"
import seaborn as sns
import matplotlib.pyplot as plt

plt.figure(figsize=(15, 15))
plt.xticks(rotation=90)
sns.countplot(y="Category", data=data)

# + [markdown] id="o_O1qoN2vDgI"
# # **Cleaning the dataset**

# + id="XKquDKcP0igv"
import re


def cleaned_data(resume_data):
    """Strip URLs, RT/cc markers, hashtags, mentions, punctuation and
    non-ASCII characters from a raw resume string, collapsing any runs of
    whitespace to a single space.

    Returns the cleaned string; the input is not modified.
    """
    # Raw-string literals (r'...') avoid DeprecationWarnings for escapes
    # such as \S while keeping the compiled patterns byte-identical.
    resume_data = re.sub(r'http\S+\s*', ' ', resume_data)   # URLs
    resume_data = re.sub(r'RT|cc', ' ', resume_data)        # RT and cc
    resume_data = re.sub(r'#\S+', '', resume_data)          # hashtags
    resume_data = re.sub(r'@\S+', ' ', resume_data)         # mentions
    # Punctuation.
    resume_data = re.sub('[%s]' % re.escape(r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""), ' ', resume_data)
    resume_data = re.sub(r'[^\x00-\x7f]', r' ', resume_data)  # non-ASCII
    resume_data = re.sub(r'\s+', ' ', resume_data)          # extra whitespace
    return resume_data


# + id="xOlq0HuV1c2u"
# Vectorised apply instead of the chained assignment
# data['Cleaned_Resume'][i] = ..., which writes through a temporary copy
# and raises SettingWithCopyWarning.
data['Cleaned_Resume'] = data['Resume'].apply(cleaned_data)

# + [markdown] id="s3wvERUjvIQJ"
# Cleaned Data

# + id="pNhsS8HN1vxn"
data.head()

# + id="dxM_tgOF2Y8t"
print("Length of dataset :", len(data))

# + [markdown] id="KiNJXQHaqsgl"
# # **Information regarding Dataset**

# + id="Gdnrxat52uaU"
# Build one lowercase corpus from all resumes.  str.join is O(n), whereas
# repeated `corpus = corpus + ...` concatenation in a loop is quadratic.
corpus = "".join(data['Cleaned_Resume'][i].lower() for i in range(len(data)))

# + id="InD-_t0K25Kp"
corpus[0:1000]

# + id="G2YdpznD26-T"
import nltk
from nltk.corpus import stopwords
import string
from wordcloud import WordCloud

# + id="0QAljA305WOt"
nltk.download('stopwords')

# + id="m5Tm-jU13FSB"
#creating the tokenizer.
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(corpus)
print("Total number of tokens :", len(tokens))

# + id="DvFz_azY4J6P"
#getting the stop words from nltk dataset.
# Bind the stop-word list to a name that does not shadow the
# nltk.corpus.stopwords module imported above; a set makes the membership
# tests below O(1) instead of O(n) per token.
stop_words = set(nltk.corpus.stopwords.words('english'))
print("Total number of stopwords :", len(stop_words))

# + id="exqxkct_5JUs"
#removing the stop words from the tokens.
words = [token for token in tokens if token not in stop_words]
print("Total number of words after removal of stopwords :", len(words))

# + [markdown] id="ySJBoS8v6j6b"
# # **Lemmatization**

# + id="OVQa2GeO7FY9"
nltk.download('wordnet')

# + id="S11z4zc150PK"
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
lemmatized_words = [lemmatizer.lemmatize(word) for word in words]
print("Total number of lemmatized words :", len(lemmatized_words))

# + id="NLah-VvA7KNq"
frequency_distribution = nltk.FreqDist(lemmatized_words)
common_words = frequency_distribution.most_common(10)
print("Top 10 common words : ")
print(common_words)

# + [markdown] id="oNNbuUdft8sn"
# # **Frequency Distribution**

# + id="bCJ4Ac9Xt0Tp"
#Frequency Distribution Plot
plt.subplots(figsize=(20, 12))
frequency_distribution.plot(30)

# + [markdown] id="x4Q7ilcLu5Ub"
# # **WordCloud**

# + id="aJK1i3kNuUSE"
# Join the non-numeric tokens into one blob.  Named wordcloud_text rather
# than `string`, which would shadow the stdlib `string` module imported
# earlier.
wordcloud_text = ' '.join(i for i in lemmatized_words if not i.isdigit())
plt.subplots(figsize=(15, 15))
wordcloud = WordCloud(background_color='black', max_words=100,
                      width=1500, height=1500).generate(wordcloud_text)
plt.imshow(wordcloud)
plt.title('Resume Text WordCloud (100 Words)')
plt.axis('off')
plt.show()

# + [markdown] id="QVCVUvzI3P3G"
# **Encoding the labels under "Category"**

# + id="dT-Xu025zmC-"
from sklearn import preprocessing
import copy

#label encoder
le = preprocessing.LabelEncoder()
temp = copy.deepcopy(data['Category'])
le.fit(temp)
categories = le.classes_
#converting the categorical data into labels.
data['Category'] = le.transform(temp)

# + id="QLz5WwIIUsx8"
categories

# + [markdown] id="VUdlPm018_tz"
# **Getting the training and testing data**

# + id="Nu0YOo1nSeE7"
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer

#getting the resume text and categories from data.
X = data['Cleaned_Resume'].values
y = data['Category'].values

#using the TfidfVectorizer to transform the text into vector.
word_vectorizer = TfidfVectorizer(sublinear_tf=True, stop_words='english')
word_vectorizer.fit(X)
word_features = word_vectorizer.transform(X)

#splitting the dataset into training and test data.
X_train, X_test, y_train, y_test = train_test_split(word_features, y, random_state=0, test_size=0.2)
print("Shape of X-train : ", X_train.shape)
print("Shape of X-test : ", X_test.shape)

# + id="pmkyo37pTaCm"
from sklearn import linear_model
from sklearn import metrics

# + [markdown] id="ZVeHGlUR_-B0"
# # **Training the model**

# + id="h-J1w-JFS4g8"
#this estimator implements regularized linear models with stochastic gradient descent
# NOTE(review): scikit-learn >= 1.1 renames this loss to 'log_loss';
# 'log' is kept for compatibility with the environment this ran in.
model = linear_model.SGDClassifier(loss='log')
model.fit(X_train, y_train)
prediction = model.predict(X_test)

# + id="SlP3LULg7cOj"
from sklearn.model_selection import cross_val_score

cross_val_score(model, X_train, y_train)

# + id="DAtSLBGKTWB0"
for i in prediction:
    print(categories[i])

# + [markdown] id="YYgE7gr_AaUi"
# **Converting Pdf file to text**

# + [markdown] id="8bxXY1LuBuQa"
# Using the model to classify the text based on "Category"

# + [markdown] id="zXe4YNLFB2se"
# Given a folder with docx files, using the model to predict the category for each resume

# + id="_HARvs3t2gfG"
directory = '/content/gdrive/MyDrive/Resume_Screening/Kaggle/Resumes'
filenames = []
# iterate over files in that directory, skipping anything that is not a
# regular file (the original computed os.path.join(...) but never used it,
# and appended every directory entry unconditionally).
for filename in os.listdir(directory):
    if os.path.isfile(os.path.join(directory, filename)):
        filenames.append(filename)

# + id="ahwXqcMg217S"
filenames

# + id="7ClBYVfA2xL_"
# !pip install python-docx

# + [markdown] id="GbDnO9WECPer"
# Converting the text in docx file to string

# + id="qNO300VD2vRQ"
import docx


def getText(filename):
    """Return the full text of a .docx file, paragraphs joined by newlines."""
    doc = docx.Document(filename)
    fullText = []
    #getting the complete text from docx file
    for para in doc.paragraphs:
        fullText.append(para.text)
    return '\n'.join(fullText)


# + id="VktUYsq62icT"
resumes = []
for name in filenames:
    # os.path.join is portable, unlike manual '/' concatenation.
    text = getText(os.path.join(directory, name))
    #appending the text to a list.
    resumes.append(text)

# + id="w8AmnZJkFefK"
resumes[0]

# + [markdown] id="0u8pETKPDN6c"
# Pre-processing the text data and using the model to predict the top 3 categories which the resume might belong to.

# + id="TakPw3ZT2m6p"
cleaned_resumes = []
new_resume_data = {}
pie_chart = {}
for count, resume in enumerate(resumes):
    #cleaning the resume text
    cleaned_resume = cleaned_data(resume)
    #appending the cleaned data to a list.
    cleaned_resumes.append(cleaned_resume)
    #getting the feature vector for the cleaned text.
    word_features = word_vectorizer.transform([cleaned_resume])
    #getting the probabilities for each category.
    probabilities = model.predict_proba(word_features)
    # np.argsort is ascending, so the last three columns are the top-3
    # label indices ordered least- to most-probable.
    best_three = np.argsort(probabilities, axis=1)[:, -3:]
    #storing the required data.
    best_three_list = []
    for idx in best_three:
        #converting the encoded labels to the actual labels.
        best_three_list.append(categories[idx])
        # NOTE(review): categories[idx][0] is the *least* probable of the
        # top three (ascending argsort) -- confirm this is the intended
        # category to tally in the pie chart.
        if categories[idx][0] in pie_chart:
            pie_chart[categories[idx][0]] += 1
        else:
            pie_chart[categories[idx][0]] = 1
    new_resume_data[count] = (best_three_list, resume, filenames[count])

# + [markdown] id="fEF9L7VzIoxu"
# # **Visualising the distribution of categories**

# + id="RtKIh2lZ3q_q"
plt.figure(figsize=(15, 15))
plt.pie(pie_chart.values())
plt.legend(labels=pie_chart.keys())
plt.show()

# + [markdown] id="w_AK3zOlIi4w"
# # **All categories**

# + id="I2-C0KC1GSwm"
for i in range(len(categories)):
    print(str(i) + ". " + categories[i])

# + [markdown] id="1q_0-ZIdIz2D"
# # **Category Input**

# + id="ooT5NODq6a14"
category_numbers = list(map(int, input("Enter category numbers (upto : 3): ").split()))

# + id="IteWJY5J7Hfm"
category_numbers

# + id="we6hhMmk7GH1"
requested_data = {}
requested_categories = []
for i in category_numbers:
    requested_categories.append(categories[i])
required = len(requested_categories)
# Report resumes matching all requested categories first, then
# progressively fewer matches.
while required > 1:
    print("Resumes with ", str(required), " requested categories.")
    for i in range(0, len(new_resume_data)):
        count = 0
        for category_list in new_resume_data[i][0]:
            for cat in category_list:
                if cat in requested_categories:
                    count += 1
        if count == required:
            print("\tSkills : ", new_resume_data[i][0])
            print("\tFile name : ", new_resume_data[i][2])
            # print("\tResume : ", new_resume_data[i][1])
            print("\n")
    required -= 1
    print("----------------------------------------")

# + [markdown] id="I45tNrIqog0Z"
# # **Saving the model**

# + id="5mnc7OvSnNmr"
import pickle

# + id="lA8FLtwHoML7"
# %cd /content/gdrive/MyDrive/Resume_Screening/

# + id="FtE64gzFoGkW"
with open('model.pkl', 'wb') as f:
    pickle.dump(model, f)
Resume_Screening_Final.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# # Introduction to Machine Learning with Scikit-Learn
#
# Today's workshop, presented by the
# [KAUST Visualization Core Lab (KVL)](https://corelabs.kaust.edu.sa/visualization/),
# is the second of two *Introduction to Machine Learning with Scikit-Learn*
# workshops. These workshops largely follow Chapter 2 of
# [*Hands-on Machine Learning with Scikit-Learn, Keras, and TensorFlow*](https://learning.oreilly.com/library/view/hands-on-machine-learning/9781492032632/),
# which walks through developing an end-to-end machine learning project with
# [Scikit-Learn](https://scikit-learn.org/stable/index.html).
#
# ## Today's schedule
#
# * Preparing the Data for Machine Learning Algorithms
# * Selecting and Training a Model
# * Fine Tuning Your Model

# # Prepare the data for machine learning algorithms
#
# "Best practice" is to write functions that automate the preparation of
# your data for machine learning. Why?
#
# * You can reproduce the transformations easily on any dataset.
# * You gradually build a library of transformation functions you can reuse
#   in future projects.
# * The same functions can transform new data in a "live" system before it
#   is fed to your algorithms.
# * It becomes easy to experiment with different combinations of
#   transformations and see which works best.
#
# First we load the training dataset, which we created last week using
# stratified sampling on a binned value of `median_income`.

training_df = pd.read_csv("../data/housing/training.csv", index_col="id")

training_df.info()

training_df.head()

# ## Feature Engineering
#
# We begin with the basic feature engineering and data cleaning tasks
# discussed last week but not actually completed. Feature engineering is one
# of the most important parts of any machine learning project — often the
# most labor-intensive — and usually requires extensive domain expertise.
#
# Packages such as [featuretools](https://www.featuretools.com/) have been
# developed to (partially) automate feature engineering, and the success of
# [deep learning](https://en.wikipedia.org/wiki/Deep_learning) is in large
# part due to models that automatically engineer the features most useful
# for a task, effectively replacing expensive hand-engineering. A recent
# example: [space2vec](https://medium.com/dessa-news/space-2-vec-fd900f5566),
# a deep-learning supernovae classifier built by machine learning engineers
# with no astronomy expertise, outperformed
# [AUTOSCAN](https://portal.nersc.gov/project/dessn/autoscan/), the NERSC
# pipeline that — while itself a big improvement over manual classification
# by astronomers — relied on a dataset of features hand-engineered by
# astronomers with over a century of combined training and expertise in the
# domain.
# The deep learning algorithm used by space2vec could be applied directly to
# the raw image data and did not rely on any hand-engineered features.

# +
def engineer_features(df):
    """Add household-level ratio features to *df*.

    Encapsulated in a function so the exact same transformation can easily
    be applied to both the training and testing datasets. Returns a new
    DataFrame; the input is not modified.
    """
    return df.assign(
        rooms_per_household=df["total_rooms"] / df["households"],
        bedrooms_per_room=df["total_bedrooms"] / df["total_rooms"],
        population_per_household=df["population"] / df["households"],
    )
# -

training_df_with_extra_features = engineer_features(training_df)

training_df_with_extra_features.head()

# ## Data Cleaning

training_df_with_extra_features.describe()

# Recall that the target variable `median_house_value` as well as the
# attributes `housing_median_age` and `median_income` are all truncated
# above some threshold value.

fig, ax = plt.subplots(1, 3, figsize=(12, 8))
_ = (training_df_with_extra_features.loc[:, ["housing_median_age", "median_income", "median_house_value"]]
     .hist(bins=50, ax=ax))

# We drop every observation whose value for at least one of these variables
# equals that variable's maximum, and encapsulate the logic in a function so
# the same cleaning can later be applied to the testing data.

# +
def _drop_max_values(df, attribute):
    # Rows whose value equals the column maximum sit at the truncation
    # threshold and are excluded.
    cutoff = df.loc[:, attribute].max()
    return df[df.loc[:, attribute] < cutoff]


def clean_dataset(df):
    """Drop census block groups whose values were truncated at the data
    collection thresholds:

    * median_house_value was truncated at 500000 USD,
    * median_income was truncated at 15 (thousand USD),
    * housing_median_age was truncated at 52 years.
    """
    cleaned = df
    for attribute in ("median_house_value", "median_income", "housing_median_age"):
        cleaned = _drop_max_values(cleaned, attribute)
    return cleaned
# -

cleaned_training_df = clean_dataset(training_df_with_extra_features)

fig, ax = plt.subplots(1, 3, figsize=(12, 8))
_ = (cleaned_training_df.loc[:, ["housing_median_age", "median_income", "median_house_value"]]
     .hist(bins=50, ax=ax))

# Separating the attributes/features from the labels/targets lets us apply
# different sets of transformations to each.

training_features_df = cleaned_training_df.drop("median_house_value", axis=1)
training_target_df = cleaned_training_df.loc[:, ["median_house_value"]]

# Most machine learning algorithms will not work with missing data. There
# are three options:
#
# 1. Drop any training samples that are missing values for *any*
#    attribute/feature.
# 2. Drop any attribute/feature with missing values.
# 3. Explicitly decide how to fill in the missing values.
#
# All three can be implemented with built-in Pandas functionality.

# option 1
(training_features_df.dropna(subset=["total_bedrooms"])
                     .info())

# option 2
(training_features_df.drop("total_bedrooms", axis=1)
                     .info())

# option 3
_median = (training_features_df.loc[:, "total_bedrooms"]  # save this value for later so you can prepare the testing features!
                               .median())
(training_features_df.fillna({"total_bedrooms": _median})
                     .info())

# However, rather than using Pandas I recommend using
# [Scikit-Learn](https://scikit-learn.org/stable/index.html).
# The Scikit-Learn [`impute`](https://scikit-learn.org/stable/modules/impute.html)
# module contains a number of different algorithms for filling missing
# values.

# +
from sklearn import impute

simple_imputer = impute.SimpleImputer(strategy="median")
# -

# [`impute.SimpleImputer`](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html)
# is the first Scikit-Learn Transformer we have encountered, so now is a
# good time to discuss the Scikit-Learn application programming interface
# (API). It is one of the best designed APIs around and has heavily
# influenced the API design of other libraries in the Python Data Science
# and Machine Learning ecosystem, in particular [Dask](https://dask.org/)
# and [NVIDIA RAPIDS](https://rapids.ai/index.html). Familiarity with it
# will make it easier to get started with those libraries.
#
# The Scikit-Learn API is built around the following key concepts.
#
# * Estimators: any object that can estimate some parameters based on a
#   dataset (e.g. an `impute.SimpleImputer`). The estimation itself is
#   performed by the `fit` method, which takes only a dataset as a
#   parameter (or two for supervised learning algorithms, the second
#   containing the labels). Any other parameter needed to guide the
#   estimation is a *hyperparameter* (such as `strategy` in
#   `impute.SimpleImputer`) and must be set as an instance variable,
#   generally via a constructor parameter.
#
# * Transformers: estimators (such as an `impute.SimpleImputer`) that can
#   also transform a dataset. The `transform` method takes the dataset to
#   transform as a parameter and returns the transformed dataset, generally
#   relying on the learned parameters, as is the case for an imputer. All
#   transformers also have a convenience method `fit_transform`, equivalent
#   to calling `fit` then `transform` (but sometimes optimized to run much
#   faster).
#
# * Predictors: estimators that, given a dataset, are capable of making
#   predictions. A predictor has a `predict` method that takes new
#   instances and returns corresponding predictions, and a `score` method
#   that measures prediction quality given a test set (and, for supervised
#   algorithms, the corresponding labels).
#
# All of an estimator's hyperparameters are accessible directly via public
# instance variables (e.g. `simple_imputer.strategy`), and all learned
# parameters via public instance variables with an underscore suffix (e.g.
# `simple_imputer.statistics_`). Scikit-Learn provides reasonable defaults
# for most parameters, making it easy to create a baseline working system
# quickly.

simple_imputer.fit(training_features_df)

# Since the median only exists for numeric attributes/features, you will
# need to drop all non-numeric attributes/features from the dataset before
# fitting `simple_imputer`.

numeric_features_df = training_features_df.drop("ocean_proximity", axis=1)

simple_imputer.fit(numeric_features_df)

# Fitting the `simple_imputer` computes the median of each attribute/feature
# in the dataset and stores the values for later reuse.

simple_imputer.statistics_

# medians computed using Pandas give same results as above
numeric_features_df.median()

# To fill missing values in the original dataset with the medians computed
# by `fit`, we call the `transform` method.
imputed_numeric_features_df = simple_imputer.transform(numeric_features_df)

# The transformed result is a NumPy array and should no longer contain any
# missing values.  BUG FIX: the original check `np.any(arr == np.nan)` is
# always False because NaN never compares equal to anything (including
# itself); np.isnan must be used to actually detect missing values.
np.isnan(imputed_numeric_features_df).any()

# There is also a `fit_transform` method which combines the calls to `fit`
# and `transform` in sequence.

imputed_numeric_features_df = simple_imputer.fit_transform(numeric_features_df)

simple_imputer.statistics_

# ## Handling Text and Categorical Attributes
#
# So far we have only discussed how to handle numeric attributes/features.
# Our dataset contains one non-numeric attribute/feature,
# `ocean_proximity`, which we have good reason to think is an important
# determinant of housing prices.

non_numeric_features_df = training_features_df.loc[:, ["ocean_proximity"]]

non_numeric_features_df.head()

# While the above might look like arbitrary text, `ocean_proximity` only
# takes a limited number of values.

non_numeric_features_df.value_counts()

# Machine learning algorithms almost always work with numbers. The
# Scikit-Learn [`preprocessing`](https://scikit-learn.org/stable/modules/preprocessing.html)
# module has several strategies for
# [encoding non-numeric attributes/features](https://scikit-learn.org/stable/modules/preprocessing.html#encoding-categorical-features).
# The simplest strategy is ordinal encoding, implemented by the
# [OrdinalEncoder](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html#sklearn.preprocessing.OrdinalEncoder)
# class.

# +
from sklearn import preprocessing

ordinal_encoder = preprocessing.OrdinalEncoder()
# -

Z = ordinal_encoder.fit_transform(non_numeric_features_df)

Z

ordinal_encoder.categories_

# With this representation machine learning algorithms will assume that two
# nearby values are more similar than two distant values. This may be fine
# in some cases, for example where the categories have a natural ordering
# such as "bad," "average," "good," and "excellent".
# ### Exercise
#
# Can anyone see an issue with using an ordinal encoding for our
# `ocean_proximity` attribute?

# ### Answer:
#
# The categories for `ocean_proximity` are not obviously ordered. For
# example category `0` (`<1H Ocean`) and category `4` (`NEAR OCEAN`) are
# clearly more similar to each other than to categories `1` and `3`,
# respectively. And what about the category `3` (`ISLAND`)?

# An alternative strategy, commonly used with categorical features that
# have no natural ordering, is to create one binary attribute per category.
# In our case: one attribute equal to `1` when the category is `<1H OCEAN`
# (and `0` otherwise), another equal to `1` when the category is `INLAND`
# (and `0` otherwise), and so on. This is called one-hot encoding, because
# only one attribute will be equal to 1 (hot) while the others will be 0
# (cold). The new attributes are sometimes called dummy attributes.
# Scikit-Learn provides a
# [`OneHotEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html#sklearn.preprocessing.OneHotEncoder)
# class to convert categorical values into one-hot vectors.

one_hot_encoder = preprocessing.OneHotEncoder()

Z = one_hot_encoder.fit_transform(non_numeric_features_df)

# transformed features are now a sparse matrix
Z

# convert sparse matrix to dense numpy array
Z.toarray()

one_hot_encoder.categories_

# Note that if a categorical attribute has a large number of possible
# categories, one-hot encoding produces a large number of input features,
# which may slow down training and degrade performance. If this happens,
# consider replacing the categorical attribute/feature with a useful
# numerical one related to the categories — for example, replace
# `ocean_proximity` with the distance to the ocean — or replace each
# category with a learnable, low-dimensional vector called an embedding.
# That approach is called
# [feature learning](https://en.wikipedia.org/wiki/Feature_learning) or
# representation learning (covered in chapters 13 and 17 of the textbook).

# ## Feature Scaling
#
# Machine learning algorithms typically don't perform well when the input
# numerical attributes have very different scales.

training_features_df.describe()

# The simplest approach is to rescale features so that they all reside
# within the same range (typically between 0 and 1). This approach is
# implemented in Scikit-Learn by the
# [`MinMaxScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html#sklearn.preprocessing.MinMaxScaler)
# class.

min_max_scaler = preprocessing.MinMaxScaler()

scaled_numeric_features_df = min_max_scaler.fit_transform(imputed_numeric_features_df)

min_max_scaler.data_min_  # these values will be reused later to rescale the testing features

min_max_scaler.data_max_  # these values will be reused later to rescale the testing features

# But what happens if an attribute has outliers and you apply min-max
# scaling?

_ = training_features_df.plot(kind="box", subplots=True, figsize=(24, 8))
plt.tight_layout()

# An alternative approach — also called standardization — rescales features
# to zero mean and unit standard deviation. It is particularly useful when
# attributes/features have outliers and when downstream algorithms assume
# the attributes/features have a Gaussian or Normal distribution.

# we will use this to make sure that all numerical features have the same scale
standard_scaler = preprocessing.StandardScaler()

scaled_numeric_features_df = standard_scaler.fit_transform(imputed_numeric_features_df)

standard_scaler.mean_  # these values will be reused later to rescale the testing features

standard_scaler.scale_  # these values will be reused later to rescale the testing features

# As with all the transformations, it is important to fit the scalers to
# the training data only — never to the full dataset (including the test
# set). Only then can you use them to transform the training set, the test
# set, and new data.

# ## Transformation Pipelines
#
# As you can see, preprocessing involves quite a lot of steps, and each
# must execute in the correct order. Fortunately Scikit-Learn lets you
# combine estimators into
# [pipelines](https://scikit-learn.org/stable/modules/compose.html#combining-estimators).
# We can encapsulate the preprocessing logic for our numeric attributes and
# for our non-numeric attributes in separate instances of the
# [`Pipeline`](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html#sklearn.pipeline.Pipeline)
# class.
#
# The `Pipeline` constructor takes a list of name/estimator pairs defining
# a sequence of steps. All but the last estimator must be transformers
# (i.e. have a `fit_transform` method). The names can be anything you like
# (as long as they are unique); later we will use them to access pipeline
# parameters when we discuss hyperparameter tuning.
# +
from sklearn import pipeline

numerical_pipeline = pipeline.Pipeline(
    steps=[
        ('imputer', impute.SimpleImputer(strategy="median")),
        ('standard_scaler', preprocessing.StandardScaler())
    ],
)

categorical_pipeline = pipeline.Pipeline(
    steps=[
        ("one_hot_encoder", preprocessing.OneHotEncoder())
    ],
)
# -

# We can then
# [combine these pipelines](https://scikit-learn.org/stable/modules/compose.html#columntransformer-for-heterogeneous-data)
# into a single pipeline using the
# [`ColumnTransformer`](https://scikit-learn.org/stable/modules/generated/sklearn.compose.ColumnTransformer.html#sklearn.compose.ColumnTransformer)
# class. Its constructor requires a list of tuples, each containing a name,
# a transformer, and the list of names (or indices) of the columns the
# transformer should be applied to.

# +
from sklearn import compose

numerical_attributes = [
    "longitude",
    "latitude",
    "housing_median_age",
    "total_rooms",
    "total_bedrooms",
    "population",
    "households",
    "median_income",
    "rooms_per_household",
    "bedrooms_per_room",
    "population_per_household",
]

categorical_attributes = [
    "ocean_proximity"
]

preprocessing_pipeline = compose.ColumnTransformer(
    transformers=[
        ("numerical_pipeline", numerical_pipeline, numerical_attributes),
        ("categorical_pipeline", categorical_pipeline, categorical_attributes)
    ],
)
# -

# Now we can fit the entire preprocessing pipeline to our training features
# dataset in one go!

preprocessed_training_features = preprocessing_pipeline.fit_transform(training_features_df)

type(preprocessed_training_features)

# It is often useful to create a Pandas `DataFrame` from the
# `preprocessed_training_features` NumPy `ndarray`.

# +
categories = list(preprocessing_pipeline.named_transformers_["categorical_pipeline"]
                                        .named_steps["one_hot_encoder"]
                                        .categories_[0])
_columns = numerical_attributes + categories
preprocessed_training_features_df = (pd.DataFrame
                                       .from_records(preprocessed_training_features,
                                                     columns=_columns))
# -

preprocessed_training_features_df.head()

# Another useful class is
# [`FeatureUnion`](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.FeatureUnion.html#sklearn.pipeline.FeatureUnion),
# which combines several transformer objects into a new transformer that
# combines their output. A `FeatureUnion` takes a list of transformer
# objects; during fitting each is fit to the data independently, and they
# are applied in parallel, with the feature matrices they output
# concatenated side-by-side into a larger matrix.

# Finally, estimators can be displayed with an HTML representation when
# shown in a Jupyter notebook — particularly useful to diagnose or
# visualize a `Pipeline` with many estimators. This visualization is
# activated by setting the display option in
# [sklearn.set_config](https://scikit-learn.org/stable/modules/generated/sklearn.set_config.html#sklearn.set_config).

# +
from sklearn import set_config

set_config(display='diagram')
# -

preprocessing_pipeline

# # Select and Train a Model
#
# At last! You framed the problem, you got the data and explored it, you
# sampled a training set and a test set, and you wrote transformation
# pipelines to clean up and prepare your data for machine learning
# algorithms automatically. You are now ready to select and train a machine
# learning model. You might have been wondering if we were ever going to
# make it to this point!
# Fact is, most of your time developing machine learning solutions to
# real-world problems will not be spent training machine learning models:
# most of *your* time will go into preparing the data, and most of the
# *computer* time will go into training the models.

# ## Training and Evaluating on the Training Dataset

# +
from sklearn import linear_model

regressor = linear_model.LinearRegression()
regressor.fit(preprocessed_training_features, training_target_df)
# -

# Congrats! You have fit your first machine learning model using
# Scikit-Learn. Now let's evaluate the model's performance using our chosen
# metric: root mean squared error (RMSE).

# +
from sklearn import metrics

predictions = regressor.predict(preprocessed_training_features)
mse = metrics.mean_squared_error(training_target_df, predictions)
rmse = mse**0.5
# -

rmse  # units are USD

# Linear regression is often a sensible model to start with but often
# underfits datasets with more complex relationships. Let's train a
# [`tree.DecisionTreeRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)
# — a powerful model capable of finding complex nonlinear relationships in
# the data.

# +
from sklearn import tree

regressor = tree.DecisionTreeRegressor()
regressor.fit(preprocessed_training_features, training_target_df)
# -

predictions = regressor.predict(preprocessed_training_features)
mse = metrics.mean_squared_error(training_target_df, predictions)
rmse = mse**0.5

rmse

# Wait, what!? No error at all? Could this model really be absolutely
# perfect? Unfortunately it is much more likely that the model has badly
# overfit the training data. How can you be sure? As we saw earlier, you
# don't want to touch the testing dataset until you are ready to launch a
# model you are confident about, so you need to use part of the training
# set for training and part of it for model validation.

# ## Better Evaluation using Cross Validation
#
# The following code uses Scikit-Learn
# [`model_selection.cross_val_score`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html#sklearn.model_selection.cross_val_score)
# to randomly split the training set into 10 distinct subsets called folds.
# It then trains and evaluates the model 10 times, each time evaluating on
# a different fold and training on the other 9, returning an array of the
# 10 evaluation scores.

# +
from sklearn import model_selection

linear_regression_scores = model_selection.cross_val_score(linear_model.LinearRegression(),
                                                           X=preprocessed_training_features,
                                                           y=training_target_df,
                                                           cv=10,
                                                           scoring="neg_mean_squared_error",
                                                           n_jobs=10)
# -

def display_rmses(rmses):
    """Print the mean and standard deviation of an array of RMSE scores."""
    print("RMSE mean:", rmses.mean())
    print("RMSE standard deviation:", rmses.std())

linear_regression_rmses = np.sqrt(-linear_regression_scores)
display_rmses(linear_regression_rmses)

_random_state = np.random.RandomState(42)
decision_tree_scores = model_selection.cross_val_score(tree.DecisionTreeRegressor(random_state=_random_state),
                                                       X=preprocessed_training_features,
                                                       y=training_target_df,
                                                       cv=10,
                                                       scoring="neg_mean_squared_error",
                                                       n_jobs=10)

decision_tree_rmses = np.sqrt(-decision_tree_scores)
display_rmses(decision_tree_rmses)

# Now the `DecisionTreeRegressor` doesn't look nearly as good as it did
# earlier — in fact it seems to perform worse than the much simpler
# `LinearRegression` model. Notice that cross-validation gives you not only
# an estimate of your model's performance but also a measure of how precise
# that estimate is (i.e. its standard deviation).

# Let's try one last model now: the
# [`RandomForestRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html).
# Random forests work by training many decision trees on random subsets of
# the features, then averaging the trees' predictions to arrive at an
# overall prediction. Building a model on top of many other models is
# called [ensemble learning](https://en.wikipedia.org/wiki/Ensemble_learning)
# and is often a great approach to improve the predictions of your machine
# learning pipeline.

# +
from sklearn import ensemble

_random_state = np.random.RandomState(42)
regressor = ensemble.RandomForestRegressor(random_state=_random_state)
regressor.fit(preprocessed_training_features, training_target_df.iloc[:, 0].ravel())
# -

predictions = regressor.predict(preprocessed_training_features)
mse = metrics.mean_squared_error(training_target_df, predictions)
rmse = mse**0.5

rmse

_random_state = np.random.RandomState(42)
random_forest_scores = model_selection.cross_val_score(ensemble.RandomForestRegressor(random_state=_random_state),
                                                       X=preprocessed_training_features,
                                                       y=training_target_df,
                                                       cv=10,
                                                       scoring="neg_mean_squared_error",
                                                       n_jobs=10)

random_forest_rmses = np.sqrt(-random_forest_scores)
display_rmses(random_forest_rmses)

# A `RandomForestRegressor` looks very promising. Note that the score on
# the training set is still much lower than on the validation sets, which
# indicates the model is still overfitting the training set. Possible
# solutions for overfitting: simplify the model, constrain it (i.e.
# regularize it), or get a lot more training data.

# ### Exercise
#
# Before we dive into hyperparameter tuning, try out a few other models
# from various categories of machine learning algorithms: in particular
# take a look at
# [Nearest Neighbor](https://scikit-learn.org/stable/modules/neighbors.html)
# and
# [Support Vector Machine (SVM)](https://scikit-learn.org/stable/modules/svm.html#regression)
# regression algorithms. Don't spend too much time tweaking the default
# hyperparameters. The goal is to shortlist two or three promising models
# for fine-tuning.
from sklearn import neighbors knn_scores = model_selection.cross_val_score(neighbors.KNeighborsRegressor(), X=preprocessed_training_features, y=training_target_df, cv=10, scoring="neg_mean_squared_error", n_jobs=10) knn_rmses = np.sqrt(-knn_scores) display_rmses(knn_rmses) from sklearn import svm svr_scores = model_selection.cross_val_score(svm.SVR(), X=preprocessed_training_features, y=training_target_df, cv=10, scoring="neg_mean_squared_error", n_jobs=10) svr_rmses = np.sqrt(-svr_scores) display_rmses(svr_rmses) # # Fine-tune your models # # Most common approach to tuning a model is to manually fiddle with the hyperparameters until you find a great combination of hyperparameter values. Needless to day, this approach to model tuning is *very* tedious and not at all scientific. We can do much better! # ## Grid Search # # Simplest approach is to use Scikit-Learn’s [`model_selection.GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). All you need to do is tell it which hyperparameters you want it to experiment with and what values to try out. The `model_selection.GridSearchCV` class will then use cross-validation to evaluate all the possible combinations of hyperparameter values and return the best scoring set of hyperparameters according to your specified metric. 
# + parameter_grid = [ {'n_estimators': [10, 100], 'max_features': ["auto", "sqrt", "log2"]}, # 2 * 3 = 6 parameter combinations to try {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 4, 8]}, # 1 * 2 * 3 = 6 parameter combinations to try ] _random_state = np.random.RandomState(42) random_forest_regressor = ensemble.RandomForestRegressor(random_state=_random_state) grid_search_cv = model_selection.GridSearchCV(random_forest_regressor, parameter_grid, cv=5, scoring='neg_mean_squared_error', return_train_score=True, n_jobs=5, verbose=10) grid_search_cv # - _ = grid_search_cv.fit(preprocessed_training_features, training_target_df) # RMSE for the best parameters (-grid_search_cv.best_score_)**0.5 grid_search_cv.best_params_ # best_estimator_ is trained with the values from best_params_ grid_search_cv.best_estimator_ # You should save every model you experiment with so that you can come back easily to any model you want. Make sure you save both the hyperparameters and the trained parameters as well as the cross-validation scores and perhaps the actual predictions as well. This will allow you to more easily compare scores across model types and compare the types of errors they make. # + import joblib import time timestamp = time.strftime("%Y%m%d-%H%M%S") _ = joblib.dump(grid_search_cv, f"../results/models/grid-search-cv-random-forest-regressor-{timestamp}.pkl") # - # For reference here is how you would reload the trained model from the file. 
reloaded_grid_search_cv = joblib.load(f"../results/models/grid-search-cv-random-forest-regressor-{timestamp}.pkl") # compare with grid_search_cv.best_params_ reloaded_grid_search_cv.best_params_ # ## Randomized Search # # The grid search approach is fine when you are exploring relatively few combinations but when the hyperparameter search space is large it is often preferable to use [`model_selection.RandomizedSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html#sklearn.model_selection.RandomizedSearchCV) instead. Instead of trying out all possible combinations, `model_selection.RandomizedSearchCV` evaluates a given number of random combinations by selecting a random value for each hyperparameter at every iteration. This approach has two main benefits. # # * More efficient exploration of the hyperparameter space. # * More control over the computing budget you want to allocate to hyperparameter search. # # + from scipy import stats _param_distributions = { "n_estimators": stats.geom(p=0.01), "min_samples_split": stats.beta(a=1, b=99), "min_samples_leaf": stats.beta(a=1, b=999), } _random_state = np.random.RandomState(42) random_forest_regressor = ensemble.RandomForestRegressor(random_state=_random_state) randomized_search_cv = model_selection.RandomizedSearchCV( random_forest_regressor, param_distributions=_param_distributions, scoring="neg_mean_squared_error", random_state=_random_state, n_iter=10, cv=5, n_jobs=5, verbose=10 ) randomized_search_cv # - _ = randomized_search_cv.fit(preprocessed_training_features, training_target_df) # RMSE for the best parameters (-randomized_search_cv.best_score_)**0.5 randomized_search_cv.best_params_ _timestamp = time.strftime("%Y%m%d-%H%M%S") _ = joblib.dump(randomized_search_cv.best_estimator_, f"../results/models/randomized-search-cv-random-forest-regressor-{_timestamp}.pkl") # Grid search and randomized search are the two easiest ways to get started with hyperparameter 
optimization (HPO) within Scikit-Learn. However, increasingly I finfd myself using [Optuna](https://optuna.org/) for my HPO workloads. # ## Analyze the Best Models and Their Errors # # You will often gain good insights on the problem by inspecting the best models. For example, the `ensemble.RandomForestRegressor` can indicate the relative importance of each attribute for making accurate predictions. _data = (randomized_search_cv.best_estimator_ .feature_importances_) _index = preprocessed_training_features_df.columns feature_importances = pd.Series(_data, index=_index) feature_importances.sort_values(ascending=False) # It looks like only one of the categories of `ocean_proximity` is useful. Based on this information, I might go back and re-encode `ocean_proximity` to be a binary indicator that takes the value of `1` if the category is either `ISLAND`, `NEAR_BAY`, or `NEAR OCEAN` and `0` if the value is `INLAND`. The would reduce the number of features and speed up computation for some machine learning models. # # You should also look at the specific errors that your system makes, then try to understand why it makes them and what could fix the problem (adding extra features or getting rid of uninformative ones, cleaning up outliers, etc.). _y_true = (training_target_df.values .ravel()) _y_pred = (randomized_search_cv.best_estimator_ .predict(preprocessed_training_features)) _prediction_errors = _y_true - _y_pred # positive prediction error indicates model under-predicts housing prices! preprocessed_training_features_df["prediction_errors"] = _prediction_errors # ### Prediction errors have lots of outliers # # If your predictions errors exhibit lots of outliers, then you can inspect which training data samples are the ones for which the model makes poor predictions. 
fig, ax = plt.subplots(1, 1, figsize=(12, 10)) _ = preprocessed_training_features_df.loc[:, "prediction_errors"].plot(kind="box") # census block groups for which model under-predicts housing prices (preprocessed_training_features_df.sort_values("prediction_errors", ascending=False) .head()) # census block groups for which model over-predicts housing prices (preprocessed_training_features_df.sort_values("prediction_errors", ascending=False) .tail()) # ### Exploring the geographical distribution of prediction errors fig, ax = plt.subplots(1, 1, figsize=(12, 10)) _color = preprocessed_training_features_df.loc[:, "prediction_errors"] / 10000 _cmap = plt.get_cmap("viridis") _ = preprocessed_training_features_df.plot(kind="scatter", x="longitude", y="latitude", c=_color, cmap=_cmap, ax=ax, alpha=0.4) # ### Exploring how prediction errors vary with median income fig, ax = plt.subplots(1, 1, figsize=(12, 10)) _ = preprocessed_training_features_df.plot(kind="scatter", x="median_income", y="prediction_errors", ax=ax, alpha=0.1) # ## Evaluate your system on the test dataset # # After tweaking your models for a while, you eventually have a system that performs sufficiently well. Now is the time to evaluate the final model on the test set. 
testing_df = pd.read_csv("../data/housing/testing.csv", index_col="id") with_engineered_features_df = engineer_features(testing_df) cleaned_testing_df = clean_dataset(with_engineered_features_df) testing_features_df = cleaned_testing_df.drop("median_house_value", axis=1, inplace=False) testing_target_df = cleaned_testing_df.loc[:, "median_house_value"] preprocessed_testing_features = preprocessing_pipeline.transform(testing_features_df) predictions = randomized_search_cv.best_estimator_.predict(preprocessed_testing_features) np.sqrt(metrics.mean_squared_error(testing_target_df, predictions)) # In some cases, such a point estimate of the generalization error will not be quite enough to convince you to launch: what if it is just marginally better than the model currently in production? You might want to have an idea of how precise this estimate is. # example of computing an estimate of the confidence interval for the test set error confidence = 0.95 squared_errors = (testing_target_df - predictions)** 2 _interval = (stats.t .interval(confidence, squared_errors.size - 1, loc=squared_errors.mean(), scale=stats.sem(squared_errors))) np.sqrt(_interval) # If you did a lot of hyperparameter tuning, the performance will usually be slightly worse than what you measured using cross-validation (because your system ends up fine-tuned to perform well on the validation data and will likely not perform as well on unknown datasets). It is not the case in this example, but when this happens you must resist the temptation to tweak the hyperparameters to make the numbers look good on the test set; the improvements would be unlikely to generalize to new data.
notebooks/part-02.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):


class Solution:
    def firstBadVersion(self, n, is_bad=None):
        """Return the index (1-based) of the first bad version among 1..n.

        All versions after a bad one are also bad, so a standard binary
        search applies.  Invariant: the first bad version always lies in
        the closed range ``[left, right]``.  Makes O(log n) calls to the
        version-checking API.

        :type n: int
        :param is_bad: optional predicate ``int -> bool`` overriding the
            judge-provided global ``isBadVersion`` (useful for testing);
            defaults to the global API when omitted.
        :rtype: int
        """
        if is_bad is None:
            is_bad = isBadVersion  # judge-provided API
        left, right = 1, n
        while left < right:
            # Midpoint written to avoid overflow in fixed-width languages;
            # harmless (and conventional) in Python.
            mid = left + (right - left) // 2
            if is_bad(mid):
                right = mid       # first bad version is mid or earlier
            else:
                left = mid + 1    # first bad version is strictly after mid
        # left == right: the single remaining candidate.  Also covers the
        # n == 1 case, where the loop body never runs.
        return left
# -

# NOTE: the original cell contained two near-identical hand-rolled searches
# (the second class definition shadowed the first at runtime); they tracked
# extra state (max_right / min_bad) that the loop invariant makes redundant.
# Runtime: 28 ms, faster than 97.76% of Python3 online submissions for First Bad Version.
# Memory Usage: 13.1 MB, less than 63.47% of Python3 online submissions for First Bad Version.
LeetCode278.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 911 Calls Data from *Kaggle* # For this project I would be performing some basic exploratory analysis on previously collected data from **Kaggel**, to show some trends regarding the 911 calls made from the Montgomery County, Pennsylvania # The data contains the following fields: # # * lat : String variable, Latitude # * lng: String variable, Longitude # * desc: String variable, Description of the Emergency Call # * zip: String variable, Zipcode # * title: String variable, Title # * timeStamp: String variable, YYYY-MM-DD HH:MM:SS # * twp: String variable, Township # * addr: String variable, Address # * e: String variable, Dummy variable (always 1) # **Importing the necessary libraries** import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams["patch.force_edgecolor"] = True sns.set() sns.set_style('whitegrid') # **The CSV file is then read into a Data Frame** calls=pd.read_csv('911.csv') # **Quick Summary of the dataset** calls.head() # **In order to arrange the data neatly so as to facilitate easier analysis, a couple of modification has to be made to the dataset** # # * Splitting the reason for the call # * Converting the *timestamp* into datetime # * Adding columns for hour, day, month and date calls['Reason'] = calls['title'].apply(lambda var: var.split(':')[0]) calls['Reason-subclass'] = calls['title'].apply(lambda var: var.split(':')[1]) calls['timeStamp']=pd.to_datetime(calls['timeStamp']) calls['hour'] = calls['timeStamp'].apply(lambda var:var.hour) calls['month'] = calls['timeStamp'].apply(lambda var:var.month) calls['day of week'] = calls['timeStamp'].apply(lambda var:var.dayofweek) calls['Date'] = calls['timeStamp'].apply(lambda var:var.date()) # **Summary of the 
modified Dataset** calls.head() # **From the dataset we can look at the list of the top 5 townships that made the most number of calls** top_five = calls['twp'].value_counts().head(5) print(top_five) # **Now looking at the number of calls based on the *Reason for the call*.** sns.countplot(x='Reason', data=calls ,palette='bright', order = calls['Reason'].value_counts().index) sns.despine() # **Looking further into the subclass, we get to identify the categories of the Emergency, Traffic or Fire calls being made** fig,ax = plt.subplots(3,1, figsize=(12,15)) sns.countplot(data=calls[calls['Reason']=='EMS'], y='Reason-subclass', palette = 'bright', order = calls[calls['Reason']=='EMS']['Reason-subclass'].value_counts().index[0:10], ax=ax[0]) ax[0].set_title('EMS', fontsize = 'large') sns.countplot(data=calls[calls['Reason']=='Traffic'], y='Reason-subclass', palette = 'bright', order = calls[calls['Reason']=='Traffic']['Reason-subclass'].value_counts().index[0:10], ax=ax[1]) ax[1].set_title('Traffic', fontsize = 'large') sns.countplot(data=calls[calls['Reason']=='Fire'], y='Reason-subclass', palette = 'bright', order = calls[calls['Reason']=='Fire']['Reason-subclass'].value_counts().index[0:10], ax=ax[2]) ax[2].set_title('Fire', fontsize = 'large') sns.despine() plt.tight_layout() # **The figures above list the top sub fields of each reason mentioned earlier. 
Although we had seen earlier that the EMS had the most number of 911 calls made, from the plot above we can see that the individual subclass that caused the most number of 911 calls is *Vehicle Accident* under the "Traffic" reason ** dmap = {0:'Mon',1:'Tue',2:'Wed',3:'Thu',4:'Fri',5:'Sat',6:'Sun'} calls['Day of Week'] = calls['day of week'].map(dmap) lmap = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun'] calls['Day of Week'] = pd.Categorical(calls['Day of Week'], lmap) fig,ax = plt.subplots(3,1, figsize=(12,15)) sns.countplot(data=calls[calls['Reason']=='EMS'], y='twp', palette = 'bright', order = calls[calls['Reason']=='EMS']['twp'].value_counts().index[0:10], ax=ax[0]) ax[0].set_title('EMS', fontsize = 'large') ax[0].set_ylabel('Town') sns.countplot(data=calls[calls['Reason']=='Traffic'], y='twp', palette = 'bright', order = calls[calls['Reason']=='Traffic']['twp'].value_counts().index[0:10], ax=ax[1]) ax[1].set_title('Traffic', fontsize = 'large') ax[1].set_ylabel('Town') sns.countplot(data=calls[calls['Reason']=='Fire'], y='twp', palette = 'bright', order = calls[calls['Reason']=='Fire']['twp'].value_counts().index[0:10], ax=ax[2]) ax[2].set_title('Fire', fontsize = 'large') ax[2].set_ylabel('Town') sns.despine() plt.tight_layout() # **It was shown before that the township with the highest number of 911 calls was *Lower Merion*, the bar plot confirms this. 
But in terms of Emergency calls, Norristown makes 20% more calls than Lower Merion** # **Looking at the data now with respect to time** grp_Fire = calls[calls['Reason']=='Fire'].groupby('Date').count() grp_Traf = calls[calls['Reason']=='Traffic'].groupby('Date').count() grp_EMS = calls[calls['Reason']=='EMS'].groupby('Date').count() fig = plt.figure(figsize=(12, 6)) plt.plot(grp_Fire['lat'],lw=2.5, label='Fire') plt.plot(grp_Traf['lat'],lw=2.5, color = 'red', label='Traffic') plt.plot(grp_EMS['lat'],lw=2.5, color = 'black', label='EMS') plt.tight_layout() plt.legend(bbox_to_anchor=(1.15, 1), fontsize = 'medium') plt.xlabel('Date', fontsize = 'medium') plt.ylabel('Number of Calls', fontsize = 'medium') plt.title('No. of 911 calls over time', fontsize = 'x-large') plt.show() # **A heatmap would be a good way to visualize data in identify specific hours or months of calls** table1 = calls[calls['Reason']=='EMS'].pivot_table(values='e', index=['month'], columns = ['hour'], aggfunc = len) table2 = calls[calls['Reason']=='Traffic'].pivot_table(values='e', index=['month'], columns = ['hour'], aggfunc = len) table3 = calls[calls['Reason']=='Fire'].pivot_table(values='e', index=['month'], columns = ['hour'], aggfunc = len) figure,ax = plt.subplots(3,1,figsize=(12,15)) sns.heatmap(table1, cmap = 'viridis', ax=ax[0]) ax[0].set_title('EMS', fontsize = 'large') sns.heatmap(table2, cmap = 'viridis', ax=ax[1]) ax[1].set_title('Traffic', fontsize = 'large') sns.heatmap(table3, cmap = 'viridis', ax=ax[2]) ax[2].set_title('Fire', fontsize = 'large') plt.tight_layout() # * It looks like most of the Emergency calls are being made during the day time, while the Traffic and Fire calls are being made during the evening # * We also notice the dearth in calls towards the end of the year and a strong rise in calls at the beginning of the year. # * Very few calls are made into midnight/early morning. 
# **More analysis can be drawn from the data by looking more closely at parameters such as calls made from a specific location due to respiratory difficulties, etc. For the purposes of this project, we restrict ourselves to the afore-mentioned analysis, as we focus more on understanding and using the packages of Python for the purpose of Data Analysis.**
911_calls-Kaggle.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Python program to illustrate the concept of threading.
import threading


def print_cube(num):
    """Compute the cube of *num*.

    Stores the formatted result in the module-level ``results1`` (so the
    main thread can read it after ``join()``) and also returns it.
    """
    global results1
    results1 = "Cube: {}".format(num * num * num)
    return results1


def print_square(num):
    """Print the square of *num* and return the formatted string."""
    message = "Square: {}".format(num * num)
    print(message)
    return message


if __name__ == "__main__":
    # BUG FIX: the original code did ``threading.Thread(print_square(10))``,
    # which executes print_square(10) immediately in the MAIN thread and
    # passes its return value as Thread's first positional parameter
    # (``group``) -- so neither function ever ran on a worker thread.
    # The callable and its arguments must be passed separately via
    # ``target=`` and ``args=``.
    t1 = threading.Thread(target=print_square, args=(10,))
    t2 = threading.Thread(target=print_cube, args=(10,))

    # starting thread 1
    t1.start()
    # starting thread 2
    t2.start()

    # wait until thread 1 is completely executed
    t1.join()
    # wait until thread 2 is completely executed
    t2.join()

    # both threads completely executed
    print("Done!")
# -
code/multithread test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="dlvl6kiulffy"
# # Chapter 3 - Exercise 1b
# -

# ### Skills practised in this exercise:
#
# #### Array operations:
# 1. ``np.intersect1d`` -- the distinct values that occur in *both* arrays
# 2. ``np.setdiff1d``   -- the values that occur only in the first array
#
# #### Work through each question and compare with the expected output.

# + colab={} colab_type="code" id="gr8DM63nlff1"
import numpy as np

# + colab={} colab_type="code" id="D5K_vpeHlff9"
# Question 1: given the two arrays below, build arr_c containing only the
# distinct values present in both arr_a and arr_b.
arr_a = [1, 2, 3, 2, 3, 4, 3, 4, 5, 6]
arr_b = [7, 2, 10, 2, 7, 4, 9, 4, 9, 8]
arr_c = np.intersect1d(arr_a, arr_b)
print(arr_c)
# expected output: [2 4]
# -

# + colab={} colab_type="code" id="yif1PT2-lff_"
# Question 2: build arr_d containing the values that appear in arr_a but
# never in arr_b.
arr_d = np.setdiff1d(arr_a, arr_b)
print(arr_d)
# expected output: [1 3 5 6]
# -

# + colab={} colab_type="code" id="aSdpa26YlfgC"
# Question 3: from arr_e keep only the elements whose value lies between
# 5 and 10 inclusive, preserving order and duplicates (boolean masking).
arr_e = np.array([2, 6, 1, 9, 10, 3, 27, 8, 6, 25, 16])
in_range = (arr_e >= 5) & (arr_e <= 10)
arr_f = arr_e[in_range]
print(arr_f)
# expected output: [ 6  9 10  8  6]
# -
Chapter3_Exercise1b.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 회원 탈퇴 예측 # * Decision Tree # ## 1. 데이터 읽기 및 수정 # + import pandas as pd import warnings warnings.filterwarnings(action='ignore') customer = pd.read_csv('customer_join.csv') uselog_months = pd.read_csv('use_log_months.csv') # - customer.head() uselog_months.head() # * 이달과 지난 달의 이용횟수 집계 # + year_months = list(uselog_months['연월'].unique()) uselog = pd.DataFrame() for i in range(1, len(year_months)): tmp = uselog_months.loc[uselog_months['연월']==year_months[i]] tmp.rename(columns={'count':'count_0'}, inplace=True) tmp_before = uselog_months.loc[uselog_months['연월']==year_months[i-1]] del tmp_before['연월'] tmp_before.rename(columns={'count':'count_1'}, inplace=True) tmp = pd.merge(tmp, tmp_before, on='customer_id', how='left') uselog = pd.concat([uselog, tmp], ignore_index=True) uselog.head() # - # ## 2. 탈퇴 전월의 탈퇴 고객 데이터 # * 탈퇴한 회원 추출, end_date의 1개월 전을 계산 -> 연월에 저장 후 uselog와 customer_id, 연월로 결합 # + from dateutil.relativedelta import relativedelta exit_customer = customer.loc[customer['is_deleted']==1] exit_customer['exit_date'] = None exit_customer['end_date'] = pd.to_datetime(exit_customer['end_date']) for i in range(len(exit_customer)): exit_customer['exit_date'].iloc[i] = exit_customer['end_date'].iloc[i] - relativedelta(months=1) exit_customer['연월'] = exit_customer['exit_date'].dt.strftime('%Y%m') uselog['연월'] = uselog['연월'].astype(str) exit_uselog = pd.merge(uselog, exit_customer, on=['customer_id', '연월'], how='left') print(len(uselog)) exit_uselog.head() # - # * 결측치 제거 exit_uselog = exit_uselog.dropna(subset=['name']) print(len(exit_uselog)) print(len(exit_uselog['customer_id'].unique())) exit_uselog.head() # ## 3. 
지속 회원의 데이터 작성 # + conti_customer = customer.loc[customer['is_deleted']==0] conti_uselog = pd.merge(uselog, conti_customer, on=['customer_id'], how='left') print(len(conti_uselog)) conti_uselog = conti_uselog.dropna(subset=['name']) print(len(conti_uselog)) # - conti_uselog = conti_uselog.sample(frac=1).reset_index(drop=True) conti_uselog = conti_uselog.drop_duplicates(subset="customer_id") print(len(conti_uselog)) conti_uselog.head() # 지속 회원 데이터와 탈퇴 회원 데이터를 세로로 결합 predict_data = pd.concat([conti_uselog, exit_uselog], ignore_index=True) print(len(predict_data)) predict_data.head() # ## 4. 예측할 달의 재적 기간을 작성 # + predict_data['period'] = 0 predict_data['now_date'] = pd.to_datetime(predict_data['연월'], format="%Y%m") predict_data['start_date'] = pd.to_datetime(predict_data['start_date']) for i in range(len(predict_data)): delta = relativedelta(predict_data['now_date'][i], predict_data['start_date'][i]) predict_data['period'][i] = int(delta.years * 12 + delta.months) predict_data.head() # - # ## 5. 결측치 제거 predict_data.isna().sum() predict_data = predict_data.dropna(subset=['count_1']) predict_data.isna().sum() # ## 6. 문자열 변수 처리를 위한 가공 target_col = ['campaign_name', 'class_name', 'gender', 'count_1', 'routine_flg', 'period', 'is_deleted'] predict_data = predict_data[target_col] predict_data.head() predict_data = pd.get_dummies(predict_data) predict_data.head() del predict_data["campaign_name_일반"] del predict_data["class_name_야간"] del predict_data["gender_M"] predict_data.head() # ## 7. 
의사결정트리로 탈퇴 예측 모델 구축 # + from sklearn.tree import DecisionTreeClassifier import sklearn.model_selection exit = predict_data.loc[predict_data['is_deleted']==1] conti = predict_data.loc[predict_data['is_deleted']==0].sample(len(exit)) X = pd.concat([exit, conti], ignore_index=True) y = X['is_deleted'] del X['is_deleted'] X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y) model = DecisionTreeClassifier(random_state=0) model.fit(X_train, y_train) y_test_pred = model.predict(X_test) print(y_test_pred) # - results_test = pd.DataFrame({'y_test' : y_test, 'y_pred' : y_test_pred}) results_test.head() # ## 8. 예측 모델 평가 correct = len(results_test.loc[results_test['y_test']==results_test['y_pred']]) data_count = len(results_test) score_test = correct / data_count print(score_test) print(model.score(X_test, y_test)) print(model.score(X_train, y_train)) # + X = pd.concat([exit, conti], ignore_index=True) y = X['is_deleted'] del X['is_deleted'] X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X,y) model = DecisionTreeClassifier(random_state=0, max_depth=5) model.fit(X_train, y_train) print(model.score(X_test, y_test)) print(model.score(X_train, y_train)) # - # ## 9. 모델에 기여하는 변수 확인 importance = pd.DataFrame({'feature_names' : X.columns, 'coefficient' : model.feature_importances_}) importance
[분석실무]5.회원탈퇴예측.ipynb
# ##### Copyright 2021 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # jobshop_with_maintenance_sat # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/examples/jobshop_with_maintenance_sat.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/examples/python/jobshop_with_maintenance_sat.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # Copyright 2010-2021 Google # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Jobshop with maintenance tasks using the CP-SAT solver.""" import collections from ortools.sat.python import cp_model class SolutionPrinter(cp_model.CpSolverSolutionCallback): """Print intermediate solutions.""" def __init__(self): cp_model.CpSolverSolutionCallback.__init__(self) self.__solution_count = 0 def on_solution_callback(self): """Called at each new solution.""" print('Solution %i, time = %f s, objective = %i' % (self.__solution_count, self.WallTime(), self.ObjectiveValue())) self.__solution_count += 1 def jobshop_with_maintenance(): """Solves a jobshop with maintenance on one machine.""" # Create the model. model = cp_model.CpModel() jobs_data = [ # task = (machine_id, processing_time). [(0, 3), (1, 2), (2, 2)], # Job0 [(0, 2), (2, 1), (1, 4)], # Job1 [(1, 4), (2, 3)], # Job2 ] machines_count = 1 + max(task[0] for job in jobs_data for task in job) all_machines = range(machines_count) # Computes horizon dynamically as the sum of all durations. horizon = sum(task[1] for job in jobs_data for task in job) # Named tuple to store information about created variables. task_type = collections.namedtuple('Task', 'start end interval') # Named tuple to manipulate solution information. assigned_task_type = collections.namedtuple('assigned_task_type', 'start job index duration') # Creates job intervals and add to the corresponding machine lists. 
all_tasks = {} machine_to_intervals = collections.defaultdict(list) for job_id, job in enumerate(jobs_data): for task_id, task in enumerate(job): machine = task[0] duration = task[1] suffix = '_%i_%i' % (job_id, task_id) start_var = model.NewIntVar(0, horizon, 'start' + suffix) end_var = model.NewIntVar(0, horizon, 'end' + suffix) interval_var = model.NewIntervalVar(start_var, duration, end_var, 'interval' + suffix) all_tasks[job_id, task_id] = task_type(start=start_var, end=end_var, interval=interval_var) machine_to_intervals[machine].append(interval_var) # Add maintenance interval (machine 0 is not available on time {4, 5, 6, 7}). machine_to_intervals[0].append(model.NewIntervalVar(4, 4, 8, 'weekend_0')) # Create and add disjunctive constraints. for machine in all_machines: model.AddNoOverlap(machine_to_intervals[machine]) # Precedences inside a job. for job_id, job in enumerate(jobs_data): for task_id in range(len(job) - 1): model.Add(all_tasks[job_id, task_id + 1].start >= all_tasks[job_id, task_id].end) # Makespan objective. obj_var = model.NewIntVar(0, horizon, 'makespan') model.AddMaxEquality(obj_var, [ all_tasks[job_id, len(job) - 1].end for job_id, job in enumerate(jobs_data) ]) model.Minimize(obj_var) # Solve model. solver = cp_model.CpSolver() solution_printer = SolutionPrinter() status = solver.Solve(model, solution_printer) # Output solution. if status == cp_model.OPTIMAL: # Create one list of assigned tasks per machine. assigned_jobs = collections.defaultdict(list) for job_id, job in enumerate(jobs_data): for task_id, task in enumerate(job): machine = task[0] assigned_jobs[machine].append( assigned_task_type(start=solver.Value( all_tasks[job_id, task_id].start), job=job_id, index=task_id, duration=task[1])) # Create per machine output lines. output = '' for machine in all_machines: # Sort by starting time. 
assigned_jobs[machine].sort() sol_line_tasks = 'Machine ' + str(machine) + ': ' sol_line = ' ' for assigned_task in assigned_jobs[machine]: name = 'job_%i_%i' % (assigned_task.job, assigned_task.index) # Add spaces to output to align columns. sol_line_tasks += '%-10s' % name start = assigned_task.start duration = assigned_task.duration sol_tmp = '[%i,%i]' % (start, start + duration) # Add spaces to output to align columns. sol_line += '%-10s' % sol_tmp sol_line += '\n' sol_line_tasks += '\n' output += sol_line_tasks output += sol_line # Finally print the solution found. print('Optimal Schedule Length: %i' % solver.ObjectiveValue()) print(output) print('Statistics') print(' - conflicts : %i' % solver.NumConflicts()) print(' - branches : %i' % solver.NumBranches()) print(' - wall time : %f s' % solver.WallTime()) jobshop_with_maintenance()
examples/notebook/examples/jobshop_with_maintenance_sat.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Simulated X-ray Spectrum of Gas in CIE
#
# Simulated X-ray spectrum of solar abundance low-density ($n_e$=0.004), hot (T=10$^6$K) gas in collisional
# ionization equilibrium (CIE). Ionic species responsible for various emission lines are labeled. Wavelengths
# range from 150 to 270 Angstroms in steps of 0.5 Angstroms.
#
# This notebook uses ChiantiPy to compute the spectrum data plotted by Fig5_8_XRaySpectrum.ipynb. This notebook
# may be used, for example, to explore other values of temperature, density, and emission measure
# than used for Figure 5.8.
#
# To keep you from accidentally overwriting the data file used by the notebook for Figure 5.8, we've named the
# output spectrum file "mySpectrum.txt".
#
# You will need to install the latest versions of ChiantiPy and the CHIANTI atomic database - available separately -
# before you can use this notebook, and the versions of each must be compatible. At the time of this notebook's
# release, we tested it with ChiantiPy version 0.9.5 and CHIANTI version 10.0. See the caveat below.
#
# **This notebook only computes the spectrum, it does not plot it.** The calculation takes many minutes to
# complete.
#
#
# ## Caveat
#
# ChiantiPy and CHIANTI must both be installed on your system and up to date to run this notebook. Exceptions
# raised because of missing files are often due to having updated ChiantiPy but not the full CHIANTI database.
# Being diligent about keeping the two in-sync is part of using Chianti.
#
# ### CHIANTI citation
#
# CHIANTI is a collaborative project involving George Mason University, the University of Michigan (USA),
# University of Cambridge (UK) and NASA Goddard Space Flight Center (USA).
# +
import math

import numpy as np

# Throttle nuisance warnings
import warnings
warnings.filterwarnings('ignore', category=UserWarning, append=True)

# Chianti CIE code
import ChiantiPy.core as ch
import ChiantiPy.tools.filters as cf
# -

# ## Calculate a simulated spectrum
#
# Compute the X-ray spectrum of a CIE plasma with these parameters:
#  * Temperature: T=10<sup>6</sup>
#  * Electron density: n$_e$=0.004 cm$^{-3}$
#  * Emission Measure: EM=10<sup>25</sup>.
#
# The spectrum is calculated for wavelengths from 150 to 270 Angstroms in 0.5 Angstrom steps.
#
# The minimum abundance species to use for the calculation is set to **minAbund=2e-5**, which adds N, Mg, Si,
# S, and Fe to the calculation (set this to 1e-4 to include just H, He, C, O, and Ne). Setting minAbund
# to 1e-6 will add Na, Al, Ar, Ca, and Ni, but the computation time required can be very long.
#
# Elapsed computation time with minAbund=2e-5 was ~200 seconds for us, your performance will likely differ.

# +
# plasma parameters
T = 1.0e+6    # plasma temperature in Kelvin
ne = 0.004    # electron density in cm^-3, typical of the local HIM bubble
em = 1.0e+25  # emission measure

# Spectrum parameters
minLam = 150.0   # starting wavelength in Angstroms
maxLam = 270.0   # ending wavelength in Angstroms
lamStep = 0.5    # wavelength step in Angstroms
width = lamStep  # Gaussian FWHM in Angstroms

# Minimum abundance a species must have to be included in the calculation
# (2e-5 includes H, He, C, N, O, Ne, Mg, Si, S, and Fe -- not every species).
minAbund = 2.e-5

# Do the computation...
numLam = 1 + int((maxLam - minLam) / lamStep)
lam = np.linspace(minLam, maxLam, numLam)
spec = ch.spectrum(T, ne, lam, filter=(cf.gaussian, width), em=em, minAbund=minAbund)

# some individual ions, used to label the lines each species contributes
fe9 = ch.ion('fe_9', temperature=T, eDensity=ne)
fe9.spectrum(lam, filter=(cf.gaussian, width))

fe10 = ch.ion('fe_10', temperature=T, eDensity=ne)
fe10.spectrum(lam, filter=(cf.gaussian, width))

fe11 = ch.ion('fe_11', temperature=T, eDensity=ne)
fe11.spectrum(lam, filter=(cf.gaussian, width))

fe12 = ch.ion('fe_12', temperature=T, eDensity=ne)
fe12.spectrum(lam, filter=(cf.gaussian, width))

s9 = ch.ion('s_9', temperature=T, eDensity=ne)
s9.spectrum(lam, filter=(cf.gaussian, width))

si9 = ch.ion('si_9', temperature=T, eDensity=ne)
si9.spectrum(lam, filter=(cf.gaussian, width))

he2 = ch.ion('he_2', temperature=T, eDensity=ne)
he2.spectrum(lam, filter=(cf.gaussian, width))
# -

# ## Create an ASCII file with the spectrum
#
# Multicolumn ASCII-text file with the calculated X-ray spectrum.

# +
specFile = 'mySpectrum.txt'

# A context manager guarantees the file is closed even if a write fails
# (the original left the handle open on error).
with open(specFile, 'w') as out:
    out.write('#\n')
    out.write(f'# CIE spectrum logT={math.log10(T):.1f}K ne={ne:.3f}/cc EM={em:.1e}\n')
    out.write('# Calculated using CHIANTI and ChiantiPy\n')
    out.write('#\n')
    out.write('lam total fe9 fe10 fe11 fe12 s9 si9 he2\n')
    for i, l in enumerate(lam):
        outStr = '%6.2f %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f\n' % (
            l,
            spec.Spectrum['integrated'][i],
            fe9.Spectrum['intensity'][i] * em,
            fe10.Spectrum['intensity'][i] * em,
            fe11.Spectrum['intensity'][i] * em,
            fe12.Spectrum['intensity'][i] * em,
            s9.Spectrum['intensity'][i] * em,
            si9.Spectrum['intensity'][i] * em,
            he2.Spectrum['intensity'][i] * em)
        out.write(outStr)
Chapter5/cieSpec_Calc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introducing the Keras Functional API # # **Learning Objectives** # 1. Understand embeddings and how to create them with the feature column API # 1. Understand Deep and Wide models and when to use them # 1. Understand the Keras functional API and how to build a deep and wide model with it # # ## Introduction # # In the last notebook, we learned about the Keras Sequential API. The [Keras Functional API](https://www.tensorflow.org/guide/keras#functional_api) provides an alternate way of building models which is more flexible. With the Functional API, we can build models with more complex topologies, multiple input or output layers, shared layers or non-sequential data flows (e.g. residual layers). # # In this notebook we'll use what we learned about feature columns to build a Wide & Deep model. Recall, that the idea behind Wide & Deep models is to join the two methods of learning through memorization and generalization by making a wide linear model and a deep learning model to accommodate both. You can have a look at the original research paper here: [Wide & Deep Learning for Recommender Systems](https://arxiv.org/abs/1606.07792). # # <img src='assets/wide_deep.png' width='80%'> # <sup>(image: https://ai.googleblog.com/2016/06/wide-deep-learning-better-together-with.html)</sup> # # The Wide part of the model is associated with the memory element. In this case, we train a linear model with a wide set of crossed features and learn the correlation of this related data with the assigned label. The Deep part of the model is associated with the generalization element where we use embedding vectors for features. The best embeddings are then learned through the training process. 
While both of these methods can work well alone, Wide & Deep models excel by combining these techniques together. # # Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/introduction_to_tensorflow/labs/4_keras_functional_api.ipynb) -- try to complete that notebook first before reviewing this solution notebook. # # Use the chown command to change the ownership of the repository. # !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst # Ensure the right version of Tensorflow is installed. # !pip freeze | grep tensorflow==2.3.0 || pip install tensorflow==2.3.0 # Kindly ignore the deprecation warnings and incompatibility errors. # Install the required numpy version. # !pip install numpy==1.21.0 # Kindly ignore the deprecation warnings and incompatibility errors. # Start by importing the necessary libraries for this lab. # + # Importing necessary modules/libraries such as numpy, pandas and datetime. import datetime import os import shutil import numpy as np import pandas as pd import tensorflow as tf from matplotlib import pyplot as plt from tensorflow import keras from tensorflow import feature_column as fc from tensorflow.keras import Model from tensorflow.keras.layers import ( Input, Dense, DenseFeatures, concatenate) from tensorflow.keras.callbacks import TensorBoard print(tf.__version__) # - # It sets the backend of matplotlib to the 'inline' backend. The output of plotting commands is displayed inline within frontends, directly # below the code cell that produced it. The resulting plots will then also be stored in the notebook document. # %matplotlib inline # ## Load raw data # # We will use the taxifare dataset, using the CSV files that we created in the first notebook of this sequence. Those files have been saved into `../data`. 
# !ls -l ../data/*.csv # ## Use tf.data to read the CSV files # # We wrote these functions for reading data from the csv files above in the [previous notebook](2_dataset_api.ipynb). For this lab we will also include some additional engineered features in our model. In particular, we will compute the difference in latitude and longitude, as well as the Euclidean distance between the pick-up and drop-off locations. We can accomplish this by adding these new features to the features dictionary with the function `add_engineered_features` below. # # Note that we include a call to this function when collecting our features dict and labels in the `features_and_labels` function below as well. # + # Selecting specific CSV_COLUMNS, LABEL_COLUMN, DEFAULTS, UNWANTED_COLS. CSV_COLUMNS = [ 'fare_amount', 'pickup_datetime', 'pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'passenger_count', 'key' ] LABEL_COLUMN = 'fare_amount' DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']] UNWANTED_COLS = ['pickup_datetime', 'key'] # Create an input function reading a file using the Dataset API def features_and_labels(row_data): label = row_data.pop(LABEL_COLUMN) features = row_data for unwanted_col in UNWANTED_COLS: features.pop(unwanted_col) return features, label # Reading CSV files into a dataset. def create_dataset(pattern, batch_size=1, mode='eval'): dataset = tf.data.experimental.make_csv_dataset( pattern, batch_size, CSV_COLUMNS, DEFAULTS) dataset = dataset.map(features_and_labels) if mode == 'train': dataset = dataset.shuffle(buffer_size=1000).repeat() # take advantage of multi-threading; 1=AUTOTUNE dataset = dataset.prefetch(1) return dataset # - # ## Feature columns for Wide and Deep model # # For the Wide columns, we will create feature columns of crossed features. To do this, we'll create a collection of Tensorflow feature columns to pass to the `tf.feature_column.crossed_column` constructor. 
The Deep columns will consist of numeric columns and the embedding columns we want to create. # + # TODO 1 # 1. Bucketize latitudes and longitudes NBUCKETS = 16 latbuckets = np.linspace(start=38.0, stop=42.0, num=NBUCKETS).tolist() lonbuckets = np.linspace(start=-76.0, stop=-72.0, num=NBUCKETS).tolist() fc_bucketized_plat = fc.bucketized_column( source_column=fc.numeric_column("pickup_longitude"), boundaries=lonbuckets) fc_bucketized_plon = fc.bucketized_column( source_column=fc.numeric_column("pickup_latitude"), boundaries=latbuckets) fc_bucketized_dlat = fc.bucketized_column( source_column=fc.numeric_column("dropoff_longitude"), boundaries=lonbuckets) fc_bucketized_dlon = fc.bucketized_column( source_column=fc.numeric_column("dropoff_latitude"), boundaries=latbuckets) # 2. Cross features for locations fc_crossed_dloc = fc.crossed_column( [fc_bucketized_dlat, fc_bucketized_dlon], hash_bucket_size=NBUCKETS * NBUCKETS) fc_crossed_ploc = fc.crossed_column( [fc_bucketized_plat, fc_bucketized_plon], hash_bucket_size=NBUCKETS * NBUCKETS) fc_crossed_pd_pair = fc.crossed_column( [fc_crossed_dloc, fc_crossed_ploc], hash_bucket_size=NBUCKETS**4) # 3. Create embedding columns for the crossed columns fc_pd_pair = fc.embedding_column(categorical_column=fc_crossed_pd_pair, dimension=3) fc_dloc = fc.embedding_column(categorical_column=fc_crossed_dloc, dimension=3) fc_ploc = fc.embedding_column(categorical_column=fc_crossed_ploc, dimension=3) # - # ### Gather list of feature columns # # Next we gather the list of wide and deep feature columns we'll pass to our Wide & Deep model in Tensorflow. Recall, wide columns are sparse, have linear relationship with the output while continuous columns are deep, have a complex relationship with the output. We will use our previously bucketized columns to collect crossed feature columns and sparse feature columns for our wide columns, and embedding feature columns and numeric features columns for the deep columns. 
# + # TODO 2 wide_columns = [ # One-hot encoded feature crosses fc.indicator_column(fc_crossed_dloc), fc.indicator_column(fc_crossed_ploc), fc.indicator_column(fc_crossed_pd_pair) ] deep_columns = [ # Embedding_column to "group" together ... fc.embedding_column(fc_crossed_pd_pair, dimension=10), # Numeric columns fc.numeric_column("pickup_latitude"), fc.numeric_column("pickup_longitude"), fc.numeric_column("dropoff_longitude"), fc.numeric_column("dropoff_latitude") ] # - # ## Build a Wide and Deep model in Keras # # To build a wide-and-deep network, we connect the sparse (i.e. wide) features directly to the output node, but pass the dense (i.e. deep) features through a set of fully connected layers. Here’s that model architecture looks using the Functional API. # # First, we'll create our input columns using [tf.keras.layers.Input](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Input). # + INPUT_COLS = [ 'pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'passenger_count' ] inputs = {colname : Input(name=colname, shape=(), dtype='float32') for colname in INPUT_COLS } # - # Then, we'll define our custom RMSE evaluation metric and build our wide and deep model. # + # Here, tf.reduce_mean computes the mean of elements across dimensions of a tensor. # tf.sqrt Computes element-wise square root of the input tensor. # tf.square computes square of x element-wise. 
def rmse(y_true, y_pred): return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true))) # TODO 3 def build_model(dnn_hidden_units): # Create the deep part of model deep = DenseFeatures(deep_columns, name='deep_inputs')(inputs) for num_nodes in dnn_hidden_units: deep = Dense(num_nodes, activation='relu')(deep) # Create the wide part of model wide = DenseFeatures(wide_columns, name='wide_inputs')(inputs) # Combine deep and wide parts of the model combined = concatenate(inputs=[deep, wide], name='combined') # Map the combined outputs into a single prediction value output = Dense(units=1, activation=None, name='prediction')(combined) # Finalize the model model = Model(inputs=list(inputs.values()), outputs=output) # Compile the keras model model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"]) return model # - # Next, we can call the `build_model` to create the model. Here we'll have two hidden layers, each with 10 neurons, for the deep part of our model. We can also use `plot_model` to see a diagram of the model we've created. # + HIDDEN_UNITS = [10,10] # Calling the build model model = build_model(dnn_hidden_units=HIDDEN_UNITS) # Converts a Keras plot_model to see a diagram of the model that we have created. tf.keras.utils.plot_model(model, show_shapes=False, rankdir='LR') # - # Next, we'll set up our training variables, create our datasets for training and validation, and train our model. # # (We refer you the the blog post [ML Design Pattern #3: Virtual Epochs](https://medium.com/google-cloud/ml-design-pattern-3-virtual-epochs-f842296de730) for further details on why express the training in terms of `NUM_TRAIN_EXAMPLES` and `NUM_EVALS` and why, in this training code, the number of epochs is really equal to the number of evaluations we perform.) 
# + BATCH_SIZE = 1000 NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset will repeat, wrap around NUM_EVALS = 50 # how many times to evaluate NUM_EVAL_EXAMPLES = 10000 # enough to get a reasonable sample trainds = create_dataset( pattern='../data/taxi-train*', batch_size=BATCH_SIZE, mode='train') evalds = create_dataset( pattern='../data/taxi-valid*', batch_size=BATCH_SIZE, mode='eval').take(NUM_EVAL_EXAMPLES//1000) # + # %%time # Here, %%time prints the wall time for the entire cell steps_per_epoch = NUM_TRAIN_EXAMPLES // (BATCH_SIZE * NUM_EVALS) OUTDIR = "./taxi_trained" shutil.rmtree(path=OUTDIR, ignore_errors=True) # start fresh each time history = model.fit(x=trainds, steps_per_epoch=steps_per_epoch, epochs=NUM_EVALS, validation_data=evalds, callbacks=[TensorBoard(OUTDIR)]) # - # Just as before, we can examine the history to see how the RMSE changes through training on the train set and validation set. # + RMSE_COLS = ['rmse', 'val_rmse'] # Pandas DataFrame is two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). pd.DataFrame(history.history)[RMSE_COLS].plot() # - # Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
C03/C03-w04-Lab_keras_functional_api.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a name="top"></a> # <div style="width:1000 px"> # # <div style="float:right; width:98 px; height:98px;"> # <img src="https://docs.unidata.ucar.edu/images/logos/unidata_logo_vertical_150x150.png" alt="Unidata Logo" style="height: 98px;"> # </div> # # # Colored Surface Temperature # **Python-AWIPS Tutorial Notebook** # # <div style="clear:both"></div> # </div> # # --- # # <div style="float:right; width:250 px"><img src="../images/colored_surface_temp_preview.png" alt="[image text]" style="height: 300px;"></div> # # # # Objectives # # * Use python-awips to connect to an edex server # * Define and filter data request for METAR surface obs # * Define a color threshold and use it to plot a useful map # * Create a product similar to existing products in GEMPAK and CAVE # # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Imports" data-toc-modified-id="Imports-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Imports</a></span></li><li><span><a href="#Initial-Setup" data-toc-modified-id="Initial-Setup-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Initial Setup</a></span><ul class="toc-item"><li><span><a href="#Geographic-Filter" data-toc-modified-id="Geographic-Filter-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Geographic Filter</a></span></li><li><span><a href="#EDEX-Connection" data-toc-modified-id="EDEX-Connection-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>EDEX Connection</a></span></li></ul></li><li><span><a href="#Filter-by-Time" data-toc-modified-id="Filter-by-Time-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Filter by Time</a></span></li><li><span><a href="#Access-and-Convert-Temp-Data" 
data-toc-modified-id="Access-and-Convert-Temp-Data-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Access and Convert Temp Data</a></span></li><li><span><a href="#Define-Temperature-Thresholds" data-toc-modified-id="Define-Temperature-Thresholds-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Define Temperature Thresholds</a></span></li><li><span><a href="#Plot-the-Data!" data-toc-modified-id="Plot-the-Data!-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Plot the Data!</a></span></li><li><span><a href="#See-Also" data-toc-modified-id="See-Also-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>See Also</a></span><ul class="toc-item"><li><span><a href="#Additional-Documention" data-toc-modified-id="Additional-Documention-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>Additional Documention</a></span></li></ul></li></ul></div> # - # ## Imports # # The imports below are used throughout the notebook. Note the first import is coming directly from python-awips and allows us to connect to an EDEX server. The subsequent imports are for data manipulation and visualization. from awips.dataaccess import DataAccessLayer from dynamicserialize.dstypes.com.raytheon.uf.common.time import TimeRange from datetime import datetime, timedelta import numpy as np import cartopy.crs as ccrs import warnings import matplotlib.pyplot as plt from shapely.geometry import Polygon from metpy.plots import StationPlot # <a href="#top">Top</a> # # --- # ## Initial Setup # ### Geographic Filter # # By defining a bounding box for the Continental US (CONUS), we're able to optimize the data request sent to the EDEX server. # CONUS bounding box and envelope geometry bbox=[-130, -70, 15, 55] envelope = Polygon([(bbox[0],bbox[2]),(bbox[0],bbox[3]), (bbox[1], bbox[3]),(bbox[1],bbox[2]), (bbox[0],bbox[2])]) # ### EDEX Connection # # First we establish a connection to Unidata's public EDEX server. 
# With that connection made, we can create a [new data request object](http://unidata.github.io/python-awips/api/IDataRequest.html) and set the data type to ***obs***, and use the geographic ***envelope*** we just created.

# New obs request, limited to the parameters this notebook actually plots.
edexServer = "edex-cloud.unidata.ucar.edu"
DataAccessLayer.changeEDEXHost(edexServer)
request = DataAccessLayer.newDataRequest("obs", envelope=envelope)
params = ["temperature", "longitude", "latitude", "stationName"]
request.setParameters(*(params))

# <a href="#top">Top</a>
#
# ---
# ## Filter by Time
#
# We then want to limit our results based on time, so we create a time range for the last 15 minutes, and then send the request to the EDEX server to get our results, which are kept in the **obs** variable.

# +
# Get records from the last 15 minutes.
windowStart = datetime.utcnow() - timedelta(minutes=15)
start = windowStart.strftime('%Y-%m-%d %H:%M:%S')
end = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')

# Round-trip through strings to drop sub-second precision before building the range.
beginRange = datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
endRange = datetime.strptime(end, "%Y-%m-%d %H:%M:%S")
timerange = TimeRange(beginRange, endRange)

# Get response
response = DataAccessLayer.getGeometryData(request, timerange)
obs = DataAccessLayer.getMetarObs(response)
lats = obs['latitude']
lons = obs['longitude']

print("Found " + str(len(response)) + " total records")
print("Using " + str(len(obs['temperature'])) + " temperature records")
# -

# <a href="#top">Top</a>
#
# ---
# ## Access and Convert Temp Data
#
# We access the *temperature* data from the **obs** variable which is stored in degrees Celsius (C). To make it more relatable, we then convert the data to degrees Fahrenheit (F).

# +
# Suppress nan masking warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)

# Get all temperature values; -9999.0 is the missing-data sentinel, which we
# replace with NaN before converting from degC to degF.
tair = np.array(obs['temperature'], dtype=float)
tair[tair == -9999.0] = np.nan
tair = tair * 1.8 + 32
# -

# <a href="#top">Top</a>
#
# ---
# ## Define Temperature Thresholds
#
# In order to distinguish the temperatures, we'll create a color map to separate the values into different colors. This mapping will be used when plotting the temperature values on the map of the United States.

# <div class="alert-info">
# <b>Tip:</b> Try playing around with the color ranges and see how that affects the final plot.
# </div>

thresholds = {
    '15': 'purple',
    '25': 'c',
    '35': 'royalblue',
    '45': 'darkgreen',
    '55': 'green',
    '65': 'y',
    '75': 'orange',
    '85': 'red'
}

# <a href="#top">Top</a>
#
# ---
# ## Plot the Data!
#
# Here we create a plot and cycle through all the values from our color mapping. For each segment of our color mapping, mask the temperature values to only include the relevant temperatures and draw those on the plot. Do this for every segment of the color mapping to produce the final, colored figure.

# +
fig, ax = plt.subplots(figsize=(16, 12),
                       subplot_kw=dict(projection=ccrs.LambertConformal()))
ax.set_extent(bbox)
ax.coastlines(resolution='50m')
ax.set_title(str(response[-1].getDataTime()) + " | Surface Temps (degF) | " + edexServer)

# For each 10-degree band, mask out every station outside the band and draw
# the remaining values in the band's color.
for lower, color in thresholds.items():
    banded = tair.copy()
    if lower == max(thresholds):
        # Top band: keep everything at or above the threshold.
        banded[banded < int(lower)] = np.nan
    elif lower == min(thresholds):
        # Bottom band: keep everything below threshold + 10 (a catch-all for colder obs).
        banded[banded >= int(lower) + 10] = np.nan
    else:
        banded[banded < int(lower)] = np.nan
        banded[banded >= int(lower) + 10] = np.nan
    # Add these stations and their color to the station plot.
    stationplot = StationPlot(ax, lons, lats, transform=ccrs.PlateCarree(), fontsize=14)
    stationplot.plot_parameter('C', banded, color=color)
# -

# <a href="#top">Top</a>
#
# ---
# ## See Also
#
# ### Additional Documention
#
# **python-awips**
#
# - [DataAccessLayer.changeEDEXHost()](http://unidata.github.io/python-awips/api/DataAccessLayer.html#awips.dataaccess.DataAccessLayer.changeEDEXHost)
# - [DataAccessLayer.newDataRequest()](http://unidata.github.io/python-awips/api/DataAccessLayer.html#awips.dataaccess.DataAccessLayer.newDataRequest)
#
# **matplotlib**
#
# - [matplotlib.pyplot()](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html)
# - [matplotlib.pyplot.axes()](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.axes.html)
# - [matplotlib.pyplot.figure()](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.figure.html)

# <a href="#top">Top</a>
#
# ---
examples/notebooks/Colored_Surface_Temperature_Plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Práctica de Introducción al lenguaje Python # #### Ejercicio 1[3 puntos] # Se quiere simular una agenda de contactos de forma que de cada contacto se desea almacenar: nombre, apellidos, teléfono, y cuenta de email. # El programa debe permitir: # 1) Añadir un contacto # 2) Eliminar un contacto. # 3) Modificar un contacto. # 4) Consultar datos de un contacto. # 5) Guardar agenda en un fichero. # 6) Leer agenda de un fichero # 7) Finalizar. Cuando se finaliza,automaticamente guarda la agenda en un fichero de texto. # + #Cosas a tener en cuenta: #Para que no se pueda repetir los mismos contactos, usaremos el campo teléfono como clave primaria #Los contactos se guardarán en listas #La agenda también será una lista, por lo que guardará listas #Cuando lea un fichero, almacenará el contenido del fichero y borrará el contenido antiguo de la agenda # ------------- Funciones Auxiliares ------------- def comprobarTelefono(tlf, agenda): #Función que revisa si el telefono nuevo está repetido o no tam = len(agenda) i = 0 existe = False while(i < tam and existe == False and tam > 0): if(int(tlf) == int(agenda[i][3])): existe = True i = i + 1 return existe def mostrarAgenda(agenda): # Muestra un listado de los contactos almacenados en la lista agenda, mostrando solo el nombre y apellidos tam = len(agenda) i = 0 while(i < tam): print(i + 1, "- ", agenda[i][0], " ", agenda[i][1]," ", agenda[i][2]) i = i + 1 def completarCampos(): # Función para completar los campos del nuevo o modificado contacto print("Nombre: \n") nom = input() print("Primer Apellido: \n") ap1 = input() print("Segundo Apellido: \n") ap2 = input() print("Telefono: \n") tlf = int(input()) print("Email: \n") email = input() campos = [nom, ap1, ap2, tlf, email] return campos # ------------- 
# ------------- Main functions -------------

def aniadirContacto(agenda):
    """Ask for a new contact and append it, rejecting duplicate phone numbers."""
    print("Crea un nuevo contacto: \n")
    campos = completarCampos()
    if not comprobarTelefono(campos[3], agenda):
        agenda.append(campos)  # phone is new -> store the contact
    else:
        print("El contacto introducido ya existe \n")


def eliminarContacto(agenda):
    """List the contacts and delete the one chosen by the user.

    BUGFIX: the original bound check ('opcion < len(agenda) + 1') accepted
    opcion == len(agenda) and raised IndexError; after the 1-based -> 0-based
    shift the valid range is 0 <= opcion < len(agenda).
    """
    print("Selecciona el contacto que desas eliminar: \n")
    mostrarAgenda(agenda)
    opcion = int(input()) - 1
    if 0 <= opcion < len(agenda):
        del agenda[opcion]
        print("Contacto eliminado con exito")
    else:
        print("No se ha podido borrar el contacto seleccionado")


def modificarContacto(agenda):
    """Sub-menu that lets the user modify one field of a contact, or all of them."""
    print("Selecciona el contacto que desas modificar: \n")
    mostrarAgenda(agenda)
    opcion = int(input()) - 1
    print("Elige que quieres modificar: \n")
    print("1: Nombre")
    print("2: Primer Apellido")
    print("3: Segundo Apellido")
    print("4: Telefono")
    print("5:Email")
    print("6: Todos los campos")
    opcionCampos = int(input())
    if opcionCampos == 6:
        # All fields: re-enter the contact and verify the phone is not taken.
        campos = completarCampos()
        if not comprobarTelefono(campos[3], agenda):
            agenda[opcion] = campos
        else:
            print("El telefono introducido pertenece a otro contacto \n")
    else:
        # Single field: overwrite just the selected one.
        print("Introduce el nuevo dato \n")
        datoNuevo = input()
        if opcionCampos == 4:
            # Phone change: check the new number is not taken.
            # NOTE(review): the new phone is stored as str here, while
            # completarCampos() stores it as int — kept as in the original.
            if comprobarTelefono(datoNuevo, agenda):
                print("El telefono introducido pertenece a otro contacto \n")
            else:
                agenda[opcion][opcionCampos - 1] = datoNuevo
        elif 0 < opcionCampos < 6:
            agenda[opcion][opcionCampos - 1] = datoNuevo


def consultarContacto(agenda):
    """List the contacts, then print every field of the one chosen by the user."""
    print("Selecciona el contacto que desas consultar: \n")
    mostrarAgenda(agenda)
    opcion = int(input()) - 1
    # Show all the stored data of the selected contact.
    print(opcion + 1, "- ", agenda[opcion][0], " ", agenda[opcion][1], " ",
          agenda[opcion][2], " ", agenda[opcion][3], " ", agenda[opcion][4], "\n")


def guardarAgenda(agenda):
    """Overwrite 'agenda.txt' with the contact book, one contact per line.

    Fields are written space-separated. BUGFIX: the file is now closed even if
    a write fails (with-statement).
    """
    with open('agenda.txt', 'w') as fichero:
        for contacto in agenda:
            for campo in contacto:
                fichero.write(str(campo) + ' ')
            fichero.write("\n")


def leerAgenda():
    """Read 'agenda.txt' and return a fresh contact list that replaces the old one.

    Fields are whitespace-separated; the phone field is converted back to int.
    BUGFIX: the original never closed the file handle.
    """
    with open('agenda.txt') as fichero:
        lineas = fichero.readlines()
    agendaNueva = []
    print("Los nuevos contactos leidos son: \n")
    for x, linea in enumerate(lineas):
        contacto = linea.split()
        print(x + 1, " ", contacto[0], " ", contacto[1], " ", contacto[2], " ",
              contacto[3], " ", contacto[4])
        contacto[3] = int(contacto[3])
        agendaNueva.append(contacto)
    print("\n")
    return agendaNueva


# --------------------------------------- **** MAIN **** ---------------------------------------

option = 0
agenda = []  # list of contact lists
while option != 7:
    # --------------- Menu ---------------
    print("Elige una opcion:")
    print("1: Aniadir un contacto")
    print("2: Eliminar un contacto")
    print("3: Modificar un contacto")
    print("4: Consultar un contacto")
    print("5: Guardar agenda")
    print("6: Leer agenda")
    print("7: Finalizar")
    option = int(input())
    if option == 1:
        aniadirContacto(agenda)
    elif option == 2:
        eliminarContacto(agenda)
    elif option == 3:
        modificarContacto(agenda)
    elif option == 4:
        consultarContacto(agenda)
    elif option == 5:
        guardarAgenda(agenda)
    elif option == 6:
        agenda = leerAgenda()

# Option 7 ends the loop; per the exercise statement, finishing automatically
# saves the (possibly updated) contact book to the text file.
guardarAgenda(agenda)
# -

# #### Exercise 2 [4 points]
# Consider solving a system of 3 equations with the Gauss method:
# https://es.wikipedia.org/wiki/Sistema_de_ecuaciones_lineales#M%C3%A9todo_de_Gauss
#
# Implement a program that, given a system of 3 equations expressed as a list
# of lists where each inner list represents one equation of the system,
# returns the values of the unknowns.
#
# The input system is assumed to have a solution; the program does not need to
# verify this.
#
# No function or method that computes a determinant directly may be used.
# ##### APPROACH:
#
# We want to turn the full 3-equation system into a triangular one in which
# the first equation keeps 3 unknowns, the second 2 and the last only 1, and
# then solve the system bottom-up. Elementary row operations are applied to
# the equations to eliminate unknowns.
#
# 1st: swap rows so the one with the smallest coefficient of the unknown x
# ends up on top (this comparison is best done in absolute value, i.e. |1| < |-3|).
#
#     2X +  Y - 3Z =   7          1 -1 -4   4
#     5X - 4Y +  Z = -19   --->   2  1 -3   7
#      X -  Y - 4Z =   4          5 -4  1 -19
#
# Subtract from the second row the first one multiplied by the result of
# dividing the x coefficient of row 2 by the x coefficient of row 1;
# the same for the third row, this time dividing the x coefficient of row 3
# by the x coefficient of row 1.
#
# That is 2/1 = 2 and 5/1 = 5, and the system becomes:
#
#     1 -1 -4   4
#     0  3  5  -1
#     0  1 21 -39
#
# Now the unknown y must be "eliminated" from one of the equations: reorder
# the system again, this time looking only at the last two equations and at
# the coefficient of the unknown y, then repeat the same operation:
#
#     1 -1 -4   4                                 1 -1 -4   4
#     0  1 21 -39  ---> ec3 = ec3 - 3/1*ec2 --->  0  1 21 -39
#     0  3  5  -1                                 0  0 -58 116
#
# Once we have this, only back-substitution bottom-up remains:
#
#     -58z = 116   --->  z = 116/-58 = -2
#     y = -39 - 21z --->  y = 3
#     x = 4 + y + 4z --->  x = -1

# +
import os
import copy

# ------------- Helper functions -------------

def ordenarEcuaciones(sistema, control):
    """Return the equations sorted by |coefficient| of x (control == 1) or of y.

    The input list is NOT modified: callers must use the returned list.
    """
    indice = 0 if control == 1 else 1
    return sorted(sistema, key=lambda ecuacion: abs(ecuacion[indice]))


def calculo(ec1, ec2, factor):
    """Scale ec1 by *factor* and subtract it from ec2, component-wise.

    NOTE: both lists are mutated in place (ec1 ends up scaled), which is why
    the despejaX/despejaY callers pass a copy of the pivot row.
    """
    for k in range(4):
        ec1[k] = ec1[k] * factor
        ec2[k] = ec2[k] - ec1[k]
    return ec2


def despejaX(sistema):
    """Eliminate the unknown x from the 2nd and 3rd equations (in place)."""
    # Factor each lower row is scaled against the pivot (first) row.
    factor1 = sistema[1][0] / sistema[0][0]
    factor2 = sistema[2][0] / sistema[0][0]
    # Copy the pivot row so calculo() does not corrupt it between uses.
    aux = copy.copy(sistema[0])
    sistema[1] = calculo(aux, sistema[1], factor1)
    aux = copy.copy(sistema[0])
    sistema[2] = calculo(aux, sistema[2], factor2)
    return sistema


def despejaY(sistema):
    """Eliminate the unknown y from the 3rd equation (in place)."""
    factor1 = sistema[2][1] / sistema[1][1]
    # Copy the second equation so the product operation needs no undoing.
    aux = copy.copy(sistema[1])
    sistema[2] = calculo(aux, sistema[2], factor1)
    return sistema


# --------------------------------------- **** MAIN **** ---------------------------------------

sistema = []
print("Introduce uno a uno los 3 coeficientes de una ecuacion y su resultado:")
for i in range(3):
    # Read the three coefficients and the independent term of each equation.
    print("Ecuacion ", i + 1, " : ")
    factor1 = int(input())
    factor2 = int(input())
    factor3 = int(input())
    resul = int(input())
    sistema.append([factor1, factor2, factor3, resul])

# BUGFIX: ordenarEcuaciones() returns a sorted copy; the original discarded
# the return value here, so the pivot reordering never took effect.
sistema = ordenarEcuaciones(sistema, 1)

# Eliminate the unknown x from the last two equations.
despejaX(sistema)

# Reorder only the last two equations by their y coefficient, reusing the
# sort helper, and write the result back (BUGFIX: the original built
# sistemaAux but also threw the sorted list away).
sistemaAux = ordenarEcuaciones(sistema[1:], 2)
sistema[1] = sistemaAux[0]
sistema[2] = sistemaAux[1]

# Eliminate the unknown y from the third equation.
despejaY(sistema)
#ahora tenemos el sistema escalonado y vamos resolviendo cada incognita despejando de abajo a arriba z = sistema[2][3] / sistema[2][2] y = (sistema[1][3] - sistema[1][2]*z) / sistema[1][1] x = (sistema[0][3] - sistema[0][2]*z - sistema[0][1]*y) / sistema[0][0] #imprimimos el resultado print("El resultado del sistema es: ") print("X = ", x) print("Y = ", y) print("Z = ", z) # - # #### Ejercicio 3[3 puntos] # El análisis del sentimiento de un texto consiste en asociar al texto un valor numérico que se obtiene de sumar/restar una cantidad por cada aparición de una palabra en el texto de acuerdo a un listado de palabras que tienen valores asociados. Más concretamente si se quiere obtener el análisis del sentimiento, se procesa el texto palabra a palabra: # 1) Se toma una palabra, y se busca en la lista de palabras que valor le corresponde. # 2) El valor se suma a un contador general. # 3) En caso de que la palabra no aparezca, se suma 0. # 4) Se pasa a la siguiente palabra # 5) Cuando ya no quedan más palabras por procesar del texto, se devuelve el número obtenido. # # Se pide implementar un programa para calcular el análisis del sentimiento de un texto que será proporcionado a través de un fichero. El listado con las palabras y los valores asociados se proporcionará mediante otro fichero que deberá tener en cada línea del mismo una palabra y un valor separados ambos por un blanco y finalizados por salto de linea. Para realizar el procesamiento no se distinguirá entre mayúsculas y minúsculas. # # No se pueden usar ninguna función o método que realice directamente el procesamiento. 
# +
# ------------- Main functions -------------

def procesarDiccionario(diccionario):
    """Load 'diccionario.txt' into *diccionario* (UPPERCASED word -> value).

    Each line of the file holds one word and its value separated by
    whitespace. Keys are uppercased so lookups are case-insensitive; the
    value is kept as a str and converted with int() at lookup time.
    BUGFIX: the file is now closed deterministically (with-statement).
    """
    with open("diccionario.txt") as fichero:
        for linea in fichero:
            partes = linea.upper().split()
            diccionario[partes[0]] = partes[1]


def procesarTexto(diccionario):
    """Return the sentiment score of 'texto.txt'.

    Every word of the text (uppercased) is looked up in *diccionario* and its
    value is added to the accumulator; unknown words count as 0.
    BUGFIX: the file is now closed deterministically (with-statement).
    """
    acum = 0
    with open("texto.txt") as fichero:
        for linea in fichero:
            for palabra in linea.upper().split():
                acum = acum + int(diccionario.get(palabra, 0))
    return acum


# --------------------------------------- **** MAIN **** ---------------------------------------

diccionario = dict()
procesarDiccionario(diccionario)
acum = procesarTexto(diccionario)
print("El resultado del análisis del sentimiento de este texto es: ", acum)
# -

# #### Submission rules
#
# * Deadline: 15/10/2020
# * Submission is done by uploading a Jupyter notebook with the solution to
#   the virtual campus. The file must be named IntroPython_GrupoX, where X is
#   the corresponding group number.
Practica 1/IntroPython_Grupo2.ipynb