blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
69afe832f90b7d85d2f11aa7bd10b5a3dad3ac62 | f86b7a8e430becbef820e411c6d2808f646934bd | /C9_Ex2.py | 817ff705f3ec305a04097a132a8652c000bd8c72 | [] | no_license | JoE11-y/PY4E | 2e5f2c87c60e86ce5bcefeb1eddac4c7821bca86 | c6d56f35a557580d449403511ac6e77af3036623 | refs/heads/main | 2023-01-11T15:24:56.603892 | 2020-11-14T05:02:38 | 2020-11-14T05:02:38 | 312,454,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | fname = input("Enter file name: ")
''' The method I created during my first trial
fhand = open(fname)
counts = dict()
for lines in fhand:
lines = lines.rstrip()
if lines.startswith("From"):
words = lines.split()
if words[0] == "From:": continue
for item in range(len(words)):
if item != 2: continue
#(Basic style of counting doubles in dictionaries)
#if words[item] not in counts:
# counts[words[item]] = 1
#else:
# counts[words[item]] += 1
# Python's own method
counts[words[item]] = counts.get(words[item],0) + 1
print(counts)
'''
#Alternative method without using rstrip and startswith
# Tally the third token of every "From " line (the sender address).
fhand = open(fname)
counts = dict()
for line in fhand:
    tokens = line.split()
    # Skip blank lines (empty token list) and any line whose first token is
    # not exactly "From" -- this also excludes "From:" header lines.
    if not tokens or tokens[0] != "From":
        continue
    if len(tokens) > 2:
        sender = tokens[2]
        counts[sender] = counts.get(sender, 0) + 1
print(counts)
| [
"Josephedoh77@gmail.com"
] | Josephedoh77@gmail.com |
961b9ef2cec94e3824919a09b259a06ecf7673bc | f96f64b7c509b79dea692371c2cbd20e1bd246c0 | /cms/cmsmain.py | 1ff75e12ee563d3068a7d5b44635ab26f7b22e4a | [] | no_license | helloexp/AngelSword | 51cbf491e52cb7be601b641a632e65fe3ee82b74 | 4a77ba5f86a2ae422eba4b78856dcf2a6bf9b4c9 | refs/heads/master | 2021-01-21T11:03:16.795459 | 2017-08-24T09:14:37 | 2017-08-24T09:14:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,247 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: CMS vulnerability library
referer: unknown
author: Lucifer
description: contains all CMS vulnerability check types, packaged as one module
'''
#autoset vuls
from cms.autoset.autoset_phpmyadmin_unauth import autoset_phpmyadmin_unauth_BaseVerify
#phpstudy vuls
from cms.phpstudy.phpstudy_probe import phpstudy_probe_BaseVerify
from cms.phpstudy.phpstudy_phpmyadmin_defaultpwd import phpstudy_phpmyadmin_defaultpwd_BaseVerify
#Hishop vulns
from cms.Hishop.hishop_productlist_sqli import hishop_productlist_sqli_BaseVerify
#SiteEngine vulns
from cms.siteengine.siteengine_comments_module_sqli import siteengine_comments_module_sqli_BaseVerify
#zfsoft vulns
from cms.zfsoft.zfsoft_service_stryhm_sqli import zfsoft_service_stryhm_sqli_BaseVerify
from cms.zfsoft.zfsoft_database_control import zfsoft_database_control_BaseVerify
from cms.zfsoft.zfsoft_default3_bruteforce import zfsoft_default3_bruteforce_BaseVerify
#ecshop vulns
from cms.ecshop.ecshop_uc_code_sqli import ecshop_uc_code_sqli_BaseVerify
from cms.ecshop.ecshop_flow_orderid_sqli import ecshop_flow_orderid_sqli_BaseVerify
#discuz! vulns
from cms.discuz.discuz_forum_message_ssrf import discuz_forum_message_ssrf_BaseVerify
from cms.discuz.discuz_focus_flashxss import discuz_focus_flashxss_BaseVerify
from cms.discuz.discuz_x25_path_disclosure import discuz_x25_path_disclosure_BaseVerify
from cms.discuz.discuz_plugin_ques_sqli import discuz_plugin_ques_sqli_BaseVerify
#亿邮 vulns
from cms.eyou.eyou_weakpass import eyou_weakpass_BaseVerify
from cms.eyou.eyou_admin_id_sqli import eyou_admin_id_sqli_BaseVerify
from cms.eyou.eyou_resetpw import eyou_resetpw_BaseVerify
from cms.eyou.eyou_user_kw_sqli import eyou_user_kw_sqli_BaseVerify
#金蝶 vulns
from cms.kingdee.kingdee_filedownload import kingdee_filedownload_BaseVerify
from cms.kingdee.kingdee_resin_dir_path_disclosure import kingdee_resin_dir_path_disclosure_BaseVerify
from cms.kingdee.kingdee_conf_disclosure import kingdee_conf_disclosure_BaseVerify
from cms.kingdee.kingdee_logoImgServlet_fileread import kingdee_logoImgServlet_fileread_BaseVerify
#乐语 vulns
from cms.looyu.looyu_down_filedownload import looyu_down_filedownload_BaseVerify
#smartoa vulns
from cms.smartoa.smartoa_multi_filedownload import smartoa_multi_filedownload_BaseVerify
#URP vulns
from cms.urp.urp_query import urp_query_BaseVerify
from cms.urp.urp_query2 import urp_query2_BaseVerify
from cms.urp.urp_ReadJavaScriptServlet_fileread import urp_ReadJavaScriptServlet_fileread_BaseVerify
#PKPMBS vulns
from cms.PKPMBS.pkpmbs_guestbook_sqli import pkpmbs_guestbook_sqli_BaseVerify
from cms.PKPMBS.pkpmbs_addresslist_keyword_sqli import pkpmbs_addresslist_keyword_sqli_BaseVerify
from cms.PKPMBS.pkpmbs_MsgList_sqli import pkpmbs_MsgList_sqli_BaseVerify
#帝友 vulns
from cms.diyou.dyp2p_latesindex_sqli import dyp2p_latesindex_sqli_BaseVerify
from cms.diyou.dyp2p_url_fileread import dyp2p_url_fileread_BaseVerify
#爱琴斯 vulns
from cms.iGenus.igenus_code_exec import igenus_code_exec_BaseVerify
from cms.iGenus.igenus_login_Lang_fileread import igenus_login_Lang_fileread_BaseVerify
from cms.iGenus.igenus_syslogin_Lang_fileread import igenus_syslogin_Lang_fileread_BaseVerify
#live800 vulns
from cms.live800.live800_downlog_filedownload import live800_downlog_filedownload_BaseVerify
from cms.live800.live800_loginAction_sqli import live800_loginAction_sqli_BaseVerify
from cms.live800.live800_sta_export_sqli import live800_sta_export_sqli_BaseVerify
from cms.live800.live800_services_xxe import live800_services_xxe_BaseVerify
#thinkphp vulns
from cms.thinkphp.onethink_category_sqli import onethink_category_sqli_BaseVerify
from cms.thinkphp.thinkphp_code_exec import thinkphp_code_exec_BaseVerify
#汇思 vulns
from cms.wizbank.wizbank_download_filedownload import wizbank_download_filedownload_BaseVerify
from cms.wizbank.wizbank_usr_id_sqli import wizbank_usr_id_sqli_BaseVerify
#汇文 vulns
from cms.libsys.libsys_ajax_asyn_link_old_fileread import libsys_ajax_asyn_link_old_fileread_BaseVerify
from cms.libsys.libsys_ajax_asyn_link_fileread import libsys_ajax_asyn_link_fileread_BaseVerify
from cms.libsys.libsys_ajax_get_file_fileread import libsys_ajax_get_file_fileread_BaseVerify
#通元内容管理系统 vulns
from cms.gpower.gpower_users_disclosure import gpower_users_disclosure_BaseVerify
#metinfo cms vulns
from cms.metinfo.metinfo_getpassword_sqli import metinfo_getpassword_sqli_BaseVerify
from cms.metinfo.metinfo_login_check_sqli import metinfo_login_check_sqli_BaseVerify
#用友 vulns
from cms.yonyou.yonyou_icc_struts2 import yonyou_icc_struts2_BaseVerify
from cms.yonyou.yonyou_user_ids_sqli import yonyou_user_ids_sqli_BaseVerify
from cms.yonyou.yonyou_multi_union_sqli import yonyou_multi_union_sqli_BaseVerify
from cms.yonyou.yonyou_initData_disclosure import yonyou_initData_disclosure_BaseVerify
from cms.yonyou.yonyou_createMysql_disclosure import yonyou_createMysql_disclosure_BaseVerify
from cms.yonyou.yonyou_test_sqli import yonyou_test_sqli_BaseVerify
from cms.yonyou.yonyou_getemaildata_fileread import yonyou_getemaildata_fileread_BaseVerify
from cms.yonyou.yonyou_ehr_ELTextFile import yonyou_ehr_ELTextFile_BaseVerify
from cms.yonyou.yonyou_a8_CmxUser_sqli import yonyou_a8_CmxUser_sqli_BaseVerify
from cms.yonyou.yonyou_cm_info_content_sqli import yonyou_cm_info_content_sqli_BaseVerify
from cms.yonyou.yonyou_a8_personService_xxe import yonyou_a8_personService_xxe_BaseVerify
from cms.yonyou.yonyou_u8_CmxItem_sqli import yonyou_u8_CmxItem_sqli_BaseVerify
from cms.yonyou.yonyou_fe_treeXml_sqli import yonyou_fe_treeXml_sqli_BaseVerify
from cms.yonyou.yonyou_ehr_resetpwd_sqli import yonyou_ehr_resetpwd_sqli_BaseVerify
from cms.yonyou.yonyou_nc_NCFindWeb_fileread import yonyou_nc_NCFindWeb_fileread_BaseVerify
from cms.yonyou.yonyou_a8_logs_disclosure import yonyou_a8_logs_disclosure_BaseVerify
from cms.yonyou.yonyou_status_default_pwd import yonyou_status_default_pwd_BaseVerify
#v2tech vulns
from cms.v2tech.v2Conference_sqli_xxe import v2Conference_sqli_xxe_BaseVerify
#digital campus vulns
from cms.digital_campus.digital_campus_log_disclosure import digital_campus_log_disclosure_BaseVerify
from cms.digital_campus.digital_campus_systemcodelist_sqli import digital_campus_systemcodelist_sqli_BaseVerify
#jeecms vulns
from cms.jeecms.jeecms_fpath_filedownload import jeecms_fpath_filedownload_BaseVerify
#shopex vulns
from cms.shopex.shopex_phpinfo_disclosure import shopex_phpinfo_disclosure_BaseVerify
#FineCMS vulns
from cms.finecms.finecms_uploadfile import finecms_uploadfile_BaseVerify
#hanweb vulns
from cms.hanweb.hanweb_readxml_fileread import hanweb_readxml_fileread_BaseVerify
from cms.hanweb.hanweb_downfile_filedownload import hanweb_downfile_filedownload_BaseVerify
from cms.hanweb.hanweb_VerifyCodeServlet_install import hanweb_VerifyCodeServlet_install_BaseVerify
#php168 vulns
from cms.php168.php168_login_getshell import php168_login_getshell_BaseVerify
#dedecms vulns
from cms.dedecms.dedecms_version import dedecms_version_BaseVerify
from cms.dedecms.dedecms_recommend_sqli import dedecms_recommend_sqli_BaseVerify
from cms.dedecms.dedecms_download_redirect import dedecms_download_redirect_BaseVerify
#umail vulns
from cms.umail.umail_physical_path import umail_physical_path_BaseVerify
from cms.umail.umail_sessionid_access import umail_sessionid_access_BaseVerify
#fsmcms vulns
from cms.fsmcms.fsmcms_p_replydetail_sqli import fsmcms_p_replydetail_sqli_BaseVerify
from cms.fsmcms.fsmcms_setup_reinstall import fsmcms_setup_reinstall_BaseVerify
from cms.fsmcms.fsmcms_columninfo_sqli import fsmcms_columninfo_sqli_BaseVerify
#qibocms vulns
from cms.qibocms.qibocms_search_sqli import qibocms_search_sqli_BaseVerify
from cms.qibocms.qibocms_js_f_id_sqli import qibocms_js_f_id_sqli_BaseVerify
from cms.qibocms.qibocms_s_fids_sqli import qibocms_s_fids_sqli_BaseVerify
from cms.qibocms.qibocms_search_code_exec import qibocms_search_code_exec_BaseVerify
#inspur vulns
from cms.inspur.inspur_multi_sqli import inspur_multi_sqli_BaseVerify
from cms.inspur.inspur_ecgap_displayNewsPic_sqli import inspur_ecgap_displayNewsPic_sqli_BaseVerify
#gobetters vulns
from cms.gobetters.gobetters_multi_sqli import gobetters_multi_sqli_BaseVerify
#lbcms vulns
from cms.lbcms.lbcms_webwsfw_bssh_sqli import lbcms_webwsfw_bssh_sqli_BaseVerify
#dswjcms vulns
from cms.dswjcms.dswjcms_p2p_multi_sqli import dswjcms_p2p_multi_sqli_BaseVerify
#wordpress vulns
from cms.wordpress.wordpress_plugin_azonpop_sqli import wordpress_plugin_azonpop_sqli_BaseVerify
from cms.wordpress.wordpress_plugin_ShortCode_lfi import wordpress_plugin_ShortCode_lfi_BaseVerify
from cms.wordpress.wordpress_url_redirect import wordpress_url_redirect_BaseVerify
from cms.wordpress.wordpress_woocommerce_code_exec import wordpress_woocommerce_code_exec_BaseVerify
from cms.wordpress.wordpress_plugin_mailpress_rce import wordpress_plugin_mailpress_rce_BaseVerify
from cms.wordpress.wordpress_admin_ajax_filedownload import wordpress_admin_ajax_filedownload_BaseVerify
from cms.wordpress.wordpress_restapi_sqli import wordpress_restapi_sqli_BaseVerify
#票友 vulns
from cms.piaoyou.piaoyou_multi_sqli import piaoyou_multi_sqli_BaseVerify
from cms.piaoyou.piaoyou_ten_sqli import piaoyou_ten_sqli_BaseVerify
from cms.piaoyou.piaoyou_six_sqli import piaoyou_six_sqli_BaseVerify
from cms.piaoyou.piaoyou_six2_sqli import piaoyou_six2_sqli_BaseVerify
from cms.piaoyou.piaoyou_int_order_sqli import piaoyou_int_order_sqli_BaseVerify
from cms.piaoyou.piaoyou_newsview_list import piaoyou_newsview_list_BaseVerify
#TCExam vulns
from cms.tcexam.tcexam_reinstall_getshell import tcexam_reinstall_getshell_BaseVerify
#最土团购 vulns
from cms.zuitu.zuitu_coupon_id_sqli import zuitu_coupon_id_sqli_BaseVerify
#iwms vulns
from cms.iwms.iwms_bypass_js_delete import iwms_bypass_js_delete_BaseVerify
#xplus vulns
from cms.xplus.xplus_2003_getshell import xplus_2003_getshell_BaseVerify
from cms.xplus.xplus_mysql_mssql_sqli import xplus_mysql_mssql_sqli_BaseVerify
#东软uniportal vulns
from cms.uniportal.uniportal_bypass_priv_sqli import uniportal_bypass_priv_sqli_BaseVerify
#pageadmin vulns
from cms.pageadmin.pageadmin_forge_viewstate import pageadmin_forge_viewstate_BaseVerify
#ruvar vulns
from cms.ruvar.ruvar_oa_multi_sqli import ruvar_oa_multi_sqli_BaseVerify
from cms.ruvar.ruvar_oa_multi_sqli2 import ruvar_oa_multi_sqli2_BaseVerify
from cms.ruvar.ruvar_oa_multi_sqli3 import ruvar_oa_multi_sqli3_BaseVerify
#jumboecms vulns
from cms.jumboecms.jumboecms_slide_id_sqli import jumboecms_slide_id_sqli_BaseVerify
#joomla vulns
from cms.joomla.joomla_com_docman_lfi import joomla_com_docman_lfi_BaseVerify
from cms.joomla.joomla_index_list_sqli import joomla_index_list_sqli_BaseVerify
#360shop vulns
from cms.shop360.shop360_do_filedownload import shop360_do_filedownload_BaseVerify
#pstar vulns
from cms.pstar.pstar_warehouse_msg_01_sqli import pstar_warehouse_msg_01_sqli_BaseVerify
from cms.pstar.pstar_isfLclInfo_sqli import pstar_isfLclInfo_sqli_BaseVerify
from cms.pstar.pstar_qcustoms_sqli import pstar_qcustoms_sqli_BaseVerify
#trs vulns
from cms.trs.trs_wcm_pre_as_lfi import trs_wcm_pre_as_lfi_BaseVerify
from cms.trs.trs_inforadar_disclosure import trs_inforadar_disclosure_BaseVerify
from cms.trs.trs_lunwen_papercon_sqli import trs_lunwen_papercon_sqli_BaseVerify
from cms.trs.trs_infogate_xxe import trs_infogate_xxe_BaseVerify
from cms.trs.trs_infogate_register import trs_infogate_register_BaseVerify
from cms.trs.trs_was5_config_disclosure import trs_was5_config_disclosure_BaseVerify
from cms.trs.trs_wcm_default_user import trs_wcm_default_user_BaseVerify
from cms.trs.trs_wcm_infoview_disclosure import trs_wcm_infoview_disclosure_BaseVerify
from cms.trs.trs_was40_passwd_disclosure import trs_was40_passwd_disclosure_BaseVerify
from cms.trs.trs_was40_tree_disclosure import trs_was40_tree_disclosure_BaseVerify
from cms.trs.trs_ids_auth_disclosure import trs_ids_auth_disclosure_BaseVerify
from cms.trs.trs_was5_download_templet import trs_was5_download_templet_BaseVerify
#易创思 vulns
from cms.ecscms.ecscms_MoreIndex_sqli import ecscms_MoreIndex_sqli_BaseVerify
#金窗教务系统 vulns
from cms.gowinsoft_jw.gowinsoft_jw_multi_sqli import gowinsoft_jw_multi_sqli_BaseVerify
#siteserver vulns
from cms.siteserver.siteserver_background_taskLog_sqli import siteserver_background_taskLog_sqli_BaseVerify
from cms.siteserver.siteserver_background_log_sqli import siteserver_background_log_sqli_BaseVerify
from cms.siteserver.siteserver_UserNameCollection_sqli import siteserver_UserNameCollection_sqli_BaseVerify
from cms.siteserver.siteserver_background_keywordsFilting_sqli import siteserver_background_keywordsFilting_sqli_BaseVerify
from cms.siteserver.siteserver_background_administrator_sqli import siteserver_background_administrator_sqli_BaseVerify
#nitc vulns
from cms.nitc.nitc_suggestwordList_sqli import nitc_suggestwordList_sqli_BaseVerify
from cms.nitc.nitc_index_language_id_sqli import nitc_index_language_id_sqli_BaseVerify
#南大之星 vulns
from cms.ndstar.ndstar_six_sqli import ndstar_six_sqli_BaseVerify
#安财软件 vulns
from cms.acsoft.acsoft_GetXMLList_fileread import acsoft_GetXMLList_fileread_BaseVerify
from cms.acsoft.acsoft_GetFile_fileread import acsoft_GetFile_fileread_BaseVerify
from cms.acsoft.acsoft_GetFileContent_fileread import acsoft_GetFileContent_fileread_BaseVerify
#英福金银花ETMV9数字化校园平台
from cms.etmdcp.etmdcp_Load_filedownload import etmdcp_Load_filedownload_BaseVerify
#speedcms vulns
from cms.speedcms.speedcms_list_cid_sqli import speedcms_list_cid_sqli_BaseVerify
#任我行 vulns
from cms.weway.weway_PictureView1_filedownload import weway_PictureView1_filedownload_BaseVerify
#esccms vulns
from cms.esccms.esccms_selectunitmember_unauth import esccms_selectunitmember_unauth_BaseVerify
#wecenter vulns
from cms.wecenter.wecenter_topic_id_sqli import wecenter_topic_id_sqli_BaseVerify
#shopnum1 vulns
from cms.shopnum.shopnum_ShoppingCart1_sqli import shopnum_ShoppingCart1_sqli_BaseVerify
from cms.shopnum.shopnum_ProductListCategory_sqli import shopnum_ProductListCategory_sqli_BaseVerify
from cms.shopnum.shopnum_GuidBuyList_sqli import shopnum_GuidBuyList_sqli_BaseVerify
from cms.shopnum.shopnum_ProductDetail_sqli import shopnum_ProductDetail_sqli_BaseVerify
#fastmeeting vulns
from cms.fastmeeting.fastmeeting_download_filedownload import fastmeeting_download_filedownload_BaseVerify
#远古 vulns
from cms.viewgood.viewgood_two_sqli import viewgood_two_sqli_BaseVerify
from cms.viewgood.viewgood_pic_proxy_sqli import viewgood_pic_proxy_sqli_BaseVerify
from cms.viewgood.viewgood_GetCaption_sqli import viewgood_GetCaption_sqli_BaseVerify
#shop7z vulns
from cms.shop7z.shop7z_order_checknoprint_sqli import shop7z_order_checknoprint_sqli_BaseVerify
#dreamgallery vulns
from cms.dreamgallery.dreamgallery_album_id_sqli import dreamgallery_album_id_sqli_BaseVerify
#kxmail vulns
from cms.kxmail.kxmail_login_server_sqli import kxmail_login_server_sqli_BaseVerify
#shopnc vulns
from cms.shopnc.shopnc_index_class_id_sqli import shopnc_index_class_id_sqli_BaseVerify
#shadowsit vulns
from cms.shadowsit.shadowsit_selector_lfi import shadowsit_selector_lfi_BaseVerify
#phpcms vulns
from cms.phpcms.phpcms_digg_add_sqli import phpcms_digg_add_sqli_BaseVerify
from cms.phpcms.phpcms_authkey_disclosure import phpcms_authkey_disclosure_BaseVerify
from cms.phpcms.phpcms_flash_upload_sqli import phpcms_flash_upload_sqli_BaseVerify
from cms.phpcms.phpcms_product_code_exec import phpcms_product_code_exec_BaseVerify
from cms.phpcms.phpcms_v96_sqli import phpcms_v96_sqli_BaseVerify
from cms.phpcms.phpcms_v961_fileread import phpcms_v961_fileread_BaseVerify
#seacms vulns
from cms.seacms.seacms_search_code_exec import seacms_search_code_exec_BaseVerify
from cms.seacms.seacms_order_code_exec import seacms_order_code_exec_BaseVerify
#cmseasy vulns
from cms.cmseasy.cmseasy_header_detail_sqli import cmseasy_header_detail_sqli_BaseVerify
#phpmyadmin vulns
from cms.phpmyadmin.phpmyadmin_setup_lfi import phpmyadmin_setup_lfi_BaseVerify
#opensns vulns
from cms.opensns.opensns_index_arearank import opensns_index_arearank_BaseVerify
from cms.opensns.opensns_index_getshell import opensns_index_getshell_BaseVerify
#thinksns vulns
from cms.thinksns.thinksns_category_code_exec import thinksns_category_code_exec_BaseVerify
#others vulns
from cms.others.domino_unauth import domino_unauth_BaseVerify
from cms.others.hjsoft_sqli import hjsoft_sqli_BaseVerify
from cms.others.hnkj_researchinfo_dan_sqli import hnkj_researchinfo_dan_sqli_BaseVerify
from cms.others.gpcsoft_ewebeditor_weak import gpcsoft_ewebeditor_weak_BaseVerify
from cms.others.rap_interface_struts_exec import rap_interface_struts_exec_BaseVerify
from cms.others.hongan_dlp_struts_exec import hongan_dlp_struts_exec_BaseVerify
from cms.others.jiuyu_library_struts_exec import jiuyu_library_struts_exec_BaseVerify
from cms.others.yaojie_steel_struts_exec import yaojie_steel_struts_exec_BaseVerify
from cms.others.dkcms_database_disclosure import dkcms_database_disclosure_BaseVerify
from cms.others.damall_selloffer_sqli import damall_selloffer_sqli_BaseVerify
from cms.others.yeu_disclosure_uid import yeu_disclosure_uid_BaseVerify
from cms.others.clib_kinweblistaction_download import clib_kinweblistaction_download_BaseVerify
from cms.others.euse_study_multi_sqli import euse_study_multi_sqli_BaseVerify
from cms.others.suntown_upfile_fileupload import suntown_upfile_fileupload_BaseVerify
from cms.others.skytech_bypass_priv import skytech_bypass_priv_BaseVerify
from cms.others.mallbuilder_change_status_sqli import mallbuilder_change_status_sqli_BaseVerify
from cms.others.efuture_downloadAct_filedownload import efuture_downloadAct_filedownload_BaseVerify
from cms.others.kj65n_monitor_sqli import kj65n_monitor_sqli_BaseVerify
from cms.others.sinda_downloadfile_download import sinda_downloadfile_download_BaseVerify
from cms.others.lianbang_multi_bypass_priv import lianbang_multi_bypass_priv_BaseVerify
from cms.others.star_PostSuggestion_sqli import star_PostSuggestion_sqli_BaseVerify
from cms.others.hezhong_list_id_sqli import hezhong_list_id_sqli_BaseVerify
from cms.others.cicro_DownLoad_filedownload import cicro_DownLoad_filedownload_BaseVerify
from cms.others.huaficms_bypass_js import huaficms_bypass_js_BaseVerify
from cms.others.nongyou_multi_sqli import nongyou_multi_sqli_BaseVerify
from cms.others.zfcgxt_UserSecurityController_getpass import zfcgxt_UserSecurityController_getpass_BaseVerify
from cms.others.mainone_b2b_Default_sqli import mainone_b2b_Default_sqli_BaseVerify
from cms.others.mainone_SupplyList_sqli import mainone_SupplyList_sqli_BaseVerify
from cms.others.workyi_multi_sqli import workyi_multi_sqli_BaseVerify
from cms.others.newedos_multi_sqli import newedos_multi_sqli_BaseVerify
from cms.others.xtcms_download_filedownload import xtcms_download_filedownload_BaseVerify
from cms.others.gn_consulting_sqli import gn_consulting_sqli_BaseVerify
from cms.others.caitong_multi_sqli import caitong_multi_sqli_BaseVerify
from cms.others.anmai_teachingtechnology_sqli import anmai_teachingtechnology_sqli_BaseVerify
from cms.others.alkawebs_viewnews_sqli import alkawebs_viewnews_sqli_BaseVerify
from cms.others.caitong_multi_sleep_sqli import caitong_multi_sleep_sqli_BaseVerify
from cms.others.clib_kindaction_fileread import clib_kindaction_fileread_BaseVerify
from cms.others.mainone_ProductList_sqli import mainone_ProductList_sqli_BaseVerify
from cms.others.eis_menu_left_edit_sqli import eis_menu_left_edit_sqli_BaseVerify
from cms.others.tianbo_Type_List_sqli import tianbo_Type_List_sqli_BaseVerify
from cms.others.tianbo_TCH_list_sqli import tianbo_TCH_list_sqli_BaseVerify
from cms.others.tianbo_Class_Info_sqli import tianbo_Class_Info_sqli_BaseVerify
from cms.others.tianbo_St_Info_sqli import tianbo_St_Info_sqli_BaseVerify
from cms.others.nongyou_Item2_sqli import nongyou_Item2_sqli_BaseVerify
from cms.others.gxwssb_fileDownloadmodel_download import gxwssb_fileDownloadmodel_download_BaseVerify
from cms.others.anmai_grghjl_stuNo_sqli import anmai_grghjl_stuNo_sqli_BaseVerify
from cms.others.nongyou_ShowLand_sqli import nongyou_ShowLand_sqli_BaseVerify
from cms.others.nongyou_sleep_sqli import nongyou_sleep_sqli_BaseVerify
from cms.others.zf_cms_FileDownload import zf_cms_FileDownload_BaseVerify
from cms.others.shiyou_list_keyWords_sqli import shiyou_list_keyWords_sqli_BaseVerify
from cms.others.zhuofan_downLoadFile_download import zhuofan_downLoadFile_download_BaseVerify
from cms.others.gevercms_downLoadFile_filedownload import gevercms_downLoadFile_filedownload_BaseVerify
from cms.others.ips_community_suite_code_exec import ips_community_suite_code_exec_BaseVerify
from cms.others.skytech_geren_list_page_sqli import skytech_geren_list_page_sqli_BaseVerify
from cms.others.xuezi_ceping_unauth import xuezi_ceping_unauth_BaseVerify
from cms.others.haohan_FileDown_filedownload import haohan_FileDown_filedownload_BaseVerify
from cms.others.mingteng_cookie_deception import mingteng_cookie_deception_BaseVerify
from cms.others.jxt1039_unauth import jxt1039_unauth_BaseVerify
| [
"297954441@qq.com"
] | 297954441@qq.com |
37914a9c4814608da59a345b4a43f3bd0acdd6ec | 9f0ae1475328635c2e5123e65d8cf43fefa97248 | /relojito.py | 516009f01d96e755d0514b8a6cd8b3e4ea1adb6d | [] | no_license | SamuelMedrano27/Juego | 581ff67d2ac0d449ba1093990fd36375f71345f6 | 4ec0b3c58db088047b447555e0728d8a58c35b0f | refs/heads/main | 2023-05-12T18:57:18.552795 | 2021-06-10T02:07:42 | 2021-06-10T02:07:42 | 367,969,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,425 | py | import pygame
import sys
import random
from pygame.locals import *
pygame.init()
#COLORS (RGB tuples)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
ORANGE=(255,165,5)
CELESTE=(66,148,255)
#SCREEN size in pixels
ANCHO=1000
ALTO=800
#PLAYER: [x, y] start position near the bottom centre, and circle radius
JUGADOR_POS=[500,750]
JUGADOR_TAM=20
#ENEMY 1: circle radius and random spawn position along the top edge
ENEMIGO_TAM=25
ENEMIGO_POS=[random.randint(0,ANCHO-ENEMIGO_TAM),30]
#ENEMY 2
ENEMIGO_TAM2=35
ENEMIGO_POS2=[random.randint(0,ANCHO-ENEMIGO_TAM2),30]
#ENEMY 3
ENEMIGO_TAM3=30
ENEMIGO_POS3=[random.randint(0,ANCHO-ENEMIGO_TAM3),30]
#ENEMY 4
ENEMIGO_TAM4=38
ENEMIGO_POS4=[random.randint(0,ANCHO-ENEMIGO_TAM4),30]
#ENEMY 5
ENEMIGO_TAM5=42
ENEMIGO_POS5=[random.randint(0,ANCHO-ENEMIGO_TAM5),30]
#window and frame clock
ventana= pygame.display.set_mode((ANCHO,ALTO))
pygame.display.set_caption("SAMMEX")
clock=pygame.time.Clock()
#Title text
fuente = pygame.font.Font(None, 50)
texto1 = fuente.render("Bienvenidos al JOSHISAM", 0, (ORANGE))
#Timer label text ("info" is rendered but never blitted below -- looks unused)
fuente1 = pygame.font.SysFont("Arial",34,True,False)
info = fuente1.render("Cloock", 0, (ORANGE))
texto2 = fuente1.render("Tiempo :", 0, (WHITE))
#Functions
def detectar_colision1(JUGADOR_POS, ENEMIGO_POS, jugador_tam=None, enemigo_tam=None):
    """Return True if enemy 1's bounding box overlaps the player's box.

    ``jugador_tam``/``enemigo_tam`` default to the module-level sizes
    (JUGADOR_TAM, ENEMIGO_TAM) so existing callers are unchanged; passing
    them explicitly makes the check reusable and testable in isolation.
    """
    if jugador_tam is None:
        jugador_tam = JUGADOR_TAM
    if enemigo_tam is None:
        enemigo_tam = ENEMIGO_TAM
    jx = JUGADOR_POS[0]
    jy = JUGADOR_POS[1]
    ex = ENEMIGO_POS[0]
    ey = ENEMIGO_POS[1]
    # Horizontal overlap: one box's left edge lies inside the other box.
    if (ex > jx and ex < (jx + jugador_tam)) or (jx >= ex and jx < (ex + enemigo_tam)):
        # Vertical overlap (bug fix: the original compared jy against ex,
        # mixing the x and y axes).
        if (ey > jy and ey < (jy + jugador_tam)) or (jy >= ey and jy < (ey + enemigo_tam)):
            return True
    return False
def detectar_colision2(JUGADOR_POS, ENEMIGO_POS2, jugador_tam=None, enemigo_tam=None):
    """Return True if enemy 2's bounding box overlaps the player's box.

    Size parameters default to JUGADOR_TAM and ENEMIGO_TAM2 so existing
    callers are unchanged. Bug fixes vs. the original: the vertical test
    compared jy against ex (mixed axes), and the box size used was
    ENEMIGO_TAM (enemy 1's size) instead of ENEMIGO_TAM2.
    """
    if jugador_tam is None:
        jugador_tam = JUGADOR_TAM
    if enemigo_tam is None:
        enemigo_tam = ENEMIGO_TAM2
    jx = JUGADOR_POS[0]
    jy = JUGADOR_POS[1]
    ex = ENEMIGO_POS2[0]
    ey = ENEMIGO_POS2[1]
    # Horizontal overlap of the two axis-aligned boxes.
    if (ex > jx and ex < (jx + jugador_tam)) or (jx >= ex and jx < (ex + enemigo_tam)):
        # Vertical overlap.
        if (ey > jy and ey < (jy + jugador_tam)) or (jy >= ey and jy < (ey + enemigo_tam)):
            return True
    return False
def detectar_colision3(JUGADOR_POS, ENEMIGO_POS3, jugador_tam=None, enemigo_tam=None):
    """Return True if enemy 3's bounding box overlaps the player's box.

    Size parameters default to JUGADOR_TAM and ENEMIGO_TAM3 so existing
    callers are unchanged; passing them explicitly makes the check testable.
    """
    if jugador_tam is None:
        jugador_tam = JUGADOR_TAM
    if enemigo_tam is None:
        enemigo_tam = ENEMIGO_TAM3
    jx = JUGADOR_POS[0]
    jy = JUGADOR_POS[1]
    ex = ENEMIGO_POS3[0]
    ey = ENEMIGO_POS3[1]
    # Horizontal overlap: one box's left edge lies inside the other box.
    if (ex > jx and ex < (jx + jugador_tam)) or (jx >= ex and jx < (ex + enemigo_tam)):
        # Vertical overlap (bug fix: the original compared jy against ex).
        if (ey > jy and ey < (jy + jugador_tam)) or (jy >= ey and jy < (ey + enemigo_tam)):
            return True
    return False
def detectar_colision4(JUGADOR_POS, ENEMIGO_POS4, jugador_tam=None, enemigo_tam=None):
    """Return True if enemy 4's bounding box overlaps the player's box.

    Size parameters default to JUGADOR_TAM and ENEMIGO_TAM4 so existing
    callers are unchanged; passing them explicitly makes the check testable.
    """
    if jugador_tam is None:
        jugador_tam = JUGADOR_TAM
    if enemigo_tam is None:
        enemigo_tam = ENEMIGO_TAM4
    jx = JUGADOR_POS[0]
    jy = JUGADOR_POS[1]
    ex = ENEMIGO_POS4[0]
    ey = ENEMIGO_POS4[1]
    # Horizontal overlap: one box's left edge lies inside the other box.
    if (ex > jx and ex < (jx + jugador_tam)) or (jx >= ex and jx < (ex + enemigo_tam)):
        # Vertical overlap (bug fix: the original compared jy against ex).
        if (ey > jy and ey < (jy + jugador_tam)) or (jy >= ey and jy < (ey + enemigo_tam)):
            return True
    return False
def detectar_colision5(JUGADOR_POS, ENEMIGO_POS5, jugador_tam=None, enemigo_tam=None):
    """Return True if enemy 5's bounding box overlaps the player's box.

    Size parameters default to JUGADOR_TAM and ENEMIGO_TAM5 so existing
    callers are unchanged; passing them explicitly makes the check testable.
    """
    if jugador_tam is None:
        jugador_tam = JUGADOR_TAM
    if enemigo_tam is None:
        enemigo_tam = ENEMIGO_TAM5
    jx = JUGADOR_POS[0]
    jy = JUGADOR_POS[1]
    ex = ENEMIGO_POS5[0]
    ey = ENEMIGO_POS5[1]
    # Horizontal overlap: one box's left edge lies inside the other box.
    if (ex > jx and ex < (jx + jugador_tam)) or (jx >= ex and jx < (ex + enemigo_tam)):
        # Vertical overlap (bug fix: the original compared jy against ex).
        if (ey > jy and ey < (jy + jugador_tam)) or (jy >= ey and jy < (ey + enemigo_tam)):
            return True
    return False
game_over=False
running= True
# Main game loop: move enemies down, test collisions, redraw, handle input.
# The loop exits (window stays frozen on the last frame) once any enemy
# touches the player.
while not game_over:
    # NOTE(review): this flag is cleared on the first frame and never read
    # again -- it looks like leftover scaffolding.
    if running:
        running =False
    ventana.fill(BLACK)
    #Enemy 1 movement: fall 20 px per frame; respawn at a random x at the top
    if ENEMIGO_POS[1]>=0 and ENEMIGO_POS[1]<ALTO:
        ENEMIGO_POS[1]+=20
    else:
        ENEMIGO_POS[0]=random.randint(0,ANCHO-ENEMIGO_TAM)
        ENEMIGO_POS[1]=0
    #Enemy 2 movement (25 px per frame)
    if ENEMIGO_POS2[1]>=0 and ENEMIGO_POS2[1]<ALTO:
        ENEMIGO_POS2[1]+=25
    else:
        ENEMIGO_POS2[0]=random.randint(0,ANCHO-ENEMIGO_TAM2)
        ENEMIGO_POS2[1]=0
    #Enemy 3 movement
    if ENEMIGO_POS3[1]>=0 and ENEMIGO_POS3[1]<ALTO:
        ENEMIGO_POS3[1]+=25
    else:
        ENEMIGO_POS3[0]=random.randint(0,ANCHO-ENEMIGO_TAM3)
        ENEMIGO_POS3[1]=0
    #Enemy 4 movement
    if ENEMIGO_POS4[1]>=0 and ENEMIGO_POS4[1]<ALTO:
        ENEMIGO_POS4[1]+=25
    else:
        ENEMIGO_POS4[0]=random.randint(0,ANCHO-ENEMIGO_TAM4)
        ENEMIGO_POS4[1]=0
    #Enemy 5 movement
    if ENEMIGO_POS5[1]>=0 and ENEMIGO_POS5[1]<ALTO:
        ENEMIGO_POS5[1]+=25
    else:
        ENEMIGO_POS5[0]=random.randint(0,ANCHO-ENEMIGO_TAM5)
        ENEMIGO_POS5[1]=0
    #Collisions: any contact with an enemy ends the game
    if detectar_colision1(JUGADOR_POS,ENEMIGO_POS,):
        game_over=True
    if detectar_colision2(JUGADOR_POS,ENEMIGO_POS2,):
        game_over=True
    if detectar_colision3(JUGADOR_POS,ENEMIGO_POS3,):
        game_over=True
    if detectar_colision4(JUGADOR_POS,ENEMIGO_POS4,):
        game_over=True
    if detectar_colision5(JUGADOR_POS,ENEMIGO_POS5,):
        game_over=True
    #Draw enemy 1
    pygame.draw.circle(ventana, RED, (ENEMIGO_POS[0], ENEMIGO_POS[1]), ENEMIGO_TAM, ENEMIGO_TAM)
    #Draw enemy 2
    pygame.draw.circle(ventana, WHITE, (ENEMIGO_POS2[0], ENEMIGO_POS2[1]), ENEMIGO_TAM2, ENEMIGO_TAM2)
    #Draw enemy 3
    pygame.draw.circle(ventana, BLUE, (ENEMIGO_POS3[0], ENEMIGO_POS3[1]), ENEMIGO_TAM3, ENEMIGO_TAM3)
    #Draw enemy 4
    pygame.draw.circle(ventana, ORANGE, (ENEMIGO_POS4[0], ENEMIGO_POS4[1]), ENEMIGO_TAM4, ENEMIGO_TAM4)
    #Draw enemy 5
    pygame.draw.circle(ventana, CELESTE, (ENEMIGO_POS5[0], ENEMIGO_POS5[1]), ENEMIGO_TAM5, ENEMIGO_TAM5)
    #Draw player
    pygame.draw.circle(ventana, GREEN, (JUGADOR_POS[0], JUGADOR_POS[1]), JUGADOR_TAM, JUGADOR_TAM)
    #Elapsed time since pygame.init(), in seconds, shown as the survival timer
    segundos=pygame.time.get_ticks()/1000
    segundos=str(segundos)
    contador=fuente1.render(segundos,0,(GREEN))
    ventana.blit(contador, (120,20))
    ventana.blit(texto1, (300,50))
    ventana.blit(texto2, (5,20))
    clock.tick(54)#frame-rate cap (game speed)
    pygame.display.update()
    #Input: closing the window quits; left/right arrows move the player
    for event in pygame.event.get():
        if event.type ==pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            x=JUGADOR_POS[0]
            # NOTE(review): both blocks below apply the arrow-key movement,
            # so a keypress in mid-screen moves the player twice per event;
            # the edge clamps (x<9 -> 10, x>ANCHO-2 -> ANCHO-3) also look
            # asymmetric -- confirm the intended bounds.
            if x>=9:
                if event.key == pygame.K_LEFT:
                    x-=JUGADOR_TAM
                if event.key == pygame.K_RIGHT:
                    x+=JUGADOR_TAM
                JUGADOR_POS[0]=x
            else:
                x=10
                JUGADOR_POS[0]=x
            if x<=ANCHO-2:
                if event.key == pygame.K_LEFT:
                    x-=JUGADOR_TAM
                if event.key == pygame.K_RIGHT:
                    x+=JUGADOR_TAM
                JUGADOR_POS[0]=x
            else:
                x=ANCHO-3
                JUGADOR_POS[0]=x
| [
"samu100pre@gmail.com"
] | samu100pre@gmail.com |
650a8a893c1dcebd6bb63eb7ae18ee8468bf566d | 3c1e51cdc1e8fe95cd1dc9674954622b7ee1e71a | /backend/mobilegeee_28456/settings.py | 60372fff369e36c9a4d533d7b555077d3c91e624 | [] | no_license | crowdbotics-apps/mobilegeee-28456 | ed22c9fd3008f73442bee4af7fed0887a5ae436d | dedcfddd27f9707bfc584f602341cc32d2e79034 | refs/heads/master | 2023-06-09T02:30:41.908432 | 2021-07-05T13:52:25 | 2021-07-05T13:52:25 | 383,155,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,114 | py | """
Django settings for mobilegeee_28456 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mobilegeee_28456.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mobilegeee_28456.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
973a32d7cadd4805cd5facd891aa17ccbdc9f521 | 3b65e030d5abe9aa6f68c9c83a706aeb31fbed22 | /logscanlib.py | 3e0b28d3f5a7bb85fcfb4c7180a7c05fd7266054 | [] | no_license | GeekHades/logscan | 803dde06a5f1df87f43b8b24cda02fd00d812868 | 260b6cf0046754b24726e503f171b99095fd9499 | refs/heads/master | 2021-01-23T05:10:38.203059 | 2014-02-22T11:16:42 | 2014-02-22T11:16:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,211 | py | """
logscanlib
~~~~~~~~~~
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import re
import sys
import datetime
from gzip import GzipFile
CODEMAP = {
'%Y': '(?P<Y>\d{4})',
'%m': '(?P<m>\d{2})',
'%d': '(?P<d>[ |\d]\d)',
'%H': '(?P<H>\d{2})',
'%M': '(?P<M>\d{2})',
'%S': '(?P<S>\d{2})',
'%a': '(?P<a>[a-zA-Z]{3})',
'%A': '(?P<A>[a-zA-Z]{6,9})',
'%b': '(?P<b>[a-zA-Z]{3})',
'%B': '(?P<B>[a-zA-Z]{3,9})',
'%c': '(?P<c>([a-zA-Z]{3} ){2} \d{1,2} (\d{2}:){2}\d{2}) \d{4}',
'%I': '(?P<I>\d{2})',
'%j': '(?P<j>\d{3})',
'%p': '(?P<p>[A-Z]{2})',
'%U': '(?P<U>\d{2})',
'%W': '(?P<W>\d{2})',
'%w': '(?P<w>\d{1})',
'%y': '(?P<y>\d{2})',
'%x': '(?P<x>(\d{2}/){2}\d{2})',
'%X': '(?P<X>(\d{2}:){2}\d{2})',
'%%': '%',
'%s': '^(?P<s>\d{10})',
'timestamp': '(?P<S>\d{10}\.\d{3})',
}
TIMECODES = [
'%Y-%m-%d %H:%M:%S',
'%b %d %X %Y',
'%b %d %X',
'%s',
'timestamp',
]
def add_timecodes(timecodes):
    """Register additional timecode formats for automatic detection.

    Formats already present in ``TIMECODES`` are skipped, so calling
    this repeatedly with the same codes does not create duplicates.
    """
    global TIMECODES
    for code in timecodes:
        if code not in TIMECODES:
            TIMECODES.append(code)
class TimeCodeError(Exception):
    """
    Raised when a log line cannot be parsed with the configured
    timecode, or when none of the known TIMECODES matches it.
    """
class Log():
    """Get time specific access to a logfile.

    Wraps a single (optionally gzip-compressed) logfile and exposes its
    start/end times plus time-range slicing via :meth:`get_section`.
    NOTE: the timecode and compiled regexp are class attributes, so all
    Log instances share the most recently set timecode.
    """
    def __init__(self, fileobj, timecode=None):
        # An explicit timecode overrides auto-detection for all Log objects.
        if timecode:
            self._set_timecode(timecode)
        self._name = fileobj.name
        # Transparently decompress rotated *.gz files.
        if self.name.endswith('.gz'):
            fileobj = GzipFile(fileobj=fileobj)
        self._fileobj = fileobj
        # stdin cannot be re-read, so its lines are buffered immediately;
        # regular files are read lazily on first access to `lines`.
        if self.name == sys.stdin.name:
            self._lines = self._fileobj.readlines()
        else:
            self._lines = None
        self._start = None
        self._end = None
    # Shared parser state: the active strftime-style timecode and the
    # regular expression compiled from it (see CODEMAP).
    _timecode = None
    _regexp = None
    @classmethod
    def _set_timecode(cls, timecode):
        """Compile `timecode` into a regexp and make it the active format."""
        cls._timecode = timecode
        # Substitute each %-directive with its matching regexp fragment.
        for code in CODEMAP:
            timecode = timecode.replace(code, CODEMAP[code])
        cls._regexp = re.compile(timecode)
    @classmethod
    def _detect_timecode(cls, line):
        """Try to find a matching timecode.

        Probes every format in TIMECODES against `line`; the first one
        that parses wins and stays active. Raises TimeCodeError if none fit.
        """
        for timecode in TIMECODES:
            cls._set_timecode(timecode)
            try:
                time = cls._get_linetime(line)
            except TimeCodeError:
                continue
            else:
                return time
        cls._timecode = None
        raise TimeCodeError("...no proper timecode was found")
    @classmethod
    def _get_linetime(cls, line):
        """Get the logtime of a line.

        Returns a datetime parsed from `line`; triggers auto-detection
        if no timecode is active yet. Raises TimeCodeError on mismatch.
        """
        if not cls._timecode:
            return cls._detect_timecode(line)
        match = cls._regexp.search(line)
        if not match:
            raise TimeCodeError("invalid timecode: '%s'" % cls._timecode)
        # Unix-timestamp style codes are converted directly; everything
        # else goes through strptime with the original timecode.
        if cls._timecode in ['timestamp', '%s']:
            time = datetime.datetime.fromtimestamp(float(match.group()))
        else:
            time = datetime.datetime.strptime(match.group(), cls._timecode)
        # Formats without a year (e.g. syslog's '%b %d %X') default to 1900;
        # assume the current year, or last year if that lies in the future.
        # TODO: maybe find a more elegant solution
        if time.year == 1900:
            today = datetime.datetime.today()
            time = time.replace(year=today.year)
            if time > today:
                time = time.replace(year=today.year - 1)
        return time
    def _get_first_line(self):
        # Prefer the buffered lines; otherwise read from file start.
        if self._lines:
            return self.lines[0]
        self._fileobj.seek(0)
        return self._fileobj.readline()
    def _get_last_line(self):
        # gzip.seek don't support seeking from end on
        if self._lines or isinstance(self._fileobj, GzipFile):
            return self.lines[-1]
        else:
            # Read only the tail of the file (up to 400 bytes) to find the
            # last line without loading the whole file.
            size = os.stat(self.name).st_size
            if size < 400:
                seek = -size
            else:
                seek = -400
            self._fileobj.seek(seek, 2)
            return self._fileobj.readlines()[-1]
    def _get_index(self, time, index=0):
        """Return the index of the first line at or after `time`.

        `index` lets a caller resume a previous linear scan. Returns None
        for a falsy `time` (open-ended slice boundary).
        """
        if not time:
            return None
        if time <= self.start:
            return 0
        if time > self.end:
            return len(self.lines)
        i = index or 0
        # Linear scan forward until the line time reaches `time`.
        while time > self._get_linetime(self.lines[i]):
            i += 1
            if i == len(self.lines):
                break
        return i
    @property
    def name(self):
        """filename
        """
        return self._name
    @property
    def start(self):
        """start-time of the log (cached after first access)
        """
        if not self._start:
            first_line = self._get_first_line()
            self._start = self._get_linetime(first_line)
        return self._start
    @property
    def end(self):
        """end-time of the log (cached after first access)
        """
        if not self._end:
            last_line = self._get_last_line()
            self._end = self._get_linetime(last_line)
        return self._end
    @property
    def lines(self):
        """all lines of the log (read lazily and cached)
        """
        if not self._lines:
            self._fileobj.seek(0)
            self._lines = self._fileobj.readlines()
        return self._lines
    def get_section(self, start=None, end=None):
        "Get loglines between two specified datetimes (start inclusive, end exclusive)."
        # Requested window lies entirely outside this file: nothing to return.
        if start and start > self.end:
            return list()
        if end and end <= self.start:
            return list()
        index1 = self._get_index(start)
        # Resume the scan at index1 since end >= start.
        index2 = self._get_index(end, index1)
        return self.lines[index1:index2]
    def close(self):
        """Close the fileobject.
        """
        self._fileobj.close()
class RotatedLogs():
    """Get time-specific access to rotated logfiles.

    Treats `name`, `name.1`, `name.2`, ... (optionally gzipped as
    `name.N.gz`) as one continuous log, ordered oldest-first.
    """
    def __init__(self, fileobj, timecode=None):
        self._name = fileobj.name
        # The live (unrotated) file is always the last entry.
        self._files = [Log(fileobj, timecode)]
        # stdin has no rotated siblings on disk.
        if self.name != sys.stdin.name:
            self._rotate()
    def _rotate(self):
        """Discover rotated siblings (name.1, name.2.gz, ...) and prepend
        them so that self._files is ordered oldest-first."""
        i = 1
        name = self.name
        insert = lambda name:\
            self._files.insert(0, Log(open(name, 'rb')))
        # Stop at the first missing suffix; rotation numbers are contiguous.
        while 1:
            name = '%s.%s' % (self.name, i)
            if os.path.isfile(name):
                insert(name)
            elif os.path.isfile(name + '.gz'):
                insert(name + '.gz')
            else:
                break
            i += 1
    @property
    def name(self):
        """
        filename (of the live, unrotated logfile)
        """
        return self._name
    @property
    def quantity(self):
        """
        number of rotated logfiles
        """
        return len(self._files)
    @property
    def start(self):
        """
        start-time of the log (taken from the oldest file)
        """
        return self._files[0].start
    @property
    def end(self):
        """
        end-time of the log (taken from the newest file)
        """
        return self._files[-1].end
    @property
    def lines(self):
        """
        lines of all logfiles, concatenated oldest-first
        """
        lines = list()
        for file in self._files:
            lines += file.lines
        return lines
    def get_section(self, start=None, end=None):
        """
        Get loglines between two specified datetimes.

        Walks the files newest-first, prepending each file's matching
        section, and stops as soon as `start` precedes a file's start.
        """
        if start and start > self.end:
            return list()
        if end and end <= self.start:
            return list()
        # No bounds at all: return everything without scanning.
        if not (start or end):
            return self.lines
        files = self._files[:]
        files.reverse()
        lines = list()
        for file in files:
            # File is entirely after the requested window: skip it.
            if end and end <= file.start:
                continue
            else:
                lines = file.get_section(start, end) + lines
            # Older files cannot contain anything at or after `start`.
            if start and start >= file.start:
                break
        return lines
    def close(self):
        """Close all logfiles.
        """
        for file in self._files:
            file.close()
| [
"thomaslfuss@gmx.de"
] | thomaslfuss@gmx.de |
273756255366ff0a61cc4722b0c151c78ed0c2b7 | 1117f6fc0a09b43b97a396de7155dfa4f5015bac | /single_pages/models.py | 0437e8fa9220801ef3bc4de8d955112d51c903d6 | [] | no_license | youngwoo2020/turtleneck | 69e218eb9d5a1c4f526d8a580cd3dfd6997130f7 | 669340790f032a1c0052bfb4d1bec5bad45f46e0 | refs/heads/main | 2023-08-22T04:14:11.459663 | 2021-10-22T11:44:55 | 2021-10-22T11:44:55 | 413,274,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from django.db import models
# Create your models here.
class User(models.Model):
    """Simple site user with a starting point balance of 10000."""
    user_name = models.CharField(max_length=200, null=False)
    user_id = models.CharField(max_length=200, null=False)
    # NOTE(review): password appears to be stored as plain text here —
    # consider Django's auth framework / hashed passwords. TODO confirm intent.
    user_password = models.CharField(max_length=200, null=False)
    user_point = models.IntegerField(default=10000)
    def __str__(self):
        # Display name used in the admin and shell representations.
        return self.user_name
| [
"youngwoo202078@gamil.com"
] | youngwoo202078@gamil.com |
1187a68a19b872d637c6b16caef681ea72ae907f | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/msData/datatypes/Facets/unsignedByte/unsignedByte_totalDigits003.py | 95d57599df105ea2f4f13dfa3f01a81276e9e890 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 162 | py | from output.models.ms_data.datatypes.facets.unsigned_byte.unsigned_byte_total_digits003_xsd.unsigned_byte_total_digits003 import Test
# W3C conformance instance: a Test element whose foo value (123) must
# satisfy the unsignedByte totalDigits facet under test.
obj = Test(
    foo=123
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
cfc7a1964380fa13b399e8c2cca280f16cb6e155 | fc3b7a0889a56961d886810490c6d6d551d46215 | /catgan_example/normal_utils.py | 0a47edfb0d1ed82ccb641772ec977aba1ae1b4ae | [] | no_license | hsyang1222/gan_metric | 7dec2b170da32f5f929cbe0d03bc9ef6f423fd55 | fcb7dc9821875b5ba43529773bb3fbd5dc7cd5f5 | refs/heads/main | 2023-04-14T09:15:50.697686 | 2021-05-06T12:49:03 | 2021-05-06T12:49:03 | 363,570,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,641 | py | from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
import __main__ as main
print(main.__file__)
from datetime import datetime
import os
dataroot = ''
try:
import nsml
from nsml import DATASET_PATH, SESSION_NAME
print('DATASET_PATH, SESSION_NAME:', DATASET_PATH, '\n', SESSION_NAME)
dataroot = os.path.join(DATASET_PATH, 'train/')
except ImportError:
dataroot = '/data/dataset/cifar10/'
import easydict
args = easydict.EasyDict({
'dataset':'cifar10',
'dataroot':dataroot, # '/data/dataset/'
'workers':2,
'batchSize':64,
'imageSize':64,
'nz':100,
'ngf':64,
'ndf':64,
'niter':25,
'lr':0.0002,
'beta1':0.5,
'cuda':True,
'dry_run':False,
'ngpu':1,
'netG':'',
'netD':'',
'netE':'',
'netZ':'',
'manualSeed':None,
'classes':None,
'outf':'result/' + main.__file__.split('.')[0] + '_' + str(datetime.today().month) + '_' + str(datetime.today().day) + '_' + str(datetime.today().hour),
'n_show': 5,
})
#opt = parser.parse_args()
normal_opt = args
print(normal_opt)
os.makedirs(normal_opt.outf, exist_ok=True)
os.makedirs(normal_opt.outf + '/images/', exist_ok=True)
os.makedirs(normal_opt.outf + '/model/', exist_ok=True)
device = torch.cuda.device("cuda" if normal_opt.cuda else "cpu")
def calculate_activation_statistics(dataloader, model, batch_size=128, dims=2048,
                    cuda=False):
    """Compute the mean and covariance of `model` activations over `dataloader`.

    Used for FID: runs every batch through `model`, flattens the first
    output head, and returns (mu, sigma) of the activations.

    Args:
        dataloader: iterable of batches; each batch's first element is the
            input tensor.
        model: network whose forward pass returns a sequence; element [0]
            is a 4-d activation tensor (N, C, H, W).
        batch_size: unused, kept for backward compatibility with callers.
        dims: unused here (activation width is inferred from the model
            output); kept for backward compatibility.
        cuda: move batches to the GPU before the forward pass.

    Returns:
        (mu, sigma): per-feature mean vector and covariance matrix (numpy).
    """
    model.eval()
    pred_list = []
    # Inference only: no autograd graph needed.
    with torch.no_grad():
        for data in dataloader:
            # BUG FIX: previously the batch was unconditionally moved to the
            # GPU, crashing on CPU-only hosts; honor the `cuda` flag instead.
            batch = data[0].cuda() if cuda else data[0]
            pred = model(batch)[0]
            pred_list.append(pred.detach().cpu())
    pred = torch.cat(pred_list)
    # If model output is not scalar, apply global spatial average pooling.
    # This happens if you choose a dimensionality not equal 2048.
    # (BUG FIX: adaptive_avg_pool2d was previously an undefined name.)
    if pred.size(2) != 1 or pred.size(3) != 1:
        pred = torch.nn.functional.adaptive_avg_pool2d(pred, output_size=(1, 1))
    act = pred.cpu().data.numpy().reshape(pred.size(0), -1)
    mu = np.mean(act, axis=0)
    sigma = np.cov(act, rowvar=False)
    return mu, sigma
from scipy import linalg
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance between two
    multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2):

        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # Matrix square root of the covariance product.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # Singular product: nudge both covariances and retry.
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error can leave a tiny imaginary component on the root.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError('Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real

    return (mean_diff.dot(mean_diff) + np.trace(sigma1)
            + np.trace(sigma2) - 2 * np.trace(covmean))
def calculate_fretchet(images_real, images_fake, model):
    """Get the Frechet distance (FID) between real and generated images.

    Activations are extracted with `model` on the GPU (cuda=True), then
    compared via calculate_frechet_distance.

    BUG FIX: the original body mixed 5-space statements with a 4-space
    bare string, which is an IndentationError; indentation is normalized
    and the stray string became this docstring.
    """
    mu_1, std_1 = calculate_activation_statistics(images_real, model, cuda=True)
    mu_2, std_2 = calculate_activation_statistics(images_fake, model, cuda=True)
    fid_value = calculate_frechet_distance(mu_1, std_1, mu_2, std_2)
    return fid_value
def torchlist_to_dataloader(fake_list) : 
    """Wrap a list of image tensors into a shuffled DataLoader.

    Batch size and worker count come from the module-level `normal_opt`
    configuration.
    """
    # Concatenate all generated batches into a single tensor.
    fake_set = torch.cat(fake_list)
    fakeDataset = TensorDataset(fake_set)
    fakeDataloader = DataLoader(fakeDataset, batch_size=normal_opt.batchSize,
                                             shuffle=True, num_workers=int(normal_opt.workers))
    return fakeDataloader
| [
"hsyang1222@snu.ac.kr"
] | hsyang1222@snu.ac.kr |
9a6c4d6eb57d5da3b1338ec38322bb55362205de | f9084cb6daa33ff9df5d00a203ffc457d118a138 | /api/bank/models.py | faec5b5009898c17c199d5a105c776c582c1fc6b | [] | no_license | sanjivyash/Django-REST-API | e720136998944a0e6498bc5a6040e3dd3fd10ad0 | c0321f7172752783637694ff85d5b6b8f86580e6 | refs/heads/master | 2023-07-28T21:21:30.707176 | 2020-07-01T16:23:15 | 2020-07-01T16:23:15 | 276,410,773 | 0 | 0 | null | 2021-09-22T19:20:54 | 2020-07-01T15:13:00 | TSQL | UTF-8 | Python | false | false | 784 | py | from django.db import models
class BankDetails(models.Model):
    """Read-only mapping of the pre-existing `bank_details` table
    (branch-level bank records keyed by IFSC)."""
    bank_id = models.AutoField(primary_key=True)
    bank_name = models.CharField(max_length=49, blank=True, null=True)
    # IFSC codes are always exactly 11 characters.
    bank_ifsc = models.CharField(max_length=11, blank=True, null=True)
    bank_branch = models.CharField(max_length=74, blank=True, null=True)
    bank_address = models.CharField(max_length=195, blank=True, null=True)
    bank_city = models.CharField(max_length=50, blank=True, null=True)
    bank_district = models.CharField(max_length=50, blank=True, null=True)
    bank_state = models.CharField(max_length=26, blank=True, null=True)
    def __str__(self):
        return f'{self.bank_name} - {self.bank_ifsc}'
    class Meta:
        # managed=False: Django never creates/alters this legacy table.
        managed = False
        db_table = 'bank_details'
"sanjiv.yash@gmail.com"
] | sanjiv.yash@gmail.com |
7f272352336b3b5980e08c0293f3c0bd05d64ca4 | 15d6d74172d0927687bf39401582e8f2ce943dab | /package_test/test_gridLayer.py | acea81336d851eaaf848d5edd34d799bd60b4ce2 | [] | no_license | VencenZhao/py-utils | c5b861c5d7ddc49db54e29838574c65855f65d9b | f079df1159efb9c6d9fa4034487618dd9a433875 | refs/heads/master | 2022-01-20T02:55:52.774028 | 2019-07-24T02:51:36 | 2019-07-24T02:51:36 | 198,378,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # -*- coding: UTF-8 -*-
import sys
sys.path.append('..')
from package_grid.GridLayer import GridLayer
def main(args):
    """Create a grid layer from command-line arguments.

    args[0] selects the administrative level:
        0 - ministry, 1 - province, 2 - city, 3 - district/county.
    args[1] selects the coordinate system:
        0 - WGS84 coordinates, 1 - GCJ-02 coordinates.
    """
    level = int(args[0])
    use_gcj = bool(int(args[1]))
    layer = GridLayer(level, 1, use_gcj)
    layer.createLayer()
if __name__ == '__main__':
# print(sys.argv[1:])
main(sys.argv[1:]) | [
"zhaobaojue@gmail.com"
] | zhaobaojue@gmail.com |
a5dbdb6f26c7bfee74ad32ab213debd273d682df | b92c39c8498e0c6579a65430e63b7db927d01aea | /python/cookbook/decorator_3.py | d19549b43373954c5dcf57ea393088dd0dcf6812 | [] | no_license | szqh97/test | 6ac15ad54f6d36e1d0efd50cbef3b622d374bb29 | ba76c6ad082e2763554bdce3f1b33fea150865dc | refs/heads/master | 2020-04-06T05:40:55.776424 | 2019-01-14T06:37:38 | 2019-01-14T06:37:38 | 14,772,703 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps, partial
import logging
def logged(func=None, *, level=logging.DEBUG, name=None, message=None):
    """Decorator that logs a message every time the wrapped function runs.

    Usable both bare (``@logged``) and with keyword arguments
    (``@logged(level=..., name=..., message=...)``). `name` defaults to
    the function's module, `message` to the function's name.
    """
    # Called with keyword arguments only: return a decorator that will
    # receive the function on the next call.
    if func is None:
        return partial(logged, level=level, name=name, message=message)

    logger = logging.getLogger(name if name else func.__module__)
    text = message if message else func.__name__

    @wraps(func)
    def wrapper(*args, **kwargs):
        logger.log(level, text)
        return func(*args, **kwargs)

    return wrapper
# Decorated with no arguments: logs at DEBUG under this module's logger.
@logged()
def add(x, y):
    """Return the sum of x and y."""
    return x + y
# Demonstrates the parameterized form: CRITICAL level, custom logger name.
@logged(level=logging.CRITICAL, name='example')
def spam():
    """Print 'Spam' (the call itself is logged at CRITICAL level)."""
    print('Spam')
| [
"szqh97@163.com"
] | szqh97@163.com |
1448b70a663c5dd7830596c8aff74103fd219278 | a2153f1fc6f4ac8236b17ccc4cd740ddafe3e3bc | /posts/migrations/0001_initial.py | 02ef94f3002ddc46ed9bf3c10aced121a52e2d18 | [] | no_license | vivekbhandari93/SNS | 58d15d9e9f09d07e877a638e92e5f686a3118a2b | ce445603eeee57928a8d8b681e950e48b1ab58b1 | refs/heads/master | 2022-12-22T22:10:13.517565 | 2020-09-26T07:32:57 | 2020-09-26T07:32:57 | 297,431,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | # Generated by Django 3.1.1 on 2020-09-23 16:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by `makemigrations`; do not edit by hand — create a new
# migration instead.
class Migration(migrations.Migration):
    # First migration of the `posts` app: creates the Post table.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('groups', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('message', models.TextField()),
                ('message_html', models.TextField(editable=False)),
                ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='groups.group')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created_at'],
                'unique_together': {('user', 'message')},
            },
        ),
    ]
| [
"ervivekbhandari@gmail.com"
] | ervivekbhandari@gmail.com |
002fe3accc7a0a0684f8890376a1128bf94c0d4f | 86acf9935f9338625d0ed33fa4b84124d3c6a8e5 | /Udemy_code/code/s03/SayMyName.py | b4cb9fb43f064d571d3afcb6a04d2d989cac651e | [] | no_license | hernbrem/TYK2CodeAAA | 5c085fada3faaf078ae872857b17ebf8ace049e4 | a5d1108a9899f3394ece4745c70db934ad7c1c75 | refs/heads/master | 2023-05-07T01:03:20.868800 | 2021-05-26T18:45:35 | 2021-05-26T18:45:35 | 325,387,331 | 0 | 0 | null | 2021-02-04T19:16:43 | 2020-12-29T21:08:05 | Python | UTF-8 | Python | false | false | 231 | py | # SayMyName.py
# Ask the user for their name
name = input("What is your name? ")
# Print their name 100 times
for x in range(100):
# Print their name followed by a space, not a new line
print(name, end = " rules! ")
| [
"76752866+hernbrem@users.noreply.github.com"
] | 76752866+hernbrem@users.noreply.github.com |
dbddd79e567508195a8af37251561dc297fc8987 | 791630499123723b9589c0b6fb42f4954b7f254d | /Solving_XOr_gate/activation_layer.py | 47890c096dfd25deac8b792167a0de1ced7ca440 | [] | no_license | szijderveld/Solving_MNIST_from_scratch | 92ff7d2b59dfcde32dbb2848a9b3604c2525eca5 | d35f2e0b81001c75dc1e28b1d5431bbb7de93e7f | refs/heads/master | 2023-03-07T17:51:15.542994 | 2021-02-22T13:28:00 | 2021-02-22T13:28:00 | 266,547,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py |
from layer import Layer
# For our model to learn anything we need to aply non-linear functions to the output of our functions
class ActivationLayer(Layer):
    """Layer that applies a non-linear activation element-wise.

    For the model to learn non-linear mappings, a non-linear function must
    be applied to the output of each dense layer. This layer has no
    learnable parameters.
    """
    def __init__(self, activation, activation_prime):
        # `activation` is the forward function, `activation_prime` its
        # derivative (used during backpropagation).
        self.activation = activation
        self.activation_prime = activation_prime
    # Returns the activation applied to the input.
    def forward_propagation(self, input_data):
        self.input = input_data
        self.output = self.activation(self.input)
        return self.output
    # Return input_error = dE/dX for a given output_error = dE/dY
    # (chain rule: dE/dX = f'(X) * dE/dY).
    # learning_rate is not used because there are no 'learnable' parameters.
    def backward_propagation(self, output_error, learning_rate):
        return self.activation_prime(self.input) * output_error
| [
"noreply@github.com"
] | noreply@github.com |
da7d7fab4e7d7a7d007985b96cfdab50b0ed6471 | 4281075c31975ef70b45612cb155a2e333800724 | /chatapp/settings.py | ead402ec54427761bb5651ccca43008a898fbebd | [] | no_license | ChristC9/ChatApp | a9c950a556929b053d9a43306a3dc98f3009c0df | c2f792710fc6cd183890e36cf86cd0ac531b23aa | refs/heads/master | 2023-06-22T17:46:08.839658 | 2021-07-11T09:12:19 | 2021-07-11T09:12:19 | 384,899,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,578 | py | """
Django settings for chatapp project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed — for production, load
# it from an environment variable and rotate this value.
SECRET_KEY = 'django-insecure-$%6fz5b-1d3cg&qm!3^p!ew!)c4$ce_x=hcjs2t8%1r1w@i$u+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'chatapp',
'chat',
'channels',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatapp.wsgi.application'
ASGI_APPLICATION = 'chatapp.asgi.application'
# ASGI_APPLICATION = 'chatapp.asgi.application'
# CHANNEL_LAYERS = {
# 'default': {
# 'BACKEND': 'channels_redis.core.RedisChannelLayer',
# 'CONFIG': {
# "hosts": [('127.0.0.1', 8000)],
# },
# },
# }
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"nyanhtetaungucsy106@gmail.com"
] | nyanhtetaungucsy106@gmail.com |
e924dd9290da87bcf7962e4a2e76cf2fac9d5e8a | 9e461f40bbdf63d9c00c99f398758f5b236db863 | /Python/motion_client/main.py | 017a2466ac1b43da634527a033e525fdbcd3ed5c | [] | no_license | robbynickles/portfolio | b490a98de9bc7daf6d14b074b2726a06359a8462 | 269b0593ce5e0773fa18f74c4374fcc0bccc5c40 | refs/heads/master | 2021-01-10T05:17:00.762021 | 2015-12-25T00:36:23 | 2015-12-25T00:36:23 | 48,555,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
w, h = Window.width, Window.height
from libs.swipeplane2 import SwipePlane
from libs.device.device_view import DeviceTest
from libs.client.client_view import Client
from plyer import accelerometer, compass, gyroscope
from plyer.libs.server_utils import shutdown_server_thread
class MobileSensorTest(BoxLayout):
    """Layout holding one DeviceTest panel per motion sensor
    (accelerometer, compass, gyroscope)."""
    def __init__(self):
        super(MobileSensorTest, self).__init__()
        # Each DeviceTest pairs a plyer sensor facade with its getter.
        self.add_widget( DeviceTest( 'Accel', 'Accelerometer', accelerometer, accelerometer._get_acceleration ) )
        self.add_widget( DeviceTest( 'Compass', 'Compass', compass, compass._get_orientation ) )
        self.add_widget( DeviceTest( 'Gyro', 'Gyroscope', gyroscope, gyroscope._get_orientation ) )
    def input_sources( self ):
        """Return the DeviceTest widgets (used as Client input sources)."""
        return self.children
class MobileSensorTestApp(App):
    """Kivy app with two swipeable pages: sensor panels and a network client."""
    def on_pause(self):
        # Allow the app to be paused (mobile lifecycle) instead of stopped.
        return True
    def on_resume(self):
        pass
    def on_stop(self):
        # Ensure the background server thread is torn down on exit.
        shutdown_server_thread()
    def build(self):
        swipe_plane = SwipePlane()
        # Page 1: the sensor test panels.
        self.mobile_sensor_test = MobileSensorTest()
        page1 = BoxLayout( pos=(0,0), size=(w, h) )
        page1.add_widget( self.mobile_sensor_test )
        swipe_plane.add_page( page1 )
        # Page 2 (off-screen to the right): client fed by the sensor panels.
        page2 = BoxLayout( pos=(1.2*w,0), size=(w, h) )
        page2.add_widget( Client( self.mobile_sensor_test.input_sources(), cols=1 ) )
        swipe_plane.add_page( page2 )
        return swipe_plane
if __name__ == '__main__':
MobileSensorTestApp().run()
| [
"r.nickles7@gmail.com"
] | r.nickles7@gmail.com |
64f97e236d713e1627d64ec3e03f6c532a3d2e76 | 0f0af12b45aa6f50fb418f9236fc622e56bbbfee | /server/podbaby/history/serializers.py | 783c7dba88c4eec2024b18765cff0ede0b0e6e8b | [] | no_license | danjac/podbaby2 | 234863e5d2ad39902bc64e63e285e7b507049727 | 17f02b0707120797cb3c3cccb95dacddff6375fa | refs/heads/master | 2020-09-24T23:11:08.186829 | 2016-12-03T07:42:59 | 2016-12-03T07:42:59 | 66,766,438 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from rest_framework import serializers
from history.models import Play
class PlaySerializer(serializers.ModelSerializer):
    """Serializes a Play record: which episode was played and when."""

    class Meta:
        model = Play
        fields = (
            'episode',
            'created',
        )
| [
"danjac354@gmail.com"
] | danjac354@gmail.com |
6b2cd42d4259675afcf80d619d0b639176d6d3eb | bdf66bab1a8519e8eba7b97460bbd62e9b150dc2 | /classroom/exercises/lesson6/exercise5.py | 4273ecd14596dcb532ec023c272c5a84f96d9517 | [] | no_license | vyniciuss/udacity-Data-Streaming-ND-Kafka | 9c3990d4767b5322740f369e079c115c8164303b | 398b35d353ecb7a3259466615310ab56f8c9663b | refs/heads/master | 2020-12-13T14:18:38.228519 | 2019-12-03T10:36:04 | 2019-12-03T10:36:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from dataclasses import asdict, dataclass
import json
import random
import faust
@dataclass
class ClickEvent(faust.Record):
    """Click event consumed from Kafka; `score` defaults to 0 until scored."""
    email: str
    timestamp: str
    uri: str
    number: int
    score: int = 0
def add_score(event: ClickEvent) -> ClickEvent:
    """Score an incoming ClickEvent in place and return it.

    The exercise only requires that ``score`` be derived from the record's
    own fields; here longer URIs and larger ``number`` values score higher.
    """
    event.score = len(event.uri) + event.number
    return event


app = faust.App("exercise5", broker="kafka://localhost:9092")

# Source topic of raw click events.
clickevents_topic = app.topic("com.udacity.streams.clickevents", value_type=ClickEvent)

# Destination topic for scored events, keyed by URI.
scored_topic = app.topic(
    "com.udacity.streams.clickevents.scored",
    key_type=str,
    value_type=ClickEvent,
)


@app.agent(clickevents_topic)
async def clickevent(clickevents):
    # Run every record through the scoring processor before this agent's
    # iteration sees it.
    # See: https://faust.readthedocs.io/en/latest/reference/faust.streams.html?highlight=add_processor#faust.streams.Stream.add_processor
    clickevents.add_processor(add_score)
    async for ce in clickevents:
        # Key by URI so downstream consumers can partition per page.
        await scored_topic.send(key=ce.uri, value=ce)
if __name__ == "__main__":
app.main()
| [
"ben@spothero.com"
] | ben@spothero.com |
0485b3c30dddb3fec104ad7ec3bf10074c7e250c | 94c4f0b70dfbe3801329d32405cb601a133ecc50 | /app/models/staffModels.py | 4add501f0926d5e01a049933be6ac830b31a706a | [
"Apache-2.0"
] | permissive | luxutao/staffms | 5f23eaf8b63cdcb8e549103be2319df9b7bb5c7e | 6fe2a263fca4a817fbd18965327bd8ad5326dc6b | refs/heads/master | 2023-02-07T23:05:25.199261 | 2020-06-06T04:03:03 | 2020-06-06T04:03:03 | 192,301,997 | 1 | 0 | Apache-2.0 | 2023-02-02T06:35:00 | 2019-06-17T08:02:16 | Python | UTF-8 | Python | false | false | 2,667 | py | #!/usr/local/bin/python3
# -*- conding: utf-8 -*-
from ..utils import db
from .jobModels import Job
from .companyModels import Company
from .departModels import Department
from .staffInfoModels import StaffInfo
from .logModels import Log
from sqlalchemy.orm import relationship
class Staff(db.Model):
    """SQLAlchemy model for an employee record.

    Links a staff member to a job, company, department and an extended
    personal-info row, and tracks compensation plus join/leave dates.
    """
    __tablename__ = 'staff'
    # Identity and contact details.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(255))
    email = db.Column(db.String(255))
    number = db.Column(db.String(24))
    # Employment lifecycle.
    jointime = db.Column(db.DateTime, server_default=db.func.now())
    leavetime = db.Column(db.DateTime, default=None)
    is_leave = db.Column(db.Boolean, default=False)
    # Job foreign key plus the loaded relationship object.
    job = db.Column(db.Integer, db.ForeignKey('job.id'))
    job_to = relationship("Job",backref="staff_of_job")
    # Compensation: salary_structure is months of salary per year.
    salary = db.Column(db.Integer)
    equity = db.Column(db.Integer)
    salary_structure = db.Column(db.Integer, default=12)
    performance = db.Column(db.Integer, default=0)
    company = db.Column(db.Integer, db.ForeignKey('company.id'))
    company_to = relationship("Company", backref="staff_of_company")
    department = db.Column(db.Integer, db.ForeignKey('department.id'))
    department_to = relationship("Department", backref="staff_of_department")
    staffinfo = db.Column(db.Integer, db.ForeignKey('staffinfo.id'))
    staffinfo_to = relationship("StaffInfo", backref="staff_of_staffinfo")
    # Id of this member's leader in the same table; 0 means no leader.
    leader = db.Column(db.Integer)
    create_time = db.Column(db.DateTime, server_default=db.func.now())
    modify_time = db.Column(db.DateTime, server_default=db.func.now())
    def __init__(self, **kwargs):
        # Bulk-assign every keyword argument as a model attribute.
        for key in kwargs.keys():
            setattr(self, key, kwargs.get(key))
    def __repr__(self):
        return '<Staff %s>' % self.name
    def to_dict(self):
        """Return a JSON-friendly dict of this record with relations expanded."""
        # Copy the instance dict and drop SQLAlchemy's internal state.
        model_dict = dict(self.__dict__)
        del model_dict['_sa_instance_state']
        # Replace foreign-key ids with the related objects' own dicts.
        model_dict['job'] = self.job_to.to_dict()
        model_dict['company'] = self.company_to.to_dict()
        model_dict['department'] = self.department_to.to_dict()
        model_dict['staffinfo'] = self.staffinfo_to.to_dict()
        # Format datetimes: date-only for join/leave, full timestamp otherwise.
        for key in ['jointime', 'leavetime', 'create_time', 'modify_time']:
            if model_dict[key]:
                formatdate = '%Y-%m-%d' if key in ['jointime', 'leavetime'] else '%Y-%m-%d %H:%M:%S'
                model_dict[key] = model_dict[key].strftime(formatdate)
        # Localized yes/no label for the leave flag.
        model_dict['is_leave'] = '是' if model_dict['is_leave'] == 1 else '否'
        # Resolve the leader id to a name; 0 maps to the "none" label.
        model_dict['leader'] = '无' if self.leader == 0 else self.query.get(self.leader).name
        # Attach the related change-log entries (backref from Log).
        model_dict['log'] = [log.to_dict() for log in self.log_of_staff]
        return model_dict
| [
"luxutao"
] | luxutao |
7ad882dffba848593ced53b6c630af79d96bd3b8 | 75d258d0cc8b07134a3db656a16e8c27557e3572 | /n12_m14/circuit_n12_m14_s6_e6_pEFGH.py | 97fdd8fcfb80bed2805d9ebf6bf2f4aeea17ce97 | [] | no_license | tonybruguier/martinis_et_al_data | 7c5acee8cb18586607c0ffdc25bc9b616e0847be | 1a35e6712c5bd4b48ef0027707b52dd81e5aa3f3 | refs/heads/master | 2023-02-23T09:36:24.179239 | 2021-01-24T20:23:04 | 2021-01-24T20:23:04 | 332,266,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,838 | py | import cirq
import numpy as np
# Measurement order: grid qubits over rows 3-5 and columns 3-6, row-major —
# (3,3), (3,4), ..., (5,6).
QUBIT_ORDER = [
    cirq.GridQubit(row, col) for row in range(3, 6) for col in range(3, 7)
]
CIRCUIT = cirq.Circuit(
[
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 0.2767373377033284).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -0.18492941569567625).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -0.33113463396189063).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 0.40440704518468423).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.5862983338115253, phi=0.5200148508319427).on(
cirq.GridQubit(3, 4), cirq.GridQubit(3, 5)
),
cirq.FSimGate(theta=1.5346175385256955, phi=0.5131039467233695).on(
cirq.GridQubit(5, 4), cirq.GridQubit(5, 5)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -0.6722145774944012).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 0.7640224995020534).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 0.049341949396894985).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 0.02393046182589869).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
(cirq.X ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 4)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -4.480708067260001).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 4.525888267898699).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 2.135954522972214).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -2.1822665205802965).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -3.7780476633662574).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 3.817335880513747).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 0.7811374803446167).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -0.6780279413275597).on(cirq.GridQubit(5, 4)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.541977006124425, phi=0.6073798124875975).on(
cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)
),
cirq.FSimGate(theta=1.5138652502397498, phi=0.47710618607286504).on(
cirq.GridQubit(4, 3), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.5849169442855044, phi=0.54346233613361).on(
cirq.GridQubit(4, 5), cirq.GridQubit(4, 6)
),
cirq.FSimGate(theta=1.5398075246432927, phi=0.5174515645943538).on(
cirq.GridQubit(5, 3), cirq.GridQubit(5, 4)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 5.048199817882042).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -5.0030196172433445).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -2.6543362735839113).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 2.6080242759758283).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 3.9045088495271663).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -3.8652206323796765).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -1.5516585295358842).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 1.6547680685529413).on(cirq.GridQubit(5, 4)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -3.2786928385561493).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 3.339006443218924).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -5.390755870544794).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 5.4172568990486605).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 4.367652291347506).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -3.9105776028384707).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.589821065740506, phi=0.5045391214115686).on(
cirq.GridQubit(4, 3), cirq.GridQubit(5, 3)
),
cirq.FSimGate(theta=1.5472406430590444, phi=0.5216932173558055).on(
cirq.GridQubit(4, 4), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.5707871303628709, phi=0.5176678491729374).on(
cirq.GridQubit(4, 6), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 2.9425087256630427).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -2.882195121000268).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 4.466531408750767).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -4.440030380246901).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -4.89701654221443).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 5.354091230723465).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 12.703597923836748).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -12.7869629079138).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 3.782562501914174).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -3.873596611893716).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 4.772639843256901).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -4.771314675186062).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.4668587973263782, phi=0.4976074601121169).on(
cirq.GridQubit(3, 3), cirq.GridQubit(4, 3)
),
cirq.FSimGate(theta=1.603651215218248, phi=0.46649538437100246).on(
cirq.GridQubit(3, 5), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.6160334279232749, phi=0.4353897326147861).on(
cirq.GridQubit(3, 6), cirq.GridQubit(4, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -12.477250219528523).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 12.39388523545147).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -5.4898636407973544).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 5.398829530817813).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -5.863871460773714).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 5.8651966288445525).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 5.16073733770325).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -5.068929415695599).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -3.587134633961795).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 3.6604070451845887).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.5862983338115253, phi=0.5200148508319427).on(
cirq.GridQubit(3, 4), cirq.GridQubit(3, 5)
),
cirq.FSimGate(theta=1.5346175385256955, phi=0.5131039467233695).on(
cirq.GridQubit(5, 4), cirq.GridQubit(5, 5)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -5.556214577494324).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 5.648022499501975).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 3.305341949396799).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -3.232069538174005).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 4)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 7.565359127187911).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -7.506809626368408).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -15.28470806725993).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 15.329888267898626).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 7.019954522972137).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -7.066266520580219).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -13.842047663366333).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 13.881335880513822).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 3.001137480344569).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -2.8980279413275123).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 5.563573798571002).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -5.8504123921354285).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.2947043217999283, phi=0.4859467238431821).on(
cirq.GridQubit(3, 3), cirq.GridQubit(3, 4)
),
cirq.FSimGate(theta=1.541977006124425, phi=0.6073798124875975).on(
cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)
),
cirq.FSimGate(theta=1.5138652502397498, phi=0.47710618607286504).on(
cirq.GridQubit(4, 3), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.5849169442855044, phi=0.54346233613361).on(
cirq.GridQubit(4, 5), cirq.GridQubit(4, 6)
),
cirq.FSimGate(theta=1.5398075246432927, phi=0.5174515645943538).on(
cirq.GridQubit(5, 3), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.4593314109380113, phi=0.5230636172671492).on(
cirq.GridQubit(5, 5), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -7.378072351850649).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 7.436621852670151).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 15.852199817881967).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -15.80701961724327).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -7.538336273583833).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 7.492024275975751).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 13.968508849527241).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -13.929220632379753).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -3.771658529535837).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 3.874768068552894).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -5.593307215154117).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 5.30646862158969).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -8.162692838556204).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 8.223006443218978).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -12.938755870544817).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 12.965256899048683).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -12.724144773112773).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 12.73446915351482).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 11.027652291347495).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -10.570577602838458).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.589821065740506, phi=0.5045391214115686).on(
cirq.GridQubit(4, 3), cirq.GridQubit(5, 3)
),
cirq.FSimGate(theta=1.5472406430590444, phi=0.5216932173558055).on(
cirq.GridQubit(4, 4), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.5124128267683938, phi=0.5133142626030278).on(
cirq.GridQubit(4, 5), cirq.GridQubit(5, 5)
),
cirq.FSimGate(theta=1.5707871303628709, phi=0.5176678491729374).on(
cirq.GridQubit(4, 6), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 7.826508725663096).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -7.7661951210003215).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 12.014531408750791).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -11.988030380246926).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 11.590471496440383).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -11.580147116038336).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -11.55701654221442).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 12.014091230723457).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
(cirq.X ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 26.023597923836856).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -26.106962907913907).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 25.356253063938887).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -25.2805848307585).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 8.370562501914259).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -8.461596611893802).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 10.100639843256841).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -10.099314675186001).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.4668587973263782, phi=0.4976074601121169).on(
cirq.GridQubit(3, 3), cirq.GridQubit(4, 3)
),
cirq.FSimGate(theta=1.47511091993527, phi=0.538612093835262).on(
cirq.GridQubit(3, 4), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.603651215218248, phi=0.46649538437100246).on(
cirq.GridQubit(3, 5), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.6160334279232749, phi=0.4353897326147861).on(
cirq.GridQubit(3, 6), cirq.GridQubit(4, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -25.79725021952863).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 25.713885235451578).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -24.48288974563276).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 24.55855797881315).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -10.07786364079744).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 9.986829530817898).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -11.191871460773655).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 11.193196628844492).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 10.044737337703173).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -9.952929415695523).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -8.401251133882973).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 8.52245467467511).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -6.843134633961698).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 6.916407045184491).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.5862983338115253, phi=0.5200148508319427).on(
cirq.GridQubit(3, 4), cirq.GridQubit(3, 5)
),
cirq.FSimGate(theta=1.5289739216684795, phi=0.5055240639761313).on(
cirq.GridQubit(4, 4), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.5346175385256955, phi=0.5131039467233695).on(
cirq.GridQubit(5, 4), cirq.GridQubit(5, 5)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -10.440214577494247).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 10.5320224995019).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 8.199075778124648).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -8.07787223733251).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 6.561341949396702).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -6.48806953817391).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 12.597359127188014).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -12.538809626368511).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -26.08870806725985).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 26.13388826789855).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 11.90395452297206).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -11.950266520580142).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -23.906047663366408).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 23.945335880513902).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 5.221137480344522).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -5.118027941327464).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 9.263573798570924).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -9.55041239213535).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.2947043217999283, phi=0.4859467238431821).on(
cirq.GridQubit(3, 3), cirq.GridQubit(3, 4)
),
cirq.FSimGate(theta=1.541977006124425, phi=0.6073798124875975).on(
cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)
),
cirq.FSimGate(theta=1.5138652502397498, phi=0.47710618607286504).on(
cirq.GridQubit(4, 3), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.5849169442855044, phi=0.54346233613361).on(
cirq.GridQubit(4, 5), cirq.GridQubit(4, 6)
),
cirq.FSimGate(theta=1.5398075246432927, phi=0.5174515645943538).on(
cirq.GridQubit(5, 3), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.4593314109380113, phi=0.5230636172671492).on(
cirq.GridQubit(5, 5), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -12.410072351850753).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 12.468621852670255).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 26.656199817881895).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -26.611019617243198).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -12.422336273583753).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 12.376024275975672).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 24.032508849527318).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -23.993220632379824).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -5.991658529535789).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 6.094768068552847).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -9.293307215154037).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 9.006468621589612).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
(cirq.X ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 4)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -13.046692838556257).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 13.107006443219033).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -20.486755870544844).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 20.51325689904871).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -19.82814477311278).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 19.838469153514826).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 17.687652291347487).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -17.230577602838448).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.589821065740506, phi=0.5045391214115686).on(
cirq.GridQubit(4, 3), cirq.GridQubit(5, 3)
),
cirq.FSimGate(theta=1.5472406430590444, phi=0.5216932173558055).on(
cirq.GridQubit(4, 4), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.5124128267683938, phi=0.5133142626030278).on(
cirq.GridQubit(4, 5), cirq.GridQubit(5, 5)
),
cirq.FSimGate(theta=1.5707871303628709, phi=0.5176678491729374).on(
cirq.GridQubit(4, 6), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 12.71050872566315).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -12.650195121000372).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 19.562531408750814).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -19.53603038024695).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 18.69447149644039).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -18.684147116038343).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -18.21701654221441).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 18.674091230723448).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 39.34359792383697).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -39.42696290791402).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 38.52825306393881).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -38.452584830758425).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 12.958562501914345).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -13.049596611893888).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 15.428639843256777).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -15.42731467518594).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.4668587973263782, phi=0.4976074601121169).on(
cirq.GridQubit(3, 3), cirq.GridQubit(4, 3)
),
cirq.FSimGate(theta=1.47511091993527, phi=0.538612093835262).on(
cirq.GridQubit(3, 4), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.603651215218248, phi=0.46649538437100246).on(
cirq.GridQubit(3, 5), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.6160334279232749, phi=0.4353897326147861).on(
cirq.GridQubit(3, 6), cirq.GridQubit(4, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -39.11725021952874).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 39.03388523545169).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -37.65488974563269).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 37.730557978813074).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -14.665863640797525).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 14.574829530817984).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -16.519871460773594).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 16.52119662884443).on(cirq.GridQubit(4, 6)),
]
),
cirq.Moment(
operations=[
(cirq.X ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 6)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 14.928737337703097).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -14.836929415695444).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -12.10125113388289).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 12.22245467467503).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -10.099134633961603).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 10.172407045184396).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.5862983338115253, phi=0.5200148508319427).on(
cirq.GridQubit(3, 4), cirq.GridQubit(3, 5)
),
cirq.FSimGate(theta=1.5289739216684795, phi=0.5055240639761313).on(
cirq.GridQubit(4, 4), cirq.GridQubit(4, 5)
),
cirq.FSimGate(theta=1.5346175385256955, phi=0.5131039467233695).on(
cirq.GridQubit(5, 4), cirq.GridQubit(5, 5)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -15.32421457749417).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 15.416022499501823).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 11.899075778124569).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -11.777872237332431).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 9.817341949396608).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -9.744069538173814).on(cirq.GridQubit(5, 5)),
]
),
cirq.Moment(
operations=[
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 3)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 4)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 5)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 6)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 5)),
(cirq.Y ** 0.5).on(cirq.GridQubit(4, 6)),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 3)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 4)
),
(cirq.Y ** 0.5).on(cirq.GridQubit(5, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * 17.629359127188117).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * -17.570809626368614).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * -36.89270806725978).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * 36.93788826789848).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * 16.787954522971983).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * -16.834266520580062).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * -33.970047663366486).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * 34.00933588051398).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * 7.441137480344476).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * -7.338027941327417).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * 12.963573798570843).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * -13.250412392135269).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
cirq.FSimGate(theta=1.2947043217999283, phi=0.4859467238431821).on(
cirq.GridQubit(3, 3), cirq.GridQubit(3, 4)
),
cirq.FSimGate(theta=1.541977006124425, phi=0.6073798124875975).on(
cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)
),
cirq.FSimGate(theta=1.5138652502397498, phi=0.47710618607286504).on(
cirq.GridQubit(4, 3), cirq.GridQubit(4, 4)
),
cirq.FSimGate(theta=1.5849169442855044, phi=0.54346233613361).on(
cirq.GridQubit(4, 5), cirq.GridQubit(4, 6)
),
cirq.FSimGate(theta=1.5398075246432927, phi=0.5174515645943538).on(
cirq.GridQubit(5, 3), cirq.GridQubit(5, 4)
),
cirq.FSimGate(theta=1.4593314109380113, phi=0.5230636172671492).on(
cirq.GridQubit(5, 5), cirq.GridQubit(5, 6)
),
]
),
cirq.Moment(
operations=[
cirq.rz(np.pi * -17.442072351850854).on(cirq.GridQubit(3, 3)),
cirq.rz(np.pi * 17.500621852670356).on(cirq.GridQubit(3, 4)),
cirq.rz(np.pi * 37.46019981788182).on(cirq.GridQubit(3, 5)),
cirq.rz(np.pi * -37.415019617243125).on(cirq.GridQubit(3, 6)),
cirq.rz(np.pi * -17.306336273583675).on(cirq.GridQubit(4, 3)),
cirq.rz(np.pi * 17.260024275975592).on(cirq.GridQubit(4, 4)),
cirq.rz(np.pi * 34.09650884952739).on(cirq.GridQubit(4, 5)),
cirq.rz(np.pi * -34.057220632379895).on(cirq.GridQubit(4, 6)),
cirq.rz(np.pi * -8.211658529535743).on(cirq.GridQubit(5, 3)),
cirq.rz(np.pi * 8.3147680685528).on(cirq.GridQubit(5, 4)),
cirq.rz(np.pi * -12.993307215153958).on(cirq.GridQubit(5, 5)),
cirq.rz(np.pi * 12.706468621589535).on(cirq.GridQubit(5, 6)),
]
),
cirq.Moment(
operations=[
(cirq.Y ** 0.5).on(cirq.GridQubit(3, 3)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 4)),
(cirq.X ** 0.5).on(cirq.GridQubit(3, 5)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(3, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(4, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 5)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(4, 6)
),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 3)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 4)),
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5).on(
cirq.GridQubit(5, 5)
),
(cirq.X ** 0.5).on(cirq.GridQubit(5, 6)),
]
),
]
)
| [
"tony.bruguier@gmail.com"
] | tony.bruguier@gmail.com |
06e73adb0564782fe97690b3cfa9e67a2fe741ec | ac98203be5bfea90667a1698f864c27df6874a19 | /solutions/1-25/problem_022/problem_022.py | fe72a883f73e32a2c2c819b90834a03de6851f6f | [] | no_license | samvbeckmann/Euler | a3d340862c64c6a4521a01a1eaad35c8409e1543 | df8b6c77cc4da64953d8bda8a2e9416207d7743d | refs/heads/master | 2021-01-10T21:50:17.901402 | 2015-12-12T00:33:17 | 2015-12-12T00:33:17 | 46,757,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | import re
# Project Euler problem 22: sum, over the alphabetically sorted names in
# p022_names.txt, of (1-based position) * (sum of letter values, A=1).
total = 0
with open('p022_names.txt', 'r') as file_names:
    name_list = re.split(',', file_names.readline())
name_list.sort()
for position, quoted_name in enumerate(name_list, start=1):
    # Names arrive quoted, e.g. '"MARY"'; strip the quotes before scoring.
    name = quoted_name.replace('"', '')
    total += position * sum(ord(letter) - 64 for letter in name)
print(total)
"samvbeckmann@gmail.com"
] | samvbeckmann@gmail.com |
fc019e03a0ec2faaedaaf366a1c30c010b4fbc68 | 97fcd33403e69e7e5bb60d27b7de73bb7c58b060 | /awacs/applicationinsights.py | 0d90c4b4f0fe2b10f6a8ea40d350fb33b7c02a67 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | isabella232/awacs | d2b132b527da6b6c2e89da26e9fdbc1d5ca7f191 | 41a131637c16a6912c17f92ac3bbf2a3bf978631 | refs/heads/master | 2023-01-09T07:45:22.199974 | 2020-11-16T05:11:01 | 2020-11-16T05:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
# Service metadata: human-readable name and the IAM action prefix used to
# qualify every Action/ARN built in this module.
service_name = 'CloudWatch Application Insights'
prefix = 'applicationinsights'
class Action(BaseAction):
    """An IAM action belonging to the ``applicationinsights`` namespace."""

    def __init__(self, action=None):
        # Qualify the bare action name with this service's prefix.
        super(Action, self).__init__(prefix, action)
class ARN(BaseARN):
    """An ARN scoped to the ``applicationinsights`` service."""

    def __init__(self, resource='', region='', account=''):
        # Delegate to the base ARN with this module's service prefix filled in.
        super(ARN, self).__init__(
            service=prefix, resource=resource, region=region, account=account)
# One module-level constant per IAM action exposed by
# CloudWatch Application Insights.
CreateApplication = Action('CreateApplication')
CreateComponent = Action('CreateComponent')
DeleteApplication = Action('DeleteApplication')
DeleteComponent = Action('DeleteComponent')
DescribeApplication = Action('DescribeApplication')
DescribeComponent = Action('DescribeComponent')
DescribeComponentConfiguration = Action('DescribeComponentConfiguration')
DescribeComponentConfigurationRecommendation = \
    Action('DescribeComponentConfigurationRecommendation')
DescribeObservation = Action('DescribeObservation')
DescribeProblem = Action('DescribeProblem')
DescribeProblemObservations = Action('DescribeProblemObservations')
ListApplications = Action('ListApplications')
ListComponents = Action('ListComponents')
ListProblems = Action('ListProblems')
UpdateApplication = Action('UpdateApplication')
UpdateComponent = Action('UpdateComponent')
UpdateComponentConfiguration = Action('UpdateComponentConfiguration')
"mark@peek.org"
] | mark@peek.org |
8df96bdca93c8ac21e2f1bbfaf44925c191e836e | e7e497b20442a4220296dea1550091a457df5a38 | /main_project/release-gyp/user/user_cache.gyp | 8ad0946152bb7f37f49b7e30cfb8de571639ce27 | [] | no_license | gunner14/old_rr_code | cf17a2dedf8dfcdcf441d49139adaadc770c0eea | bb047dc88fa7243ded61d840af0f8bad22d68dee | refs/heads/master | 2021-01-17T18:23:28.154228 | 2013-12-02T23:45:33 | 2013-12-02T23:45:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,361 | gyp | {
#包含公共设置
'includes':[
'user_common.gyp',
],
'variables':{
'service_name' : 'UserCache',
'service_src_path' : '<(main_project_path)/user/<(service_name)/',
},
'target_defaults' : {
'include_dirs' : [
'/usr/local/distcache-dev/include',
'/usr/local/distcache-util/include/',
'/usr/local/distcache/include/',
'<(main_project_path)/tripod-new/src/cpp/include',
'<(main_project_path)/TalkUtil/src',
'<(main_project_path)/third-party/include/',
'<(main_project_path)/third-party/apr/include/apr-1',
'<(main_project_path)/third-party/libactivemq/include/activemq-cpp-3.4.1',
'<(main_project_path)/third-party/redis-c-driver/',
'<(main_project_path)/message_pipe/src/cpp/',
],
'link_settings' : {
'libraries' : [
'-L../third-party/libactivemq/lib',
'-lactivemq-cpp',
      # Using only xce-dev or only third-party alone causes problems, so both lib paths are added.
'-L../third-party/apr/lib',
'-L/usr/local/xce-dev/lib64',
'-lapr-1', '-laprutil-1',
'-L/usr/local/distcache-util/lib',
'-lrdc-client',
],
'ldflags': [
'-Wl,-rpath /usr/local/xce-dev/lib64',
'-Wl,-rpath /usr/lib64',
],
},
},
'targets' : [
######################################################
{
'target_name' : 'DistUserCacheReloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/dist/reloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : '<(service_name)',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/src -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCacheAgent',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/agent -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCacheReloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/reloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCachePreloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/preloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : '<(service_name)Test',
'type' : 'executable',
'dependencies' : [
'../gtest.gyp:gtest',
'../gtest.gyp:gtest_main',
'./user_slice_and_adapter.gyp:*'
# '../3rdparty.gyp:hiredis',
# '../base.gyp:base',
# '../xce_base.gyp:xce_base',
#'../tripod2.gyp:tripod_core',
],
'sources' : [
],
},
] #end targets
}
| [
"liyong19861014@gmail.com"
] | liyong19861014@gmail.com |
aab904e0dd9171dd8ec4654f1d5645563f17a7d9 | 135deefb66effbc14dd80cebfea8a0878704bcbd | /settings/config.py | 7035053dd3a861754b1ac9816e6fd18bfbc710e3 | [] | no_license | pedroagrodrigues/SimplePythonServer | be4817c738e7e843315ba2f89429ffd21419e88f | 71cc51fe0d3aa8e782d36a5bd4790edad8ede329 | refs/heads/main | 2023-02-10T16:21:37.224280 | 2020-12-17T21:39:04 | 2020-12-17T21:39:04 | 310,068,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | #initialization variables
# Development-mode flag (presumably read by the app factory -- TODO confirm
# callers; it is not used within this module).
development = True

# Be careful: using 0.0.0.0 as the default makes the server visible on your
# local network at your device's IP.
NameServer = "127.0.0.1:8080"
class Config(object):
    """Base Flask configuration shared by every environment."""

    DEBUG = False
    TESTING = False
    # SERVER_NAME comes from the module-level NameServer variable.  Catch
    # only NameError (variable missing) instead of the original bare
    # except:, which would have hidden any unrelated failure; also report
    # what actually happens (SERVER_NAME stays unset -- no default is
    # applied, contrary to the original message's claim of 127.0.0.1:5000).
    try:
        SERVER_NAME = NameServer
    except NameError:
        print("No Name Server defined; SERVER_NAME left unset")
    ENV = "production"
class ProductionConfig(Config):
    # Production uses the base-class defaults unchanged.
    pass

class DevelopmentConfig(Config):
    # Development mode: enables the Flask debugger/reloader.
    ENV = "development"
    DEBUG = True

class TestingConfig(Config):
    # Testing mode: switches on Flask's test helpers.
    TESTING = True
| [
"pedroagrodrigues@gmail.com"
] | pedroagrodrigues@gmail.com |
ae0cbcf21e4403bb670df1175cdd43b21d2172f2 | f47c540d5e8d1b773de57a9e66e0ad2af9b13e9c | /mwana/apps/alerts/tests.py | 6890ca7b162ff7dce5fdd1174ed4c00a4dcd90a8 | [] | no_license | mwana/mwana | e70d1b0c59f2f3ad1300452f58b908a57210415d | 698c2f693f2c49f6dc41c1c0b6669300b619b579 | refs/heads/develop | 2021-01-17T10:11:17.502708 | 2016-02-15T10:56:56 | 2016-02-15T10:56:56 | 600,738 | 5 | 6 | null | 2016-02-03T19:12:45 | 2010-04-08T13:24:18 | Python | UTF-8 | Python | false | false | 6,188 | py | # vim: ai ts=4 sts=4 et sw=4
from datetime import timedelta
import time
from django.conf import settings
from mwana.apps.alerts import tasks as smstasks
from mwana.apps.alerts.models import SMSAlertLocation
from mwana.apps.labresults.models import Result
from mwana.apps.labresults.testdata.reports import *
from mwana.apps.locations.models import Location
from mwana.apps.locations.models import LocationType
import mwana.const as const
from rapidsms.models import Contact
from rapidsms.tests.scripted import TestScript
class SMSAlertsSetUp(TestScript):
    """Shared fixture for the SMS-alert tests.

    Builds a province/district/clinic location tree for Luapula, enables
    alerts for Mansa district only, creates stale (60-day-old) lab results,
    and registers PHO/DHO/clinic-worker/CBA contacts via ``join`` messages.
    """

    def setUp(self):
        # this call is required if you want to override setUp
        super(SMSAlertsSetUp, self).setUp()
        # Location types, then the province -> district -> clinic hierarchy.
        self.type = LocationType.objects.get_or_create(singular="clinic", plural="clinics", slug=const.CLINIC_SLUGS[2])[0]
        self.type1 = LocationType.objects.get_or_create(singular="district", plural="districts", slug="districts")[0]
        self.type2 = LocationType.objects.get_or_create(singular="province", plural="provinces", slug="provinces")[0]
        self.luapula = Location.objects.create(type=self.type2, name="Luapula Province", slug="400000")
        self.mansa = Location.objects.create(type=self.type1, name="Mansa District", slug="403000", parent=self.luapula)
        self.samfya = Location.objects.create(type=self.type1, name="Samfya District", slug="402000", parent=self.luapula)
        self.kawambwa = Location.objects.create(type=self.type1, name="Kawambwa District", slug="401000", parent=self.luapula)
        self.mibenge = Location.objects.create(type=self.type, name="Mibenge Clinic", slug="403029", parent=self.mansa, send_live_results=True)
        self.kashitu = Location.objects.create(type=self.type, name="Kashitu Clinic", slug="402026", parent=self.samfya, send_live_results=True)
        self.mansa_central = Location.objects.create(type=self.type, name="Central Clinic", slug="403012", parent=self.mansa, send_live_results=True)
        self.salanga = Location.objects.create(type=self.type, name="Salanga Clinic", slug="401012", parent=self.kawambwa, send_live_results=True)
        self.assertEqual(SMSAlertLocation.objects.count(), 0)
        # Enable only mansa district to receive SMS alerts
        SMSAlertLocation.objects.create(enabled=True, district=self.mansa)
        SMSAlertLocation.objects.create(enabled=False, district=self.samfya)
        from datetime import datetime
        today = datetime.today()
        # 60 days old, i.e. well past any retrieval deadline.
        late = today - timedelta(days=60)
        # let clinics from different districts have pending results
        Result.objects.create(clinic=self.mibenge, arrival_date=late, result="N", notification_status='notified')
        Result.objects.create(clinic=self.salanga, arrival_date=late, result="N", notification_status='notified')
        Result.objects.create(clinic=self.mansa_central, arrival_date=late, result="N", notification_status='notified')
        # mark the clinic as having received results before (active)
        Result.objects.create(clinic=self.mibenge, arrival_date=late, result="N", notification_status='sent')
        Result.objects.create(clinic=self.salanga, arrival_date=late, result="N", notification_status='sent')
        Result.objects.create(clinic=self.mansa_central, arrival_date=late, result="N", notification_status='sent')
        # register staff for the clinics and also their districts and provinces
        self.assertEqual(Contact.objects.count(), 0, "Contact list is not empty")
        #create different users - control and non control
        script = """
            luapula_pho > join pho 400000 Luapula PHO 1111
            mansa_dho > join dho 403000 Mansa DHO 1111
            samfya_dho > join dho 402000 Samfya DHO 1111
            kawambwa_dho > join dho 401000 Kawambwa DHO 1111
            salanga_worker > join clinic 401012 Salanga Man 1111
            mibenge_worker > join clinic 403029 Mibenge Man 1111
            kashitu_worker > join clinic 402026 kashitu Man 1111
            cental_worker > join clinic 403012 Central Man 1111
            mibenge_cba > join cba 403029 1 Mibenge CBA
            kashitu_cba > join cba 402026 2 kashitu cba
            central_cba1 > join cba 403012 3 Central cba1
            central_cba2 > join cba 403012 4 Central cba2
        """
        self.runScript(script)
        # One registration confirmation is expected per contact.
        self.assertEqual(Contact.objects.count(), 12)
        msgs = self.receiveAllMessages()
        self.assertEqual(12, len(msgs))

    def tearDown(self):
        # this call is required if you want to override tearDown
        super(SMSAlertsSetUp, self).tearDown()
class TestSendingSMSAlerts(SMSAlertsSetUp):
    """Exercises the scheduled alert tasks against the shared fixture.

    Only Mansa district has alerts enabled in setUp, so each task should
    produce exactly one message, addressed to the Mansa DHO.
    """

    def testClinicsNotRetrievingResultsAlerts(self):
        # Clinics with 'notified' (but never retrieved) results trigger the alert.
        time.sleep(.1)
        self.startRouter()
        smstasks.send_clinics_not_retrieving_results_alerts(self.router)
        msgs = self.receiveAllMessages()
        self.assertEqual(len(msgs), 1)
        self.assertEqual(msgs[0].text, "ALERT! Mansa Dho, Clinics haven't retrieved results: Mibenge Clinic, Central Clinic")
        self.stopRouter()

    def testHubsNotSendingDbsAlerts(self):
        # No hub/lab data exists, hence the "Unkown" placeholders (the typo
        # mirrors the production message text).
        time.sleep(.1)
        self.startRouter()
        smstasks.send_hubs_not_sending_dbs_alerts(self.router)
        msgs = self.receiveAllMessages()
        self.assertEqual(len(msgs), 1)
        self.assertEqual(msgs[0].text, "The Mansa District district hub (Unkown hub) has not sent samples to Unkown lab in over 11 days.")
        self.stopRouter()

    def testClinicsNotSendingDbsAlerts(self):
        # from mansa district only central clinic used SENT keyword recently
        script = """
            cental_worker> sent 5
            cental_worker < Hello Central Man! We received your notification that 5 DBS samples were sent to us today from Central Clinic. We will notify you when the results are ready.
        """
        self.runScript(script)
        time.sleep(.1)
        self.startRouter()
        smstasks.send_clinics_not_sending_dbs_alerts(self.router)
        msgs = self.receiveAllMessages()
        self.assertEqual(len(msgs), 1)
        # Central Clinic reported via SENT, so only Mibenge is flagged.
        self.assertEqual(msgs[0].text, "ALERT! Mansa Dho, Clinics haven't sent DBS to hub: Mibenge Clinic")
        self.stopRouter()
| [
"sinkalation@gmail.com"
] | sinkalation@gmail.com |
6832e191748203e0bdf0348b62d123f4f9a026de | c3819b28c53d4b9cdfe6a7bccfbc42f91597abdd | /codingjobs/apps/job/forms.py | 605b6b54308bf752f968a4b148f08b4e98483210 | [] | no_license | alanrr33/tablonTrabajosDjango | 5954cb692ce6c50fd55f542e7d9f4d0413179e93 | 5e9be89f329911f8efa398cde597698dc8b5ffe3 | refs/heads/main | 2023-04-03T21:25:32.141486 | 2021-04-23T17:34:10 | 2021-04-23T17:34:10 | 360,956,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from django import forms
from .models import Job,Application
class AddJobForm(forms.ModelForm):
    """ModelForm for creating/editing a Job posting."""

    # Customise which Job model fields the form exposes.
    class Meta:
        model= Job
        fields=['title','short_description','long_description','company_name','company_address','company_zipcode','company_place','company_country','company_size']

class ApplicationForm(forms.ModelForm):
    """ModelForm for submitting an application to a Job."""

    class Meta:
        model=Application
        fields=['content','experience']
| [
"alanrr33@gmail.com"
] | alanrr33@gmail.com |
64a0e5f338a6de8f6c1df93824411ccbe8d8581d | d95409fcc56255d8855692326113fe117de1beb7 | /src/controllers/events.py | d5a9e153490ece9aa039b84f0e3ddd71c73990bd | [] | no_license | ymauray/hotshotpy | 2fa7370439d63fba3f320f8259ccfb43ac28cfa6 | 412e5ee114147deebd4dcaa92fd031a7d85dd88b | refs/heads/main | 2023-03-22T02:07:30.202964 | 2021-03-05T07:22:17 | 2021-03-05T07:22:17 | 342,718,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | import hotshotpy
from threading import current_thread
def get(query_string):
    """Return the current event id plus all events, newest first.

    Args:
        query_string: accepted for interface parity with the other
            controller handlers; not used here.

    Returns:
        dict with keys "current_event_id" and "events" (list of dicts,
        one per row, ordered by id descending).
    """
    conn = hotshotpy.create_connection()
    try:
        c = conn.cursor()
        c.execute("""select current_event_id from params;""")
        current_event_id = c.fetchall()[0][0]
        c.close()

        c = conn.cursor()
        c.execute("""select * from events order by id desc;""")
        rows = c.fetchall()
        c.close()
    finally:
        # The original version leaked the connection; always release it.
        conn.close()

    # Rows appear to be sqlite3.Row-like (support .keys()) -- TODO confirm
    # against hotshotpy.create_connection's row factory.
    events = [{key: row[key] for key in row.keys()} for row in rows]

    return {
        "current_event_id": current_event_id,
        "events": events
    }
def set_current(query_string):
    """Persist the event id from ``?current=<id>`` as the current event.

    Raises:
        ValueError: if the supplied id is not an integer.
    """
    # Cast to int so the value interpolated into the SQL string below
    # cannot carry a SQL-injection payload (execute_sql only accepts a
    # plain SQL string, so parameter binding is not available here).
    current_event_id = int(query_string['current'][0])
    conn = hotshotpy.create_connection()
    sql = f"""update params set current_event_id = {current_event_id};"""
    hotshotpy.execute_sql(conn, sql)
    conn.commit()
    conn.close()
"yannick.mauray@gmail.com"
] | yannick.mauray@gmail.com |
6c4341ff0a02d05d703a313b230f2b20432ee739 | 8e92bc6503aaf7b72befc629c35aa7a31163b950 | /Assignment 7.2.py | 7ae0c4a0783e5ecfa4de17fdcfb76c463d221748 | [] | no_license | dandeco/Python-Data-Structures_University-of-Michigan | c6d42b7f1af5519b3449762db90fee58df417b04 | 473e60f54131add80fcd719d0dbc9e3e873a09a8 | refs/heads/master | 2023-04-21T01:51:50.162717 | 2021-05-12T01:54:39 | 2021-05-12T01:54:39 | 364,479,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #SPOILER ALERT - MY CODE WAS NOT ACCEPTED BY THE AUTOGRADER,
# Fixed version.  The original never accumulated across iterations: it
# rebuilt "total" from a constant 0 every pass and added the current value
# twice, printing 0.0733... instead of the expected 0.7507185185185187
# for mbox-short.txt.


def average_spam_confidence(lines):
    """Return the mean of the floats on 'X-DSPAM-Confidence:' lines.

    Args:
        lines: iterable of strings (e.g. an open file).

    Raises:
        ZeroDivisionError: if no matching line is present.
    """
    count = 0
    total = 0.0
    for line in lines:
        if not line.startswith("X-DSPAM-Confidence:"):
            continue
        count = count + 1
        # Everything after the ':' parses as the confidence value.
        total = total + float(line[line.find(':') + 1:])
    return total / count


if __name__ == "__main__":
    # Use the file name mbox-short.txt as the file name
    fname = input("Enter file name: ")
    # NOTE: like the original, the prompt's answer is ignored and
    # mbox-short.txt is always read.
    with open('mbox-short.txt', 'r') as fh:
        avg = average_spam_confidence(fh)
    print("Average spam confidence:", avg)
| [
"zackpol123@gmail.com"
] | zackpol123@gmail.com |
e0fce4003873a8670986d5c14802f144ef931dea | 84e715389e7197bbed9c59f72d4f22e2fdd77898 | /db_update.py | 5e8728cb21180699a7b18d1e4be975f5d1f281ab | [] | no_license | palexster/usn-search | 3d71b3221558c3683c89f03f8975333c24b48124 | 7254c565d60db23c1446747b8b727220855f45e3 | refs/heads/master | 2021-04-06T05:33:28.376314 | 2018-02-06T16:11:10 | 2018-02-06T16:11:10 | 124,539,738 | 1 | 0 | null | 2018-03-09T12:47:05 | 2018-03-09T12:47:05 | null | UTF-8 | Python | false | false | 3,236 | py | import re
import datetime
import requests
from pymongo import MongoClient
from bs4 import BeautifulSoup
from insert_mongo import store_cve
def verify_status(status):
    """Return True if *status* is a known Ubuntu CVE-tracker state.

    Replaces the original chained comparisons (which listed "ignored"
    twice) with a single set-membership test.
    """
    return status in {
        "released", "ignored", "not-affected", "needed",
        "needs-triage", "DNE", "pending",
    }
def parse_cve_page(db, html, cve):
    """Parse one Canonical CVE tracker page and store rows via store_cve.

    Stores a placeholder record (all-empty fields) when the page lacks
    package data or a table row has an empty status cell.
    """
    soup = BeautifulSoup(html, "lxml")
    packages = soup.find_all("div", class_="pkg")
    print("Storing {}...".format(cve))
    if packages:
        for package in packages:
            # Source package name for this section of the page.
            package_name = package.find("div", class_="value").a.text
            for tr in package.find_all("tr"):
                if len(tr.find_all("td")[1].text.split()) >= 1:
                    # First cell holds the Ubuntu release name (before ':'),
                    # second cell the status (first word).
                    ubuntu = tr.find_all("td")[0].text.partition(":")[0]
                    status = tr.find_all("td")[1].text.split()[0]
                    priority = soup.find("div", class_="item").find_all("div")[1].text
                    if 'released' in tr.find_all("td")[1].text and len(tr.find_all("td")[1].text.split()) > 1:
                        # "released (<version>)": extract the parenthesised version.
                        version = tr.find_all("td")[1].text.split()[1].split("(")[1].split(")")[0]
                        store_cve(db, package_name, ubuntu, version, cve, status, priority)
                    elif verify_status(status):
                        store_cve(db, package_name, ubuntu, "", cve, status, priority)
                else:
                    store_cve(db, "", "", "", cve, "", "")
    else:
        # No package sections on the page at all.
        store_cve(db, "", "", "", cve, "", "")
def export_cves():
    """Return the set of CVE identifiers already stored in MongoDB."""
    client = MongoClient()
    db = client.cve_ubuntu
    cursor = db.cve_ubuntu.find({}, {'cve': 1, '_id': 0})
    return {document['cve'] for document in cursor}
def check_cve_ubuntu(db):
    """Find CVE ids listed by Canonical that are missing from our database.

    Scrapes http://people.canonical.com/~ubuntu-security/cve/<year>/ for
    every year from 1999 through the current year.

    Args:
        db: Mongo database handle (unused here; kept for interface parity
            with the other functions in this module).

    Returns:
        set: CVE identifiers (in their original case) not yet stored.
    """
    missing = set()
    this_year = datetime.datetime.now().year
    cves = export_cves()  # ids already stored (lower-cased)
    # Raw string (the original "CVE-\d+-\d+" relies on an invalid string
    # escape); compiled once instead of per year.
    cve_pattern = re.compile(r"CVE-\d+-\d+")
    for year in range(1999, this_year + 1):
        count = 0
        print("Checking {}... ".format(year), end="")
        r = requests.get("http://people.canonical.com/~ubuntu-security/cve/{}/".format(year))
        try:
            r.raise_for_status()
            cves_regex = cve_pattern.findall(r.text)
            for cve in cves_regex:
                if cve.lower() not in cves:  # cve doesn't exist, we need to create it
                    if cve not in missing:
                        missing.add(cve)
                        count += 1
            print("{}/{}".format(len(cves_regex) - count, len(cves_regex)))
        except requests.exceptions.HTTPError:
            print('Error.')
    print("Total missing: {}".format(len(missing)))
    return missing
def download_cves(db, missing):
    """Fetch each missing CVE page from Canonical and store its contents."""
    for cve in missing:
        # Identifiers look like CVE-<year>-<number>.
        year = cve.split("-")[1]
        url = "http://people.canonical.com/~ubuntu-security/cve/{}/{}.html".format(year, cve)
        response = requests.get(url)
        parse_cve_page(db, response.text, cve)
def main():
    """Entry point: detect CVEs absent from MongoDB and download them."""
    client = MongoClient()
    db = client.cve_ubuntu
    missing = check_cve_ubuntu(db)
    if not missing:
        print("Nothing to update.")
    else:
        download_cves(db, missing)


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
de55aeff6d1ec0e012c0115d8ae17cb46f82f628 | 0ff389bf5ebd6f4b3a3269629f7be05b6a05bb64 | /core/domain/skill_fetchers_test.py | 7157cca1446a277c7f96c5e3e43ad22053b8e8e1 | [
"Apache-2.0"
] | permissive | hardikkat24/oppia | 24e2f7fb101c3fa2304b725b000286b7e583d9ae | c194b58bae8a1acdb6af1d123b0e1c9d7cca5fac | refs/heads/develop | 2023-07-18T00:50:28.439157 | 2021-07-25T10:07:35 | 2021-07-25T10:07:35 | 330,551,883 | 1 | 0 | Apache-2.0 | 2021-08-15T23:08:44 | 2021-01-18T04:14:16 | Python | UTF-8 | Python | false | false | 9,393 | py | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fetching skill domain objects."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import skill_domain
from core.domain import skill_fetchers
from core.domain import skill_services
from core.domain import state_domain
from core.platform import models
from core.tests import test_utils
import feconf
# Import the skill storage models through the platform registry.
(skill_models,) = models.Registry.import_models([models.NAMES.skill])


class SkillFetchersUnitTests(test_utils.GenericTestBase):
    """Tests for skill fetchers."""

    # Filled in by setUp() with a freshly generated skill id.
    SKILL_ID = None
    USER_ID = 'user'
    MISCONCEPTION_ID_1 = 1

    def setUp(self):
        """Creates an admin user and a skill (with contents, one
        misconception and two prerequisite skills) for the tests below."""
        super(SkillFetchersUnitTests, self).setUp()
        example_1 = skill_domain.WorkedExample(
            state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
            state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
        )
        skill_contents = skill_domain.SkillContents(
            state_domain.SubtitledHtml('1', '<p>Explanation</p>'), [example_1],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            })
        )
        misconceptions = [skill_domain.Misconception(
            self.MISCONCEPTION_ID_1, 'name', '<p>description</p>',
            '<p>default_feedback</p>', True)]
        self.SKILL_ID = skill_services.get_new_skill_id()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])
        self.skill = self.save_new_skill(
            self.SKILL_ID, self.USER_ID, description='Description',
            misconceptions=misconceptions,
            skill_contents=skill_contents,
            prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])

    def test_get_multi_skills(self):
        # Fetching two existing skills returns them in request order;
        # including a missing id raises.
        example_1 = skill_domain.WorkedExample(
            state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
            state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
        )
        self.save_new_skill(
            'skill_a', self.user_id_admin, description='Description A',
            misconceptions=[],
            skill_contents=skill_domain.SkillContents(
                state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
                [example_1],
                state_domain.RecordedVoiceovers.from_dict({
                    'voiceovers_mapping': {
                        '1': {}, '2': {}, '3': {}
                    }
                }),
                state_domain.WrittenTranslations.from_dict({
                    'translations_mapping': {
                        '1': {}, '2': {}, '3': {}
                    }
                })
            )
        )
        self.save_new_skill(
            'skill_b', self.user_id_admin, description='Description B',
            misconceptions=[],
            skill_contents=skill_domain.SkillContents(
                state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
                [example_1],
                state_domain.RecordedVoiceovers.from_dict({
                    'voiceovers_mapping': {
                        '1': {}, '2': {}, '3': {}
                    }
                }),
                state_domain.WrittenTranslations.from_dict({
                    'translations_mapping': {
                        '1': {}, '2': {}, '3': {}
                    }
                })
            )
        )

        skills = skill_fetchers.get_multi_skills(['skill_a', 'skill_b'])

        self.assertEqual(len(skills), 2)

        self.assertEqual(skills[0].id, 'skill_a')
        self.assertEqual(skills[0].description, 'Description A')
        self.assertEqual(skills[0].misconceptions, [])

        self.assertEqual(skills[1].id, 'skill_b')
        self.assertEqual(skills[1].description, 'Description B')
        self.assertEqual(skills[1].misconceptions, [])

        with self.assertRaisesRegexp(
            Exception, 'No skill exists for ID skill_c'):
            skill_fetchers.get_multi_skills(['skill_a', 'skill_c'])

    def test_get_skill_by_id(self):
        # Round-trip: the fetched skill matches what setUp saved.
        expected_skill = self.skill.to_dict()
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        self.assertEqual(skill.to_dict(), expected_skill)

    def test_get_skill_from_model_with_invalid_skill_contents_schema_version(
            self):
        # A skill_contents schema version of 0 is out of the supported
        # v1..CURRENT range and must raise.
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[],
            rubrics=[],
            next_misconception_id=0,
            misconceptions_schema_version=1,
            rubric_schema_version=1,
            skill_contents_schema_version=0,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            self.user_id_admin, 'skill model created', commit_cmd_dicts)

        with self.assertRaisesRegexp(
            Exception,
            'Sorry, we can only process v1-v%d skill schemas at '
            'present.' % feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION):
            skill_fetchers.get_skill_from_model(model)

    def test_get_skill_from_model_with_invalid_misconceptions_schema_version(
            self):
        # Same as above, but for the misconceptions schema version.
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[],
            rubrics=[],
            next_misconception_id=0,
            misconceptions_schema_version=0,
            rubric_schema_version=3,
            skill_contents_schema_version=2,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            self.user_id_admin, 'skill model created', commit_cmd_dicts)

        with self.assertRaisesRegexp(
            Exception,
            'Sorry, we can only process v1-v%d misconception schemas at '
            'present.' % feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION):
            skill_fetchers.get_skill_from_model(model)

    def test_get_skill_from_model_with_invalid_rubric_schema_version(self):
        # Same as above, but for the rubric schema version.
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[],
            rubrics=[],
            next_misconception_id=0,
            misconceptions_schema_version=2,
            rubric_schema_version=0,
            skill_contents_schema_version=2,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            self.user_id_admin, 'skill model created', commit_cmd_dicts)

        with self.assertRaisesRegexp(
            Exception,
            'Sorry, we can only process v1-v%d rubric schemas at '
            'present.' % feconf.CURRENT_RUBRIC_SCHEMA_VERSION):
            skill_fetchers.get_skill_from_model(model)

    def test_get_skill_from_model_with_description(self):
        # Lookup by description returns the matching skill, or None.
        self.assertEqual(
            skill_fetchers.get_skill_by_description('Description').to_dict(),
            self.skill.to_dict()
        )
        self.assertEqual(
            skill_fetchers.get_skill_by_description('Does not exist'),
            None
        )

    def test_get_skill_by_id_with_different_versions(self):
        # After an update, version=1 returns the original snapshot and
        # version=2 the updated one.
        changelist = [
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
                'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
                'old_value': 'en',
                'new_value': 'bn'
            })
        ]
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist, 'update language code')

        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID, version=1)
        self.assertEqual(skill.id, self.SKILL_ID)
        self.assertEqual(skill.language_code, 'en')

        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID, version=2)
        self.assertEqual(skill.id, self.SKILL_ID)
        self.assertEqual(skill.language_code, 'bn')
"noreply@github.com"
] | noreply@github.com |
be9021cb1764a95862447e0983fdcff4264e2cde | 9aa44a8c5405e269fba97d9d63fbaadbe49ba73e | /Data preprocessing/dataset.py | b2a101a6acbb3bff119021361948c90c02d7e130 | [] | no_license | imrahul361/Machine-Learning-Algorithms-and-Templates-in-Python-and-R | 9de9c26df3b448dd48c7a2ba34c59345a0456671 | e9ef656f4e6ea37c51b4a513271e7b0306de1391 | refs/heads/master | 2020-07-07T02:52:32.078929 | 2019-10-27T11:20:01 | 2019-10-27T11:20:01 | 203,222,555 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 2 17:13:11 2019
@author: rey10
"""
#Importing the Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# --- Load the dataset ---------------------------------------------------
# Features are every column but the last; the target is column index 3.
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values

# --- Split into training and test sets (80/20, fixed seed) --------------
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)

# --- Feature scaling (intentionally left disabled in this template) -----
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
| [
"imrahul3610@gmail.com"
] | imrahul3610@gmail.com |
9e6f62bedba6a56d1b1f15824a19730fab546a8c | 659d44eb7549cfa346a0224edc7559520d776529 | /InterpretPredictions.py | 475888022ecee26204ad43ccee84a9b503fe6943 | [] | no_license | willsmithorg/gridtools | 862db6a3573785128e7348dd1332b309fbb17cbb | cfe83b0a9b33992c68fd885ae10c5c954314c912 | refs/heads/master | 2023-04-18T19:38:34.536593 | 2021-05-03T00:57:48 | 2021-05-03T00:57:48 | 336,426,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,181 | py | import pandas as pd
import numpy as np
import logging
import warnings
# from xgboost import XGBRegressor
from Column import Column
from MakeNumericColumns import MakeNumericColumns
logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S', format='%(asctime)s.%(msecs)03d - %(filename)s:%(lineno)d - %(message)s')
class InterpretPredictions:
    """Turn per-column class-probability matrices into concrete predictions,
    confidence scores and suspected-error masks.

    Assumed input conventions (inferred from usage -- TODO confirm with
    callers):
      * ``results_labels``: dict of column name -> array of class labels,
        indexed by class position.
      * ``results_proba``: dict of column name -> (n_rows, n_classes)
        ndarray of per-row class probabilities.
      * ``observeddf``: DataFrame of the originally observed values.
    """

    def __init__(self):
        # MakeNumericColumns re-encodes raw columns as integer class ids so
        # that probability matrices can be indexed by the observed value.
        self.mnc = MakeNumericColumns()
        self.mnc.RegisterDefaultNumericers()

    # Get the prediction with the highest probability for each cell.
    # Return it back as 2 DataTables : the first is the prediction, the 2nd is the probability of the prediction.
    def SinglePredictionPerCell(self, results_labels, results_proba):
        """Return (predicted-labels DataFrame, predicted-probability DataFrame)."""
        dfpredicted = pd.DataFrame()
        dfprobas = pd.DataFrame()
        # For each column, get the index of the highest probability.
        for colname, proba in results_proba.items():
            max_indices = np.argmax(proba, axis=1)
            # Map the winning class index back to its label.
            prediction_labels = results_labels[colname][max_indices]
            prediction_proba = np.max(results_proba[colname], axis=1)
            dfpredicted[colname] = prediction_labels
            dfprobas[colname] = prediction_proba
        return dfpredicted, dfprobas

    # Display the probability percentage difference between the top 1 and the 2nd top percentages.
    # We consider this to be how confident we are.
    def Confidence(self, results_proba):
        """Return a DataFrame of per-cell confidence: P(top) - P(second)."""
        dfconfidence = pd.DataFrame()
        for colname, proba in results_proba.items():
            #print(colname)
            #print(proba)
            proba_sorted = np.sort(proba, axis=1)
            # We define confidence as the difference between the 1st and the 2nd highest probabilities.
            confidence = proba_sorted[:,-1]-proba_sorted[:,-2]
            dfconfidence[colname] = confidence
        return dfconfidence

    # Return a boolean array of the instances where we think the prediction is wrong.
    # In some cases we are confident that the observed is wrong, but don't have a strong prediction.
    # In other cases, we have a strong prediction
    def boolDifferences(self, observeddf, results_labels, results_proba):
        """Return a boolean DataFrame flagging cells suspected to be wrong.

        A cell is flagged if EITHER the observed value looks implausible
        OR we have a confident prediction that disagrees with it.
        """
        dfconfidence = self.Confidence(results_proba)
        dfpredicted, dfprobas = self.SinglePredictionPerCell(results_labels, results_proba)
        a = self._BoolDifferencesConfidentObservedWrong  (observeddf, results_labels, dfpredicted, results_proba, dfconfidence)
        b = self._BoolDifferencesConfidentPredictionCorrect(observeddf, results_labels, dfpredicted, results_proba, dfconfidence)
        return a | b

    # These are the differences when we are pretty sure that the prediction is accurate, and it's different
    # from observed.
    def _BoolDifferencesConfidentPredictionCorrect(self, observeddf, results_labels, dfpredicted, results_proba, dfconfidence):
        """Flag cells where a high-confidence prediction disagrees with the
        observed value, auto-tuning the confidence threshold per column.
        """
        boolDiff = pd.DataFrame()
        for colname in dfpredicted:
            # Keep tightening the confidence we expect in the top value
            # until we are getting <= 5% predicted-wrong errors. Otherwise we're just
            # spamming the column with errors.
            proportion = 1
            threshold = 0.7
            while proportion > 0.05 and threshold < 0.95:
                # Predicted <> Observed AND confident in predicted.
                boolDiff[colname] = dfpredicted[colname].ne(observeddf[colname]) & dfconfidence[colname].gt(threshold)
                proportion = np.mean(boolDiff[colname].astype(int))
                # Raise the bar and retry if too many cells were flagged.
                threshold += 0.05
        return boolDiff

    # These are the differences when we are sure the observed is wrong, but are not sure what the correct value is.
    def _BoolDifferencesConfidentObservedWrong(self, observeddf, results_labels, dfpredicted, results_proba, dfconfidence):
        """Flag cells whose OBSERVED value was assigned very low probability
        by the model, regardless of what the top prediction is.
        """
        boolDiff = pd.DataFrame()
        # We have to again map the input data to a numeric array, so we can index the correct column
        # of the results_proba, which contains the probability of each possible class of each row of the particular column.
        for colname in dfpredicted:
            #print(colname)
            column = Column(observeddf[colname])
            numericCol = self.mnc.ProcessColumn(column, 'Y').astype(int)
            #print(numericCol)
            # Probability the model assigned to the value actually observed.
            probs_for_observed = results_proba[colname][np.arange(results_proba[colname].shape[0]), numericCol]
            #print(probs_for_observed)
            # We don't want too many cells to fail on a given column.
            # So take the 2nd percentile on a given column or 20% probability of accuracy, whichever is lower.
            secondpercentile = np.percentile(probs_for_observed, 2)
            #print(firstpercentile)
            threshold = np.min([secondpercentile, 0.2])
            boolDiff[colname] = probs_for_observed < threshold
        return boolDiff
| [
"will@willsmith.org"
] | will@willsmith.org |
cc7c313990a752b0eea8829bbf89e10a65814597 | 06671e14ae54f887be05a64c632712537d38add6 | /integration_distributed_training/config_files/helios/13_repeat20x031/config_00166.py | e23ff123e7c6311a920a8756d990f5e9c1cc62bb | [] | no_license | Jessilee/ImportanceSamplingSGD | cf74a220a55b468b72fed0538b3a6740f532fcb2 | 0831b9b1833726391a20594d2b2f64f80e1b8fe2 | refs/heads/master | 2021-01-24T10:12:48.285641 | 2016-02-05T19:25:34 | 2016-02-05T19:25:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,175 | py | import numpy as np
import os
def get_model_config():
    """Build the model/training configuration dict for this experiment.

    Python 2 module (uses print statements). Values are hard-coded for the
    'svhn' run on the Helios cluster; the mnist branch deliberately aborts
    because its (Ntrain, Nvalid, Ntest) counts were never filled in.
    """
    model_config = {}
    #Importance sampling or vanilla sgd.
    model_config["importance_algorithm"] = "isgd"
    #model_config["importance_algorithm"] = "sgd"
    #Momentum rate, where 0.0 corresponds to not using momentum
    model_config["momentum_rate"] = 0.95
    #The learning rate to use on the gradient averaged over a minibatch
    model_config["learning_rate"] = 0.01
    #config["dataset"] = "mnist"
    model_config["dataset"] = "svhn"
    #config["dataset"] = "kaldi-i84"
    # NOTE(review): if "dataset" were set to anything other than the three
    # handled values, Ntrain/Nvalid/Ntest below would be undefined
    # (NameError) -- confirm this is acceptable before adding datasets.
    if model_config["dataset"] == "mnist":
        print "Error. Missing values of (Ntrain, Nvalid, Ntest)"
        quit()
        # Unreachable after quit(); kept for reference.
        model_config["num_input"] = 784
        model_config["num_output"] = 10
    elif model_config["dataset"] == "svhn":
        (Ntrain, Nvalid, Ntest) = (574168, 30220, 26032)
        model_config["num_input"] = 3072
        model_config["num_output"] = 10
        model_config["normalize_data"] = True
    elif model_config["dataset"] == "kaldi-i84":
        (Ntrain, Nvalid, Ntest) = (5436921, 389077, 253204)
        model_config["num_input"] = 861
        model_config["num_output"] = 3472
        model_config["normalize_data"] = False
    model_config['Ntrain'] = Ntrain
    model_config['Nvalid'] = Nvalid
    model_config['Ntest'] = Ntest
    # Pick one, depending where you run this.
    # This could be done differently too by looking at fuelrc
    # or at the hostname.
    #import socket
    #data_root = { "serendib":"/home/dpln/data/data_lisa_data",
    #              "lambda":"/home/gyomalin/ML/data_lisa_data",
    #              "szkmbp":"/Users/gyomalin/Documents/fuel_data"}[socket.gethostname().lower()]
    data_root = "/rap/jvb-000-aa/data/alaingui"
    model_config["mnist_file"] = os.path.join(data_root, "mnist/mnist.pkl.gz")
    model_config["svhn_file_train"] = os.path.join(data_root, "svhn/train_32x32.mat")
    model_config["svhn_file_extra"] = os.path.join(data_root, "svhn/extra_32x32.mat")
    model_config["svhn_file_test"] = os.path.join(data_root, "svhn/test_32x32.mat")
    model_config["kaldi-i84_file_train"] = os.path.join(data_root, "kaldi/i84_train.gz")
    model_config["kaldi-i84_file_valid"] = os.path.join(data_root, "kaldi/i84_valid.gz")
    model_config["kaldi-i84_file_test"] = os.path.join(data_root, "kaldi/i84_test.gz")
    model_config["load_svhn_normalization_from_file"] = True
    model_config["save_svhn_normalization_to_file"] = False
    model_config["svhn_normalization_value_file"] = os.path.join(data_root, "svhn/svhn_normalization_values.pkl")
    model_config["hidden_sizes"] = [2048, 2048, 2048, 2048]
    # Note from Guillaume : I'm not fond at all of using seeds,
    # but here it is used ONLY for the initial partitioning into train/valid.
    model_config["seed"] = 9999494
    #Weights are initialized to N(0,1) * initial_weight_size
    model_config["initial_weight_size"] = 0.01
    #Hold this fraction of the instances in the validation dataset
    model_config["fraction_validation"] = 0.05
    # Per-iteration schedules for the master and worker processes.
    model_config["master_routine"] = ["sync_params"] + ["refresh_importance_weights"] + (["process_minibatch"] * 32)
    model_config["worker_routine"] = ["sync_params"] + (["process_minibatch"] * 10)
    model_config["turn_off_importance_sampling"] = False
    # Sanity checks on the dataset sizes resolved above.
    assert model_config['Ntrain'] is not None and 0 < model_config['Ntrain']
    assert model_config['Nvalid'] is not None
    assert model_config['Ntest'] is not None
    return model_config
def get_database_config():
    """Build the Redis/database-side configuration dict for this experiment.

    Covers connection behaviour, experiment output paths, importance-weight
    policy, minibatch sizes and the measurements tracked per data segment.
    """
    # Try to connect to the database for at least 10 minutes before giving up.
    # When setting this to below 1 minute on Helios, the workers would give up
    # way to easily. This value also controls how much time the workers will
    # be willing to wait for the parameters to be present on the server.
    connection_setup_timeout = 10*60
    # Pick one, depending where you run this.
    # This could be done differently too by looking at fuelrc
    # or at the hostname.
    #import socket
    #experiment_root_dir = { "serendib":"/home/dpln/tmp",
    #                        "lambda":"/home/gyomalin/ML/tmp",
    #                        "szkmbp":"/Users/gyomalin/tmp"}[socket.gethostname().lower()]
    experiment_root_dir = "/rap/jvb-000-aa/data/alaingui/experiments_ISGD/00166"
    redis_rdb_path_plus_filename = os.path.join(experiment_root_dir, "00166.rdb")
    logging_folder = experiment_root_dir
    want_rdb_background_save = True
    # This is part of a discussion about when we should the master
    # start its training with uniform sampling SGD and when it should
    # perform importance sampling SGD.
    # The default value is set to np.Nan, and right now the criterion
    # to decide if a weight is usable is to check if it's not np.Nan.
    #
    # We can decide to add other options later to include the staleness
    # of the importance weights, or other simular criterion, to define
    # what constitutes a "usable" value.
    default_importance_weight = np.NaN
    #default_importance_weight = 1.0
    want_master_to_do_USGD_when_ISGD_is_not_possible = True
    master_usable_importance_weights_threshold_to_ISGD = 0.1 # cannot be None
    # The master will only consider importance weights which were updated this number of seconds ago.
    staleness_threshold_seconds = 20
    staleness_threshold_num_minibatches_master_processed = None
    # Guillaume is not so fond of this approach.
    importance_weight_additive_constant = 10.0
    serialized_parameters_format ="opaque_string"
    # These two values don't have to be the same.
    # It might be possible that the master runs on a GPU
    # and the workers run on CPUs just to try stuff out.
    workers_minibatch_size = 2048
    master_minibatch_size = 128
    # This is not really being used anywhere.
    # We should consider deleting it after making sure that it
    # indeed is not being used, but then we could argue that it
    # would be a good idea to use that name to automatically determine
    # the values of (Ntrain, Nvalid, Ntest).
    dataset_name='svhn'
    L_measurements=["individual_importance_weight", "individual_gradient_square_norm", "individual_loss", "individual_accuracy", "minibatch_gradient_mean_square_norm"]
    L_segments = ["train", "valid", "test"]
    #
    # The rest of this code is just checks and quantities generated automatically.
    #
    assert workers_minibatch_size is not None and 0 < workers_minibatch_size
    assert master_minibatch_size is not None and 0 < master_minibatch_size
    assert dataset_name is not None
    assert serialized_parameters_format in ["opaque_string", "ndarray_float32_tostring"]
    assert 0.0 <= master_usable_importance_weights_threshold_to_ISGD
    assert master_usable_importance_weights_threshold_to_ISGD <= 1.0
    return dict(connection_setup_timeout=connection_setup_timeout,
                workers_minibatch_size=workers_minibatch_size,
                master_minibatch_size=master_minibatch_size,
                dataset_name=dataset_name,
                L_measurements=L_measurements,
                L_segments=L_segments,
                want_only_indices_for_master=True,
                want_exclude_partial_minibatch=True,
                serialized_parameters_format=serialized_parameters_format,
                default_importance_weight=default_importance_weight,
                want_master_to_do_USGD_when_ISGD_is_not_possible=want_master_to_do_USGD_when_ISGD_is_not_possible,
                master_usable_importance_weights_threshold_to_ISGD=master_usable_importance_weights_threshold_to_ISGD,
                staleness_threshold_seconds=staleness_threshold_seconds,
                staleness_threshold_num_minibatches_master_processed=staleness_threshold_num_minibatches_master_processed,
                importance_weight_additive_constant=importance_weight_additive_constant,
                logging_folder=logging_folder,
                redis_rdb_path_plus_filename=redis_rdb_path_plus_filename,
                want_rdb_background_save=want_rdb_background_save)
def get_helios_config():
    """Return Helios-scheduler overrides; an empty dict means defaults."""
    helios_overrides = {}
    return helios_overrides
| [
"gyomalin@gmail.com"
] | gyomalin@gmail.com |
50cd5197dbde5407d096cd6bf38e891490a82a9c | fd2582f33b2be357f17bcdebb208c7756ff9199d | /djangosession/urls.py | 31225394e61473cd5f5577ea1bdf9ba7facdb042 | [] | no_license | gangadharsa/djangosession | 6ab4826d6183bbb3ded78bd88d111902f9c714d7 | c8e4563115e63663c4c618ce25b555d38ad83a9b | refs/heads/master | 2020-05-17T04:17:30.039012 | 2019-04-25T20:19:22 | 2019-04-25T20:19:22 | 183,504,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | """djangosession URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# NOTE(review): 'url' is imported but unused below -- confirm before removing.
from django.conf.urls import url,include
# Routes: the Django admin, plus everything under cookieapp/ delegated to
# the cookieapp application's own URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('cookieapp/',include('cookieapp.urls'))
]
| [
"gangadharsahoo1991@gmail.com"
] | gangadharsahoo1991@gmail.com |
7fe7c4b64ac62153c70d4b3d0855529bed47e511 | dc425d30f0e3230bb961383e7da9d3ee35b9b9a5 | /MatrixOperations.py | 09519d1f8ee944ddda468b35412e1a7f1621825e | [] | no_license | csujaan/Python-Programs | e9ac0ce797c67d4d6856c3a5aae05efe3dc41879 | a0c1a35bbc7ab83ec5f9d192d571d63161287801 | refs/heads/master | 2021-05-01T15:45:33.249797 | 2018-02-10T17:50:36 | 2018-02-10T17:50:36 | 121,037,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,997 | py | r1 = int(raw_input("\nEnter number of rows of matrix 1 : "))
# Python 2 console script: reads two integer matrices, prints their
# transposes, and (dimensions permitting) their product, sum and difference.
c1 = int(raw_input("Enter number of columns of matrix 1 : "))
r2 = int(raw_input("\nEnter number of rows of matrix 2 : "))
c2 = int(raw_input("Enter number of columns of matrix 2 : "))
#______________________________
# creating matrices of size r*c
#------------------------------
# Mat1/Mat2: inputs; Mat3: product; Mat4/Mat5: sum/difference;
# Mat6/Mat7: transposes of Mat1/Mat2. All zero-initialised.
Mat1 = [[0 for j in range(c1)]for i in range(r1)]
Mat2 = [[0 for j in range(c2)]for i in range(r2)]
Mat3 = [[0 for j in range(c2)]for i in range(r1)]
Mat4 = [[0 for j in range(c1)]for i in range(r1)]
Mat5 = [[0 for j in range(c1)]for i in range(r1)]
Mat6 = [[0 for j in range(r1)]for i in range(c1)]
Mat7 = [[0 for j in range(r2)]for i in range(c2)]
#_____________________________
# assigning values in matrices
#-----------------------------
print "\n\nInput values of matrix 1 below:\n"
for i in range(r1):
    for j in range(c1):
        Mat1[i][j] = int(raw_input("Enter the value at position "+"("+str(i)+","+str(j)+")"+" : "))
print "\nInput values of matrix 2 below:\n"
for i in range(r2):
    for j in range(c2):
        Mat2[i][j] = int(raw_input("Enter the value at position "+"("+str(i)+","+str(j)+")"+" : "))
#__________________
# printing matrices
#------------------
print "\n\nMatrix 1 is:"
for i in range(r1):
    for j in range(c1):
        print Mat1[i][j],
    print
print "\nMatrix 2 is:"
for i in range(r2):
    for j in range(c2):
        print Mat2[i][j],
    print
#____________________________________
# calculate transpose of two matrices
#------------------------------------
for i in range(c1):
    for j in range(r1):
        Mat6[i][j] += Mat1[j][i]
for i in range(c2):
    for j in range(r2):
        Mat7[i][j] += Mat2[j][i]
print "\n\nTranspose of the matrix 1 is:"
for i in range(c1):
    for j in range(r1):
        print Mat6[i][j],
    print
print "\nTranspose of the matrix 2 is:"
for i in range(c2):
    for j in range(r2):
        print Mat7[i][j],
    print
#__________________________________
# calculate product of two matrices
#----------------------------------
# Product requires c1 == r2; 'flag' records whether it succeeded.
if c1!=r2 :
    print "\n\n***Matrices CANNOT be Multiplied***"
    print "\n\t_____________________"
    print "\tABORTING THE PROGRAM!"
    print "\t---------------------\n"
    flag=0
else :
    for i in range(r1):
        for j in range(c2):
            for k in range(c1):
                Mat3[i][j] += Mat1[i][k] * Mat2[k][j]
    flag=1
    print "\n\nProduct of these matrices is:"
    for i in range(r1):
        for j in range(c2):
            print Mat3[i][j],
        print
#____________________________________________________
# calculate addition and substraction of two matrices
#----------------------------------------------------
# NOTE(review): add/sub is skipped entirely whenever multiplication failed
# (flag == 0), even though addition only needs matching shapes -- and the
# first guard tests r1 != c2 rather than a shape condition. Both look like
# logic bugs; confirm intended behaviour before changing.
if flag!=0 :
    if r1!=c2 :
        print "\n\n**Matrices CANNOT be Added or Subtracted**"
        print "\n\t _____________________"
        print "\t Aborting the program!"
        print "\t ---------------------\n"
    elif r1!=r2 :
        print "\n\n**Matrices CANNOT be Added or Subtracted**"
        print "\n\t _____________________"
        print "\t Aborting the program!"
        print "\t ---------------------\n"
    elif c1!=c2 :
        print "\n\n**Matrices CANNOT be Added or Subtracted**"
        print "\n\t _____________________"
        print "\t Aborting the program!"
        print "\t ---------------------\n"
    else:
        for i in range(r1):
            for j in range(c1):
                Mat4[i][j] += Mat1[i][j] + Mat2[i][j]
        for i in range(r1):
            for j in range(c1):
                Mat5[i][j] += Mat1[i][j] - Mat2[i][j]
        print "\n\nSum of these matrices is:"
        for i in range(r1):
            for j in range(c1):
                print Mat4[i][j],
            print
        print "\n\nDifference of these matrices is:"
        for i in range(r1):
            for j in range(c1):
                print Mat5[i][j],
            print
else :
    pass
"noreply@github.com"
] | noreply@github.com |
9aeb54f76c07253c954009c25fbf4d01203fbe2b | 0e4be4f82ffcd8b74c5cae9a46642df1d798941c | /accounts/url_reset.py | be9173b84eda8c6fb007faaf59f1e33c0e154aac | [] | no_license | NinjaAiden/django-ecommerce | 796b9a2874ed033bd7fe32c387697e1ad52082f0 | a137be367e9cd4bfabf273f7a06a3827b70a50cc | refs/heads/master | 2022-11-27T13:10:28.608622 | 2019-07-10T10:29:55 | 2019-07-10T10:29:55 | 191,956,899 | 0 | 0 | null | 2022-11-22T02:09:53 | 2019-06-14T14:28:37 | Python | UTF-8 | Python | false | false | 684 | py | from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.views import password_reset, password_reset_done, password_reset_confirm, password_reset_complete
urlpatterns = [
url(r'^$', password_reset,
{'post_reset_redirect': reverse_lazy('password_reset_done')}, name="password_reset"),
url(r'^done/$', password_reset_done, name="password_reset_done"),
url(r'^(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm,
{'post_reset_redirect': reverse_lazy('password_reset_complete')}, name='password_reset_confirm'),
url(r'^complete/$', password_reset_complete, name='password_reset_complete')
] | [
"ubuntu@ip-172-31-35-128.ec2.internal"
] | ubuntu@ip-172-31-35-128.ec2.internal |
3e75ea7b2bf8866ced4df1f373e969df4f81413c | d86b618cad188ecb4ceb802249eaad3ac89b7b47 | /document_detection/hed/hed_train_data_generator.py | c438c3741bf1c1c9f95d0fbbdf4a273cf6148636 | [] | no_license | victoriest/deep-learning-playgroud | 3779ea8998d3aefcba9f854ec324a28379e56c02 | c4062fb1464d3fbaab837c9531a48cd83f4eb9e5 | refs/heads/master | 2022-12-04T13:43:24.794098 | 2021-01-26T01:50:07 | 2021-01-26T01:50:07 | 186,946,627 | 5 | 0 | null | 2022-11-21T21:07:28 | 2019-05-16T03:46:36 | Jupyter Notebook | UTF-8 | Python | false | false | 6,984 | py | """
通过文档图片, 以及背景图片, 生成含有文档元素的场景图片作为训练数据
"""
import os
import random
import shutil
import string
import cv2
import numpy as np
def make_image_height_greater_than_width(img_path):
    """Rotate every landscape image under ``img_path`` into portrait.

    Images whose height already exceeds their width are left untouched;
    all others are rotated 90 degrees and written back in place.
    """
    for parent, _dir_names, file_names in os.walk(img_path):
        for name in file_names:
            joined = os.path.join(parent, name)
            full_path = parent + '/' + name
            print(full_path, os.path.exists(joined))
            image = cv2.imread(full_path, cv2.IMREAD_COLOR)
            height, width, _ = image.shape
            if height <= width:
                cv2.imwrite(full_path, np.rot90(image))
def resize_image_to_normal(img_path):
    """Shrink every oversized image under ``img_path`` to 25% size, in place.

    An image counts as oversized when either dimension is 2000 px or more.
    """
    for parent, _dir_names, file_names in os.walk(img_path):
        for name in file_names:
            joined = os.path.join(parent, name)
            full_path = parent + '/' + name
            print(full_path, os.path.exists(joined))
            image = cv2.imread(full_path, cv2.IMREAD_COLOR)
            height, width, _ = image.shape
            if height >= 2000 or width >= 2000:
                shrunk = cv2.resize(image, (int(width * 0.25), int(height * 0.25)))
                cv2.imwrite(full_path, shrunk)
def random_transform(bg_img_path, t_img_path, target_img_path, gt_img_path):
    """Composite a document image onto a background with a random
    perspective warp, writing both the composite and its ground-truth mask.

    Writes the blended scene to ``target_img_path`` and a filled white
    quadrilateral (the document area) on black to ``gt_img_path``.
    NOTE: the four random.randint calls below must keep their exact order,
    since results under a seeded RNG depend on it.
    """
    bg_img = cv2.imread(bg_img_path, cv2.IMREAD_COLOR)
    t_img = cv2.imread(t_img_path, cv2.IMREAD_COLOR)
    (bg_h, bg_w, _) = bg_img.shape
    t_img = cv2.resize(t_img, (bg_w, bg_h))
    # Source quad: the full document; destination quad: random corners.
    pts1 = np.float32([[0, 0], [bg_w, 0], [0, bg_h], [bg_w, bg_h]])
    x, y = bg_w / 2, bg_h / 2
    # top-left corner
    x1, y1 = random.randint(int(x / 3), int(2 * x / 3)), random.randint(int(y / 4), int(y / 2))
    # top-right corner
    x2, y2 = bg_w - random.randint(int(x / 3), int(2 * x / 3)), random.randint(int(y / 4), int(y / 2))
    # bottom-left corner
    x3, y3 = random.randint(int(x / 10), int(x / 2)), bg_h - random.randint(int(x / 10), int(y / 3))
    # bottom-right corner
    x4, y4 = bg_w - random.randint(int(x / 10), int(x / 2)), bg_h - random.randint(int(x / 10), int(y / 3))
    pts2 = np.float32([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    t_img = cv2.warpPerspective(t_img, M, (bg_w, bg_h))
    # Now create a mask of logo and create its inverse mask also
    img2gray = cv2.cvtColor(t_img, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 0, 255, cv2.THRESH_BINARY) # this threshold value matters (was 254 originally)
    mask_inv = cv2.bitwise_not(mask)
    # Now black-out the area of logo in ROI
    img_bg = cv2.bitwise_and(bg_img, bg_img, mask=mask_inv)
    # Take only region of logo from logo image.
    img_fg = cv2.bitwise_and(t_img, t_img, mask=mask)
    # Put logo in ROI and modify the main image
    dst = cv2.add(img_fg, img_bg)
    cv2.imwrite(target_img_path, dst)
    # Ground truth: filled quadrilateral instead of the outline-only
    # variant kept commented out below.
    gt_img = np.zeros((bg_h, bg_w, 1), np.uint8)
    # gt_img = cv2.line(gt_img, (x1, y1), (x2, y2), 255, 2)
    # gt_img = cv2.line(gt_img, (x2, y2), (x4, y4), 255, 2)
    # gt_img = cv2.line(gt_img, (x3, y3), (x4, y4), 255, 2)
    # gt_img = cv2.line(gt_img, (x3, y3), (x1, y1), 255, 2)
    rect = np.array([[x1, y1], [x2, y2], [x4, y4], [x3, y3]])
    cv2.fillConvexPoly(gt_img, rect, (255, 255, 255))
    cv2.imwrite(gt_img_path, gt_img)
def gen_train_data(bg_dir, t_dir, dest_dir, gt_dir):
    """Generate 10000 synthetic training samples.

    Repeatedly picks a random background from ``bg_dir`` and a random
    document image from ``t_dir``, composites them via random_transform,
    and writes image/mask pairs with random 8-character names into
    ``dest_dir`` and ``gt_dir``. Random-call order must be preserved for
    reproducibility under a seeded RNG.
    """
    # Collect (first-char, full-path) tuples for all background images.
    bg_path = []
    g = os.walk(bg_dir)
    for path, dir_list, file_list in g:
        for file_name in file_list:
            d = os.path.join(path, file_name)
            bg_path.append((file_name[0], d))
    # Same for the document (foreground) images.
    t_path = []
    g = os.walk(t_dir)
    for path, dir_list, file_list in g:
        for file_name in file_list:
            d = os.path.join(path, file_name)
            t_path.append((file_name[0], d))
    bg_num = len(bg_path)
    t_num = len(t_path)
    for i in range(10000):
        bg_idx = random.randint(0, bg_num - 1)
        t_idx = random.randint(0, t_num - 1)
        bp = bg_path[bg_idx][1]
        tp = t_path[t_idx][1]
        # Random 8-char file name; image and mask share it.
        dest_file_name = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.jpg'
        dest_path = os.path.join(dest_dir, dest_file_name)
        gt_img_path = os.path.join(gt_dir, dest_file_name)
        print(i, bp, tp, dest_path, gt_img_path)
        random_transform(bp, tp, dest_path, gt_img_path)
def gen_train_pair_lst(data_path):
    """Write ``train_pair.lst`` pairing each training image with its mask.

    Walks ``data_path/data`` and writes one line per file of the form
    ``data/<name> gt/<name>`` (paths relative to ``data_path``) -- the
    list format consumed by HED-style trainers.
    """
    lst_file = os.path.join(data_path, 'train_pair.lst')
    with open(lst_file, 'w+') as f:
        g = os.walk(os.path.join(data_path, 'data'))
        for path, dir_list, file_list in g:
            for file_name in file_list:
                d1 = os.path.join('data', file_name)
                d2 = os.path.join('gt', file_name)
                print(d1, d2)
                # write(), not writelines(): the original passed a single
                # string to writelines(), which only worked because a str
                # iterates character-by-character.
                f.write(d1 + ' ' + d2 + '\n')
def zoom_out_train_data(data_path):
    """Halve the resolution of every image/mask pair under ``data_path``.

    For each file found in ``data_path/data``, both the image and its
    identically-named ground-truth mask in ``data_path/gt`` are resized
    to half their width and height, in place.
    """
    def _halve_in_place(image_file):
        image = cv2.imread(image_file, cv2.IMREAD_COLOR)
        height, width, _ = image.shape
        cv2.imwrite(image_file, cv2.resize(image, (int(width / 2), int(height / 2))))

    for _parent, _dir_names, file_names in os.walk(os.path.join(data_path, 'data')):
        for name in file_names:
            data_file = os.path.join(data_path, 'data', name)
            gt_file = os.path.join(data_path, 'gt', name)
            _halve_in_place(data_file)
            _halve_in_place(gt_file)
            print(data_file, gt_file)
def move_full_data_to_simple_data(src_path, dst_path, count=10000):
    """Copy a random sample of image/mask pairs from one tree to another.

    Both ``src_path`` and ``dst_path`` are expected to contain parallel
    ``data/`` and ``gt/`` subdirectories. ``count`` pairs are copied
    (default 10000, the original hard-coded value); if fewer pairs exist,
    all of them are copied instead of raising IndexError as the original
    code did.
    """
    simple_data_arr = []
    for path, dir_list, file_list in os.walk(os.path.join(src_path, 'data')):
        for file_name in file_list:
            simple_data_arr.append(file_name)
    random.shuffle(simple_data_arr)
    # Cap at the number of available files to avoid an IndexError.
    for file_name in simple_data_arr[:count]:
        shutil.copy(os.path.join(src_path, 'data', file_name),
                    os.path.join(dst_path, 'data', file_name))
        shutil.copy(os.path.join(src_path, 'gt', file_name),
                    os.path.join(dst_path, 'gt', file_name))
def resize_to_480(data_path):
    """Resize every image/mask pair under ``data_path`` to 480x480, in place.

    Pairs are matched by file name between ``data_path/data`` and
    ``data_path/gt``; a running counter is printed with each pair.
    """
    def _resize_in_place(image_file):
        image = cv2.imread(image_file, cv2.IMREAD_COLOR)
        cv2.imwrite(image_file, cv2.resize(image, (480, 480)))

    counter = 0
    for _parent, _dir_names, file_names in os.walk(os.path.join(data_path, 'data')):
        for name in file_names:
            data_file = os.path.join(data_path, 'data', name)
            gt_file = os.path.join(data_path, 'gt', name)
            _resize_in_place(data_file)
            _resize_in_place(gt_file)
            print(counter, data_file, gt_file)
            counter += 1
if __name__ == "__main__":
# gen_train_data('D:/_data/_hed/_pre/bg', 'D:/_data/_hed/_pre/target', 'D:/_data/_hed/_pre/data', 'D:/_data/_hed/_pre/gt')
# gen_train_pair_lst('D:/_data/_hed/_pre')
# zoom_out_train_data('D:/_data/_hed/_pre')
# move_full_data_to_simple_data('D:/_data/_hed/train', 'D:/_data/_hed/simple_train')
resize_to_480('D:/_data/_hed/simple_train')
| [
""
] | |
bc283233d13a00a77775533d8a86173d369ae022 | 2cb0e0322c28b0558e25c6db9c2e9dbd23c42ab6 | /dnsmanage/app/analysis_file.py | 7d47d0296428cf08afad63a8dc337480b6939aca | [] | no_license | laura5102/sexop | 6fabe304e3454a640ead812001adf933e8b16448 | 9b7b1115fef0469525e5f65ce29ca0fa2e3cf759 | refs/heads/master | 2021-01-21T11:11:01.883064 | 2017-04-01T06:57:30 | 2017-04-01T06:57:30 | 83,531,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | from django.template import Template, Context
from django.http import HttpResponse
def analysis_file(request):
    """Django view: render the server-manage template with placeholder
    context values and return it as an HttpResponse.

    NOTE(review): the template is read from a hard-coded absolute Windows
    path rather than through Django's template loaders -- confirm this is
    intentional before deploying elsewhere.
    """
    # 'with' guarantees the handle is closed even if read() or Template()
    # raises; the original only closed it on the success path.
    with open('F:/PycharmProjects/dnsmanage/templates/server_manage.html') as fp:
        t = Template(fp.read())
    html = t.render(Context({'user': 'John Smith',
                             'worker_processes': 'Super Lawn Mower',
                             'error_log': 'Outdoor Equipment',
                             'pid': 'pid',
                             'events': 'eventslog',
                             'http': 'http'}))
    return HttpResponse(html)
| [
"liulei@secoo.com"
] | liulei@secoo.com |
9ef493a2809ddfbd28285207b15e4a5638286568 | 7af81a5479e1a14546268c3470e7f270b8ebe709 | /sciki_learn/FeatureSelection.py | 724d457ec926514d552bad95a4af6623a5fe1d21 | [] | no_license | AP1003624/TianChiBigData | 4be0092bd5540060d743aaa2e1e182afd57a4e98 | 38167e97fe33dbdf9cc3e4d15d566ad69494593c | refs/heads/master | 2016-09-14T01:34:57.069171 | 2016-04-27T10:03:30 | 2016-04-27T10:03:30 | 57,135,192 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | # coding=utf8
# Create by 吴俊 on 2016/4/4
import urllib
import numpy as np
# Data Loading
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
raw_data = urllib.urlopen(url=url)
dataset = np.loadtxt(raw_data,delimiter=',')
X = dataset[:,0:7]
y = dataset[:,8]
# Feature Selection
from sklearn import metrics
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier()
model.fit(X,y)
# display the relative importance of each attribute
print(model.feature_importances_)
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
# create the RFE model and select 3 attributes
rfe = RFE(model,3)
rfe = rfe.fit(X,y)
# summarize the selection of the attributes
print(rfe.support_)
print(rfe.ranking_)
| [
"646706230@qq.com"
] | 646706230@qq.com |
91894a16131a27036b025a3bc5d448ed6f8bea85 | e5c53d23f2a9e86cfd47b7da4e1ae47e3e68ff98 | /ngcc_full_assemble.py | d538e56f71e43b5b2c5f94efa3aa2d22665776aa | [] | no_license | ndricke/CRT | 4271d19fd76e90dab194e8c930c55b2f28c3322d | ff50a332f56ee1ced344888f71ea1e8f12b6ffe8 | refs/heads/master | 2021-07-23T04:00:10.730331 | 2021-07-05T23:02:35 | 2021-07-05T23:02:35 | 166,302,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,512 | py | import sys
import pandas as pd
import numpy as np
import re
import CatO2Df
# Assemble per-catalyst energy-difference tables from previously generated
# CSVs, filter on Cat-O2 bond length, and write one cleaned CSV per catalyst.
indir = "~/work/CRT/autoq/"
df_m = pd.read_csv(indir+"mepyr1_assembled.csv")
df_tr = pd.read_csv(indir+"tetrid1_assembled.csv")
df_ty = pd.read_csv(indir+"tetry1_assembled.csv")
df_etr = pd.read_csv(indir+"order_tetridEnum_assembled.csv")
df_ety = pd.read_csv(indir+"order_tetryEnum_assembled.csv")
df_trAS = pd.read_csv(indir+"order_tetridAS_assembled.csv")
df_tyAS = pd.read_csv(indir+"order_tetryAS_assembled.csv")
### Picking which dataframe
#catalyst = "mepyr"
#catalyst = "tetry"
#catalyst = "tetrid"
catalyst = "all"
df_tr = pd.concat([df_tr, df_etr, df_trAS], sort=True)
df_ty = pd.concat([df_ty, df_ety, df_tyAS], sort=True)
# NOTE(review): df_ty_17 filters Active_Site == 18 and df_ty_20 filters == 21;
# presumably the names use a different (0-based?) numbering than the column —
# confirm against how Active_Site is assigned upstream.
df_ty_17 = df_ty[df_ty["Active_Site"] == 18]
df_ty_20 = df_ty[df_ty["Active_Site"] == 21]
diff_names = ['O2H', 'O', 'OH']
energy_names = ['CatalystOOH_Energy', 'CatalystO_Energy', 'CatalystOH_Energy']
# Per-catalyst reference-energy shifts (OOH, O, OH).
tetrid_shift =(-150.97535,-75.18120,-75.84124)
mepyr_shift =(-150.96147,-75.16639,-75.82740)
ty20_shift = (-150.96852,-75.18826,-75.83892)
ty17_shift = (-150.96756,-75.17159,-75.83235)
df_m = CatO2Df.AddEnergyDiffs(df_m, energy_names, diff_names, mepyr_shift)
#df_tr = CatO2Df.AddEnergyDiffs(df_tr, energy_names, diff_names, tetrid_shift)
#df_ty_17 = CatO2Df.AddEnergyDiffs(df_ty_17, energy_names, diff_names, ty17_shift)
#df_ty_20 = CatO2Df.AddEnergyDiffs(df_ty_20, energy_names, diff_names, ty20_shift)
# NOTE(review): mepyr_shift is applied to *all* catalysts here; the
# per-catalyst shifts above are commented out — confirm this is intentional
# (the "msh" output filenames below suggest a deliberate mepyr-shift run).
df_tr = CatO2Df.AddEnergyDiffs(df_tr, energy_names, diff_names, mepyr_shift)
df_ty_17 = CatO2Df.AddEnergyDiffs(df_ty_17, energy_names, diff_names, mepyr_shift)
df_ty_20 = CatO2Df.AddEnergyDiffs(df_ty_20, energy_names, diff_names, mepyr_shift)
#df_m_unshift = CatO2Df.AddEnergyDiffs(df_m, energy_names, diff_names)
#df_m_shift = CatO2Df.AddEnergyDiffs(df_m, energy_names, diff_names, mepyr_shift)
#df_tr_unshift = CatO2Df.AddEnergyDiffs(df_tr, energy_names, diff_names)
#df_ty_17 = CatO2Df.AddEnergyDiffs(df_ty_17, energy_names, diff_names)
#df_ty_20 = CatO2Df.AddEnergyDiffs(df_ty_20, energy_names, diff_names)
#print(df_m_unshift[["Tag", "O2H_diff"]]/27.211)
#print(df_m_unshift[["Tag", "O_diff"]]/27.211)
#print(df_m_unshift[["Tag", "OH_diff"]]/27.211)
#print()
#print(df_m_shift[["Tag", "O2H_diff"]])
#print(df_m_shift[["Tag", "O_diff"]])
#print(df_m_shift[["Tag", "OH_diff"]])
#print(df_tr_unshift[["Tag", "O2H_diff"]])
#print(df_tr_unshift[["Tag", "O_diff"]])
#print(df_tr_unshift[["Tag", "OH_diff"]])
#print()
#print(df_tr_shift[["Tag", "O2H_diff"]])
#print(df_tr_shift[["Tag", "O_diff"]])
#print(df_tr_shift[["Tag", "OH_diff"]])
catalyst_dict = {"mepyr":[df_m], "tetry":[df_ty_17, df_ty_20], "tetrid":[df_tr], "all":[df_m, df_tr, df_ty_17, df_ty_20]}
df = pd.concat(catalyst_dict[catalyst], sort=True)
#df_name_list = ["mepyr_cycle.csv", "tetrid_cycle.csv", "tetry17_cycle.csv", "tetry20_cycle.csv"]
df_name_list = ["mepyr_msh_cycle.csv", "tetrid_msh_cycle.csv", "tetry17_msh_cycle.csv", "tetry20_msh_cycle.csv"]
# Clean each frame (drop pandas index artifacts, keep physically reasonable
# Cat-O2 bond lengths) and write it out. NOTE: "all" writes only 4 files even
# though catalyst_dict["all"] has 4 frames — name order must match frame order.
for i, df in enumerate(catalyst_dict[catalyst]):
    df = df.drop("Unnamed: 0", 1)
    df = df.drop("Unnamed: 0.1", 1)
    df = df[df["Cat-O2_Bond_Length"] < 1.7]
    df = df[df["Cat-O2_Bond_Length"] > 1.3]
    #df = df.drop("Unnamed: 0.1.1", 1)
    #df = df.drop("Unopt_Cat-O2_Energy", 1)
    df.to_csv(df_name_list[i], index=False)
## This dataframe appears to have duplicates in tetrid, and they aren't all the same format
## This may or may not fix the issue of energies being super off from the bare catalyst
#df.to_csv("gcc_assembled.csv", index=False)
| [
"nathan.d.ricke@gmail.com"
] | nathan.d.ricke@gmail.com |
c03f696a49956f662cb99f55e7689b448d0c43ae | 61b409a6668aeaf6992bfee54ac00b731026e84a | /yunda_admin/migrations/0003_auto_20151011_1834.py | 463790b2c3ece4e45700746bbfd9147e17a8ed37 | [] | no_license | chuan137/yunda | 969be0639b8a95b12aa2644f3f8fbc9b88bab2dc | 31fb48fe05887668207b509f628e644b2ed7ad43 | refs/heads/master | 2021-01-20T20:14:23.483513 | 2016-07-27T22:14:08 | 2016-07-27T22:14:08 | 64,133,344 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Changes the created_at default on DepositEntry and DepositWithdraw to the
    # datetime.datetime.now *callable* (evaluated at each save), replacing
    # whatever default the previous migration declared.
    dependencies = [
        ('yunda_admin', '0002_auto_20150330_2055'),
    ]
    operations = [
        migrations.AlterField(
            model_name='depositentry',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime.now),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='depositwithdraw',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime.now),
            preserve_default=True,
        ),
    ]
| [
"root@ip-172-31-27-95.eu-central-1.compute.internal"
] | root@ip-172-31-27-95.eu-central-1.compute.internal |
d0240796168ac702939291c06a92514c321338ca | ed89770be8e0234b838dcc341111a3050a7d6f42 | /fileq.py | a5133aba37cdaf6308d2218bea4a636f51858274 | [] | no_license | Nishant0210/fastApi-demo | 0ddb05c0cffce3463987fd4433d42f5ef76709be | 73f3e88595097ac2741b53332cd7a1a590aa0e1a | refs/heads/master | 2023-08-29T12:03:50.347580 | 2021-10-28T11:13:05 | 2021-10-28T11:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | def sum(a,b):
c = a+b
return {
"ans":c
}
| [
"57897383+Nishant0210@users.noreply.github.com"
] | 57897383+Nishant0210@users.noreply.github.com |
73ee4399e91893c19eeda6ced4e9abc9adf5fb3e | 000a37a38588b86612d34af9a203d224aad92dc5 | /src/plotting/plotMutualInfo_alongEpoch.py | ed8feb01a89da461734924797682e694b9f0e674 | [] | no_license | arayabrain/multi-modal-integration | c88beb2f0f99f385b35bc10983aa5c60ff935f6a | 8f90634494c3e684e06bddfa963d266972298693 | refs/heads/master | 2021-04-03T07:47:15.825146 | 2018-03-29T07:46:47 | 2018-03-29T07:46:47 | 124,494,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | import pylab as plt
import numpy as np
import pickle
itrList = [0,1000,2000,3000,4000,5000];
lineType = [':','-.','--','-'];
for i in range(4):
pkl_file = open('data/mutualInfo_bin20_l'+str(i+1)+'_alongEpochs.pkl', 'rb')
data = pickle.load(pkl_file)
pkl_file.close();
IV = data[0];
IA = data[1];
MI = np.mean(data,axis=0);
plt.plot(itrList,MI,lineType[i],label="Avg. MI (input x layer "+str(i+1)+")");
# plt.plot(itrList,IA)
plt.ylabel("Information [bit]")
plt.xlabel("Epoch")
plt.title("Average mutual information")
plt.legend();
plt.show()
# # ## plot the info
# plt.subplot(4,1,4-i);
# plt.plot(-np.sort(-MI_untrained.flatten()),':',label="IV_untrained");
# plt.plot(-np.sort(-MI_shuffled.flatten()),'--',label="IV_shuffled");
# plt.plot(-np.sort(-MI_trained.flatten()),label="IV_trained");
# plt.title("Layer "+ str(4-i))
# plt.xlabel("s-r pair rank");
# plt.ylabel("Information [bit]")
# # plt.ylim((max(IV_trained.max(),IV_untrained.max())*-0.1,max(IV_trained.max(),IV_untrained.max())*1.1));
# plt.ylim([-0.1,1.1])
# plt.legend();
# # ## plot the info (one modality at time)
# plt.subplot(2,1,1);
# plt.plot(-np.sort(-IV_trained.flatten()),label="IV_trained");
# plt.plot(-np.sort(-IV_untrained.flatten()),label="IV_untrained");
# plt.title("Visual Input Unit x Encoded Unit")
# plt.xlabel("s-r pair rank");
# plt.ylabel("Mutual Information [bit]")
# # plt.ylim((max(IV_trained.max(),IV_untrained.max())*-0.1,max(IV_trained.max(),IV_untrained.max())*1.1));
# plt.ylim([-0.1,1.1])
# plt.legend();
#
# plt.subplot(2,1,2);
# plt.plot(-np.sort(-IA_trained.flatten()),label="IA_trained");
# plt.plot(-np.sort(-IA_untrained.flatten()),label="IA_untrained");
# plt.title("Audio Input Unit x Encoded Unit")
# plt.xlabel("s-r pair rank");
# plt.ylabel("Mutual Information [bit]")
# # plt.ylim((max(IA_trained.max(),IA_untrained.max())*-0.1,max(IA_trained.max(),IA_untrained.max())*1.1));
# plt.ylim([-0.1,1.1])
# plt.legend();
# plt.subplots_adjust(hspace=1.0)
# plt.show() | [
"aki.hero.ox@gmail.com"
] | aki.hero.ox@gmail.com |
bca8f5be65fd0c50e8e256d6f394c6d8aad0c6b9 | 7b9551100d42c6e97bf0ca98e972c255b2db5d99 | /thirdparty/phonenumbers/__init__.py | f98713a777e33d54dd61e5d4006884ab3ddfc7c2 | [] | no_license | xealot/vocalbroker | ae787da5ab8e3c120a41761e83d3c80a502dffbb | ad906392bbc57f5684f9609736ea38c21c2af6e1 | refs/heads/master | 2016-09-06T17:13:34.191127 | 2012-03-06T03:21:31 | 2012-03-06T03:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,382 | py | """Python phone number parsing and formatting library
Examples of use:
>>> import phonenumbers
>>> x = phonenumbers.parse("+442083661177", None)
>>> print x
Country Code: 44 National Number: 2083661177 Leading Zero: False
>>> type(x)
<class 'phonenumbers.phonenumber.PhoneNumber'>
>>> str(phonenumbers.format_number(x, phonenumbers.PhoneNumberFormat.NATIONAL))
'020 8366 1177'
>>> str(phonenumbers.format_number(x, phonenumbers.PhoneNumberFormat.INTERNATIONAL))
'+44 20 8366 1177'
>>> str(phonenumbers.format_number(x, phonenumbers.PhoneNumberFormat.E164))
'+442083661177'
>>> y = phonenumbers.parse("020 8366 1177", "GB")
>>> print y
Country Code: 44 National Number: 2083661177 Leading Zero: False
>>> x == y
True
>>>
>>> formatter = phonenumbers.AsYouTypeFormatter("US")
>>> print formatter.input_digit("6")
6
>>> print formatter.input_digit("5")
65
>>> print formatter.input_digit("0")
650
>>> print formatter.input_digit("2")
650-2
>>> print formatter.input_digit("5")
650-25
>>> print formatter.input_digit("3")
650-253
>>> print formatter.input_digit("2")
650-2532
>>> print formatter.input_digit("2")
(650) 253-22
>>> print formatter.input_digit("2")
(650) 253-222
>>> print formatter.input_digit("2")
(650) 253-2222
>>>
>>> text = "Call me at 510-748-8230 if it's before 9:30, or on 703-4800500 after 10am."
>>> for match in phonenumbers.PhoneNumberMatcher(text, "US"):
... print match
...
PhoneNumberMatch [11,23) 510-748-8230
PhoneNumberMatch [51,62) 703-4800500
>>> for match in phonenumbers.PhoneNumberMatcher(text, "US"):
... print phonenumbers.format_number(match.number, phonenumbers.PhoneNumberFormat.E164)
...
+15107488230
+17034800500
>>>
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# 'Some people, when confronted with a problem, think "I know,
# I'll use regular expressions." Now they have two problems.'
# -- jwz 1997-08-12
# Data class definitions
from .phonenumber import PhoneNumber, CountryCodeSource, FrozenPhoneNumber
from .phonemetadata import REGION_CODE_FOR_NON_GEO_ENTITY, NumberFormat, PhoneNumberDesc, PhoneMetadata
# Functionality
from .asyoutypeformatter import AsYouTypeFormatter
from .phonenumberutil import (COUNTRY_CODE_TO_REGION_CODE, SUPPORTED_REGIONS, UNKNOWN_REGION,
MatchType, NumberParseException, PhoneNumberFormat,
PhoneNumberType, ValidationResult,
convert_alpha_characters_in_number,
country_code_for_region,
country_code_for_valid_region,
example_number,
example_number_for_type,
example_number_for_non_geo_entity,
format_by_pattern,
format_in_original_format,
format_national_number_with_carrier_code,
format_national_number_with_preferred_carrier_code,
format_number_for_mobile_dialing,
format_number,
format_out_of_country_calling_number,
format_out_of_country_keeping_alpha_chars,
is_alpha_number,
is_nanpa_country,
is_number_match,
is_possible_number,
is_possible_number_string,
is_possible_number_with_reason,
is_valid_number,
is_valid_number_for_region,
length_of_geographical_area_code,
length_of_national_destination_code,
national_significant_number,
ndd_prefix_for_region,
normalize_digits_only,
number_type,
parse,
region_code_for_country_code,
region_code_for_number,
truncate_too_long_number,)
from .shortnumberutil import connects_to_emergency_number, is_emergency_number
from .phonenumbermatcher import PhoneNumberMatch, PhoneNumberMatcher, Leniency
from .geocoder import (area_description_for_number, country_name_for_number,
description_for_number, description_for_valid_number)
# Version number is taken from the upstream libphonenumber version
# together with an indication of the version of the Python-specific code.
__version__ = "4.6b1"
__all__ = ['PhoneNumber', 'CountryCodeSource', 'FrozenPhoneNumber',
'REGION_CODE_FOR_NON_GEO_ENTITY', 'NumberFormat', 'PhoneNumberDesc', 'PhoneMetadata',
'AsYouTypeFormatter',
# items from phonenumberutil.py
'COUNTRY_CODE_TO_REGION_CODE', 'SUPPORTED_REGIONS', 'UNKNOWN_REGION',
'MatchType', 'NumberParseException', 'PhoneNumberFormat',
'PhoneNumberType', 'ValidationResult',
'convert_alpha_characters_in_number',
'country_code_for_region',
'country_code_for_valid_region',
'example_number',
'example_number_for_type',
'example_number_for_non_geo_entity',
'format_by_pattern',
'format_in_original_format',
'format_national_number_with_carrier_code',
'format_national_number_with_preferred_carrier_code',
'format_number_for_mobile_dialing',
'format_number',
'format_out_of_country_calling_number',
'format_out_of_country_keeping_alpha_chars',
'is_alpha_number',
'is_nanpa_country',
'is_number_match',
'is_possible_number',
'is_possible_number_string',
'is_possible_number_with_reason',
'is_valid_number',
'is_valid_number_for_region',
'length_of_geographical_area_code',
'length_of_national_destination_code',
'national_significant_number',
'ndd_prefix_for_region',
'normalize_digits_only',
'number_type',
'parse',
'region_code_for_country_code',
'region_code_for_number',
'truncate_too_long_number',
# end of items from phonenumberutil.py
'connects_to_emergency_number', 'is_emergency_number',
'PhoneNumberMatch', 'PhoneNumberMatcher', 'Leniency',
'area_description_for_number',
'country_name_for_number',
'description_for_number',
'description_for_valid_number',
]
if __name__ == '__main__':  # pragma no cover
    # When run directly, execute the usage examples in the module docstring
    # above as doctests.
    import doctest
    doctest.testmod()
| [
"trey@ktrl.com"
] | trey@ktrl.com |
c01dc4f7c6ff37261a76993ea66294d68c5a26c6 | 60b7afe8d8f7e5663005e336ed6a7f5179a8b71a | /venv/bin/pip | b8abd8533446581f7c338fdc2681655eb20d0a30 | [] | no_license | Shristi19/LeetCode-30-Day-Challenge | bb70cb727298230e6686814e2ed9b94628fe878b | faa870a6fcf7d12eebd72363d7862d005e3ac095 | refs/heads/master | 2021-05-23T18:50:42.986456 | 2020-04-07T13:28:11 | 2020-04-07T13:28:11 | 253,424,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | #!/Users/shristijalan/PycharmProjects/LeetCode/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes (-script.py / .exe) from argv[0] so
    # pip sees its canonical program name, then dispatch to the registered
    # 'pip' console-script entry point and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"shristijalan@Shristis-MacBook-Air.local"
] | shristijalan@Shristis-MacBook-Air.local | |
3f581ac057c150a970aa9fb97e40dc6307aae90d | 56f2f420e6a492a5c0506f9dd763cc06f3d5e3ee | /cifar_mlp.py | 62606a28dde7e8adf531e310789587eafc92839f | [] | no_license | xiaohuji/image_semantic_analysis | 4ca5778b4f44fd91663a8e4bd923f128772675f0 | e35074fd4468c6dd7a2d76ce03eac48a4ecf992e | refs/heads/master | 2023-05-29T19:55:46.208868 | 2021-06-10T01:32:59 | 2021-06-10T01:32:59 | 356,190,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,632 | py | import time
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
import torchvision
import torch.utils.data
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from torch.autograd import Variable
from torchvision.transforms import transforms
def sklearn_mlp(x_train, x_test, y_train, y_test):
    """Grid-search an MLPClassifier over solvers and report test accuracy.

    Parameters are plain feature/label arrays. The search runs on all CPU
    cores (n_jobs=-1) with early stopping and a 20-iteration cap to keep the
    fits cheap; results are printed, nothing is returned.
    """
    x_train_data, y_train_data = x_train, y_train
    x_test_data, y_test_data = x_test, y_test
    param_grid = {"hidden_layer_sizes": [(100, 20)], "solver": ['adam', 'sgd', 'lbfgs'],
                  "max_iter": [20], "verbose": [True], "early_stopping": [True]}
    mlp = MLPClassifier()
    clf = GridSearchCV(mlp, param_grid, n_jobs=-1)
    clf.fit(x_train_data, y_train_data)
    # Bug fix: the original printed mlp.n_layers_ *before* fitting, but
    # n_layers_ only exists on a fitted estimator (AttributeError). Report it
    # from the best fitted estimator instead.
    print(clf.best_estimator_.n_layers_)
    print(clf.score(x_test_data, y_test_data))
    print(clf.get_params().keys())
    print(clf.best_params_)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(3072, 120)
self.d1 = nn.Dropout(p=0.5)
self.r1 = nn.ReLU()
self.fc2 = nn.Linear(120, 10)
def forward(self, x):
# x = x.view(-1, 16 * 5 * 5)
x = self.fc1(x)
x = self.d1(x)
x = self.r1(x)
x = self.fc2(x)
return x
def torch_mlp(train_n, test_n):
    """Train Net on the first train_n CIFAR-10 samples and evaluate on the
    first test_n test samples.

    Downloads CIFAR-10 into ./data on first use, trains for one epoch with
    SGD (lr=0.001, batch size 1) and prints the resulting test accuracy.
    """
    transform = transforms.Compose([
        # Scale pixel values from 0-255 down to the 0-1 range.
        transforms.ToTensor(),
        # Normalise each channel to -1..1; centred inputs keep the effective
        # input bias small, which speeds up convergence.
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    trainset = torchvision.datasets.CIFAR10(
        root='./data',
        train=True,
        download=True,
        transform=transform
    )
    testset = torchvision.datasets.CIFAR10(
        root='./data',
        train=False,
        download=True,
        transform=transform
    )
    # Bug fix: indexing a dataset (trainset[train_n]) returns a single
    # (image, label) pair, not the first N samples. Use Subset so the
    # DataLoaders iterate over the first train_n / test_n samples.
    train_subset = torch.utils.data.Subset(trainset, range(train_n))
    test_subset = torch.utils.data.Subset(testset, range(test_n))
    trainloader = torch.utils.data.DataLoader(train_subset, batch_size=1, shuffle=False)
    testloader = torch.utils.data.DataLoader(test_subset, batch_size=1, shuffle=False)
    net = Net()
    # Weight initialisation. The original applied Xavier and then immediately
    # overwrote it with the deprecated kaiming_normal; one scheme suffices.
    for m in net.modules():
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
    # Loss and optimiser.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001)
    for epoch in range(1):
        for i, data in enumerate(trainloader):
            inputs, labels = data
            # Bug fix: Net's first layer expects 3072 features, but the loader
            # yields (batch, 3, 32, 32); flatten before the forward pass.
            inputs = inputs.view(inputs.size(0), -1)
            inputs, labels = Variable(inputs), Variable(labels)
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    print("Finished Training")
    print("Beginning Testing")
    correct = 0
    total = 0
    for data in testloader:
        images, labels = data
        images = images.view(images.size(0), -1)  # flatten 3x32x32 -> 3072
        outputs = net(Variable(images))
        predicted = torch.max(outputs, 1)[1].data.numpy()
        total += labels.size(0)
        correct += (predicted == labels.data.numpy()).sum()
    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
if __name__ == '__main__':
    # download=True so a fresh checkout does not crash on a missing ./data.
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True)
    # Flatten the 32x32x3 uint8 images into 3072-dim feature vectors for the
    # (currently disabled) sklearn experiments.
    cifar_train = trainset.data.reshape((50000, 3072))
    cifar_train_label = trainset.targets
    cifar_test = testset.data.reshape((10000, 3072))
    cifar_test_label = testset.targets
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    # sklearn grid search kept disabled: hidden-layer search was too slow.
    # sklearn_mlp(cifar_train[:100], cifar_test[:20], cifar_train_label[:100], cifar_test_label[:20])
    # ---------------------------------------------------------------------------------
    # Time the torch MLP run. Bug fix: time.clock() was deprecated in 3.3 and
    # removed in Python 3.8; perf_counter() is the documented replacement.
    t1 = time.perf_counter()
    torch_mlp(train_n=10, test_n=10)
    t2 = time.perf_counter()
    print('svm_c_time:', t2 - t1)
| [
"767297037@qq.com"
] | 767297037@qq.com |
78cd2f33390faf608322f0ed7af9416a3db032e6 | ee1db6398a73882e750c86257b43390c5ec2a654 | /fpm/test_sample/__main__.py | 88d1cb6ff7d27bb7bcff44f25b9f0bd0d15376b0 | [
"MIT"
] | permissive | gscigala/packet-generation | c795131e202e59d16fc99eca61008b850df9c6c5 | 137b2853e57756a9ade1af2e95c8b2839f401121 | refs/heads/master | 2021-01-23T03:12:48.984559 | 2017-04-04T13:50:46 | 2017-04-04T13:50:46 | 86,057,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | import argparse
import logging
import logging.config
from pkg_resources import resource_filename
import gi
from gi.repository import GLib
from pipeline import Pipeline
LOG_CONFIG_PATH = resource_filename(__name__, "/data/logging.conf")
def main():
    """Entry point: configure logging, parse CLI flags, then build and run the
    GStreamer pipeline inside a GLib main loop (blocks until the loop quits)."""
    logging.config.fileConfig(LOG_CONFIG_PATH)
    log = logging.getLogger(__name__)

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-d", "--dot",
        help="generate dot (you have to export GST_DEBUG_DUMP_DOT_DIR)",
        action="store_true", default=False)
    cli_args = arg_parser.parse_args()

    loop = GLib.MainLoop()
    pipeline = Pipeline(cli_args.dot, loop)
    log.info("GStreamer pipeline created.")
    pipeline.run()
    log.info("Pipeline running.")
    loop.run()
if __name__ == "__main__":
main()
| [
"guillaume.scigala@smile.fr"
] | guillaume.scigala@smile.fr |
79ae6089ad6be6b58d2ffa5c5819cdeffca5037a | 5d6a464bcf381a44588d6a0a475f666bdc8b5f05 | /unittests/namespace_matcher_tester.py | a517fd7da3b64b22821666f82bf8e3b8183eb0f0 | [
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | iMichka/pygccxml | d4f2ac032a742f1cd9c73876f6ba6a85d2047837 | f872d056f477ed2438cd22b422d60dc924469805 | refs/heads/develop | 2023-08-05T04:35:32.774634 | 2017-01-10T06:04:17 | 2017-01-10T06:04:17 | 45,710,813 | 0 | 2 | BSL-1.0 | 2023-08-20T21:02:24 | 2015-11-06T22:14:37 | Python | UTF-8 | Python | false | false | 2,084 | py | # Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import unittest
import parser_test_case
from pygccxml import parser
from pygccxml import declarations
class Test(parser_test_case.parser_test_case_t):
    """Exercises namespace_matcher_t: matching by name, its string repr, and
    the allow_empty query option."""
    # Parse all test headers in a single translation unit.
    COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE
    def __init__(self, *args):
        parser_test_case.parser_test_case_t.__init__(self, *args)
        self.header = 'bit_fields.hpp'  # fixture header, parsed lazily in setUp
        self.declarations = None        # cached parse result (parsed only once)
    def setUp(self):
        # Parse the fixture once and reuse the result across test methods.
        if not self.declarations:
            self.declarations = parser.parse([self.header], self.config)
    def test(self):
        # get_single raises unless exactly one declaration matches the criteria.
        criteria = declarations.namespace_matcher_t(name='bit_fields')
        declarations.matcher.get_single(criteria, self.declarations)
        self.assertTrue(
            str(criteria) == '(decl type==namespace_t) and (name==bit_fields)')
    def test_allow_empty(self):
        # With allow_empty=True a non-matching query must return an empty
        # list instead of raising.
        global_ns = declarations.get_global_namespace(self.declarations)
        global_ns.init_optimizer()
        self.assertTrue(
            0 == len(global_ns.namespaces('does not exist', allow_empty=True)))
class unnamed_ns_tester_t(parser_test_case.parser_test_case_t):
    """Regression test: the global ('::') namespace must be matchable by name."""
    # Parse all test headers in a single translation unit.
    COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE
    def __init__(self, *args):
        parser_test_case.parser_test_case_t.__init__(self, *args)
        self.header = 'unnamed_ns_bug.hpp'  # header that reproduced the bug
        self.declarations = None            # cached parse result
    def setUp(self):
        # Parse the fixture header once, on first use.
        if not self.declarations:
            self.declarations = parser.parse([self.header], self.config)
    def test(self):
        # Exactly one namespace named '::' must be found, else get_single raises.
        declarations.matcher.get_single(
            declarations.namespace_matcher_t(name='::'), self.declarations)
def create_suite():
    """Build and return the test suite for this module.

    unittest.makeSuite() was deprecated in Python 3.11 and removed in 3.13;
    TestLoader.loadTestsFromTestCase is the supported replacement and yields
    the same suite contents.
    """
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    suite.addTest(loader.loadTestsFromTestCase(Test))
    suite.addTest(loader.loadTestsFromTestCase(unnamed_ns_tester_t))
    return suite
def run_suite():
    """Run this module's suite with a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(create_suite())
if __name__ == "__main__":
run_suite()
| [
"michkapopoff@gmail.com"
] | michkapopoff@gmail.com |
9afb84441cfdc98e6e22eebbaedb05932c2885a7 | d2e3a98b715f96ea47b38530060ee045e874ce84 | /networks.py | 36a86196f6529f9b1bbbb92d9cc77f1a2f992593 | [] | no_license | iiakash/semi_supervised_classification | 16d70bfedd0cf645ce3f58db20f4c5c073996abd | da966353579e2658326366e0b40b1d1329dc1c8b | refs/heads/master | 2023-01-05T04:30:24.743805 | 2020-11-06T00:36:11 | 2020-11-06T00:36:11 | 272,254,209 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,455 | py | import torch
import torch.nn as nn
import copy
class CAE_3_bn(nn.Module):
    """1-D convolutional autoencoder with batch normalisation.

    Encoder: three Conv1d stages taking the channel count from
    input_channels -> F -> 2F -> 4F (strides 2, 2, 1), each followed by an
    activation and BatchNorm1d. Decoder: the mirrored ConvTranspose1d stages.
    forward() returns (reconstruction, bottleneck).
    """
    def __init__(self, input_channels, out_channels, kernel_size, leaky = True, out_padding = False):
        super(CAE_3_bn, self).__init__()
        self.input_channels = input_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # Activation template; deep-copied below so every network position
        # gets its own module instance.
        if leaky:
            self.relu = nn.LeakyReLU()
        else:
            self.relu = nn.ReLU()
        # NOTE(review): `pad` is computed but never used — the output_padding
        # arguments below are hard-coded to 1 instead. Presumably out_padding
        # was meant to feed them; confirm before relying on the flag.
        if out_padding:
            pad = 1
        else:
            pad = 0
        # --- encoder layers ---
        self.conv1 = nn.Conv1d(in_channels = input_channels, out_channels = out_channels,
                               kernel_size = kernel_size, stride = 2)
        self.bn1_1 = nn.BatchNorm1d(num_features = out_channels, eps = 1e-5,
                                    momentum = 0.1)
        self.conv2 = nn.Conv1d(in_channels = out_channels,
                               out_channels = out_channels*2,
                               kernel_size = kernel_size, stride = 2)
        self.bn2_1 = nn.BatchNorm1d(num_features = out_channels*2, eps = 1e-5,
                                    momentum = 0.1)
        self.conv3 = nn.Conv1d(in_channels = out_channels*2,
                               out_channels = out_channels*4,
                               kernel_size = kernel_size, stride = 1)
        self.bn3_1 = nn.BatchNorm1d(num_features = out_channels*4, eps = 1e-5,
                                    momentum = 0.1)
        # --- decoder layers (mirror of the encoder) ---
        self.deconv3 = nn.ConvTranspose1d(in_channels = out_channels*4,
                                          out_channels = out_channels*2,
                                          kernel_size = kernel_size, stride = 1)
        self.bn3_2 = nn.BatchNorm1d(num_features = out_channels*2, eps = 1e-5,
                                    momentum = 0.1)
        self.deconv2 = nn.ConvTranspose1d(in_channels = out_channels*2,
                                          out_channels = out_channels,
                                          kernel_size = kernel_size, stride = 2, output_padding = 1)
        self.bn2_2 = nn.BatchNorm1d(num_features = out_channels, eps = 1e-5,
                                    momentum = 0.1)
        self.deconv1 = nn.ConvTranspose1d(in_channels = out_channels,
                                          out_channels = input_channels, kernel_size = kernel_size,
                                          stride = 2, output_padding = 1)
        self.bn1_2 = nn.BatchNorm1d(num_features = input_channels, eps = 1e-5,
                                    momentum = 0.1)
        # One independent activation instance per position in the network.
        self.relu1_1 = copy.deepcopy(self.relu)
        self.relu2_1 = copy.deepcopy(self.relu)
        self.relu3_1 = copy.deepcopy(self.relu)
        self.relu1_2 = copy.deepcopy(self.relu)
        self.relu2_2 = copy.deepcopy(self.relu)
        self.relu3_2 = copy.deepcopy(self.relu)
    def encoder(self, x):
        # conv -> activation -> batch norm, repeated three times.
        x = self.conv1(x)
        x = self.relu1_1(x)
        x = self.bn1_1(x)
        x = self.conv2(x)
        x = self.relu2_1(x)
        x = self.bn2_1(x)
        x = self.conv3(x)
        x = self.relu3_1(x)
        x = self.bn3_1(x)
        return x
    def decoder(self, x):
        # Mirror of the encoder. NOTE(review): the relu indices run opposite
        # to the bn indices (relu1_2 pairs with bn3_2, etc.); harmless, since
        # all relu*_2 modules are identical copies of the same activation.
        x = self.deconv3(x)
        x = self.relu1_2(x)
        x = self.bn3_2(x)
        x = self.deconv2(x)
        x = self.relu2_2(x)
        x = self.bn2_2(x)
        x = self.deconv1(x)
        x = self.relu3_2(x)
        x = self.bn1_2(x)
        return x
    def forward(self, x):
        # Return both the reconstruction and the latent code so callers can
        # train on the reconstruction loss and reuse the bottleneck features.
        bottleneck = self.encoder(x)
        reconst = self.decoder(bottleneck)
        return reconst, bottleneck
class Classifier(nn.Module):
    """Classification head on top of a pretrained encoder.

    Reuses `network.encoder` as the feature extractor and adds two fully
    connected layers on the flattened encoder output.

    Parameters
    ----------
    network : nn.Module
        Any module exposing an `encoder` attribute (e.g. CAE_3_bn).
    leaky : bool
        Use LeakyReLU instead of ReLU for the two activations.
    feature_dim : int
        Flattened size of the encoder output per sample. Default 1024 keeps
        the original hard-coded behaviour.
    num_classes : int
        Number of output classes. Default 22 keeps the original behaviour.
    """
    def __init__(self, network, leaky = True, feature_dim = 1024, num_classes = 22):
        super(Classifier, self).__init__()
        self.encoder = network.encoder
        # Generalisation: the 1024/22 sizes were hard-coded; they are now
        # parameters with the same defaults, so existing callers are unchanged.
        self.fc1 = nn.Linear(in_features = feature_dim, out_features = 300)
        self.fc2 = nn.Linear(in_features = 300, out_features = num_classes)
        if leaky:
            self.relu = nn.LeakyReLU()
        else:
            self.relu = nn.ReLU()
        # Independent activation instances for the two positions.
        self.relu1_1 = copy.deepcopy(self.relu)
        self.relu2_1 = copy.deepcopy(self.relu)

    def forward(self, x):
        x = self.encoder(x)
        x = self.relu1_1(x)
        x = x.view(x.size(0), -1)  # flatten per-sample features
        x = self.fc1(x)
        x = self.relu2_1(x)
        x = self.fc2(x)
        return x
| [
"noreply@github.com"
] | noreply@github.com |
6c174fd36109a273b4b2fe7880cdd02d3fcaa1c2 | 9fff2942787f57b66c90187c0d9c5fc8eec86a06 | /libs/images.py | cf513a1d4047441598ce3d7fd2fe2c22726a10a2 | [] | no_license | huhuqianyun/FreeTime | 310e2101b4e97a3b16587147e7932cd3deecd035 | b3fb99c940b92f70f7c54593405639c36bc97243 | refs/heads/master | 2020-07-20T18:49:09.783128 | 2019-10-05T07:49:31 | 2019-10-05T07:49:31 | 206,694,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from PIL import Image
# Generate a thumbnail of the image at `path` (width capped at `size`).
def make_thumb(path, size):
    """Return the image at *path*, downscaled in place so its width is at
    most *size* pixels while preserving the aspect ratio."""
    img = Image.open(path)
    w, h = img.size
    if w > size:
        scale = w / size
        h = int(h / scale)
    img.thumbnail((size, h), Image.ANTIALIAS)
    return img
| [
"1805203675@qq.com"
] | 1805203675@qq.com |
191b568b9e8adf1a28e2e7a59bdf26c8961b1746 | 42a6b017c1af312b291842dea4904aebb33252d3 | /Python/findSqrt.py | a08b933a09f61d838c26fec8c1f2a54c2eda8e74 | [] | no_license | musbel/Snippets | 20c733780203c3eb12e383280991877780dbe094 | 0b87dba20944f8840de6e3d847a8b517c3e5b057 | refs/heads/master | 2021-01-01T19:19:21.890535 | 2017-06-12T20:44:15 | 2017-06-12T20:44:15 | 29,825,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py |
def findSqrt(n):
    """Return the square root of n using Newton's (Babylonian) method.

    Raises ValueError for negative input; returns 0.0 for n == 0. Iterates
    at most 100 times or until successive estimates agree to within eps.
    """
    if n < 0:
        raise ValueError("cannot take the square root of a negative number")
    if n == 0:
        return 0.0
    # Bug fix: the original had `eps = 10^-14`, which is bitwise XOR (== -8),
    # so the convergence test below could never fire and the loop always ran
    # all 100 iterations. 1e-14 is the intended tolerance.
    eps = 1e-14
    x = float(n)  # initial guess (float, so the division below never truncates)
    for _ in range(100):
        z = n / x
        if abs(x - z) < eps:
            break
        x = 0.5 * (x + z)
    return x
if __name__ == "__main__":
print findSqrt(9)
| [
"musbel@gmail.com"
] | musbel@gmail.com |
e801fb21a2f964da0d8bbefd7ceadbdf62432240 | 846591b48ae482df21140fedf16e436158dda585 | /WindModel.py | fe4addacec518f936971cca1d3fca9bfd8003d34 | [] | no_license | Buziwe/AppDev-Summative-Assignment | 65074240324de941ec99a65d265b108283c7705d | cad5760389451e56403b3c566ab4506e8aad1209 | refs/heads/master | 2022-12-22T20:02:44.272498 | 2020-09-27T10:06:27 | 2020-09-27T10:06:27 | 298,809,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,886 | py | import json
import urllib.request, urllib.parse, urllib.error
import pandas as pd
import numpy as np
import pickle
from sklearn import metrics
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
import math
import datetime
data = pd.read_csv('wind_generation_data.csv', sep=",")
# Defining features for the modelling
X = data.drop(['Power Output'], axis = 1).values
y = data['Power Output'].values
# Create training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Fit training sets in the model
lm = linear_model.LinearRegression()
lm.fit(X_train,y_train)
### Using the Model on the dataset from the open weather api
# Load weather data from Openweather API using the urllib library
json_string = urllib.request.urlopen('https://api.openweathermap.org/data/2.5/onecall?lat=53.556563&lon=8.598084&exclude=hourly,minutely&appid=d51a4ce07859ab07bfd361e1c455940e').read()
WindWeather = json.loads(json_string)
# Convert the loaded data to a dataframe and choose the daily weather conditions
data = pd.DataFrame(WindWeather['daily'])
# Dropping all columns not needed to fit in model
df1 = data.drop(['sunrise','sunset','pressure','humidity','dew_point', 'clouds','uvi','pop','rain','sunrise','sunset','feels_like','weather','temp'], axis = 1)
# Renaming the column as per the model fitting dataframe
df1= df1.rename(columns = {'dt': 'Date','wind_deg': 'direction', 'wind_speed': 'wind speed'}, inplace = False)
# Converting the Epoch time to normal time
df1["Date"] = pd.to_datetime(df1["Date"],unit = 's')
# Creating a Day column from Date for the maintanence schedule logic
df1["Day"] = df1["Date"].dt.day
# Drop the date and day column for predicting Power Output
data2 = df1.drop(['Date','Day'], axis = 1)
# Define X inputs to fit and predict
X1 = data2.values
# Predicting power output
y = lm.predict(X1)
# Convert the predicted power output from an array into a dataframe
PowerOutput_Wind = pd.DataFrame(y)
# Append the predicted power output to df1.
df1['PowerOutput_Wind'] = PowerOutput_Wind
# Display the expected power output for 4 consecutive days
df1 = df1[0:5]
#print(df1) #for prediction display
# Logic for maintenance schedule according to the maintenance csv days
df1['PowerOutput_Wind'] = np.select([df1.Day == 3, df1.Day == 5,df1.Day == 7,df1.Day == 8,df1.Day == 15,df1.Day == 24,df1.Day == 28],
[0.7*df1.PowerOutput_Wind,0.6*df1.PowerOutput_Wind, 0.5*df1.PowerOutput_Wind,0.45*df1.PowerOutput_Wind,0.55*df1.PowerOutput_Wind,
0.9*df1.PowerOutput_Wind,0.3*df1.PowerOutput_Wind ], default=df1.PowerOutput_Wind)
#print(df1) #for prediction display
#Save model as a Pickle file
pickle.dump(lm,open('modelWind.pkl','wb'))
model= pickle.load(open('modelWind.pkl','rb'))
| [
"61239896+Buziwe@users.noreply.github.com"
] | 61239896+Buziwe@users.noreply.github.com |
6d53bd2ad8620c52fba55ab8bda20744ee97b8a0 | dbe1f4110921a08cb13e22ea325d503bd5627195 | /chuhuo_2.7_clickhouse/bluedon/bdwafd/newscantools/plugins/Phpcms_post_clickSqlInjectionScript.py | 1001b1ccc85c453fcecb86d7c9553f38992ae182 | [] | no_license | Hehouhua/waf_branches | 92dc1b1cbecba20f24ef6c7372dde7caa43f9158 | ca76f3a1ed8150b423474c9e37aee37841a5ee35 | refs/heads/main | 2023-01-07T11:33:31.667688 | 2020-11-03T06:58:33 | 2020-11-03T06:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.common import *
def run_domain(http,ob):
list = []
try:
domain = ob['domain']
detail = u''
url = "%s://%s%s" % (ob['scheme'],ob['domain'],ob['base_path'])
expurl="%s%s"%(url,"index.php?m=poster&c=index&a=poster_click&sitespaceid=1&id=2")
data=""
headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5","Accept": "text/plain","Referer": "1',(SELECT 1 FROM (select count(*),concat(floor(rand(0)*2),(SELECT concat(username,0x4E56535F54455354,password,0x5f,encrypt) FROM v9_admin WHERE 1 ))a from information_schema.tables group by a)b),'1')#"}
#res, content = http.request(expurl,"POST",data,headers)
res, content = yx_httplib2_request(http,expurl,"POST",data,headers)
#print content
if content.find('NVS_TEST')>=0:
#print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
request = postRequest(expurl,"POST",headers,)
response = getResponse(res)
list.append(getRecord(ob,ob['scheme']+"://"+ob['domain'],ob['level'],detail,request,response))
except Exception,e:
logging.getLogger().error("File:phpcms_post_click.py, run_domain function :" + str(e) + ",task id:" + ob['task_id'] + ",domain id:" + ob['domain_id'])
write_scan_log(ob['task_id'],ob['domain_id'],"File:phpcms_post_click.py, run_domain function :" + str(e))
#end try
return list
#end def | [
"hanson_wong@qq.com"
] | hanson_wong@qq.com |
3148dbe87159b49706db261924a5808aea215be9 | 6805008e2ec86649bfcf69916375a969fd17c7b3 | /.github/workflows/Abu.py | 04ea2c2e22bc01c51192810afb6d6a6c886c1874 | [] | no_license | Abu213/abu | c23a2b17b892372462dd46996352bbc794ba5433 | 8d0b25555ff963f318d2d234b294d812ef100fb8 | refs/heads/master | 2022-12-13T17:02:22.019464 | 2020-08-30T06:56:09 | 2020-08-30T06:56:09 | 291,415,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,712 | py | # Compiled By : AbuBaker
# GitHub : https://github.com/AbuBaker
# YouTube Channel : Abu Baker
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJztWVtvW8cR3kNJlERJ1sW2ZNmufZzEipxEvEmULDtKqosvQmxJpiQrpSAQhzxL8lDnQp1dWpJrFyjcvjepk5cG7UNeAgRNUvQlRYH+kgIF3KeiL/0H6czsOSRlKUnThz6Vl+XsZXZnZ+ebmT0ssuDVAd8fw1f8XWPs8K/MZMzUmM1YjoW0xnJaSEdYLhLSbSzXFtLtLNce0h0s1xHSUZaLhnQny3WGdBfLdYV0N8t1h3SM5WIh3cNyPSHdy3K9Id3Hcn1ER5h9ijn9LNfPNKy3MXuAOYMsN6jq7cjnDLHcENO23OusnZ9muzHmD2vw4oxVzzCzgz3TWFiJtlY6qeJq7H1+lnFNdWmsOszMLlXpZdUh9gxUNcL4CKueY3xUdUDlPMPuC6x6EUeYsMEY9GmaeY6VNRye/xEz+9gvgPsSM08RcZmZ/UTozBwg4gozYTuvMHOIqq8y8zTLvcbKQF9l5hlqHGPmWSJeZ+YwEeOMX2PmCNuNMP9vGh9DCTSXlLc+PgqHbX0Dr5VxOHEmY1BsVHxumGueZ6u2figWPdflRWl57i3f93zV0QnFgu/tC+6Ls0DXrFpat1whDdvWfb5X50IKMfxyj8OLFcO1HnOBM9cOZcVz0/p70BaHSju01WXpuuwCwjEO8tJyuIXrCRSuDotNGGXuSnEJGpcM+5G1m0jFp+NJffye5dYPbuqbN/V51/Q9y9Sn4lPx9E195f1MRl+oW7aZeG91I5NJTl/Tt28vzK8kbi9Mzd8E6mEilYQ54J2ejk9fh6aFh4mpzGxyKjWdhNrS/cRPTe4KSx7OTcaTb+1bpqzMpZLXk29VuFWuyLnUbDr5FEbeW0xYMr+8AWT2yBSL2cSaJyS/7xUsm0PD/dsJQ9QFrrUUUmsriaLnxEtGkRc8bze+a0jDNVCAh4n59c31fC6ZnF+C+vrDRCaO066uJVI4+3zi4Pr0DcN3uFGwJh7NGDd3iiGuUX2LqEI8DDj4O3clmDWYb4SsAeCxPh6BrhWBWt++sqPfOrDkeBueMraD2EiLQ0Hnw7ET520WAoeZ5SzaxHmst9O6HVoR18ZKVyjDupLhqYaYfRphBzcYSLO0k2ZP29iTCCL3icYkSQeolVQFJAE6zz2LsNGRJ21sZAYY906zLZAHWJ6hLUdxA2SURkWaj6tFiSJIXP+KhSRZ0XhH2HggkcM3XNNziA1Jy5W0VRssDEcWuTR2x9tDngKV+1SaVFrHdEAqMorG7uXQaJkWhXefllTawHk7Q238rKGNg3nc6NLOLCoFqCoLtoZqacfjAn1ByzAoSJJqUBHQPwxaedrO9vrYFowDNc1ALeCIklOjfthXFRo7sAv8GXKqk8eTWcliKXDrV64KgeJd2L4qbqYc63J4nBe2kw7t8UpS4D5ipHJSk+Wa/EApkddsMF5lL9LP0pAoVUyvLmn0vm9JTgeRHcBiEIvToY6r2Z4TTQu9xetYPxWotE/r18a1mDak1IrD2kK1YnHwNntCyh1Z2kmihhtGrwxKC+ptBAJJujFJ49peD9si3bQRKmi35a8v/fKfD77+7bu0bdpY9hwWo6EWSnZdVOj40WlRk7A5rxG2aG+PqeTHbQbHVgznsZHCxi7a4aA2AHsUcNwsdmE7dXM247z46Pcvnv88+Hz0RaP5+bOg56MvgG5txp4/EPFpyPlH+jToFuLojE1Wmh2nhjk+Dzk+py4isPfLI8J8RdN92cLaKsGzJmuLtIop2MSnyI0tf6aP4mshWtYDIYPPV9isf99LMU46Ez/sFVN8086iZ3JTXzjU9Rs6NU05GxDT9AeGBdEpHHXHkpV6oWVMRcqauJFIlKkjDm4+gWxHuW4Hjr/Bpbr1+ULBEOGgnwCQ6gX+nctvcJuXfcPRX5rpyEQrnmyZZYXDtu5Yvl3yLe6asdiLjz/8js8D+M4rcln9rDf6JhtUFr4
TzZHwWTjyc6QLJ2ioeckSRdsAIPkNCZd12JDrScguRM2DaFywuV7yfN1wD3XHEkKH/CD2X54vRcsjTuS28s0mxaRyG3sKPiWJPmVpZ5ziVzt6kpE7R4KpcrNBKrk3HPgRCk7kReNggYqIN4i4LoaA6Nt+8cnzHf2eVy5bbllfdvVmyGrxNxewuIjFj7A3ErofaUlrlxyMd9zBUCy3dlebMakfnEsX62MUG2nzOCYabj4JE6mNmYG3jGDe0Ba4SNDFE8qyMa+8d8JIVA5jI6CIkTt33VEMRcCunKzKpiEsBApTOTRoFNQJ2TFO+afvmxJSaFS+2YOVYN5elaWPYjJ98mKHOuatkGJDcv2M0o9gmQEateVeYu2QespuVorQ9WAIrweQF1PWfJ69Dyn4yTPTMQ9RDkKJg80NX+ABbf/rd89/tdOA6urqPX1z/VZ2Zf7+rRu6xFi3R8g0EJmK48Unv97RFUfqZQ46PX286Pk+ZOTXKPPGjG3ZfWTYkPJuwlSu4fC4SpovNSU4IsLa/Pr61mp2CURAG8T0+9vWbg4VKK3eXNxC4zomwRpsY9/zzbjoxkzKA2OOywNJlukLDPZh0h/HZdOQ9osz3zLHOK6Y7WgkEZCBcofIggFXEl/iEr6xn7fcWl02UUFDJK1N+/NqkNBhWH2PH9IVhpKV5VVF4yIQsCueSRwq8cXR9UCZdKQ1lEkhzdvl8niG8ib8bGH9OgGsD9LfXnjj70CkA347I2G9D95Bm9YGUBzUTms9VG8gseGG+k9EoklFm4IFFB0BOqMt6OwkKN1HfJHRkhH/hboUyoroOaj1M2qNUes/vgX4PQFKQ+B2IuIAagpIkJpWu8LUCivdChenkDvGqj2EuUbaFSAuqAwq5L5CUgyRFN+wUOJevOk2NkBAQ3tZyb6G6nkVTScVAGzSQfdpufoWRFl9eSnRNMYMjkuHtj3pqHGbAl3tBhyoGx/XVzx9Ga554EkP9TXfg/Di6NfIZLcnm5x4OYqLKGEBXwI9ckz1pp3FiucJrq/W8Lqsv/OOrmfRSMVAYOL6lu+RdweDJUykqEyLiwFUP2ji7471iCvhIEwfhRNlzWIMVz6KWQwfEMgheqzXi0UuRKlu24eBO5jHvNgsTyAe9DAj2d/fjx+qnILSEryfu9xObC7e5XJvfq++uetVDx4+WuOFB7VCsnwyhmkXSbpi4jYDaVo2O94bYjmLsMlew+INLDrDEEa7y+KJZt9iral10QadtgQ9QigWFU/dSeG3FhA14zg0E4wQ0QrN8I2Q7IBrRC99FWg7KTrGGkCFPgAnQbOzNUh+/DI0Mb5EgowBYdHobg/iS0FrB7lB0iC+fEDx5QjcCMnRMIVAKtqgVMhReO5EFDfiJmAP4IsYVWGtMwiHcHuUCokIwgjiFcA5ArCkizRgEm+CeD/sYdVevBxitQPx+aSD7UaZv6wFWxqkuaPsSZRV+7AXxowS2qHhlALnEErWz6oDiNkWgJ9prbRevwLN7H2omWdpzmE2iqvWNXOkOdc5xTFIi+DY0WPrqD58REce4nzTQxzFDd0hT45C114K19OBk4CE29fD1FyfLxa9uivFxLHRSx4mpxCBFcca94XnGnaDI3WMA8ca+grfPzY9wF65MnEVuP6jDJaSyO03d/SVulPgfuKWY1g2BHgEZuCE+oMRoVtEt/IBoiZwBYUJo2Y1Hz+hN1CBMWHUZSVOWnzXILeSxyjozqUnZ2Yys7PJ2cxsajqTuZrOpDMzi8lSaippGAVulgrTGaOYnjFmJme5mTLS6enJQmoMsnbHkHNVUNCYMHfzj0BX4C/nUmMcpZ4TIyDVmO0VDZvPcTe/uT5WC2SeE3exD7jmLE+MlTnkAYbkeQFCwRT5IkhucQFTCas8N1nKZDKl2VmQI1UqmjOGkSxOTZUy10uZdJqXpiV6pdYdKcd6telY/3d+FUMEMrXqnzIcjmlK3hFlZUJN94rZnn6/LqT+kPtW6bBhPQscNMy
VBcUtehozoVgDzsBGaIJlM9G0iGURRKgrlM62OGxyyThLNt7w33rDiaPjJ9dNjwwLPnljPF8ibM8wyZ1nZ1jr9SU7ja66M2STBfLqlmVSFX6wWts3aRLTkEb2UjhWnJCC4Wy/wfpC4OfD5KqPfHyMfHqvNkQ+/jL09VJ/6OnR1w8QHaNkjJLAfB4s0s3n6ab1/6fQP/Qp9PgbDSuKhkdPeoXT5PigLIsIpHy8YoiKbRXodH1Oli/p3wjI0bLvsiCtr/s2DsJeBXWsIXuZS/QR1NP4l4GWCv+NkJg3OnVbWjXfQwzDxPGa59lZNBd5umVonB8UOeVwIosQz5J54k1h2anBnYFuDoQL9RyZo4VLRIzg0uQlA1bhbtEj2d/BYYOqLw9ymTbPQ3rpgUSomNuGDR1DL/Xzks9FhQbk0aWQ8Hc3NtayqmdNbQFkQZs0TLMCmgI3SulUFgfTM1MF0WsNdCKQCkZxN4sZDN1ZKDsi6AT2jlcesPcGuo48ScAhbzueWbf5O/TQGBnbtM+0AXifCWA02BaDEtOpi1pvpLuju6s71n0xClRUg/Jq9+Xu13q1fwPoK52T"))))
| [
"noreply@github.com"
] | noreply@github.com |
b8d70c7e1d89cf7954f4409f200d302c1c6ad52d | d0f965bea168e80870bd5ad02ccc7a6538a8a327 | /networks/diff_flow.py | 5257b70d666b70ee1f261d433045d20daa808a07 | [] | no_license | cenkbircanoglu/DiffFlow | 4897d4f2e31a86fce2035040aaf4e1cf5d21f649 | 62200f0e1089c97e16e7fc38cf3db2526bfeae11 | refs/heads/master | 2023-08-15T23:14:39.206327 | 2021-10-15T02:25:51 | 2021-10-15T02:25:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | from utils.sdefunction import SdeF
from .base_model import BaseModel
__all__ = ["DiffFlow", "QuickDiffFlow"]
# pylint: disable=too-many-arguments, arguments-differ
# pylint: disable=too-many-arguments, arguments-differ
class DiffFlow(BaseModel):
    """Diffusion-flow model over a fixed time discretization.

    timestamp/diffusion/condition are registered as (torch-style) buffers:
    carried with the module's state but not trained.  delta_t caches the
    step sizes between consecutive timestamps.
    """
    def __init__(
        self, data_shape, timestamp, diffusion, condition, drift_net, score_net
    ):
        super().__init__(data_shape, drift_net, score_net)
        self.register_buffer("timestamps", timestamp)
        self.register_buffer("diffusion", diffusion)
        self.register_buffer("condition", condition)
        # One diffusion coefficient per timestamp is required.
        assert self.timestamps.shape == self.diffusion.shape
        self.register_buffer("delta_t", self.timestamps[1:] - self.timestamps[:-1])
    def forward(self, x):
        # Forward pass along the registered schedule (delegates to BaseModel).
        return super().forward(x, self.timestamps, self.diffusion, self.condition)
    def backward(self, z):
        # Reverse pass along the registered schedule (delegates to BaseModel).
        return super().backward(z, self.timestamps, self.diffusion, self.condition)
    def sample(self, n_samples):
        # Draw base-distribution noise, reshape to the data shape, and invert.
        z = self._distribution.sample(n_samples).view(-1, *self.data_shape)
        x, _ = self.backward(z)
        return x
    def sample_noise(self, n_samples):
        # Base-distribution samples reshaped to the data shape.
        return self._distribution.sample(n_samples).view(-1, *self.data_shape)
    def noise_log_prob(self, z):
        # Log-density of z under the base distribution.
        return self._distribution.log_prob(z)
class QuickDiffFlow(DiffFlow):
    """DiffFlow variant whose forward runs through the custom SdeF autograd op.

    The model's parameters are passed explicitly into SdeF.apply so gradients
    flow through the custom Function -- presumably a memory-efficient
    (adjoint-style) backward; see utils.sdefunction.SdeF.
    """
    def forward(self, x):
        return SdeF.apply(
            x,
            self,
            self.timestamps,
            self.diffusion,
            self.condition,
            *tuple(self.parameters())
        )
    def forward_cond(self, x, timestamps, diffusion, condition):
        # Same as forward, but with a caller-supplied schedule/conditioning.
        return SdeF.apply(
            x, self, timestamps, diffusion, condition, *tuple(self.parameters())
        )
| [
"zqsh19970218@gmail.com"
] | zqsh19970218@gmail.com |
0c2d08e7c38473e487ebea5aaafa78bd27b2e4d2 | 56602773815ce44cef2df6184b10e63488787882 | /Perfil_Modulo.py | 0ee1f85913745f5bf18ec9dbd97f7f42e03b0b58 | [] | no_license | JonatasHdS/Squad-D-Coleta-Instagram | 6dd9ef9f8bed54e8f113d96b8d21ddab932198c3 | dcdcde0e5f89494243da7ba534334375b0fd8532 | refs/heads/master | 2022-12-28T18:19:23.150055 | 2020-10-18T15:51:18 | 2020-10-18T15:51:18 | 292,061,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,312 | py | import instaloader, time, datetime, csv, re
import datetime, Subtarefas_Modulo
# Função que busca se os comentários do post analisado contém alguma das palavras-chaves relacionadas ao covid
def comentario_relacionado(comments):
    """Return True if any comment contains a COVID-related keyword.

    Whitespace is stripped from the comment text before the KMP substring
    search so keywords split across spaces are still caught.
    """
    keywords = ("coron", "covid", "quarentena", "homeoffice", "pandemia")
    for comment in comments:
        compacted = comment.text.replace(" ", "")
        if any(Subtarefas_Modulo.kmp(compacted, kw) != [] for kw in keywords):
            return True
    return False
# Função que busca se o texto do post analisado contém alguma das palavras-chaves relacionadas ao covid
def texto_relacionado(caption):
    """Return True if the post caption contains a COVID-related keyword."""
    compacted = caption.replace(" ", "")
    keywords = ("coron", "covid", "quarent", "homeoffice", "pand", "virus")
    return any(Subtarefas_Modulo.kmp(compacted, kw) != [] for kw in keywords)
# Função que filtra se o post coletado tem relacionamento com o tema emprego
def emprego_relacionado(comments, caption):
    """Return True if the caption or any comment mentions an employment keyword."""
    keywords = ('empreg', 'demiss', 'demit', 'homeoffice', 'trabalh', 'auxilio', 'auxílio')

    def _matches(text):
        # Strip spaces, then look for any keyword via the KMP helper.
        compacted = text.replace(" ", "")
        return any(Subtarefas_Modulo.kmp(compacted, kw) != [] for kw in keywords)

    if _matches(caption):
        return True
    for comment in comments:
        if _matches(comment.text):
            return True
    return False
def coleta_perfil(loader):
    """Scrape employment-related photo posts from Instagram profiles.

    Reads a CSV of usernames (name asked interactively), walks each
    profile's posts via instaloader, and writes one CSV per profile with
    post metadata for posts matching emprego_relacionado().  Stops after
    the interactively-requested number of posts.
    """
    # Regex used to strip emojis out of captions before writing them to CSV.
    emoji_pattern = re.compile("["
        u"\U0001F600-\U0001F64F" # emoticons
        u"\U0001F300-\U0001F5FF" # symbols & pictographs
        u"\U0001F680-\U0001F6FF" # transport & map symbols
        u"\U0001F1E0-\U0001F1FF" # flags (iOS)
        u"\U00002702-\U000027B0"
        u"\U00002702-\U000027B0"
        u"\U000024C2-\U0001F251"
        u"\U0001f926-\U0001f937"
        u"\U00010000-\U0010ffff"
        u"\u2640-\u2642"
        u"\u2600-\u2B55"
        u"\u200d"
        u"\u23cf"
        u"\u23e9"
        u"\u231a"
        u"\ufe0f" # dingbats
        u"\u3030"
        "]+", flags=re.UNICODE)
    business_csv = str(input("Digite o nome do arquivo CSV que deve ser lido: ")) # CSV file listing usernames to scrape
    cont = 0
    cont_max = int(input("Digite o número de posts que devem ser coletados do perfil: "))
    with open(business_csv+'.csv', 'r') as arquivo_csv:
        leitor = csv.reader(arquivo_csv, delimiter=',')
        for linha in leitor:
            # First column of each row is the Instagram username.
            username = linha[0]
            profile = instaloader.Profile.from_username(loader.context, username)
            print("-----NOVO PERFIL-----")
            print(profile.username)
            posts = profile.get_posts()
            with open(str(profile.username)+'.csv', 'w', encoding='utf-8', newline='') as file:
                writer = csv.writer(file)
                writer.writerow(["Usuario", "Genero", "Data", "Likes", "Comentarios", "Texto", "Hashtags", "Patrocinado", "Usuarios marcados", "Comentário Rel.", "Texto Rel.", "PostId"])
                for post in posts:
                    comentarios = post.get_comments()
                    # Keep only photo posts with a caption related to employment.
                    if post.is_video == False and post.caption != None and emprego_relacionado(comentarios, post.caption):
                        print(post.date)
                        cont += 1
                        print("Número de posts coletados: ", cont)
                        nome = post.owner_profile.full_name
                        if nome:
                            nome = nome.split()
                            # Infer gender from the first name via the helper module.
                            genero = Subtarefas_Modulo.consulta_genero(nome[0])
                        else:
                            genero = "None"
                        writer.writerow([post.owner_username, genero, post.date, post.likes, post.comments,emoji_pattern.sub(r'', post.caption), post.caption_hashtags, post.is_sponsored, post.tagged_users, comentario_relacionado(comentarios), texto_relacionado(post.caption), post.shortcode]) # one CSV row per matching post
                        if cont == cont_max: # stop once the requested number of posts was collected
                            print("Finalizado!!")
break | [
"61765878+JonatasHdS@users.noreply.github.com"
] | 61765878+JonatasHdS@users.noreply.github.com |
8c9cac2973d6978608f4768621bb61a098589c65 | 8316b326d035266d41875a72defdf7e958717d0a | /Regression/Poly_linear_regression_boston_house_predict.py | 465a1cccc33d4b05fe7f9a57daa1fb6da7a7de61 | [] | no_license | MrFiona/MachineLearning | 617387592b51f38e59de64c090f943ecee48bf1a | 7cb49b8d86abfda3bd8b4b187ce03faa69e6302d | refs/heads/master | 2021-05-06T17:18:49.864855 | 2018-01-24T15:29:36 | 2018-01-24T15:29:36 | 111,804,323 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time : 2017-11-28 15:22
# Author : MrFiona
# File : Poly_linear_regression_boston_house_predict.py
# Software: PyCharm Community Edition
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV, ElasticNetCV
from sklearn.linear_model.coordinate_descent import ConvergenceWarning
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
names = ['CRIM','ZN', 'INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT']
def notEmpty(s):
return s != ''
mpl.rcParams[u'font.sans-serif'] = [u'simHei']
mpl.rcParams[u'axes.unicode_minus'] = False
warnings.filterwarnings(action = 'ignore', category=ConvergenceWarning)
np.set_printoptions(linewidth=100, suppress=True)
df = pd.read_csv('../datas/boston_housing.data', header=None)
# print(df.values)
data = np.empty((len(df), 14))
for i, d in enumerate(df.values):
d = list(map(float,list(filter(notEmpty, d[0].split(' ')))))
data[i] = d
x, y = np.split(data, (13,), axis=1)
y = y.ravel()
# print('x:\t', x, type(x))
# print('y:\t', y, type(y))
print ("样本数据量:%d, 特征个数:%d" % x.shape)
print ("target样本数据量:%d" % y.shape[0])
models = [
Pipeline([
('ss', StandardScaler()),
('poly', PolynomialFeatures()),
('linear', RidgeCV(alphas=np.logspace(-3, 1, 20)))
]),
Pipeline([
('ss', StandardScaler()),
('poly', PolynomialFeatures()),
('linear', LassoCV(alphas=np.logspace(-3, 1, 20)))
])
]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
parameters = {
"poly__degree": [3,2,1],
"poly__interaction_only": [True, False],
"poly__include_bias": [True, False],
"linear__fit_intercept": [True, False]
}
titles = ['Ridge', 'Lasso']
colors = ['g-', 'b-']
plt.figure(figsize=(16, 8), facecolor='w')
ln_x_test = range(len(x_test))
plt.plot(ln_x_test, y_test, 'r-', lw=2, label=u'真实值')
for t in range(2):
model = GridSearchCV(models[t], param_grid=parameters, n_jobs=1)
model.fit(x_train, y_train)
print("%s算法:最优参数:" % titles[t], model.best_params_)
print("%s算法:R值=%.3f" % (titles[t], model.best_score_))
y_predict = model.predict(x_test)
plt.plot(ln_x_test, y_predict, colors[t], lw=t + 3, label=u'%s算法估计值,$R^2$=%.3f' % (titles[t], model.best_score_))
plt.legend(loc='upper left')
plt.grid(True)
plt.title(u"波士顿房屋价格预测")
plt.show() | [
"1160177283@qq.com"
] | 1160177283@qq.com |
ec7ef7eced1a834945a207197484f10120845033 | 61b94f39ae02818f1cf8a37e6488839f265074ea | /auth/views.py | b2c78fd9d79995f45da0339b3e027899527bdba6 | [] | no_license | chapagainmanoj/todo | 2a2a2f9cba12ecde5f01a225f5bb7f7eec6370a0 | 2b4241e9043d703f90aceee73d4aeaad53520a43 | refs/heads/master | 2023-02-09T13:45:50.196025 | 2021-01-07T09:12:51 | 2021-01-07T09:12:51 | 326,952,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | from starlette.responses import PlainTextResponse
from starlette.endpoints import HTTPEndpoint
class Homepage(HTTPEndpoint):
    """Plain-text landing page; greets authenticated users by display name."""

    async def get(self, request):
        # Anonymous visitors get the bare banner.
        if not request.user.is_authenticated:
            return PlainTextResponse('Todo!')
        return PlainTextResponse('Todo! ' + request.user.display_name)
| [
"chapagainmanoj35@gmail.com"
] | chapagainmanoj35@gmail.com |
c361d0158e68057e3da329065870caacc57e0aac | b334faaf0a9625c1e10a6c7b5ca7c5698515176c | /dlgo/agent/base.py | 2a4508c919795fe71d939a5988792a5d61841e21 | [] | no_license | davearch/Deep-Learning-Go-Bot | cccf60d602dd7a46d667d2cb4d428abc24de1022 | e785d48c21ce9b038551915820f5412afbf23b69 | refs/heads/master | 2023-02-05T03:38:20.986619 | 2020-12-31T10:10:12 | 2020-12-31T10:10:12 | 325,771,730 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | class Agent:
    def __init__(self):
        # Base agent holds no state; concrete agents may override.
        pass
def select_move(self, game_state):
raise NotImplementedError() | [
"darchuletajr@gmail.com"
] | darchuletajr@gmail.com |
76211858ab6a708b317c5f35feab4b6549499e56 | 856199f56be68ca90566cd96e5bbe753b4d6c652 | /scqubits/settings.py | bcb6de6e933bb2dc0ff13ce0808329e132bdd0ba | [
"BSD-3-Clause"
] | permissive | kaiterm/scqubits | 7a94054599200dc73f35ba3d8a15ec9a92dec311 | 8bcca2401760231e2fdd4268239332712310d47d | refs/heads/master | 2022-11-20T01:02:52.620136 | 2020-07-24T20:11:15 | 2020-07-24T20:11:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,355 | py | """
scqubits: superconducting qubits in Python
===========================================
[J. Koch](https://github.com/jkochNU), [P. Groszkowski](https://github.com/petergthatsme)
scqubits is an open-source Python library for simulating superconducting qubits. It is meant to give the user
a convenient way to obtain energy spectra of common superconducting qubits, plot energy levels as a function of
external parameters, calculate matrix elements etc. The library further provides an interface to QuTiP, making it
easy to work with composite Hilbert spaces consisting of coupled superconducting qubits and harmonic modes.
Internally, numerics within scqubits is carried out with the help of Numpy and Scipy; plotting capabilities rely on
Matplotlib.
"""
# settings.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#######################################################################################################################
import matplotlib as mpl
from cycler import cycler
# a switch for displaying of progress bar; default: show only in ipython
PROGRESSBAR_DISABLED = False
try:
    if __IPYTHON__:
        IN_IPYTHON = True
except NameError:
    # __IPYTHON__ only exists inside IPython; a NameError means we're in a
    # plain interpreter, so disable the progress bar there.
    PROGRESSBAR_DISABLED = True
    IN_IPYTHON = False
# default energy units
DEFAULT_ENERGY_UNITS = 'GHz'
# define settings for tqdm progressbar
TQDM_KWARGS = {'disable': PROGRESSBAR_DISABLED,
'leave': False}
# run ParameterSweep directly upon initialization
AUTORUN_SWEEP = True
# enable/disable the CENTRAL_DISPATCH system
DISPATCH_ENABLED = True
# For parallel processing ----------------------------------------------------------------------------------------------
# store processing pool once generated
POOL = None
# number of cores to be used by default in methods that enable parallel processing
NUM_CPUS = 1
# Select multiprocessing library
# Options: 'multiprocessing'
# 'pathos'
MULTIPROC = 'pathos'
# Matplotlib options ---------------------------------------------------------------------------------------------------
# set custom matplotlib color cycle
mpl.rcParams['axes.prop_cycle'] = cycler(color=["#016E82",
"#333795",
"#2E5EAC",
"#4498D3",
"#CD85B9",
"#45C3D1",
"#AA1D3F",
"#F47752",
"#19B35A",
"#EDE83B",
"#ABD379",
"#F9E6BE"])
# set matplotlib defaults
mpl.rcParams['font.family'] = "sans-serif"
mpl.rcParams['font.sans-serif'] = "Helvetica Neue, Helvetica, Arial"
mpl.rcParams['figure.dpi'] = 150
mpl.rcParams['font.size'] = 11
mpl.rcParams['axes.labelsize'] = 11
mpl.rcParams['xtick.labelsize'] = 10
mpl.rcParams['ytick.labelsize'] = 10
# toggle top and right axes on and off
DESPINE = True
| [
"jens-koch@northwestern.edu"
] | jens-koch@northwestern.edu |
03d62954d12744b87d777ac72a2a8132cb557051 | f31cca99d55823f14506c686177baab9fa0a95dc | /social computing/udacity-cs253-master/UNIT 4/hw-shanky/blog.py | 9f4668ac00cbc37ea8aa94aea0fdef6c29f1470f | [] | no_license | Bhavana1801/Studious-eureka | 4edc69fd668f412f834e18c8dfd695d11a125408 | 195acbddba795d56a1d004f1fc9a380f15bc4004 | refs/heads/master | 2021-01-19T06:36:32.593208 | 2016-08-19T05:28:05 | 2016-08-19T05:28:05 | 59,879,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,027 | py | import os
import re
import random
import hashlib
import hmac
from string import letters
import webapp2
import jinja2
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
#password hash using salt
def make_salt(length = 5):
    """Return a salt of `length` random ASCII letters."""
    picks = [random.choice(letters) for _ in range(length)]
    return ''.join(picks)
def make_pw_hash(name, pw, salt = None):
    """Return "salt,hash" where hash = sha256(name + pw + salt).

    A fresh random salt is generated when none (or an empty one) is given.
    """
    salt = salt or make_salt()
    digest = hashlib.sha256(name + pw + salt).hexdigest()
    return '%s,%s' % (salt, digest)
def valid_pw(name, password, h):
    """True if `password` (hashed with h's salt) matches the stored value `h`.

    Uses hmac.compare_digest (constant-time, Python 2.7.7+) instead of ==,
    so an attacker cannot time how many leading digest characters matched.
    """
    salt = h.split(',')[0]
    return hmac.compare_digest(h, make_pw_hash(name, password, salt))
#verifying cookie
secret = 'manmanmanfu!up'
def make_secure_val(val):
    # "value|HMAC(secret, value)" -- makes cookie tampering detectable.
    return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
    """Return the embedded value if the trailing HMAC verifies, else None.

    Recomputes the signature for the value part and compares it with
    hmac.compare_digest (constant-time, Python 2.7.7+) rather than ==,
    so the check leaks no timing information about the digest.
    """
    val = secure_val.split('|')[0]
    if hmac.compare_digest(secure_val, make_secure_val(val)):
        return val
#blog handler
class BlogHandler(webapp2.RequestHandler):
    """Base handler: jinja2 rendering helpers plus signed-cookie sessions."""
    def write(self, *a, **kw):
        # Shortcut for writing straight to the response body.
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        # Render a jinja2 template to a string.
        t = jinja_env.get_template(template)
        return t.render(params)
    def render(self, template, **kw):
        # Render a template and send it as the response.
        self.write(self.render_str(template, **kw))
    def login(self, user):
        # Remember the user by storing the signed datastore id in a cookie.
        self.set_secure_cookie('user_id', str(user.key().id()))
    def logout(self):
        # Blank the cookie value to end the session.
        self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
    def set_secure_cookie(self, name, val):
        # Store val together with its HMAC so tampering is detectable.
        cookie_val = make_secure_val(val)
        self.response.headers.add_header(
            'Set-Cookie',
            '%s=%s; Path=/' % (name, cookie_val))
    def read_secure_cookie(self, name):
        # Return the verified value, or a falsy result if missing/tampered.
        cookie_val = self.request.cookies.get(name)
        return cookie_val and check_secure_val(cookie_val)
    #def initialize(self, *a, **kw):
    #    webapp2.RequestHandler.initialize(self, *a, **kw)
    #    uid = self.read_secure_cookie('user_id')
    #    self.user = uid and User.by_id(int(uid))
class MainPage(BlogHandler):
    """Trivial landing page used as the root route."""
    def get(self):
        self.write('Hello, Udacity!')
class User(db.Model):
    """Datastore model for a registered user.

    pw_hash stores "salt,sha256(name+password+salt)" as produced by
    make_pw_hash; email is optional.
    """
    name = db.StringProperty(required = True)
    pw_hash = db.StringProperty(required = True)
    email = db.StringProperty()
    @classmethod
    def by_id(cls, uid):
        # Look up a user by datastore id.
        return User.get_by_id(uid)
    @classmethod
    def by_name(cls, name):
        # Look up a user by username; None if absent.
        u = User.all().filter('name =', name).get()
        return u
    @classmethod
    def register(cls, name, pw, email = None):
        # Build (but do not put()) a new User with a freshly salted hash.
        pw_hash = make_pw_hash(name, pw)
        return User(name = name,
                    pw_hash = pw_hash,
                    email = email)
    @classmethod
    def login(cls, name, pw):
        # Return the user when the credentials check out, else None (implicit).
        u = cls.by_name(name)
        if u and valid_pw(name, pw, u.pw_hash):
            return u
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
    """Truthy iff username is non-empty and is 3-20 chars of [a-zA-Z0-9_-]."""
    if not username:
        return username
    return USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
    """Truthy iff password is non-empty and 3-20 characters long."""
    if not password:
        return password
    return PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
    """Truthy iff email is blank (it's optional) or looks like addr@host.tld."""
    if not email:
        return True
    return EMAIL_RE.match(email)
class Signup(BlogHandler):
    """Signup form: validates all fields, then defers creation to done()."""
    def get(self):
        self.render("signup-form.html")
    def post(self):
        # Validate every field independently so all errors show at once.
        have_error = False
        self.username = self.request.get('username')
        self.password = self.request.get('password')
        self.verify = self.request.get('verify')
        self.email = self.request.get('email')
        # Echo username/email back into the form when re-rendering on error.
        params = dict(username = self.username,
                      email = self.email)
        if not valid_username(self.username):
            params['error_username'] = "That's not a valid username."
            have_error = True
        if not valid_password(self.password):
            params['error_password'] = "That wasn't a valid password."
            have_error = True
        elif self.password != self.verify:
            params['error_verify'] = "Your passwords didn't match."
            have_error = True
        if not valid_email(self.email):
            params['error_email'] = "That's not a valid email."
            have_error = True
        if have_error:
            self.render('signup-form.html', **params)
        else:
            self.done()
    def done(self, *a, **kw):
        # Subclasses decide what a successful signup does.
        raise NotImplementedError
class Register(Signup):
    """Signup flow that actually creates the account and logs the user in."""
    def done(self):
        #make sure the user doesn't already exist
        u = User.by_name(self.username)
        if u:
            msg = 'That user already exists.'
            self.render('signup-form.html', error_username = msg)
        else:
            u = User.register(self.username, self.password, self.email)
            u.put()
            self.login(u)
            self.redirect('/welcome')
class Login(BlogHandler):
    """Login form; sets the session cookie on success."""
    def get(self):
        self.render('login-form.html')
    def post(self):
        username = self.request.get('username')
        password = self.request.get('password')
        u = User.login(username, password)
        if u:
            self.login(u)
            self.redirect('/welcome')
        else:
            # Deliberately vague: don't reveal which field was wrong.
            msg = 'Invalid login'
            self.render('login-form.html', error = msg)
class Logout(BlogHandler):
    """Clears the session cookie and returns to the signup page."""
    def get(self):
        self.logout()
        self.redirect('/signup')
class Welcome(BlogHandler):
    """Post-login landing page; anonymous visitors are bounced to signup."""
    def get(self):
        # Re-verify the signed cookie on every request.
        uid = self.read_secure_cookie('user_id')
        self.user = uid and User.by_id(int(uid))
        if self.user:
            self.render('welcome.html', username = self.user.name)
        else:
            self.redirect('/signup')
app = webapp2.WSGIApplication([('/', MainPage),
('/signup', Register),
('/login', Login),
('/logout', Logout),
('/welcome', Welcome),
],
debug=True)
| [
"bhavana.manchana@gmail.com"
] | bhavana.manchana@gmail.com |
9e996135c634f8b8973b7bed6a59fa85b5114b64 | 51179de1086d67af16b58d48068822f3534e609b | /vrtkvm/interface.py | 35ba9eebad903d4a2af0f6092dc0860faf5f06d9 | [] | no_license | bad-youngster/webkvm-1 | ffa7d16c9d767a64c064ca3c7541ec3f75669026 | f0ac3308c2ab6cb5a481dac8ae4a1a0e0432d1c4 | refs/heads/master | 2020-04-11T21:59:45.215288 | 2018-12-18T11:29:02 | 2018-12-18T11:29:02 | 162,122,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,580 | py | from vrtkvm import util
from vrtkvm.connection import WvmConnect
from libvirt import VIR_INTERFACE_XML_INACTIVE
class WvmInterfaces(WvmConnect):
    """Host-level operations over libvirt network interfaces."""
    def get_iface_info(self, name):
        # Summarize one interface: type parsed from its XML, state/mac via libvirt.
        iface = self.get_iface(name)
        xml = iface.XMLDesc(0)
        mac = iface.MACString()
        itype = util.get_xml_path(xml, "/interface/@type")
        state = iface.isActive()
        return {'name': name, 'type': itype, 'state': state, 'mac': mac}
    def define_iface(self, xml, flag=0):
        # Register the interface definition with libvirt (does not start it).
        self.wvm.interfaceDefineXML(xml, flag)
    def create_iface(self, name, itype, mode, netdev, ipv4_type, ipv4_addr, ipv4_gw,
                     ipv6_type, ipv6_addr, ipv6_gw, stp, delay):
        """Build interface XML from the arguments, define it, then bring it up.

        ipv4_addr/ipv6_addr are "address/prefix" strings, consulted only when
        the matching *_type is 'static'; stp/delay/netdev apply only when
        itype == 'bridge'.
        """
        xml = """<interface type='%s' name='%s'>
                    <start mode='%s'/>""" % (itype, name, mode)
        if ipv4_type == 'dhcp':
            xml += """<protocol family='ipv4'>
                        <dhcp/>
                      </protocol>"""
        if ipv4_type == 'static':
            address, prefix = ipv4_addr.split('/')
            xml += """<protocol family='ipv4'>
                        <ip address='%s' prefix='%s'/>
                        <route gateway='%s'/>
                      </protocol>""" % (address, prefix, ipv4_gw)
        if ipv6_type == 'dhcp':
            xml += """<protocol family='ipv6'>
                        <dhcp/>
                      </protocol>"""
        if ipv6_type == 'static':
            address, prefix = ipv6_addr.split('/')
            xml += """<protocol family='ipv6'>
                        <ip address='%s' prefix='%s'/>
                        <route gateway='%s'/>
                      </protocol>""" % (address, prefix, ipv6_gw)
        if itype == 'bridge':
            xml += """<bridge stp='%s' delay='%s'>
                        <interface name='%s' type='ethernet'/>
                      </bridge>""" % (stp, delay, netdev)
        xml += """</interface>"""
        self.define_iface(xml)
        iface = self.get_iface(name)
        iface.create()
class WvmInterface(WvmConnect):
    """Wrapper around a single libvirt network interface.

    Fix: the three bare `except:` clauses are narrowed to
    `except Exception:` so KeyboardInterrupt/SystemExit are no longer
    swallowed; libvirtError subclasses Exception, so the intended
    "return None when libvirt can't answer" behavior is preserved.
    """
    def __init__(self, host, login, passwd, conn, iface):
        WvmConnect.__init__(self, host, login, passwd, conn)
        self.iface = self.get_iface(iface)
    def _XMLDesc(self, flags=0):
        # Raw XML description of the interface.
        return self.iface.XMLDesc(flags)
    def get_start_mode(self):
        # Start mode from the inactive (persistent) config; None on failure.
        try:
            xml = self._XMLDesc(VIR_INTERFACE_XML_INACTIVE)
            return util.get_xml_path(xml, "/interface/start/@mode")
        except Exception:
            return None
    def is_active(self):
        return self.iface.isActive()
    def get_mac(self):
        # MAC address string, or None when libvirt reports an empty one.
        mac = self.iface.MACString()
        if mac:
            return mac
        else:
            return None
    def get_type(self):
        # Interface type attribute (e.g. 'bridge', 'ethernet').
        xml = self._XMLDesc()
        return util.get_xml_path(xml, "/interface/@type")
    def get_ipv4_type(self):
        # 'static' if the persistent config has an IPv4 address, else 'dhcp';
        # None when the XML cannot be fetched/parsed.
        try:
            xml = self._XMLDesc(VIR_INTERFACE_XML_INACTIVE)
            ipaddr = util.get_xml_path(xml, "/interface/protocol/ip/@address")
            if ipaddr:
                return 'static'
            else:
                return 'dhcp'
        except Exception:
            return None
    def get_ipv4(self):
        # "address/prefix" for IPv4, or None when either part is missing.
        xml = self._XMLDesc()
        int_ipv4_ip = util.get_xml_path(xml, "/interface/protocol/ip/@address")
        int_ipv4_mask = util.get_xml_path(xml, "/interface/protocol/ip/@prefix")
        if not int_ipv4_ip or not int_ipv4_mask:
            return None
        else:
            return int_ipv4_ip + '/' + int_ipv4_mask
    def get_ipv6_type(self):
        # Same as get_ipv4_type but for the second <protocol> element (IPv6).
        try:
            xml = self._XMLDesc(VIR_INTERFACE_XML_INACTIVE)
            ipaddr = util.get_xml_path(xml, "/interface/protocol[2]/ip/@address")
            if ipaddr:
                return 'static'
            else:
                return 'dhcp'
        except Exception:
            return None
    def get_ipv6(self):
        # "address/prefix" for IPv6, or None when either part is missing.
        xml = self._XMLDesc()
        int_ipv6_ip = util.get_xml_path(xml, "/interface/protocol[2]/ip/@address")
        int_ipv6_mask = util.get_xml_path(xml, "/interface/protocol[2]/ip/@prefix")
        if not int_ipv6_ip or not int_ipv6_mask:
            return None
        else:
            return int_ipv6_ip + '/' + int_ipv6_mask
    def get_bridge(self):
        # Name of the enslaved interface when this is a bridge, else None.
        if self.get_type() == 'bridge':
            xml = self._XMLDesc()
            return util.get_xml_path(xml, "/interface/bridge/interface/@name")
        else:
            return None
    def stop_iface(self):
        self.iface.destroy()
    def start_iface(self):
        self.iface.create()
    def delete_iface(self):
        self.iface.undefine()
| [
"should829@gmail.com"
] | should829@gmail.com |
a8b8512e6ea0ec586187bd2802066aaa9f29d45c | 739ae207b26081d9a6f2981dfc7215ffcfa88ed8 | /Aula_7.py | eb7ff00d600b8bffe821273f09c133f9c3f7b46a | [] | no_license | NicolasGoes/3-Link---8-videos-aulas- | f8132f41ec434b6ed4497aaec757277c0dc77ece | 1a01fba0503a917dad5d017a8faaeb60f7ad91f6 | refs/heads/master | 2022-04-25T00:26:23.676412 | 2020-04-26T06:15:15 | 2020-04-26T06:15:15 | 258,954,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | import tensorflow as tf
import numpy as np
# Part 1: feed fixed-shape placeholders and evaluate a matmul + bias (TF 1.x).
dados_x = np.random.randn(4,8) # 4x8 matrix of random numbers
dados_w = np.random.randn(8,2) # 8x2 matrix of random numbers
b = tf.random_normal((4,2),0,1) # 4x2 random-normal tensor (bias)
x = tf.placeholder(tf.float32, shape=(4, 8))
w = tf.placeholder(tf.float32, shape=(8, 2))
operacao = tf.matmul(x, w) + b
maximo = tf.reduce_max(operacao) # largest entry of the result matrix
with tf.Session() as sess:
    saida1 = sess.run(operacao, feed_dict={x: dados_x, w: dados_w})
    saida2 = sess.run(maximo, feed_dict={x: dados_x, w: dados_w})
    print(saida2)
# Part 2: same idea with shape=(None, None) placeholders so inputs of
# different sizes can be fed through the one graph.
import numpy as np
x1 = np.random.randn(4,8)
w1 = np.random.randn(8,2)
x2 = np.random.randn(4,8)
w2 = np.random.randn(8,2)
x3 = np.random.randn(10,3)
w3 = np.random.randn(3,1)
x4 = np.random.randn(4,8)
w4 = np.random.randn(8,2)
x5 = np.random.randn(4,8)
w5 = np.random.randn(8,2)
lista_x = [x1, x2, x3, x4, x5]
lista_w = [w1, w2, w3, w4, w5]
lista_saida = []
b = tf.random_normal((4,2),0,1) # 4x2 random-normal tensor (unused below)
x = tf.placeholder(tf.float32, shape=(None, None))
w = tf.placeholder(tf.float32, shape=(None, None))
operacao = tf.matmul(x, w)
maximo = tf.reduce_max(operacao) # largest entry of the result matrix
with tf.Session() as sess:
    for i in range(5):
        saida = sess.run(maximo, feed_dict={x: lista_x[i], w: lista_w[i]})
        lista_saida.append(saida)
print(lista_saida) | [
"noreply@github.com"
] | noreply@github.com |
4c689ed8b9f67eb07534fa41d34cc7137c3f53b0 | fa80812c753c81dd76a7cf5343296b465efd1922 | /rate_a_car_app/tests/utils.py | e6a140c7f29829cbaa513062852a56222f29721f | [] | no_license | wojlas/Rate-a-car | a08d60d78530a5b3940872c191469dd571cbf4df | e37becaa5fe7d4d7f8868b3473d6b5e2e3e4a7d3 | refs/heads/main | 2023-08-05T09:48:36.843801 | 2021-10-01T12:44:19 | 2021-10-01T12:44:19 | 395,976,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | from PIL import Image
from django.contrib.auth.models import User
from faker import Faker
import io
from rate_a_car_app.models import CarModel, Brand, Images
faker = Faker("pl_PL")
def fake_user():
    """Build a registration payload from a randomly generated fake profile."""
    profile = faker.simple_profile()
    user = {'username': profile['username']}
    user['password'] = faker.name()
    user['first_name'] = faker.first_name()
    user['last_name'] = faker.last_name()
    user['email'] = faker.email()
    return user
def create_fake_user():
    # Persist a random user through Django's create_user (hashes the password).
    User.objects.create_user(**fake_user())
def create_fake_user_with_second_pass():
    """Return fake_user() data with the password duplicated as password1/2."""
    payload = fake_user()
    pwd = payload['password']
    payload['password2'] = pwd
    payload['password1'] = pwd
    return payload
def fake_brand():
    """Return Brand kwargs with a random name (reuses a profile username)."""
    profile = faker.simple_profile()
    return {'brand': profile['username']}
def create_fake_same_pass():
    """Return matching password1/password2 fields for form-validation tests."""
    pwd = faker.name()
    return dict(password1=pwd, password2=pwd)
def fake_car_data():
    """Random CarModel kwargs; guarantees production_to >= production_from.

    The original drew production_to independently from [0, 2021], so it could
    land before production_from and create nonsensical fixtures.
    """
    production_from = faker.pyint(max_value=1980)
    return {'brand': Brand.objects.first(),
            'model': faker.last_name(),
            'version': faker.last_name(),
            'production_from': production_from,
            'production_to': faker.pyint(min_value=production_from, max_value=2021)}
def create_car_brand():
    # Persist a randomly named Brand.
    Brand.objects.create(**fake_brand())
def create_car_models():
    # Persist a random CarModel (requires a Brand to already exist).
    CarModel.objects.create(**fake_car_data())
def generate_photo_file():
    """Return an in-memory 100x100 red PNG, named test.png, rewound to byte 0."""
    buffer = io.BytesIO()
    Image.new('RGBA', size=(100, 100), color=(155, 0, 0)).save(buffer, 'png')
    buffer.name = 'test.png'
    buffer.seek(0)
    return buffer
def upload_photo():
    # Attach a freshly generated in-memory PNG to the first CarModel
    # fixture in the test database via an Images row.
    model = CarModel.objects.first()
    Images.objects.create(carmodel=model,
image=generate_photo_file()) | [
"wojlas93@gmail.com"
] | wojlas93@gmail.com |
37368cb571c1ae51acfd9efc23f61a86256180a5 | b7a78ea841b307844acf3d240465056319cf4a84 | /ejercicio2.py | 988bfb9023df8d7d40bf7f0fabddbbf27c0daab1 | [] | no_license | victorgld02/ejerciciospython | 4ac831fdfd535a767f732692e54c626e0a3e40ad | f6efee10c32c589163781fc17875fcc8fa21c82f | refs/heads/master | 2022-05-24T02:40:32.699375 | 2020-04-30T15:10:23 | 2020-04-30T15:10:23 | 260,244,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | print ("BIENVENIDO A EMPAREJANDO.COM")
# Prompt for the user's name, birth year and whether they like the band
# "Taburete" (all user-facing text is Spanish and is left unchanged).
nombre= input("Tu nombre:")
ano= int(input( "¿Año de nacimiento?" ))
tegusta= input("¿Te gusta taburete?")
# Age is computed against the hard-coded year 2020.
edad = 2020 - ano
print ("hola", nombre, ". Si no me equivoco tienes", edad, "años.")
# Only "si"/"Si" and "no"/"No" are recognised; any other answer is ignored.
if tegusta == "si" or tegusta == "Si":
    print('OK Boomer, lo tuyo va a ser un caso difícil.')
elif tegusta == "no" or tegusta == "No":
print('Bueno, al menos es un comienzo. Veremos qué se puede hacer contigo.') | [
"victorgld02@gmail.com"
] | victorgld02@gmail.com |
f934c461efd1172ea7582599d83aca716921c911 | dc15c0e28e65c4978bd260bdbe1f562462c12e45 | /test.py | 59e3b1d1d0d89f7a2ac6c65ccb77c00954375743 | [] | no_license | gaybro8777/PMTools | c91ad1e68ff9d9a64da22b1cc2e9e469f0a88dad | c4771b8946373a5b48db7fa00c999c2eac48c885 | refs/heads/master | 2023-07-23T03:06:19.351132 | 2018-05-23T00:54:49 | 2018-05-23T00:54:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | def _getOutNeighbors(g, node):
return [e.target for e in g.es[g.incident(node,mode="OUT")]]
def _countPaths(g, startName, endName, k):
#mark all the nodes per number of times traversed (max of k)
g.vs["pathCountHits"] = 0
#get start node
startNode = g.vs.find(name=startName)
#get immediate out-edge neighbors of START
q = _getOutNeighbors(g, startNode)
pathct = 0
print("out neighbors: "+str(q))
while len(q) > 0:
#pop front node
nodeId = q[0]
node = g.vs[nodeId]
q = q[1:]
#get type of edge pointing to this node; note this detects if any edge pointing to node is LOOP type--unnecessary in our topology (loops only have one entrant edge), but robust
isLoop = [e for e in g.es if e.target == nodeId][0]["type"] == "LOOP"
print("isloop: "+str(isLoop))
print(str(node["pathCountHits"]))
node["pathCountHits"] += 1
if node["name"] == endName:
print("++")
pathct += 1
#append non-loop successors, or loop successors whom we have traversed fewer than k time, to horizon
elif not isLoop or node["pathCountHits"] < k:
q += _getOutNeighbors(g, node)
return pathct
| [
"jesse_waite@hotmail.com"
] | jesse_waite@hotmail.com |
db8694ebf7d5685301e2ad916517b43690b7ac20 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_LOG/test_c142881.py | 017398d281b068be2333be366936895d37d5e8d4 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,288 | py | import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_physical_interface import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_log import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def import *
test_id = 142881
# 先产生安全日志 再删除
def test_c142881(browser):
    """Case 142881: security logs can be generated and then deleted.

    Provokes IP/MAC-binding alerts on Dev1 (via a spoofed IP on Dev2),
    verifies the security-log count is non-zero, deletes the security
    logs, verifies the count drops to zero and that the management log
    records the deletion, then restores the original configuration.
    """
    try:
        login_web(browser, url=dev1)
        # Set the security-log filter level to "all"
        edit_log_filter_lzy(browser, index="3", all='yes', debug='yes/no', info='yes/no', notice='yes/no',
                            warning='yes/no', error='yes/no', critical='yes/no', emerg='yes/no', alert="yes/no")
        # Add an IP/MAC binding for 12.1.1.2 on interface 2
        add_ip_mac_binding_jyl(browser, ip="12.1.1.2", interface=interface_name_2, mac_add="auto_mac")
        # Enable source-MAC binding and alert on undefined hosts
        edit_ip_mac_binding_rule_jyl(browser, interface=interface_name_2, source_mac_binding="enable",
                                     policy_for_undefined_host="alert")
        # Log in to Dev2 and change interface 2's IP
        sign_out_jyl(browser)
        login_web(browser, url=dev2)
        delete_physical_interface_ip_jyl(browser, interface=interface_name_2, ip="12.1.1.2")
        add_physical_interface_static_ip_jyl(browser, interface=interface_name_2, ip='12.1.1.3', mask='24')
        # Ping 12.1.1.1 from Dev2 (presumably triggers the alert on Dev1)
        sleep(1)
        diag_ping(browser, ipadd="12.1.1.1", interface=interface_name_2)
        # Log back in to Dev1
        sign_out_jyl(browser)
        login_web(browser, url=dev1)
        # Security-log count should now be non-zero
        num2 = get_log_counts_lzy(browser, log_type=安全日志)
        print(num2)
        # Delete the security logs
        delete_log(browser, log_type=安全日志)
        # Security-log count should now be zero
        num1 = get_log_counts_lzy(browser, log_type=安全日志)
        print(num1)
        # Fetch the management log (should record the deletion)
        log1 = get_log(browser, 管理日志)
        # Restore original configuration:
        # restore the security-log filter levels (error/critical/emerg/alert)
        edit_log_filter_lzy(browser, index="3", all='yes/no', debug='yes/no', info='yes/no', notice='yes/no',
                            warning='yes/no', error='yes', critical='yes', emerg='yes', alert="yes")
        # Remove the IP/MAC binding
        delete_ip_mac_banding_jyl(browser, ip="12.1.1.2")
        # Restore the IP/MAC rule defaults
        edit_ip_mac_binding_rule_jyl(browser, interface=interface_name_2, source_mac_binding="disenable",
                                     policy_for_undefined_host="allow")
        # Restore interface 2's original IP on Dev2
        sign_out_jyl(browser)
        login_web(browser, url=dev2)
        delete_physical_interface_ip_jyl(browser, interface=interface_name_2, ip="12.1.1.3")
        add_physical_interface_static_ip_jyl(browser, interface=interface_name_2, ip='12.1.1.2', mask='24')
        try:
            # "刪除日志成功" is the management-log entry for a successful deletion
            assert "刪除日志成功" in log1 and num1 == 0 and num2 != 0
            rail_pass(test_run_id, test_id)
        except:
            rail_fail(test_run_id, test_id)
            assert "刪除日志成功" in log1 and num1 == 0 and num2 != 0
    except Exception as err:
        # On any error above, reboot the device to restore its configuration
        print(err)
        reload(hostip=dev1)
        rail_fail(test_run_id, test_id)
        assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"15501866985@163.com"
] | 15501866985@163.com |
11774cab8ab8b849d8287ce7a299505e8750722b | 555377aa073d24896d43d6d20d8f9f588d6c36b8 | /paleomix/common/bamfiles.py | 4b3c6c6f539923c279d04dea982a2307263c0bee | [
"MIT"
] | permissive | jfy133/paleomix | 0688916c21051bb02b263e983d9b9efbe5af5215 | f7f687f6f69b2faedd247a1d289d28657710a8c2 | refs/heads/master | 2022-11-10T18:37:02.178614 | 2020-06-14T12:24:09 | 2020-06-14T12:24:09 | 270,936,768 | 0 | 0 | MIT | 2020-06-09T07:46:19 | 2020-06-09T07:46:18 | null | UTF-8 | Python | false | false | 4,837 | py | #!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import itertools
# BAM flags as defined in the BAM specification
BAM_SUPPLEMENTARY_ALIGNMENT = 0x800
BAM_PCR_DUPLICATE = 0x400
BAM_QUALITY_CONTROL_FAILED = 0x200
BAM_SECONDARY_ALIGNMENT = 0x100
BAM_IS_LAST_SEGMENT = 0x80
BAM_IS_FIRST_SEGMENT = 0x40
BAM_NEXT_IS_REVERSED = 0x20
BAM_READ_IS_REVERSED = 0x10
BAM_NEXT_IS_UNMAPPED = 0x8
BAM_READ_IS_UNMAPPED = 0x4
BAM_PROPER_SEGMENTS = 0x2
BAM_SEGMENTED = 0x1
# Default filters when processing reads
EXCLUDED_FLAGS = (
BAM_SUPPLEMENTARY_ALIGNMENT
| BAM_PCR_DUPLICATE
| BAM_QUALITY_CONTROL_FAILED
| BAM_SECONDARY_ALIGNMENT
| BAM_READ_IS_UNMAPPED
)
class BAMRegionsIter:
    """Iterates over a BAM file, yielding a separate iterator for each contig
    in the BAM, or for each region in the list of regions if these are
    specified; each of these in turn iterates over individual positions. This
    allows for the following pattern when parsing BAM files:

    for region in BAMRegionsIter(handle):
        # Setup per region
        for (position, records) in region:
            # Setup per position
            ...
            # Teardown per position
        # Teardown per region

    The list of regions given to the iterator is expected to be in BED-like
    records (see e.g. paleomix.common.bedtools), with these properties:
      - contig: Name of the contig in the BED file
      - start: 0-based offset for the start of the region
      - end: 1-based offset (i.e. past-the-end) of the region
      - name: The name of the region
    """

    def __init__(self, handle, regions=None, exclude_flags=EXCLUDED_FLAGS):
        """
        - handle: BAM file handle (c.f. module 'pysam')
        - regions: List of BED-like regions (see above)
        - exclude_flags: Bitmask of BAM flags; records matching any of
          these flags are skipped during iteration.
        """
        self._handle = handle
        self._regions = regions
        self._excluded = exclude_flags

    def __iter__(self):
        if self._regions:
            # Explicit regions: fetch each one from the (indexed) BAM file.
            for region in self._regions:
                records = self._handle.fetch(region.contig, region.start, region.end)
                records = self._filter(records)

                tid = self._handle.gettid(region.contig)
                yield _BAMRegion(tid, records, region.name, region.start, region.end)
        else:

            def _by_tid(record):
                """Group by reference ID."""
                return record.tid

            # Save a copy, as these are properties generated upon every access!
            names = self._handle.references
            lengths = self._handle.lengths

            records = self._filter(self._handle)
            records = itertools.groupby(records, key=_by_tid)

            for (tid, items) in records:
                if tid >= 0:
                    name = names[tid]
                    length = lengths[tid]
                else:
                    # Unmapped reads have a negative tid and no contig.
                    name = length = None

                yield _BAMRegion(tid, items, name, 0, length)

    def _filter(self, records):
        """Lazily filters records by flags, if 'exclude_flags' is set."""
        if self._excluded:
            # Generator expression instead of a lambda assignment (PEP 8 E731);
            # hoist the attribute lookup out of the per-record test.
            excluded = self._excluded
            return (record for record in records if not record.flag & excluded)

        return records
class _BAMRegion:
"""Implements iteration over sites in a BAM file. It is assumed that the
BAM file is sorted, and that the input records are from one contig.
"""
def __init__(self, tid, records, name, start, end):
self._records = records
self.tid = tid
self.name = name
self.start = start
self.end = end
def __iter__(self):
def _by_pos(record):
"""Group by position."""
return record.pos
for group in itertools.groupby(self._records, _by_pos):
yield group
| [
"MikkelSch@gmail.com"
] | MikkelSch@gmail.com |
b865747d25a963ea30d051c763b151966b68b592 | 667f153e47aec4ea345ea87591bc4f5d305b10bf | /Solutions/Ch1Ex032.py | 146193ba642a281f4a5d647a77ffee5e055d6028 | [] | no_license | Parshwa-P3/ThePythonWorkbook-Solutions | feb498783d05d0b4e5cbc6cd5961dd1e611f5f52 | 5694cb52e9e9eac2ab14b1a3dcb462cff8501393 | refs/heads/master | 2022-11-15T20:18:53.427665 | 2020-06-28T21:50:48 | 2020-06-28T21:50:48 | 275,670,813 | 1 | 0 | null | 2020-06-28T21:50:49 | 2020-06-28T21:26:01 | Python | UTF-8 | Python | false | false | 489 | py | # Ch1Ex032.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 32
# Title: Sort three numbers
def main():
    """Read whitespace-separated integers from stdin, bubble-sort them,
    and print the sorted sequence on one line."""
    print("Enter numbers: ")
    values = [int(token) for token in input().strip().split()]
    # Classic bubble sort: repeatedly swap adjacent out-of-order pairs.
    for _ in range(len(values) - 1):
        for idx in range(1, len(values)):
            if values[idx - 1] > values[idx]:
                values[idx - 1], values[idx] = values[idx], values[idx - 1]
    print("Sorted: ")
    for value in values:
        print(str(value) + " ", end="")
if __name__ == "__main__": main() | [
"noreply@github.com"
] | noreply@github.com |
45366820b93539dfbb54537e7edb9c697a3fb407 | 53f5ed008da6c9232fa5036e1906846d38869720 | /Django/learning_site/learning_site/wsgi.py | 9b4a5cf76dfae40651e11efdc7be32625931bfd3 | [] | no_license | eusouocristian/learning_python | e3c1cf099f5a8d1489b8166ac8b9f44a423a2ba0 | b150ad6dee566c56c2f35dd099a1b3ea27704475 | refs/heads/master | 2023-04-27T22:22:13.196823 | 2022-11-24T15:40:27 | 2022-11-24T15:40:27 | 172,799,546 | 1 | 0 | null | 2023-04-21T20:55:42 | 2019-02-26T22:17:56 | HTML | UTF-8 | Python | false | false | 403 | py | """
WSGI config for learning_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'learning_site.settings')
application = get_wsgi_application()
| [
"eusouocristian@hotmail.com"
] | eusouocristian@hotmail.com |
5b280285f27573c959aee08c238f3f1b99495f48 | c02b98e6d83d22aa16dc494cb8cc537191dccd8e | /main/animation.py | 2b6854b7fb0d5e4c9caee39c302d1493ad818712 | [] | no_license | Reepca/jcpeter-game | 798d2a09cd227745046f55eac2914de8d8599d60 | 840fd926de74f0e128fd7f17fb1af1bac29145e5 | refs/heads/master | 2021-01-09T06:19:52.338233 | 2017-11-07T16:59:50 | 2017-11-07T16:59:50 | 80,957,530 | 0 | 1 | null | 2017-11-07T05:57:20 | 2017-02-05T00:09:53 | Python | UTF-8 | Python | false | false | 1,536 | py |
# This class is based heavily off of the example at
# https://processing.org/examples/animatedsprite.html
class Animation(object):
def __init__(self, prefix, type, numFrames, displayRate=30):
self.prefix = prefix
self.type = type
self.numFrames = numFrames
self.displayRate = displayRate
self.tempRate = 0
self.currentFrame = 0
self.time = millis()
self.images = []
for img in range(numFrames):
tmp = loadImage(self.prefix + nf(img, 4) + self.type)
self.images.append(tmp)
def display(self, x, y):
image(self.images[self.currentFrame], x, y)
self.__updateFrame()
def flipXDisplay(self, x, y):
pushMatrix()
scale(-1.0, 1.0)
image(self.images[self.currentFrame], -x - self.images[self.currentFrame].width, y);
popMatrix()
self.__updateFrame()
def __updateFrame(self):
timeAtDisplay = millis()
if timeAtDisplay - self.time > 1000 / self.displayRate:
self.currentFrame = (self.currentFrame + 1) % self.numFrames
self.time = timeAtDisplay
def getWidth(self):
return self.images[self.currentFrame].width
def getHeight(self):
return self.images[self.currentFrame].height
def pause(self):
self.tempRate = self.displayRate
self.displayRate = 1
def resume(self):
self.displayRate = self.tempRate | [
"jristvedt@gmail.com"
] | jristvedt@gmail.com |
92ffd6bc7322742b3d8da89f9f43fec5692453de | 4554fcb85e4c8c33a5b5e68ab9f16c580afcab41 | /projecteuler/test_xiaobai_17.py | d9b6678e2bc92bba87fc83a8f6d9bb16ee3c82a9 | [] | no_license | xshen1122/Follow_Huang_Python | 12f4cebd8ddbc241a1c32cfa16288f059b530557 | fcea6d1361aa768fb286e1ef4a22d5c4d0026667 | refs/heads/master | 2021-01-01T04:37:31.081142 | 2017-12-05T07:31:34 | 2017-12-05T07:31:34 | 97,211,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | # test_xiaobai_17.py
# coding: utf-8
'''
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
'''
def getLetter(yourlist):
    """Return the total number of characters across all strings in *yourlist*.

    Replaces the original quadratic string concatenation (building one big
    string only to take its length) with a direct sum of lengths.
    """
    return sum(len(word) for word in yourlist)
def getList(yourlist):
    # Expand a one-element tens list (e.g. ['twenty']) in place by appending
    # the tens word joined with each unit word 'one'..'nine', producing
    # twenty, twentyone, ..., twentynine.  Mutates and returns its argument.
    # NOTE(review): relies on the global `one_digit` defined under the
    # __main__ guard, so this fails with NameError if imported and called
    # before that list exists.
    for item in one_digit[:-1]:
        yourlist.append(yourlist[0]+item)
    return yourlist
if __name__ == '__main__':
one_digit = ['one','two','three','four','five','six','seven','eight','nine','ten']
teenage_digit = ['eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']
twenty_digit = ['twenty']
thirty_digit = ['thirty']
forty_digit = ['forty']
fifty_digit = ['fifty']
sixty_digit = ['sixty']
seventy_digit = ['seventy']
eighty_digit = ['eighty']
ninety_digit = ['ninety']
hundred_digit = ['hundredand']
letter_list = []
letter_list.append(getLetter(one_digit))
letter_list.append(getLetter(getList(twenty_digit)))
letter_list.append(getLetter(getList(thirty_digit)))
letter_list.append(getLetter(getList(forty_digit)))
letter_list.append(getLetter(getList(fifty_digit)))
letter_list.append(getLetter(getList(sixty_digit)))
letter_list.append(getLetter(getList(seventy_digit)))
letter_list.append(getLetter(getList(eighty_digit)))
letter_list.append(getLetter(getList(ninety_digit)))
result = 0
for item in letter_list:
result += item
print result # 1-99 has 787 letters
# 100 - 199 has ??
#以下就按100-199,200-299, 900-999,1000来计数即可
| [
"xueqin.shen@outlook.com"
] | xueqin.shen@outlook.com |
a750bf674ac95fff0489fc65f47ad53a1b8c5bf9 | f48ff7f1372c68330d5e2809eef37613b870e565 | /douban2/pipelines.py | 819dbdde62a9b1cfbf06770e001b362551acfc9c | [] | no_license | Cg110778/douban- | 645033c1090986691e4c66ca533a60e104a89902 | 0bba585b98f7cdbcf52e1d54d9651c55e51c124d | refs/heads/master | 2020-03-27T20:11:19.397265 | 2018-10-05T11:15:08 | 2018-10-05T11:15:08 | 147,046,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,433 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from douban2.items import *
class MongoPipeline(object):
    """Scrapy pipeline persisting douban movie/music/book items to MongoDB.

    Detail items are upserted keyed by their id field; comment items are
    merged into the matching document via $addToSet.
    """

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the MONGO_URI / MONGO_DB settings."""
        settings = crawler.settings
        return cls(mongo_uri=settings.get('MONGO_URI'),
                   mongo_db=settings.get('MONGO_DB'))

    def open_spider(self, spider):
        # One client/database handle for the lifetime of the spider.
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        """Upsert detail items and append comment batches, then pass on."""
        dispatch = (
            ('movie', DoubandetailmovieItem, DoubandetailmoviecommentItem),
            ('music', DoubandetailmusicItem, DoubandetailmusiccommentItem),
            ('book', DoubandetailbookItem, DoubandetailbookcommentItem),
        )
        for kind, detail_cls, comment_cls in dispatch:
            id_key = kind + '_id'
            if isinstance(item, detail_cls):
                # Upsert the detail document keyed by its id.
                self.db[item.collection].update(
                    {id_key: item.get(id_key)}, {'$set': item}, True)
            if isinstance(item, comment_cls):
                # Merge each comment into the document's comment array.
                comment_key = kind + '_comment_info'
                self.db[item.collection].update(
                    {id_key: item.get(id_key)},
                    {'$addToSet':
                        {
                            comment_key: {'$each': item[comment_key]}
                        }
                    }, True)
        return item
| [
"18336962196@163.com"
] | 18336962196@163.com |
348d20f2b5b85a9ad4cb90820aff6d3f9abacd92 | 49f64ff72edb4ab098eccb1893d850d56ae9f41a | /wizard/__init__.py | c5f7f3878b8e0e48922703ba6209ac28556df37c | [] | no_license | gosyspro/zh_mrp | e5dc44e20a7d9a324cdcf96a89a0f62239b725f2 | 9e834d4ccc332515bdacbda9b0c56c5e49f0d8bf | refs/heads/master | 2020-04-23T22:05:24.685690 | 2019-02-19T14:59:42 | 2019-02-19T14:59:42 | 171,490,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | # -*- coding: utf-8 -*-
import zh_mrp_wizard | [
"noreply@github.com"
] | noreply@github.com |
918b130ca63ba126b6cb2012931943d50b57d588 | 5a660b94f7fe42659177d8e52447db7593a87c53 | /blog/blog/settings.py | 4891098cc3504ba33317948ca94e539de6f18b07 | [
"MIT"
] | permissive | Gorun-he/blog | 485643f5d6a9cf895ceadd76af58c52dbb90a664 | 98b86dfaaadc58290e9e37f3e345ac945a6ad1b7 | refs/heads/main | 2023-07-29T19:27:21.968771 | 2021-09-03T02:06:44 | 2021-09-03T02:06:44 | 402,610,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,080 | py | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@3)kg$3@oowom1#&raln0(35srblc(e@k7s-&%!8d3q&tx#0%9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"hejiwei93@126.com"
] | hejiwei93@126.com |
caabff8732edb255684b674dadbd5e62516d970d | e8fe7878b463f975e74029a7f7e9617feb2bb7d6 | /licensekeys/models.py | 4b4d1264aa6b052df876374a32500f071db716a8 | [] | no_license | Farizov/authServer | 85c79cf2ba277a1500e3793d7717cb7a68783ff6 | 4fc89e85d4d0155e687b2414d72ae7bbef0f8ec1 | refs/heads/master | 2023-07-02T09:23:00.001849 | 2021-08-04T05:50:45 | 2021-08-04T05:50:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | from django.db import models
import random
import string
def generate_license():
    """Return a random licence key of the form ``XXX-XXX-XXX-XXX``.

    Each X is an uppercase ASCII letter or digit.  Uses a generator
    expression instead of the original lambda assignment (PEP 8 E731).
    """
    keyset = string.ascii_uppercase + string.digits
    return "-".join("".join(random.choices(keyset, k=3)) for _ in range(4))
# Create your models here.
class LicenseKey(models.Model):
    """Licence key record, bound to a machine once activated."""
    # Randomly generated XXX-XXX-XXX-XXX key; acts as the primary key and is
    # not editable after creation.
    key = models.CharField(max_length=15, default=generate_license, editable=False, primary_key=True)
    # Identifier of the machine the licence is bound to (empty until used).
    hardware_id = models.CharField(max_length=200, blank=True)
    name = models.CharField(max_length=255)
    email = models.EmailField(null=True, blank=True)
    # Timestamp of the most recent use of this key, if any.
    last_activity = models.DateTimeField(blank=True, null=True)
    active = models.BooleanField(default=True) #If the license key is active or has expired
| [
"atulsyan850@gmail.com"
] | atulsyan850@gmail.com |
5a380d07f579329852a0e83a874f250f2cbda60c | 1c2c5240222e48cf6ed617378b23ce12c7f69231 | /backend_pms/asgi.py | 3b584ad3f8d2261c6920ecad850891f9d554084d | [] | no_license | MayowaFunmi/pms_backend | 5537d642a76ce18205f4a40a84a52c0ebfb24d5b | 0ddc8a3718bf54dd5f30394ae18c70653634d79f | refs/heads/master | 2023-02-06T23:26:15.429155 | 2021-01-02T14:39:06 | 2021-01-02T14:39:06 | 322,831,233 | 0 | 0 | null | 2021-01-02T13:30:05 | 2020-12-19T11:26:19 | Python | UTF-8 | Python | false | false | 399 | py | """
ASGI config for backend_pms project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend_pms.settings')
application = get_asgi_application()
| [
"akinade.mayowa@gmail.com"
] | akinade.mayowa@gmail.com |
0633702d1bac349dea40d1d2627459ac5afa4fdc | 49b0316c236e7baf15a1c2e619ae27225db78265 | /lab5/lab5.py | 484b2d2ad2c805caa397771b0a5d5cd36455275c | [] | no_license | rexxy-sasori/ECE420 | 616bea55bd3ef66d35655009c0d9ed45910ce373 | 548d1bf59e63c318b9414a7c0556be6ce48bf31f | refs/heads/master | 2021-06-11T06:29:57.066798 | 2017-03-17T05:58:22 | 2017-03-17T05:58:22 | 79,028,868 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | import numpy as np
from numpy.fft import fft
import matplotlib.pyplot as plt
import scipy.io.wavfile as spwav
from mpldatacursor import datacursor
import sys
from scipy import signal
from IPython.display import Audio
plt.style.use('ggplot')
# Note: this epoch list only holds for "test_vector_all_voiced.wav"
epoch_marks_orig = np.load("test_vector_all_voiced_epochs.npy")
F_s, audio_data = spwav.read("test_vector_all_voiced.wav")
N = len(audio_data)
######################## YOUR CODE HERE ##############################
#plt.plot(audio_data)
#plt.show()
F_new = 441
new_epoch_spacing = F_s / F_new
audio_out = np.zeros(N)
#print(epoch_marks_orig)
def find_nearest(array, value):
    """Return (nearest_element, index) for the entry of *array* closest to *value*."""
    distances = np.abs(array - value)
    nearest_index = distances.argmin()
    return array[nearest_index], nearest_index
# Suggested loop
# PSOLA-style resynthesis: place one windowed pitch period every `space`
# samples to force a constant output pitch of F_new.
space = (int)(new_epoch_spacing)
for i in range(space,N-space,space):
    # Nearest original epoch (pitch mark) to the target synthesis position.
    closest,closest_idx = find_nearest(epoch_marks_orig,i)
    # Estimate the local half pitch period p0 from neighbouring epochs.
    # NOTE(review): for closest_idx == 0 the epoch *position* itself is used
    # as p0, which looks suspicious — confirm intended behaviour.
    if closest_idx == 0:
        p0 = closest
    elif closest_idx == len(epoch_marks_orig) - 1 or closest_idx == len(epoch_marks_orig):
        p0 = (int)((epoch_marks_orig[closest_idx]-epoch_marks_orig[closest_idx-1])/2)
    else:
        p0 = (int)((epoch_marks_orig[closest_idx+1]-epoch_marks_orig[closest_idx-1])/2)
    #'''
    # Extract a two-sided window of one period around the chosen epoch.
    start_window = closest - p0
    end_window = closest + p0 + 1
    current_impulse = audio_data[start_window:end_window]
    print("closest",closest,
          "current new spacing",i,
          "current p0",p0,
          "current size",len(current_impulse))
    # Taper the extracted period with a triangular window before overlap-add.
    current_impulse = signal.triang(len(current_impulse)) * current_impulse
    # Overlap-add the windowed period at the new epoch position, clamping
    # the destination range to the output buffer's bounds.
    start_accept = i - p0
    end_accept = i + p0 + 1
    if start_accept < 0:
        start_accept = 0
        end_accept = len(current_impulse)
    if end_accept > N:
        end_accept = N
        start_accept = N - len(current_impulse)
    audio_out[start_accept:end_accept] += current_impulse
    #'''
spwav.write("audio_out.wav", rate = F_s, data = audio_out)
plt.plot(audio_data,'g')
plt.hold(True)
plt.plot(audio_out,'b')
plt.show()
| [
"963136786@qq.com"
] | 963136786@qq.com |
e695473d9f847ac429d6eccab9bf55d07f86d876 | c3bfa9a02f15b3a0378c952ccd66eb6bdbe2b066 | /Multi_CorrFunc.py | 66fac65ba411275fc705c71b44df3dc4f5a5a332 | [] | no_license | DriftingPig/eBOSS-ELG | 22e1ebfe6d7e852cb2698f3e9081b803b5c90c38 | f9d3bce5251e5559c5e3878131679d0746d81f84 | refs/heads/master | 2021-06-22T00:20:39.986994 | 2017-08-29T00:35:55 | 2017-08-29T00:35:55 | 101,699,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,077 | py | #DD_i.txt,DR_i.txt,RR_i.txt
Sep_interval = 40
import numpy as np
import math
import numpy.polynomial.legendre as lgd
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
filename = 'NewData_subfiles_chunk23_wCut.3_upweight'
def JKnife_CorrFunc(Njob, Jacknife=-1,k0=1, order=0,name = filename):
    """Assemble the Landy-Szalay correlation function from per-subregion
    pair-count files, optionally leaving one subregion out (jackknife).

    Njob     -- number of sky subregions the catalogue was split into
    Jacknife -- index of the subregion to exclude, or -1 to use all pairs
    name     -- directory holding the DiDj / DiRj / RiRj pair-count files

    Returns a (Sep_interval x 100) table: xi(s, mu) for each separation bin
    and angular (mu) bin, computed as (DD - 2DR + RR) / RR with each count
    normalised by its total number of pairs.
    """
   # pp = PdfPages('Correlation_Function_of_order_No'+str(order)+'.pdf')
   # b2=np.loadtxt('xi0isoChallenge_matterpower6.0.dat').transpose()
   # plt.plot(b2[0],b2[1]*b2[0]*b2[0]*k0)
    # Build the pair-count file names: DD and RR use unordered pairs (j >= i),
    # DR uses all ordered pairs.
    filenameDD=[]
    filenameDR=[]
    filenameRR=[]
    for i in range(0,Njob):
        for j in range(i,Njob):
            filenameDD.append('./'+name+'/D'+str(i)+'D'+str(j)+'.txt')
    for i in range(0,Njob):
        for j in range(0,Njob):
            filenameDR.append('./'+name+'/D'+str(i)+'R'+str(j)+'.txt')
    for i in range(0,Njob):
        for j in range(i,Njob):
            filenameRR.append('./'+name+'/R'+str(i)+'R'+str(j)+'.txt')
    FilelistDD = []
    FilelistRR = []
    FilelistDR = []
    COUNT = 0
    # Zero-filled placeholder table used for any pair involving the
    # jackknifed subregion (100 mu bins per separation bin).
    void_pair_line = [0]*100 #interval of angular scales
    void_pair = []
    for i in range(0,Sep_interval):
        void_pair.append(void_pair_line)
    # Load DD counts, substituting zeros for pairs touching the excluded region.
    for i in range(0,Njob):
        for j in range(i,Njob):
            if i!=Jacknife and j!=Jacknife :
                aDD = np.loadtxt(filenameDD[COUNT])
                FilelistDD.append(aDD)
            else:
                FilelistDD.append(void_pair)
            COUNT+=1
    COUNT = 0
    # Same for DR counts.
    for i in range(0,Njob):
        for j in range(0,Njob):
            if i!=Jacknife and j!=Jacknife :
                aDR = np.loadtxt(filenameDR[COUNT])
                FilelistDR.append(aDR)
            else:
                FilelistDR.append(void_pair)
            COUNT+=1
    COUNT = 0
    # Same for RR counts.
    for i in range(0,Njob):
        for j in range(i,Njob):
            if i!=Jacknife and j!=Jacknife :
                aRR = np.loadtxt(filenameRR[COUNT])
                FilelistRR.append(aRR)
            else:
                FilelistRR.append(void_pair)
            COUNT+=1
    # Sum the per-file tables element-wise into one DD, DR and RR table.
    # NOTE(review): the tables are sized from aDD, the *last* file loaded.
    DD_total=[]
    DR_total=[]
    RR_total=[]
    for i in range(0,len(aDD[0])):
        DD_total.append([0]*len(aDD.transpose()))
        DR_total.append([0]*len(aDD.transpose()))
        RR_total.append([0]*len(aDD.transpose()))
    for i in range(0,(Njob+1)*Njob/2):
        for j in range(0,len(aDD)):
            for k in range(0,len(aDD.transpose())):
                DD_total[j][k]+=FilelistDD[i][j][k]
    for i in range(0,Njob*Njob):
        for j in range(0,len(aDD)):
            for k in range(0,len(aDD.transpose())):
                DR_total[j][k]+=FilelistDR[i][j][k]
    for i in range(0,Njob*(Njob+1)/2):
        for j in range(0,len(aDD)):
            for k in range(0,len(aDD.transpose())):
                RR_total[j][k]+=FilelistRR[i][j][k]
    # Total pair counts per subregion pair; columns 3/4 hold the two region
    # indices, so rows touching the excluded region are skipped.
    TotalPoints=np.loadtxt('./'+name+'/totalpoints.txt')
    DD_total_num=0
    DR_total_num=0
    RR_total_num=0
    for i in range(0,len(TotalPoints)):
        if TotalPoints[i][3]!=Jacknife and TotalPoints[i][4]!=Jacknife :
            DD_total_num+=TotalPoints[i][0]
            DR_total_num+=TotalPoints[i][1]
            RR_total_num+=TotalPoints[i][2]
    print 'DD_total_num='+str(DD_total_num)+' DR_total_num'+str(DR_total_num)+' RR_total_num'+str(RR_total_num)
    # Landy-Szalay estimator per (separation, mu) bin:
    # xi = (DD/NDD - 2*DR/NDR + RR/NRR) / (RR/NRR)
    Final_total=[]
    for i in range(0,len(aDD)):
        Final_total.append([0]*(len(aDD.transpose())))
    #print str(Final_total[0][0])
    for i in range(0,len(aDD)):
        for j in range(0,len(aDD.transpose())):
            Final_total[i][j] = ((DD_total[i][j]/DD_total_num)-(DR_total[i][j]/DR_total_num)*2+(RR_total[i][j]/RR_total_num))/((RR_total[i][j]/RR_total_num))
    return Final_total
def CorrFunc(Njob,k0=1, order=0,name_corr = filename):
    """Compute and plot s^2 * xi_order(s) with jackknife error bars.

    Integrates xi(s, mu) over mu against the Legendre polynomial of the
    requested order, estimates errors from Njob leave-one-out jackknife
    samples, writes (s, s^2*xi, err) to ./data/<name_corr>.txt, and
    overlays the theory curve before showing the figure.
    """
    Final_total = []
    # Full-sample correlation function (Jacknife=-1 excludes nothing).
    Final_total = JKnife_CorrFunc(Njob,-1,k0,order,name = name_corr)
    # Legendre coefficient vector selecting polynomial `order`.
    d=[0]*len(Final_total[0])
    d[order]+=1
    b=[0]*len(Final_total)
    # Multipole integration over the 100 mu bins (d(mu) = 0.01).
    # NOTE(review): legval is evaluated at the correlation value itself
    # rather than at mu -- looks suspicious; confirm against the binning
    # convention used when the pair counts were produced.
    for i in range(0,len(Final_total)):
        for j in range(0,len(Final_total[0])):
            b[i]=b[i]+0.01*Final_total[i][j]*lgd.legval(Final_total[i][j],d)
        b[i]=(2*order+1)*b[i]
    # Bin centres: 5 Mpc wide separation bins.
    c=[(i+0.5)*5 for i in range(0,len(Final_total))]
    # Plot s^2 * xi(s) to flatten the dynamic range.
    for i in range (0,len(Final_total)):
        b[i]=b[i]*c[i]*c[i]
    # Leave-one-out jackknife samples for the error estimate.
    Jacknife_list = []
    for i in range(0,Njob):
        Jacknife_list.append(JKnife_CorrFunc(Njob,i,k0,order,name = name_corr))
    CorrFun_Err_temp = [0]*len(Final_total)
    CorrFun_Err = [0]*len(Final_total)
    # Accumulate squared deviations of each jackknife multipole from the
    # full-sample value (CorrFun_Err_temp is reset per sample/bin).
    for k in range(0,Njob):
        for i in range(0,len(Final_total)):
            for j in range(0,len(Final_total[0])):
                CorrFun_Err_temp[i]+=0.01*Jacknife_list[k][i][j]*lgd.legval(Jacknife_list[k][i][j],d)
            CorrFun_Err_temp[i] = (2*order+1)*CorrFun_Err_temp[i]
            CorrFun_Err_temp[i]=c[i]*c[i]*CorrFun_Err_temp[i]
            CorrFun_Err[i]+=(CorrFun_Err_temp[i]-b[i])*(CorrFun_Err_temp[i]-b[i])
            CorrFun_Err_temp[i]=0
    # Standard jackknife variance scaling: (N-1)/N times the sum of squares.
    for i in range (0,len(Final_total)):
        CorrFun_Err[i]=math.sqrt(CorrFun_Err[i]*(Njob-1)/Njob)
    plt.errorbar(c,b,yerr=CorrFun_Err)
    # Persist (separation, s^2*xi, error) for later use.
    f = open('./data/'+name_corr+'.txt','w')
    for i in range(0,len(c)):
        f.write(str(c[i])+' '+str(b[i])+' '+str(CorrFun_Err[i])+'\n')
    f.close()
    # Overlay the scaled theory prediction.
    b2=np.loadtxt('xi0isoChallenge_matterpower6.0.dat').transpose()
    plt.plot(b2[0],b2[1]*b2[0]*b2[0]*k0)
    plt.xlabel('Mpc',size=16)
    plt.show()
    return True
def CorrFunc_Add(Njob=20,k0=1, order=0):
    """Overlay the theory curve and the correlation functions of all three
    survey chunks in a single figure."""
    theory = np.loadtxt('xi0isoChallenge_matterpower6.0.dat').transpose()
    plt.plot(theory[0], theory[1] * theory[0] * theory[0] * k0)
    plt.xlabel('Mpc', size=16)
    # Same call as before, once per chunk.
    for chunk_name in ('NewData_subfiles_chunk21',
                       'NewData_subfiles_chunk22',
                       'NewData_subfiles_chunk23'):
        CorrFunc(Njob, name_corr=chunk_name)
    plt.show()
def CorrFunc_ALL_sub(index,Njob=20,k0=1,order=0):
    """Plot (without error bars) the requested multipole of the correlation
    function computed from jackknife subsample `index`.

    Same reduction as CorrFunc, but for a single subsample and plot-only.
    """
    Final_total = []
    Final_total = JKnife_CorrFunc(Njob,index,k0,order)
    # Coefficient vector selecting only the requested Legendre order.
    d=[0]*len(Final_total[0])
    d[order]+=1
    b=[0]*len(Final_total)
    for i in range(0,len(Final_total)):
        for j in range(0,len(Final_total[0])):
            b[i]=b[i]+0.01*Final_total[i][j]*lgd.legval(Final_total[i][j],d)
        b[i]=(2*order+1)*b[i]
    # Radial bin centers (5 Mpc bins), then scale by r^2 for plotting.
    c=[(i+0.5)*5 for i in range(0,len(Final_total))]
    for i in range (0,len(Final_total)):
        b[i]=b[i]*c[i]*c[i]
    plt.plot(c,b)
    return True
def CorrFunc_ALL(Njob=20):
    """Plot the correlation function of each of the 20 jackknife subsamples
    on one figure and display it."""
    for subsample in range(20):
        CorrFunc_ALL_sub(subsample, Njob)
    plt.show()
    return True
def JKnife_show(index):
    """Interactively display the correlation function of jackknife subsample
    `index`, wait for the user to press Enter, then clear the figure.

    NOTE: this file uses Python 2 print statements.
    """
    import sys
    CorrFunc_ALL_sub(index)
    plt.show()
    print 'continue?'
    sys.stdin.readline()
    plt.clf()
    return True
def weighed_tot(k0=1):
    """Combine the three chunk correlation functions with inverse-variance
    weights, plot them against the theory curve (amplitude scaled by `k0`),
    save the figure to a PDF and write the combined curve to disk.

    Each input file has columns: bin center, value, error.
    Returns True on completion.
    """
    chunk21 = np.loadtxt('./data/NewData_subfiles_chunk21_wCut.3_upweight.txt').transpose()
    chunk22 = np.loadtxt('./data/NewData_subfiles_chunk22_wCut.3_upweight.txt').transpose()
    chunk23 = np.loadtxt('./data/NewData_subfiles_chunk23_wCut.3_upweight.txt').transpose()
    chunk_tot = [0]*len(chunk21[0])
    chunk_err_tot = [0]*len(chunk21[0])
    # Inverse-variance weighted mean of the three chunks, bin by bin.
    for i in range(0,len(chunk_tot)):
        chunk_tot[i]+=chunk21[1][i]/chunk21[2][i]**(2)
        chunk_tot[i]+=chunk22[1][i]/chunk22[2][i]**(2)
        chunk_tot[i]+=chunk23[1][i]/chunk23[2][i]**(2)
        chunk_tot[i]=chunk_tot[i]/(chunk21[2][i]**(-2)+chunk22[2][i]**(-2)+chunk23[2][i]**(-2))
        chunk_err_tot[i] = (chunk21[2][i]**(-2)+chunk22[2][i]**(-2)+chunk23[2][i]**(-2))**(-0.5)
    p21 = plt.errorbar(chunk21[0],chunk21[1],yerr=chunk21[2])
    p22 = plt.errorbar(chunk22[0],chunk22[1],yerr=chunk22[2])
    p23 = plt.errorbar(chunk23[0],chunk23[1],yerr=chunk23[2])
    plt.axis([0,200,-100,100])
    ptot = plt.errorbar(chunk21[0],chunk_tot,yerr=chunk_err_tot)
    b2 = np.loadtxt('xi0isoChallenge_matterpower6.0.dat').transpose()
    ptheory = plt.plot(b2[0],b2[1]*b2[0]*b2[0]*k0)
    plt.legend((p21, p22,p23,ptot,ptheory), ('chunk21', 'chunk22','chunk23','total','theory'))
    from matplotlib.backends.backend_pdf import PdfPages
    with PdfPages('./data/output/combined_wCut.3') as pdf:
        plt.title('combined correlation function for three chunks')
        plt.xlabel('Mpc')
        pdf.savefig()
        plt.show()
    # Fix: the output file was previously opened without ever being closed;
    # the with-block guarantees the data is flushed to disk.
    with open('./data/combined_wCut.3_upweight.txt','w') as f:
        for i in range(0,len(chunk21[0])):
            f.write(str(chunk21[0][i])+' '+str(chunk_tot[i])+' '+str(chunk_err_tot[i])+'\n')
    return True
if __name__=='__main__':
    # Plot each chunk's correlation function as a faint line with a shaded
    # error band, overlay the combined curve in black, and save the figure
    # to ./data/CorrelationFunction.pdf.
    chunk21 = np.loadtxt('./data/NewData_subfiles_chunk21_wCut.3.txt').transpose()
    chunk22 = np.loadtxt('./data/NewData_subfiles_chunk22_wCut.3.txt').transpose()
    chunk23 = np.loadtxt('./data/NewData_subfiles_chunk23_wCut.3.txt').transpose()
    chunkcb = np.loadtxt('./data/combined_wCut.3.txt').transpose()
    # Row 0: bin centers, row 1: correlation value, row 2: error.
    plt.plot(chunk21[0],chunk21[1],color = 'red',alpha=0.3)
    plt.fill_between(chunk21[0],chunk21[1]-chunk21[2],chunk21[1]+chunk21[2],color = 'salmon',alpha=0.3)
    plt.plot(chunk22[0],chunk22[1],color = 'green',alpha=0.3)
    plt.fill_between(chunk22[0],chunk22[1]-chunk22[2],chunk22[1]+chunk22[2],color = 'lime',alpha=0.3)
    plt.plot(chunk23[0],chunk23[1],color = 'mediumblue',alpha=0.3)
    plt.fill_between(chunk23[0],chunk23[1]-chunk23[2],chunk23[1]+chunk23[2],color = 'blue',alpha=0.3)
    plt.plot(chunkcb[0],chunkcb[1],color = 'black')
    plt.fill_between(chunkcb[0],chunkcb[1]-chunkcb[2],chunkcb[1]+chunkcb[2],color = 'k',alpha=0.6)
    import matplotlib.patches as mpatches
    # Manual legend entries (fill_between artists are not auto-labeled).
    red_patch = mpatches.Patch(color='red', label='chunk21')
    green_patch = mpatches.Patch(color='green', label='chunk22')
    blue_patch = mpatches.Patch(color='blue', label= 'chunk23')
    black_patch = mpatches.Patch(color='black',label='combined')
    plt.legend(handles=[red_patch,green_patch,blue_patch,black_patch])
    from matplotlib.backends.backend_pdf import PdfPages
    with PdfPages('./data/CorrelationFunction.pdf') as pdf:
        plt.title('Correlation Function of three chunks and their combination')
        plt.xlabel('Mpc')
        plt.axis([0,200,-50,100])
        pdf.savefig()
        plt.show()
        # pdf.close() is redundant inside the with-block but harmless.
        pdf.close()
| [
"huikong@cori08-bond0.224"
] | huikong@cori08-bond0.224 |
f05395dab70d8c66fb5f4715c9ec88b1cf9dcd55 | baaa0a58293df059d81df9fa304a2633cf2c0ca2 | /xtraRep/createWorkoutPlan.py | 0eaa5ef68c9cb38bdbf6aef9248f63cdce5d4c00 | [] | no_license | alex-waldron/newXtraRepWebsite | 6e089b17144bd2a8559b352480bfbc5d26b064f6 | e60ed5566bd14eedf57a8797c6dc812efa86de74 | refs/heads/master | 2023-02-17T06:17:39.568981 | 2021-01-13T15:52:49 | 2021-01-13T15:52:49 | 329,356,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,787 | py | import functools
import click
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from . import mySQLdb
import mysql.connector
from datetime import datetime
from xtraRep.auth import login_required
bp = Blueprint('createWorkoutPlan', __name__,
url_prefix='/create-workout-plan')
muscleGroups = ['chest', 'back', 'triceps', 'biceps', 'shoulders', 'legs']
workoutdb = mySQLdb.get_mySQLdb()
WORKOUT_PLAN_IMAGE_FOLDER = "xtraRep/static/images/workoutPlanImages/"
WORKOUT_IMAGE_FOLDER = "xtraRep/static/images/workoutImages/"
EXERCISE_IMAGE_FOLDER = "xtraRep/static/images/exerciseImages/"
class SingleWorkout:
    """A single workout authored by a user.

    Plain data holder: workout name, targeted muscle groups, owning user,
    and a creation timestamp captured at instantiation time.
    """

    def __init__(self, name, musclesWorked, user):
        # Timestamp is taken once, when the object is built.
        self.dateCreated = datetime.now()
        self.name = name
        self.user = user
        self.musclesWorked = musclesWorked
class Exercise:
    """One exercise entry inside a workout.

    Plain data holder: exercise name, set/rep prescription, a picture
    reference, and free-form coaching notes.  No validation is performed.
    """

    def __init__(self, exerciseName, sets, reps, picOfExercise, notes):
        self.notes = notes
        self.picOfExercise = picOfExercise
        self.reps = reps
        self.sets = sets
        self.exerciseName = exerciseName
def getFileExtension(file):
    """Return the text after the last '.' in the uploaded file's name.

    When the name contains no dot, the whole name is returned (matching
    the historical ``split('.')[-1]`` behavior).
    """
    return file.filename.rpartition('.')[2]
@bp.route('/', methods=('GET', 'POST'))
@login_required
def createWorkoutPlanHome():
    # Landing page for the workout-plan builder; rendering only, no form handling.
    return render_template('createWorkoutPlan/createWorkoutPlanHome.html')
@bp.route('/get-started', methods=('GET', 'POST'))
@login_required
def getStarted():
    """Create a new workout plan from the submitted form.

    On POST: inserts the plan row, saves the uploaded cover picture under a
    name derived from the new row id, records the picture path, and redirects
    to the day-by-day plan editor.  On GET: renders the creation form.
    """
    if request.method == 'POST':
        workoutPlanName = request.form['workoutPlanName']
        workoutPlanDescription = request.form['workoutPlanDescription']
        programLength = request.form['programLength']
        programDifficulty = request.form['difficulty']
        workoutPlanPic = request.files['workoutPlanPicture']
        workoutPlanPicExt = getFileExtension(workoutPlanPic)
        db = mySQLdb.get_mySQLdb()
        cur = db.cursor()
        cur.execute(
            'INSERT INTO workoutPlans (user_id, workoutPlanName, workoutPlanDescription, programLength, programDifficulty) VALUES (%s,%s,%s,%s,%s)',
            (g.user['id'], workoutPlanName, workoutPlanDescription,
             programLength, programDifficulty)
        )
        db.commit()
        cur.execute(
            'SELECT LAST_INSERT_ID()'
        )
        workoutPlanId = cur.fetchone()[0]
        filename = str(workoutPlanId) + "." + workoutPlanPicExt
        mediaLoc = WORKOUT_PLAN_IMAGE_FOLDER + filename
        # Parameterized query: the extension comes from a user-supplied
        # filename, so never interpolate it into the SQL text.
        cur.execute(
            'UPDATE workoutPlans SET mediaLoc = %s WHERE id = %s',
            (mediaLoc, workoutPlanId)
        )
        db.commit()
        workoutPlanPic.save(WORKOUT_PLAN_IMAGE_FOLDER + filename)
        return redirect(url_for('createWorkoutPlan.addWorkoutsToPlan', workoutPlanId=workoutPlanId))
    return render_template('createWorkoutPlan/getStarted.html')
@bp.route('/add-workouts-to-plan/<int:workoutPlanId>')
@login_required
def addWorkoutsToPlan(workoutPlanId):
    """Render the plan's day-grid, marking days that already have a workout."""
    # CREATE LAYOUT FOR PROGRAM WHERE EACH DAY CAN BE CLICKED ON AND CREATED/EDITED
    db = mySQLdb.get_mySQLdb()
    cur = db.cursor()
    # Parameterized queries instead of str.format: keep values out of the SQL text.
    cur.execute(
        "SELECT programLength, workoutPlanName FROM workoutPlans WHERE id = %s",
        (workoutPlanId,)
    )
    queryResult = cur.fetchone()
    programLength = queryResult[0]
    workoutPlanName = queryResult[1]
    cur.execute(
        "SELECT dayForWorkout FROM workouts WHERE workoutPlanId = %s",
        (workoutPlanId,)
    )
    daysCompleted = cur.fetchall()
    # Flatten the 1-column result rows into a plain list of day numbers.
    adjustedList = [day[0] for day in daysCompleted]
    return render_template('createWorkoutPlan/addWorkoutsToPlan.html', numOfRows=int(programLength/8)+1, programLength=programLength, workoutPlanId=workoutPlanId, daysCompleted = adjustedList, workoutPlanName=workoutPlanName)
@bp.route('/add-workout/<int:day>/<int:workoutPlanId>', methods=('GET', 'POST'))
@login_required
def addWorkout(day, workoutPlanId):
    """Create the workout for one day of a plan.

    On POST: inserts the workout row, saves the uploaded picture under a
    name derived from the new row id, records the picture path, then
    redirects to the exercise editor.  On GET: renders the form.
    """
    if request.method == 'POST':
        db = mySQLdb.get_mySQLdb()
        cursor = db.cursor()
        workoutPic = request.files['workoutPicture']
        workoutPicExtension = getFileExtension(workoutPic)
        nameOfWorkout = request.form["nameOfWorkout"]
        workoutDescription = request.form['workoutDescription']
        musclesWorked = request.form.getlist('musclesWorked')
        # Stored as a comma-separated string with a trailing comma (legacy format).
        muscleWorkString = ""
        for muscle in musclesWorked:
            muscleWorkString = muscleWorkString + muscle + ","
        cursor.execute(
            'INSERT INTO workouts (user_id, workoutPlanId, dayForWorkout, workoutName, workoutDescription, musclesWorked) VALUES (%s,%s,%s,%s,%s,%s)',
            (g.user['id'], workoutPlanId, day, nameOfWorkout,
             workoutDescription, muscleWorkString)
        )
        db.commit()
        cursor.execute(
            'SELECT LAST_INSERT_ID()'
        )
        workoutId = cursor.fetchone()[0]
        saveFileLocation = WORKOUT_IMAGE_FOLDER + str(workoutId) + "." + workoutPicExtension
        workoutPic.save(saveFileLocation)
        # Parameterized query: the file extension originates from user input.
        cursor.execute(
            'UPDATE workouts SET mediaLoc = %s WHERE id = %s',
            (saveFileLocation, workoutId)
        )
        db.commit()
        return redirect(url_for('createWorkoutPlan.addExercises', workout_id=workoutId, workoutPlanId=workoutPlanId))
    return render_template('createWorkoutPlan/addWorkout.html')
@bp.route('/add-exercises/<int:workout_id>/<int:workoutPlanId>', methods=("GET", "POST"))
@login_required
def addExercises(workout_id, workoutPlanId):
if request.method == 'POST':
db = mySQLdb.get_mySQLdb()
cursor = db.cursor()
numExercises = 1
while True:
try:
request.form['exerciseName{}'.format(numExercises)]
except:
break
exerciseName = request.form['exerciseName{}'.format(numExercises)]
#picOfExercise = request.form['picOfExercise{}'.format(numExercises)]
sets = request.form.get('sets{}'.format(numExercises))
pic = request.files['picOfExercise{}'.format(numExercises)]
picExt = getFileExtension(pic)
reps = request.form['reps{}'.format(numExercises)]
notes = request.form['notesOnExercise{}'.format(numExercises)]
cursor.execute(
'INSERT INTO exercises (workout_id, exerciseName, numOfSets, reps, notes) VALUES (%s,%s,%s,%s,%s)',
(workout_id, exerciseName, sets, reps, notes)
)
db.commit()
cursor.execute(
'SELECT LAST_INSERT_ID()'
)
exerciseId = cursor.fetchone()[0]
saveFileLoc = EXERCISE_IMAGE_FOLDER + str(exerciseId) + "." + picExt
cursor.execute(
'UPDATE exercises SET mediaLoc = {0} WHERE id={1}'.format(
'"' + saveFileLoc + '"', exerciseId)
)
db.commit()
pic.save(saveFileLoc)
numExercises = numExercises + 1
return redirect(url_for('createWorkoutPlan.addWorkoutsToPlan', workoutPlanId=workoutPlanId))
return render_template('createWorkoutPlan/addExercises.html')
@bp.route('load-user-workout-plans', methods=('GET', 'POST'))
@login_required
def loadUserWorkoutPlan():
    """List the current user's workout plans; on POST, open the selected one."""
    if request.method == 'POST':
        workoutPlanIdSelection = request.form["workoutPlanSelection"]
        return redirect(url_for("createWorkoutPlan.addWorkoutsToPlan", workoutPlanId=workoutPlanIdSelection))
    userId = g.user['id']
    db = mySQLdb.get_mySQLdb()
    cur = db.cursor(dictionary=True)
    # Parameterized query; the driver handles quoting of the user id value.
    cur.execute(
        "SELECT * FROM workoutPlans WHERE user_id=%s",
        (str(userId),)
    )
    return render_template('createWorkoutPlan/loadUserWorkoutPlans.html', userWorkoutPlanList = cur.fetchall())
| [
"awaldro1@stevens.edu"
] | awaldro1@stevens.edu |
aaa85ecdd7c711cc27222f29f7030135a3685945 | 39172cfbe73dad73ac86b783aa6fa81914618905 | /9_4_미래도시.py | 6c0e392d37d8c61162e7cb11962d71f19cb4b1ad | [] | no_license | mouseking-sw/CodingTest | 59398405b85abd1aa6f8c3bc83e9c2788eddabb8 | 351001ea819253704f547fae6afe63787885e816 | refs/heads/master | 2022-12-29T05:40:02.310822 | 2020-10-15T01:12:10 | 2020-10-15T01:12:10 | 291,616,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | n,m= map(int,input().split())
INF = int(1e9)
# Adjacency matrix over nodes 1..n.  Fix: columns were sized m+1, which
# breaks (or silently misbehaves) whenever the edge count differs from n.
graph = [[INF] * (n + 1) for _ in range(n + 1)]
# Distance from a node to itself is 0.  (The original compared the node
# index against a whole row list, so these zeros were never written.)
for a in range(1, n + 1):
    graph[a][a] = 0
for _ in range(m):
    a, b = map(int, input().split())
    graph[a][b] = 1
    graph[b][a] = 1
# Read the node k to pass through and the final destination x.
x, k = map(int, input().split())
# Floyd-Warshall.  Fix: the pivot gets its own name so it no longer
# clobbers k before graph[1][k] is read below.
for via in range(1, n + 1):
    for a in range(1, n + 1):
        for b in range(1, n + 1):
            graph[a][b] = min(graph[a][b], graph[a][via] + graph[via][b])
# Shortest 1 -> k -> x route; unreachable if either leg is still INF.
distance = graph[1][k] + graph[k][x]
if distance >= INF:
    print("-1")
else:
    print(distance)
| [
"sang-won@josang-won-ui-MacBookPro.local"
] | sang-won@josang-won-ui-MacBookPro.local |
8ec80dcdcd24378590cd4ec62ca17a81bc5666c9 | 9fc901cd0365aaaca0c7055f8132c8576326f4d8 | /Backend manage datasets/dataset/handlers/delete_dataset_handler.py | ab4496dd713bc11162b23aac7c8c6db8c1dcac26 | [
"MIT"
] | permissive | KamilJerzyWojcik/Machine_learning_App | d42cfc06e589d9eada1a866783b729a751e08d8a | 6afe0b6cc1d2791b94255668757ff920224fed0a | refs/heads/master | 2023-01-12T07:20:00.682439 | 2019-10-28T18:53:43 | 2019-10-28T18:53:43 | 205,706,514 | 0 | 0 | null | 2023-01-07T09:18:32 | 2019-09-01T16:58:38 | HTML | UTF-8 | Python | false | false | 456 | py | from ..models import Dataset
from django.http import JsonResponse
import json
import os
def handle(request):
    """Delete the dataset whose primary key is given in the request body.

    Removes the dataset's backing file from disk (when present) and the
    database row, then reports the deleted dataset's name as JSON.
    """
    dataset_id = json.loads(request.body)
    dataset_qs = Dataset.objects.filter(pk=dataset_id)
    # Fetch the row once instead of materializing the queryset twice.
    dataset = dataset_qs[0]
    name_deleted = dataset.name
    path = dataset.url
    if os.path.isfile(path):
        os.remove(path)
    dataset_qs.delete()
    return JsonResponse({"deleted_dataset": f'{name_deleted}'})
| [
"kamil.jerzy.wojcik@gmail.comgit config --global user.name Kamil"
] | kamil.jerzy.wojcik@gmail.comgit config --global user.name Kamil |
05012122357bf0181a223e2dd10a4e94c1fbd8da | bd01241660a9a6a6e0f3df6d70ef50a9f4f3d3ea | /models/multi_carrier.py | 2ebeabb2d0fcf63dd5ca8443f3553f357b5f3c09 | [] | no_license | Josvth/hermes-optimization | aeed40b238ee0fe2de28a97cdbcfa43cb629ec99 | 9b7a019e6f7630955ca26f12a05a3dc159f9660a | refs/heads/master | 2023-05-03T19:14:19.542033 | 2021-05-24T03:55:25 | 2021-05-24T03:55:25 | 274,066,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from models import vcm
from numba import njit
@njit
def get_sub_carriers(B_Hz):
# Determine number of sub-carriers
if B_Hz <= 100e6:
return 1
if B_Hz <= 200e6:
return 2
if B_Hz <= 300e6:
return 3
return 0 | [
"josvth@gmail.com"
] | josvth@gmail.com |
826762d57f3dbab7668b2b8c75d1746aaab34a9a | b76d63d516cbc595ca5bfc3aa77760aa9f3591e2 | /memo_python.py | d824a2c9a37332a6086279f054dbadc812285a5f | [] | no_license | odin78/monitoringSAD | 63aaa76ba891fd8150d74cfb100ae0adc66f3e3b | 5233af78e03998209db2db0f5d4d37a16e6a1115 | refs/heads/master | 2020-03-26T14:31:38.071155 | 2018-08-16T13:53:25 | 2018-08-16T13:53:25 | 144,992,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | #!/usr/bin/env python3 ---pour la portabilité entre environnements unix
#import d'un module ou d'un package
import modulename
#import d'une seule fonction d'un module
from modulename import functioname
#importer un module en le renommant
import packagename.modulename as newname
#variables classiques: adresse mémoire où est stockée une valeur
[minusculePartiesAccoléesEnMajuscule]
#variables privées d'une classe: commencent toujours par un tiret bas ou un underscore
#noms de classes en majuscule
#noms de méthodes en minusculePartiesAccoléesEnMajuscule
#indentation
def printme( str ):
....print str
....return;
#classes
class Eggs:
    """This class defines an egg."""
def __init__(self):
self.__cooked = 0
def do_cook(self):
self.__cooked = 1
def is_cooked(self):
return self.__coocked | [
"noreply@github.com"
] | noreply@github.com |
e917475fabe877dec477e34d78bf01e715efba8c | bcc199a7e71b97af6fbfd916d5a0e537369c04d9 | /leetcode/solved/2568_Minimum_Fuel_Cost_to_Report_to_the_Capital/solution.py | ff61ddbb440902ec03f8f9a1b947035f18fde637 | [] | no_license | sungminoh/algorithms | 9c647e82472905a2c4e505c810b622b734d9d20d | 1389a009a02e90e8700a7a00e0b7f797c129cdf4 | refs/heads/master | 2023-05-01T23:12:53.372060 | 2023-04-24T06:34:12 | 2023-04-24T06:34:12 | 87,406,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
There is a tree (i.e., a connected, undirected graph with no cycles) structure country network consisting of n cities numbered from 0 to n - 1 and exactly n - 1 roads. The capital city is city 0. You are given a 2D integer array roads where roads[i] = [ai, bi] denotes that there exists a bidirectional road connecting cities ai and bi.
There is a meeting for the representatives of each city. The meeting is in the capital city.
There is a car in each city. You are given an integer seats that indicates the number of seats in each car.
A representative can use the car in their city to travel or change the car and ride with another representative. The cost of traveling between two cities is one liter of fuel.
Return the minimum number of liters of fuel to reach the capital city.
Example 1:
Input: roads = [[0,1],[0,2],[0,3]], seats = 5
Output: 3
Explanation:
- Representative1 goes directly to the capital with 1 liter of fuel.
- Representative2 goes directly to the capital with 1 liter of fuel.
- Representative3 goes directly to the capital with 1 liter of fuel.
It costs 3 liters of fuel at minimum.
It can be proven that 3 is the minimum number of liters of fuel needed.
Example 2:
Input: roads = [[3,1],[3,2],[1,0],[0,4],[0,5],[4,6]], seats = 2
Output: 7
Explanation:
- Representative2 goes directly to city 3 with 1 liter of fuel.
- Representative2 and representative3 go together to city 1 with 1 liter of fuel.
- Representative2 and representative3 go together to the capital with 1 liter of fuel.
- Representative1 goes directly to the capital with 1 liter of fuel.
- Representative5 goes directly to the capital with 1 liter of fuel.
- Representative6 goes directly to city 4 with 1 liter of fuel.
- Representative4 and representative6 go together to the capital with 1 liter of fuel.
It costs 7 liters of fuel at minimum.
It can be proven that 7 is the minimum number of liters of fuel needed.
Example 3:
Input: roads = [], seats = 1
Output: 0
Explanation: No representatives need to travel to the capital city.
Constraints:
1 <= n <= 105
roads.length == n - 1
roads[i].length == 2
0 <= ai, bi < n
ai != bi
roads represents a valid tree.
1 <= seats <= 105
"""
from typing import List
import pytest
import sys
class Solution:
    def minimumFuelCost(self, roads: List[List[int]], seats: int) -> int:
        """Return the minimum liters of fuel for every representative to
        reach the capital (city 0), with cars of capacity `seats`.

        Post-order traversal: each subtree reports how many full cars leave
        it, how many riders are still unassigned, and the fuel spent inside.
        """
        adjacency = {}
        for u, v in roads:
            adjacency.setdefault(u, set()).add(v)
            adjacency.setdefault(v, set()).add(u)

        def tally(city, came_from=None):
            # -> (full cars leaving this subtree, leftover riders, fuel spent)
            if city not in adjacency:
                return 0, 0, 0
            cars = 0
            riders = 1  # this city's own representative
            fuel = 0
            for nxt in adjacency[city]:
                if nxt == came_from:
                    continue
                sub_cars, sub_riders, sub_fuel = tally(nxt, city)
                cars += sub_cars
                riders += sub_riders
                # Crossing the edge costs 1 liter per full car plus one
                # shared car for any leftover riders.
                fuel += sub_fuel + sub_cars + min(sub_riders, 1)
            extra_cars, riders = divmod(riders, seats)
            return cars + extra_cars, riders, fuel

        return tally(0)[2]
@pytest.mark.parametrize('args', [
    (([[0,1],[0,2],[0,3]], 5, 3)),
    (([[3,1],[3,2],[1,0],[0,4],[0,5],[4,6]], 2, 7)),
    (([], 1, 0)),
])
def test(args):
    # Each case is (roads, seats, expected minimum fuel).
    assert args[-1] == Solution().minimumFuelCost(*args[:-1])
if __name__ == '__main__':
    # Run this file's parametrized tests directly through pytest.
    sys.exit(pytest.main(["-s", "-v"] + sys.argv))
| [
"smoh2044@gmail.com"
] | smoh2044@gmail.com |
11beeff4b742c8cf977384d6451dd9fb6d5394db | a7dad9064668a8c50452a52159c7a8563f32695e | /rcnn/modeling/hier_rcnn/outputs.py | 3ad759af64e6d8d0c11097877a0be27181834bf4 | [
"MIT"
] | permissive | yuanli12139/Hier-R-CNN | 0283d3666f1789897df0065cf3416abc0039136d | 55d142bff9de489017d394968c68521ea5e58033 | refs/heads/master | 2023-01-14T11:11:31.273910 | 2020-11-14T11:46:36 | 2020-11-14T11:46:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,737 | py | import math
import torch
from torch import nn
from torch.nn import functional as F
from utils.net import make_conv
from models.ops import Conv2d, ConvTranspose2d, interpolate, Scale
from rcnn.modeling import registry
from rcnn.core.config import cfg
@registry.ROI_HIER_OUTPUTS.register("hier_output")
class Hier_output(nn.Module):
    """Hier R-CNN output head: FCOS-style dense prediction on RoI features.

    Two parallel conv towers (classification / box regression) feed three
    3x3 prediction layers -- class logits, 4-channel box offsets and a
    1-channel centerness map -- after each tower is upsampled 2x by a
    transposed convolution.  All sizing comes from cfg.HRCNN.
    """

    def __init__(self, dim_in):
        # dim_in: channel count of the incoming RoI feature map.
        super(Hier_output, self).__init__()
        num_classes = cfg.HRCNN.NUM_CLASSES
        num_convs = cfg.HRCNN.OUTPUT_NUM_CONVS
        conv_dim = cfg.HRCNN.OUTPUT_CONV_DIM
        use_lite = cfg.HRCNN.OUTPUT_USE_LITE
        use_bn = cfg.HRCNN.OUTPUT_USE_BN
        use_gn = cfg.HRCNN.OUTPUT_USE_GN
        use_dcn = cfg.HRCNN.OUTPUT_USE_DCN
        prior_prob = cfg.HRCNN.PRIOR_PROB
        self.norm_reg_targets = cfg.HRCNN.NORM_REG_TARGETS
        self.centerness_on_reg = cfg.HRCNN.CENTERNESS_ON_REG
        cls_tower = []
        bbox_tower = []
        for i in range(num_convs):
            # Only the last tower conv may be deformable (when enabled).
            conv_type = 'deform' if use_dcn and i == num_convs - 1 else 'normal'
            cls_tower.append(
                make_conv(dim_in, conv_dim, kernel=3, stride=1, dilation=1, use_dwconv=use_lite,
                          conv_type=conv_type, use_bn=use_bn, use_gn=use_gn, use_relu=True, kaiming_init=False,
                          suffix_1x1=use_lite)
            )
            bbox_tower.append(
                make_conv(dim_in, conv_dim, kernel=3, stride=1, dilation=1, use_dwconv=use_lite,
                          conv_type=conv_type, use_bn=use_bn, use_gn=use_gn, use_relu=True, kaiming_init=False,
                          suffix_1x1=use_lite)
            )
            dim_in = conv_dim
        self.add_module('cls_tower', nn.Sequential(*cls_tower))
        self.add_module('bbox_tower', nn.Sequential(*bbox_tower))
        # 2x upsampling before each prediction layer (kernel 2, stride 2).
        self.cls_deconv = ConvTranspose2d(conv_dim, conv_dim, 2, 2, 0)
        self.bbox_deconv = ConvTranspose2d(conv_dim, conv_dim, 2, 2, 0)
        self.cls_logits = Conv2d(
            conv_dim, num_classes, kernel_size=3, stride=1, padding=1
        )
        self.bbox_pred = Conv2d(
            conv_dim, 4, kernel_size=3, stride=1, padding=1
        )
        self.centerness = Conv2d(
            conv_dim, 1, kernel_size=3, stride=1, padding=1
        )
        # Initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # initialize the bias for focal loss
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        torch.nn.init.constant_(self.cls_logits.bias, bias_value)
        # Single learnable scale for the box branch (one "level" here).
        self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(1)])

    def forward(self, x):
        """Return (logits, bbox_reg, centerness, locations); each of the
        first three is a 1-element list of tensors, mirroring the FCOS
        per-level interface."""
        logits = []
        bbox_reg = []
        centerness = []
        cls_tower = self.cls_tower(x)
        box_tower = self.bbox_tower(x)
        _cls_tower = F.relu(self.cls_deconv(cls_tower))
        _box_tower = F.relu(self.bbox_deconv(box_tower))
        logits.append(self.cls_logits(_cls_tower))
        # Centerness can ride on either tower, per config.
        if self.centerness_on_reg:
            centerness.append(self.centerness(_box_tower))
        else:
            centerness.append(self.centerness(_cls_tower))
        bbox_pred = self.scales[0](self.bbox_pred(_box_tower))
        if self.norm_reg_targets:
            bbox_pred = F.relu(bbox_pred)
            if self.training:
                bbox_reg.append(bbox_pred)
            else:
                # At inference the normalized offsets are rescaled by the
                # 0.5 stride used below for the location grid.
                bbox_reg.append(bbox_pred * 0.5)
        else:
            bbox_reg.append(torch.exp(bbox_pred))
        locations = compute_locations(centerness, [0.5])
        return logits, bbox_reg, centerness, locations
def compute_locations(features, strides):
    """Build the per-level location grids for a list of feature maps.

    ``strides[level]`` is the spatial stride of ``features[level]``; the
    result is one (H*W, 2) tensor of (x, y) points per feature map, placed
    on that feature map's device.
    """
    per_level = []
    for level, feature_map in enumerate(features):
        height, width = feature_map.size()[-2:]
        per_level.append(
            compute_locations_per_level(height, width, strides[level], feature_map.device)
        )
    return per_level
def compute_locations_per_level(h, w, stride, device):
    """Return an (h*w, 2) float32 tensor of the (x, y) centers of an
    h-by-w grid whose cells are ``stride`` apart, in row-major order."""
    xs = torch.arange(0, w * stride, step=stride, dtype=torch.float32, device=device)
    ys = torch.arange(0, h * stride, step=stride, dtype=torch.float32, device=device)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    flat_x = grid_x.reshape(-1)
    flat_y = grid_y.reshape(-1)
    # Shift from cell corners to cell centers.
    return torch.stack((flat_x, flat_y), dim=1) + stride // 2
| [
"priv@bupt.edu.cn"
] | priv@bupt.edu.cn |
0f6959e8b0cceca7092c8b800527680ba1e71b99 | 2bc8f66fd34ba1b93de82c67954a10f8b300b07e | /general_backbone/configs/image_clf_config.py | 278078f2ccad81f5d77ba37d852e540bba918d42 | [] | no_license | DoDucNhan/general_backbone | 7dabffed5a74e622ba23bf275358ca2d09faddc1 | 686c92ab811221d594816207d86a0b97c9b4bc73 | refs/heads/main | 2023-08-31T14:59:23.873555 | 2021-10-23T06:34:14 | 2021-10-23T06:34:14 | 420,419,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,926 | py | # Copyright (c) general_backbone. All rights reserved.
# --------------------Config for model training------------------------
# Training hyper-parameters consumed by the image-classification trainer.
train_conf = dict(
    # General config
    model='resnet18',
    epochs=300,
    start_epoch=0,
    pretrained=True,
    num_classes=2,
    eval_metric='top1',
    # Checkpoint
    # NOTE(review): directory name says resnet50 while model is resnet18 --
    # confirm the intended output location.
    output='checkpoint/resnet50',
    checkpoint_hist=10,
    recovery_interval=10,
    initial_checkpoint=None,
    resume=None,
    no_resume_opt=False,
    # Logging
    log_interval=50,
    log_wandb=False,
    local_rank=0,
    # DataLoader
    batch_size=16,
    num_workers=8,
    prefetch_factor=2,
    pin_memory=True,
    shuffle=True,
    # Learning rate
    lr=0.001,
    lr_noise_pct=0.67,
    lr_noise_std=1.0,
    lr_cycle_mul=1.0,
    lr_cycle_decay=0.1,
    lr_cycle_limit=1.0,
    # NOTE(review): 'cosin' looks like a typo for 'cosine' -- verify against
    # the scheduler factory before changing; it may match this exact string.
    sched='cosin',
    min_lr=1e-6,
    warmup_lr=0.0001,
    warmup_epochs=5,
    lr_k_decay=1.0,
    decay_epochs=100,
    decay_rate=0.1,
    patience_epochs=10,
    cooldown_epochs=10,
)
# Evaluation-time DataLoader settings.
test_conf = dict(
    # Data Loader
    batch_size=16,
    shuffle=False,
    num_workers=8,
    prefetch_factor=2,
    pin_memory=True
)
# --------------------Config for Albumentation Transformation
# You can add to dict_transform a new Albumentation Transformation class with its argument and values:
# Learn about all Albumentation Transformations, refer to link: https://albumentations.ai/docs/getting_started/transforms_and_targets/
# Note: the order of the entries in the dictionary is the order in which the transformations are applied
data_root = 'toydata/image_classification'
img_size=224
data_conf=dict(
dict_transform=dict(
RandomResizedCrop={'width':256, 'height':256, 'scale':(0.9, 1.0), 'ratio':(0.9, 1.1), 'p':0.5},
ColorJitter={'brightness':0.35, 'contrast':0.5, 'saturation':0.5, 'hue':0.2, 'always_apply':False, 'p':0.5},
ShiftScaleRotate={'shift_limit':0.05, 'scale_limit':0.05, 'rotate_limit':15, 'p':0.5},
RGBShift={'r_shift_limit': 15, 'g_shift_limit': 15, 'b_shift_limit': 15, 'p': 0.5},
RandomBrightnessContrast={'p': 0.5},
Normalize={'mean':(0.485, 0.456, 0.406), 'std':(0.229, 0.224, 0.225)},
Resize={'height':img_size, 'width': img_size},
ToTensorV2={'always_apply':True}
),
class_2_idx=None, # Dictionary link class with indice. For example: {'dog':0, 'cat':1}, Take the folder name for label If None.
img_size=img_size,
data = dict(
train=dict(
data_dir=data_root,
name_split='train',
is_training=True,
debug=False, # If you want to debug Augumentation, turn into True
dir_debug = 'tmp/alb_img_debug', # Directory where to save Augmentation debug
shuffle=True
),
eval=dict(
data_dir=data_root,
name_split='test',
is_training=False,
shuffle=False
)
)
) | [
"phamdinhkhanh.tkt53.neu@gmail.com"
] | phamdinhkhanh.tkt53.neu@gmail.com |
3c6efd3975b2933f360fcc57fa1d1394bdbdbcc0 | da8adef15efbdacda32b19196b391f63d5026e3a | /SistemasInteligentes/P4/main.py | e8b16e5345b6a927679fdea2c978a99fee08ce29 | [] | no_license | rubcuadra/MachineLearning | 05da95c1f800e6acbce97f6ca825bd7a41d806a6 | aa13dd007a7954d50586cca6dd413a04db18ef77 | refs/heads/master | 2021-03-19T17:33:14.080691 | 2018-10-19T23:43:27 | 2018-10-19T23:43:27 | 100,544,903 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | from reversi_player import Agent
from reversi import ReversiBoard
from random import shuffle
_agent2 = None
def P1Turn(board, player): #Player
    """Ask the human player for a move on stdin and return it verbatim."""
    move = input('Enter your move: ')
    return move
def P2Turn(board, player): #AI (computer player)
    """Return the move chosen by the module-level AI agent for `player`."""
    return _agent2.getBestMovement(board, player)
# nivel  => search depth of the AI agent
# fichas => tokens for the computer: 0 whites, 1 blacks
# inicio => who starts: 0 the computer, 1 the opponent
def othello(nivel, fichas=1, inicio=1):
    """Run a full interactive Othello/Reversi match (human vs. AI) over
    stdin/stdout, printing the board and scores each round and announcing
    the winner (or a tie) at the end."""
    global _agent2
    _agent2 = Agent(nivel) # Create the AI agent for P2
    # P2 is the computer
    board = ReversiBoard()
    print("=== GAME STARTED ===")
    print(f"{board.P2S} = Blacks")
    print(f"{board.P1S} = Whites\n")
    print(board)
    # Who starts and set tokens
    if inicio == 1:
        order = ["P1","P2"]
        turns = [P1Turn,P2Turn]
        tokens = [board.P2,board.P1] if fichas == 0 else [board.P1,board.P2]
    else:
        order = ["P2","P1"]
        turns = [P2Turn,P1Turn]
        tokens = [board.P1,board.P2] if fichas == 0 else [board.P2,board.P1]
    while not ReversiBoard.isGameOver(board):
        for i in range(2):
            P1Score = board.score( board.P1 )
            P2Score = board.score( board.P2 )
            print("Scores:\t",f"{board.P1S}:{P1Score}","\t",f"{board.P2S}:{P2Score}")
            # A player with no legal move simply passes.
            if board.canPlayerMove( tokens[i] ) :
                print(f"{order[i]} turn, throwing {board.getSymbol(tokens[i])}")
                # Keep asking until a legal move is supplied; the turn handler
                # gets a copy of the board so it cannot mutate the real one.
                while True:
                    move = turns[i]( ReversiBoard( board.value ) ,tokens[i])
                    if ReversiBoard.cellExists(move):
                        r = board.throw(tokens[i],move)
                        if len(r) > 0:
                            print(f"Selection: {move}")
                            board.doUpdate(tokens[i],r)
                            break
                    print("Wrong movement, try again")
        print(board)
    if P1Score == P2Score: print("TIE !!")
    else: print(f"Winner is {board.P1S if P1Score>P2Score else board.P2S}")
if __name__ == '__main__':
    level = 2 # AI difficulty (search depth)
    npc = 0 # P2 (AI) tokens: 0 = whites
    starts = 1 # 0 => P2 (AI) starts
othello(level,npc,starts) | [
"rubcuadra@gmail.com"
] | rubcuadra@gmail.com |
e5275455035e38ee32d645380324d246df232a01 | 4b8eb3863480804f3d2bb8f815c442e276d8aea8 | /django_project/auth_/urls.py | 6d6e5f47197107740f1592f4ef4c9358ffd1686d | [] | no_license | RauzaAl/PaintingShop | 820fe664c59c14862bb0b8d519213ac6bc9ebaab | 30ff9122f0ecda0ce3ec2f4194348ae234152dc7 | refs/heads/main | 2023-05-01T13:58:07.851323 | 2021-05-16T11:14:11 | 2021-05-16T11:14:11 | 356,239,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | from django.urls import path
from rest_framework_extensions.routers import ExtendedSimpleRouter
from rest_framework_jwt.views import obtain_jwt_token
from django_project.auth_.views import register, UsersViewSet
# Router exposing UsersViewSet under the 'users' prefix.
router = ExtendedSimpleRouter()
router.register('users', UsersViewSet, basename='users')
urlpatterns = [
    path('register/', register),
    path('login/', obtain_jwt_token),  # JWT token-issuing login endpoint
]
# Append the router-generated user routes to the explicit ones above.
urlpatterns += router.urls
| [
"rauza907@gmail.com"
] | rauza907@gmail.com |
92f1a06dd44710bd1874697a3fc2219ec9564935 | d319d33b927702cd7a28e238adb07f02f4af6da5 | /clstest.py | 1b517ca351489bc04a143fb6a736c37c67145b43 | [] | no_license | jingqiangliu/mib-parse | 04f357fdf6f0a66b742d3d33a7fddf4133d5da09 | 7b9dd7fce67bb1025f3b234401608d52bead5da7 | refs/heads/master | 2020-09-27T15:11:25.988380 | 2019-04-02T09:18:10 | 2019-04-02T09:18:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py |
from frame import omci_entities
if __name__ == '__main__':
    # Walk every attribute of the omci_entities module and, for each
    # EntityClass subclass found, print the field name of each of its
    # declared attributes.  NOTE: Python 2 syntax (print statement).
    for attr_name in dir(omci_entities):
        cls = getattr(omci_entities,attr_name)
        if not isinstance(cls,type) or not issubclass(cls,omci_entities.EntityClass):
            continue
        #print cls,cls.class_id,cls.__name__
        for attr in cls.attributes:
            print attr.field.name
"yuntao.cao@nocsys.com.cn"
] | yuntao.cao@nocsys.com.cn |
22112a517309402f21f46dd20e2544557c9eb18a | 3a432da8bc7056c0949ab83edc91665860c5de1e | /gcd.py | 248cdeeb56cb25de3702fd9bb584cd8d87af98f2 | [] | no_license | vesselofgod/Python_Lecture_Note | 2fd5d6dc84ac53082e73f82a7464f040364cdced | 06d9de8bdd80af0f84dc1bb9d14424e79cd608d2 | refs/heads/master | 2022-04-06T20:28:18.508039 | 2020-02-03T04:37:59 | 2020-02-03T04:37:59 | 196,804,150 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | def gcd(a,b):
if (b>a): a,b=b,a
while(b!=0):
a=a%b
a,b=b,a
return a
| [
"noreply@github.com"
] | noreply@github.com |
160ed6ae71362727c6c59c828251d2cbfd53f599 | decdcd71a854085d08e431c0c4a8233dda1ea630 | /_build/jupyter_execute/03-arma/06-pronosticos.py | 8b58450a21933a06fa7e545b633b97ee19d6b66c | [] | no_license | davidmorasalazar/econometria | 429839c147de6dbd5ec0563c30b9237541135cf2 | be96b2da38ee0b25f24663b032d2be3d1008e6b0 | refs/heads/master | 2023-04-02T07:07:29.111940 | 2021-04-05T06:14:35 | 2021-04-05T06:14:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,362 | py | ```{include} ../math-definitions.md
```
import bccr
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import numpy as np
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
isi = bccr.SW(isi=25725)
res = ARIMA(isi, order=[3,0,0]).fit()
# Pronósticos con modelos ARMA
## Pronósticos con modelos ARMA
Sea $y^*_{t+j|t}$ un pronóstico de $y_{t+j}$ pasado en datos observados hasta $t$.
Definimos el “mejor” pronóstico de este tipo como aquel que minimiza el **error cuadrático medio**
\begin{equation*}
\text{MSE}\left(y^*_{t+j|t}\right) \equiv \E\left(y_{t+j} - y^*_{t+j|t}\right)^2
\end{equation*}
Aunque no lo probamos aquí, resulta que el pronóstico con el menor error cuadrático medio es la esperanza de $y_{t+j}$ condicional en los datos hasta $t$:
\begin{equation*}
y^*_{t+j|t} = \E\left(y_{t+j} | y_t, y_{t-1}, \dots \right) \equiv \E_t\left(y_{t+j}\right)
\end{equation*}
## Pronosticando valores puntuales de la serie
Es muy sencillo obtener de manera recursiva estos pronósticos para los modelos ARMA, siguiendo estos 3 pasos:
1. Se escribe la ecuación ARMA de manera que $y_t$ quede a la izquierda y todos los otros términos a la derecha.
2. Se sustituye el índice $t$ por $T+h$.
3. En el lado derecho de la ecuación, se sustituyen:
* observaciones futuras con sus pronósticos,
* errores futuros con cero,
* errores pasados con sus respectivos residuos.
Empezando con $h=1$, se repiten los pasos 2 y 3 para valores $h=2,3,\dots$ hasta que todos los pronósticos hayan sido calculados.
Para ilustrar el procedimiento, consideremos este proceso ARMA(1,2)
\begin{equation*}
(1-\phi\Lag)\tilde{y}_t = (1 + \theta_1\Lag + \theta_2\Lag^2)\epsilon_t
\end{equation*}
**Paso 1:** $\tilde{y}_t = \phi\tilde{y}_{t-1} + \epsilon_t + \theta_1\epsilon_{t-1} + \theta_2\epsilon_{t-2}$
Para $h=1$:
\begin{align*}
\tilde{y}_{T+1} &= \phi\tilde{y}_{T} + \epsilon_{T+1} + \theta_1\epsilon_{T} + \theta_2\epsilon_{T-1}\\
\tilde{y}^*_{T+1|T} &= \phi\tilde{y}_{T} + \theta_1\hat{\epsilon}_{T} + \theta_2\hat{\epsilon}_{T-1}
\end{align*}
Para $h=2$:
\begin{align*}
\tilde{y}_{T+2} &= \phi\tilde{y}_{T+1} + \epsilon_{T+2} + \theta_1\epsilon_{T+1} + \theta_2\epsilon_{T}\\
y^*_{T+2|T} &= \phi\tilde{y}^*_{T+1|T} + \theta_2\hat{\epsilon}_{T}
\end{align*}
Para $h=3$:
\begin{align*}
\tilde{y}_{T+3} &= \phi\tilde{y}_{T+2} + \epsilon_{T+3} + \theta_1\epsilon_{T+2} + \theta_2\epsilon_{T+1}\\
y^*_{T+3|T} &= \phi\tilde{y}^*_{T+2|T}
\end{align*}
Es fácil comprobar que, siguiendo este procedimiento, una vez que $h>p$, $h>q$ la ecuación de pronóstico será
\begin{equation*}
\tilde{y}^*_{T+h} = \phi_1\tilde{y}^*_{T+h-1} + \dots + \phi_p\tilde{y}^*_{T+h-p}
\end{equation*}
Esto es una ecuación en diferencia de orden $p$, de la cual sabemos que sus raíces están dentro del círculo unitario. Por ello
\begin{align*}
\lim\limits_{h\to\infty}\tilde{y}^*_{T+h} &= 0 & \text{es decir }\quad \lim\limits_{h\to\infty}y^*_{T+h} &= \mu
\end{align*}
Esto nos dice que, para valores grandes de $h$, el pronóstico es simplemente la media del proceso.
## Cuantificando la incertidumbre de los pronósticos puntuales
Para saber qué tan precisos esperamos que sean estos pronósticos, necesitamos cuantificar su error cuadrático medio
\begin{align*}
\text{MSE}\left(y^*_{t+j|t}\right) &\equiv \E\left(y_{t+j} - y^*_{t+j|t}\right)^2 \\
&= \E\left(y_{t+j} - \E_t\left(y_{t+j}\right)\right)^2 \\
&= \Var_t\left(y_{t+j}\right)
\end{align*}
Es decir, necesitamos cuantificar la varianza de $y_{t+j}$ condicional en los datos disponibles en $t$.
Por razones de tiempo, no vamos a desarrollar estas fórmulas durante este curso.
{{ empieza_ejemplo }} Pronosticando la inflación {{ fin_titulo_ejemplo }}
```{margin} Archivos:
* bccr.ServicioWeb
* ISI-AR3.ipynb
```
from scipy.stats import norm
plt.style.use('seaborn-dark')
horizon = 36
fcast =res.get_forecast(steps=horizon, alpha=0.05)
fig, ax =plt.subplots(figsize=[12,4])
isi.plot(ax=ax)
fcast.predicted_mean.plot(ax=ax)
ax.axhline(isi.values.mean(), color='C2')
ax.fill_between(fcast.row_labels, *fcast.conf_int().values.T,
facecolor='C1', alpha=0.25, interpolate=True);
Podemos hacer intervalos de distintos niveles de significancia:
fig, ax =plt.subplots(figsize=[12,4])
isi['2018':].plot(ax=ax)
fcast.predicted_mean.plot(ax=ax)
ax.axhline(isi.values.mean(), color='C2')
for alpha in np.arange(1,6)/10:
ax.fill_between(fcast.row_labels, *fcast.conf_int(alpha=alpha).values.T,
facecolor='C1', alpha=0.1, interpolate=True);
## ¿Qué podría salir mal?
En todas las fórmulas que hemos elaborado para los pronósticos hasta ahora, está implícito que conocemos los verdaderos valores de los parámetros.
En la práctica, esos parámetros son estimados a partir de los datos.
Tomemos por ejemplo un proceso AR(1):
\begin{align*}
y_{t+1} &= \phi y_t + \epsilon_{t+1} \tag{valor verdadero} \\
y^*_{t+1|t} &= \hat{\phi} y_t \tag{pronóstico} \\
&\Rightarrow \\
\notationbrace{y_{t+1} - y^*_{t+1|t}}{error de pronóstico} &= \notationbrace{\left(\phi-\hat{\phi}\right)}{“sesgo”}y_t + \notationbrace{\epsilon_{t+1}}{innovación}
\end{align*}
{{ termina_ejemplo }} | [
"romero-aguilar.1@osu.edu"
] | romero-aguilar.1@osu.edu |
d46339f7656553d9127c6339a00339fa6b46a8a2 | 16e6cb34c867db200381f435d389f56dbe54995b | /master.py | 6f602aae04621e92ecc057975e318c44abe021fb | [] | no_license | jenchen1398/Parallel-Pong | 73efe42a5e32381f4dd78378625081c1cc28eea3 | 4a7b578b33343775f3a779c70eabc6c2850c75cd | refs/heads/master | 2021-01-20T23:51:31.465531 | 2015-08-05T17:20:45 | 2015-08-05T17:20:45 | 40,201,468 | 0 | 0 | null | 2015-08-04T18:15:47 | 2015-08-04T18:15:47 | null | UTF-8 | Python | false | false | 3,721 | py | import pypong, socket, struct, threading, select, time, pygame
from pypong.player import BasicAIPlayer, Player
player_left = None # set the players as global so the control thread has access
player_right = None # simplest way to do it
running = True
def setup(ip, port, display, mini_display):
global player_left, player_right
rect = pygame.image.load( 'assets/paddle.png' ).get_rect()
configuration = {
'screen_size': display,
'individual_screen_size': mini_display,
'paddle_image': 'assets/paddle.png',
'paddle_left_position': 10,
'paddle_right_position': display[0] - rect.w,
'paddle_velocity': 120.,
'paddle_bounds': (1, display[1] - 1),
'line_image': 'assets/dividing-line.png',
'ball_image': 'assets/ball.png',
'ball_velocity': 80.,
'ball_velocity_bounce_multiplier': 1.105,
'ball_velocity_max': 130.,
}
#make a socket, and connect to a already running server socket
# read some file with the ip addresses and put them in the variables ip addersses
# hard coded for now
connections = []
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((ip, port))
server_socket.listen(15)
connections.append(server_socket)
# Prepare game
#player_left = BasicAIPlayer()#None, 'up', 'down')
player_left = Player(None, 'up', 'down')
#player_right = BasicAIPlayer()#None, 'up', 'down')
player_right = Player(None, 'up', 'down')
pygame.init()
pygame.display.set_mode((200,200))
game = pypong.Game(player_left, player_right, configuration)
threading.Thread(target = ctrls, args = [game]).start()
# Main game loop
while game.running:
findnewConnections(connections, server_socket)
game.update()
coordinates = struct.pack('iiii', game.ball.position_vec[0], game.ball.position_vec[1], game.paddle_left.rect.y, game.paddle_right.rect.y )
# loop over clients and send the coordinates
for sock in connections:
if sock is not server_socket:
sock.send(coordinates)
# wait for them to send stuff back to avoid a race condition.
#for x in range( 0,len( clisocket ) ):
#clisocket[x].recv( 16 )
print 'server is closing'
server_socket.close()
def findnewConnections(connections, server_socket):
read_sockets, write_sockets, error_sockets = select.select(connections,[],[], 0.)
for sock in read_sockets:
if sock == server_socket:
sockfd, addr = server_socket.accept()
connections.append(sockfd)
def ctrls(game):
global player_left, player_right
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game.running = False
if event.type == pygame.KEYDOWN:
player_left.input_state = None
player_right.input_state = None
if event.key == pygame.K_r:
player_right.input_state = 'up'
if event.key == pygame.K_f:
player_right.input_state = 'down'
if event.key == pygame.K_UP:
player_left.input_state = 'up'
if event.key == pygame.K_DOWN:
player_left.input_state = 'down'
if event.key == pygame.K_ESCAPE:
running = False
if event.type == pygame.KEYUP:
player_left.input_state = None
player_right.input_state = None
| [
"euromkay@gmail.com"
] | euromkay@gmail.com |
bcd47c049189dca5af79f4d85687e6732a673dce | bfe5ab782ca4bb08433d70bdd142913d40a40a8d | /Codes/141) exercise27.py | 57d04640fbaad4771a75ad7ddec47a0b9a69418c | [] | no_license | akshat12000/Python-Run-And-Learn-Series | 533099d110f774f3c322c2922e25fdb1441a6a55 | 34a28d6c29795041a5933bcaff9cce75a256df15 | refs/heads/main | 2023-02-12T19:20:21.007883 | 2021-01-10T05:08:09 | 2021-01-10T05:08:09 | 327,510,362 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | # Create a laptop class with attributes like brand name, model name, price
# Create two instance(object) of your laptop class
class Laptop:
def __init__(self,brand_name,model_name,price):
self.brand_name=brand_name
self.model_name=model_name
self.price=price
self.full_name=brand_name+' '+model_name # we can create an extra or less instance variables
l1=Laptop("HP","Pavilion",50000)
l2=Laptop("Dell","Inspiron",45000)
print(f"Brand Name: {l1.brand_name}, Model Name: {l1.model_name}, Price: {l1.price}, Full Name: {l1.full_name}")
print(f"Brand Name: {l2.brand_name}, Model Name: {l2.model_name}, Price: {l2.price}, Full Name: {l2.full_name}") | [
"noreply@github.com"
] | noreply@github.com |
ef6e6fa4ed687ca9fc3aa5fbf8c1f2e2e13b1c1d | ee7f2c15a1b7db1b584450a20095f1fb4ae2f2ad | /webScrapper.py | bad0725c8c12d8ddbd64076fb1fb839a049c2562 | [
"MIT"
] | permissive | jkuan1/nwHacks2020 | 2a7a8541faa3985a5004f0010059991f14051e33 | 3f2fece58a2d94cd7e878a3af1d18d4180634646 | refs/heads/master | 2020-12-09T12:00:44.876342 | 2020-01-12T19:46:05 | 2020-01-12T19:46:05 | 233,297,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | import requests
import json
import textwrap
from bs4 import BeautifulSoup
CHARACTER_LIMIT = 5000
# Website url and requsting the html
URL = url
page = requests.get(URL)
# Setup BeatifulSoup web scrapper
soup = BeautifulSoup(page.content, 'html.parser')
headline = soup.find("h1", class_="detailHeadline")
story = soup.find(class_='story')
lines = story.find_all("p")
# Recombine html paragraphs into one string
list_lines = [headline.get_text() + "\n"]
for p in range(len(lines)):
paragraph = lines[p].get_text()
list_lines.append(paragraph)
final_article = " ".join(list_lines)
final_article = final_article.replace(u'\xa0', u' ')
final_article = final_article.replace(u'\n', u'.')
print(len(final_article))
def prepare_key_phrases(article):
article_size = len(article)
if (article_size / CHARACTER_LIMIT > 0):
document = [article[i:i + CHARACTER_LIMIT] for i in range(0, article_size, CHARACTER_LIMIT)]
else:
document = [article]
return document
print(prepare_key_phrases(final_article)) | [
"thien.ho@alumni.ubc.ca"
] | thien.ho@alumni.ubc.ca |
60e20f65cf6af6e8b751077eed72b94b9864c8cc | db1b063be3ae7d3c4ba75b5d51d8fbe73c3d482b | /patapp/views/reply.py | 8317138c59d3f0d1a1988da1d8848cbe098e99b6 | [] | no_license | luoxianchao/MedicalWechatMiniprgram | 8f975d042ea69dbb8ffca24a7c9abab077c129f5 | aaa86f7813583c31d0316c69371aecf4f9537773 | refs/heads/master | 2022-10-22T01:23:04.654566 | 2020-06-16T05:40:56 | 2020-06-16T05:40:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | import os
import django
from django.http import HttpResponse, Http404, FileResponse, JsonResponse
from django.db import models
from django.core import serializers
from patapp.models import user
from patapp.models import image
from patapp.models import imagereal
import requests
def reply(request):
#image1=image.objects.all()
# print(image1)
# image1 = image.objects.values()
# print(image1.get(id=1))
# data = serializers.serialize("json", image1)
id = request.GET.get("id")
image1 = image.objects.filter(upload_key=id)
# user1 = user.objects.all()
a = image1.values()
usernew = list(a)
print(usernew)
response = []
response.append({
'user': usernew
})
return JsonResponse(data=response, safe=False)
| [
"sophrieet@gmail.com"
] | sophrieet@gmail.com |
4deaebf56103bcd918ea4cd531d6c30b9928c862 | ab938906b0dc6213dfcadb1044fd74382126f6e6 | /d26_1.py | 24d2580eacc1a6472cce65cd7456d95f5aa6bcbb | [] | no_license | sparsh-m/30days | 5b8842cd1abdee2249f74e62a46fac0d091af8a6 | 866fc6ec85a1eaec6a725f155310d037059f0771 | refs/heads/master | 2022-12-04T05:29:21.323438 | 2020-08-23T10:01:39 | 2020-08-23T10:01:39 | 273,663,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | #https://leetcode.com/problems/minimum-path-sum/
#TC:O(N^2)
def minPathSum(grid):
n,m = len(grid), len(grid[0])
dp = [[0 for i in range(len(grid[0]))] for i in range(len(grid))]
dp[0][0] = grid[0][0]
for i in range(1,len(dp[0])):
dp[0][i] = grid[0][i] + dp[0][i-1]
if len(dp)==1:
return dp[0][-1]
for i in range(1,len(dp)):
dp[i][0] = grid[i][0] + dp[i-1][0]
if len(dp[0])==1:
return dp[-1][0]
for i in range(1,len(dp)):
for j in range(1,len(dp[0])):
dp[i][j] = min(grid[i][j] + dp[i][j-1], grid[i][j] + dp[i-1][j])
return dp[-1][-1]
grid = [
[1,3,1]
]
print(minPathSum(grid)) | [
"sparshmisra1@gmail.com"
] | sparshmisra1@gmail.com |
6a63db375fbee64f04a063e3e15d6e9caff8ca94 | 0f20f3e02aa05b8e690190a96e92a524b211338f | /SW_Expert_Academy_02/String2.py | 142a9ff2f4c33454b306be6657fae7ebaa14028b | [] | no_license | HYEONAH-SONG/Algorithms | ec744b7e775a52ee0756cd5951185c30b09226d5 | c74ab3ef21a728dcd03459788aab2859560367e6 | refs/heads/master | 2023-07-18T14:41:48.360182 | 2021-09-03T13:41:23 | 2021-09-03T13:41:23 | 336,240,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # 문장을 구성하는 단어를 역순으로 출력하는 프로그램을 작성하라
sentence = input()
s_list = sentence.split(' ')
s_list.reverse()
for i in s_list:
print(i, end=' ') | [
"sha082072@gmail.com"
] | sha082072@gmail.com |
976a5aaede8e86285f2d52c25cf17df99c11cc16 | 45894e01a4835ef059822d2c3924bfb13560beb9 | /lims_gql/schema.py | bbcf059639567c152b2c9822cbb517df3d488bc3 | [] | no_license | zhonghua-wang/lims_gql | 8d03df78c5551c6ede17ac467ba4c593596fdab6 | 16dda197ee453952aee7dde04e398258e02f5a3e | refs/heads/master | 2020-12-07T13:33:28.905671 | 2017-06-30T11:47:37 | 2017-06-30T11:47:37 | 95,570,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | import graphene
from graphene_django.debug import DjangoDebug
import instrument.schema
import core.schema
class Query(instrument.schema.Query, core.schema.Query, graphene.ObjectType):
debug = graphene.Field(DjangoDebug, name='__debug')
schema = graphene.Schema(query=Query)
| [
"zhonghua.wang@outlook.com"
] | zhonghua.wang@outlook.com |
e2a11079640953b82794c678445d69a6420f030d | d049ef46a863c61ccaaea10a966ddcd8016867f0 | /orders/migrations/0001_initial.py | 744b2abbf083bbe777e2974a683f32bd97e2df0a | [] | no_license | sofialazrak/topfonte | 5735bb76d30ca1ac73073ad48f9793bcce7f958d | 954afb32f9b45aff76110b7771e51a26495d6ddd | refs/heads/main | 2023-03-26T20:50:35.281128 | 2021-03-11T21:08:18 | 2021-03-11T21:08:18 | 346,414,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | # Generated by Django 3.1.6 on 2021-02-08 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('shop', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('address', models.CharField(max_length=250)),
('postal_code', models.CharField(max_length=20)),
('city', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('paid', models.BooleanField(default=False)),
],
options={
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='shop.product')),
],
),
]
| [
"sofia.lazrak@gmail.com"
] | sofia.lazrak@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.