index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
24,200 | 3ca5ce8d806c7eb87cb59990575ed144ce3150db | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6844903670656565261", "article_info": {"article_id": "6844903670656565261", "user_id": "4195392101555534", "category_id": "6809637767543259144", "tag_ids": [6809640528267706382, 6809640407484334093, 6809640394175971342, 6809640369764958215], "visible_level": 0, "link_url": "https://juejin.im/post/6844903670656565261", "cover_image": "", "is_gfw": 0, "title": "读 VuePress(二):使用 Webpack-chain 链式生成 webpack 配置", "brief_content": "vuepress 有三套 webpack 配置:基础配置、dev 配置、build 配置,看似和普通的一个前端项目也没什么差别,但它使用 webpack-chain 生成配置而不是传统的写死配置。 在引入详细的示例之前,先让我们介绍一下 webpack-chain 中内置的两种…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1536164821", "mtime": "1598466588", "rtime": "1536199308", "draft_id": "6845075608678973447", "view_count": 4519, "collect_count": 12, "digg_count": 29, "comment_count": 4, "hot_index": 258, "is_hot": 0, "rank_index": 0.00045796, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4195392101555534", "user_name": "fffff", "company": "微软", "job_title": "前端工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/168e089e656d9bb03b5~tplv-t2oaga2asx-image.image", "level": 3, "description": "打杂工程师", "followee_count": 4, "follower_count": 333, "post_article_count": 13, "digg_article_count": 13, "got_digg_count": 636, "got_view_count": 49954, "post_shortmsg_count": 4, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 1110, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": 
{"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546614, "tag_id": "6809640528267706382", "tag_name": "Webpack", "color": "#6F94DB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/73e856b07f83b4231c1e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1440920866, "mtime": 1631692726, "id_type": 9, "tag_alias": "", "post_article_count": 6704, "concern_user_count": 204077}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6844903670656565261, "omitempty": 2, "user_id": 0, "is_digg": 
false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6951651144217133092", "article_info": {"article_id": "6951651144217133092", "user_id": "1248693511259070", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS系列 -- 各种布局实现", "brief_content": "position定位这里我们有必要先了解一下 position 定位static 元素出现在正常的流中relative 相对定位absolute 绝对定位fixed 绝对定位flex布局详细内容见 C", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1618557497", "mtime": "1626434068", "rtime": "1618823947", "draft_id": "6951642092695191565", "view_count": 317, "collect_count": 7, "digg_count": 4, "comment_count": 0, "hot_index": 19, "is_hot": 0, "rank_index": 0.00045793, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1248693511259070", "user_name": "ALKAOUA", "company": "深圳大学 | 鹅厂实习生", "job_title": "大四学生", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/5f5db01d993c0569beee0f8124771363~300x300.image", "level": 2, "description": "前端开发", "followee_count": 3, "follower_count": 21, "post_article_count": 100, "digg_article_count": 81, "got_digg_count": 134, "got_view_count": 17766, "post_shortmsg_count": 1, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 311, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6951651144217133092, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903504293658632", "article_info": {"article_id": "6844903504293658632", "user_id": "3667626519702206", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640478850416654, 6809640398105870343, 6809640985295847437], "visible_level": 0, "link_url": "https://juejin.im/post/6844903504293658632", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2017/10/17/c6e33109e9a1e3f5201552b2939f76d5~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "谈谈PostCSS", "brief_content": "CSS,就是这个看似不起眼的家伙,却在开发中发挥着和js一样重要的作用。css,是一种样式脚本,好像和编程语言有着一定的距离,我们可以将之理解为一种描述方法。这似乎导致css被轻视了。不过,css近几年来正在经历着一次巨变——CSS Module。我记得js的井喷期应该可以说是…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1508236657", "mtime": "1598436400", "rtime": "1508292751", "draft_id": "6845075310149369870", "view_count": 6139, "collect_count": 21, "digg_count": 54, "comment_count": 0, "hot_index": 360, "is_hot": 0, "rank_index": 0.00045792, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": 
""}, "author_user_info": {"user_id": "3667626519702206", "user_name": "FE_莫问", "company": "字节跳动", "job_title": "前端开发工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/9/11/165c698546347142~tplv-t2oaga2asx-image.image", "level": 3, "description": "轻松的背后是停滞不前", "followee_count": 20, "follower_count": 1462, "post_article_count": 24, "digg_article_count": 53, "got_digg_count": 1606, "got_view_count": 72158, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 2515, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546578, "tag_id": "6809640478850416654", "tag_name": "PostCSS", "color": "#DF352E", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/0dbe1d1ebaac45ea39e7.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1438160506, "mtime": 1631180117, "id_type": 9, "tag_alias": "", 
"post_article_count": 140, "concern_user_count": 11378}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546944, "tag_id": "6809640985295847437", "tag_name": "Stylus", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/ddfc9bad6a0c787e25f1.svg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489451831, "mtime": 1630962860, "id_type": 9, "tag_alias": "", "post_article_count": 41, "concern_user_count": 1984}], "user_interact": {"id": 6844903504293658632, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6991152962055700517", "article_info": {"article_id": "6991152962055700517", "user_id": "2181849650040935", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "如何使用flex做平均布局和垂直居中的?", "brief_content": "1.最近需求里面涉及到平均布局,现在在这里总结一下: 想做一个这样的布局: 涉及到三层div:父div,五个子div,中间加一层x的div,flex,中间一层div加个负margin,实现一个平局布局", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1627754874", "mtime": "1627831433", "rtime": "1627788668", "draft_id": "6991116853359673352", "view_count": 74, "collect_count": 1, "digg_count": 1, "comment_count": 0, "hot_index": 4, "is_hot": 0, "rank_index": 0.00045751, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2181849650040935", "user_name": "梧桐呓语", "company": 
"", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/bdbaf977a3a6b4d0a20b54ea4f4219c0~300x300.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 0, "post_article_count": 27, "digg_article_count": 12, "got_digg_count": 7, "got_view_count": 1928, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 26, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6991152962055700517, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903565454999560", "article_info": {"article_id": "6844903565454999560", "user_id": "3104676565489998", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, 
"link_url": "https://juejin.im/post/6844903565454999560", "cover_image": "", "is_gfw": 0, "title": "移动端布局方案探究", "brief_content": "1. 物理像素(physical pixel) 2. 设备独立像素(density-independent pixel) 3. 位图像素 一个位图像素是栅格图像(如:png, jpg, gif等)最小的数据单元。每一个位图像素都包含着一些自身的显示信息(如:显示位置,颜色值,透…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1519129243", "mtime": "1598446868", "rtime": "1519280473", "draft_id": "6845075380596899853", "view_count": 4174, "collect_count": 70, "digg_count": 107, "comment_count": 3, "hot_index": 318, "is_hot": 0, "rank_index": 0.00045661, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3104676565489998", "user_name": "Teal", "company": "字节跳动", "job_title": "前端开发", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/2/18/161a98f0b2ba7ec5~tplv-t2oaga2asx-image.image", "level": 2, "description": "", "followee_count": 12, "follower_count": 182, "post_article_count": 10, "digg_article_count": 73, "got_digg_count": 395, "got_view_count": 20463, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 566, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": 
"6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903565454999560, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903517304569864", "article_info": {"article_id": "6844903517304569864", "user_id": "3069492194447016", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903517304569864", "cover_image": "", "is_gfw": 0, "title": "Q:你知道如何用line-height使多行文字垂直居中么?", "brief_content": "line-height(行高) : 指的是两行文字间基线之间的距离,而实际撑开div高度的不是height,而是line-height。 **line box **: 每一行称为一条line box,它又是由这一行的许多inline box组成,它的高度可以直接由最大的line…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1511924457", "mtime": "1599380375", "rtime": "1511924457", "draft_id": "6845075319741743118", "view_count": 4543, "collect_count": 43, "digg_count": 93, "comment_count": 25, "hot_index": 345, "is_hot": 0, "rank_index": 0.0004562, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3069492194447016", "user_name": "Juicyangxj31871", "company": "", "job_title": "前端工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2017/11/30/1600a5c7b55ceea4~tplv-t2oaga2asx-image.image", "level": 3, "description": "爱生气算吗?哈哈", "followee_count": 7, "follower_count": 67, "post_article_count": 6, "digg_article_count": 4, "got_digg_count": 689, "got_view_count": 31446, 
"post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 1003, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903517304569864, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6920608051057655821", "article_info": {"article_id": "6920608051057655821", "user_id": "2664871918064039", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/ed88cab3829f47b8b48f85929bcdfe34~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "使用stroke-dashoffset 快速实现SVG描边动画", "brief_content": "stroke-dasharray:控制用来描边的点划线的图案范式。 这里可以传入以空格代表分隔的数组:可以传入任意数量的数字,代表了分割的规律。比如: 
stroke-dashoffset:用于指定 stroke-dasharray 开始的偏移量,这也是动画的原理的关键。 通过控…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1611329808", "mtime": "1611489263", "rtime": "1611471436", "draft_id": "6920571454043979783", "view_count": 393, "collect_count": 2, "digg_count": 15, "comment_count": 0, "hot_index": 34, "is_hot": 0, "rank_index": 0.0004549, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2664871918064039", "user_name": "TTtttt", "company": "字节", "job_title": "前端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/b9d3c4334c9eded44e83a42928b0b4c7~300x300.image", "level": 2, "description": "学习react、go中....", "followee_count": 63, "follower_count": 39, "post_article_count": 6, "digg_article_count": 77, "got_digg_count": 137, "got_view_count": 5823, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 195, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 
1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6920608051057655821, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903705993543688", "article_info": {"article_id": "6844903705993543688", "user_id": "1257497031620679", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903705993543688", "cover_image": "", "is_gfw": 0, "title": "CSS系列——transition属性", "brief_content": "进度条会一段一段的渲染,非常突兀,用户体验很不友好。下面我们一起来探究一下transition属性用了什么魔法,让CSS能动起来? property:可以做动画的属性,包括width、height、background、backgorundImage、opacity、font、…", "is_english": 0, "is_original": 1, "user_index": 0.002931794154493, "original_type": 0, "original_author": "", "content": "", "ctime": "1574314861", "mtime": "1598476246", "rtime": "1574316293", "draft_id": "6845076540691054605", "view_count": 2104, "collect_count": 23, "digg_count": 21, "comment_count": 6, "hot_index": 132, "is_hot": 0, "rank_index": 0.00045468, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1257497031620679", "user_name": "^_^在掘金43335", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/39a5c81f45db3ffee9ad58bea6d18de1~300x300.image", "level": 2, "description": "", "followee_count": 66, "follower_count": 18, "post_article_count": 13, "digg_article_count": 344, "got_digg_count": 144, "got_view_count": 13987, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 283, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, 
"select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903705993543688, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6961643051848564744", "article_info": {"article_id": "6961643051848564744", "user_id": "1248693511259070", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS系列 -- 清除浮动", "brief_content": "场景 一个大盒子 Box,里面包含两个小盒子 box1、box2,想让 box1、box2 的高度来撑开 Box ,使得 Box 能做到 高度自适应(因为大盒子 Box 里面可能还有其他盒子 box3", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1620884052", "mtime": "1630325909", "rtime": "1620887241", "draft_id": "6961333471595724836", "view_count": 298, "collect_count": 3, "digg_count": 1, "comment_count": 0, "hot_index": 15, "is_hot": 0, "rank_index": 0.00045398, "status": 2, "verify_status": 1, 
"audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1248693511259070", "user_name": "ALKAOUA", "company": "深圳大学 | 鹅厂实习生", "job_title": "大四学生", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/5f5db01d993c0569beee0f8124771363~300x300.image", "level": 2, "description": "前端开发", "followee_count": 3, "follower_count": 21, "post_article_count": 100, "digg_article_count": 81, "got_digg_count": 134, "got_view_count": 17766, "post_shortmsg_count": 1, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 311, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6961643051848564744, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903545053921293", "article_info": {"article_id": 
"6844903545053921293", "user_id": "3702810890750398", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093, 6809640394175971342, 6809640793381273614], "visible_level": 0, "link_url": "https://github.com/zhaoqize/blog/issues/10", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/1/5/160c445a8b6ce454~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "[翻译] tween.js 中文使用指南", "brief_content": "在学习 tween.js 的过程中没找到合适的中文资料,于是翻译了一篇入门指南。", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1515121518", "mtime": "1598443801", "rtime": "1515121518", "draft_id": "0", "view_count": 5446, "collect_count": 41, "digg_count": 57, "comment_count": 2, "hot_index": 331, "is_hot": 0, "rank_index": 0.00045338, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3702810890750398", "user_name": "qize", "company": "心之所向", "job_title": "切图工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/6/26/16b935e45d01529f~tplv-t2oaga2asx-image.image", "level": 2, "description": "Done Is Better Than Perfect!", "followee_count": 4, "follower_count": 3799, "post_article_count": 61, "digg_article_count": 86, "got_digg_count": 2069, "got_view_count": 87809, "post_shortmsg_count": 2, "digg_shortmsg_count": 10, "isfollowed": false, "favorable_author": 0, "power": 997, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546806, "tag_id": "6809640793381273614", "tag_name": "three.js", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/edf74d6b6b4f5121731c.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1488865919, "mtime": 1631691331, "id_type": 9, "tag_alias": "", "post_article_count": 381, "concern_user_count": 10446}], "user_interact": {"id": 6844903545053921293, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": 
false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903502217478157", "article_info": {"article_id": "6844903502217478157", "user_id": "272334611824398", "category_id": "6809637767543259144", "tag_ids": [6809640381920051207, 6809640392770715656, 6809640394175971342, 6809640398105870343, 6809640407484334093, 6809640420889346056, 6809640482725953550, 6809640625856577549], "visible_level": 0, "link_url": "https://juejin.im/post/6844903502217478157", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2017/10/8/8edaf47e69ac9b881b4833311f5a4aca~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "浏览器性能优化-渲染性能", "brief_content": "在浏览器渲染过程与性能优化一文中(建议先去看一下这篇文章再来阅读本文),我们了解与认识了浏览器的关键渲染路径以及如何优化页面的加载速度。在本文中,我们主要关注的是如何提高浏览器的渲染性能(浏览器进行布局计算、绘制像素等操作)与效率。 很多网页都使用了看起来效果非常酷炫的动画与用…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1507477310", "mtime": "1598435959", "rtime": "1507477310", "draft_id": "6845075308383584263", "view_count": 4950, "collect_count": 74, "digg_count": 109, "comment_count": 3, "hot_index": 359, "is_hot": 0, "rank_index": 0.00045289, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "272334611824398", "user_name": "SylvanasSun", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2017/7/18/d745a6b9901f5198aa7e3de2533b175f~tplv-t2oaga2asx-image.image", "level": 3, "description": "喜欢折腾技术的小码农", "followee_count": 5, "follower_count": 4305, "post_article_count": 31, "digg_article_count": 0, "got_digg_count": 2292, "got_view_count": 86671, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 1, "power": 3158, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, 
"select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546507, "tag_id": "6809640381920051207", "tag_name": "Chrome", "color": "#4586F2", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/084db5f7bc6a239be270.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234593, "mtime": 1631675564, "id_type": 9, "tag_alias": "", "post_article_count": 2663, "concern_user_count": 131553}, {"id": 2546515, "tag_id": "6809640392770715656", "tag_name": "HTML", "color": "#E44D25", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f18965b2a0ef9cac862e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239419, "mtime": 1631683077, "id_type": 9, "tag_alias": "", "post_article_count": 6109, "concern_user_count": 240134}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, 
"id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546536, "tag_id": "6809640420889346056", "tag_name": "编程语言", "color": "#C679FF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/cde94583e8f0ca3f6127.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435972663, "mtime": 1631690928, "id_type": 9, "tag_alias": "", "post_article_count": 3637, "concern_user_count": 120863}, {"id": 2546581, "tag_id": "6809640482725953550", "tag_name": "程序员", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/63baec1130bde0284e98.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1438712834, "mtime": 1631686409, "id_type": 9, "tag_alias": "", "post_article_count": 16341, "concern_user_count": 275512}, {"id": 2546683, "tag_id": "6809640625856577549", "tag_name": "浏览器", "color": "#47ebc7", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/baf3558e2acdfa623201.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1460153459, "mtime": 1631677186, "id_type": 9, "tag_alias": "", "post_article_count": 3341, "concern_user_count": 28324}], "user_interact": {"id": 6844903502217478157, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903751141212173", "article_info": {"article_id": "6844903751141212173", "user_id": "4476867078793198", 
"category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903751141212173", "cover_image": "", "is_gfw": 0, "title": "移动端开发——关于局部区域滚动总结 | 实战系列", "brief_content": "在移动端开发的时候经常会碰到区域滚动的需求,当然实现起来也是非常简单的,给需要滚动的元素定高然后添加一个overflow-y:scorll自然就可以滚动了,但是添加这个属性之后,使用chrome或者其他浏览器工具调试时是支正常的,但是到手机上时滚动效果就十分的奇怪,滚动会让人感…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1546433727", "mtime": "1598486341", "rtime": "1546437556", "draft_id": "6845076138142728199", "view_count": 3157, "collect_count": 66, "digg_count": 58, "comment_count": 5, "hot_index": 220, "is_hot": 0, "rank_index": 0.00045254, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4476867078793198", "user_name": "广州芦苇科技web前端", "company": "广州芦苇信息科技有限公司", "job_title": "广州芦苇科技web前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/3/2/1693c940a76dc9bd~tplv-t2oaga2asx-image.image", "level": 3, "description": "广州芦苇信息科技有限公司 - web前端开发,学习沉淀,技术积累,技术探索 ", "followee_count": 23, "follower_count": 322, "post_article_count": 75, "digg_article_count": 113, "got_digg_count": 977, "got_view_count": 146362, "post_shortmsg_count": 75, "digg_shortmsg_count": 71, "isfollowed": false, "favorable_author": 0, "power": 2464, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903751141212173, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6971009198205173790", "article_info": {"article_id": "6971009198205173790", "user_id": "4187356603095853", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS入门基础(样式,css文件,选择器)", "brief_content": "CSS简介 详细样例: 基本用法——给元素添加样式 行内样式 运行效果: 内部样式 完整样例代码: 效果截图: 外部样式 定义样式 css 文件 在 html 中引入 css 文档 样例: 完整代码:", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623064718", "mtime": "1623145125", "rtime": "1623145125", "draft_id": "6971008289521795086", "view_count": 161, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 11, "is_hot": 0, "rank_index": 0.00045193, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4187356603095853", "user_name": "牛哄哄的柯南", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/cfefbea3ad807e51510cf516569b27a3~300x300.image", "level": 1, "description": "", "followee_count": 9, "follower_count": 3, "post_article_count": 
287, "digg_article_count": 14, "got_digg_count": 30, "got_view_count": 5596, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 85, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6971009198205173790, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6909060535510269965", "article_info": {"article_id": 
"6909060535510269965", "user_id": "2330620382950376", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS3实现气泡对话框", "brief_content": "可以把该对话框拆解为带圆角的普通矩形+三角形, 三角形可以借助border属性实现, 其中三角形占位可以借助CSS3的before、after伪元素实现. 实现原理:将2个三角形叠加、before的三角形边框颜色和外面框的保持一致, after的三角形边框设置成白色即可.", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1608641065", "mtime": "1624538646", "rtime": "1608691302", "draft_id": "6909030520173101070", "view_count": 745, "collect_count": 2, "digg_count": 2, "comment_count": 1, "hot_index": 40, "is_hot": 0, "rank_index": 0.00045188, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2330620382950376", "user_name": "小胖砸儿", "company": "广州某电商行业", "job_title": "前端开发", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/a9518377444eefbc18aec0331403682c~300x300.image", "level": 2, "description": "前端小妹、在制造bug的路上越走越远!", "followee_count": 15, "follower_count": 17, "post_article_count": 40, "digg_article_count": 2, "got_digg_count": 68, "got_view_count": 15823, "post_shortmsg_count": 8, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 226, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, 
"promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6909060535510269965, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6984789291264114695", "article_info": {"article_id": "6984789291264114695", "user_id": "2340232218281054", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS动画", "brief_content": "CSS动画 动画的原理 其定义为:有许多静止的画面(帧),以一定的速度(如30张/s)连续播放时,肉眼因视觉残像产生的错觉,而误以为是活动的画面。 帧的概念 帧是指每一个静止的画面,一般影视作品的播放", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1626273080", "mtime": "1626318024", "rtime": "1626318024", "draft_id": "6984677600887046158", "view_count": 100, "collect_count": 1, "digg_count": 1, "comment_count": 0, "hot_index": 6, "is_hot": 0, "rank_index": 0.00045173, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2340232218281054", "user_name": "Carlos_徐", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/mosaic-legacy/3797/2889309425~300x300.image", "level": 1, "description": "前端学习中", "followee_count": 1, "follower_count": 1, "post_article_count": 13, "digg_article_count": 1, "got_digg_count": 3, "got_view_count": 793, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, 
"favorable_author": 0, "power": 10, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6984789291264114695, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903785316237325", "article_info": {"article_id": "6844903785316237325", "user_id": "3491704661098286", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, 
"link_url": "https://juejin.im/post/6844903785316237325", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/3/1/16937600eb6792fa~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "红绿灯🚦——CSS 动画", "brief_content": "乍一看你可能会觉得纯CSS动画可能做不到,实际上知道了原理还是比较简单的。 从上面样式里看出,每盏灯的 animation 持续时间都是10s,那动画不断循环播放的时候,它们之间就会一直保持同步的时间关系。 从图中看,一共分5个阶段或者说5个步骤,在每个阶段,不同的灯处于 on…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1551412291", "mtime": "1599813440", "rtime": "1551413663", "draft_id": "6845076189468426253", "view_count": 3169, "collect_count": 32, "digg_count": 37, "comment_count": 8, "hot_index": 203, "is_hot": 0, "rank_index": 0.00045172, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3491704661098286", "user_name": "ThinkerZhang", "company": "食议兽科技", "job_title": "前端工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/10/18/16687a007f92ece1~tplv-t2oaga2asx-image.image", "level": 2, "description": "Make a Thinker!", "followee_count": 3, "follower_count": 44, "post_article_count": 6, "digg_article_count": 15, "got_digg_count": 143, "got_view_count": 15904, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 302, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, 
"mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903785316237325, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903767033249805", "article_info": {"article_id": "6844903767033249805", "user_id": "4072246798980567", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903767033249805", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/1/24/1687f9c99037490f~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "Webnovel 不用照顾 Edge 浏览器性能?想多了!", "brief_content": "曾写过一篇性能优化 “ 长篇报告 ” 「 checkbox 美化引发的蝴蝶效应 」 ,也曾感叹 CSS 对渲染的影响是如此大,也许深化记忆点的代价就是被同一块石头绊倒2次 ?是的,性能优化“报告”第二弹来了,希望本篇文章可以在优化页面性能上给大家提供一些思路。 visibili…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1548329342", "mtime": "1598765818", "rtime": "1548329342", "draft_id": "6845076165565087751", "view_count": 2375, "collect_count": 26, "digg_count": 75, "comment_count": 20, "hot_index": 213, "is_hot": 0, "rank_index": 0.00045133, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4072246798980567", "user_name": "阅文前端团队", "company": "上海阅文信息技术有限公司", "job_title": "前端工程师", "avatar_large": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/5/21/16381b99719b3107~tplv-t2oaga2asx-image.image", "level": 4, "description": "微信公众号 ID: yuewen_YFE,官网:https://blog.yux.team", "followee_count": 14, "follower_count": 5772, "post_article_count": 47, "digg_article_count": 20, "got_digg_count": 7577, "got_view_count": 204108, "post_shortmsg_count": 1, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 1, "power": 8833, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 1, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 
6844903767033249805, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": {"org_type": 1, "org_id": "6930554016409583616", "online_version_id": 6932674735939518477, "latest_version_id": 6932674735939518477, "power": 8004, "ctime": 1613650444, "mtime": 1631692819, "audit_status": 2, "status": 0, "org_version": {"version_id": "6932674735939518477", "icon": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4916c08157734748aad14fe505ffe59d~tplv-k3u1fbpfcp-watermark.image", "background": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/3ecfda9574d1460683cf3c2b46aed6a2~tplv-k3u1fbpfcp-watermark.image", "name": "阅文前端团队", "introduction": "微信公众号 ID: yuewen_YFE,官网:https://blog.yux.team", "weibo_link": "", "github_link": "https://github.com/yued-fe", "homepage_link": "https://blog.yux.team", "ctime": 1614222195, "mtime": 1614222195, "org_id": "6930554016409583616", "brief_introduction": "微信公众号 ID: yuewen_YFE,官网:https://blog.yux.team", "introduction_preview": "微信公众号 ID: yuewen_YFE,官网:https://blog.yux.team"}, "follower_count": 5860, "article_view_count": 177892, "article_digg_count": 6226}, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6844903688444739592", "article_info": {"article_id": "6844903688444739592", "user_id": "4283353031252967", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470, 6809640394175971342, 6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "https://juejin.im/post/6844903688444739592", "cover_image": "", "is_gfw": 0, "title": "LESS即学即用", "brief_content": "我们大家都知道HTML和CSS不属于编程语言而是属于标记语言,所以很难像JS一样定义变量、编写方法、实现模块化开发等。而目前的CSS编写模式中,都是定义一些公共的样式类名,哪一块的HTML需要这个样式,就去增加对应的样式类名,所以我们经常看到一个标签上存在很多样式类名,在这种模…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1539083291", "mtime": "1599629389", "rtime": "1539138242", "draft_id": 
"6845075622541148174", "view_count": 3792, "collect_count": 45, "digg_count": 48, "comment_count": 7, "hot_index": 244, "is_hot": 0, "rank_index": 0.00045109, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4283353031252967", "user_name": "浪里行舟", "company": "联系微信frontJS", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/4ad29756aaea9618a8b385d6be23add4~300x300.image", "level": 6, "description": "", "followee_count": 106, "follower_count": 14741, "post_article_count": 58, "digg_article_count": 216, "got_digg_count": 15747, "got_view_count": 817992, "post_shortmsg_count": 16, "digg_shortmsg_count": 40, "isfollowed": false, "favorable_author": 1, "power": 23926, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6844903688444739592, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6974739141921603597", "article_info": {"article_id": "6974739141921603597", "user_id": "4187356603095853", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS文件是什么", "brief_content": "CSS文件是什么 上张图: 怎么创建 CSS 文件 怎么使用 CSS 文件 首先我们先创建一个 html 文件写一些内容 接下来我们用 css 文件来修饰 css 代码: html 代码: 效果截图:", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623933107", "mtime": "1624350542", "rtime": "1624350542", "draft_id": "6974738938128760868", "view_count": 163, "collect_count": 0, 
"digg_count": 1, "comment_count": 0, "hot_index": 9, "is_hot": 0, "rank_index": 0.0004483, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4187356603095853", "user_name": "牛哄哄的柯南", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/cfefbea3ad807e51510cf516569b27a3~300x300.image", "level": 1, "description": "", "followee_count": 9, "follower_count": 3, "post_article_count": 287, "digg_article_count": 14, "got_digg_count": 30, "got_view_count": 5596, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 85, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", 
"back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6974739141921603597, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}, {"article_id": "6905928473786974221", "article_info": {"article_id": "6905928473786974221", "user_id": "2154698523020503", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "transition, transform, animation", "brief_content": ":hover 伪类选择器不仅可以用于a标签, 还可以用于所有标签, 但行元素在转为块元素或者脱标之前不能设置宽高. 这个属性让人不适应的地方在于属性值还要取值. 3D 相比 2D 多了个厚度.", "is_english": 0, "is_original": 1, "user_index": 5.595262072989907, "original_type": 0, "original_author": "", "content": "", "ctime": "1607911798", "mtime": "1607913629", "rtime": "1607913629", "draft_id": "6905928054221537287", "view_count": 626, "collect_count": 9, "digg_count": 5, "comment_count": 0, "hot_index": 36, "is_hot": 0, "rank_index": 0.00045067, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2154698523020503", "user_name": "fhsWar", "company": "银盛通信", "job_title": "前端开发工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/a484ef4b9af5d7f27ea3368bdbf416be~300x300.image", "level": 2, "description": "前端,java,go", "followee_count": 3, "follower_count": 9, "post_article_count": 27, "digg_article_count": 12, "got_digg_count": 72, "got_view_count": 6924, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 141, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, 
"select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6905928473786974221, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603490102121960260B003EB3"}], "cursor": "eyJ2IjoiNzAwNzgwMzIxNDc1ODE1MDE3NSIsImkiOjI0NjB9", "count": 4601, "has_more": true} |
24,201 | 3ec1e22d2a77315673e728f8865f500aeb9405b5 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup as bs
import locale
import urllib.request
import os, sys
import re
from datetime import datetime
import logging
sys.path.append(os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))),
"maps"))
from coordinates import get_coordinates, get_continent
from distance import get_distance
from tools.user_agent import get_user_agent
from datetime import date
# Spanish month names, index 0 == January.  Used by parse_date() to map
# a scraped month word to its month number.
months = [
    'enero', 'febrero', 'marzo', 'abril',
    'mayo', 'junio', 'julio', 'agosto',
    'septiembre', 'octubre', 'noviembre', 'diciembre',
]

# Module-level logger; handler/level configuration is left to the application.
logger = logging.getLogger('viajes.parse_travel')
def is_number(string):
    """Return True when *string* can be parsed as a base-10 integer."""
    try:
        int(string)
    except ValueError:
        return False
    return True
def _first_city(cities):
    """Return the first city of a ';'- or ','-separated city list."""
    for separator in (';', ','):
        if separator in cities:
            return cities.split(separator)[0].strip()
    return cities.strip()


def parse_travel(travel_url, price, env):
    """Scrape a travel-offer page and summarise it as a dict.

    Arguments:
        - travel_url: str, URL of the offer page.
        - price: numeric offer price.
        - env: str, execution environment ("dev" or other); only used by
          the (currently disabled) locale configuration below.

    Returns:
        dict with keys: departure, destination, return_to, ticket_type,
        date, distance (km), price, distance_price (price per km), url,
        continent.

    Raises:
        ValueError: when travel_url is not a str.
    """
    if type(travel_url) != str:
        raise ValueError("travel_url is not a String object")
    logger.debug(travel_url)
    '''
    if env == "dev":
        locale.setlocale(locale.LC_TIME, "es_ES.utf8")
    else:
        locale.setlocale(locale.LC_TIME, "Spanish_Spain.1252")
    '''
    req = urllib.request.Request(
        travel_url,
        data=None,
        headers={
            'User-Agent': get_user_agent()
        }
    )
    document = urllib.request.urlopen(req)
    # Only for the development stage
    # with open('tools/test.txt', 'r') as file:
    #     document = file.read()
    travel_page = bs(document, 'html.parser')
    content = travel_page.find(
        "div",
        class_="entry-content").find_all("p")
    travel = {
        'departure': '',
        'destination': '',
        'return_to': '',
        'ticket_type': '',
        'date': datetime.now(),
        'distance': 0.0,
        'price': 0.0,
        'distance_price': 0.0,
        'url': '',
        'continent': ''
    }
    # Each field lives in its own paragraph; the value follows the last
    # ':' and any trailing '(...)' annotation is dropped.
    for p in content:
        if "Ciudad de salida" in p.text:
            travel['departure'] = p.text.split(
                ":")[-1].strip().split("(")[0].strip()
        elif "Ciudad de regreso" in p.text:
            travel['return_to'] = p.text.split(
                ":")[-1].strip().split("(")[0].strip()
        elif "Ciudad de destino" in p.text or "Ciudad" in p.text:
            travel['destination'] = p.text.split(
                ":")[-1].strip().split("(")[0].strip()
        elif "Tipo de billete" in p.text:
            travel['ticket_type'] = p.text.split(
                ":")[-1].strip()
        elif "Fechas:" in p.text:
            travel['date'] = parse_date(p)
    # Only the first listed city is geocoded; the travel dict itself
    # keeps the full city list as scraped.
    destination_coord = None
    if travel['destination']:
        destination = _first_city(travel['destination'])
        destination_coord = get_coordinates(destination)
        travel['continent'] = get_continent(destination)
    travel['price'] = price
    # Distance needs both endpoints; guarding here avoids the NameError
    # (undefined destination_coord) and ZeroDivisionError the original
    # raised on incomplete pages or zero-distance trips.
    if travel['departure'] and destination_coord is not None:
        departure_coord = get_coordinates(_first_city(travel['departure']))
        travel['distance'] = get_distance(
            [departure_coord, destination_coord]) / 1000
        if travel['distance']:
            travel['distance_price'] = travel['price'] / travel['distance']
    travel['url'] = travel_url
    return travel
def parse_date(date_p):
    """Extract the travel date from a 'Fechas:' paragraph.

    Keeps only the text after 'Fechas:' (and, for a date range, after
    the dash, i.e. the return leg), then looks for a Spanish month name
    together with an optional day before it and year after it.  A
    missing day defaults to the 28th; a missing year defaults to the
    current year.

    Arguments:
        - date_p: an element with a ``text`` attribute (BeautifulSoup tag).

    Returns:
        datetime parsed as "%d %m %Y".

    Raises:
        ValueError: when no month name can be found in the paragraph.
    """
    travel_date = date_p.text.split("Fechas:")[-1].split("(")[0].strip()
    # For a date range keep the right-hand (return) date.
    if "–" in travel_date:
        travel_date = travel_date.split("–")[-1].strip()
    elif "-" in travel_date:
        travel_date = travel_date.split("-")[-1].strip()
    # Raw string fixes the invalid escape sequence of the original
    # pattern "[^\w']" (SyntaxWarning on modern Python).
    pattern = re.compile(r"[^\w']")
    travel_date = pattern.sub(' ', travel_date)
    travel_date = travel_date.lower().split()
    travel_date_str = ""
    for idx, month in enumerate(months):
        try:
            index = travel_date.index(month)
        except ValueError:
            continue
        # Day: token right before the month, when present and a valid
        # day number.  The `index > 0` check fixes the original wrap to
        # travel_date[-1] when the month is the first token.
        if index > 0 and is_number(travel_date[index - 1]) and \
                int(travel_date[index - 1]) <= 31:
            travel_date_str += " " + travel_date[index - 1]
        else:
            travel_date_str += " 28"
        travel_date_str += " " + format(idx + 1, '02')
        # Year: token right after the month; fall back to the current
        # year when missing or non-numeric (the original produced a
        # string strptime could not parse in the non-numeric case).
        try:
            if is_number(travel_date[index + 1]):
                travel_date_str += " " + travel_date[index + 1]
            else:
                travel_date_str += " " + str(date.today().year)
        except IndexError:
            travel_date_str += " " + str(date.today().year)
        break
    travel_date_str = travel_date_str.strip()
    return datetime.strptime(travel_date_str, "%d %m %Y")
if __name__ == "__main__":
    # Quick manual smoke test; requires network access.
    # BUG FIX: parse_travel takes three required arguments; the original
    # call omitted `env` and raised TypeError.
    print(parse_travel(
        "http://www.exprimeviajes.com/chollo-vuelos-baratos-a-colombia-por-solo-359-euros/",
        150,
        "dev"))
|
def user(uname):
    """Print the given username to stdout.

    The original assigned the argument to an unused local (`Username`);
    that dead store is removed.
    """
    print(uname)
|
24,203 | 8b07bfcb080c1dbf07259abdfa9858e9c6c794c5 | #Created By Faraz Naseem..... 110009274..... November 22, 2019.
'''_______________________THIS IS PART TWO OF THE PROGRAM________________________'''
#The user is prompted to enter a string that is at least seven characters.
str1 = input("Please enter a string that is at least seven letters: ")
#A new variable is created to store the original string.
original_string = str1
first_element = str1[0]
last_element = len(str1) - 1
#The original string is printed.
print(str1)
#The list is modified before the first iteration of the while loop.
str1 = str1[1:last_element] + str1[last_element] + str1[0]
'''The condition set for the while loop is while the modified string
is not equal to the original string.'''
while(str1 != original_string):
#The modified string is printed.
print(str1)
#The modification continuosly occurs.
str1 = str1[1:last_element] + str1[last_element] + str1[0]
#The final string (which is equal to the original string) is output.
print(str1) |
24,204 | 20faec0cc5bf6aa711e0a708c11c4cb38ab48933 | # Thread error in Python & PyQt
Queue.Queue
|
24,205 | 29815d81358213317f0766d0b46b8bd4d3f77c80 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Sangil Lee
"""
Extract topics from a rosbag.
"""
import os
import argparse
import numpy as np
import cv2
import rosbag
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import IPython
bridge = CvBridge()
def main():
    """
    Extract topics from a rosbag: DVS/RGBD images and depth maps as PNG
    files, plus events, IMU samples and ground-truth poses as
    tab-separated text files, all written under ``base_dir``.
    """
    parser = argparse.ArgumentParser(description="Extract images from a ROS bag.")
    parser.add_argument("bag_file", help="Input ROS bag.")
    parser.add_argument("base_dir", nargs='?', default="./dataset", help="Output directory.")
    args = parser.parse_args()
    bag = rosbag.Bag(args.bag_file, "r")
    # NOTE: Python 2 print statements -- this script targets ROS/Python 2.
    print "Extract topics from %s into %s" %(args.bag_file, args.base_dir)
    # One sub-directory per image-like topic.
    if not os.path.exists(os.path.join(args.base_dir,"image_dvs/")):
        os.makedirs(os.path.join(args.base_dir,"image_dvs"), mode=0o777)
    if not os.path.exists(os.path.join(args.base_dir,"image_rgbd/")):
        os.makedirs(os.path.join(args.base_dir,"image_rgbd"), mode=0o777)
    if not os.path.exists(os.path.join(args.base_dir,"depth/")):
        os.makedirs(os.path.join(args.base_dir,"depth"), mode=0o777)
    # One index/data file per topic.
    text_image_dvs = open(os.path.join(args.base_dir,"image_dvs.txt"), 'w')
    text_image_rgbd = open(os.path.join(args.base_dir,"image_rgbd.txt"), 'w')
    text_depth = open(os.path.join(args.base_dir,"depth.txt"), 'w')
    text_events = open(os.path.join(args.base_dir,"events.txt"), 'w')
    text_imu = open(os.path.join(args.base_dir,"imu.txt"), 'w')
    text_gt_pose = open(os.path.join(args.base_dir,"pose.txt"), 'w')
    # Column headers for each output file.
    text_image_dvs.write("# DVS images\n")
    text_image_dvs.write("# timestamp filename\n")
    text_image_rgbd.write("# RGBD images\n")
    text_image_rgbd.write("# timestamp filename\n")
    text_depth.write("# RGBD depth\n")
    text_depth.write("# timestamp filename\n")
    text_events.write("# events\n")
    text_events.write("# timestamp x y polarity\n")
    text_imu.write("# imu\n")
    text_imu.write("# acceleration gyroscope\n")
    text_imu.write("# timestamp ax ay az gx gy gz\n")
    text_gt_pose.write("# timestamp x y z qx qy qz qw\n")
    # Stream the bag once and dispatch each message to its writer.
    for topic, msg, t in bag.read_messages(topics=["/camera/rgb/image_color", "/camera/depth_registered/image", "/dvs/events", "/dvs/image_raw", "/dvs/imu", "/vicon/"]):
        if topic == "/dvs/image_raw":
            save_image(msg, t, args.base_dir, "image_dvs", text_image_dvs)
        elif topic == "/camera/rgb/image_color":
            save_image(msg, t, args.base_dir, "image_rgbd", text_image_rgbd)
        elif topic == "/camera/depth_registered/image":
            save_depth(msg, t, args.base_dir, "depth", text_depth)
        elif topic == "/dvs/events":
            save_event(msg, text_events)
        elif topic == "/dvs/imu":
            save_imu(msg, t, text_imu)
        elif topic == "/vicon/":
            save_pose(msg, t, text_gt_pose)
        # Trailing comma: overwrite the same console line as a progress meter.
        print "\rTime passed: %i.%09i [s]" %(t.secs, t.nsecs),
    text_image_dvs.close()
    text_image_rgbd.close()
    text_depth.close()
    text_events.close()
    text_imu.close()
    text_gt_pose.close()
    bag.close()
    return
def save_image(msg, t, base_dir, output_dir, text):
    """
    Convert an Image message to an OpenCV array, write it under
    base_dir/output_dir as a timestamp-named PNG, and append a
    "timestamp filename" line to the index file.
    """
    frame = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
    filename = os.path.join(output_dir, "%i.%09i.png" % (t.secs, t.nsecs))
    destination = os.path.join(base_dir, filename)
    cv2.imwrite(destination, frame)
    text.write("%i.%09i\t%s\n" % (t.secs, t.nsecs, filename))
def save_depth(msg, t, base_dir, output_dir, text):
    """
    Save a depth map into the output directory as a 16-bit PNG and log
    its timestamp and filename to the index file.
    """
    cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
    # Scale into the 16-bit range before writing; presumably the input
    # is a float depth image -- TODO confirm units/range against the bag.
    cv_img = np.uint16(cv_img * 255)
    filename = os.path.join(output_dir, "%i.%09i.png" %(t.secs, t.nsecs))
    cv2.imwrite(os.path.join(base_dir, filename), cv_img)
    text.write("%i.%09i\t%s\n" %( t.secs, t.nsecs, filename ))
def save_event(msg, text):
    """
    Append every DVS event in the message to the events file as a
    "timestamp x y polarity" line (polarity written as 0/1).
    """
    for event in msg.events:
        line = "%i.%09i\t%i\t%i\t%i\n" % (
            event.ts.secs, event.ts.nsecs, event.x, event.y, event.polarity + 0)
        text.write(line)
def save_imu(msg, t, text):
    """
    Append one IMU sample to the imu file: timestamp, then linear
    acceleration (x, y, z), then angular velocity (x, y, z).
    """
    acc = msg.linear_acceleration
    gyr = msg.angular_velocity
    text.write("%i.%09i\t%f\t%f\t%f\t%f\t%f\t%f\n"
               % (t.secs, t.nsecs, acc.x, acc.y, acc.z, gyr.x, gyr.y, gyr.z))
def save_pose(msg, t, text):
    """
    Append one ground-truth pose to the pose file: timestamp, position
    (x, y, z), then orientation quaternion (qx, qy, qz, qw).
    """
    pos = msg.pose.position
    ori = msg.pose.orientation
    text.write("%i.%09i\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n"
               % (t.secs, t.nsecs, pos.x, pos.y, pos.z, ori.x, ori.y, ori.z, ori.w))
# Script entry point.
if __name__ == '__main__':
    main()
|
24,206 | f62f10ff52c2d790ea638fe0a1ecdb735264064d | import random
from pymongo import MongoClient
from discord import Embed
from dotenv import load_dotenv
import os
from banners.images import FIVE_STARS_IMAGES, FOUR_STARS_IMAGES, THREE_STAR_IMAGES
load_dotenv("../.env")
MONGODB_URL = os.getenv("MONGODB_URL")
class EventBanner:
    """Base class for time-limited ("event") gacha banners.

    Subclasses populate the character pools and banner metadata; this
    class implements the wish/pity mechanics and persists per-user
    state in MongoDB (``gacha_bot.users``).  Callers set ``self.user``
    and call ``get_user()`` before wishing.
    """

    def __init__(self):
        # Base Class for Banner Classes
        self.banner_name = "Base Event Banner Class"
        self.five_star_pool = []
        self.four_star_pool = []
        self.event = True
        # Name of the featured 5-star character; set by subclasses.
        self.event_hero = None
        self.rate_up_four_star_pool = []
        self.three_star_pool = []
        # Discord user id; set by the caller before get_user().
        self.user = None
        # One Embed per roll, plus a trailing summary page.
        self.embed_list = []
        self.banner_image = None
        self.__get_database()

    # Function to get mongodb database
    def __get_database(self):
        cluster = MongoClient(MONGODB_URL, tlsInsecure=True)
        db = cluster["gacha_bot"]
        self.collection = db["users"]
        return self.collection

    def get_user(self):
        """Fetch this user's document, creating it on first contact.

        Caches the document on ``self.user_data`` and returns it.
        """
        collection = self.collection
        query = {"_id": self.user}
        if collection.count_documents(query) == 0:  # If a user is not in the database, adds the user to the db
            post = {
                "_id": self.user,
                "event": {
                    "total_wishes": 0,
                    "since_last_5_star": 0,
                    "since_last_4_star": 0,
                    "since_last_event_hero": 0,
                    "rolls": {}
                },
                "standard": {
                    "total_wishes": 0,
                    "since_last_5_star": 0,
                    "since_last_4_star": 0,
                    "rolls": {}
                }
            }
            collection.insert_one(post)
            request = post
        else:  # If a user is in the database, returns the user json file
            request = collection.find_one(query)
        self.user_data = request
        return self.user_data

    def post_user(self):
        """Write the cached user document back to MongoDB."""
        collection = self.collection
        collection.replace_one({"_id": self.user}, self.user_data)
        return

    # For a single wish
    def do_single_wish(self):
        """Perform one wish, honouring 5-star (90th) and 4-star (10th) pity."""
        self.user_data["event"]["total_wishes"] += 1  # Increments the total wish counter
        if self.user_data["event"]["since_last_5_star"] == 89:  # 5 star pity takes precedent over any other pity
            self.do_five_star_roll()
        elif self.user_data["event"]["since_last_4_star"] == 9:  # 4 star pity occurs when the roll is the 10th roll
            self.do_four_star_roll()
        else:  # TODO: Temporary additions while testing 5 star rolling
            roll = round(random.random(), 3)
            if roll <= 0.006:  # If we luck out a 5 star
                self.do_five_star_roll()
            elif roll <= 0.051:  # If we luck out a 4 star
                self.do_four_star_roll()
            else:
                self.do_three_star_roll()
        self.post_user()

    # For multiple wishes
    def do_many_wishes(self, wishes):
        """Perform *wishes* single wishes and append a summary embed."""
        for x in range(wishes):
            self.do_single_wish()
        # Adds a summary page after the wishes
        embed = Embed(title="User Summary", description="\u200b", color=0x2aec27)
        embed.add_field(name="Total Event Wishes", value='{:,}'.format(self.user_data['event'].get('total_wishes')), inline=False)
        embed.add_field(name="Pity", value=f"**5 Star Pity:** {self.user_data['event'].get('since_last_5_star')} \n**4 Star Pity:** {self.user_data['event'].get('since_last_4_star')}", inline=False)
        embed.set_footer(text="Gacha Bot by Over#6203. Use the reactions to navigate the menus.")
        embed.set_thumbnail(url="https://static.wikia.nocookie.net/gensin-impact/images/1/1f/Item_Intertwined_Fate.png/revision/latest/top-crop/width/360/height/360?cb=20201117073436")
        embed.set_image(url=self.banner_image)
        self.embed_list.append(embed.copy())

    # Function for rolling five stars only
    def do_five_star_roll(self):
        """Resolve a 5-star roll: featured hero on hard pity or a 50/50."""
        # For rolling the event character
        def rolls_event_hero():
            # Resets all pity
            self.user_data["event"]["since_last_5_star"] = 0
            self.user_data["event"]["since_last_4_star"] = 0
            self.user_data["event"]["since_last_event_hero"] = 0
            # Adds the roll to the user inventory
            self.user_data["event"]["rolls"][self.event_hero] = self.user_data["event"]["rolls"].get(self.event_hero, 0) + 1
            # Adds an embed to the embed_list
            embed = Embed(title=f"5 Star Roll ~ {self.event_hero}", description=f"Total Event Banner Rolls: **{'{:,}'.format(self.user_data['event'].get('total_wishes'))}**", color=0xf8a71b)
            embed.set_image(url=FIVE_STARS_IMAGES[self.event_hero])
            embed.set_footer(text="Gacha Bot by Over#6203. Use the reactions to navigate the menus.")
            self.embed_list.append(embed.copy())

        def rolls_random_five_star():
            # When rolling a random 5 star, we have a 50/50 chance of rolling the event hero
            if bool(random.getrandbits(1)):
                rolls_event_hero()
                return
            # If we don't roll the event hero
            character = random.choice(self.five_star_pool)
            # Resets Counters
            self.user_data["event"]["since_last_5_star"] = 0
            self.user_data["event"]["since_last_4_star"] = 0
            # Ensures we are guaranteed the event hero next 5 star pull
            self.user_data["event"]["since_last_event_hero"] = 999
            # Adds the roll to the user inventory
            self.user_data["event"]["rolls"][character] = self.user_data["event"]["rolls"].get(character, 0) + 1
            # Adds an embed to the embed_list
            embed = Embed(title=f"5 Star Roll ~ {character}", description=f"Total Event Banner Rolls: **{'{:,}'.format(self.user_data['event'].get('total_wishes'))}**", color=0xf8a71b)
            embed.set_image(url=FIVE_STARS_IMAGES[character])
            embed.set_footer(text="Gacha Bot by Over#6203. Use the reactions to navigate the menus.")
            self.embed_list.append(embed.copy())

        # If hard pity guarantees the event character
        if self.user_data["event"]["since_last_event_hero"] >= 179:
            rolls_event_hero()
            return
        else:
            rolls_random_five_star()
            return

    def do_four_star_roll(self):
        """Resolve a 4-star roll with a 50/50 rate-up chance."""
        self.user_data["event"]["since_last_5_star"] += 1
        self.user_data["event"]["since_last_4_star"] = 0
        self.user_data["event"]["since_last_event_hero"] += 1
        # When rolling a random 4 star in the event banner, we have a 50/50 chance of rolling a featured hero
        if bool(random.getrandbits(1)):
            character = random.choice(self.rate_up_four_star_pool)
        else:
            character = random.choice(self.four_star_pool)
        self.user_data["event"]["rolls"][character] = self.user_data["event"]["rolls"].get(character, 0) + 1
        # Adds an embed to the embed_list
        embed = Embed(title=f"4 Star Roll ~ {character}", description=f"Total Event Banner Rolls: **{'{:,}'.format(self.user_data['event'].get('total_wishes'))}**", color=0xbe31f2)
        embed.set_image(url=FOUR_STARS_IMAGES[character])
        embed.set_footer(text="Gacha Bot by Over#6203. Use the reactions to navigate the menus.")
        self.embed_list.append(embed.copy())
        return

    def do_three_star_roll(self):
        """Resolve a 3-star roll and advance every pity counter."""
        self.user_data["event"]["since_last_5_star"] += 1
        self.user_data["event"]["since_last_4_star"] += 1
        # BUG FIX: the original used `== 1`, a no-op comparison; the
        # event-hero pity counter must advance on every non-5-star roll,
        # mirroring do_four_star_roll.
        self.user_data["event"]["since_last_event_hero"] += 1
        character = random.choice(self.three_star_pool)
        self.user_data["event"]["rolls"][character] = self.user_data["event"]["rolls"].get(character, 0) + 1
        # Adds an embed to the embed_list
        embed = Embed(title=f"3 Star Roll ~ {character}", description=f"Total Event Banner Rolls: **{'{:,}'.format(self.user_data['event'].get('total_wishes'))}**", color=0x26aef2)
        embed.set_image(url=THREE_STAR_IMAGES[character])
        embed.set_footer(text="Gacha Bot by Over#6203. Use the reactions to navigate the menus.")
        self.embed_list.append(embed.copy())
        return

    def __str__(self):
        return self.banner_name

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        # Persist user state on context exit.  Defaulted exception
        # parameters keep the original zero-argument call style working
        # while matching the context-manager protocol.
        self.post_user()
|
24,207 | 7643149be330abd57dbafb88c70fb5dcb17d134d | import sys
import discord
from discord.ext import commands
from discord.ext.commands import bot
intents = discord.Intents.default()
intents.members = True
class UtilityCog(commands.Cog):
    """Miscellaneous utility commands (help, dev credits, diagnostics).

    BUG FIX: several commands were decorated with ``@bot.Command()`` --
    ``discord.ext.commands.bot`` is a module with no ``Command``
    decorator, so loading the cog raised AttributeError.  They now use
    ``@commands.command()``, matching ``dev`` and ``help``.  ``test``
    and ``stop`` were also missing ``self``, which broke cog dispatch.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def dev(self, ctx):
        """Show the development team and version information."""
        devEmbed = discord.Embed(title="Developers:",
                                 description="**These peeps worked to bring me to me to what I am today:**\n"
                                 + "\nflop#2371\nSeltzer#0006\nklb#5169\n\n"
                                 + "**Version:**\t 0.0.0\n"
                                 + "**Date Released:** \t N/A", color=discord.Color.purple())
        await ctx.send(embed=devEmbed)

    # playing with embeds
    @commands.command()
    async def help(self, ctx):
        """DM the caller a summary of the available commands."""
        helpEmbed = discord.Embed(title="In your hour of need! Gizmo is here~",
                                  description="**Commands:**\n" +
                                  "**^r** -> Roll some dice (Format 1d20)\n" +
                                  "**^purge** -> Delete some messages (Format purge 3)\n" +
                                  "**^choose** -> Gizmo will decide!\n(Format:^choose pizza burgers)\n" +
                                  "**^speak** -> Gizmo will speak to you\n" +
                                  "**^dev** -> See the Development Team/Version\n" +
                                  "\n\n***Gizmo is still a kitten, let us know about any possible bugs!***",
                                  color=discord.Color.orange())
        await ctx.author.send(embed=helpEmbed)

    @commands.command()
    async def joined(self, ctx, member: discord.Member):
        """Says when a member joined."""
        await ctx.send('{0.name} joined in {0.joined_at}'.format(member))

    # ////// Who's who: ///////
    # async def on_member_join(ctx, member):
    #     guild = member.guild
    #     if guild.system_channel is not None:
    #         to_send = 'Welcome {0.mention} to {1.name}!'.format(member, guild)
    #         await ctx.send(to_send)
    #
    #
    # @bot.event
    # async def on_command_error(error, ctx):
    #     if isinstance(error, commands.MissingRequiredArgument):
    #         await ctx.send('Please use proper formatting. Use ^help for more info.')
    #
    """ outputs username + whole message after command """
    @commands.command()
    async def cTest(self, ctx, *, arg):
        """Echo the invoking member's name and the full argument string."""
        user = ctx.message.author
        formatUser = str(user)
        # gets rid of anything past # for example klb#5169 -> klb
        x = formatUser.index("#")
        formatUser = formatUser[0:x]
        testVar = discord.Embed(title="Member: " + str(formatUser) + " said: " + arg)
        await ctx.send(embed=testVar)

    @commands.command()  # allows users to test the response of the bot from Discord
    async def test(self, ctx):
        await ctx.send('Ready to roll!'.format(ctx.author))

    @commands.command()  # shuts down the bot
    async def stop(self, ctx):
        await ctx.send("Logging out. See you next session!".format(ctx.author))
        sys.exit()
def setup(bot):
    """Extension entry point: register the UtilityCog with the bot."""
    bot.add_cog(UtilityCog(bot))
|
24,208 | 4f23be7d5c9c0a18961b4955f3b694ff0acfa834 | from Importer import Importer
# Instantiate the project Importer and dump its test set.
im = Importer()
# Iterate the 't' entry of the test set and print each element.
# NOTE(review): get_test_set() presumably returns a mapping/DataFrame
# with a 't' key -- confirm against Importer's implementation.
for t in im.get_test_set()['t']:
    print(t)
24,209 | 1a3e4e5ac4da26ad17b209cd9c858a1a27b6e322 | # -*- coding: utf-8 -*-
"""
General framework regrouping the different splitting strategies possible to integrate in the
regression analysis pipeline.
===================================================
A Splitter instanciation requires:
- out_per_fold: the number of run to left out for the test set.
It makes use of the sklearn LeavePOut, and allows to keep track of the indexes of the runs.
"""
from sklearn.model_selection import LeavePOut
class Splitter(object):
    """ Split parallel lists of runs into cross-validation folds,
    leaving `out_per_fold` runs out as the test set of each fold.
    """
    def __init__(self, out_per_fold):
        """ Store the number of runs to leave out per fold.
        Arguments:
            - out_per_fold: int
        """
        self.out_per_fold = out_per_fold

    def split(self, X_train, Y_train, run_train=None, run_test=None):
        """ Generate leave-P-out folds over the given lists.
        Arguments:
            - X_train: list
            - Y_train: list
            - run_train: list (optional run identifiers)
            - run_test: list (unused; kept for interface compatibility)
        Returns:
            - list (of dict), one entry per fold
        """
        folds = []
        leave_p_out = LeavePOut(self.out_per_fold)
        for train_idx, test_idx in leave_p_out.split(X_train):
            fold = {
                'X_train': [X_train[i] for i in train_idx],
                'Y_train': [Y_train[i] for i in train_idx],
                'X_test': [X_train[i] for i in test_idx],
                'Y_test': [Y_train[i] for i in test_idx],
                # Report run identifiers when available, raw indexes otherwise.
                'run_train': train_idx if run_train is None else [run_train[i] for i in train_idx],
                'run_test': test_idx if run_train is None else [run_train[i] for i in test_idx],
            }
            folds.append(fold)
        return folds
|
24,210 | c08d685c76a79092cbdc8bcc971967ec25e4de73 | from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    def get_full_name(self):
        """Return the user's full name, falling back to the username
        when the parent implementation yields an empty string.
        """
        name = super().get_full_name() or self.username
        return name.strip()
|
24,211 | 476513c8f9115ef7e9cbcafc82bef090830b58de | import numpy as np
import threading
from datasets.charset import transcriptions_to_labels
from datasets.datasets_helper import sparse_tuples_from_sequences
from datasets.datasets_helper import pad_sequences_and_get_lengths
from datasets.text_dataset import TextDataset
class TextDatasetSequential(TextDataset):
    """Sequential (non-shuffled) text-line dataset that produces batches
    on a background thread and hands them over through a queue.

    Images wider than ``max_width`` are rejected up front; the remaining
    samples are optionally sorted widest-first so each batch needs
    minimal padding.

    BUG FIXES vs. the original:
      * ``Thread.isAlive()`` (removed in Python 3.9) replaced with
        ``Thread.is_alive()`` in ``reset`` and ``__del__``.
      * The unsupported-format exception interpolated the builtin
        ``format`` instead of ``self.data_format``.
    """

    def __init__(self, queue, batch_size, data_format="TF", pad_to_max_width=False, transcriptions_file=None, images_path=None,
                 lmdb_database=None, chars=None, max_width=2048, sort=True, verbose=False):
        super().__init__(max_width=max_width, transcriptions_file=transcriptions_file, images_path=images_path,
                         lmdb_database=lmdb_database, chars=chars, verbose=verbose)
        self.type = 'sequential'
        self.thread = None
        self.queue = queue
        self.batch_size = batch_size
        self.data_format = data_format
        self.pad_to_max_width = pad_to_max_width
        self.rejected = 0
        self.sort = sort
        # Filter out over-wide images, remembering each sample's width.
        sort_width_container = []
        for ids_index, (id, transcription, id_embedding) in enumerate(zip(self.ids, self.transcriptions, self.ids_embedding)):
            img = self.load_image(ids_index)
            if img.shape[1] <= self.max_width:
                sort_width_container.append((id, transcription, id_embedding, img.shape[1]))
            else:
                self.rejected += 1
        if self.sort:
            # Widest first keeps per-batch padding minimal.
            sort_width_container = sorted(sort_width_container, key=lambda x: x[-1], reverse=True)
        if verbose:
            print(f"Rejected images {self.rejected}")
        self.ids = [x[0] for x in sort_width_container]
        self.transcriptions = [x[1] for x in sort_width_container]
        self.ids_embedding = [x[2] for x in sort_width_container]
        # Cached last sample; reused to pad the final, partial batch.
        self.last_image = self.load_image(len(self.ids) - 1)
        self.ids_index = 0
        self.stop_thread = False
        self.start_loading_thread()

    def reset(self):
        """Restart iteration from the first sample."""
        self.ids_index = 0
        # Busy-wait until the loader thread observes the stop flag.
        # NOTE(review): this can spin if the thread is blocked on a full
        # queue -- confirm queue consumption during reset.
        while self.thread.is_alive():
            self.stop_thread = True
        self.stop_thread = False
        self.start_loading_thread()

    def start_loading_thread(self):
        """Spawn the daemon thread that fills the batch queue."""
        self.thread = threading.Thread(target=self.loading_thread)
        self.thread.daemon = True
        self.thread.start()

    def loading_thread(self):
        """Producer loop: push batches until stopped or data runs out."""
        while not self.stop_thread:
            batch = self.create_new_batch()
            self.queue.put(batch)
            # A short batch marks the end of the dataset.
            # NOTE(review): batch['actual_batch_size'] only works for the
            # "PyTorch" data_format (the others return tuples) -- confirm.
            if batch['actual_batch_size'] < self.batch_size:
                break

    def get_batch(self):
        """Blocking fetch of the next prepared batch from the queue."""
        return self.queue.get()

    def create_new_batch(self):
        """Assemble one padded batch in the configured data format."""
        ids = []
        images = []
        transcriptions = []
        ids_embedding = []
        for x in range(self.batch_size):
            id, image, transcription, id_embedding = self.load_next()
            ids.append(id)
            images.append(image)
            transcriptions.append(transcription)
            ids_embedding.append(id_embedding)
        if self.pad_to_max_width:
            # Center each image inside a fixed max_width canvas.
            images_container = np.zeros([self.batch_size, self.height, self.max_width, self.channels], dtype=np.uint8)
            for img_container, img in zip(images_container, images):
                padding = (self.max_width - img.shape[1]) // 2
                img_container[:, padding:padding + img.shape[1]] = img
            images = images_container
        else:
            # Pad to the widest image in the batch (rounded up to a
            # multiple of 64, plus a 32px margin on each side).
            if self.sort:
                max_width = images[0].shape[1]
            else:
                widths = [image.shape[1] for image in images]
                max_width = max(widths)
            max_width = int(np.ceil(max_width / 64) * 64) + 64
            images_container = np.zeros([self.batch_size, self.height, max_width, self.channels], dtype=np.uint8)
            for img_container, img in zip(images_container, images):
                img_container[:, 32:32 + img.shape[1]] = img
            images = images_container
        labels = transcriptions_to_labels(self.from_char, self.to_char, transcriptions)
        sequences = sparse_tuples_from_sequences(labels)
        # The final batch may be padded with repeats of the last sample.
        if self.ids_index > len(self.ids) - 1:
            actual_batch_size = self.batch_size - (self.ids_index - len(self.ids))
        else:
            actual_batch_size = self.batch_size
        if self.data_format == "TF":
            seq_lengths = np.full(images.shape[0], images.shape[2] / self.output_subsampling, dtype=np.int32)
            return images, ids, labels, sequences, seq_lengths, actual_batch_size
        elif self.data_format == "TF-dense":
            sequences, seq_lengths = pad_sequences_and_get_lengths(labels, int(images.shape[2] / 4))
            return images, ids, labels, sequences, seq_lengths, actual_batch_size
        elif self.data_format == "PyTorch":
            # NHWC -> NCHW for PyTorch consumers.
            images = np.transpose(images, (0, 3, 1, 2))
            labels_concatenated = np.concatenate(labels)
            labels_lengths = np.asarray([x.shape[0] for x in labels])
            weights = np.ones(len(labels))
            occurences = np.ones(len(labels))
            return {'images': images, 'ids': ids, 'labels_concatenated': labels_concatenated,
                    'labels_lengths': labels_lengths, 'labels': labels, 'actual_batch_size': actual_batch_size,
                    'ids_embedding': ids_embedding, 'transcriptions': transcriptions, 'weights': weights,
                    'occurences': occurences}
        else:
            # BUG FIX: the original interpolated the builtin `format`.
            raise Exception(f'Not implemented: "{self.data_format}". Possible formats are: TF, TF-dense, PyTorch.')

    def load_next(self):
        """Return the next (id, image, transcription, embedding) tuple,
        repeating the last sample once the dataset is exhausted."""
        if self.ids_index > len(self.ids) - 1:
            self.ids_index += 1
            return self.ids[-1], self.last_image, self.transcriptions[-1], self.ids_embedding[-1]
        image = self.load_image(self.ids_index)
        transcription = self.transcriptions[self.ids_index]
        id_embedding = self.ids_embedding[self.ids_index]
        self.ids_index += 1
        return self.ids[self.ids_index - 1], image, transcription, id_embedding

    def __del__(self):
        # Make sure the background thread exits before the object dies.
        if self.thread is not None:
            while self.thread.is_alive():
                self.stop_thread = True
|
24,212 | 67e924934f018d40fd54bdbae3a4c6655849b927 | import pytest
from brownie import accounts, MockReceiver
import time
VALID_MULTI_PROOF = "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000007C000000000000000000000000000000000000000000000000000000000000007604915AD2A83E334030BD2841825049F77CE42F98211C1B3369034B342BDB18A3CFACDE335819A4A7C43077B3FE11A427B3142FC55E25C6BC40F61C1EE86DD5A07EA0C5E0D83A8970E1F6135EE88A5CF826868E4256931E9149F485C6E7A020823FA8F6B190BD239F561FC3973C8D46C4E24F0639521ECD4F3E92FAE966CD79194E878F73021E2A10507408DBF8618AE3E4B73F94816F49D727E6FB95389322F3B3F02642D9E70D5C1C493A4F732BFE9C9B95A4A42651703B816EDCFC8FADA531200000000000000000000000000000000000000000000000000000000000015450000000000000000000000000000000000000000000000000000000060AF93DF0000000000000000000000000000000000000000000000000000000008BDFD54D3FB166B97CE4A66310369589FCE332C6822B640AAB350B725CE6917EA0133F96206F2FFDFBB93B83BD917B05B13CA59C12330268611242F5FD5734E673079159FB9C7533CAF1D218DA3AF6D277F6B101C42E3C3B75D784242DA663604DD53C29028C2AEF0CDD1F9746B1B2DEFFA898BFDE0B70CD58BAFB6211FABC8623F749400000000000000000000000000000000000000000000000000000000000001C00000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001C0000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000004404172DF5060CEDD070ED6CF1ABB01C3A510DE261A7C608CFEA8439175ECCDB4C100A35E2097E8849FE12FA43B704B839BA7B7D92F381494BFCE65C65099278B82000000000000000000000000000000000000000000000000000000000000001B00000000000000000000000000000000000000000000000000000000000000A000000000000000000000000000000000000000000000000000000000000000E000000000000000000000000000000000000000000000000000000000000000106E080211451500000000000022480A2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003F122408011220C90C2948C1C725537F856CC4EFFC80C8F345B06EBF094AD
4AA0EFE90542B31662A0C08E0A7BE850610F6CDB0D103320962616E64636861696E006FC97790B34262AC9A1E40680B28C6BC16007B6444AA1635DC0F2656CA8BA997471AA11928C40949DB1C3CF91296234846A69C53CD9602FEC46C23E17C07DE32000000000000000000000000000000000000000000000000000000000000001C00000000000000000000000000000000000000000000000000000000000000A000000000000000000000000000000000000000000000000000000000000000E000000000000000000000000000000000000000000000000000000000000000106E080211451500000000000022480A2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003F122408011220C90C2948C1C725537F856CC4EFFC80C8F345B06EBF094AD4AA0EFE90542B31662A0C08E0A7BE850610EC97F0D403320962616E64636861696E00498D405422AC9AFFC10B5DBF00992496226B0436CAF9CC604758605B39A104BC3F7FDA04C70AA4E867CFE22EA476DB2001ED8E36B33819682B521BACEDF75012000000000000000000000000000000000000000000000000000000000000001C00000000000000000000000000000000000000000000000000000000000000A000000000000000000000000000000000000000000000000000000000000000E000000000000000000000000000000000000000000000000000000000000000106E080211451500000000000022480A2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003F122408011220C90C2948C1C725537F856CC4EFFC80C8F345B06EBF094AD4AA0EFE90542B31662A0C08E0A7BE85061082BBD0D203320962616E64636861696E0033802F79AD672E741D3FC3B80EB668398D4DA6982647A7DA084DA0F89B724688304842D70750586009E2292DFF159231BDB09072FF0F7269009B50B6A07D37E2000000000000000000000000000000000000000000000000000000000000001B00000000000000000000000000000000000000000000000000000000000000A000000000000000000000000000000000000000000000000000000000000000E000000000000000000000000000000000000000000000000000000000000000106E080211451500000000000022480A2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003F122408011220C90C2948C1C725537F856CC4EFFC80C8F345B06EBF094AD4AA0EFE90542B31662A0C08E0A7BE8506108AC6A1D303320962616E64636861696E0000000000000
000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000005E00000000000000000000000000000000000000000000000000000000000001545000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000003EF00000000000000000000000000000000000000000000000000000000000002A00000000000000000000000000000000000000000000000000000000000000160000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000001A000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000060AF73DE0000000000000000000000000000000000000000000000000000000060AF73E0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000001E0000000000000000000000000000000000000000000000000000000000000000966726F6D5F7363616E0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000F0000000342544300000000000F424000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000092B6826F20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000144CEE7687A25A2E57EF6C8A0563C71E0AC206ED38F660868E0A6E35AF5DAC4F31F9000000000000000000000000000
000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000144CEB739BB22F48B7F3053A90BA2BA4FE07FAB262CADF8664489565C50FF505B8BD000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000007000000000000000000000000000000000000000000000000000000000000144C66DB7A4464DF7D7A956428F5D49FB034A860510035797D1B714D7348BDB7EA5600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000B000000000000000000000000000000000000000000000000000000000000144CF054C5E2412E1519951DBD7A60E2C5EDE41BABA494A6AF6FD0B0BAC4A4695C41000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000154455D62CF6B3BCE22AD491FBB1137A6FDB715AD03347D385A705505C924DC0446400000000000000000000000000000000000000000000000000000000000005E000000000000000000000000000000000000000000000000000000000000015450000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000144C00000000000000000000000000000000000000000000000000000000000002A00000000000000000000000000000000000000000000000000000000000000160000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000001A00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000
0000000000000000000010000000000000000000000000000000000000000000000000000000060AF92100000000000000000000000000000000000000000000000000000000060AF9214000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000001E0000000000000000000000000000000000000000000000000000000000000000966726F6D5F7363616E0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000F0000000342544300000000000186A00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000ED1889260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000144C8C37B4BACA5C1B8ABD886636BD64F18BE696F98560DC01D623DB52BAE8813E2F000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000144CEB739BB22F48B7F3053A90BA2BA4FE07FAB262CADF8664489565C50FF505B8BD000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000007000000000000000000000000000000000000000000000000000000000000144C66DB7A4464DF7D7A956428F5D49FB034A860510035797D1B714D7348BDB7EA5600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000B000000000000000000000000000000000000000000000000000000000000144CF054C5E2412E1519951DBD7A60E2C5EDE41BABA494A6AF6FD0B0BAC4A46
95C41000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000154455D62CF6B3BCE22AD491FBB1137A6FDB715AD03347D385A705505C924DC04464"
# Expected decoded oracle results after relaying VALID_MULTI_PROOF.
# Each entry is [clientID, oracleScriptID, params, askCount, minCount,
# requestID, ansCount, requestTime, resolveTime, resolveStatus, result].
EXPECTED_MULTI_RELAY_RESULT = [
    [
        "from_scan",
        1,
        "0x0000000342544300000000000f4240",
        1,
        1,
        1,
        1,
        1622111198,
        1622111200,
        1,
        "0x000000092b6826f2"
    ],
    [
        "from_scan",
        1,
        "0x0000000342544300000000000186a0",
        1,
        1,
        2,
        1,
        1622118928,
        1622118932,
        1,
        "0x00000000ed188926"
    ],
]
# Deploy MockReceiver contract
@pytest.fixture(scope="module")
def mockreceiver(bridge):
    """Module-scoped fixture: deploy MockReceiver wired to the bridge fixture."""
    return accounts[0].deploy(MockReceiver, bridge)
def test_bridge_relayandmultiverify_success(bridge, mockreceiver):
    """Relay a valid multi-proof, then check the stored block details and
    every decoded oracle result against the expected fixture data."""
    tx = mockreceiver.relayAndMultiSafe(VALID_MULTI_PROOF)
    assert tx.status == 1
    # the relayed proof must have recorded block 5445's root hash and times
    assert bridge.blockDetails(5445) == [
        "0xEA0C5E0D83A8970E1F6135EE88A5CF826868E4256931E9149F485C6E7A020823",
        1622119391,
        146668884,
    ]
    for idx, expected in enumerate(EXPECTED_MULTI_RELAY_RESULT):
        res = mockreceiver.latestResults(idx)
        actual = [
            res["clientID"],
            res["oracleScriptID"],
            res["params"],
            res["askCount"],
            res["minCount"],
            res["requestID"],
            res["ansCount"],
            int(res["requestTime"]),
            int(res["resolveTime"]),
            res["resolveStatus"],
            res["result"],
        ]
        assert actual == expected
|
24,213 | 642f50e46150bfcb54715cbd3c2ccc496f5c809a | import maya.OpenMaya as OM
import maya.OpenMayaAnim as OMA
import maya.OpenMayaMPx as OMX
import maya.cmds as cmds
import sys, math
structure="""
typedef struct
{
b2Body *body;
std::string name;
float tx;
float ty;
float width;
float height;
float rotation;
float friction;
float restitution;
float density;
b2BodyType type;
}Body; \n
"""
class Box2DTool():
    """Dockable Maya window for laying out Box2D proxy blocks.

    "PlaceBlock" drops a poly cube carrying friction/restitution/density and
    body-type custom attributes; "Export" walks the DAG and writes every mesh
    transform into a .b2d file as a C array of Body structs (see `structure`).
    """
    def __init__(self) :
        """Build (or rebuild) the dockable tool window and its controls."""
        #check to see if the window exists:
        if cmds.window("Box2DTool", exists = True):
            cmds.deleteUI("Box2DTool")
        #create the window:
        window = cmds.window("Box2DTool", title = 'Box2D Tool', sizeable = False)
        #create the main layout:
        cmds.columnLayout(columnWidth = 300, adjustableColumn = False, columnAttach = ('both', 10))
        #make dockable:
        allowedAreas = ['right', 'left']
        cmds.dockControl( 'Box2D Tool', area='left', content=window, allowedArea=allowedAreas )
        # NOTE(review): self.dim was overwritten by each control below in the
        # original; controls are always queried by name ('dim', 'friction',
        # ...), so the attribute itself is never read.
        self.dim=cmds.floatFieldGrp('dim', numberOfFields=2, label='Dimension', extraLabel='pixel', value1=5, value2=1 )
        self.dim=cmds.floatFieldGrp('friction', numberOfFields=1, label='Friction', value1=0.2 )
        self.dim=cmds.floatFieldGrp('restitution', numberOfFields=1, label='restitution', value1=0.0 )
        self.dim=cmds.floatFieldGrp('density', numberOfFields=1, label='density', value1=0.0 )
        cmds.separator()
        self.dim=cmds.floatFieldGrp('rotation', numberOfFields=1, label='rotation', value1=0.0 )
        cmds.separator()
        cmds.optionMenuGrp( "bodyType",l='Body Type' )
        cmds.menuItem(label='b2_staticBody')
        cmds.menuItem(label='b2_kinematicBody')
        cmds.menuItem(label='b2_dynamicBody')
        cmds.button(label = "PlaceBlock", w = 100, h = 25, c = self.placeBlock)
        cmds.separator()
        cmds.button( label='Export', command=self.export )
    def placeBlock(self, *args) :
        """Create a unit cube, scale it to the requested dimensions and attach
        the Box2D custom attributes read from the UI controls."""
        cmds.polyCube(w=1,h=1)
        name=cmds.ls(sl=True)
        w=cmds.floatFieldGrp('dim',query=True, value1=True)
        h=cmds.floatFieldGrp('dim',query=True, value2=True)
        # scale is half the requested size: Box2D boxes use half-extents
        cmds.setAttr('%s.scaleX' %(name[0]),w/2.0)
        cmds.setAttr('%s.scaleY' %(name[0]),h/2.0)
        cmds.addAttr(name[0],ln='BodyType', dt='string')
        bt=cmds.optionMenuGrp("bodyType", query=True ,value=True)
        cmds.setAttr('%s.BodyType' %(name[0]),bt ,type='string')
        cmds.addAttr(name[0],ln='friction')
        f=cmds.floatFieldGrp('friction',query=True, value1=True)
        cmds.setAttr('%s.friction' %(name[0]),f)
        cmds.addAttr(name[0],ln='restitution')
        r=cmds.floatFieldGrp('restitution',query=True, value1=True)
        cmds.setAttr('%s.restitution' %(name[0]),r)
        cmds.addAttr(name[0],ln='density')
        d=cmds.floatFieldGrp('density',query=True, value1=True)
        cmds.setAttr('%s.density' %(name[0]),d)
        # (a duplicate, unused read of 'rotation' before the attrs was removed)
        r=cmds.floatFieldGrp('rotation',query=True, value1=True)
        cmds.setAttr('%s.rotateZ' %(name[0]),r)
    def export(self, *args) :
        """Prompt for a .b2d path and write every mesh-bearing transform in
        the scene out as an entry of a C ``Body bodies[]`` array."""
        basicFilter = "*.b2d"
        files=cmds.fileDialog2(caption="Please select file to save",fileFilter=basicFilter, dialogStyle=2)
        # BUG FIX: fileDialog2 returns None when the dialog is cancelled; the
        # original compared against "" (never true for None) and then indexed
        # into the result. Only export when a path list actually came back.
        if files:
            dagIt = OM.MItDag(OM.MItDag.kDepthFirst, OM.MFn.kTransform)
            ofile=open(files[0],'w')
            ofile.write(structure)
            ofile.write('\n\nBody bodies[]=\n{\n')
            numBodies=0
            while not dagIt.isDone():
                node = dagIt.currentItem()
                if node.apiTypeStr() =="kTransform" :
                    fn = OM.MFnTransform(node)
                    child = fn.child(0)
                    # only transforms whose first child is a mesh are exported
                    if child.apiTypeStr()=="kMesh" :
                        name=fn.name()
                        ofile.write('\t{ 0,"%s",' %(name) )
                        x=cmds.getAttr("%s.translateX" %(name))
                        ofile.write('%sf,' %(x))
                        y=cmds.getAttr("%s.translateY" %(name))
                        ofile.write('%sf,' %(y))
                        width=cmds.getAttr("%s.scaleX" %(name))
                        ofile.write('%sf,' %(width))
                        height=cmds.getAttr("%s.scaleY" %(name))
                        ofile.write('%sf,' %(height))
                        rot=cmds.getAttr("%s.rotateZ" %(name))
                        ofile.write('%sf,' %(rot))
                        f=cmds.getAttr("%s.friction" %(name))
                        ofile.write('%sf,' %(f))
                        f=cmds.getAttr("%s.restitution" %(name))
                        ofile.write('%sf,' %(f))
                        f=cmds.getAttr("%s.density" %(name))
                        ofile.write('%sf,' %(f))
                        body_type=cmds.getAttr("%s.BodyType" %(name))
                        ofile.write('%s },\n' %(body_type))
                        numBodies=numBodies+1
                dagIt.next()
            ofile.write("};\n")
            ofile.write('const static int numBodies=%d;' %(numBodies) )
            ofile.close()
Box2DTool() |
24,214 | 618f7a56b8a1fe07279a234ed4e081f9672bff4d | '''
Water Wheel of Fortune
By Nathaniel Yearwood
Cody Macedo
'''
import pygame, sys
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import ode
import random as rand
import math
import threading
win_width = 800 # 500 cm = 5 m
win_height = 600
# set up the colors
BLACK = (0, 0, 0)
GREY = (150,150,150)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
def normalize(v):
    """Return *v* scaled to unit length (Euclidean norm)."""
    magnitude = np.linalg.norm(v)
    return v / magnitude
class Particle(pygame.sprite.Sprite):
def __init__(self, imgfile, radius, mass=1.0):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(imgfile)
self.image = pygame.transform.scale(self.image, (radius, radius))
self.state = [0, 0, 0, 0]
self.mass = mass
self.t = 0
self.radius = radius
self.gravity = -9.8
def set_pos(self, pos):
self.state[0:2] = pos
return self
def set_vel(self, vel):
self.state[2:] = vel
return self
def update(self, dt):
self.t += dt
self.state[3] += dt * self.gravity
self.state[0] += self.state[2] * dt
self.state[1] += self.state[3] * dt
def move_by(self, delta):
self.state[0:2] = np.add(self.pos, delta)
return self
def draw(self, surface):
rect = self.image.get_rect()
rect.center = (self.state[0], win_height-self.state[1]) # Flipping y
surface.blit(self.image, rect)
def pprint(self):
print 'Particle', self.state
class Wheel(pygame.sprite.Sprite):
    """Spoked wheel drawn as eight radial lines rotating at a fixed rate.

    state = [x, y, angular velocity, angular momentum].
    """
    def __init__(self, center, radius, mass=1000):
        pygame.sprite.Sprite.__init__(self)
        self.state = np.zeros(4)
        self.state[0:2] = np.zeros(2) # position
        self.state[2] = 1 # angular velocity
        self.state[3] = 0 # angular momentum
        # pygame Rects for the 8 spokes; filled lazily on the first draw()
        self.lines = []
        self.mass = mass
        self.t = 0
        self.center = center
        self.radius = radius
        self.angle = 0
        self.torque = 0
    def set_vel(self, vel):
        # sets the angular velocity (degrees advanced per draw call)
        self.state[2] = vel
        return self
    def update(self, dt):
        self.t += dt
    def draw(self, surface):
        """Advance the rotation and redraw the 8 spokes plus the rim circle."""
        self.angle += self.state[2]
        # spokes every 45 degrees: 0, 45, ..., 315
        for i in range(0,316, 45):
            x = self.center[0] + math.cos(math.radians(self.angle + i)) * self.radius
            y = self.center[1] + math.sin(math.radians(self.angle + i)) * self.radius
            if (len(self.lines) <= 7):
                self.lines.append(pygame.draw.line(surface, BLACK, self.center, (x,y), 5))
            else:
                # NOTE: i/45 is integer division under Python 2 (this file
                # uses print statements); under Python 3 it would be a float
                # index and raise TypeError.
                self.lines[i/45] = pygame.draw.line(surface, BLACK, self.center, (x,y), 5)
        self.circle = pygame.draw.circle(surface, BLACK, self.center, (int)(self.radius*.7), 10)
    def pprint(self):
        print 'Wheel', self.state
class World:
def __init__(self, height, width):
self.particles = []
self.wheels =[]
self.height = height
self.width = width
self.e = .2 # Coefficient of restitution
def add(self, imgfile, radius, mass=1.0):
particle = Particle(imgfile, radius, mass)
self.particles.append(particle)
return particle
def addWheel(self, centre, radius):
wheel = Wheel(centre, radius)
self.wheels.append(wheel)
return wheel
def pprint(self):
print '#particles', len(self.particles)
for d in self.particles:
d.pprint()
def draw(self, screen):
for d in self.particles:
d.draw(screen)
for w in self.wheels:
w.draw(screen)
def update(self, dt):
t = []
for d in self.particles:
d.update(dt)
for i in range(0, len(self.particles)):
self.check_for_collision(i)
try:
for j in range(len(self.wheels)):
t.append(threading.Thread(target=self.check_wheel_collision(i, j)))
t[i].start()
except:
print "Collision detection threading error"
for x in t:
x.join()
self.check_outside_screen()
def check_outside_screen(self):
self.particles = [x for x in self.particles if self.outside_screen(x)]
def outside_screen(self, particle):
if (particle.state[0] < -particle.radius):
return False
elif (particle.state[0] > win_width + particle.radius):
return False
elif (particle.state[1] < -particle.radius):
return False
else:
return True
# check for inter-particle collision
def check_for_collision(self, i):
if (self.particles[i].state[0] - self.particles[i].radius <= 0 or
self.particles[i].state[0] + self.particles[i].radius >= 800):
self.particles[i].state[2] *= -1*self.e
elif (self.particles[i].state[1] - self.particles[i].radius <= 0):
self.particles[i].state[3] = 0
for j in range(i+1, len(self.particles)):
if i == j:
return
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.particles[j].state[0:2])
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.particles[j].radius
if dist_ij > radius_i + radius_j:
return
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = np.array(self.particles[j].state[2:])
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
return
mass_i = self.particles[i].mass
mass_j = self.particles[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
vel_j_aftercollision = vel_j - n_ij * J / mass_j
self.particles[i].set_vel(vel_i_aftercollision)
self.particles[j].set_vel(vel_j_aftercollision)
# check for particle - wheel collision
def check_wheel_collision(self, i, j):
pos_i = np.array(self.particles[i].state[0:2])
pos_j = np.array(self.wheels[j].center)
dist_ij = np.sqrt(np.sum((pos_i - pos_j)**2))
radius_i = self.particles[i].radius
radius_j = self.wheels[j].radius*.7
if dist_ij > radius_i + radius_j:
return
# ensures particles do not cross wheel boundaries
dist_in = -(dist_ij - radius_j - radius_i) # distance inside of wheel
theta = math.asin((pos_i[1] - pos_j[1]) /dist_ij) #angle from centre of wheel
newPos = [(math.cos(theta) * dist_in), (math.sin(theta) * dist_in)]
# makes sure to flip new x pos to the left
if pos_i[0] < pos_j[0]:
newPos[0] *= -1
# updates the particle position
self.particles[i].set_pos([pos_i[0] + newPos[0], pos_i[1] + newPos[1]])
# May be a collision
vel_i = np.array(self.particles[i].state[2:])
vel_j = 0
relative_vel_ij = vel_i - vel_j
n_ij = normalize(pos_i - pos_j)
if np.dot(relative_vel_ij, n_ij) >= 0:
return
mass_i = self.particles[i].mass
mass_j = self.wheels[j].mass
J = -(1+self.e) * np.dot(relative_vel_ij, n_ij) / ((1./mass_i) + (1./mass_j))
vel_i_aftercollision = vel_i + n_ij * J / mass_i
self.particles[i].set_vel(vel_i_aftercollision)
# ANGULAR COLISION #
# detect collision with lines on wheel
for x in range(len(self.wheels[j].lines)):
line = self.wheels[j].lines[x]
A = self.wheels[j].center
C = self.particles[i].state[0:2]
if A == line.topleft:
B = line.bottomright
elif A == line.bottomright:
B = line.topleft
elif A == line.topright:
B = line.bottomleft
else:
B = line.topright
dist = np.sqrt((B[0]-A[0])**2+(B[1]-A[1])**2)
Dx = (B[0]-A[0])/dist
Dy = (B[1]-A[1])/dist
t = Dx*(C[0]-A[0])+Dy*(C[1]-A[1])
Ex = t*Dx+A[0]
Ey = t*Dy+A[1]
dist2 = np.sqrt((Ex-C[0])**2+(Ey-C[1])**2)
#if (dist2 < self.particles[i].radius):
#Do conservation of momentum for angular momentum
def main():
    """Run the interactive water-wheel demo.

    Keys: P pause/resume, R toggle rain/spout, A/D move the spout, Q quit.
    """
    # initializing pygame
    pygame.init()
    clock = pygame.time.Clock()
    # top left corner is (0,0)
    screen = pygame.display.set_mode((win_width, win_height))
    pygame.display.set_caption('Water Wheel of Fortune')
    world = World(win_height, win_width)
    world.addWheel([400, 300], 200)
    # spout position and width for when rain == false
    spoutPos = 380
    spoutWidth = 40
    pause = False
    rain = False # particles randomly appear at top along widith when true, spout when false
    maxP = 100 # maximum number of particles
    dt = 0.3
    pRadius = 10 # smallest radius is 3, anything smaller is invisible
    pMass = 1
    # timer to create more particles
    pygame.time.set_timer(pygame.USEREVENT + 1, 50)
    # NOTE(review): `range` shadows the builtin here; it holds the x-interval
    # in which new particles may spawn.
    if rain:
        range = [0 + pRadius, win_width - pRadius]
    else:
        range = [spoutPos + pRadius, spoutPos + spoutWidth + pRadius]
    print "\n\nPress P key to pause or resume"
    print "Press R key to toggle rain or spout"
    print "Press A or D keys to move spout left or right\n\n"
    while True:
        # 30 fps
        if not pause:
            clock.tick(30)
        event = pygame.event.poll()
        if event.type == pygame.QUIT:
            sys.exit(0)
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_q:
            pygame.quit()
            sys.exit(0)
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
            pause = not pause
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_r:
            rain = not rain
            # recompute the spawn interval for the new mode
            if rain:
                range = [0 + pRadius, win_width - pRadius]
            else:
                range = [spoutPos + pRadius, spoutPos + spoutWidth + pRadius]
        elif event.type == pygame.USEREVENT + 1 and not pause:
            # new particle generation
            if (len(world.particles) < maxP):
                # make sure particle is within the walls
                newPos = np.array([rand.uniform(range[0],range[1]), win_height])
                newVel = np.array([0, 0])
                world.add('waterdroplet.png', pRadius, pMass).set_pos(newPos).set_vel(newVel)
        # moves spout left
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_a and not rain:
            if spoutPos - 10 >= 0: # stops spout at edge
                spoutPos -= 10
                range = [spoutPos + pRadius, spoutPos + spoutWidth + pRadius]
        # moves spout right
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_d and not rain:
            if spoutPos + 10 + spoutWidth*1.5 <= win_width: # stops spout at edge
                spoutPos += 10
                range = [spoutPos + pRadius, spoutPos + spoutWidth + pRadius]
        else:
            pass
        if not pause:
            # Clear the background, and draw the sprites
            screen.fill(WHITE)
            world.draw(screen)
            world.update(dt)
            if not rain:
                pygame.draw.rect(screen, GREY, (spoutPos, 0, spoutWidth*1.5, 30))
        pygame.display.update()
if __name__ == '__main__':
main() |
24,215 | dc9b18c1013c78d76c81f507bcf42d47e0aa1deb | n, a, b = map(int, input().split())
if n > a * b:
print(-1)
else:
c = [[0 for j in range(b)] for i in range(a)]
p = 1
i = 0
j = 0
while p <= n:
if i % 2 == 0:
for j in range(b):
if p > n:
break
c[i][j] = p
p += 1
else:
for j in reversed(range(b)):
if p > n:
break
c[i][j] = p
p += 1
i += 1
for i in range(a):
print(*c[i])
|
24,216 | cc1b00104f3401728d2393a8c45d82febdceb0fd | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: tightens verbose_name/help_text on Job fields.

    NOTE: Django replays migration files as-is; edit with care.
    """
    dependencies = [
        ('branch', '0002_auto_20141127_0251'),
    ]
    operations = [
        migrations.AlterField(
            model_name='job',
            name='date',
            field=models.DateTimeField(verbose_name='Date (DD/MM/YYYY)', help_text="La date doit être indiquée sous le format DD/MM/YYYY où DD est le jour, MM est le mois et YYYY est l'année."),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='job',
            name='estimated_time',
            field=models.IntegerField(verbose_name='Temps estimé (en minutes)'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='job',
            name='real_time',
            field=models.IntegerField(verbose_name='Temps réel (en minutes)'),
            preserve_default=True,
        ),
    ]
|
24,217 | c0f8d13f49e3c2bc0204ba003aea205fe898cf99 | import pybullet as p
import time
p.connect(p.GUI)
def adddomino(p):
    """Spawn one domino-shaped rigid body above the table and launch it.

    `p` is the pybullet module itself, passed in by the caller (it shadows
    the module-level import on purpose).
    """
    # quaternion rotating the mesh so its modelling axis lines up with world Z
    y2z = p.getQuaternionFromEuler([0, 0, 1.57])
    meshScale = [1, 1, 1]
    visualShapeId = p.createVisualShape(shapeType=p.GEOM_MESH,
                                        fileName="domino/domino.obj",
                                        rgbaColor=[1, 1, 1, 1],
                                        specularColor=[0.4, .4, 0],
                                        visualFrameOrientation=y2z,
                                        meshScale=meshScale)
    # half-extents in metres (0.635 x 2.54 x 5.08 cm domino)
    boxDimensions = [0.5 * 0.00635, 0.5 * 0.0254, 0.5 * 0.0508]
    collisionShapeId = p.createCollisionShape(p.GEOM_BOX, halfExtents=boxDimensions)
    objid=p.createMultiBody(baseMass=0.025,
                            baseCollisionShapeIndex=collisionShapeId,
                            baseVisualShapeIndex=visualShapeId,
                            basePosition=[-.5, -2, 0.14],
                            useMaximalCoordinates=True)
    # launch the new domino forward (+Y) with a slight upward kick
    p.resetBaseVelocity(objid,linearVelocity=[0,10,1])
# Static scene: table below the domino spawn point
p.loadURDF("table_s/table.urdf", -.5000000, -2.00000, -.820000, 0.000000, 0.000000, 0.0, 1.0)
#p.setGravity(0, 0, -10)
arm = p.loadURDF("widowx/gun.urdf", useFixedBase=1)
#arm = p.loadURDF("widowx/widowx.urdf", basePosition=[-.5, -2, 0.01], baseOrientation=[0,0,0,1])
ball = p.loadURDF("sphere2.urdf", useFixedBase=1)
p.resetBasePositionAndOrientation(ball, [-.5, 2, 0.1], [0,0,0,1])
p.setGravity(0, 0, -10)
p.resetBasePositionAndOrientation(arm, [-0.098612, -2, 0.14018],
                                  [0.000000, 0.000000, 0.000000, 1.000000])
# Main loop: press 'a' to fire a new domino; grab a camera image each tick
while (1):
  qKey = ord('a')
  keys = p.getKeyboardEvents()
  if qKey in keys and keys[qKey]&p.KEY_WAS_TRIGGERED:
    adddomino(p)
  p.stepSimulation()
  time.sleep(0.01)
  #p.saveWorld("test.py")
  viewMat = p.getDebugVisualizerCamera()[2]
  # hard-coded projection matrix (commented alternative reads it from the GUI)
  projMatrix = [0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 2.0, 2.0, -0.02000020071864128, 2.0]
  #projMatrix = p.getDebugVisualizerCamera()[3]
  width = 640
  height = 480
  img_arr = p.getCameraImage(width=width,
                             height=height,
                             viewMatrix=viewMat,
                             projectionMatrix=projMatrix)
|
24,218 | cf69812c204b57e31ff487caf3bf177f8399abff |
from django.http import HttpResponse
from django.template import loader
from django.http import Http404
from .models import Cliente
from .models import Tabla
from django.views.generic import ListView
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from skateez.models import Author
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
# --- Vistas Genericas ---
class IndexView(generic.ListView):
    """Landing page: lists every Tabla."""
    template_name = 'skateez/index.html'
    def get_queryset(self):
        # no filtering: the index shows the full catalogue
        return Tabla.objects.all()
class DetailView(generic.DetailView):
    """Detail page for a single Tabla."""
    model = Tabla
    template_name = 'skateez/detail.html'
class ResultsView(generic.DetailView):
    """Results page for a single Tabla."""
    model = Tabla
    template_name = 'skateez/results.html'
# !!CAMBIAR!!
# TODO: this admin wiring lives in a views module with mid-file imports;
# it should probably move to admin.py.
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from skateez.models import Usuario
# Define an inline admin descriptor for Employee model
# which acts a bit like a singleton
class EmployeeInline(admin.StackedInline):
    model = Usuario
    can_delete = False
    verbose_name_plural = 'Usuario'
# Define a new User admin
class UserAdmin(BaseUserAdmin):
    inlines = (EmployeeInline,)
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
# --- Listas ---
class ListaTabla(ListView):
    """Plain ListView over Tabla (default template/context)."""
    model = Tabla
    """
    template_name = 'skateez/tabla_list.html'
    def get_context_data(self, **kwargs):
        context = super(ListaTabla, self).get_context_data(**kwargs)
        context['tabla_list'] = Tabla.objects.all()
        return context
    """
# --- Vistas genericas de creación, actualización y borrado ---
class Create(CreateView):
    """CreateView for Author.

    NOTE(review): re-defined further below (LoginRequiredMixin version),
    which shadows this class -- this definition is effectively dead code.
    """
    model = Author
    fields = ['name']
class Update(UpdateView):
    """UpdateView for Author's name field."""
    model = Author
    fields = ['name']
class Delete(DeleteView):
    """DeleteView for Author; redirects to the author list afterwards."""
    model = Author
    success_url = reverse_lazy('author-list')
class Create(LoginRequiredMixin, CreateView):
    """Login-protected CreateView for Author; stamps created_by with the
    requesting user. Shadows the earlier Create definition above."""
    model = Author
    fields = ['name']
    def form_valid(self, form):
        # record which authenticated user created the object
        form.instance.created_by = self.request.user
        return super().form_valid(form)
# --- Formulario ---
from skateez.forms import ContactForm
from django.views.generic.edit import FormView
class ContactView(FormView):
    """Contact-form page: sends the email on valid submission, then redirects."""
    template_name = 'contact.html'
    form_class = ContactForm
    success_url = '/thanks/'
    def form_valid(self, form):
        # email delivery is delegated to the form object
        form.send_email()
        return super().form_valid(form)
# --- USERSignup ---
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
def signup(request):
    """Register a new user, log them in immediately, and go to the index.

    GET (or an invalid POST) re-renders the signup form.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            # re-authenticate with the raw password so login() receives a
            # backend-annotated user object
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('index')
    else:
        form = UserCreationForm()
    return render(request, 'skateez/signup.html', {'form': form})
# --- UserLogin ---
from django.contrib.auth import authenticate, login
def login_view(request):
    """Authenticate a user from POSTed credentials and log them in.

    NOTE(review): assumes 'username' and 'password' are present in
    request.POST; a GET request or missing fields raise KeyError -- confirm
    against the form template.
    """
    username = request.POST['username']
    password = request.POST['password']
    # BUG FIX: the original passed the undefined names `usuario1` and
    # `useruser` to authenticate(), which raised NameError on every call;
    # the submitted credentials must be used.
    user = authenticate(request, username=username, password=password)
    if user is not None:
        login(request, user)
        return redirect('index')
    else:
        # NOTE(review): returning None makes Django raise ValueError; an
        # error response or re-rendered login form should probably go here.
        return
# --- UserLogout ---
from django.contrib.auth import logout
def logout_view(request):
    """Log the current user out and return to the index page."""
    logout(request)
    return redirect('index')
24,219 | bb465d66a3067436f387f52ba76cf4156e43c21b | import datetime
import sys
import time
def print_time():
    """Print the current local time as YYYY-MM-DD HH:MM:SS and flush stdout."""
    # datetime.now() gives local time directly; no need to round-trip
    # through time.time() and fromtimestamp() as the original did.
    print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    sys.stdout.flush()
def flush_print(string):
    """Print *string* and immediately flush stdout."""
    print(string, flush=True)
def t_print(string):
    """Print *string* behind a 'YYYY-MM-DD HH:MM:SS  --  ' timestamp prefix
    and flush stdout."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(stamp, " -- ", string)
    sys.stdout.flush()
|
24,220 | 8bf45cb5aab187691adfcdee2a9397db3e16e269 | #Mensaje de bienvenida
#Welcome message
print ("Bienvenido al sistema de ubicación para zonas públicas WIFI")
#Preset username and password
userPreset = 51606
passwordPreset = 60615
user = int (input ("Nombre de Usuario:\n"))
if user == userPreset:
    password = int (input ("Contraseña:\n"))
    if password == passwordPreset:
        #Derive start/end indices from the username length to extract the
        #required digit groups: the last three digits and the
        #second-to-last digit of the preset username.
        codeLentgh = len(str(userPreset))
        startIndex = codeLentgh - 3
        lastIndex = codeLentgh
        #Extract the two required terms
        firstTerm = int(str(userPreset)[startIndex:lastIndex])
        secondTerm = int(str(userPreset)[codeLentgh - 2])
        #Three arithmetic expressions whose result must equal the second term
        #(all three evaluate to 0)
        firstEq = ((5+1)-6)*6
        secondEq = (6-6)%(5+1)
        thirdEq = (5-1)*(6-6)
        #Validate that every expression equals the second term
        if secondTerm == firstEq and secondTerm == secondEq and secondTerm == thirdEq:
            expectedValue = firstTerm + secondTerm
            result = int (input (str (firstTerm) + " + " + str(secondTerm) + " = "))
            if result == expectedValue:
                print ("Sesión Iniciada")
            else:
                print ("Error")
        else:
            print ("Error")
    else:
        print ("Error")
|
24,221 | 4e1f3f30d08f978b0b29576a135ef0583913f9c8 | import collections
import os
import abc
import copy
import datetime
import logging
import munge
from future.utils import with_metaclass
from vaping.config import parse_interval
import vaping.io
class PluginBase(vaping.io.Thread):
    """
    Base plugin interface

    # Instanced Attributes

    - config (`dict`): plugin config
    - vaping: reference to the main vaping object

    Calls `self.init()` prefork while loading all modules, init() should
    not do anything active, any files opened may be closed when it forks.

    Plugins should prefer `init()` to `__init__()` to ensure the class is
    completely done initializing.

    Calls `self.on_start()` and `self.on_stop()` before and after running in
    case any connections need to be created or cleaned up.
    """
    @property
    def groups(self):
        """
        `dict` - group configurations keyed by name
        """
        group_config = {}
        # legacy way of treating any dict in the config as a potential
        # group config (pre #44 implementation)
        # supported until vaping 2.0
        # NOTE(review): collections.Mapping was removed in Python 3.10;
        # modern interpreters need collections.abc.Mapping here.
        for k,v in list(self.config.items()):
            if isinstance(v, collections.Mapping):
                group_config[k] = v
        # explicit groups object (#44 implementation); entries here win
        # over legacy dicts of the same name
        for _group_config in self.config.get("groups",[]):
            group_config[_group_config["name"]] = _group_config
        return group_config
    def init(self):
        """
        called after the plugin is initialized, plugin may define this for any
        other initialization code
        """
        pass
    def on_start(self):
        """
        called when the daemon is starting
        """
        pass
    def on_stop(self):
        """
        called when the daemon is stopping
        """
        pass
    def new_message(self):
        """
        creates and returns new message `dict`, setting `type`, `source`, `ts`, `data`
        `data` is initialized to an empty array

        **Returns**

        message (`dict`)
        """
        msg = {}
        msg['data'] = []
        msg['type'] = self.plugin_type
        msg['source'] = self.name
        # seconds since the unix epoch (UTC), as a float
        msg['ts'] = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
        return msg
    def popen(self, args, **kwargs):
        """
        creates a subprocess with passed args

        **Returns**

        Popen instance
        """
        self.log.debug("popen %s", ' '.join(args))
        return vaping.io.subprocess.Popen(args, **kwargs)
    @property
    def log(self):
        """
        logger instance for plugin type
        """
        # created lazily so the logger name can include plugin_type,
        # which subclasses define
        if not self._logger:
            self._logger = logging.getLogger('vaping.plugins.' + self.plugin_type)
        return self._logger
    def __init__(self, config, ctx):
        """
        **Arguments**

        - config (`dict`)
        - ctx: vaping context
        """
        # merge the user config over a deep copy of the subclass's
        # default_config, if the subclass declares one
        if hasattr(self, 'default_config'):
            self.config = munge.util.recursive_update(copy.deepcopy(self.default_config), copy.deepcopy(config))
        else:
            self.config = config
        # set for pluginmgr
        self.pluginmgr_config = self.config
        self.vaping = ctx
        self.name = self.config.get("name")
        self._logger = None
        super(PluginBase, self).__init__()
        self.init()
    def _run(self):
        # thread entry point; subclasses extend this with their loop
        self.on_start()
class ProbeBase(with_metaclass(abc.ABCMeta, PluginBase)):
    """
    Base class for probe plugin, used for getting data

    expects method probe() to be defined
    """
    def init(self):
        pass
    @abc.abstractmethod
    def probe(self):
        """
        probe for data, return a list of dicts
        """
    def __init__(self, config, ctx, emit=None):
        """
        **Arguments**

        - config (`dict`)
        - ctx: vaping context
        - emit: optional single output plugin to emit to
        """
        if emit:
            self._emit = [emit]
        else:
            self._emit = []
        self._emit_queue = vaping.io.Queue()
        super(ProbeBase, self).__init__(config, ctx)
    def _run(self):
        super(ProbeBase, self)._run()
        self.run_level = 1
        # busy loop: flush one queued emission, then probe again;
        # setting run_level to 0 stops the loop
        while self.run_level:
            self.send_emission()
            msg = self.probe()
            if msg:
                self.queue_emission(msg)
            else:
                self.log.debug("probe returned no data")
    def queue_emission(self, msg):
        """
        queue an emission of a message for all output plugins

        **Arguments**

        - msg (`dict`): dict containing `type`, `source`, `ts` and `data` keys
        """
        if not msg:
            return
        for _emitter in self._emit:
            if not hasattr(_emitter, 'emit'):
                continue
            # bind the emitter through a default argument so each queued
            # closure keeps its own emitter (avoids late binding)
            def emit(emitter=_emitter):
                self.log.debug("emit to {}".format(emitter.name))
                emitter.emit(msg)
            self.log.debug("queue emission to {} ({})".format(
                _emitter.name, self._emit_queue.qsize()))
            self._emit_queue.put(emit)
    def send_emission(self):
        """
        emit and remove the first emission in the queue
        """
        if self._emit_queue.empty():
            return
        emit = self._emit_queue.get()
        emit()
    def emit_all(self):
        """
        emit and remove all emissions in the queue
        """
        while not self._emit_queue.empty():
            self.send_emission()
class TimedProbe(ProbeBase):
    """
    Probe class that calls probe every config defined interval
    """
    def __init__(self, config, ctx, emit=None):
        """
        **Arguments**

        - config (`dict`): must contain an `interval` key (e.g. "3s")
        - ctx: vaping context
        - emit: optional single output plugin to emit to
        """
        super(TimedProbe, self).__init__(config, ctx, emit)
        if 'interval' not in self.pluginmgr_config:
            raise ValueError('interval not set in config')
        # interval is stored as a number of seconds
        self.interval = parse_interval(self.pluginmgr_config['interval'])
        self.run_level = 0
    def _run(self):
        self.run_level = 1
        while self.run_level:
            start = datetime.datetime.now()
            # since the TimedProbe will sleep between cycles
            # we need to emit all queued emissions each cycle
            self.emit_all()
            msg = self.probe()
            if msg:
                self.queue_emission(msg)
            else:
                self.log.debug("probe returned no data")
            done = datetime.datetime.now()
            elapsed = done - start
            if elapsed.total_seconds() > self.interval:
                # the probe overran its slot; start the next cycle
                # immediately instead of sleeping
                self.log.warning("probe time exceeded interval")
            else:
                # sleep only for the remainder of the interval
                sleeptime = datetime.timedelta(seconds=self.interval) - elapsed
                vaping.io.sleep(sleeptime.total_seconds())
class FileProbe(ProbeBase):
    """
    Probes a file and emits every time a new line is read

    # Config

    - path (`str`): path to file
    - backlog (`int=0`): number of bytes to read from backlog
    - max_lines (`int=1000`): maximum number of lines to read during probe

    # Instanced Attributes

    - path (`str`): path to file
    - backlog (`int`): number of bytes to read from backlog
    - max_lines (`int`): maximum number of lines to read during probe
    - fh (`filehandler`): file handler for opened file (only available if `path` is set)
    """
    def __init__(self, config, ctx, emit=None):
        super(FileProbe, self).__init__(config, ctx, emit)
        self.path = self.pluginmgr_config.get("path")
        self.run_level = 0
        self.backlog = int(self.pluginmgr_config.get("backlog", 0))
        self.max_lines = int(self.pluginmgr_config.get("max_lines", 1000))
        if self.path:
            self.fh = open(self.path, "r")
            # start tailing at the end of the file
            self.fh.seek(0, 2)
            if self.backlog:
                try:
                    # rewind `backlog` bytes so recent history is replayed
                    self.fh.seek(self.fh.tell() - self.backlog, os.SEEK_SET)
                except ValueError as exc:
                    if str(exc).find("negative seek position") > -1:
                        # backlog exceeds the file size; read from the start
                        self.fh.seek(0)
                    else:
                        raise
    def _run(self):
        self.run_level = 1
        while self.run_level:
            self.send_emission()
            for msg in self.probe():
                self.queue_emission(msg)
            vaping.io.sleep(0.1)
    def validate_file_handler(self):
        """
        Here we validate that our filehandler is pointing
        to an existing file.

        If it doesn't, because the file has been deleted or replaced,
        we close the filehandler and let the caller skip this cycle;
        the next cycle reopens it.

        **Returns**

        `bool` - True when the handler is valid and readable
        """
        if self.fh.closed:
            try:
                self.fh = open(self.path, "r")
                self.fh.seek(0, 2)
            except OSError as err:
                logging.error("Could not reopen file: {}".format(err))
                return False
        open_stat = os.fstat(self.fh.fileno())
        try:
            file_stat = os.stat(self.path)
        except OSError as err:
            logging.error("Could not stat file: {}".format(err))
            return False
        if open_stat != file_stat:
            # BUG FIX: this used to be a bare `self.log` expression — a
            # no-op; actually log why the handler is being recycled.
            self.log.debug("file {} changed on disk, reopening".format(self.path))
            self.fh.close()
            return False
        return True
    def probe(self):
        """
        Probe the file for new lines

        **Returns**

        list of vaping message `dict`s, one per parsed line
        """
        # make sure the filehandler is still valid
        # (e.g. file stat hasn't changed, file exists etc.)
        if not self.validate_file_handler():
            return []
        messages = []
        # BUG FIX: the original called `readlines(self.max_lines)`, but
        # readlines() treats its argument as a size hint in characters,
        # not a line count; enforce max_lines as an actual line count.
        for _ in range(self.max_lines):
            line = self.fh.readline()
            if not line:
                break
            data = {"path": self.path}
            msg = self.new_message()
            # process the line - this is where parsing happens
            parsed = self.process_line(line, data)
            if not parsed:
                continue
            data.update(parsed)
            # process the probe - this is where data assignment happens
            data = self.process_probe(data)
            msg["data"] = [data]
            messages.append(msg)
        # process all new messages before returning them for emission
        messages = self.process_messages(messages)
        return messages
    def process_line(self, line, data):
        """ override this - parse your line in here """
        return data
    def process_probe(self, data):
        """ override this - assign your data values here """
        return data
    def process_messages(self, messages):
        """
        override this - process your messages before they
        are emitted
        """
        return messages
class EmitBase(with_metaclass(abc.ABCMeta, PluginBase)):
    """
    Base class for emit plugins, used for sending data

    expects method emit() to be defined
    """
    def __init__(self, config, ctx):
        """
        **Arguments**

        - config (`dict`)
        - ctx: vaping context
        """
        super(EmitBase, self).__init__(config, ctx)
    @abc.abstractmethod
    def emit(self, message):
        """ accept message to emit """
class TimeSeriesDB(EmitBase):
    """
    Base interface for timeseries db storage plugins

    # Config

    - filename (`str`): database file name template
    - field (`str`): field name to read the value from

    # Instanced Attributes

    - filename (`str`): database file name template
    - field (`str`): field name to read the value from
    """
    def __init__(self, config, ctx):
        """
        **Arguments**

        - config (`dict`): must contain `filename` and `field`
        - ctx: vaping context
        """
        super(TimeSeriesDB, self).__init__(config, ctx)
        # filename template
        self.filename = self.config.get("filename")
        # field name to read the value from
        self.field = self.config.get("field")
        if not self.filename:
            raise ValueError("No filename specified")
        if not self.field:
            raise ValueError("No field specified, field should specify which value to store in the database")
    def create(self, filename):
        """
        Create database

        **Arguments**

        - filename (`str`): database filename
        """
        # subclasses must implement
        raise NotImplementedError()
    def update(self, filename, time, value):
        """
        Update database

        **Arguments**

        - filename (`str`): database filename
        - time (`int`): epoch timestamp
        - value (`mixed`)
        """
        raise NotImplementedError()
    def get(self, filename, from_time, to_time):
        """
        Retrieve data from database for the specified
        timespan

        **Arguments**

        - filename (`str`): database filename
        - from_time (`int`): epoch timestamp start
        - to_time (`int`): epoch timestamp end
        """
        raise NotImplementedError()
    def filename_formatters(self, data, row):
        """
        Returns a dict containing the various filename formatter values

        Values are gotten from the vaping data message as well as the
        currently processed row in the message

        **Arguments**

        - data (`dict`): vaping message
        - row (`dict`): vaping message data row

        **Returns**

        formatter variables (`dict`)
        """
        r = {
            "source" : data.get("source"),
            "field" : self.field,
            "type" : data.get("type")
        }
        # row keys override/extend the message-level values
        r.update(**row)
        return r
    def format_filename(self, data, row):
        """
        Returns a formatted filename using the template stored
        in self.filename

        **Arguments**

        - data (`dict`): vaping message
        - row (`dict`): vaping message data row

        **Returns**

        formatted version of self.filename (`str`)
        """
        return self.filename.format(**self.filename_formatters(data, row))
    def emit(self, message):
        """
        emit to database

        **Arguments**

        - message (`dict`): vaping message dict
        """
        # handle vaping data that arrives in a list
        if isinstance(message.get("data"), list):
            for row in message.get("data"):
                # format filename from data
                filename = self.format_filename(message, row)
                # create database file if it does not exist yet
                if not os.path.exists(filename):
                    self.create(filename)
                # update database
                self.log.debug("storing time:%d, %s:%s in %s" % (
                    message.get("ts"), self.field, row.get(self.field, "-"), filename))
                self.update(filename, message.get("ts"), row.get(self.field))
|
24,222 | 010c183a900da0f1534d6e8e387ec0bdd6d335d7 | import factorization
def main():
    """
    Test the get_factor_list function and factors generator on a few numbers.
    """
    # BUG FIX: the opening banner line printed a stray "\n|"; make it
    # match the closing banner line.
    print("-----------------")
    print("| codedrome.com |")
    print("| Factorization |")
    print("-----------------\n")

    numbers_to_factorize = [15, 19, 25, 50, 77, 99]

    # eager API: whole factor list at once
    print("factorization.get_factor_list\n-----------------------------")
    for n in numbers_to_factorize:
        factors = factorization.get_factor_list(n)
        print("Factors of {}: {}".format(n, factors))

    # lazy API: factors yielded one at a time
    print("\nfactorization.factors (generator)\n---------------------------------")
    for n in numbers_to_factorize:
        print("Factors of {}: ".format(n), end="")
        for f in factorization.factors(n):
            print("{} ".format(f), end="")
        print("")


main()
|
24,223 | a0a66e98fb67f52cbc10372f61e7385ad5b92035 | from datetime import datetime
from flask import render_template, redirect, \
url_for, flash, abort, current_app, request, \
jsonify
from flask_login import current_user, login_required
from . import main
from app import db, csrf
from app.main.forms import NotebookForm
from app.models import Notebook
@main.route('/notebooks', methods=['GET', 'POST'])
@login_required
def notebooks():
    """List the current user's notebooks and handle creation of new ones.

    A valid POST either creates a notebook or flashes a duplicate-title
    warning, then redirects (POST/redirect/GET).  GET renders all
    non-deleted notebooks owned by the current user.
    """
    form = NotebookForm()
    if form.validate_on_submit():
        title = form.title.data
        duplicate = Notebook.query.filter_by(
            title=title,
            author_id=current_user.id).first()
        if duplicate is None:
            db.session.add(Notebook(title=title, author_id=current_user.id))
            db.session.commit()
        else:
            flash('A notebook with name {0} already exists.'.format(title))
        # redirect so a browser refresh does not resubmit the form
        return redirect(url_for('.notebooks'))
    active_notebooks = Notebook.query.filter_by(
        author_id=current_user.id,
        is_deleted=False).all()
    return render_template(
        'app/notebooks.html',
        notebooks=active_notebooks,
        form=form)
@main.route('/notebook/<int:id>')
@login_required
def notebook(id):
    """Show a single notebook and its active notes.

    Returns 404 when the notebook does not exist and 403 when it belongs
    to a different user.
    """
    notebook = Notebook.query.filter_by(id=id).first()
    # BUG FIX: a missing id previously crashed with AttributeError on
    # ``None.author`` and surfaced as a 500; return a proper 404 instead.
    if notebook is None:
        abort(404)
    if current_user != notebook.author:
        abort(403)
    return render_template(
        'app/notebook.html',
        notebook=notebook,
        notes=notebook.active_notes())
@main.route('/notebook/<int:id>', methods=['DELETE'])
@login_required
def delete_notebook(id):
notebook = Notebook.query.filter_by(id=id).first()
if current_user != notebook.author:
abort(403)
else:
if notebook.id == current_user.default_notebook:
return jsonify({"error": "You cannot delete your default notebook!"}), 400
else:
notebook.is_deleted = True
notebook.updated_date = datetime.utcnow()
db.session.commit()
for note in notebook.notes:
note.is_deleted = True
note.updated_date = datetime.utcnow()
db.session.commit()
return jsonify(notebook.to_json()) |
24,224 | 3f4b4ea2859cf404c0ac7460fc3c0895b53cbb0b | '''
keys: for each step, we always choose the path with least obstacles to deal with
Solutions:
Similar:
T:
S:
'''
from typing import List
from collections import deque
class Solution:
    # LeetCode 1293 — shortest path in a grid with obstacle elimination.
    # Approach reference:
    # https://leetcode.com/problems/shortest-path-in-a-grid-with-obstacles-elimination/discuss/451832/Python-Short-BFS-Solution
    # O(m*n*k) time and space
    def shortestPath1(self, grid: List[List[int]], k: int) -> int:
        """Layered BFS over states (row, col, obstacles eliminated so far).

        BUG FIXES vs the original:
        - ``collections.deque`` raised NameError (only ``deque`` is
          imported at module level); use the imported name.
        - ``range(queue)`` raised TypeError; the layer size is
          ``len(queue)``.
        """
        m, n = len(grid), len(grid[0])
        queue = deque([[0, 0, 0]])   # row, col, obstacles met so far
        visited = {(0, 0): 0}        # (row, col) -> fewest obstacles met
        steps = 0
        while queue:
            for _ in range(len(queue)):  # expand the current BFS layer
                r, c, obs = queue.popleft()
                if obs > k:  # ran out of elimination quota
                    continue
                if r == m - 1 and c == n - 1:
                    return steps
                for nr, nc in [(r+1, c), (r-1, c), (r, c+1), (r, c-1)]:
                    if 0 <= nr < m and 0 <= nc < n:
                        next_obs = obs + 1 if grid[nr][nc] == 1 else obs
                        # Re-enqueue a cell only when we reach it having
                        # eliminated fewer obstacles (more quota left) —
                        # only step count to the target matters, not the
                        # number of distinct paths.
                        if next_obs < visited.get((nr, nc), float('inf')):
                            visited[(nr, nc)] = next_obs
                            queue.append([nr, nc, next_obs])
            steps += 1  # advancing to the next layer
        return -1

    # O(m*n*k): each cell enters the queue at most once per remaining quota.
    def shortestPath(self, grid: List[List[int]], k: int) -> int:
        """BFS over (row, col, eliminations remaining), steps carried in
        the queue entries.

        BUG FIX vs the original: after eliminating an obstacle, the
        visited set recorded ``eli_residue`` instead of the queued
        state's ``eli_residue - 1``, so equivalent states were re-queued
        (the cause of the TLE the original comment mentions).
        """
        rows, cols = len(grid), len(grid[0])
        if rows == 1 and cols == 1:
            return 0  # only one cell
        if k > rows - 1 + cols - 1:
            # enough quota to eliminate everything on an L-shaped walk
            return rows - 1 + cols - 1
        queue = deque([(0, 0, k, 0)])  # r, c, eliminations left, steps
        visited = set([(0, 0, k)])
        while queue:
            r, c, eli_residue, steps = queue.popleft()
            for nr, nc in [(r-1, c), (r+1, c), (r, c-1), (r, c+1)]:
                if 0 <= nr < rows and 0 <= nc < cols:
                    # the two ifs are sequential: an obstacle cell is
                    # handled by elimination, a free cell by walking
                    if grid[nr][nc] == 1 and eli_residue > 0 and (nr, nc, eli_residue-1) not in visited:
                        visited.add((nr, nc, eli_residue-1))
                        queue.append((nr, nc, eli_residue-1, steps+1))
                    if grid[nr][nc] == 0 and (nr, nc, eli_residue) not in visited:
                        if nr == rows-1 and nc == cols-1:
                            return steps + 1
                        visited.add((nr, nc, eli_residue))
                        queue.append((nr, nc, eli_residue, steps+1))
        return -1
|
24,225 | 0517699da93f93348e9b120ff564a33ec1fcb956 | import requests
import re
import time
import threading
import pymysql
def write2database(list_content, table_name, event_time):
    """Persist one scrape of the Weibo hot-search list into MySQL.

    **Arguments**
    - list_content: list of (event_name, heat) tuples from the scraper
    - table_name: per-day table name, e.g. "2020_05_01"
    - event_time: "HH_MM" sample time stored with every row
    """
    db = pymysql.connect(host="localhost",
                         user="root",
                         password="123456",
                         port=3306,  # port
                         database="wbhot",
                         charset='utf8')
    try:
        cursor = db.cursor()
        # table_name comes from strftime, not user input, so the string
        # interpolation of the table name here is not an injection risk
        sql = "create table if not exists {}(time char(5), event_name char(30), heat int)".format(table_name)
        cursor.execute(sql)
        db.commit()
        sql_content = "INSERT INTO {} values(%s, %s, %s)".format(table_name)
        try:
            for i in range(len(list_content)):
                cursor.execute(sql_content, (event_time, list_content[i][0], int(list_content[i][1])))
            print('Write data to DB---Success')
            db.commit()
        except Exception as exc:
            # BUG FIX: was a bare `except:` that swallowed every error
            # (including KeyboardInterrupt) without any detail.
            print("Write data to DB---Fail:", exc)
    finally:
        # BUG FIX: close the connection even when table creation fails.
        db.close()
def wbhot():
    """Scrape the Weibo realtime hot-search list, store it via
    write2database, and re-schedule itself every 300 seconds."""
    global timer
    url = "https://s.weibo.com/top/summary?cate=realtimehot"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363"
    }
    try:
        response = requests.get(url, headers=headers)
        # print("response:", response)
    except Exception as error:
        # on failure, wait a minute and retry
        # NOTE(review): the retry is recursive, so long outages grow the
        # call stack; an outer retry loop would be safer.
        error_time = time.strftime("%Y-%m-%d-%H_%M", time.localtime())
        print("Error happened in ", error_time)
        time.sleep(60)
        wbhot()
    else:
        string = response.text
        # print(string)
        # each hot item captured as (event name, heat value)
        results = re.findall('<td class="td-02">.*?top.*?target="_blank">(.*?)</a>.*?<span>(.*?)</span>', string, re.S) # list
        # one table per day, one row per sample time
        table_name, event_time = time.strftime("%Y_%m_%d", time.localtime()), time.strftime("%H_%M", time.localtime())
        write2database(results, table_name, event_time)
    # sample again in 5 minutes; kept on a module-level name so callers
    # can cancel it
    timer = threading.Timer(300, wbhot)
    timer.start()
if __name__ == "__main__":
wbhot()
|
24,226 | 2e706393bee5eb52108228556a8b13411ee8ab7d | import logging
import requests
import boto3
from botocore.exceptions import ClientError
BUCKET_NAME = 'reactvang'
def upload_file(file_name, object_name=None, bucket=BUCKET_NAME):
    """Upload a file to an S3 bucket

    :param file_name: File to upload — either a local path (str) or a
        file-like object
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: True if file was uploaded, else False
    """
    s3_client = boto3.client('s3')
    try:
        if type(file_name) == str:
            s3_client.upload_file(file_name, bucket, object_name)
        else:
            # NOTE(review): file-like objects always go to BUCKET_NAME,
            # ignoring the ``bucket`` argument — confirm this is intended.
            s3_client.upload_fileobj(file_name, BUCKET_NAME, object_name)
    except ClientError as e:
        logging.error(e)
        return False
    return True
def list_bucket_objects():
    """Print every object in the module-level bucket.

    The commented-out block below shows the paginated client-API
    alternative, kept for reference.
    """
    # # Create a client
    # client = boto3.client('s3', region_name='us-west-2')
    # # Create a reusable Paginator
    # paginator = client.get_paginator('list_objects')
    # # Create a PageIterator from the Paginator
    # page_iterator = paginator.paginate(Bucket='my-bucket')
    # for page in page_iterator:
    # print(page['Contents'])
    s3 = boto3.resource('s3')
    bucket_objects = s3.Bucket(name=BUCKET_NAME).objects.all()
    for bucket in bucket_objects:
        # each item is an object summary; printing shows bucket + key
        print(bucket)
        # delete_bucket_object(bucket.key)
def delete_bucket_object(key):
    """Delete object ``key`` from the module-level bucket."""
    s3_resource = boto3.resource('s3')
    obj = s3_resource.Object(BUCKET_NAME, key)
    obj.delete()
def downloand(url="https://vanguardia.com.mx/sites/default/files/styles/paragraph_image_large_desktop_1x/public/amlo-pemex-lopez-obrador-plan-nacional-gas-petroleo-gob-mx.jpg_114089499.jpg"):
    """Stream-download ``url`` and return the requests Response.

    NOTE(review): the name is a typo for ``download``; kept as-is
    because the commented examples below call it by this name.
    ``stream=True`` defers the body so ``.raw`` can be piped straight to
    S3; the custom User-agent avoids simple bot blocking.
    """
    return requests.get(url, stream=True, headers={'User-agent': 'Mozilla/5.0'})
# img_raw = downloand().raw
# img = img_raw.read()
# s3 = boto3.resource('s3')
# s3.Bucket(name=BUCKET_NAME).put_object(Key="amloq.jpg", Body=img)
# upload_file(img_raw, BUCKET_NAME, "DIRECTORY/THAT/YOU/WANT/TO/CREATE/amloqe.jpg",)
# delete_bucket_object("amlo.jpg")
# list_bucket_objects()
# s3 = boto3.client('s3')
# s3.download_file(BUCKET_NAME, 'amlo.jpg', 'amlo.jpg') |
24,227 | 218fb5497533b69bd65b0ef85387bb6517fa11c4 | from flask import Blueprint, url_for, redirect, render_template, request
from .forms import LoginForm, RegistrationForm
from user import User
from sql.dbhelper import DBHelper
from passwordhelper import PasswordHelper
from flask_login import login_user
from flask_login import logout_user
DB = DBHelper()
PH = PasswordHelper()
main = Blueprint('main', __name__)
@main.route("/")
def home():
    """Render the landing page with blank login and registration forms."""
    return render_template(
        "home.html",
        loginform=LoginForm(),
        registrationform=RegistrationForm(),
    )
@main.route("/login", methods=["POST"])
def login():
    """Validate posted credentials and start a session on success.

    On any failure the home page is re-rendered with an error attached
    to the email field.
    """
    form = LoginForm(request.form)
    if form.validate():
        stored = DB.get_user(form.loginemail.data)
        credentials_ok = stored and PH.validate_password(
            form.loginpassword.data, stored['salt'], stored['hashed'])
        if credentials_ok:
            login_user(User(form.loginemail.data), remember=True)
            return redirect(url_for('tables.account'))
        form.loginemail.errors.append("Email or password invalid")
    return render_template("home.html", loginform=form,
                           registrationform=RegistrationForm())
@main.route("/logout")
def logout():
    """End the current session and return to the landing page."""
    logout_user()
    return redirect(url_for("main.home"))
@main.route("/register", methods=["POST"])
def register():
    """Create an account from the posted registration form.

    Guard clauses handle the failure paths (invalid form, duplicate
    email); the happy path salts and hashes the password and stores the
    new user.
    """
    form = RegistrationForm(request.form)
    if not form.validate():
        return render_template("home.html", loginform=LoginForm(),
                               registrationform=form)
    if DB.get_user(form.email.data):
        form.email.errors.append("Email address already registered")
        return render_template("home.html", loginform=LoginForm(),
                               registrationform=form)
    salt = PH.get_salt()
    DB.add_user(form.email.data, salt, PH.get_hash(form.password2.data + salt))
    return render_template("home.html", loginform=LoginForm(),
                           registrationform=form,
                           onloadmessage="Registration successful. Please log in.")
|
24,228 | 3aed0837f139066fcfb28ae229db73a917a145ba | from selenium import webdriver
import unittest
# Python demo - Firefox driver
class SeleniumFireFoxTest(unittest.TestCase):
    """Browser smoke tests for seleniumhq.org using the Firefox driver."""

    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.get("https://www.seleniumhq.org/")

    def _search_from_about(self, term):
        # shared flow: open the About page and submit a search query
        self.driver.find_element_by_partial_link_text("About").click()
        self.driver.find_element_by_id("q").send_keys(term)
        self.driver.find_element_by_id("submit").click()

    def test_selenium_homepage(self):
        self.assertEqual("Selenium - Web Browser Automation", self.driver.title)

    def test_about_selenium_page(self):
        self.driver.find_element_by_partial_link_text("About").click()
        self.assertEqual("About Selenium", self.driver.title)

    def test_search(self):
        self._search_from_about("Automation")
        self.assertEqual("Google Custom Search", self.driver.title)

    def test_back(self):
        self._search_from_about("Automation")
        self.driver.back()
        self.assertEqual("About Selenium", self.driver.title)

    def tearDown(self):
        # BUG FIX: close() only closes the current window and can leave
        # the browser/geckodriver process running between tests; quit()
        # ends the whole WebDriver session.
        self.driver.quit()
if __name__ == '__main__':
unittest.main() |
24,229 | 4efd9095de433ecf71ba33052dca5aec1a2712bb | from typing import Generator
def trial_div(n: int) -> bool:
    """Determine if natural number ``n`` is prime by trial division up to sqrt(n)."""
    if n == 1:
        return False
    i = 2
    while i**2 <= n:
        if n % i == 0:
            return False
        i += 1
    return True


def lucas_lehmer() -> Generator[int, None, None]:
    """Yield the Lucas-Lehmer sequence 4, 14, 194, ... (s_{k+1} = s_k^2 - 2)."""
    seed = 4
    while True:
        yield seed
        seed = seed**2 - 2


def ll_primality(n: int) -> bool:
    """Determine if the Mersenne number 2^n - 1 is prime via the
    Lucas-Lehmer primality test.

    BUG FIX: the original rejected n == 2, but 2^2 - 1 = 3 is prime; the
    Lucas-Lehmer recurrence only applies to odd prime exponents, so the
    standard statement of the test handles p = 2 as a special case.

    Also reduces the sequence modulo 2^n - 1 at every step (same result,
    since only the residue matters, but the terms no longer grow
    double-exponentially).
    """
    if n == 2:
        return True
    if n <= 2 or not trial_div(n):
        return False
    m = 2**n - 1
    s = 4  # s_1 of the Lucas-Lehmer sequence
    for _ in range(n - 2):  # advance to s_{n-1}, reduced mod m
        s = (s * s - 2) % m
    return s % m == 0
def sieve(n: int) -> Generator[int, None, None]:
    """Yield all primes up to and including ``n`` (Sieve of Eratosthenes).

    BUG FIXES vs the original:
    - it removed elements from the list it was iterating, which silently
      skips the candidate after each removal;
    - the ``while p**2 < n`` strict bound never sieved with p when
      p*p == n, so e.g. sieve(25) yielded 25;
    - ``list.remove`` inside the scan made it quadratic; the boolean
      sieve below is O(n log log n).

    Note: like the original, ``n`` itself is included when prime
    (the old docstring said "below N", but the code used range(2, n+1)).
    """
    if n < 2:
        return
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    p = 2
    while p * p <= n:
        if is_prime[p]:
            # strike out multiples starting at p*p (smaller multiples
            # were already struck by smaller primes)
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
        p += 1
    yield from (i for i in range(2, n + 1) if is_prime[i])
|
24,230 | 0e0aa7054df9df9825751d1a412c11b457e9262e | from pathlib import Path
import itertools
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
DATA_FOLDER = Path("data-ic")
def get_timeline():
    """Return the sorted unique dates present in data/nice_ic_by_day.csv."""
    frame = pd.read_csv(Path("data", "nice_ic_by_day.csv"))
    return sorted(frame["Datum"].unique())
def export_date(df, data_folder, prefix, data_date=None, label=None):
    """Write ``df`` to DATA_FOLDER/<data_folder>/<prefix>[_<label>].csv.

    - data_date: when truthy, keep only rows whose "Datum" equals it.
    - label: optional suffix appended to the file name.
    """
    subset = df.loc[df["Datum"] == data_date, :] if data_date else df
    file_name = f"{prefix}_{label}.csv" if label is not None else f"{prefix}.csv"
    export_path = Path(DATA_FOLDER, data_folder, file_name)
    print(f"Export {export_path}")
    subset.to_csv(export_path, index=False)
# NICE data
VARIABLES = [
"icCount",
"new",
"intakeCount",
"intakeCumulative",
"survivedCumulative",
"diedCumulative",
"dischargedTotal"
]
TYPE = [
"Totaal ingezette IC's",
"Toename opnamen (IC)",
"Totaal opnamen (IC)",
"Cumulatief opnamen (IC)",
"Cumulatief ontslag (ziekenhuis)",
"Cumulatief ontslag (overleden)",
"Totaal ontslag (IC)",
"Toename ontslag (ziekenhuis)",
"Toename ontslag (overleden)",
]
def main_long_nice():
    """Build a long-format (Datum, Type, Aantal) table from the NICE IC
    data and export it as NICE_IC_long_latest.csv."""
    df_reported = pd.read_csv(Path("data", "nice_ic_by_day.csv"))
    # total new admissions = confirmed + suspected
    df_reported['new'] = df_reported['newIntake'] + df_reported['newSuspected']
    df_reported['Type'] = 'NA'
    big = pd.DataFrame([])
    # stack each metric column into long format with its Dutch label
    # NOTE(review): DataFrame.append is deprecated in pandas >= 1.4;
    # pd.concat is the forward-compatible replacement.
    for i in VARIABLES:
        new = df_reported[['Datum', i, 'Type']]
        new = new.rename(columns={i: "Aantal"})
        new['Type'] = TYPE[VARIABLES.index(i)]
        big = big.append(new, ignore_index = True)
    # derive daily increments from the two cumulative discharge series
    # NOTE(review): ``new`` is a slice of ``big``; the assignments below
    # may trigger SettingWithCopyWarning — a .copy() would silence it.
    new = pd.DataFrame([])
    new = big.loc[big['Type'].isin(["Cumulatief ontslag (ziekenhuis)", "Cumulatief ontslag (overleden)"])]
    new["AantalCumulatief"] = new["Aantal"]
    new.loc[new["Type"] == "Cumulatief ontslag (ziekenhuis)", "Type"] = "Toename ontslag (ziekenhuis)"
    new.loc[new["Type"] == "Cumulatief ontslag (overleden)", "Type"] = "Toename ontslag (overleden)"
    # day-over-day difference per type
    new["Aantal"] = new \
        .groupby('Type', sort=True)['AantalCumulatief'] \
        .transform(pd.Series.diff)
    # the first reporting day has no previous day to diff against;
    # fall back to the cumulative value itself
    new.loc[new["Datum"] == "2020-02-27", "Aantal"] = \
        new.loc[new["Datum"] == "2020-02-27", "AantalCumulatief"]
    big = big.append(new, sort = False)
    big = big.sort_values('Datum', ascending=True)
    big = big.reset_index(drop=True)
    # nullable integer dtype keeps NA for days without a diff value
    big['Aantal'] = big["Aantal"].astype(pd.Int64Dtype())
    # format the columns
    big = big[[
        "Datum",
        "Type",
        "Aantal"
    ]]
    Path(DATA_FOLDER, "data-nice").mkdir(exist_ok=True)
    # dates = sorted(big["Datum"].unique())
    # export by date
    # for data_date in dates:
    # export_date(big, "data-nice", "NICE_NL_IC", data_date, str(data_date).replace("-", ""))
    # export latest day
    # export_date(big, "data-nice", "NICE_NL_IC", data_date=dates[-1], label="latest")
    # export all (latest download)
    export_date(big, "data-nice", "NICE_IC_long", data_date=None, label="latest")
TYPES = [
"IngezetteICs",
"ToenameOpnamen",
"TotaalOpnamen",
"CumulatiefOpnamen",
"CumulatiefOntslagZiekenhuis",
"CumulatiefOntslagOverleden",
"TotaalOntslagIC",
"ToenameOntslagZiekenhuis",
"ToenameOntslagOverleden",
]
def main_wide_nice():
    """Build a wide-format table (one column per metric) from the NICE
    IC data and export it as NICE_IC_wide_latest.csv."""
    df_reported = pd.read_csv(Path("data", "nice_ic_by_day.csv"))
    # total new admissions = confirmed + suspected
    df_reported['new'] = df_reported['newIntake'] + df_reported['newSuspected']
    df_reported.drop(['newIntake', 'newSuspected'], axis = 1, inplace = True)
    # map the source column names onto the Dutch export names
    df_reported.rename(columns={
        VARIABLES[0]: TYPES[0],
        VARIABLES[1]: TYPES[1],
        VARIABLES[2]: TYPES[2],
        VARIABLES[3]: TYPES[3],
        VARIABLES[4]: TYPES[4],
        VARIABLES[5]: TYPES[5],
        VARIABLES[6]: TYPES[6]}, inplace=True)
    # daily increments derived from the cumulative "deceased" series;
    # the first row has no previous day, hence the nullable Int64 dtype
    df_reported["ToenameOntslagOverleden"] = df_reported["CumulatiefOntslagOverleden"]
    df_reported["ToenameOntslagOverleden"] = df_reported \
        ['CumulatiefOntslagOverleden'] \
        .transform(pd.Series.diff)
    df_reported['ToenameOntslagOverleden'] = df_reported["ToenameOntslagOverleden"].astype(pd.Int64Dtype())
    # same derivation for the "discharged to hospital" series
    df_reported["ToenameOntslagZiekenhuis"] = df_reported["CumulatiefOntslagZiekenhuis"]
    df_reported["ToenameOntslagZiekenhuis"] = df_reported \
        ['CumulatiefOntslagZiekenhuis'] \
        .transform(pd.Series.diff)
    df_reported['ToenameOntslagZiekenhuis'] = df_reported["ToenameOntslagZiekenhuis"].astype(pd.Int64Dtype())
    # format the columns
    df_reported = df_reported[[
        "Datum",
        "IngezetteICs",
        "TotaalOpnamen",
        "ToenameOpnamen",
        "CumulatiefOpnamen",
        "ToenameOntslagZiekenhuis",
        "CumulatiefOntslagZiekenhuis",
        "ToenameOntslagOverleden",
        "CumulatiefOntslagOverleden",
        "TotaalOntslagIC"
    ]]
    Path(DATA_FOLDER, "data-nice").mkdir(exist_ok=True)
    # export all (latest download)
    export_date(df_reported, "data-nice", "NICE_IC_wide", data_date=None, label="latest")
if __name__ == '__main__':
DATA_FOLDER.mkdir(exist_ok=True)
main_long_nice()
main_wide_nice()
|
24,231 | 6fdd89ae32835114ec737eb494b0ebd7155db688 | import petl as etl
import pycountry_convert as pycountry
import pandas as pd
import pymysql
import sys
import datetime
# Función para determinar el continente de un país por nombre
def get_continent_code(country):
    """Return the two-letter continent code for ``country``.

    Falls back to a hand-maintained mapping for territories and special
    entries (cruise ships, partially recognised states) that
    pycountry_convert does not know, and returns 'N/A' when nothing
    matches.
    """
    try:
        return pycountry.country_alpha2_to_continent_code(pycountry.country_name_to_country_alpha2(country))
    except Exception:
        # pycountry_convert raises KeyError for unknown names; a broad
        # (but no longer bare) except keeps the ETL running on any
        # lookup failure without also trapping KeyboardInterrupt.
        if (country == 'Diamond Princess') or (country == 'Timor-Leste'):
            return 'AS'
        elif (country == 'Western Sahara'):
            return 'AF'
        elif (country == 'MS Zaandam'):
            return 'NA'
        elif (country == 'Kosovo') or (country == 'Holy See'):
            return 'EU'
        else:
            # BUG FIX: the original passed '%s' and the value as two
            # separate print() arguments, printing a literal "%s";
            # interpolate the country name properly.
            print('País no encontrado: %s' % country)
            return 'N/A'
# Función para procesar los archivos de casos confirmados, fallecidos y recuperados
# Esperamos el path del archivo y un nombre que será usado como nombre de la tabla en la base de datos
def procesar_fuente(path, nombre):
    """Load one Johns Hopkins time-series CSV, reshape it to long format
    (Country, Date, Cases, Continent) and load it into MySQL table
    ``nombre`` (created/dropped on each run).

    **Arguments**
    - path: URL or path of the source CSV
    - nombre: destination table name
    """
    try:
        # read the source CSV
        tabla = etl.fromcsv(path)
        # rename the headers
        tabla = etl.rename(tabla, {'Country/Region': 'Country'})
        # Fix the data types: from column 5 on, every column is an
        # integer count of people/cases.  While scanning, also rewrite
        # the date headers from 1/23/20 style to 2020-01-23.
        headers = etl.fieldnames(tabla)
        i=0
        for header in headers:
            if i>=4:
                tabla = etl.convert(tabla, header, int) # fix the data type
                fecha = datetime.datetime.strptime(header, '%m/%d/%y') # parse the header date
                tabla = etl.rename(tabla, header, fecha.strftime('%Y-%m-%d'))
            i = i + 1
        # drop the Province/State, Lat and Lon columns, which are unused
        tabla = etl.cutout(tabla, 0, 2, 3)
        # normalise some country names so a continent can be assigned
        tabla = etl.convert(tabla, 'Country', 'replace', 'Congo (Brazzaville)', 'Congo')
        tabla = etl.convert(tabla, 'Country', 'replace', 'Congo (Kinshasa)', 'Democratic Republic of the Congo')
        tabla = etl.convert(tabla, 'Country', 'replace', 'Cote d\'Ivoire', 'Ivory Coast')
        tabla = etl.convert(tabla, 'Country', 'replace', 'Korea, South', 'South Korea')
        tabla = etl.convert(tabla, 'Country', 'replace', 'West Bank and Gaza', 'Palestine')
        tabla = etl.convert(tabla, 'Country', 'replace', 'Burma', 'Myanmar')
        tabla = etl.convert(tabla, 'Country', 'replace', 'US', 'USA')
        tabla = etl.convert(tabla, 'Country', 'replace', 'Taiwan*', 'Taiwan')
        # group and accumulate the results by country (sums the
        # per-province rows into one row per country)
        df_confirmed = etl.todataframe(tabla)
        df = df_confirmed.groupby(['Country']).sum()
        tabla = etl.fromdataframe(df, include_index=True)
        # restore the Country column name
        tabla = etl.rename(tabla, {'index': 'Country'})
        # unpivot the date columns into rows and rename the new columns
        tabla = etl.melt(tabla, 'Country')
        tabla = etl.rename(tabla, {'variable': 'Date'})
        tabla = etl.rename(tabla, {'value': 'Cases'})
        # add the continent, for grouping
        tabla = etl.addfield(tabla, 'Continent', lambda rec: get_continent_code(rec['Country']))
        # and make sure the columns are the types they must be
        tabla = etl.convert(tabla, 'Cases', int)
        tabla = etl.convert(tabla, 'Date', lambda v: datetime.datetime.strptime(v, '%Y-%m-%d') )
        # finally, load the table into the data repository
        conn = pymysql.connect(password='cenfotec', database='covid', user='covid')
        conn.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
        etl.todb(tabla, conn, nombre, create=True, drop=True)
        conn.close()
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt; it
        # re-raises immediately, so errors stay visible.
        print('Se ha presentado un error! ', sys.exc_info()[0])
        raise
# Source data files we are going to read (JHU CSSE COVID-19 time series)
uri_confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
uri_death = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
uri_recovered = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
# Use procesar_fuente to load each of the files into the database
procesar_fuente(uri_confirmed, 'confirmados')
procesar_fuente(uri_death, 'fallecidos')
procesar_fuente(uri_recovered, 'recuperados')
# Visualization examples kept for debugging
#print(etl.header(tabla))
#print(etl.records(tabla))
#print(tabla.lookall())
#etl.tocsv(tabla, 'confirmados.csv')
#df.to_csv(r'confirmados.csv', index=True, header=True)
|
24,232 | 9d2bbdf958bcbc4de6132981b3083ebeb0d96cf0 | import time
def remaintime(t):
    """Count down `t` seconds, printing and sleeping once per second.

    Returns the string "time is over." when the countdown finishes.
    Fix: the original recursed once per second, which hits Python's
    recursion limit for large `t` and never terminates for negative or
    fractional `t`; this iterative version handles all of those.
    """
    while t > 0:
        print("waits for", str(t), "seconds." )
        time.sleep(1)
        t -= 1
    return "time is over."
print(remaintime(10)) |
24,233 | 3f2f8d9ce7d2e42e06d25eaed05fc10c25169dc3 | # print(1+1)
# print(2 * 3 )
# print('budi' + 'susi')
# # variables
# nama = 'andi'
# usia = 12
# print(nama)
# print(usia)
# tinggi = 188.8
# print(tinggi)
# jomblo = False
# print(jomblo)
# print('halo, aku ' + nama)
# # print('halo, aku ', + nama) # salah ini
# print('umurku' + str(usia))
# print('umurku', usia)
# print('purwadhika\tschool')
# print('purwadhika\nschool')
# print(' saya ' + nama + ' usia ' + str(usia))
# print('saya', nama, 'usia', usia)
# print(f'saya {nama} usia {usia}')
# print('saya {} usia{}' .format(nama,usia))
# print(nama.lower())
# print(nama.upper())
# x = 'satrio'
# print(x.islower())
# print(x.isupper())
# print(len(x))
# print(x[0 : len(x) : 1])
# print(x[0])
# print(x [-1])
# print(x [len(x) - 1])
# print(x.replace('s', 't'))
# print(x.replace('a', 'k'))
# x = 12
# x = x + 13
# print(x)
# how to count the number of letters in a string
nama = 'Purwadhika Startup & Coding School'
print(len(nama.replace(' ','')))  # length with spaces removed
print(len(nama))                  # full length, spaces included
# index of a letter: counting starts at zero; index() returns the first
# occurrence.  NOTE(review): the comment mentioned 'c' but the code looks
# up 'r' -- verify which was intended.
print(nama.lower().index('r') -0)
huruf = nama.lower()
hitung = huruf.count('c')
print('thus there are', hitung, 'c')
nama_split = nama.lower().split()
nama_split  # bare expression: only shows a value in a REPL/notebook
print(nama_split[1].count('t'))
nama.count('startup')  # bare expression: result is discarded
print (nama_split)
nama_split[0].count('startup')  # bare expression: result is discarded
|
24,234 | 35be63b0d419f0d8e8239882e79ce5816068ea78 | from copy import copy
class CFG():
    """Control-flow graph built from an ordered list of basic-block descriptors.

    Each descriptor in `block_stack` is a mutable 4-item sequence
    [block_type, block_id, follows, marked]; the graph is stored as an
    adjacency dict mapping node -> list of [destination, weight] pairs,
    with synthetic 'START' and 'EXIT' nodes.

    NOTE(review): the original indentation was lost in this dump; the
    nesting below is a best-effort reconstruction -- verify against the
    upstream source.
    """

    def __init__(self, block_stack):
        self.graph = self.generate_graph(block_stack)
        # NOTE(review): the generated graph is immediately overwritten by
        # this hard-coded graph -- looks like leftover debugging/test data.
        self.graph = {
            1: [[2, 10]],
            2: [[4, 5], [3, 5]],
            3: [[6, 5]],
            4: [[5, 5]],
            5: [[6, 5]],
            6: [[8, 6], [7, 4]],
            7: [[8, 4]],
            8: [[9, 17], [10, 10]],
            9: [[8, 17]],
            10: [[11, 10]],
            11: [[13, 8], [12, 2]],
            12: [[17, 2]],
            13: [[15, 6], [14, 2]],
            14: [[17, 2]],
            15: [[17, 2], [16, 4]],
            16: [[17, 4]],
            17: [['EXIT', 10]],
            'EXIT': [['START', 10]],
            'START': [[1, 10]]
        }

    def getGraph(self):
        """Return the adjacency dict."""
        return self.graph

    def generate_graph(self, blocks):
        """Build the CFG adjacency dict from the block descriptors (weight 1 edges)."""
        graph = {}
        # one (initially empty) adjacency list per block
        for t, block_id, f, m in blocks:
            graph[block_id] = []
        # synthetic entry/exit; EXIT loops back to START
        graph['START'] = [[blocks[0][1], 1]]
        graph['EXIT'] = [['START', 1]]
        graph[blocks[-1][1]].append(['EXIT', 1])
        for i in range(0, len(blocks)):
            block_type, block_id, follows, marked = blocks[i]
            previous_id = block_id - 1
            if follows != 0:
                blocks[i - 1][3] = True
                if follows == 1:
                    # join point after a conditional: connect every still
                    # "marked" predecessor branch to this block
                    for j in reversed(range(block_id)):
                        if blocks[j][3] and blocks[j][2] != 2:
                            if [block_id, 1] not in graph[blocks[j][1]]:
                                graph[blocks[j][1]].append([block_id, 1])
                        if blocks[j][0] == 'IF_THEN' and j != block_id - 1:
                            break
                else:
                    # block following a loop body: add the back edge to the
                    # nearest enclosing FOR plus the loop's exit edge
                    for_block_id = 0
                    for j in reversed(range(block_id)):
                        if blocks[j][0] == 'FOR':
                            for_block_id = blocks[j][1]
                            break
                        blocks[j][3] = False
                    if [for_block_id, 1] not in graph[previous_id]:
                        graph[previous_id].append([for_block_id, 1])
                    if [block_id, 1] not in graph[for_block_id]:
                        graph[for_block_id].append([block_id, 1])
            if block_type == 'IF_THEN':
                if block_id != 1:
                    if [block_id, 1] not in graph[previous_id]:
                        graph[previous_id].append([block_id, 1])
                # walk forward to the branch target (ELSE/ELIF or join)
                j = block_id
                while (True):
                    # # if blocks[j][0] == 'if':
                    # #     if_counters += 1
                    if blocks[j][0] in ['ELSE', 'ELIF'] or blocks[j][2] == 1:
                        if [blocks[j][1], 1] not in graph[block_id]:
                            graph[block_id].append([blocks[j][1], 1])
                        break
                    j += 1
            elif block_type == 'ELIF':
                # walk backwards and connect everything not after an if/for
                for j in reversed(range(block_id)):
                    # NOTE(review): this compares a block *id* (blocks[j][1])
                    # against type names; probably meant blocks[j][0] -- verify
                    if blocks[j][2] != 0 or blocks[j][1] not in ['ELIF', 'IF_THEN']:
                        break
                    if [blocks[j-1][1], 1] not in graph[blocks[j][1]]:
                        graph[blocks[j][1]].append([blocks[j-1][1], 1])
                # walk forward and find children not yet connected
                # if_counters = 0
                j = block_id
                while (True):
                    # # if blocks[j][0] == 'if':
                    # #     if_counters += 1
                    if blocks[j][0] in ['ELSE', 'ELIF'] or blocks[j][2] == 1:
                        if [blocks[j][1], 1] not in graph[block_id]:
                            graph[block_id].append([blocks[j][1], 1])
                        break
                    j += 1
                blocks[i - 1][3] = True
            elif block_type == 'ORDINARY':
                if block_id != 1:
                    if blocks[i][2] == 2:
                        continue
                    if [block_id, 1] not in graph[previous_id]:
                        graph[previous_id].append([block_id, 1])
            elif block_type == 'ELSE':
                for j in reversed(range(block_id)):
                    # NOTE(review): 'IF' here vs 'IF_THEN' in the ELIF branch
                    # above -- one of the two is likely a typo; verify
                    if blocks[j][2] != 0 or blocks[j][1] not in ['ELIF', 'IF']:
                        break
                    if [blocks[j-1][1], 1] not in graph[blocks[j][1]]:
                        graph[blocks[j][1]].append([blocks[j-1][1], 1])
                blocks[i - 1][3] = True
            elif block_type == 'FOR':
                if block_id != 1:
                    if [block_id, 1] not in graph[previous_id]:
                        graph[previous_id].append([block_id, 1])
        return graph

    def spanning_tree(self):
        """Depth-first spanning tree of the graph, rooted at 'EXIT'.

        Returns an adjacency dict, or -1 if the DFS exhausts its stack
        before every node in self.graph has been visited.
        """
        start_node = 'EXIT'
        marked_nodes = {}
        tree = {}
        # mark start node as visited
        marked_nodes[start_node] = True
        # initialize stack
        stack = [start_node]
        while len(stack) > 0:
            # take element (node) from the top of the stack
            current_node = stack[-1]
            if current_node not in tree:
                tree[current_node] = []
            # every node is visited
            if len(marked_nodes) == len(self.graph):
                # ensure every destination appears as a key in the result
                # NOTE(review): the local `spanning_tree` assignments below
                # are dead (only `copy(tree)` is returned) -- verify intent
                spanning_tree = copy(tree)
                for node in spanning_tree:
                    for (dest_node, weight) in spanning_tree[node]:
                        if dest_node not in spanning_tree:
                            tree[dest_node] = []
                spanning_tree = copy(tree)
                return copy(tree)
            # visit unmarked neighbour
            has_unvisited = False
            for (dest_node, weight) in self.graph[current_node]:
                if dest_node not in marked_nodes:
                    stack.append(dest_node)
                    marked_nodes[dest_node] = True
                    tree[current_node].append([dest_node, weight])
                    has_unvisited = True
            # if every neighbor of the node
            # is visited remove it from stack
            if not has_unvisited:
                stack.pop()
        return -1

    def spanning_tree_inverse(self, tree=None):
        """Return the edges of self.graph that are NOT in the spanning tree."""
        if not tree:
            tree = self.spanning_tree()
        inverse = {}
        for src_node in self.graph:
            inverse[src_node] = []
        for src_node in self.graph:
            for [dest_node, weight] in self.graph[src_node]:
                if [dest_node, weight] not in tree[src_node]:
                    inverse[src_node].append([dest_node, weight])
        return inverse
|
24,235 | c6b855199861e8de874b246a9fc19a7adaa25aeb | from puddleworld import puddleworld
puddle = puddleworld() |
24,236 | 0503811a57e7b375fb341ff0369bffad0a6be01b | student_heights = input("Input a list of student heights in cm: ").split()
for n in range(0, len(student_heights)):
student_heights[n] = int(student_heights[n])
counter = 0
total_height = 0
for height in student_heights:
counter += 1
total_height += height
print(round(total_height / counter)) |
24,237 | 2b067eabda0908d22d6766dc727692dea7532d3b | import os
def SYSTEMINFO():
    """Run the Windows SYSTEMINFO command via the shell (output goes to stdout)."""
    # NOTE(review): os.system with a fixed command string; Windows-only.
    os.system("SYSTEMINFO")


SYSTEMINFO()
|
24,238 | 829e2acbaa2e55233becd56b7758cb1ccf0a1fea | """Root level URLs are defined here"""
from django.contrib import admin
from django.urls import path, include
from . import views
# Root URL routing table: the admin site, the OAuth2 social-auth endpoints,
# then the API root view and the todos/users apps all mounted at the site root.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("auth/", include("rest_framework_social_oauth2.urls")),
    path("", views.api_root),
    path("", include("todos.urls")),
    path("", include("users.urls")),
]
|
24,239 | 0a17cfbcbb704cde2538a58156a1b0a2a88e5153 | import numpy as np
def pt_adjust_learning_rate(epoch, opt, optimizer):
    """Step-decay the pre-training learning rate.

    Sets every param group's lr to
    ``opt.pt_learning_rate * opt.pt_lr_decay_rate ** k`` where ``k`` is the
    number of milestones in ``opt.pt_lr_decay_epochs`` that ``epoch`` has
    passed.  Leaves the optimizer untouched before the first milestone.

    Fixes: the old docstring claimed a fixed 0.2 decay (the rate actually
    comes from ``opt.pt_lr_decay_rate``); dead commented-out code removed.
    """
    steps = np.sum(epoch > np.asarray(opt.pt_lr_decay_epochs))
    if steps > 0:
        new_lr = opt.pt_learning_rate * (opt.pt_lr_decay_rate ** steps)
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lr
def ft_adjust_learning_rate(optimizer, intial_lr, epoch, lr_steps):
    """Step-decay the fine-tuning learning rate by 0.3 per milestone passed."""
    milestones_passed = sum(epoch >= np.array(lr_steps))
    new_lr = intial_lr * 0.3 ** milestones_passed
    for group in optimizer.param_groups:
        group['lr'] = new_lr
|
24,240 | 2626e1b8889ce7a88fd32a04d329920e461106b3 | #partner: hanheller
#partner: omkazmi
#n is a pos int, xs is a list of ints that are all pos, and range from 0 to n-1
def counts(n, xs):
newlist = []
c = 0
while c < n:
newinstance = 0
for x in xs:
if x == c:
newinstance +=1
newlist.append(newinstance)
c +=1
return newlist |
24,241 | 2a41f0752fe5a8a89e9896c698e5764cde74d22b | from vpython import *
def newton2(FX, FY, FZ, M):
    """Newton's second law: return acceleration [ax, ay, az] = F / M."""
    return [component / M for component in (FX, FY, FZ)]
def vecScale(A, B, C, D):
    """Scale the vector (A, B, C) by the scalar D; return the result as a tuple."""
    return (A * D, B * D, C * D)
ball = sphere(pos=vector(0, 0, 0), radius=10, color=color.blue)
ball.velocity = vector(0, 0, 0)
t = 1
deltat = 0.01
box = box(pos=vector(-1, -1, -1), size=vector(10, 10, 10), color=color.red)
while t < 2:
rate(100)
ball.pos = ball.pos + ball.velocity * deltat
NewX = newton2(1,1,1,1)[0]
NewY = newton2(1,1,1,1)[1]
NewZ = newton2(1,1,1,1)[2]
VelVec = vecScale(NewX,NewY,NewZ,t)
ball.velocity += vector(VelVec[0],VelVec[1],VelVec[2])
t += deltat |
24,242 | 586e8a0186440b1d2d33411cdc1a7cf49cdb2271 | # -*- coding:UTF-8 -*-
import gevent.monkey
gevent.monkey.patch_all()
import warnings
warnings.simplefilter("ignore", category=UserWarning)
import optparse
xssFuzzList = [
]
class Scan(object):
    """Placeholder scanner; the scanning logic is not implemented yet."""

    def run(self):
        """Entry point for a scan run (currently a no-op)."""
        pass
def main():
    """Parse command-line options, then hard-code the target for now.

    Fix: optparse has no 'url' action -- add_option raised
    OptionError("invalid action") before any parsing happened; the
    --url option now uses the standard 'store' action.
    """
    # region: parse the command line
    parse = optparse.OptionParser(usage='usage:%prog [options] --domain 域名 --gevent 协程数', version='%prog 1.0')
    parse.prog = '子域名收集'
    parse.add_option('--url', dest='url', action='store', type=str, metavar='domain', help='域名')
    parse.add_option('--gevent', dest='gevent', action='store', type=int, metavar='gnum', help='域名')
    options, args = parse.parse_args()
    # endregion
    # temporary hard-coded request parameters (override whatever was parsed)
    options.url = "http://xxx.com"
    options.method = "GET"
    options.data = "xxx=?&xxx=?&xxx=?"
if __name__ == "__main__":
    # TODO: find ways around the XSS filter and derive usable XSS characters
    # (fix: this was a bare `@TODO ...` line, which is a SyntaxError)
    main()
|
24,243 | d8ecf13f4f08bc7d075237b6cca5f20b19ffe96d | # 单例模式
import day
class MusicPlayer(object):
    """Singleton music player: every MusicPlayer() call yields the same object."""

    instance = None    # the single shared instance, created lazily
    init_flag = False  # True once __init__ has actually run

    def __new__(cls, *args, **kwargs):
        # hand back the existing instance if there is one
        if cls.instance is not None:
            return cls.instance
        cls.instance = super().__new__(cls)
        return cls.instance

    def __init__(self):
        # __init__ fires on every MusicPlayer() call; only do the real
        # initialisation the first time
        if not MusicPlayer.init_flag:
            MusicPlayer.init_flag = True
            print("play")

    def test(self):
        print("test")
# Exception handling
def input_password():
    """Prompt for a password; return it if it has at least 8 characters, else raise."""
    pwd = input("请输入密码:")
    if len(pwd) < 8:
        # too short: signal the caller with an exception
        raise Exception("密码长度不够")
    return pwd
# Demo: catch the validation error raised above and show its message.
try:
    input_password()
except Exception as e:
    print(e)

# `day` is a project-local module imported at the top of the file; this
# prints the path of its source file.
print(day.__file__)
|
24,244 | dd12688e4248a95519aaa2d23487ccd6a37c4bad | # -*- test-case-name: twisted.test.test_amp.TLSTest -*-
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utilities and helpers for simulating a network
"""
import itertools
from OpenSSL.SSL import Error as NativeOpenSSLError
from zope.interface import implements, directlyProvides
from twisted.python.failure import Failure
from twisted.internet import error
from twisted.internet import interfaces
class TLSNegotiation:
    """Marker object passed through the fake transport to simulate a TLS handshake."""

    def __init__(self, obj, connectState):
        self.obj = obj                      # object offering iosimVerify()
        self.connectState = connectState
        self.sent = False                   # has this side sent its negotiation?
        self.readyToSend = connectState

    def __repr__(self):
        return 'TLSNegotiation(%r)' % (self.obj,)

    def pretendToVerify(self, other, tpt):
        # Set the transport problems list here?  disconnections?
        # hmmmmm... need some negative path tests.
        if not self.obj.iosimVerify(other.obj):
            tpt.disconnectReason = NativeOpenSSLError()
            tpt.loseConnection()
class FakeTransport:
    """
    A wrapper around a file-like object to make it behave as a Transport.

    This doesn't actually stream the file to the attached protocol,
    and is thus useful mainly as a utility for debugging protocols.

    NOTE(review): Python 2 code (zope `implements`, `.next`); indentation
    reconstructed from a whitespace-stripped dump.
    """
    implements(interfaces.ITransport,
               interfaces.ITLSTransport) # ha ha not really

    # Python 2 idiom: bound `.next` of a shared counter hands out unique serials
    _nextserial = itertools.count().next

    closed = 0
    disconnecting = 0
    disconnected = 0
    disconnectReason = error.ConnectionDone("Connection done")
    producer = None
    streamingProducer = 0
    tls = None  # pending TLSNegotiation, set by startTLS()

    def __init__(self):
        self.stream = []  # buffered outgoing writes
        self.serial = self._nextserial()

    def __repr__(self):
        return 'FakeTransport<%s,%s,%s>' % (
            self.isServer and 'S' or 'C', self.serial,
            self.protocol.__class__.__name__)

    def write(self, data):
        # while a TLS negotiation is pending, hold writes in tlsbuf
        if self.tls is not None:
            self.tlsbuf.append(data)
        else:
            self.stream.append(data)

    def _checkProducer(self):
        # Cheating; this is called at "idle" times to allow producers to be
        # found and dealt with
        if self.producer:
            self.producer.resumeProducing()

    def registerProducer(self, producer, streaming):
        """From abstract.FileDescriptor
        """
        self.producer = producer
        self.streamingProducer = streaming
        if not streaming:
            producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def stopConsuming(self):
        self.unregisterProducer()
        self.loseConnection()

    def writeSequence(self, iovec):
        self.write("".join(iovec))

    def loseConnection(self):
        self.disconnecting = True

    def reportDisconnect(self):
        if self.tls is not None:
            # We were in the middle of negotiating!  Must have been a TLS problem.
            err = NativeOpenSSLError()
        else:
            err = self.disconnectReason
        self.protocol.connectionLost(Failure(err))

    def getPeer(self):
        # XXX: According to ITransport, this should return an IAddress!
        return 'file', 'file'

    def getHost(self):
        # XXX: According to ITransport, this should return an IAddress!
        return 'file'

    def resumeProducing(self):
        # Never sends data anyways
        pass

    def pauseProducing(self):
        # Never sends data anyways
        pass

    def stopProducing(self):
        self.loseConnection()

    def startTLS(self, contextFactory, beNormal=True):
        # Nothing's using this feature yet, but startTLS has an undocumented
        # second argument which defaults to true; if set to False, servers will
        # behave like clients and clients will behave like servers.
        connectState = self.isServer ^ beNormal
        self.tls = TLSNegotiation(contextFactory, connectState)
        self.tlsbuf = []

    def getOutBuffer(self):
        # drain buffered writes first; otherwise emit a pending TLS
        # negotiation marker (or None if there is nothing to send)
        S = self.stream
        if S:
            self.stream = []
            return ''.join(S)
        elif self.tls is not None:
            if self.tls.readyToSend:
                # Only _send_ the TLS negotiation "packet" if I'm ready to.
                self.tls.sent = True
                return self.tls
            else:
                return None
        else:
            return None

    def bufferReceived(self, buf):
        if isinstance(buf, TLSNegotiation):
            assert self.tls is not None # By the time you're receiving a
                                        # negotiation, you have to have called
                                        # startTLS already.
            if self.tls.sent:
                self.tls.pretendToVerify(buf, self)
                self.tls = None # we're done with the handshake if we've gotten
                                # this far... although maybe it failed...?
                # TLS started!  Unbuffer...
                b, self.tlsbuf = self.tlsbuf, None
                self.writeSequence(b)
                directlyProvides(self, interfaces.ISSLTransport)
            else:
                # We haven't sent our own TLS negotiation: time to do that!
                self.tls.readyToSend = True
        else:
            self.protocol.dataReceived(buf)
def makeFakeClient(c):
    """Wrap protocol `c` in a FakeTransport configured as the client side."""
    transport = FakeTransport()
    transport.isServer = False
    transport.protocol = c
    return transport
def makeFakeServer(s):
    """Wrap protocol `s` in a FakeTransport configured as the server side."""
    transport = FakeTransport()
    transport.isServer = True
    transport.protocol = s
    return transport
class IOPump:
    """Utility to pump data between clients and servers for protocol testing.

    Perhaps this is a utility worthy of being in protocol.py?

    NOTE(review): Python 2 print statements; indentation reconstructed
    from a whitespace-stripped dump.
    """

    def __init__(self, client, server, clientIO, serverIO, debug):
        self.client = client
        self.server = server
        self.clientIO = clientIO
        self.serverIO = serverIO
        self.debug = debug

    def flush(self, debug=False):
        """Pump until there is no more input or output.

        Returns whether any data was moved.
        """
        result = False
        # bounded loop so a protocol that keeps generating traffic
        # cannot hang the test forever
        for x in range(1000):
            if self.pump(debug):
                result = True
            else:
                break
        else:
            assert 0, "Too long"
        return result

    def pump(self, debug=False):
        """Move data back and forth.

        Returns whether any data was moved.
        """
        if self.debug or debug:
            print '-- GLUG --'
        sData = self.serverIO.getOutBuffer()
        cData = self.clientIO.getOutBuffer()
        self.clientIO._checkProducer()
        self.serverIO._checkProducer()
        if self.debug or debug:
            print '.'
            # XXX slightly buggy in the face of incremental output
            if cData:
                print 'C: '+repr(cData)
            if sData:
                print 'S: '+repr(sData)
        if cData:
            self.serverIO.bufferReceived(cData)
        if sData:
            self.clientIO.bufferReceived(sData)
        if cData or sData:
            return True
        # no payload moved: propagate pending disconnects, one side per call
        if (self.serverIO.disconnecting and
            not self.serverIO.disconnected):
            if self.debug or debug:
                print '* C'
            self.serverIO.disconnected = True
            self.clientIO.disconnecting = True
            self.clientIO.reportDisconnect()
            return True
        if self.clientIO.disconnecting and not self.clientIO.disconnected:
            if self.debug or debug:
                print '* S'
            self.clientIO.disconnected = True
            self.serverIO.disconnecting = True
            self.serverIO.reportDisconnect()
            return True
        return False
def connectedServerAndClient(ServerClass, ClientClass,
                             clientTransportFactory=makeFakeClient,
                             serverTransportFactory=makeFakeServer,
                             debug=False):
    """Returns a 3-tuple: (client, server, pump)
    """
    c = ClientClass()
    s = ServerClass()
    cio = clientTransportFactory(c)
    sio = serverTransportFactory(s)
    c.makeConnection(cio)
    s.makeConnection(sio)
    pump = IOPump(c, s, cio, sio, debug)
    # kick off server greeting, etc
    pump.flush()
    return c, s, pump
|
24,245 | ae19f09295bcfed60e0ad402d825333961604309 | # 6.00x Problem Set 4A Template
#
# The 6.00 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
# Modified by: Sarina Canelake <sarina>
#
import random
import string
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = "words.txt"
def loadWords():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    print "Loading word list from file..."
    # inFile: file
    # NOTE(review): opened unbuffered (third arg 0) and never closed
    inFile = open(WORDLIST_FILENAME, 'r', 0)
    # wordList: list of strings
    wordList = []
    for line in inFile:
        wordList.append(line.strip().lower())
    print " ", len(wordList), "words loaded."
    return wordList
def getFrequencyDict(sequence):
    """
    Returns a dictionary where the keys are elements of the sequence
    and the values are integer counts, for the number of times that
    an element is repeated in the sequence.

    sequence: string or list
    return: dictionary
    """
    freq = {}
    for element in sequence:
        if element in freq:
            freq[element] += 1
        else:
            freq[element] = 1
    return freq
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def getWordScore(word, n):
    """
    Returns the score for a word. Assumes the word is a valid word.

    The score is the sum of the Scrabble points of the word's letters,
    multiplied by the word's length, plus a 50-point bonus when all n
    letters of the hand are used.

    word: string (lowercase letters)
    n: integer (HAND_SIZE; i.e., hand size required for additional points)
    returns: int >= 0
    """
    base = sum(SCRABBLE_LETTER_VALUES[ch] for ch in word
               if ch in SCRABBLE_LETTER_VALUES)
    total = base * len(word)
    if len(word) == n:
        total += 50
    return total
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def displayHand(hand):
    """
    Displays the letters currently in the hand.

    For example:
    >>> displayHand({'a':1, 'x':2, 'l':3, 'e':1})
    Should print out something like:
       a x x l l l e
    The order of the letters is unimportant.

    hand: dictionary (string -> int)
    """
    for letter in hand.keys():
        for j in range(hand[letter]):
            print letter,       # print all on the same line
    print                       # print an empty line
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def dealHand(n):
    """
    Returns a random hand containing n lowercase letters.
    At least n/3 the letters in the hand should be VOWELS.

    Hands are represented as dictionaries. The keys are
    letters and the values are the number of times the
    particular letter is repeated in that hand.

    n: int >= 0
    returns: dictionary (string -> int)
    """
    hand = {}
    # Fix: explicit floor division. `n / 3` is identical for ints on
    # Python 2 but produces a float on Python 3, breaking range() below.
    numVowels = n // 3
    for i in range(numVowels):
        x = VOWELS[random.randrange(0, len(VOWELS))]
        hand[x] = hand.get(x, 0) + 1
    for i in range(numVowels, n):
        x = CONSONANTS[random.randrange(0, len(CONSONANTS))]
        hand[x] = hand.get(x, 0) + 1
    return hand
#
# Problem #2: Update a hand by removing letters
#
def updateHand(hand, word):
    """
    Assumes that 'hand' has all the letters in word.
    In other words, this assumes that however many times
    a letter appears in 'word', 'hand' has at least as
    many of that letter in it.

    Updates the hand: uses up the letters in the given word
    and returns the new hand, without those letters in it.

    Has no side effects: does not modify hand.

    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    remaining = dict(hand)
    for ch in word:
        if ch in remaining:
            remaining[ch] -= 1
    return remaining
#
# Problem #3: Test word validity
#
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.

    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    if not word or word not in wordList:
        return False
    available = dict(hand)
    for ch in word:
        # consume one copy of each letter; fail when none is left
        if available.get(ch, 0) <= 0:
            return False
        available[ch] -= 1
    return True
#
# Problem #4: Playing a hand
#
def calculateHandlen(hand):
    """
    Returns the length (number of letters) in the current hand.

    hand: dictionary (string-> int)
    returns: integer

    Fix: dict.itervalues() is Python-2-only; sum(hand.values()) behaves
    identically on Python 2 and also works on Python 3.
    """
    return sum(hand.values())
def playHand(hand, wordList, n):
    """
    Allows the user to play the given hand, as follows:

    * The hand is displayed.
    * The user may input a word or a single period (the string ".")
      to indicate they're done playing
    * Invalid words are rejected, and a message is displayed asking
      the user to choose another word until they enter a valid word or "."
    * When a valid word is entered, it uses up letters from the hand.
    * After every valid word: the score for that word is displayed,
      the remaining letters in the hand are displayed, and the user
      is asked to input another word.
    * The sum of the word scores is displayed when the hand finishes.
    * The hand finishes when there are no more unused letters or the user
      inputs a "."

      hand: dictionary (string -> int)
      wordList: list of lowercase strings
      n: integer (HAND_SIZE; i.e., hand size required for additional points)
    """
    # Keep track of the total score
    score = 0
    # As long as there are still letters left in the hand:
    while calculateHandlen(hand) > 0:
        # Display the hand
        print( 'Current Hand: '),
        displayHand(hand)
        # Ask user for input (Python 2: raw_input)
        word = raw_input('Enter word, or a "." to indicate that you are finished: ')
        # If the input is a single period:
        if word == '.':
            # End the game (break out of the loop)
            print "Goodbye! Total score:", score, "points."
            print
            break
        # Otherwise (the input is not a single period):
        else:
            # If the word is not valid:
            if isValidWord(word, hand, wordList) is False:
                # Reject invalid word (print a message followed by a blank line)
                print "Invalid word, please try again."
                print
            # Otherwise (the word is valid):
            else:
                # Tell the user how many points the word earned, and the updated total score, in one line followed by a blank line
                score += getWordScore(word, n)
                print word, "earned", getWordScore(word, n), "points. Total:", score, "points"
                print
                # Update the hand
                hand = updateHand(hand, word)
    # Game is over (user entered a '.' or ran out of letters), so tell user the total score
    if calculateHandlen(hand) == 0:
        print "Run out of letters. Total score:", score, "points."
#
# Problem #5: Playing a game
#
def playGame(wordList):
    """
    Allow the user to play an arbitrary number of hands.

    1) Asks the user to input 'n' or 'r' or 'e'.
      * If the user inputs 'n', let the user play a new (random) hand.
      * If the user inputs 'r', let the user play the last hand again.
      * If the user inputs 'e', exit the game.
      * If the user inputs anything else, tell them their input was invalid.

    2) When done playing the hand, repeat from step 1
    """
    # remembers the last dealt hand so 'r' can replay it
    hand = None
    while True:
        selection = raw_input("Enter n to deal a new hand, r to replay the last hand, or e to end game:")
        if selection == 'n':
            hand = dealHand(HAND_SIZE)
            playHand(hand, wordList, HAND_SIZE)
            print
        elif selection == 'r':
            if hand is None:
                print "You have not played a hand yet. Please play a new hand first!"
                print
            else:
                playHand(hand, wordList, HAND_SIZE)
        elif selection == 'e':
            break
        else:
            print "Invalid command."
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
wordList = loadWords()
playGame(wordList)
|
24,246 | 21e74db708ff1dc2cd183764434f536df7221c26 | __source__ = 'https://leetcode.com/problems/shortest-distance-from-all-buildings/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/shortest-distance-from-all-buildings.py
# Time: O(k * m * n), k is the number of the buildings
# Space: O(m * n)
#
# Description: Leetcode # 317. Shortest Distance from All Buildings
#
# You want to build a house on an empty land which reaches all buildings in the shortest amount of distance.
# You can only move up, down, left and right. You are given a 2D grid of values 0, 1 or 2, where:
#
# Each 0 marks an empty land which you can pass by freely.
# Each 1 marks a building which you cannot pass through.
# Each 2 marks an obstacle which you cannot pass through.
# For example, given three buildings at (0,0), (0,4), (2,2), and an obstacle at (0,2):
#
# 1 - 0 - 2 - 0 - 1
# | | | | |
# 0 - 0 - 0 - 0 - 0
# | | | | |
# 0 - 0 - 1 - 0 - 0
# The point (1,2) is an ideal empty land to build a house, as the total travel distance of 3+3+1=7 is minimal.
# So return 7.
#
# Note:
# There will be at least one building. If it is not possible to build such house according to the above rules,
# return -1.
#
# Companies
# Google Zenefits
# Related Topics
# Breadth-first Search
# Similar Questions
# Walls and Gates Best Meeting Point
#
import unittest
class Solution(object):
    """LeetCode 317: shortest total travel distance from an empty cell to all buildings.

    Grid values: 0 = empty land, 1 = building, 2 = obstacle.
    NOTE(review): Python 2 code (xrange); indentation reconstructed from a
    whitespace-stripped dump.
    """

    def shortestDistance(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        def bfs(grid, dists, cnts, x, y):
            # level-order BFS from building (x, y); accumulates the distance
            # to each reachable empty cell in dists and increments cnts for
            # every empty cell this building can reach
            dist, m, n = 0, len(grid), len(grid[0])
            visited = [[False for _ in xrange(n)] for _ in xrange(m)]
            pre_level = [(x, y)]
            visited[x][y] = True
            while pre_level:
                dist += 1
                cur_level = []
                for i, j in pre_level:
                    for dir in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
                        I, J = i+dir[0], j+dir[1]
                        if 0 <= I < m and 0 <= J < n and grid[I][J] == 0 and not visited[I][J]:
                            cnts[I][J] += 1
                            dists[I][J] += dist
                            cur_level.append((I, J))
                            visited[I][J] = True
                pre_level = cur_level

        m, n, cnt = len(grid), len(grid[0]), 0
        dists = [[0 for _ in xrange(n)] for _ in xrange(m)]
        cnts = [[0 for _ in xrange(n)] for _ in xrange(m)]
        # run one BFS per building; cnt = total number of buildings
        for i in xrange(m):
            for j in xrange(n):
                if grid[i][j] == 1:
                    cnt += 1
                    bfs(grid, dists, cnts, i, j)
        # answer: minimum accumulated distance over empty cells that can
        # reach every building; -1 if no such cell exists
        shortest = float("inf")
        for i in xrange(m):
            for j in xrange(n):
                if dists[i][j] < shortest and cnts[i][j] == cnt:
                    shortest = dists[i][j]
        return shortest if shortest != float("inf") else -1
class TestMethods(unittest.TestCase):
    """Placeholder smoke test; does not exercise Solution."""

    def test_Local(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
Traverse the matrix. For each building, use BFS to compute the shortest distance from each '0' to
this building. After we do this for all the buildings, we can get the sum of shortest distance
from every '0' to all reachable buildings. This value is stored in 'distance[][]'.
For example, if grid[2][2] == 0, distance[2][2] is the sum of shortest distance from this block
to all reachable buildings.
Time complexity: O(number of 1)O(number of 0) ~ O(m^2n^2)
We also count how many building each '0' can be reached. It is stored in reach[][].
This can be done during the BFS.
We also need to count how many total buildings are there in the matrix, which is stored in 'buildingNum'.
Finally, we can traverse the distance[][] matrix to get the point having shortest distance to all buildings.
O(m*n)
The total time complexity will be O(m^2*n^2), which is quite high!.
# 6ms 85.93%
class Solution {
int[][] dirs = {{0, -1}, {-1, 0}, {0, 1}, {1, 0}};
int min, m, n;
public int shortestDistance(int[][] grid) {
if(grid == null || grid.length == 0)
return 0;
m = grid.length;
n = grid[0].length;
int[][] dist = new int[m][n];
min = Integer.MAX_VALUE;
int start = 0;
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
if(grid[i][j] == 1){
bfs(grid, i, j, dist, start--);
}
}
}
return min == Integer.MAX_VALUE? -1 : min;
}
private void bfs(int[][] grid, int i, int j, int[][] dist, int start){
Queue<int[]> queue = new LinkedList<>();
queue.add(new int[]{i, j});
//System.out.println("i: " + i + " j: " + j);
min = Integer.MAX_VALUE;
int level = 0;
while(!queue.isEmpty()){
int size = queue.size();
level++;
for(int k = 0; k < size; k++){
int[] cur = queue.poll();
for(int[] d: dirs){
int nextX = cur[0] + d[0];
int nextY = cur[1] + d[1];
if(nextX < 0 || nextY < 0 || nextX >= m || nextY >= n || grid[nextX][nextY] != start)
continue;
dist[nextX][nextY] += level;
min = Math.min(min, dist[nextX][nextY]);
//System.out.println(min);
grid[nextX][nextY]--;
queue.add(new int[]{nextX, nextY});
}
}
}
}
}
# 12ms 74.39%
class Solution {
private static final int[][] DIRECTIONS = new int[][] {{-1, 0}, {1, 0}, {0, -1}, {0, 1}};
public int shortestDistance(int[][] grid) {
int m = grid.length;
int n = m == 0 ? 0 : grid[0].length;
if (m == 0 || n == 0) {
return 0;
}
int[][] distances = new int[m][n];
int[][] reachable = new int[m][n];
int buildings = 0;
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (grid[i][j] == 1) {
bfs(grid, m, n, i, j, distances, reachable, buildings);
buildings++;
}
}
}
int result = Integer.MAX_VALUE;
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (grid[i][j] == 0 && reachable[i][j] == buildings) {
result = Math.min(result, distances[i][j]);
}
}
}
return result == Integer.MAX_VALUE ? -1 : result;
}
private void bfs(int[][] grid, int m, int n, int i, int j, int[][] distances, int[][] reachable, int buildings) {
Queue<Integer> rowQueue = new LinkedList<>();
Queue<Integer> colQueue = new LinkedList<>();
boolean[][] visited = new boolean[m][n];
int path = 1;
rowQueue.add(i);
colQueue.add(j);
visited[i][j] = true;
while (!rowQueue.isEmpty()) {
int size = rowQueue.size();
for (int k = 0; k < size; k++) {
int curRow = rowQueue.poll();
int curCol = colQueue.poll();
for (int[] direction : DIRECTIONS) {
int nextRow = curRow + direction[0];
int nextCol = curCol + direction[1];
if (nextRow >= 0 && nextRow < m && nextCol >= 0 && nextCol < n && grid[nextRow][nextCol] == 0 && !visited[nextRow][nextCol] && reachable[nextRow][nextCol] == buildings) {
rowQueue.add(nextRow);
colQueue.add(nextCol);
distances[nextRow][nextCol] += path;
visited[nextRow][nextCol] = true;
reachable[nextRow][nextCol]++;
}
}
}
path++;
}
}
}
'''
|
24,247 | 57c7109448c07be3d24ef41ab078ebcff19b8cec | import numpy as np
import DataStruct
import threading
import matplotlib.pyplot as plt
import func as f
import os
import scheduling as sch
# Histories accumulated across all f.n_simulate simulation runs.
lst_friend_num_history = list()
lst_meantime_history = list()
lst_mintime_history = list()
lst_maxtime_history = list()
# 2x2 subplot grid shared by every figure.
grid = (2, 2)
# Scheduling policy label; only used to build output paths (the actual
# scheduler call is hard-wired to sch.FCFS() below).
schedule_algorithm = "FCFS"

if __name__ == '__main__':
    # get original condition
    n_wait = int(input("enter number of waiting students: "))
    # NOTE(review): output paths are hard-coded to /home/lsh/... — confirm
    # before running on another machine.
    if not os.path.exists(f"/home/lsh/Documents/informatics_project/{schedule_algorithm}"):
        os.makedirs(f"/home/lsh/Documents/informatics_project/{schedule_algorithm}")
    if not os.path.exists(f"/home/lsh/Documents/informatics_project/{schedule_algorithm}/{n_wait}"):
        os.makedirs(f"/home/lsh/Documents/informatics_project/{schedule_algorithm}/{n_wait}")
    for work in range(f.n_simulate):
        f.reset()
        # set size of friends: normally-distributed group sizes
        f.lst_friend_num = f.sigma_friend_number * np.random.randn(n_wait // f.average_friend_number) + f.average_friend_number
        f.lst_friend_num = [int(i) for i in f.lst_friend_num if i > 0]  # number of friend is non-negative
        for n in f.lst_friend_num:
            lst_friend_num_history.append(n)  # friend num history
        # set eating time: normally-distributed speeds, forced non-negative
        eating_speed = f.sigma_eating_speed * np.random.randn(n_wait) + f.average_eating_speed
        eating_speed = [abs(i) for i in eating_speed]  # eating speed is non-negative
        for i in range(n_wait):
            f.lst_wait_temp.append(DataStruct.Student(eating_speed[i]))
        # using cafeteria with friends: carve the waiting list into groups,
        # first member of each slice becomes the group leader
        index = 0
        for n in f.lst_friend_num:
            n = int(n)
            if n > 0:
                if f.DEBUG: print(f.lst_wait_temp)
                friends = f.lst_wait_temp[index:index + n]
                friends[0].leader = True
                for friend in friends:
                    friend.set_header(friends[0])
                for student in friends:
                    student.add_friend(friends)
            index = index + n
        sch.FCFS()  # scheduling
        # sch.SJF()
        # sch.LJF()
        # calculate time while waiting cafeteria_line1
        # (each person ahead adds (50/7)/60 minutes of queueing)
        n_present_people = 0
        for student in f.cafeteria_line1.return_students():
            student.add_time((50 / 7) / 60 * n_present_people)
            n_present_people += 1
        # calculate time while waiting cafeteria_line2
        n_present_people = 0
        for student in f.cafeteria_line2.return_students():
            student.add_time((50 / 7) / 60 * n_present_people)
            n_present_people += 1
        # add time while get food
        for student in f.lst_wait:
            student.add_time(f.t_serving)
        # prepare for eating: one thread per student plus a recorder thread,
        # all joined before the results are plotted
        lock = threading.Lock()
        for student in f.lst_wait:
            eating_student = threading.Thread(target=f.students_in_cafeteria, args=(student, lock))
            eating_student.start()
            f.lst_eating_students.append(eating_student)
        record_thread = threading.Thread(target=f.recording)
        record_thread.start()
        f.lst_eating_students.append(record_thread)
        for student in f.lst_eating_students:
            student.join()
        print(f"========== {round((work + 1)/f.n_simulate * 100)} % end ==========")
        print(f"no seat: {len(f.lst_no_seat)}")
        # Per-run figure: seat availability, group sizes, total-time histogram.
        plt.figure(num=work+1, clear=True)
        plt.figure(figsize=(8, 8))
        box = {'ec': (0.8, 0.8, 0.8), 'fc': (0.9, 0.9, 0.9)}
        ax1 = plt.subplot2grid(grid, (0, 0), rowspan=1, colspan=1)
        plt.title("number of available seat : time")
        plt.plot(f.lst_available_chair_history, "r-")
        plt.ylabel("number of available seat")
        plt.xlabel("time [sec]")
        plt.grid(True)
        plt.xticks([i * 300 for i in range(round(len(f.lst_available_chair_history) / 300))])
        if len(f.lst_no_seat) > 0:
            font_seat = {"size": 8}
            plt.text(1200, 10, f"no seat: {len(f.lst_no_seat)} people", fontdict=font_seat, bbox=box)
        # plt.savefig(f"/home/lsh/Documents/informatics_project/n={n_wait}_available_seat_graph.png", facecolor='#eeeeee')
        ax2 = plt.subplot2grid(grid, (0, 1), rowspan=1, colspan=1)
        plt.title("friend group size distribution")
        plt.hist(f.lst_friend_num, range=(0.5, 10.5))
        plt.xlabel("number of member")
        plt.ylabel("number of group")
        plt.grid(True)
        plt.xticks(list(range(0, 11)))
        # plt.savefig(f"/home/lsh/Documents/informatics_project/n={n_wait}_friend_group_hist.png", facecolor='#eeeeee')
        ax3 = plt.subplot2grid(grid, (1, 0), rowspan=1, colspan=2)
        plt.title("total time distribution")
        n, _, _ = plt.hist(f.lst_total_time, bins=50, histtype="bar", range=(0, 10))
        plt.xlabel("total time taken")
        plt.ylabel("number of students")
        plt.grid(True)
        plt.xticks([i / 2 for i in range(0, 21)])
        # Summary statistics for this run, kept for the final overview figure.
        np_total_time = np.array(f.lst_total_time)
        mean_total_time = round(float(np.mean(np_total_time)), 2)
        min_total_time = round(float(np.min(np_total_time)), 2)
        max_total_time = round(float(np.max(np_total_time)), 2)
        lst_meantime_history.append(mean_total_time)
        lst_maxtime_history.append(max_total_time)
        lst_mintime_history.append(min_total_time)
        font = {"weight": "bold", "size": 12}
        plt.text(8, max(n) - 1,
                 f"mean: {mean_total_time} [min]\nmin: {min_total_time} [min]\nmax: {max_total_time} [min]",
                 fontdict=font, bbox=box)
        plt.subplots_adjust(left=0.11, bottom=0.11, right=0.90, top=0.90, wspace=0.3, hspace=0.3)
        plt.savefig(f"/home/lsh/Documents/informatics_project/{schedule_algorithm}/{n_wait}/n={n_wait}_test:{work}_group_size_threshold={f.n_group_size_threshold}_mean_group_size={f.average_friend_number}.png", facecolor='#eeeeee')
        plt.close('all')
    # Overview figure across all runs: group sizes and mean/min/max times.
    plt.figure(f.n_simulate + 1)
    plt.figure(figsize=(8, 8))
    plt.subplot2grid(grid, (0, 0), rowspan=1, colspan=1)
    plt.title("friend group size distribution")
    plt.hist(lst_friend_num_history, range=(0.5, 10.5))
    plt.xlabel("number of member")
    plt.ylabel("number of group")
    plt.grid(True)
    plt.xticks(list(range(0, 11)))
    plt.subplot2grid(grid, (0, 1), rowspan=1, colspan=1)
    plt.title("mean time taken")
    plt.hist(lst_meantime_history)
    plt.xlabel("mean time")
    plt.ylabel("number of people")
    plt.grid(True)
    plt.subplot2grid(grid, (1, 0), rowspan=1, colspan=1)
    plt.title("min time taken")
    plt.hist(lst_mintime_history)
    plt.xlabel("min time")
    plt.ylabel("number of people")
    plt.grid(True)
    plt.subplot2grid(grid, (1, 1), rowspan=1, colspan=1)
    plt.title("max time taken")
    plt.hist(lst_maxtime_history)
    plt.xlabel("max time")
    plt.ylabel("number of people")
    plt.grid(True)
    plt.subplots_adjust(left=0.11, bottom=0.11, right=0.90, top=0.90, wspace=0.3, hspace=0.3)
    plt.savefig(f"/home/lsh/Documents/informatics_project/n={n_wait}_sch={schedule_algorithm}_group_size_threshold={f.n_group_size_threshold}_mean_group_size={f.average_friend_number}.png", facecolor='#eeeeee')
    plt.show()
|
24,248 | 8eede2f170a902d5be47878f1b64fa1df68d495f | import random
import time
import redis
from threading import Thread
class QueueMessageWorker(Thread):
    """Worker thread: pops message ids from the Redis list 'queue:' and
    either delivers them ('sent') or flags them as spam ('block').

    FIX: the original reused one variable (`messageStatusChanges`) for both
    the message id and the result of hmget, so every Redis key built after
    the hmget ('message:...', sadd member) embedded a stringified list
    instead of the message id. The id is now kept in its own variable.
    """

    def __init__(self, connect, delay):
        Thread.__init__(self)
        self.connect = connect  # Redis connection
        self.delay = delay      # simulated per-message processing delay (s)

    def run(self):
        status = 'status'
        while 1:
            # Blocking pop; returns (list_name, value) or None on timeout.
            getQueryMessageFromService = self.connect.brpop('queue:')
            if getQueryMessageFromService:
                message_id = int(getQueryMessageFromService[1])
                self.connect.hmset(f'message:{message_id}', {
                    status: 'check'
                })
                fields = self.connect.hmget(f'message:{message_id}', ['messageFromId', 'recipientId'])
                messageFromId = int(fields[0])
                recipientId = int(fields[1])
                self.getMessageReload(messageFromId)
                # ~70% of messages are (randomly) classified as spam.
                if random.random() > 0.3:
                    self.toSpamMessage(messageFromId, message_id)
                else:
                    self.connect.hmset(f'message:{message_id}', {
                        status: 'sent'
                    })
                    self.connect.hincrby(f'user:{messageFromId}', 'sent', 1)
                    self.connect.sadd(f'sentto:{recipientId}', message_id)

    def getMessageReload(self, messageFromId):
        """Move one message of this sender from 'queue' to 'check' counters,
        simulate processing, then release the 'check' counter."""
        self.connect.hincrby(f'user:{messageFromId}', 'queue', -1)
        self.connect.hincrby(f'user:{messageFromId}', 'check', 1)
        time.sleep(self.delay)
        self.connect.pipeline(True)
        self.connect.hincrby(f'user:{messageFromId}', 'check', -1)

    def toSpamMessage(self, messageFromId, messageStatusChanges):
        """Mark message `messageStatusChanges` (the message id) as blocked
        spam, bump the sender's spam score, and publish a notification."""
        status = 'status'
        fromLogin = self.connect.hmget(f'user:{messageFromId}', ['login'])[0]
        self.connect.zincrby('spam:', 1, f'user:{messageFromId}')
        self.connect.hmset(f'message:{messageStatusChanges}', {
            status: 'block'
        })
        self.connect.hincrby(f'user:{messageFromId}', 'block', 1)
        message = self.connect.hmget(f'message:{messageStatusChanges}', ['text'])[0]
        self.connect.publish('spam', f'User {fromLogin} sent spam message: {message}')
if __name__ == '__main__':
    # Spawn worker thread(s); each gets a random per-message delay.
    for x in range(1):
        handlers = random.randint(0, 3)
        connection = redis.Redis(charset='UTF-8', decode_responses=True)
        queryWorkers = QueueMessageWorker(connection, handlers)
        queryWorkers.daemon = True
        queryWorkers.start()
    # Keep the main thread alive so the daemon workers keep running.
    # FIX: sleep instead of the original `while 1: pass` busy-wait, which
    # pegged a CPU core doing nothing.
    while 1:
        time.sleep(1)
|
24,249 | ec2d23353c0f50bcc0565974c0dc93e253d8a4ac | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cgi
import os
import socket
import datetime
import urllib2
import logging
import helptool
from models import Surrogate
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
def is_dev():
    """Return True when running under the App Engine development server."""
    server_software = os.environ.get('SERVER_SOFTWARE', '')
    return server_software.startswith('Development')
class CheckSurrogate(webapp.RequestHandler):
def get(self):
socket.timeout(1)
check_server_count = 0
check_period = 600
mirrordelay = 86400
t1 = datetime.datetime.now()
lm = ''
dm = ''
ttmp = datetime.datetime.now()
keika = ttmp - t1
message = ''
surrogates = Surrogate.all().order('checkpref').order('time')
for surrogate in surrogates:
if check_server_count >= 100 and not is_dev():
break
if surrogate.checkpref > 150 and not is_dev():
continue
ttmp = datetime.datetime.now()
keika = ttmp - t1
if keika > datetime.timedelta(0,20) and not is_dev():
break
if surrogate.checkpref:
surrogate.checkpref += int(surrogate.checkpref)
else:
surrogate.checkpref = 0
if surrogate.tracefile:
True
else:
tracefile = 'ftp-master.debian.org'
if surrogate.type == "CNAME":
check_server_count += 1
dm = 'go check'
tf, lmt = helptool.delegateForCname(surrogate.ip)
if tf:
message = surrogate.ip + " is alive (CNAME host)."
else:
message = surrogate.ip + " is dead (CNAME host)."
logging.info(message)
surrogate.lastModifiedTime = lmt
if surrogate.time - lmt > datetime.timedelta(0,mirrordelay):
surrogate.alive = False
surrogate.checkpref += 1
surrogate.failreason = "DELAY"
else:
surrogate.alive = True
surrogate.checkpref = 0
surrogate.failreason = ""
surrogate.put()
elif surrogate.alive == None or t1 > surrogate.time + datetime.timedelta(0,check_period) or is_dev(): #remote_addr == "127.0.0.1":
check_server_count += 1
dm = 'go check'
k = surrogate.ip
req = urllib2.Request(url="http://" + k + '/debian/project/trace/' + tracefile)
req.add_header('User-Agent',"Debian-cdn-mirror-ping/1.5")
try:
f = urllib2.urlopen(req)
lm = f.info()['Last-Modified']
message = ''
lmt = datetime.datetime.strptime(lm, "%a, %d %b %Y %H:%M:%S GMT")
surrogate.lastModifiedTime = lmt
if surrogate.time - lmt > datetime.timedelta(0,mirrordelay):
surrogate.alive = False
surrogate.checkpref += 1
surrogate.failreason = "DELAY"
else:
surrogate.alive = True
surrogate.checkpref = 0
surrogate.failreason = ""
except urllib2.HTTPError, e:
message += "%s is not working. (HTTP error)" % (k)
surrogate.alive = False
surrogate.checkpref += 1
surrogate.failreason = "E:HTTP"
logging.info(message)
except urllib2.URLError, e:
message += "%s is not working. (URL error)" % (k)
surrogate.alive = False
surrogate.checkpref += 1
surrogate.failreason = "E:URL"
logging.info(message)
except:
message += "%s is not working. " % (k)
surrogate.alive = False
surrogate.checkpref += 1
surrogate.failreason = "E:NO WORK"
logging.info(message)
surrogate.put()
else:
dm = 'no check: ' + "%s is checked less than %d sec ago (surrogate.time %s, %s)" % (surrogate.ip,check_period,surrogate.time,t1)
if users.get_current_user():
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
t2 = datetime.datetime.now()
keika = t2 - t1
template_values = {
'surrogates': surrogates,
'url': url,
'url_linktext': url_linktext,
'message': message,
'lm':lm,
'dm':dm,
'keika':keika,
}
path = os.path.join(os.path.dirname(__file__), 'managesurrogate.html')
self.response.out.write(template.render(path, template_values))
# WSGI routing: expose the CheckSurrogate handler at /checksurrogate.
application = webapp.WSGIApplication(
    [('/checksurrogate', CheckSurrogate)],
    debug=True)


def main():
    # Entry point used by the App Engine Python runtime.
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
|
24,250 | 9b5da3b552a521ac6af622c98a9405668a1b58d0 | import uuid
import redis
import logging
import time
from flask import Flask, request
from flask_restful import Resource, Api, abort
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.query import LWTException, DoesNotExist
from model import Music, ListenLog, TopkMusic
_LOG = logging.getLogger(__name__)
_NAME = 'topk-music'
_redis_client = None
# helper
def setup_logger(logger):
    """Attach an INFO-level stream handler with a standard format."""
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    logger.addHandler(handler)
# Configure the module-level logger once at import time.
setup_logger(_LOG)
def get_redis_client():
    """Return the shared Redis client, creating it on first use."""
    global _redis_client
    if not _redis_client:
        _redis_client = redis.StrictRedis('127.0.0.1', port=6379)
    return _redis_client
def get_app():
    """Build the Flask app: wire REST routes and sync the Cassandra tables."""
    flask_app = Flask(__name__)
    # REST routing (ListenMusic / GetTopK are presumably defined elsewhere
    # in the project — they are not declared in this module).
    rest_api = Api(flask_app)
    rest_api.add_resource(ListenMusic, '/listened')
    rest_api.add_resource(GetTopK, '/topk')
    # Cassandra connection and schema synchronization.
    connection.setup(['127.0.0.1'], _NAME, lazy_connect=True,
                     retry_connect=True, protocol_version=3)
    for model_table in (Music, ListenLog, TopkMusic):
        sync_table(model_table)
    return flask_app
if __name__ == '__main__':
    # FIX: `os` was used here but never imported anywhere in the module,
    # so running the script raised NameError; import it locally.
    import os

    # Allow cqlengine to create/alter tables when run standalone.
    if not os.getenv('CQLENG_ALLOW_SCHEMA_MANAGEMENT'):
        os.environ['CQLENG_ALLOW_SCHEMA_MANAGEMENT'] = '1'
    app = get_app()
    app.run(debug=True)
|
24,251 | 9b7f5f602bdf932313a9808cd437dcded3f5e7c6 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
from dotenv import load_dotenv
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import os
load_dotenv()
from app import app
from dataframes import df_members
from dataframes import df_members_wo_dual_citizenship
from dataframes import df_members_dual_citizenship
from dataframes import df_members_death_cause
MAPBOX_ACCESS_TOKEN = os.getenv('MAPBOX_ACCESS_TOKEN')
# Sort the members frame by year once, in place, for downstream use.
df_members.sort_values("year", inplace=True)
# Build the year dropdown options from the distinct year values,
# newest first.
years_dropdown_options = []
year_names = df_members['year'].value_counts(dropna=False).keys().tolist()
# NOTE(review): sorting assumes all year values are mutually comparable
# (a mix of numbers and the string "Unknown" would raise TypeError) and
# that "Unknown" lands at the very end so `break` only trims the tail —
# confirm the dtype of the 'year' column.
year_names.sort(reverse=True)
for year in year_names:
    if(year == "Unknown"):
        break
    years_dropdown_options.append({"label": year, "value": year})
# Default dataset shown before any dropdown interaction.
data = df_members.query("year == 2019")
# Static page layout: title, year dropdown, and four chart placeholders
# that the update_charts callback fills in.
layout = html.Div(
    children=[
        dbc.Container([
            dbc.Row([
                dbc.Col(html.H1(children='Himalayas Expeditions Members Analytics'), className="mb-2")
            ]),
            dbc.Row([
                dbc.Col(html.P(children='Visualising info on expeditions members by season in the Himalayas from 1920s - 2010s'), className="mb-4")
            ]),
            # Year selector driving all four charts below.
            dbc.Row([
                dcc.Dropdown(
                    id='years-filter',
                    options=years_dropdown_options,
                    value='2019',
                    style={'width': '50%'},
                    clearable=False,
                    className="mb-2"
                ),
            ]),
            dbc.Row([
                dbc.Col(
                    dcc.Graph(
                        id="members-citizenship-chart", config={"displayModeBar": True},
                    ),
                    className="card mb-3",
                )
            ]),
            dbc.Row([
                dbc.Col(
                    dcc.Graph(
                        id="pie-members-sexes-chart", config={"displayModeBar": False},
                    ),
                    className="card mb-3",
                ),
                dbc.Col(
                    dcc.Graph(
                        id="members-sexes-by-seasons-chart", config={"displayModeBar": False},
                    ),
                    className="card mb-3",
                )
            ]),
            dbc.Row([
                dbc.Col(
                    dcc.Graph(
                        id="pie-death-causes-chart", config={"displayModeBar": False},
                    ),
                    className="card mb-3",
                ),
            ]),
        ])
    ]
)
@app.callback(
    [
        Output("members-citizenship-chart", "figure"),
        Output("pie-members-sexes-chart", "figure"),
        Output("members-sexes-by-seasons-chart", "figure"),
        Output("pie-death-causes-chart", "figure")
    ],
    [
        Input("years-filter", "value")
    ],
)
def update_charts(year):
    """Rebuild the four member figures for the selected year.

    Parameters: year — the dropdown value (a year, as string or number).
    Returns a tuple of plotly figures: (citizenship map, sex-ratio pie,
    sexes-by-season bar chart, death-cause pie).
    """
    query = "year == {year}".format(year = year)
    data_map = df_members_wo_dual_citizenship.query(query)
    data = df_members.query(query)
    data_death_causes = df_members_death_cause.query(query)
    # Member counts per sex for the pie chart.
    members_sexes = data["sex"].value_counts().to_frame().reset_index()
    members_sexes.columns = ["sex", "number_of_members"]
    # FIX: take an explicit copy — the two-column slice is a view of `data`,
    # and assigning new columns to it raised SettingWithCopyWarning (and
    # the writes are silently lost under pandas copy-on-write).
    data_copy = data[["season", "sex"]].copy()
    data_copy['is_male'] = np.where(data_copy['sex'] == 'M', True, False)
    data_copy['is_female'] = np.where(data_copy['sex'] == 'F', True, False)
    data_copy.drop(['sex'], axis = 1, inplace = True)
    # Booleans sum to per-season male/female counts.
    sexes_by_seasons = data_copy.groupby("season").sum().reset_index()
    death_causes = data_death_causes["death_cause"].value_counts().to_frame().reset_index()
    death_causes.columns = ["cause", "no_of_deaths"]
    # Map of member citizenships (one marker per member record).
    member_citizenships_chart_figure = go.Figure(go.Scattermapbox(
        lat=data_map["latitude"].tolist(),
        lon=data_map["longitude"].tolist(),
        mode='markers',
        marker=go.scattermapbox.Marker(
            size=9
        ),
        text=data_map["member_id"].tolist(),
    ))
    member_citizenships_chart_figure.update_layout(
        title='Citizenships of Expendition Members',
        autosize=True,
        hovermode='closest',
        mapbox=dict(
            accesstoken=MAPBOX_ACCESS_TOKEN,
        ),
    )
    pie_members_sexes_chart_figure = px.pie(members_sexes, values=members_sexes["number_of_members"], names=members_sexes["sex"], title='Sex Ratios of Expeditions Members')
    members_sexes_by_seasons_chart_figure = go.Figure()
    members_sexes_by_seasons_chart_figure.add_trace(go.Bar(
        x=sexes_by_seasons["season"].tolist(),
        y=sexes_by_seasons["is_male"].tolist(),
        name='Male Expendition Members',
        marker_color='lightsalmon'
    ))
    members_sexes_by_seasons_chart_figure.add_trace(go.Bar(
        x=sexes_by_seasons["season"].tolist(),
        y=sexes_by_seasons["is_female"].tolist(),
        name='Female Expendition Members',
        marker_color='indianred'
    ))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    members_sexes_by_seasons_chart_figure.update_layout(
        barmode='group',
        title={
            'text': "Expendition Members Sex Numbers By Seasons",
            'y':0.1,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'bottom'
        },
        legend_title_text='Sex',
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="right",
            x=1
        ),
    )
    pie_death_causes_chart_figure = px.pie(death_causes, values=death_causes["no_of_deaths"], names=death_causes["cause"], title='Death Cause Ratios of Expeditions Members That Died')
    return member_citizenships_chart_figure, pie_members_sexes_chart_figure, members_sexes_by_seasons_chart_figure, pie_death_causes_chart_figure
|
24,252 | fba43cde3def12ee8c55f3a00f11a82ca5d411b6 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018, Sujeet Akula <sujeet@freeboson.org>
# Distributed under terms of the MIT license.
from pysherdog.cli import main
def test_main():
    """Smoke test: the CLI entry point runs with an empty argument list."""
    main([])
|
24,253 | 569d15d4ad71663b8ee2513bbd45ee5b23cbd027 | import tensorflow as tf
# BASIC OPERATIONS
# ADDING VALUES USING ADDITION OPERATOR
tensor = tf.constant([[1, 2],
                      [3, 4]])
print(tensor)
print(tensor + 10)  # ORIGINAL TENSOR WILL REMAIN UNCHANGED UNTIL IT IS ASSIGNED WITH THE INCREMENTED VALUES
# MULTIPLICATION
print(tensor * 10)
# SUBTRACTION
print(tensor - 1)
# WE CAN USE TENSORFLOW BUILT-IN FUNCTION TOO
print(tf.multiply(tensor, 10))
# PRACTICE WITH MULTIPLE DIMENSIONS
24,254 | 48a59f1d33146394fa029337e57acb654baf1fc7 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn import model_selection
from sklearn.cluster import KMeans
iris = load_iris()
iternim = 10000   # maximum number of random train/test splits to try
a = []            # silhouette scores of rejected iterations
trainselect = []  # [X_train, y_train] of the first split passing the threshold
for i in range(0, iternim):
    X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.1)
    model = KMeans(n_clusters=3)
    model.fit(X_train, y_train)  # KMeans ignores y; it clusters the features only
    # NOTE(review): the silhouette is computed from (X_test, y_test) — i.e.
    # the TRUE labels, not model.predict(X_test) — so the fitted model never
    # influences the score; confirm that this is intended.
    b = metrics.silhouette_score(X_test, y_test)
    if b >= 0.80:
        trainselect.append(X_train)
        trainselect.append(y_train)
        print(trainselect)
        print(b)
        print('Threshold is satisfied!')
        break
    a.append(b)
    if i == iternim-1:
        print('10000 iteration is passed and threshold is not satisfied. This is the best result!')
        print(max(a))
|
24,255 | 363b2a122b1a997dc4178e3368fb5776efd5fdc7 | # -*- encoding: utf-8 -*-
"""switch-level
Usage:
switch-level <stage_id>
switch-level (-h | --help)
switch-level (-v|--version)
Options:
-h --help Show this screen.
-v --version Show version.
"""
import git
from docopt import docopt
def main():
    """Discard local changes and check out the requested stage branch/tag."""
    arguments = docopt(__doc__, version='checker 0.1.0')
    if arguments['<stage_id>']:
        repo = git.Repo('.')
        runner = repo.git
        # Any dirty working tree is reset before switching stages.
        if repo.index.diff(None):
            runner.reset('--hard', 'HEAD')
            runner.clean('-fd')
        runner.checkout('{}'.format(arguments['<stage_id>']))
        runner.pull()
# Script entry point.
if __name__ == "__main__":
    main()
|
24,256 | 89d7f3d6df30d9fb5833ccdeb598f56ab847c8e9 | from data import BUSINESSES, REVIEWS
import recommender
import pandas as pd
import numpy as np
import math
def incl_city_business(user_id, business_id, city):
    """Combined content-based + item-based recommender within one city.

    Candidates are open businesses (more than 9 reviews) in `city` that
    share at least one category with `business_id`; each candidate gets a
    predicted rating for `user_id` via item-item collaborative filtering.
    Returns (records without predictions, records with predictions), both
    sorted by predicted rating, as lists of dicts.
    """
    frame1 = pd.concat([pd.DataFrame(REVIEWS[x]) for x in REVIEWS if x == city])
    # FIX: DataFrame.append was removed in pandas 2.0 — collect row dicts
    # and build the frame once (also avoids the O(n^2) repeated copies).
    candidate_rows = []
    for business1 in BUSINESSES[city]:
        for business2 in BUSINESSES[city]:
            if business2['business_id'] != business_id and business1["business_id"] == business_id:
                if business1['categories'] is not None and business2['categories'] is not None:
                    if business2['is_open'] == 1 and business2['review_count'] > 9:
                        if any(x in business2["categories"].split(', ') for x in business1["categories"].split(', ')):
                            candidate_rows.append(business2)
    businesses = pd.DataFrame(candidate_rows)
    # Drop earlier reviews when a user reviewed a business more than once.
    frame2 = frame1.drop_duplicates(subset=["user_id", "business_id"], keep='last', inplace=False)
    utility_matrix = pivot_reviews(frame2)
    similarity = create_similarity_matrix_euclid(utility_matrix)
    for business in businesses.index:
        neighborhood = select_neighborhood(similarity, utility_matrix, user_id, businesses.loc[business]["business_id"])
        prediction = weighted_mean(neighborhood, utility_matrix, user_id)
        # FIX: .ix was removed in pandas 1.0; use label-based .loc.
        businesses.loc[business, 'predicted rating'] = prediction
    sorted_prediction = businesses.sort_values(by=['predicted rating'], ascending=False)
    sorted_prediction2 = sorted_prediction.drop(columns=['predicted rating'])
    sorted_prediction2 = sorted_prediction2.reset_index()
    sorted_prediction3 = sorted_prediction.reset_index()
    return sorted_prediction2.to_dict(orient='records'), sorted_prediction3.to_dict(orient='records')
def itembase(user_id):
    """Item-based recommender over businesses outside the user's city.

    Returns (records without predictions, records with predictions), both
    sorted by predicted rating, as lists of dicts.
    """
    frame1 = pd.concat([pd.DataFrame(REVIEWS[x]) for x in REVIEWS])
    filtered_data = recommender.filtering_not_city()
    businesses = pd.DataFrame(filtered_data).set_index('business_id')
    # Keep only a user's latest review per business.
    frame2 = frame1.drop_duplicates(subset=["user_id", "business_id"], keep='last', inplace=False)
    utility_matrix = pivot_reviews(frame2)
    similarity = create_similarity_matrix_euclid(utility_matrix)
    for business in businesses.index:
        neighborhood = select_neighborhood(similarity, utility_matrix, user_id, business)
        prediction = weighted_mean(neighborhood, utility_matrix, user_id)
        # FIX: .ix was removed in pandas 1.0; use label-based .loc.
        businesses.loc[business, 'predicted rating'] = prediction
    sorted_prediction = businesses.sort_values(by=['predicted rating'], ascending=False)
    sorted_prediction2 = sorted_prediction.drop(columns=['predicted rating'])
    sorted_prediction2 = sorted_prediction2.reset_index()
    sorted_prediction3 = sorted_prediction.reset_index()
    return sorted_prediction2.to_dict(orient='records'), sorted_prediction3.to_dict(orient='records')
def get_review(reviews, userId, BusinessId):
    """Return the star rating userId gave BusinessId, or NaN if none.

    When the user reviewed the business more than once, the highest
    rating wins. Always returns a plain float (or NaN).
    """
    matches = reviews[(reviews['business_id'] == BusinessId) & (reviews['user_id'] == userId)]
    if matches.empty:
        return np.nan
    if len(matches) > 1:
        return float(matches['stars'].max())
    # FIX: float(single-element Series) is deprecated/removed in modern
    # pandas; extract the scalar explicitly.
    return float(matches['stars'].iloc[0])
def pivot_reviews(reviews):
    """Build the (business x user) utility matrix of star ratings.

    Vectorized replacement for the original per-cell loop, which called
    get_review once per (business, user) pair — O(B*U) scans of the review
    frame. Duplicate (user, business) pairs keep the highest rating,
    matching get_review; unrated pairs are NaN.
    """
    businessIds = reviews['business_id'].unique()
    userIds = reviews['user_id'].unique()
    pivot = reviews.pivot_table(index='business_id', columns='user_id',
                                values='stars', aggfunc='max')
    # Restore the original first-seen row/column order and unnamed axes.
    pivot = pivot.reindex(index=businessIds, columns=userIds)
    pivot = pivot.rename_axis(index=None, columns=None)
    return pivot.astype(float)
def similarity_euclid(matrix, business1, business2):
    """Euclidean similarity in (0, 1] between two businesses.

    Only features (users) rated by both businesses are compared; returns 0
    when they share no rated features.
    """
    selected_features = matrix.loc[business1].notna() & matrix.loc[business2].notna()
    if not selected_features.any():
        return 0
    features1 = matrix.loc[business1][selected_features]
    features2 = matrix.loc[business2][selected_features]
    distance = math.sqrt(((features1 - features2) ** 2).sum())
    # FIX: `distance is np.nan` was an identity comparison that can never be
    # true for a freshly computed float; use a proper NaN test.
    if math.isnan(distance):
        return 0
    return 1 / (1 + distance)
def create_similarity_matrix_euclid(matrix):
    """Create the business-by-business similarity matrix (euclidean)."""
    similarity_matrix_euclid = pd.DataFrame(0, index=matrix.index, columns=matrix.index, dtype=float)
    for business1 in matrix.index:
        for business2 in matrix.index:
            # FIX: df[col][row] = x is chained-indexing assignment, which
            # warns today and is silently lost under pandas copy-on-write;
            # write through .loc (row=business2, column=business1, matching
            # the original orientation).
            similarity_matrix_euclid.loc[business2, business1] = similarity_euclid(matrix, business1, business2)
    return similarity_matrix_euclid
def select_neighborhood(similarity_matrix, utility_matrix, target_user, target_business):
    """Return a Series of similarities (> 0) to target_business for every
    business the target user has rated."""
    rated = utility_matrix[target_user].dropna()
    items_dict = {}
    for business in rated.index:
        sim = similarity_matrix[business][target_business]
        # FIX: the original also required the rating itself to be truthy,
        # silently dropping 0-valued ratings; dropna() above has already
        # removed the genuinely missing entries.
        if sim > 0:
            items_dict[business] = sim
    return pd.Series(items_dict)
def weighted_mean(neighborhood, utility_matrix, user_id):
    """Similarity-weighted mean of the user's ratings over the neighborhood.

    Returns 0 when the neighborhood carries no weight at all.
    """
    total_weight = neighborhood.sum()
    if total_weight == 0:
        return 0
    weighted_ratings = (utility_matrix[user_id] * neighborhood).sum()
    return weighted_ratings / total_weight
|
24,257 | 217190e4e55366f2dc81c031e1046ec5e6852bae | # Training with DQN on 'CartPole-v1' environment.
# Qingyuan Jiang. May. 26th. 2020
#
if __name__ == '__main__':
    import torch
    from dqn.train_dqn import dqn_algo
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Training is forced onto the CPU; the torch import above is only
    # needed if the GPU-selection line is re-enabled.
    device = "cpu"
    # Training parameters.
    dqn_algo(env_name='CartPole-v1', device=device,
             steps_per_epoch=1000, epochs=200, max_ep_len=1000, replay_size=int(1e4), batch_size=256,
             start_steps=1000, update_after=1000, update_every=20, target_freq=5,
             gamma=0.999, epsilon_start=0.8, epsilon_final=0.1, save_freq=5)
|
24,258 | 3fdf65203a646a9ec19d33ee1f162b5fdac81e21 | """Manages all bot commands."""
import lib.irc
cmds = {}
private_cmds = {}
class CommandMessage(lib.irc.Message):
def __init__(self, msg, cmd_char):
super().__init__(msg.content, private=msg.private)
self.cmd_char = cmd_char
self.channel = msg.channel
self.sender = msg.sender
self.bot = None
self.cmd = None
self.params = None
if msg.content.startswith(cmd_char):
content = msg.content[1:]
if ' ' in content:
self.cmd, self.params = content.split(' ', 1)
else:
self.cmd = content
def is_command(self):
return self.cmd is not None
class command(object):
"""Decorator defining a bot command.
The alias parameter takes a str or list of aliases. If the command should
not be available via private msg, invoke the decorator with private=False.
"""
def __init__(self, alias=None, private=True):
self.alias = alias
self.private = private
def __call__(self, function):
if self.alias is None:
self.alias = (function.__name__,)
else:
self.alias.append(function.__name__)
for name in self.alias:
cmds[name] = function
if self.private:
private_cmds[name] = function
return function
def execute(cmd, msg, private=False):
    """Run a command (or private command) by name; None if unknown."""
    registry = private_cmds if private else cmds
    handler = registry.get(cmd)
    if handler is not None:
        return handler(msg)
|
24,259 | d0c1218571c33542030546dc33c498503465c3cc | # -*- coding: utf-8 -*-
# -*- mode: python; -*-
"""exec" "`dirname \"$0\"`/call.sh" "$0" "$@"; """
from __future__ import print_function
import shelve
import sys
import os.path
import json
import hashlib
import random
import util
__doc__ = """
Created on 2015-03-02
@author: joschi
@author: razavian
"""
def writeRow(cols, out, start, length, colZero):
    """Write one delimited row to out['out'].

    The row starts with colZero, is left-padded with `start` empty cells,
    then carries `cols`, then enough trailing delimiters to reach `length`
    columns. Cells containing the delimiter or quote char are quoted, with
    embedded quotes doubled.
    """
    delim = out['delim']
    quote = out['quote']

    def quoted(cell):
        cell = str(cell)
        if cell.find(delim) < 0 and cell.find(quote) < 0:
            return cell
        return quote + cell.replace(quote, quote + quote) + quote

    row = quoted(colZero) + delim
    if start > 0:
        row += start * delim
    row += delim.join(map(quoted, cols))
    trailing = length - start - len(cols)
    if trailing > 0:
        row += trailing * delim
    print(row, file=out['out'])
def openDB(pid, settings, out, writeHeader):
    """Load one patient's record from the shelve DB and describe its rows.

    FIX: the second parameter was named `data`, but the body read the
    undefined global `settings` (NameError at runtime) and immediately
    clobbered the parameter; callers pass the settings dict here, so the
    parameter is now named accordingly and `data` is a local.

    Returns (row_definitions, total column count, all header names) and,
    when writeHeader is true, emits the combined header row first.
    """
    db = shelve.open(settings['database'])
    data = db[pid.strip()]
    all_hdrs = [
    ]
    def processHeader(file, db_key):
        # Read one pipe-separated header file and register its columns,
        # prefixed with the section key (e.g. ELIG_...).
        hdrs = []
        with open(file, 'r') as hnd:
            hdrs = hnd.read().strip().split(settings['hdr_split'])
        skip = -1
        start = len(all_hdrs)
        for ix, head in enumerate(hdrs):
            if head == settings['join_id']:
                skip = ix  # position of the join-id column, dropped per row
            else:
                all_hdrs.append(db_key + '_' + head)
        return {
            'skip': skip,
            'start': start,
            'data': data[db_key],
            'col_num': len(all_hdrs) - start
        }
    row_definitions = [
        processHeader(settings['header_elig'], 'ELIG'),
        processHeader(settings['header_encs'], 'ENCS'),
        processHeader(settings['header_lab_rsl'], 'LAB_RSL'),
        processHeader(settings['header_med_clms'], 'MED_CLMS'),
        processHeader(settings['header_rx_clms'], 'RX_CLMS'),
    ]
    db.close()
    if writeHeader:
        writeRow(all_hdrs, out, 0, len(all_hdrs), settings['join_id'])
    return (row_definitions, len(all_hdrs), all_hdrs)
def readShelve(pid, settings, output):
    """Dump one patient (or all with pid == '--all') from the shelve DB as
    delimited rows, optionally anonymizing ids, ages, and dates.

    NOTE(review): this function uses Python-2-only constructs (`xrange`,
    hashing a str directly) — confirm the target runtime before porting.
    """
    pids = [ pid ]
    if pid == '--all':
        pids = getAll(settings)
    out = {
        'delim': settings['delim'],
        'quote': settings['quote'],
        'out': output
    }
    anonymize = settings['anonymize']['do']
    first = True  # header row is only written for the first patient
    for patientId in pids:
        if anonymize:
            # Replace the real id with a digest, and draw non-zero random
            # shifts applied uniformly to this patient's ages and dates.
            realId = hashlib.sha1(patientId).hexdigest()
            age_shift = 0
            while age_shift == 0:
                age_shift = random.randint(-10, 10)
            date_shift = 0
            while date_shift == 0:
                date_shift = random.randint(-365 * 10, 365 * 10)
        else:
            realId = patientId
        join_id = settings['join_id']
        splitter = settings['row_split']
        (row_defs, length, all_hdrs) = openDB(patientId, settings, out, first)
        first = False
        for row_def in row_defs:
            start = row_def['start']
            col_num = row_def['col_num']
            skip = row_def['skip']
            # manipulation ixs apply before skipping
            age_ixs = [ ix - start for ix in xrange(start, start + col_num) if all_hdrs[ix] in settings['anonymize']['age_columns'] ]
            date_ixs = [ ix - start for ix in xrange(start, start + col_num) if all_hdrs[ix] in settings['anonymize']['date_columns'] ]
            redact_ixs = [ ix - start for ix in xrange(start, start + col_num) if all_hdrs[ix] in settings['anonymize']['redact_columns'] ]
            for row in row_def['data']:
                if row == '':
                    continue
                values = row.strip().split(splitter)
                if anonymize:
                    for ix in age_ixs:
                        values[ix] = str(int(values[ix]) + age_shift)
                    for ix in date_ixs:
                        values[ix] = util.from_time(util.shift_days(util.toTime(values[ix]), date_shift))
                    for ix in redact_ixs:
                        values[ix] = ''
                # Drop the join-id column and sanity-check the row before
                # emitting it at this section's column offset.
                id = values.pop(skip)
                if len(values) != col_num:
                    print("column mismatch! expected {0} got {1}: {2}".format(str(col_num), str(len(values)), row), file=sys.stderr)
                    continue
                if id != patientId:
                    print("unexpected id! expected {0} got {1}: {2}".format(patientId, id, row))
                    continue
                writeRow(values, out, start, length, realId)
def getAll(settings):
    """Collect patient ids: the first token of every non-blank line in each
    configured shelve id file.

    FIX: the original tested `line == ''`, which never matches while
    iterating a file (lines keep their newline), so a blank line crashed
    with IndexError on split()[0]. Strip first, then skip empties.
    """
    pids = []
    for file in settings['shelve_id_files']:
        with open(file, 'r') as f:
            for line in f:
                stripped = line.strip()
                if not stripped:
                    continue
                pids.append(stripped.split()[0])
    return pids
def printList(settings):
    """Print the first token of every non-blank line in each id file.

    FIX: same latent bug as getAll — `line == ''` never matches while
    iterating a file, so a blank line crashed on split()[0].
    """
    for file in settings['shelve_id_files']:
        with open(file, 'r') as f:
            for line in f:
                stripped = line.strip()
                if not stripped:
                    continue
                print(stripped.split()[0], file=sys.stdout)
### argument API
def usage():
    """Print CLI usage to stderr and terminate with exit status 1."""
    text = """
{0}: --all | -p <pid> -c <config> -o <output> [--seed <seed>] [-h|--help] | [-l|--list]
-h|--help: prints this help
--all: print all patients
-p <pid>: specify patient id
-c <config>: specify config file
-o <output>: specify output file. '-' uses standard out
--seed <seed>: specifies the seed for the rng. if omitted the seed is not set. needs to be integer
-l|--list: prints all available patient ids and exits
"""
    print(text.strip().format(sys.argv[0]), file=sys.stderr)
    sys.exit(1)
def interpretArgs():
    """Parse command line arguments.

    Returns:
        (settings, info): ``settings`` holds the configuration (defaults
        below, optionally overridden by a config file given via -c);
        ``info`` holds the patient id ('--all' for all patients) and the
        output destination ('-' for stdout).

    Exits via usage() on malformed arguments, or after printing the id
    list when -l/--list is given.
    """
    settings = {
        'delim': ',',
        'quote': '"',
        'hdr_split': '|',
        'row_split': '|',
        'database': 'db/members.db',
        'header_elig': 'code/headers/elig.hdr',
        'header_encs': 'code/headers/encs.hdr',
        'header_lab_rsl': 'code/headers/lab_rsl.hdr',
        'header_med_clms': 'code/headers/med_clms.hdr',
        'header_rx_clms': 'code/headers/rx_clms.hdr',
        'join_id': 'MEMBER_ID',
        'shelve_id_files': [
            'code/db/set_myeloma.txt',
            'code/db/set_diabetes.txt'
        ],
        'anonymize': {
            'do': False,
            'date_columns': [
                'ELIG_EFFECTIVE_DATE',
                'ELIG_TERMINATION_DATE',
                'ENCS_SERVICE_DATE',
                'ENCS_PAID_DATE',
                'ENCS_ADMIT_DATE',
                'ENCS_DISCHARGE_DATE',
                'LAB_RSL_SERVICE_DATE',
                'MED_CLMS_SERVICE_DATE',
                'MED_CLMS_PAID_DATE',
                'MED_CLMS_ADMIT_DATE',
                'MED_CLMS_DISCHARGE_DATE',
                'RX_CLMS_SERVICE_DATE',
                'RX_CLMS_PAID_DATE',
                'RX_CLMS_PRESCRIPTION_DATE'
            ],
            'age_columns': [
                'ELIG_AGE',
                'LAB_RSL_AGE',
                'RX_CLMS_AGE'
            ],
            'redact_columns': [
                'ELIG_PATIENT_KEY',
                'ELIG_OLD_MEMBER_ID',
                'ELIG_SUBSCRIBER_ID',
                'ELIG_ZIP',
                'ELIG_COUNTRY_CODE',
                'ELIG_PCP_ID',
                'ELIG_GROUP_ID',
                'ELIG_SUB_GROUP_ID',
                'ELIG_PLAN_ID',
                'LAB_RSL_SUBSCRIBER_ID'
            ]
        }
    }
    info = {
        'pid': '',
        'output': '-'
    }
    args = sys.argv[:]
    args.pop(0)  # drop the program name
    do_list = False
    while args:
        val = args.pop(0)
        if val == '-h' or val == '--help':
            usage()
        elif val == '-l' or val == '--list':
            do_list = True
        elif val == '-p':
            if not args:
                print('-p requires argument', file=sys.stderr)
                usage()
            info['pid'] = args.pop(0)
        elif val == '--all':
            # sentinel pid understood by the callers as "every patient"
            info['pid'] = '--all'
        elif val == '-c':
            if not args:
                print('-c requires argument', file=sys.stderr)
                usage()
            util.read_config(settings, args.pop(0))
        elif val == '-o':
            if not args:
                print('-o requires argument', file=sys.stderr)
                usage()
            info['output'] = args.pop(0)
        elif val == '--seed':
            # BUG FIX: this branch previously compared the undefined name
            # `arg` (NameError as soon as any unmatched option reached it);
            # it must compare `val` like every other branch.
            if not args:
                print('--seed requires integer seed', file=sys.stderr)
                usage()
            try:
                seed = int(args.pop(0))
                random.seed(seed)
            except ValueError:
                # narrowed from a bare except: only a non-integer seed is
                # expected to fail here
                print('--seed requires integer seed', file=sys.stderr)
                usage()
        else:
            print('illegal argument '+val, file=sys.stderr)
            usage()
    if do_list:
        printList(settings)
        sys.exit(0)
    if info['pid'] == '':
        print('patient id required', file=sys.stderr)
        usage()
    return (settings, info)
if __name__ == '__main__':
    # Parse the CLI arguments, then stream the requested patient's rows to
    # the chosen output destination ('-' means stdout via util.OutWrapper).
    (settings, info) = interpretArgs()
    with util.OutWrapper(info['output']) as output:
        readShelve(info['pid'], settings, output)
|
24,260 | 643773d6ac1eeb4930cd3638c175845582e309d0 | N = int(input())
l = tuple(map(int, input().split()))
cnt = 0
for i in range(N):
if l[l[i]-1] == i+1:
cnt += 1
print(cnt//2) |
24,261 | e3e815235ed6dbe2cae685f7f447afd3cdd8309c | from db.models import *
from .helpers import unpack_query_objects, stringify_object
@db_session
def create_product(product: dict)->dict:
    """Create a Product row from the field values in *product* and return it
    as a plain serializable dict."""
    new_product = Product(**product)
    # commit inside the session so DB-generated values (e.g. the primary
    # key) are populated before the object is stringified
    commit()
    return stringify_object(new_product)
@db_session
def product_list_by_shop(shop: int)-> list:
    # TODO: not implemented yet -- always returns an empty list
    return []
@db_session
def product_list_by_category(category: int)-> list:
    # TODO: not implemented yet -- always returns an empty list
    return []
|
24,262 | a296553e5eec73c12c05bac0d060112015f2d56a | from numpy import *
import numpy as np
def solve_stdLP(A,C,b):
    """Two-phase simplex driver: phase 1 minimizes the sum of artificial
    variables to find a feasible basis, then phase 2 solves the original
    problem via standard_lp_solve with switch=True (prints the solution)."""
    control=True
    m,n=A.shape
    # append an m x m identity block of artificial variables to A
    a=np.column_stack([A,np.mat(eye(m))])
    # phase-1 cost: zero for original variables, one for artificials
    c=np.concatenate((np.mat(zeros(C.shape)),np.mat(ones((m,1)))))
    # 1-based indices of the artificial variables, the initial basis
    index_list_b=[i for i in range(n+1,n+m+1,1)]
    while control:
        judge,indices=standard_lp_solve(a,c,b,index_list_b)
        if judge>1e-6:
            # phase-1 optimum > 0: no feasible point exists
            control=False
            print('Infeasible Solution')
        elif judge<1e-6 and max(indices)+1<n+1:
            # all artificials have left the basis: run phase 2 on the
            # original problem starting from this basis
            control=False
            standard_lp_solve(A,C,b,[index+1 for index in indices],switch=True)
        else:
            # an artificial variable is still basic: pivot it out
            temp_b=a[:,indices].copy()
            c_b=np.mat(zeros((len(indices),1)))
            c_b=c[indices,:]
            # reduced costs (row vector)
            eta=c_b.T*temp_b.I*a-c.T
            temp_index=np.argmax(eta[0,:])+1
            indices,count=find_next(a,temp_b,b,temp_index,indices)
            # NOTE(review): numpy delete() returns a NEW array and does not
            # modify `a` in place, so this statement has no effect -- the
            # intent was probably `a = delete(a, count, axis=1)` (which
            # would also shift column indices). TODO confirm and fix.
            delete(a,count,axis=1)
def standard_lp_solve(A,C,b,index_list_B=False,switch=False):
    #index_list_B is a list which contains the index of feasible base
    #A is a matrix,and C is the cost matrix(a column vector)
    # NOTE(review): the default index_list_B=False is iterated below, so in
    # practice callers must always pass a list of 1-based basis indices.
    # When switch is True, the solution is printed; the function returns
    # (objective value, 0-based basis indices), or 0 when unbounded.
    m,n=A.shape
    x=np.mat(zeros(C.shape)).T
    while True:
        # translate the 1-based basis indices to 0-based column indices
        indices=[]
        for index in index_list_B:
            indices.append(index-1)
        B=A[:,indices].copy()
        C_B=np.mat(zeros((len(indices),1)))
        C_B=C[indices,:]
        #eta is a row vector
        # reduced costs of all columns for the current basis
        eta=C_B.T*B.I*A-C.T
        # 1-based index of the column with the largest reduced cost
        temp_index=np.argmax(eta[0,:])+1
        if eta.max()<=1e-6:
            # optimal: compute the basic solution and the objective value
            for i in range(m):
                x[:,indices[i]]=(B.I*b.T)[i]
            z=x*C
            if switch:
                for index in indices:
                    print('x'+str(int(index+1))+'='+str(float(x[:,index])))
                print('else=0')
                print('z='+str(float(z)))
            return z,indices
        elif (B.I*(A[:,temp_index-1])).max()<=1e-6:
            # entering column has no positive component: objective unbounded
            print('Unbounded')
            return 0
        else:
            # pivot: min-ratio test picks the leaving variable
            index_list_B,count=find_next(A,B,b,temp_index,index_list_B)
def find_next(A,B,b,temp_index,index_list_B):
    """Simplex min-ratio test: pick the leaving basic variable for entering
    column *temp_index* (1-based) and write temp_index into its slot of
    index_list_B (mutated in place).

    Returns (index_list_B, row index of the leaving variable).
    """
    direction = B.I * (A[:, temp_index - 1])
    rhs = B.I * b.T
    best_ratio = inf
    for row in range(len(direction)):
        if direction[row] > 0:
            ratio = float(rhs[row] / direction[row])
            if best_ratio > ratio:
                best_ratio = ratio
                chosen = row
    # like the original, `chosen` is deliberately left unbound when no
    # component of `direction` is positive (callers rule that case out)
    index_list_B[chosen] = temp_index
    return index_list_B, chosen
def Solve():
    # Example problem for the two-phase simplex driver: objective C, with
    # constraint matrix A (surplus variables already appended) and rhs b.
    b=np.mat([24,8])
    A=np.mat([[2,4,10,-1,0],[5,1,5,0,-1]])
    C=np.mat([4,2,6,0,0]).T
    solve_stdLP(A,C,b)
Solve()
|
24,263 | 74fe55a2b59177101f23e90e78797a6c3440895f | # purpose: kerasによるCNNの画像識別テスト 予測編
# author: Katsuhiro MORISHITA 森下功啓
# memo:
# created: 2018-08-15
from keras.models import load_model
import pandas as pd
import numpy as np
import pickle
import os
from mlcore import *
def main():
    # Load the images (variables that are not needed get a "dummy" suffix)
    label_dict, param = restore(['label_dict.pickle', 'param.pickle'])
    param["dir_names_dict"] = {"yellow":["sample_image_flower/1_test"], # if the true class is unknown, replace the key with an arbitrary string
                               "white":["sample_image_flower/2_test"]}
    x, y, weights_dict_dummy, label_dict_dummy, output_dim_dummy, file_names = read_images1(param) # when reading images only for prediction, output_dim/label_dict may not be appropriate, so treat them as dummies (unused)
    # Restore the trained model
    model = load_model('model.hdf5')
    # Predict and save the results
    th = 0.4 # likelihood threshold
    result_raw = model.predict(x, batch_size=len(x), verbose=0) # per-class likelihoods; one array of likelihoods per record
    result_list = [len(arr) if np.max(arr) < th else arr.argmax() for arr in result_raw] # index of the max likelihood per record; below th the record is treated as "ND"
    predicted_classes = np.array([label_dict[class_id] for class_id in result_list]) # map predicted class ids to labels
    print("test result: ", predicted_classes)
    correct_classse = [label_dict[z] for z in y] # map true class ids to labels
    save_validation_table(predicted_classes, correct_classse, label_dict)
    # NOTE(review): "classse"/"predicited" below are misspelled but are the
    # CSV column names the downstream tooling may rely on -- left unchanged.
    df = pd.DataFrame()
    df["file name"] = file_names
    df["correct classse"] = correct_classse
    df["predicited classes"] = predicted_classes
    df.to_csv("prediction_result.csv", index=False, encoding="utf-8-sig")
if __name__ == "__main__":
main() |
24,264 | 6851a5502a42fec8717935c70f18d26dce221d13 | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/aghast/blob/master/LICENSE
import numpy
from aghast import *
import aghast.interface
def binning2array(binning):
    """Return the bin edges of *binning* as a Numpy edges array, prepending
    -inf and/or appending +inf when the binning carries underflow/overflow
    bins."""
    if not isinstance(binning, EdgesBinning):
        if not hasattr(binning, "toEdgesBinning"):
            raise TypeError(
                "cannot convert {0} to a Numpy binning".format(type(binning).__name__)
            )
        binning = binning.toEdgesBinning()

    overflow = binning.overflow
    has_under = (
        overflow is not None and overflow.loc_underflow != BinLocation.nonexistent
    )
    has_over = (
        overflow is not None and overflow.loc_overflow != BinLocation.nonexistent
    )

    edges = binning.edges
    if not has_under and not has_over:
        return edges

    # pad with one slot per flow bin and fill the sentinels
    out = numpy.empty(len(edges) + has_under + has_over, dtype=edges.dtype)
    if has_under:
        out[0] = -numpy.inf
    if has_over:
        out[-1] = numpy.inf
    out[has_under:len(edges) + has_under] = edges
    return out
def to_numpy(obj):
    """Convert an aghast Histogram to the numpy.histogram/histogram2d
    convention: (counts, edges) for 1-d, (counts, xedges, yedges) for 2-d,
    (counts, [edges...]) otherwise."""
    if isinstance(obj, Histogram):
        edges = [binning2array(x.binning) for x in obj.axis]
        # NOTE(review): the ±inf slice bounds appear to tell aghast's counts
        # accessor to include flow bins -- nonstandard slicing, confirm
        # against the aghast interface before changing.
        slices = ()
        for x in edges:
            start = -numpy.inf if x[0] == -numpy.inf else None
            stop = numpy.inf if x[-1] == numpy.inf else None
            slices = slices + (slice(start, stop),)
        counts = obj.counts[slices]
        # weighted counts come back as a dict; keep only the weight sums
        if isinstance(counts, dict):
            counts = counts["sumw"]
        if len(edges) == 1:
            return counts, edges[0]
        elif len(edges) == 2:
            return counts, edges[0], edges[1]
        else:
            return counts, edges
    else:
        raise TypeError(
            "cannot convert {0} to a Numpy histogram".format(type(obj).__name__)
        )
def array2counts(array):
    """Wrap *array* in UnweightedCounts when it holds only non-negative
    integers, otherwise in WeightedCounts."""
    buffer = InterpretedInlineBuffer.fromarray(array)
    is_unweighted = issubclass(array.dtype.type, numpy.integer) and (array >= 0).all()
    if is_unweighted:
        return UnweightedCounts(buffer)
    return WeightedCounts(buffer)
def array2binning(array):
    """Build an aghast binning from a monotonically increasing edges array.

    Returns a RegularBinning when the bin widths are uniform to within a
    relative tolerance, otherwise an EdgesBinning.

    Raises ValueError for arrays shorter than 2 or non-monotonic arrays.
    """
    if len(array) <= 1:
        raise ValueError(
            "binning array must have at least 2 elements: {0}".format(repr(array))
        )
    if not (array[1:] >= array[:-1]).all():
        raise ValueError(
            "binning array must be monotonically increasing: {0}".format(repr(array))
        )
    widths = array[1:] - array[:-1]
    tolerance = 1e-10 * (array[-1] - array[0])
    if (numpy.absolute(widths - widths.mean()) < tolerance).all():
        return RegularBinning(len(array) - 1, RealInterval(array[0], array[-1]))
    return EdgesBinning(array)
def from_numpy(obj):
    """Convert a Numpy-convention histogram tuple back into an aghast
    Histogram: (counts, edges), (counts, xedges, yedges), or
    (counts, [edges...]) for n-d."""
    if (
        isinstance(obj, tuple)
        and len(obj) == 2
        and isinstance(obj[0], numpy.ndarray)
        and isinstance(obj[1], numpy.ndarray)
    ):
        # 1-d: numpy.histogram output
        counts, edges = obj
        return Histogram([Axis(array2binning(edges))], array2counts(counts))
    elif (
        isinstance(obj, tuple)
        and len(obj) == 3
        and isinstance(obj[0], numpy.ndarray)
        and isinstance(obj[1], numpy.ndarray)
        and isinstance(obj[2], numpy.ndarray)
    ):
        # 2-d: numpy.histogram2d output
        counts, xedges, yedges = obj
        return Histogram(
            [Axis(array2binning(xedges)), Axis(array2binning(yedges))],
            array2counts(counts),
        )
    elif (
        isinstance(obj, tuple)
        and len(obj) == 2
        and isinstance(obj[0], numpy.ndarray)
        and isinstance(obj[1], list)
        and all(isinstance(x, numpy.ndarray) for x in obj[1])
    ):
        # n-d: numpy.histogramdd output (list of edge arrays)
        counts, edges = obj
        return Histogram([Axis(array2binning(x)) for x in edges], array2counts(counts))
    else:
        raise TypeError("not a recognized Numpy histogram type")
|
24,265 | 2d783815a804419bf5524f9c88ad594d45696068 | import importlib.util
# Load the Gerber module straight from its file path, bypassing the normal
# package import machinery.
# NOTE(review): the path uses backslashes in a non-raw string; '\g' and '\G'
# happen not to be escape sequences today, but a raw string or forward
# slashes would be safer.
spec = importlib.util.spec_from_file_location(
    "Gerber", ".\gerber_renderer\Gerber.py")
Gerber = importlib.util.module_from_spec(spec)
spec.loader.exec_module(Gerber)
# Render a sample board and export several PDF layers with per-layer
# mirroring/offset tweaks.
board = Gerber.Board('./tests/gerber3.zip', verbose=True)
board.render('./tests/output')
board.render_pdf('./tests/output', 'top_copper',
                 'white', scale_compensation=-0.206, full_page=True, offset=(200, -250))
board.render_pdf('./tests/output', 'bottom_copper',
                 'white', mirrored=True, scale_compensation=-0.206, full_page=True, offset=(0, -0))
board.render_pdf('./tests/output', 'top_mask',
                 'black', mirrored=True, scale_compensation=-0.206, full_page=True)
board.render_pdf('./tests/output', 'top_mask',
                 'black', scale_compensation=-0.206, full_page=True)
# board.render_pdf('./tests/output', 'top_silk', 'yellow')
|
24,266 | 8ef11c874c63741fc4cdc8d6dde2b8d312312338 | from lib.handlers.common.BaseHandler import WebBaseHandler
from lib.settings import *
class AddAnimeHandler(WebBaseHandler):
    """GET endpoint that adds an anime (given by ?aid=) to the logged-in
    user's watch list, caching the anime's details on first sight."""
    def GET(self):
        if not self.isLogin: return returnData(500, KeyErrorMessage)
        try:
            # aid must be an integer id; normalize it back to a string
            animeid = str(int(web.input(aid='').aid))
        except:
            return returnData(500, UnknowErrorMessage)
        if not animeid: return returnData(500, AnimeNotExistMessage)
        # NOTE: 'anmielist' is the (misspelled) table name used by the schema
        data = db.select('anmielist', where='animeid="%s"'%animeid)
        if len(data) == 0:
            # anime not cached locally yet: fetch its details and insert them
            anime = AnimeDataGetter()
            isSuccess = anime.getDetail(animeid)
            if not isSuccess: return returnData(500, AnimeNotExistMessage)
            db.insert('anmielist', animename = anime.AnimeTitle, \
                animeid = anime.AnimeAid, episode = anime.AnimeEpiCount,\
                isover = anime.AnimeIsOver, poster = anime.AnimePoster, \
                detail = anime.AnimeIntro)
        if animeid in self.animelist: return returnData(500, AddRepateMessage)
        # prepend the new anime id to the user's '|'-separated list and
        # initialize its read flag and episode-progress tracker
        db.update('user', where='id=%d'%self.uid, animelist=animeid + '|' + \
            self.animestr, isread='0'+str(self.isreadstr), epilook = '0|' + self.epistr)
        return returnData()
class DelAnimeHandler(WebBaseHandler):
    """GET endpoint that removes an anime (given by ?aid=) from the
    logged-in user's watch list and its per-anime trackers."""
    def GET(self):
        if not self.isLogin: return returnData(500, KeyErrorMessage)
        try:
            animeid = str(int(web.input(aid='').aid))
        except:
            return returnData(500, UnknowErrorMessage)
        if not animeid: return returnData(500, AnimeNotExistMessage)
        try:
            # blank out this anime's episode-progress slot, then rebuild the
            # '|'-separated epilook string from the remaining entries
            epilook2 = ''
            self.epilook[self.animelist.index(animeid)] = ''
            for i in self.epilook:
                if i and epilook2:
                    epilook2 = epilook2 + '|' + i
                if i and not epilook2:
                    epilook2 = i
        except:
            # anime id not in the list (index() raised): keep epilook as-is
            epilook2 = '|'.join(self.epilook)
        if not animeid in self.animelist: return returnData(500, AnimeErrorMessage)
        self.isread[self.animelist.index(animeid)] = ''
        # drop the id (and its separator) from the '|'-separated anime list
        animeliststr = self.animestr.replace(animeid + '|', '')
        if epilook2:
            db.update('user', where="id=%d"%self.uid, animelist = animeliststr, \
                isread = ''.join(self.isread), epilook = epilook2 + '|')
        else:
            db.update('user', where="id=%d"%self.uid, animelist = animeliststr, \
                isread = ''.join(self.isread), epilook = epilook2)
        return returnData()
class EpiEditHandler(WebBaseHandler):
    """GET endpoint that records the episode number (?epi=) the logged-in
    user has watched for an anime (?aid=)."""
    def GET(self):
        if not self.isLogin: return returnData(500, KeyErrorMessage)
        webinput = web.input(aid='',epi='0')
        try:
            animeid = str(int(webinput.aid))
            epinum = str(int(webinput.epi))
        except:
            return returnData(500, UnknowErrorMessage)
        if not epinum or not animeid: return returnData(500, UnknowErrorMessage)
        # look up the anime's total episode count to validate the request
        episode = db.select('anmielist',what='episode',where='animeid=%s'%animeid)
        if not episode: return returnData(500, AnimeNotExistMessage)
        episode = episode[0].episode
        if not animeid in self.animelist: return returnData(500, AnimeErrorMessage)
        # reject episode numbers beyond the anime's episode count
        if int(episode) < int(epinum): return returnData(500, EpisodeErrorMessage)
        self.epilook[self.animelist.index(animeid)] = epinum
        db.update('user',where="id=%d"%self.uid,epilook='|'.join(self.epilook)+'|')
        return returnData()
24,267 | 3b5a54fb4674bd8a87b028c6584f2a6328886d2c | # Solution 1 - looking for min
def selection_sort1(arr):
    """In-place selection sort (ascending): repeatedly move the smallest
    remaining element to the front of the unsorted suffix, then print the
    sorted list."""
    for boundary in range(len(arr) - 1):
        smallest_at = boundary
        for j in range(boundary + 1, len(arr)):
            if arr[j] < arr[smallest_at]:
                smallest_at = j
        # tuple swap places the minimum at the sorted/unsorted boundary
        arr[boundary], arr[smallest_at] = arr[smallest_at], arr[boundary]
    print(arr)
# Solution 2 - looking for max
def selection_sort2(arr):
    """In-place selection sort (ascending): repeatedly move the largest
    element of the unsorted prefix into the last open slot, then print the
    sorted list."""
    for last in range(len(arr) - 1, 0, -1):
        largest_at = 0
        # scan positions 1..last for the maximum of the unsorted prefix
        for idx in range(1, last + 1):
            if arr[idx] > arr[largest_at]:
                largest_at = idx
        arr[last], arr[largest_at] = arr[largest_at], arr[last]
    print(arr)
|
24,268 | cae1ab93dfd77600080ba6125513f06e96e045f3 | #!/usr/bin/env python
import unittest
from trajectory.trajectory import NegativeTimeException, Trajectory
class TrajectoryTest(unittest.TestCase):
    """Unit tests for Trajectory's time handling."""
    def test_when_time_is_negative_then_an_exception_is_raised(self):
        # get_position_at must reject negative times with NegativeTimeException
        trajectory = Trajectory()
        self.assertRaises(NegativeTimeException, trajectory.get_position_at, -1)
|
24,269 | ac0fbca0b9f091099d8d1649f7fdc0dbf30412aa | # Generated by Django 3.0.3 on 2021-04-20 00:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Order.unique_code nullable/blankable
    # while keeping the uniqueness constraint (multiple NULLs are allowed).

    dependencies = [
        ('shop', '0006_auto_20210416_0112'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='unique_code',
            field=models.CharField(blank=True, max_length=4, null=True, unique=True),
        ),
    ]
|
24,270 | e610957cc9a5dfa118fb26d205ed4809b6a776f3 | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import newrelic.core.attribute as attribute
import newrelic.core.trace_node
from newrelic.common import system_info
from newrelic.core.database_utils import sql_statement, explain_plan
from newrelic.core.node_mixin import DatastoreNodeMixin
from newrelic.core.metric import TimeMetric
_SlowSqlNode = namedtuple('_SlowSqlNode',
['duration', 'path', 'request_uri', 'sql', 'sql_format',
'metric', 'dbapi2_module', 'stack_trace', 'connect_params',
'cursor_params', 'sql_parameters', 'execute_params',
'host', 'port_path_or_id', 'database_name', 'params'])
class SlowSqlNode(_SlowSqlNode):
    """Immutable record describing one slow SQL query, with a parsed SQL
    statement helper attached at construction time."""
    def __new__(cls, *args, **kwargs):
        node = _SlowSqlNode.__new__(cls, *args, **kwargs)
        # parse once; `formatted` and `identifier` derive from this object
        node.statement = sql_statement(node.sql, node.dbapi2_module)
        return node

    @property
    def formatted(self):
        # SQL text rendered according to the configured sql_format
        # (e.g. raw vs. obfuscated)
        return self.statement.formatted(self.sql_format)

    @property
    def identifier(self):
        return self.statement.identifier
_DatabaseNode = namedtuple('_DatabaseNode',
['dbapi2_module', 'sql', 'children', 'start_time', 'end_time',
'duration', 'exclusive', 'stack_trace', 'sql_format',
'connect_params', 'cursor_params', 'sql_parameters',
'execute_params', 'host', 'port_path_or_id', 'database_name',
'guid', 'agent_attributes', 'user_attributes'])
class DatabaseNode(_DatabaseNode, DatastoreNodeMixin):
    """Trace node for a database call; produces the datastore time metrics,
    slow SQL records, transaction trace nodes, and span events derived from
    one SQL statement execution."""
    def __new__(cls, *args, **kwargs):
        node = _DatabaseNode.__new__(cls, *args, **kwargs)
        # parse the SQL once; operation/target/formatted derive from it
        node.statement = sql_statement(node.sql, node.dbapi2_module)
        return node

    @property
    def product(self):
        # database product name registered on the instrumented DB-API module
        return self.dbapi2_module and self.dbapi2_module._nr_database_product
    @property
    def instance_hostname(self):
        # report the real hostname instead of localhost aliases
        if self.host in system_info.LOCALHOST_EQUIVALENTS:
            hostname = system_info.gethostname()
        else:
            hostname = self.host
        return hostname
    @property
    def operation(self):
        return self.statement.operation
    @property
    def target(self):
        return self.statement.target
    @property
    def formatted(self):
        return self.statement.formatted(self.sql_format)
    def explain_plan(self, connections):
        """Run the explain plan for this statement using the recorded
        connect/cursor/execute parameters."""
        return explain_plan(connections, self.statement, self.connect_params,
            self.cursor_params, self.sql_parameters, self.execute_params,
            self.sql_format)
    def time_metrics(self, stats, root, parent):
        """Return a generator yielding the timed metrics for this
        database node as well as all the child nodes.
        """
        product = self.product
        operation = self.operation or 'other'
        target = self.target
        # Determine the scoped metric
        statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,
            target, operation)
        operation_metric_name = 'Datastore/operation/%s/%s' % (product,
            operation)
        if target:
            scoped_metric_name = statement_metric_name
        else:
            scoped_metric_name = operation_metric_name
        yield TimeMetric(name=scoped_metric_name, scope=root.path,
            duration=self.duration, exclusive=self.exclusive)
        # Unscoped rollup metrics
        yield TimeMetric(name='Datastore/all', scope='',
            duration=self.duration, exclusive=self.exclusive)
        yield TimeMetric(name='Datastore/%s/all' % product, scope='',
            duration=self.duration, exclusive=self.exclusive)
        # web transactions roll up under allWeb, everything else allOther
        if root.type == 'WebTransaction':
            yield TimeMetric(name='Datastore/allWeb', scope='',
                duration=self.duration, exclusive=self.exclusive)
            yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',
                duration=self.duration, exclusive=self.exclusive)
        else:
            yield TimeMetric(name='Datastore/allOther', scope='',
                duration=self.duration, exclusive=self.exclusive)
            yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',
                duration=self.duration, exclusive=self.exclusive)
        # Unscoped operation metric
        yield TimeMetric(name=operation_metric_name, scope='',
            duration=self.duration, exclusive=self.exclusive)
        # Unscoped statement metric
        if target:
            yield TimeMetric(name=statement_metric_name, scope='',
                duration=self.duration, exclusive=self.exclusive)
        # Unscoped instance Metric
        if self.instance_hostname and self.port_path_or_id:
            instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,
                self.instance_hostname, self.port_path_or_id)
            yield TimeMetric(name=instance_metric_name, scope='',
                duration=self.duration, exclusive=self.exclusive)
    def slow_sql_node(self, stats, root):
        """Build the SlowSqlNode record for this query."""
        product = self.product
        operation = self.operation or 'other'
        target = self.target
        if target:
            name = 'Datastore/statement/%s/%s/%s' % (product, target,
                operation)
        else:
            name = 'Datastore/operation/%s/%s' % (product, operation)
        request_uri = ''
        if root.type == 'WebTransaction':
            request_uri = root.request_uri
        params = None
        if root.distributed_trace_intrinsics:
            params = root.distributed_trace_intrinsics.copy()
        # Note that we do not limit the length of the SQL at this
        # point as we will need the whole SQL query when doing an
        # explain plan. Only limit the length when sending the
        # formatted SQL up to the data collector.
        return SlowSqlNode(duration=self.duration, path=root.path,
            request_uri=request_uri, sql=self.sql,
            sql_format=self.sql_format, metric=name,
            dbapi2_module=self.dbapi2_module,
            stack_trace=self.stack_trace,
            connect_params=self.connect_params,
            cursor_params=self.cursor_params,
            sql_parameters=self.sql_parameters,
            execute_params=self.execute_params,
            host=self.instance_hostname,
            port_path_or_id=self.port_path_or_id,
            database_name=self.database_name,
            params=params)
    def trace_node(self, stats, root, connections):
        """Build the transaction trace node (including length-limited SQL,
        backtrace and optional explain plan) for this database call."""
        name = root.string_table.cache(self.name)
        start_time = newrelic.core.trace_node.node_start_time(root, self)
        end_time = newrelic.core.trace_node.node_end_time(root, self)
        children = []
        root.trace_node_count += 1
        sql = self.formatted
        # Agent attributes
        self.agent_attributes['db.instance'] = self.db_instance
        if sql:
            # Limit the length of any SQL that is reported back.
            limit = root.settings.agent_limits.sql_query_length_maximum
            self.agent_attributes['db.statement'] = sql[:limit]
        params = self.get_trace_segment_params(root.settings)
        # Only send datastore instance params if not empty.
        if self.host:
            params['host'] = self.instance_hostname
        if self.port_path_or_id:
            params['port_path_or_id'] = self.port_path_or_id
        # intern the (possibly truncated) SQL in the string table
        sql = params.get('db.statement')
        if sql:
            params['db.statement'] = root.string_table.cache(sql)
        if self.stack_trace:
            params['backtrace'] = [root.string_table.cache(x) for x in
                self.stack_trace]
        # Only perform an explain plan if this node ended up being
        # flagged to have an explain plan. This is applied when cap
        # on number of explain plans for whole harvest period is
        # applied across all transaction traces just prior to the
        # transaction traces being generated.
        if getattr(self, 'generate_explain_plan', None):
            explain_plan_data = self.explain_plan(connections)
            if explain_plan_data:
                params['explain_plan'] = explain_plan_data
        return newrelic.core.trace_node.TraceNode(start_time=start_time,
            end_time=end_time, name=name, params=params, children=children,
            label=None)
    def span_event(self, *args, **kwargs):
        """Attach the (truncated) SQL text before emitting the span event."""
        sql = self.formatted
        if sql:
            # Truncate to 2000 bytes and append ...
            _, sql = attribute.process_user_attribute(
                'db.statement', sql, max_length=2000, ending='...')
            self.agent_attributes['db.statement'] = sql
        return super(DatabaseNode, self).span_event(*args, **kwargs)
|
24,271 | f49f31d8757bddad21f18ac109b0a8ef41b6ac04 | #required encoding for scraping, otherwise defaults to unicode and screws things up
from bs4 import BeautifulSoup
import requests
import sys
import re
import pandas as pd
import pprint
import numpy as np
import csv, sys
import base64
import pymongo
from pymongo import MongoClient
from lxml import html
import csv,json
#from exceptions import ValueError
from time import sleep
import webbrowser
import selenium
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
import time
import os
# SECURITY: database credentials are hard-coded in the connection URI below;
# they should be moved to environment variables and the password rotated.
client = MongoClient('mongodb://heroku_4jtg3rvf:r9nq5ealpnfrlda5la4fj8r192@ds161503.mlab.com:61503/heroku_4jtg3rvf')
db = client['heroku_4jtg3rvf']
# initiating chrome driver --https://stackoverflow.com/questions/41059144/running-chromedriver-with-python-selenium-on-herokup
#GOOGLE_CHROME_BIN = '/app/.apt/usr/bin/google-chrome'
#CHROMEDRIVER_PATH = '/usr/bin/google-chrome'
chrome_exec_shim = os.environ.get("GOOGLE_CHROME_BIN", "chromedriver")
chrome_bin = os.environ.get('GOOGLE_CHROME_BIN', None)
sel_chrome = os.environ.get('GOOGLE_CHROME_SHIM', None)
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = sel_chrome
driver = webdriver.Chrome(chrome_options=chrome_options)
## used for local testing
#chromedriver = "/Users/crystalm/Downloads/chromedriver"
#os.environ["webdriver.chrome.driver"] = chromedriver
#chrome_options = webdriver.ChromeOptions()
#chrome_options.add_argument('--no-sandbox')
#driver = webdriver.Chrome(chromedriver, chrome_options=chrome_options)
# Log in to Amazon via the sign-in form.
# SECURITY: Amazon account email and password are hard-coded below; move
# them to environment variables and rotate the password.
driver.get('https://www.amazon.com/gp/sign-in.html')
time.sleep(1) # Let the user actually see something!
email = driver.find_element_by_name('email')
email.clear()
email.send_keys('crystal.wesnoski@gmail.com')
driver.find_element_by_id('continue').click()
password = driver.find_element_by_name('password')
password.clear()
password.send_keys('cw1992')
driver.find_element_by_name('rememberMe').click()
driver.find_element_by_id('signInSubmit').click()
time.sleep(2)
# Open a product page and grab its Amazon Associates short link.
driver.get('https://www.amazon.com/dp/B01MCULB3G')
time.sleep(5)
print('made it here')
# NOTE(review): 'nav-logo-base nav-sprite' contains a space, which is not a
# valid element id -- this looks like a class list; confirm the locator.
amazon = driver.find_element_by_id('nav-logo-base nav-sprite').text
print(amazon)
driver.find_element_by_id('amzn-ss-text-link').click()
print('made it here')
time.sleep(2)
url = driver.find_element_by_id("amzn-ss-text-shortlink-textarea").text
time.sleep(2)
driver.quit()
url  # NOTE(review): bare expression; has no effect outside a REPL
# Persist the scraped short link to MongoDB.
dic = {
    'url_short': url
}
result = db.test_submit.insert_one(dic)
|
24,272 | 56bfe919cfae82bfda7dc26ba02f3d8b28932fd6 | from urllib.request import urlopen, HTTPError
import json
import pandas as pd
import random
import re
import time
from datetime import datetime
import logging
logging.basicConfig(filename='reddit_scraping.log', level=logging.INFO)
# collect posts from subreddits pertaining to the vaccine
# collect comments from those posts
# collect replies to those comments
sub_reddits = ['Coronavirus', 'vaxxhappened', 'antivax', 'VaccineMyths',
'science', 'news', 'COVID19', 'conspiracy', 'nyc', "Indiana", "Conservative", "illinois", "nashville", "LosAngeles"]
match_words =['covid-19 vaccine', 'vaccine', 'vaccination', 'coronavirus vaccine',
'covid vaccine', 'covid', 'coronavirus', 'virus', 'vax', 'doses', 'pfizer', 'moderna',
'johnson & johnson', 'J&J', 'vaccinators']
def parse_sub_reddits(sub_reddit: str,
                      match_words: list):
    """
    Check all the posts in the subreddit for
    Args:
        sub_reddit (str): a subreddit to parse posts
        match_words (list): a list of match words
    Returns:
        List of all posts in the subreddit mentioning vaccines
    """
    url_to_open = f"https://www.reddit.com/r/{sub_reddit}.json"
    success_status = 0
    # retry until reddit stops rate-limiting us (HTTP errors back off 2 min)
    while success_status != 200:
        try:
            response = urlopen(url_to_open, timeout=10)
            success_status = response.status
        except HTTPError:
            logging.info(f"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.")
            time.sleep(120)
            success_status = 400
    entire_sub_reddit = json.loads(response.read())
    # kind == "t3" marks link/self posts in reddit's type prefixes
    posts = [post["data"] for post in entire_sub_reddit['data']['children'] if post["kind"] == "t3"]
    _ids = []
    post_dataframes = []
    return_dict = {}
    if len(posts) > 0:
        for post in posts:
            try:
                title = post['title'].lower()
                # keep only posts whose title mentions any match word
                if re.findall(r"(?=("+'|'.join(match_words)+r"))", title):
                    _id = post['id']
                    norm_df = pd.json_normalize(post)
                    norm_df = norm_df[['id', 'subreddit', 'title', 'ups', 'downs', 'upvote_ratio', 'num_comments', 'author_fullname', 'created_utc', 'subreddit_subscribers']]
                    norm_df = norm_df.rename(columns = {'id': 'post_id', 'author_fullname': 'author'})
                    post_dataframes.append(norm_df)
                    # only posts with comments are worth a follow-up fetch
                    if post['num_comments'] > 0:
                        _ids.append(_id)
            except KeyError:
                # posts missing expected fields (e.g. deleted authors) are skipped
                pass
        if len(post_dataframes) > 0:
            all_dfs = pd.concat(post_dataframes, ignore_index=True)
            return_dict['data'] = all_dfs
            return_dict['ids'] = _ids
        else:
            return_dict['data'] = None
            return_dict['ids'] = None
    else:
        return_dict['data'] = None
        return_dict['ids'] = None
    return return_dict
def comment_data(post_id: str,
                 sub_reddit: str):
    """
    Generates a pandas dataframe with scraped comments and replies data,
    concatenating replies with top-level comments.

    Args:
        post_id (str): id of a post that contains covid vaccine keywords
        sub_reddit (str): the subreddit the post belongs to

    Returns:
        DataFrame with one row per comment/reply; 'reply' marks replies and
        'comment_replied_id' links a reply to its parent comment.
    """
    url_to_open = f"https://www.reddit.com/r/{sub_reddit}/comments/{post_id}.json"
    success_status = 0
    # retry until reddit stops rate-limiting us (HTTP errors back off 2 min)
    while success_status != 200:
        try:
            response = urlopen(url_to_open, timeout=10)
            success_status = response.status
        except HTTPError:
            logging.info(f"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.")
            time.sleep(120)
            success_status = 400
    sub_reddit_page = json.loads(response.read())
    # element [1] of the payload holds the comment tree
    comments_df = pd.json_normalize(sub_reddit_page[1]['data']['children'])
    comments_df['post_id'] = post_id
    comments_df = comments_df[['post_id', 'data.id', 'data.author_fullname', 'data.body', 'data.created',
                               'data.downs', 'data.ups']]
    comments_df = comments_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment',
                                                'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})
    comments_df['reply'] = 'N'
    comments_df['comment_replied_id'] = ''
    # collect the replies of every top-level comment
    replies_list = []
    for comment in sub_reddit_page[1]['data']['children']:
        replies = comment.get('data').get('replies')
        comment_id = comment.get('data').get('id')
        if replies is None or replies == '':
            continue
        replies_df = pd.json_normalize(replies['data']['children'])
        try:
            replies_df = replies_df[['data.id', 'data.author_fullname', 'data.body', 'data.created',
                                     'data.downs', 'data.ups']]
        except KeyError:
            # NOTE(review): when expected columns are missing (e.g. deleted
            # authors) the un-trimmed frame falls through -- confirm intent
            pass
        replies_df = replies_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment',
                                                  'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})
        replies_df['reply'] = 'Y'
        replies_df['comment_replied_id'] = comment_id
        replies_df['post_id'] = post_id
        replies_list.append(replies_df)
    if len(replies_list) == 1:
        all_replies = replies_list[0]
    elif len(replies_list) > 1:
        all_replies = pd.concat(replies_list, ignore_index = True)
    else:
        all_replies = None
    # align reply columns with the comment columns before concatenating
    column_order = [c for c in comments_df.columns]
    comments_df = comments_df[column_order]
    if all_replies is not None:
        all_replies = all_replies[column_order]
        # BUG FIX: previously concatenated `replies_df` (the replies of only
        # the *last* comment) instead of `all_replies` (replies of every
        # comment), silently dropping most replies.
        all_comments_replies = pd.concat([comments_df, all_replies], ignore_index=True)
    else:
        all_comments_replies = comments_df
    return all_comments_replies
def utc_to_date(x):
    """Convert a unix timestamp to a 'YYYY-mm-dd HH:MM:SS' local-time
    string, or None when the timestamp is out of range."""
    try:
        moment = datetime.fromtimestamp(x)
        return moment.strftime('%Y-%m-%d %H:%M:%S')
    except ValueError:
        return None
def stream_to_db(subreddit: str,
                 df_dict: dict,
                 db_path: str) -> None:
    """
    Writes each dataframe in df_dict to its subreddit-specific CSV file.

    Args:
        subreddit (str): subreddit the dataframes belong to
        df_dict (dict): maps 'posts'/'comments' to a DataFrame (or None)
        db_path (str): path to database files
    """
    file_lkps = {'posts': f"reddit-{subreddit}-posts.csv",
                 'comments': f"reddit-{subreddit}-comments.csv"}
    for _key in df_dict:
        df = df_dict.get(_key)
        if df is None:
            # BUG FIX: this used to be `pass`, which fell through and then
            # crashed on `None.to_csv`; entries without data must be skipped.
            continue
        full_path = f"{db_path}/{file_lkps.get(_key)}"
        df.to_csv(full_path, index=False, encoding='utf-8')
        logging.info(f"Saved {_key} data for subreddit {subreddit} at {datetime.today()}")
    return None
if __name__ == "__main__":
    # For each configured subreddit: scrape matching posts, save them, then
    # scrape and save the comments/replies of every post that has comments.
    for sr in sub_reddits:
        logging.info(f'Starting scraping for subreddit {sr} at {datetime.today()}')
        db_path = '/Users/philazar/Desktop/projects/covid-sentiment/data/reddit'
        valid_posts = parse_sub_reddits(sub_reddit = sr, match_words= match_words)
        posts_df = valid_posts.get('data')
        if posts_df is not None:
            # derive a human-readable timestamp column before persisting
            posts_df['post_date'] = posts_df['created_utc'].apply(lambda x: utc_to_date(x))
            stream_to_db(subreddit = sr,
                         df_dict = {'posts': posts_df},
                         db_path=db_path)
        post_ids = valid_posts.get('ids')
        if post_ids is not None:
            comments_dataframes = []
            for i in post_ids:
                comments_dataframe = comment_data(post_id=i, sub_reddit= sr)
                comments_dataframes.append(comments_dataframe)
            all_comments = pd.concat(comments_dataframes, ignore_index =True)
            all_comments['comment_date'] = all_comments['created_utc'].apply(lambda x: utc_to_date(x))
            stream_to_db(subreddit = sr,
                         df_dict = {'comments': all_comments},
                         db_path=db_path)
        logging.info(f'Finished scraping for subreddit {sr} at {datetime.today()}')
|
24,273 | 03a9c5e9a6c7019ca3431d52715d9b373feb00d3 | #!/usr/bin/env python
# The first line specifies the python interpreter.
# Script file that can run independently of Django; it generates the static
# product pages (compare with the manage.py script file).
# Set up the import path so later imports follow the meiduo mall layout.
import sys
# sys.path.insert(position in the import path list, "new import path"):
# 0 puts the new path first; here it points at the first meiduo_mall
# directory, two levels up from the current scripts directory.
sys.path.insert(0,"../../")
# Set the environment variable Django's settings loading depends on.
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'meiduo_mall.settings.dev'
# import io
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')
# Initialize Django once so the ORM and templates can be used below.
import django
django.setup()
# Import the required dependencies and the related model classes.
from django.template import loader
from django.conf import settings
from apps.goods.goods_utils import get_categories,get_breadcrumb,get_goods_specs
from apps.goods.models import SKU
# 定义静态化详情页的工具方法
def action_static_detail_html(sku):
    """
    Render and persist the static detail page for one SKU.

    :param sku: the SKU model instance to render
    """
    # Gather everything the template needs (the SKU itself arrives as the
    # `sku` parameter).
    # Category tree for the site navigation.
    categories = get_categories()
    # Breadcrumb trail for the SKU's category.
    breadcrumb = get_breadcrumb(sku.category)
    # Specification/option data for this SKU.
    goods_specs = get_goods_specs(sku)
    # The related SPU is reachable from the template via {{ sku.spu }},
    # so it is not added to the context explicitly.
    # Build the template context.
    context = {
        "sku":sku,
        "categories":categories,
        "breadcrumb":breadcrumb,
        "goods_specs":goods_specs,
    }
    # Render detail.html with the context to obtain the final HTML string.
    template = loader.get_template("detail.html")
    detail_html_str = template.render(context)
    # Write the rendered page to front_end_pc/goods/<sku.id>.html, a sibling
    # directory of the project base dir.
    file_path = os.path.join(os.path.dirname(settings.BASE_DIR),"front_end_pc/goods/"+str(sku.id)+".html")
    with open(file_path,"w",encoding="utf-8") as f:
        f.write(detail_html_str)
if __name__ == '__main__':
    # Script entry point: regenerate a static detail page for every SKU.
    skus = SKU.objects.all()
    for sku in skus:
        # print(sku.id)
        action_static_detail_html(sku)
24,274 | 688b88932103c6004bae2eee118108cffee33247 | from CS4HS import *
import random
######################## collision code #############################
def IsColliding(x1, y1, width1, height1, x2, y2, width2, height2):
    """Return True when the two axis-aligned rectangles overlap or touch.

    Each rectangle is given by its origin corner plus width/height.
    Edge contact counts as a collision.
    """
    # Overlap on both axes <=> neither box lies strictly beyond the other
    # (De Morgan form of the original negated-disjunction test).
    x_overlap = x1 <= x2 + width2 and x2 <= x1 + width1
    y_overlap = y1 <= y2 + height2 and y2 <= y1 + height1
    return x_overlap and y_overlap
#####################################################################
# Flat game script: two sprites (WASD vs. arrow keys) move around an
# 800x600 field with a randomly placed star; runs an endless draw loop.
# NOTE(review): `label .run` / `goto .run` depend on the non-standard `goto`
# module (apparently provided via the CS4HS star import) -- this is not
# standard Python control flow.
game = Graphics(800, 600)
player_1_x = 0 # width = 24
player_1_y = 0 # height = 24
player_2_x = 100 # width = 24
player_2_y = 100 # height = 24
# Star spawned so its 20x20 sprite stays fully inside the 800x600 screen.
star_x = random.randint(0, 750) # width = 20
star_y = random.randint(0, 550) # height = 20
# Player 1 moves twice as fast as player 2.
player_1_speed = 2
player_2_speed = 1
label .run
game.clear()
# checking for input
if game.isKeyPressed(KEY_A):
    player_1_x -= player_1_speed
if game.isKeyPressed(KEY_D):
    player_1_x += player_1_speed
# NOTE(review): W increases y and S decreases it -- whether that is "up"
# depends on Graphics' axis direction; confirm against the library.
if game.isKeyPressed(KEY_W):
    player_1_y += player_1_speed
if game.isKeyPressed(KEY_S):
    player_1_y -= player_1_speed
if game.isKeyPressed(KEY_LEFT):
    player_2_x -= player_2_speed
if game.isKeyPressed(KEY_RIGHT):
    player_2_x += player_2_speed
if game.isKeyPressed(KEY_UP):
    player_2_y += player_2_speed
if game.isKeyPressed(KEY_DOWN):
    player_2_y -= player_2_speed
# drawing the images
# note the order, drawn from back to front
game.drawImage("star.gif", star_x, star_y)
game.drawImage("red.gif", player_1_x, player_1_y)
game.drawImage("blue.gif", player_2_x, player_2_y)
# must "reveal" the game to see the updated screen
game.reveal()
goto .run
|
24,275 | 363764afd03aae1d364b3eae2f9c5c6eae0d9c14 | from datetime import datetime, timedelta
import json
import os
import pickle
import requests
import sys
import urlparse
# Read required configuration from the environment at import time.
#   UPSTREAM_ADDRESS: base URL of the proxied service.
#   START_TIME: pickled datetime marking process start (used for the grace period).
try:
    UPSTREAM_ADDRESS = os.environ["UPSTREAM_ADDRESS"]
    # NOTE(review): only KeyError is caught below; a malformed pickle payload
    # would propagate. This file targets Python 2 (see the urlparse import),
    # where pickle.loads accepts the str from os.environ directly.
    START_TIME = pickle.loads(os.environ["START_TIME"])
except KeyError as e:
    sys.stderr.write("ERROR: " + str(e) + " environment variable not defined!\n")
    raise
def parse_gunicorn_headers(environ):
    """Rebuild HTTP request headers from a WSGI/gunicorn environ dict.

    Keys of the form HTTP_FOO_BAR become "FOO-BAR"; the Host header is
    dropped (the upstream request supplies its own).
    """
    prefix = "HTTP_"
    skipped = ["host"]
    headers = {}
    for key in environ:
        if not key.startswith(prefix):
            continue
        # underscores are valid in http headers but quite often rejected e.g. by nginx
        # http://nginx.org/en/docs/http/ngx_http_core_module.html#underscores_in_headers
        # or django
        # https://www.djangoproject.com/weblog/2015/jan/13/security/
        name = key[len(prefix):].replace("_", "-")
        if name.lower() in skipped:
            continue
        headers[name] = environ[key]
    return headers
def merge_upstream_url(upstream_addr, path, query):
    """Combine the upstream base address with the incoming path and query.

    Keeps the upstream's scheme/netloc (and fragment) and substitutes the
    request's path and query string.
    """
    # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
    # urlsplit returns a SplitResult namedtuple, so _replace gives us an
    # updated copy without manual index bookkeeping.
    parts = urlparse.urlsplit(upstream_addr)
    merged = parts._replace(path=path, query=query)
    return urlparse.urlunsplit(merged)
def fetch(method):
    """Map an HTTP method name to the matching requests call.

    Only GET is supported; any other verb yields a callable that returns a
    synthetic 405 (Method Not Allowed) MockResponse.
    """
    handler = {"get": requests.get}.get(method.lower())
    if handler is not None:
        return handler
    return lambda *args, **kwargs: MockResponse(405)
def get_code_description(code_no):
    """Return the short reason phrase for an HTTP status code.

    Falls back to Cloudflare-style custom codes (520), then to "unknown".
    """
    known = requests.status_codes._codes
    if code_no in known:
        return known[code_no][0]
    custom_codes = {
        520: "Unknown Error"
    }
    return custom_codes.get(code_no, "unknown")
def fetch_upstream_gracefully(environ):
    """Proxy the WSGI request upstream, masking failures during the grace period.

    While the grace period is active, any upstream failure (transport error or
    non-200 status) is converted into a 200 response whose body is a JSON
    failure report, so health checks keep passing during startup.
    """
    try:
        res = fetch(environ['REQUEST_METHOD'])(
            merge_upstream_url(UPSTREAM_ADDRESS, environ['PATH_INFO'], environ['QUERY_STRING']),
            headers=parse_gunicorn_headers(environ),
            timeout=GracePeriod.timeout())
    except requests.RequestException as e:
        # Transport-level failure: fall back to a synthetic 520 response.
        sys.stderr.write(str(e) + "\n")
        res = MockResponse().report(str(e))
    if res.status_code != 200 and not GracePeriod.expired():
        # Python 2 print statement (this module targets Python 2 -- see the
        # urlparse import at the top of the file).
        print str(datetime.now()) + " Received " + str(res.status_code) + " but grace period is in effect!"
        return MockResponse(200).report("Upstream returned non 200 status.", res)
    return res
class GracePeriod(object):
    """Startup grace-period policy.

    For GRACE_PERIOD seconds after START_TIME, upstream requests use a short
    timeout; afterwards they run with no timeout at all.
    """

    # Both knobs are read from the environment once, at class-definition time.
    REQUEST_TIMEOUT_DURING_GRACE_PERIOD = float(os.getenv("REQUEST_TIMEOUT_DURING_GRACE_PERIOD", 1))
    GRACE_PERIOD = int(os.getenv("GRACE_PERIOD", 300))

    @staticmethod
    def expired():
        """True once GRACE_PERIOD seconds have elapsed since START_TIME."""
        elapsed = datetime.now() - START_TIME
        return elapsed > timedelta(seconds=GracePeriod.GRACE_PERIOD)

    @staticmethod
    def timeout():
        """Request timeout to use: short during the grace period, None after."""
        if GracePeriod.expired():
            return None
        return GracePeriod.REQUEST_TIMEOUT_DURING_GRACE_PERIOD
class MockResponse(object):
    """Minimal stand-in for a requests.Response.

    Used wherever the proxy must fabricate a response (unsupported method,
    transport failure, grace-period masking).
    """

    def __init__(self, status_code=520, content='', headers=None):
        # https://en.wikipedia.org/wiki/List_of_HTTP_status_codes#Cloudflare
        # 520 is Cloudflare's "unknown upstream error" code.
        self.status_code = status_code
        self.content = content
        # `is None` (identity) instead of the original `== None`; a fresh dict
        # per instance also avoids the shared-mutable-default pitfall.
        self.headers = {} if headers is None else headers

    def report(self, cause, up_res=None):
        """Serialize a JSON failure report into self.content.

        If `up_res` is given, its status/headers/body are embedded under
        "upstream_response". Returns self so calls can be chained.
        """
        r = {
            "failure": True,
            "cause": cause
        }
        if up_res:
            r["upstream_response"] = {
                "status": up_res.status_code,
                "headers": up_res.headers,
                "body": up_res.content
            }
        self.content = json.dumps(r, indent=4, sort_keys=True)
        return self
def app(environ, start_response):
    """WSGI entry point: fetch the request from upstream and relay the reply."""
    upstream = fetch_upstream_gracefully(environ)
    reason = get_code_description(upstream.status_code)
    status = '{} {}'.format(upstream.status_code, reason)
    # Relay the upstream headers and body unchanged.
    start_response(status, upstream.headers.items())
    return iter([upstream.content])
|
24,276 | 508134dc67b7b65e6de110df1c2d8cb6904d65f9 | #coding=utf-8
import sys
# 通过单例实现全局变量的存取
class global_var(object):
    """Process-wide key/value store implemented as a singleton.

    Every instantiation returns the same object, so values set through any
    instance are visible through all of them.
    """

    def __init__(self):
        # All state lives on the class (set up in __new__); nothing to do here.
        pass

    def __new__(cls, *args, **kwargs):
        # Lazily create the single shared instance and its backing dict.
        # (First argument renamed self -> cls: __new__ receives the class.)
        # NOTE: not thread-safe; concurrent first calls could race.
        if not hasattr(cls, '__instance__'):
            cls.__instance__ = super(global_var, cls).__new__(cls)
            cls._global_value_ = {}
        return cls.__instance__

    def set_value(self, name, value):
        """Store `value` under `name`, overwriting any previous value."""
        self._global_value_[name] = value

    def get_value(self, name):
        """Return the stored value for `name`, or None if it was never set."""
        # dict.get replaces the original bare `except:`, which would also have
        # masked unrelated errors.
        return self._global_value_.get(name)
# 用单例实现的伪常量,所有的值只能设置一次
class global_const(object):
    """Singleton store of write-once pseudo-constants.

    The first set_value() for a given name wins; later attempts to set the
    same name are silently ignored.
    """

    def __init__(self):
        pass

    def __new__(cls, *args, **kwargs):
        # Lazily create the single shared instance and its backing dict.
        # (First argument renamed self -> cls: __new__ receives the class.)
        if not hasattr(cls, '__instance__'):
            cls.__instance__ = super(global_const, cls).__new__(cls)
            cls._global_const_ = {}
        return cls.__instance__

    def _check_has_value_(self, name):
        # `name in dict` works identically on Python 2 and 3, so the original
        # sys.version_info branching (dict.has_key on py2) is unnecessary.
        return name in self._global_const_

    def set_value(self, name, value):
        """Bind `name` to `value` unless it was already set (first write wins)."""
        if not self._check_has_value_(name):
            self._global_const_[name] = value

    def get_value(self, name):
        """Return the constant for `name`, or None if it was never set."""
        return self._global_const_.get(name)
|
24,277 | 20e6e4e1595b3492ed9bae18937ea9e5cb9cb29c | import model_server
class RepositoryUriTranslator(object):
    """Resolve repository URIs to clone addresses via the model server."""

    def _fetch_attributes(self, repo_uri):
        # One read-only RPC round trip per call; no caching.
        with model_server.rpc_connect("repos", "read") as model_server_rpc:
            return model_server_rpc.get_repo_attributes(repo_uri)

    def translate(self, repo_uri):
        """Build the scheme-appropriate clone URI for `repo_uri`.

        Returns None (implicitly) for repo types other than 'git' or 'hg'.
        """
        attributes = self._fetch_attributes(repo_uri)
        repo_type = attributes['repo']['type']
        host = attributes['repostore']['ip_address']
        if repo_type == 'git':
            return "git@%s:%s" % (host, repo_uri)
        if repo_type == 'hg':
            # Note that we are making the assumption that the route will never contain any slashes.
            return "ssh://hg@%s/%s" % (host, repo_uri)

    def extract_repo_name(self, repo_uri):
        """Look up the human-readable repo name for `repo_uri`."""
        return self._fetch_attributes(repo_uri)['repo']['name']
|
24,278 | 5c467e73e4602c81cb354aa03e680bf02518282d | from __future__ import print_function
from bitcoin import bip32_master_key, bip32_ckd, bip32_descend, bip32_privtopub, encode_privkey
import json
import datetime
import ethereum.keys
import ethereum.transactions
from ethereum.utils import decode_addr, decode_hex, encode_hex
import requests
from mnemonic import Mnemonic
from rlp import encode
def mnemonic_to_hdkey(mnemonic):
    """Derive the BIP32 key at path m/0'/0'/0' from a BIP39 mnemonic.

    Raises Exception when the mnemonic fails its checksum/wordlist check.
    """
    # if we wanted to avoid the mnemonic dep we could just do:
    # pbkdf2_hmac('sha512', mnemonic, 'mnemonic', 2048).encode('hex')
    # to get the seed
    wordlist = Mnemonic('english')
    if not wordlist.check(mnemonic):
        raise Exception('invalid mnemonic')
    node = bip32_master_key(wordlist.to_seed(mnemonic))
    # Walk three hardened levels: m/0'/0'/0' (2**31 marks a hardened index).
    for _ in range(3):
        node = bip32_ckd(node, 2**31)
    return node
def derive_keypairs(hd_key, keys=3):
    """Return [(privkey_hex, address_hex), ...] for child indices 0..keys-1.

    Addresses are hex-encoded without the 0x prefix.
    """
    pairs = []
    for index in range(keys):
        priv = encode_privkey(bip32_descend(hd_key, [index]), 'hex')
        address = decode_addr(ethereum.keys.privtoaddr(priv)).decode('utf-8')
        pairs.append((priv, address))
    return pairs
def create_mnemonic():
    """Generate a fresh random English BIP39 mnemonic phrase."""
    return Mnemonic('english').generate()
def gas_price():
    """Fetch the current gas price from the etherchain API (requires network).

    NOTE(review): no timeout or error handling; any HTTP or schema problem
    raises to the caller.
    """
    return requests.get('https://etherchain.org/api/gasPrice').json()['data'][0]['price']
def lookup(addr):
    ''' Returns balance and nonce. '''
    # TODO: beware balance is int but nonce is string
    # TODO: use instead https://etherchain.org/api/account/multiple/:ids
    if not addr.startswith('0x'):  # etherchain api requires the 0x prefix
        addr = '0x' + addr
    records = requests.get('https://etherchain.org/api/account/%s' % addr).json()['data']
    # Unknown addresses come back as an empty list: report an untouched account.
    return records[0] if records else {'balance': 0, 'nonce': '0'}
def send(privkey, nonce, recipient, amount_wei, gas_price_wei, gas_limit=21000):
    """Build and sign a simple value transfer; returns the RLP-encoded tx."""
    # TODO: sanity check incoming args
    transaction = ethereum.transactions.Transaction(
        nonce, gas_price_wei, gas_limit, recipient, amount_wei, '')
    transaction.sign(privkey)
    return encode(transaction)
def export_keystore(privkey, password):
    """Produce a geth-compatible keystore as a (filename, JSON string) pair."""
    keystore = ethereum.keys.make_keystore_json(privkey, password)
    keystore['address'] = decode_addr(ethereum.keys.privtoaddr(privkey)).decode('utf-8')
    # geth naming convention: UTC--<iso timestamp>--<address>
    filename = 'UTC--%s000Z--%s' % (datetime.datetime.utcnow().isoformat(), keystore['address'])
    return filename, json.dumps(keystore, indent=4)
def test_send():
    """Golden test: signing a fixed transaction yields a known RLP encoding."""
    privkey = decode_hex('a06bab413912bc24726e266a1f6613944ea30bf3399ae3375ccf7a663b73b625')
    recipient = '0x25c6e74ff1d928df98137af4df8430df24f07cd7'
    nonce = 0
    amount = 1000000000000000000  # 1 ETH in wei
    gas_price = 100000000000
    gas_limit = 30000
    tx = send(privkey, nonce, recipient, amount, gas_price, gas_limit)
    tx_expected = 'f86c8085174876e8008275309425c6e74ff1d928df98137af4df8430df24f07cd7880de0b6b3a7640000801ba03710b1c12686a52ca22a489a7e2323e33cdab723fe174f466d8d7122c5bc65faa077dd10ef5a9f89630aaecf852b2f9e3679c75f98fb39e91275fe76e53948af05'
    assert tx_expected == tx.hex()
def test_mnemonic():
    """Regression test: a fixed mnemonic derives a known list of keypairs."""
    mnemonic = 'logic one label consider alter keen sweet local blush quit holiday trouble'
    keypairs = [
        ('a06bab413912bc24726e266a1f6613944ea30bf3399ae3375ccf7a663b73b625', '4165c8a7e88c5780ac9214c1d9214a241ab5f078'),
        ('1ba6df9042640c614ba798271b7c1ede4c475d7087dbbb1f4372cf426d7a4cc6', 'b4e264be7f4d3a44ed58f8be183faae8515e78c7'),
        ('e35478c748b6a2891ec518cbc5c62d08c8c02aa62a223103b46ae5366e9be29c', 'e33b9d75798de6fdae6e5073dc3c3c52d1203fa7')]
    # BUG FIX: the original compared the keypair list against the raw HD key
    # string returned by mnemonic_to_hdkey(), which could never be equal; the
    # keypairs must first be derived from that HD key.
    assert keypairs == derive_keypairs(mnemonic_to_hdkey(mnemonic))
if __name__ == '__main__':
    # Minimal CLI dispatcher for the cold-wallet commands documented in the
    # help text at the bottom.
    import sys
    import getpass
    from decimal import Decimal
    command = sys.argv[1] if len(sys.argv) > 1 else ''
    if command == 'gas':
        print(gas_price())
    elif command == 'lookup':
        addr = sys.argv[2]
        info = lookup(addr)
        # 10**18 wei per ether.
        print('Balance: %s ETH' % (Decimal(info['balance'])/Decimal(1000000000000000000)))
        print('Nonce:', info['nonce'])
    elif command == 'create':
        mnemonic = create_mnemonic()
        hd_privkey = mnemonic_to_hdkey(mnemonic)
        print('Mnemonic: %s' % mnemonic)
        #print('HDPublicKey: %s' % bip32_privtopub(hd_privkey))
        print('-' * 40)
        for i,(privkey,addr) in enumerate(derive_keypairs(hd_privkey)):
            print('Address #%d: 0x%s' % (i, addr))
    elif command == 'keys':
        # getpass keeps the mnemonic out of shell history and terminal echo.
        mnemonic = getpass.getpass('Enter mnemonic:').strip()
        hd_privkey = mnemonic_to_hdkey(mnemonic)
        #print('HDPublicKey: %s' % bip32_privtopub(hd_privkey))
        for i,(privkey,addr) in enumerate(derive_keypairs(hd_privkey)):
            print('Address #%d: 0x%s Privkey: %s' % (i, addr, privkey))
    elif command == 'send':
        privkey_hex = getpass.getpass('Enter privkey:')
        privkey = decode_hex(privkey_hex)
        assert len(privkey) == 32
        nonce = int(sys.argv[2])
        recipient = sys.argv[3]
        # Amount argument is in ether; Decimal avoids float rounding when
        # converting to wei.
        amount = int(Decimal(sys.argv[4]) * Decimal(1000000000000000000))
        gas_price = int(sys.argv[5])
        gas_limit = int(sys.argv[6])
        tx = send(privkey, nonce, recipient, amount, gas_price, gas_limit)
        # ("Trasaction" typo kept: it is a runtime output string.)
        print('Trasaction:', encode_hex(tx).decode('utf-8'))
    elif command == 'export':
        privkey_hex = getpass.getpass('Enter privkey:')
        privkey = decode_hex(privkey_hex)
        assert len(privkey) == 32
        pw = getpass.getpass('Choose a keystore password:')
        pw2 = getpass.getpass('Repeat password:')
        assert pw == pw2, "Password mismatch"
        print("Applying hard key derivation function. Please wait ...")
        filename, content_json = export_keystore(privkey, pw)
        # NOTE(review): success is printed before the write below; an I/O
        # error here would follow a misleading message.
        print('Wallet saved to file: %s' % filename)
        open(filename, 'w').write(content_json)
    elif command == 'test':
        test_send()
        test_mnemonic()
        print('Tests passed.')
    else:
        print('''Command Help:
create
Generate a new icebox wallet.
keys
Displays the addresses and private keys for a wallet.
NOTE: This command will prompt you for your mnemonic.
gas
Shows the current gas price in WEI (must be online).
lookup <addr>
Shows the current balance and nonce given an address (must be online).
send <nonce> <recipient address> <amount> <gas price> <gas limit>
Creates a send transaction (but does not broadcast it).
NOTE: This command will prompt you for the private key to send from.
Amount should be specified in ETHERs.
Gas price should be specified in WEI.
Gas limit for simple sends should be set to 21000.
export
Export a private key to a geth-compatible keystore.
NOTE: This command will prompt you for the private key and keystore password.
''')
|
24,279 | 59eb52b3fe89a15d7fde81dfc84d1c05851d2ba6 | # coding: utf-8
"""
ClickSend v3 REST API
This is the official [ClickSend](https://clicksend.com) SDK. *You'll need to create a free account to use the API. You can register [here](https://www.clicksend.com/signup).* You can use our API documentation along with the SDK. Our API docs can be found [here](https://developers.clicksend.com). # noqa: E501
OpenAPI spec version: 3.1.0
Contact: support@clicksend.com
Generated by: https://github.com/clicksend-api/clicksend-codegen.git
"""
from setuptools import setup, find_packages # noqa: H301
# Distribution metadata for the generated ClickSend v3 API client package.
NAME = "clicksend-client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
# Runtime dependencies of the generated client code.
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
    name=NAME,
    version=VERSION,
    description="ClickSend v3 REST API",
    author_email="support@clicksend.com",
    url="",
    keywords=["Swagger", "ClickSend v3 REST API"],
    install_requires=REQUIRES,
    packages=find_packages(),
    include_package_data=True,
    long_description="""\
This is the official [ClickSend](https://clicksend.com) SDK. *You'll need to create a free account to use the API. You can register [here](https://www.clicksend.com/signup).* You can use our API documentation along with the SDK. Our API docs can be found [here](https://developers.clicksend.com). # noqa: E501
"""
)
|
24,280 | 35ca005ea44fcc2e8679bec0962d700377b48951 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import wx
from SyntaxHighlight import *
from configClass import *
from logClass import *
from SyntaxColor import *
from DefCodeWin import *
def CallChangeOption(event, option, val, IdRange=0):
    """
    CallChangeOption
    Helper function used to call Config.ChangeOption.

    Thin adapter so wx event handlers (which receive only `event`) can
    forward a specific option/value pair to the global Config object.
    """
    Config.ChangeOption(option, val, IdRange)
def CallChangeColorFile(event, item, newcolor):
    """
    CallChangeColorFile
    Used to call ChangeColorFile.

    Forwards to the module-level ChangeColorFile, then lets the event
    continue propagating via Skip().
    """
    ChangeColorFile(item, newcolor)
    event.Skip()
def ToggleSpinner(event, state, widget):
    """
    ToggleSpinner
    Enables the supplied widget when `state` is truthy, disables it
    otherwise, then lets the event continue propagating.
    """
    # Truthiness test instead of the original `state == True`: checkbox
    # values are bools (for which the result is identical), and this also
    # behaves correctly for any other truthy input.
    if state:
        widget.Enable()
    else:
        widget.Disable()
    event.Skip()
class CfgFrame(wx.Frame):
    """
    CfgFrame
    Creates the application configuration window and
    provides the necessary controls to modify the application
    preferences.

    The window is built once, kept hidden, and shown/hidden on demand
    (see ShowMe/HideMe); closing it only hides it.
    """

    def __init__(self, IdRange, parent=None):
        """
        __init__
        Builds the entire frame GUI and binds their events across
        3 Notebook tabs (General, Editor, Terminals).

        IdRange is forwarded to Config.ChangeOption so changes can be
        applied to the relevant editor instances.
        """
        wx.Frame.__init__(self, parent, -1, 'Settings', size=(300, 500))
        self.SetIcon(wx.Icon('icons/gEcrit.png', wx.BITMAP_TYPE_PNG))
        ConfigBook = wx.Notebook(self)
        dflt_text_win = DefaultCodeFr(self, -1)
        ConfigPanel = wx.Panel(ConfigBook)
        ConfigPanel2 = wx.Panel(ConfigBook)
        ColPal.CollorPaletteWindow(0, IdRange)

        # ---------------- General tab ----------------
        first_sizer = wx.BoxSizer(wx.VERTICAL)
        AutosaveBox = wx.CheckBox(ConfigPanel, -1, "Enable Autosave", (10, 10), (160, -1))
        AutosaveBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "Autosave", AutosaveBox.GetValue(), IdRange))
        # NOTE(review): binding the same event on the same control twice may
        # replace the first handler rather than chain both -- confirm against
        # the wxPython version in use (pattern recurs below).
        AutosaveBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event, AutosaveBox.GetValue(), Interval))
        Inter_Info = wx.StaticText(ConfigPanel, -1, "Save data each # of characters:", (20, 35))
        Interval = wx.SpinCtrl(ConfigPanel, -1, "", (20, 60), (90, -1))
        Interval.SetRange(1, 500)
        Interval.SetValue(Config.GetOption("Autosave Interval"))
        Interval.Bind(wx.EVT_SPINCTRL, lambda event: CallChangeOption(event, "Autosave Interval", Interval.GetValue(), IdRange))
        # The interval spinner is only meaningful while autosave is on.
        if not Config.GetOption("Autosave"):
            AutosaveBox.SetValue(False)
            Interval.Disable()
        else:
            AutosaveBox.SetValue(True)
        RmTrlBox = wx.CheckBox(ConfigPanel, -1, "Strip Trailing Spaces On Save", pos=(20, 70), size=(-1, -1))
        RmTrlBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "StripTrails", RmTrlBox.GetValue()))
        RmTrlBox.SetValue(Config.GetOption("StripTrails"))
        StatusBarBox = wx.CheckBox(ConfigPanel, -1, "Enable StatusBar", (10, 90), (160, -1))
        StatusBarBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "StatusBar", StatusBarBox.GetValue(), IdRange))
        StatusBarBox.SetValue(Config.GetOption("StatusBar"))
        Src_Br_Box = wx.CheckBox(ConfigPanel, -1, "Enable Source Browser", (10, 115), (-1, -1))
        Src_Br_Box.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "SourceBrowser", Src_Br_Box.GetValue(), IdRange))
        Src_Br_Box.SetValue(Config.GetOption("SourceBrowser"))
        FileTreeBox = wx.CheckBox(ConfigPanel, -1, "Enable File Tree Browser", (10, 117), (-1, -1))
        FileTreeBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "FileTree", FileTreeBox.GetValue(), IdRange))
        FileTreeBox.SetValue(Config.GetOption("FileTree"))
        SpellBox = wx.CheckBox(ConfigPanel, -1, "Enable Spell Checker", (10, 120), (-1, -1))
        SpellBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "SpellCheck", SpellBox.GetValue(), IdRange))
        SpellBox.SetValue(Config.GetOption("SpellCheck"))
        SpellBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event, SpellBox.GetValue(), SpellSugBox))
        SpellSugBox = wx.CheckBox(ConfigPanel, -1, "Show Spell Suggestions", (10, 120), (-1, -1))
        SpellSugBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "SpellSuggestions", SpellSugBox.GetValue(), IdRange))
        SpellSugBox.SetValue(Config.GetOption("SpellSuggestions"))
        DfltTextBox = wx.CheckBox(ConfigPanel, -1, "Enable New Document Default Text", (10, 130), (-1, -1))
        DfltTextBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "DefaultTextAct", DfltTextBox.GetValue()))
        DfltTextBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event, DfltTextBox.GetValue(), DfltTextBtn))
        DfltTextBox.SetValue(Config.GetOption("DefaultTextAct"))
        DfltTextBtn = wx.Button(ConfigPanel, -1, "Edit Document Default Text", (50, 135), (-1, -1))
        DfltTextBtn.Bind(wx.EVT_BUTTON, dflt_text_win.ShowMe)
        DfltTextBtn.Enable(Config.GetOption("DefaultTextAct"))
        LogActBox = wx.CheckBox(ConfigPanel, -1, "Enable Log", (10, 140), (160, -1))
        LogActBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "ActLog", LogActBox.GetValue(), IdRange))
        LogActBox.SetValue(Config.GetOption("ActLog"))
        PalleteButton = wx.Button(ConfigPanel, -1, "Colour Palette", pos=(10, 220), size=(-1, -1))
        PalleteButton.Bind(wx.EVT_BUTTON, ColPal.ShowMe)
        DefaultsButton = wx.Button(ConfigPanel, -1, "Reset to Defaults", pos=(10, 260), size=(-1, -1))
        DefaultsButton.Bind(wx.EVT_BUTTON, lambda event: CallChangeOption(event, "Defaults", "Defaults", IdRange))
        DefaultsButton.Bind(wx.EVT_BUTTON, lambda event: CallChangeColorFile(event, "Defaults", "Defaults"))
        ViewButton = wx.Button(ConfigPanel, label="View Log", pos=(10, 180), size=(-1, -1))
        ViewButton.Bind(wx.EVT_BUTTON, self.viewLog)
        EraseButton = wx.Button(ConfigPanel, label="Erase Log", pos=(50, 180), size=(-1, -1))
        EraseButton.Bind(wx.EVT_BUTTON, Log.EraseLog)
        EraseButton.Bind(wx.EVT_BUTTON, lambda event: ToggleSpinner(event, False, EraseButton))
        OKButton = wx.Button(ConfigPanel, -1, "OK", pos=(200, 420), size=(80, 40))
        OKButton.Bind(wx.EVT_CLOSE, self.HideMe)
        OKButton.Bind(wx.EVT_BUTTON, self.HideMe)
        special_sizer = wx.BoxSizer(wx.HORIZONTAL)
        special_sizer.Add(ViewButton, 0)
        special_sizer.Add(EraseButton, 0)
        first_sizer.Add(AutosaveBox, 0, wx.EXPAND, wx.ALL, 5)
        first_sizer.Add(Inter_Info, 0, wx.ALL, 5)
        first_sizer.Add(Interval, 0, wx.LEFT, 30)
        first_sizer.Add(RmTrlBox, 0 , wx.EXPAND)
        first_sizer.Add(StatusBarBox, 0, wx.EXPAND, wx.ALL, 5)
        first_sizer.Add(Src_Br_Box, 0, wx.EXPAND, wx.ALL, 5)
        first_sizer.Add(FileTreeBox, 0, wx.EXPAND, wx.ALL, 5)
        first_sizer.Add(SpellBox, 0, wx.EXPAND, wx.ALL, 5)
        first_sizer.Add(SpellSugBox, 0, wx.EXPAND, wx.ALL, 15)
        first_sizer.Add(DfltTextBox, 0, wx.EXPAND)
        first_sizer.Add(DfltTextBtn, 0, wx.LEFT, 30)
        first_sizer.Add(LogActBox, 0, wx.EXPAND, wx.ALL, 5)
        first_sizer.Add(PalleteButton, 0, wx.ALL, 5)
        first_sizer.Add(special_sizer, 0, wx.ALL, 5)
        first_sizer.Add(DefaultsButton, 0)
        ConfigPanel.SetSizer(first_sizer)

        # ---------------- Editor tab ----------------
        second_sizer = wx.BoxSizer(wx.VERTICAL)
        LineNrBox = wx.CheckBox(ConfigPanel2, -1, "Show Line Numbers", (10, 10), (-1, -1))
        LineNrBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "LineNumbers", LineNrBox.GetValue(), IdRange))
        LineNrBox.SetValue(Config.GetOption("LineNumbers"))
        SyntaxHgBox = wx.CheckBox(ConfigPanel2, -1, "Syntax Highlight ", (10, 35), (-1, -1))
        SyntaxHgBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "SyntaxHighlight", SyntaxHgBox.GetValue(), IdRange))
        SyntaxHgBox.SetValue(Config.GetOption("SyntaxHighlight"))
        AutoIdentBox = wx.CheckBox(ConfigPanel2, -1, "Autoindentation", (10, 60), (-1, -1))
        AutoIdentBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "Autoindentation", AutoIdentBox.GetValue(), IdRange))
        AutoIdentBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event, AutoIdentBox.GetValue(), IndentSizeBox))
        AutoIdentBox.SetValue(Config.GetOption("Autoindentation"))
        IndentSizeBox = wx.SpinCtrl(ConfigPanel2, -1, "", (35, 85), (90, -1))
        IndentSizeBox.SetRange(1, 12)
        IndentSizeBox.SetValue(Config.GetOption("IndentSize"))
        IndentSizeBox.Bind(wx.EVT_SPINCTRL, lambda event: CallChangeOption(event, "IndentSize", IndentSizeBox.GetValue(), IdRange))
        # Indent size only applies while autoindentation is on.
        if Config.GetOption("Autoindentation") == True:
            IndentSizeBox.Enable()
        else:
            IndentSizeBox.Disable()
        IndentationGuidesBox = wx.CheckBox(ConfigPanel2, -1, "Indentation Guides", (10, 110), (-1, -1))
        # NOTE(review): the option key is spelled "IndetationGuides" (sic);
        # it must match the key used elsewhere in Config, so it is kept as-is.
        IndentationGuidesBox.SetValue(Config.GetOption("IndetationGuides"))
        IndentationGuidesBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "IndetationGuides", IndentationGuidesBox.GetValue(), IdRange))
        BackSpaceUnindentBox = wx.CheckBox(ConfigPanel2, -1, "Backspace to Unindent", (10, 135), (-1, -1))
        BackSpaceUnindentBox.SetValue(Config.GetOption("BackSpaceUnindent"))
        BackSpaceUnindentBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "BackSpaceUnindent", BackSpaceUnindentBox.GetValue(), IdRange))
        WhitespaceBox = wx.CheckBox(ConfigPanel2, -1, "Show Whitespace", (10, 160), (-1, -1))
        WhitespaceBox.SetValue(Config.GetOption("Whitespace"))
        WhitespaceBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "Whitespace", WhitespaceBox.GetValue(), IdRange))
        UseTabsBox = wx.CheckBox(ConfigPanel2, -1, "Use Tabs", (10, 185), (160, -1))
        UseTabsBox.SetValue(Config.GetOption("UseTabs"))
        UseTabsBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "UseTabs", UseTabsBox.GetValue(), IdRange))
        CarretInfo = wx.StaticText(ConfigPanel2, -1, 'Carret Width:', (10, 210))
        CarretWidthSpin = wx.SpinCtrl(ConfigPanel2, -1, "", (35, 235), (-1, -1))
        CarretWidthSpin.SetRange(1, 20)
        CarretWidthSpin.SetValue(Config.GetOption("CarretWidth"))
        CarretWidthSpin.Bind(wx.EVT_SPINCTRL, lambda event: CallChangeOption(event, "CarretWidth", CarretWidthSpin.GetValue(), IdRange))
        FoldMarkBox = wx.CheckBox(ConfigPanel2, -1, "Fold Marks", (10, 265), (160, -1))
        FoldMarkBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "FoldMarks", FoldMarkBox.GetValue(), IdRange))
        FoldMarkBox.SetValue(Config.GetOption("FoldMarks"))
        TabInfo = wx.StaticText(ConfigPanel2, -1, "Tab Width:", pos=(10, 300), size=(-1, -1))
        TabWidthBox = wx.SpinCtrl(ConfigPanel2, -1, "", pos=(35, 320), size=(90, -1))
        TabWidthBox.SetValue(Config.GetOption("TabWidth"))
        TabWidthBox.Bind(wx.EVT_SPINCTRL, lambda event: CallChangeOption(event, "TabWidth", TabWidthBox.GetValue(), IdRange))
        EdgeLineBox = wx.CheckBox(ConfigPanel2, -1, "Edge Line", pos=(10, 350), size=(-1, -1))
        EdgeLineBox.SetValue(Config.GetOption("EdgeLine"))
        EdgeLineBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "EdgeLine", EdgeLineBox.GetValue(), IdRange))
        EdgeLineBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event, EdgeLineBox.GetValue(), EdgeLinePos))
        EdgeInfo = wx.StaticText(ConfigPanel2, -1, "Edge Line Position:", pos=(35, 375), size=(-1, -1))
        EdgeLinePos = wx.SpinCtrl(ConfigPanel2, -1, "", pos=(35, 400), size=(-1, -1))
        EdgeLinePos.SetValue(Config.GetOption("EdgeColumn"))
        # Column spinner only applies while the edge line is shown.
        if Config.GetOption("EdgeLine"):
            EdgeLinePos.Enable()
        else:
            EdgeLinePos.Disable()
        EdgeLinePos.Bind(wx.EVT_SPINCTRL, lambda event: CallChangeOption(event, "EdgeColumn", EdgeLinePos.GetValue(), IdRange))
        BraceCompBox = wx.CheckBox(ConfigPanel2,-1,"Autocomplete Braces", pos=(10,200),size=(-1,-1))
        BraceCompBox.Bind(wx.EVT_CHECKBOX,lambda event: CallChangeOption(event,"BraceComp",BraceCompBox.GetValue(),IdRange))
        BraceCompBox.SetValue(Config.GetOption("BraceComp"))
        second_sizer.Add(LineNrBox, 0, wx.EXPAND)
        second_sizer.Add(SyntaxHgBox, 0, wx.EXPAND)
        second_sizer.Add(AutoIdentBox, 0, wx.EXPAND)
        second_sizer.Add(IndentSizeBox, 0, wx.LEFT, 30)
        second_sizer.Add(IndentationGuidesBox, 0, wx.EXPAND)
        second_sizer.Add(BackSpaceUnindentBox, 0, wx.EXPAND)
        second_sizer.Add(WhitespaceBox, 0, wx.EXPAND)
        second_sizer.Add(UseTabsBox, 0, wx.EXPAND, 30)
        second_sizer.Add(CarretInfo, 0, wx.EXPAND)
        second_sizer.Add(CarretWidthSpin, 0, wx.LEFT, 30)
        second_sizer.Add(FoldMarkBox, 0, wx.EXPAND)
        second_sizer.Add(TabInfo, 0, wx.EXPAND)
        second_sizer.Add(TabWidthBox, 0, wx.LEFT, 30)
        second_sizer.Add(EdgeLineBox, 0, wx.EXPAND)
        second_sizer.Add(EdgeInfo, 0, wx.EXPAND)
        second_sizer.Add(EdgeLinePos, 0, wx.LEFT, 30)
        second_sizer.Add(BraceCompBox,0,wx.EXPAND)
        ConfigPanel2.SetSizer(second_sizer)
        OKButton2 = wx.Button(ConfigPanel2, -1, "OK", pos=(200, 420), size=(80, 40))
        OKButton2.Bind(wx.EVT_CLOSE, self.HideMe)
        OKButton2.Bind(wx.EVT_BUTTON, self.HideMe)

        # ---------------- Terminals tab ----------------
        third_sizer = wx.BoxSizer(wx.VERTICAL)
        ConfigPanel3 = wx.Panel(ConfigBook)
        BashBox = wx.CheckBox(ConfigPanel3, -1, "OS Terminal", pos=(10, 10), size=(-1, -1))
        BashBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "BashShell", BashBox.GetValue(), IdRange))
        BashBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event, BashBox.GetValue(), OSPath))
        BashBox.SetValue(Config.GetOption("BashShell"))
        OSInfo = wx.StaticText(ConfigPanel3, -1, "OS shell path:", pos=(10, 30), size=(-1, -1))
        OSPath = wx.TextCtrl(ConfigPanel3, -1, "", pos=(10, 50), size=(250, -1))
        OSPath.SetValue(Config.GetOption("OSPath"))
        OSPath.Enable(BashBox.GetValue())
        OSApply = wx.Button(ConfigPanel3, -1, "Apply", pos=(10, 80), size=(-1, -1))
        OSApply.Bind(wx.EVT_BUTTON, lambda event: CallChangeOption(event, "OSPath", OSPath.GetValue(), IdRange))
        PythonBox = wx.CheckBox(ConfigPanel3, -1, "Python Terminal", pos=(10, 110), size=(-1, -1))
        PythonBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event, "PythonShell", PythonBox.GetValue(), IdRange))
        PythonBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event, PythonBox.GetValue(), PyPath))
        PythonBox.SetValue(Config.GetOption("PythonShell"))
        PyInfo = wx.StaticText(ConfigPanel3, -1, "Python shell path:", pos=(10, 130), size=(-1, -1))
        PyPath = wx.TextCtrl(ConfigPanel3, -1, "", pos=(10, 150), size=(250, -1))
        PyPath.SetValue(Config.GetOption("PyPath"))
        PyPath.Enable(PythonBox.GetValue())
        PyApply = wx.Button(ConfigPanel3, -1, "Apply", pos=(10, 180), size=(-1, -1))
        PyApply.Bind(wx.EVT_BUTTON, lambda event: CallChangeOption(event, "PyPath", PyPath.GetValue(), IdRange))
        third_sizer.Add(BashBox, 0, wx.EXPAND, 5)
        third_sizer.Add(OSInfo, 0, wx.EXPAND, 5)
        third_sizer.Add(OSPath, 0, wx.EXPAND, 5)
        third_sizer.Add(OSApply, 0, 5)
        third_sizer.Add(PythonBox, 0, wx.EXPAND, 5)
        third_sizer.Add(PyInfo, 0, wx.EXPAND, 5)
        third_sizer.Add(PyPath, 0, wx.EXPAND, 5)
        third_sizer.Add(PyApply, 0, 5)
        ConfigPanel3.SetSizer(third_sizer)
        OKButton4 = wx.Button(ConfigPanel3, -1, "OK", pos=(200, 420), size=(80, 40))
        OKButton4.Bind(wx.EVT_BUTTON, self.HideMe)

        # Assemble the notebook, then start hidden and centred.
        ConfigBook.AddPage(ConfigPanel, "General")
        ConfigBook.AddPage(ConfigPanel2, "Editor")
        ConfigBook.AddPage(ConfigPanel3, "Terminals")
        self.Bind(wx.EVT_CLOSE, self.HideMe)
        self.Hide()
        self.Centre()

    def ShowMe(self, event):
        """
        ShowMe
        Makes window visible.
        """
        self.Show(True)

    def HideMe(self, event):
        """
        HideMe
        Hides the window.
        """
        self.Hide()

    def viewLog(self, event):
        """
        viewLog
        Creates child class and the required controls to view the log
        file.
        """
        logcontent = ""  # NOTE(review): unused local; kept to avoid any change.
        if Config.GetOption("ActLog") == True:
            # NOTE(review): wx.richtext is used here but only `wx` is imported
            # above -- presumably one of the star imports pulls it in; confirm.
            logFrame = wx.Frame(None, -1, "View Log", size=(500, 500))
            panel5 = wx.Panel(logFrame)
            data = wx.richtext.RichTextCtrl(panel5, pos=(0, 0), size=(500, 500))
            data.AppendText(Log.ReadLog())
            logFrame.Centre()
            logFrame.Show()
        else:
            inform = wx.MessageDialog(None,
                                      "The Log is disabled!\
\nEnable it to view.",
                                      "Log Status", wx.OK)
            inform.ShowModal()
|
24,281 | 9968c18bbff447faaf9a66edc26b8122ae3ae3c8 | from urllib import request
from bs4 import BeautifulSoup as bs
import re #clear the dot..
import jieba #lexicon
import pandas as pd #statistics
import numpy # frequency
#wordcloud
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)
from wordcloud import WordCloud#词云包
# crawling data from webpage with url
resp = request.urlopen('https://movie.douban.com/nowplaying/guangzhou/')
html_data = resp.read().decode('utf-8')
# print(html_data)
# pip3 install bs4/BeautifulSoup
# read tags for info we need
soup = bs(html_data, 'html.parser')
nowplaying_movie = soup.find_all('div', id='nowplaying')
nowplaying_movie_list = nowplaying_movie[0].find_all('li', class_='list-item')
nowplaying_list = []
for item in nowplaying_movie_list:
    nowplaying_dict = {}
    # use alt to know the movie names
    nowplaying_dict['id'] = item['data-subject']
    for tag_img_item in item.find_all('img'):
        nowplaying_dict['name'] = tag_img_item['alt']
    nowplaying_list.append(nowplaying_dict)
# display id and name
# print(nowplaying_list)
# fetch comments for the second film in the now-playing list
# start=0 -> first page of comments, limit=20 per page
requrl = 'https://movie.douban.com/subject/' + nowplaying_list[1]['id'] + '/comments' +'?' +'start=0' + '&limit=20'
resp = request.urlopen(requrl)
html_data = resp.read().decode('utf-8')
soup = bs(html_data, 'html.parser')
comment_div_lits = soup.find_all('div', class_='comment')
# comments with format
# print(comment_div_lits)
eachCommentList = [];
for item in comment_div_lits:
    if item.find_all('p')[0].string is not None:
        eachCommentList.append(item.find_all('p')[0].string)
# only comments text
# print(eachCommentList)
# clean the data: concatenate all comments into one string
comments = ''
for k in range(len(eachCommentList)):
    comments = comments + (str(eachCommentList[k])).strip()
# print(comments)
# keep only CJK characters (drops punctuation such as '...')
pattern = re.compile(r'[\u4e00-\u9fa5]+')
filterdata = re.findall(pattern, comments)
cleaned_comments = ''.join(filterdata)
# print(cleaned_comments)
# word frequency: segment the text with the jieba lexicon
segment = jieba.lcut(cleaned_comments)
words_df=pd.DataFrame({'segment':segment})
# print out high-freq words, which has no exact meanings
# print(words_df.head())
# clear useless stopwords like 'very', download txt file online
stopwords=pd.read_csv("stop_words_zh_UTF-8.txt",index_col=False,quoting=3,sep="\t",names=['stopword'], encoding='utf-8')  # quoting=3: disable quote handling entirely
words_df=words_df[~words_df.segment.isin(stopwords.stopword)]
# print(words_df.head())
# numpy count frequency
# NOTE(review): dict-style .agg({"计数": numpy.size}) is deprecated/removed in
# newer pandas releases -- this code targets an older pandas version.
words_stat=words_df.groupby(by=['segment'])['segment'].agg({"计数":numpy.size})
words_stat=words_stat.reset_index().sort_values(by=["计数"],ascending=False)
# print(words_stat.head())
# words cloud
wordcloud=WordCloud(font_path="simhei.ttf",background_color="white",max_font_size=80)  # font file, background colour, max glyph size
word_frequence = {x[0]:x[1] for x in words_stat.head(1000).values}
# NOTE(review): word_frequence_list is built but never used below
# (fit_words consumes the dict directly).
word_frequence_list = []
for key in word_frequence:
    temp = (key,word_frequence[key])
    word_frequence_list.append(temp)
wordcloud=wordcloud.fit_words(word_frequence)
plt.imshow(wordcloud)
plt.show()
|
24,282 | cc2353683bfcd5a9178a1df9d81225ac30c074bc | from django.conf.urls import include, url, patterns
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the guru app. Old-style string view references (removed in
# Django 1.10) are kept because the file imports `patterns` and clearly
# targets that era. Fix: six patterns lacked the `^` anchor, so e.g.
# 'search$' matched ANY url ending in "search" — anchored for consistency.
urlpatterns = [
    url(r'^$', 'guru.views.home', name='home'),
    url(r'^login$', 'django.contrib.auth.views.login', {'template_name': 'guru/login.html'}, name='login'),
    url(r'^logout$', 'django.contrib.auth.views.logout_then_login', name='logout'),
    url(r'^register$', 'guru.views.register', name='register'),
    url(r'^inbox$', 'guru.views.inbox', name='inbox'),
    url(r'^reminders$', 'guru.views.reminders', name='reminders'),
    url(r'^calendar$', 'guru.views.calendar', name='calendar'),
    url(r'^myreviews$', 'guru.views.myreviews', name='myreviews'),
    url(r'^settings$', 'guru.views.settings', name='settings'),
    url(r'^searchresults$', 'guru.views.searchresults', name='searchresults'),
    url(r'^postdetails/(?P<id>\d+)$', 'guru.views.postdetails', name='postdetails'),
    # NOTE(review): the \w+ pattern below also matches digits, but the \d+
    # pattern above wins because it is listed first — confirm intended.
    url(r'^profile/(?P<id>\d+)$', 'guru.views.profile', name='profile'),
    url(r'^profile/(?P<username>\w+)$', 'guru.views.pprofile', name='pprofile'),
    url(r'^editprofile', 'guru.views.editprofile', name='editprofile'),
    url(r'^newuser$', 'guru.views.createNewUser', name='newuser'),
    url(r'^reset$', 'guru.views.reset', name='resetpasswordrender'),
    url(r'^resetpassword$', 'guru.views.resetpassword', name='resetpassword'),
    url(r'^newlisting$', 'guru.views.newListing', name='newlisting'),
    url(r'^Interest$', 'guru.views.addInterests', name='addInterests'),
    url(r'^alllistings$', 'guru.views.allListings', name='allListings'),
    url(r'^addlisting$', 'guru.views.addlisting', name='addlisting'),
    url(r'^compose$', 'guru.views.compose', name='compose'),
    url(r'^compose/(?P<id>\d+)$', 'guru.views.lcompose', name='lcompose'),
    url(r'^getusernames$', 'guru.views.getusernames', name='getusernames'),
    url(r'^sendmessage$', 'guru.views.sendmessage', name='sendmessage'),
    url(r'^sendreply$', 'guru.views.sendreply', name='sendreply'),
    url(r'^message/(?P<id>\d+)$', 'guru.views.messageExpanded', name='messageExpanded'),
    url(r'^interested/(?P<id>\d+)$', 'guru.views.interested', name='interested'),
    url(r'^uninterested/(?P<id>\d+)$', 'guru.views.uninterested', name='uninterested'),
    url(r'^activity$', 'guru.views.activity', name='activity'),
    url(r'^dismiss-request/(?P<reqInfo>\w+)$', 'guru.views.dismissRequest', name='dismissRequest'),
    url(r'^confirm-request$', 'guru.views.confirmRequest', name='confirmRequest'),
    url(r'^saveInterests$', 'guru.views.saveInterests', name='saveInterests'),
    url(r'^schedule/(?P<listingId>\d+)/(?P<guruId>\d+)$', 'guru.views.schedule', name='schedule'),
    url(r'^schedule/(?P<listingId>\d+)$', 'guru.views.studentSchedule', name='studentSchedule'),
    url(r'^add-date$', 'guru.views.add_date', name='addDate'),
    url(r'^delete-date/(?P<id>\d+)$', 'guru.views.delete_date', name='deleteDate'),
    url(r'^get-dates$', 'guru.views.get_dates', name='getDates'),
    url(r'^confirm-date$', 'guru.views.confirm_date', name='confirmDate'),
    url(r'^dismiss-sched$', 'guru.views.dismiss_sched', name='dismissSched'),
    url(r'^confirm-sched$', 'guru.views.confirm_sched', name='confirmSched'),
    url(r'^updateInterests$', 'guru.views.updateInterests', name='updateInterests'),
    url(r'^review/(?P<sessionId>\d+)$', 'guru.views.reviewSession', name='reviewSession'),
    url(r'^postReview$', 'guru.views.postReview', name='postReview'),
    url(r'^getcategories$', 'guru.views.getcategories', name='getcategories'),
    url(r'^sendtext$', 'guru.views.sendtext', name='sendtext'),
    url(r'^search$', 'guru.views.search', name='search'),
    url(r'^get-relevantInterest$', 'guru.views.get_relevantInterest', name='getrelevantInterest'),
]
# Serve uploaded media through Django itself in development only.
if settings.DEBUG:
    urlpatterns += patterns('', (r'^media/(?P<path>.*)$', 'django.views.static.serve', {
        'document_root': settings.MEDIA_ROOT}))
|
24,283 | 192262ede9b47ef393b039f22d485fc3d81d39fb | # urls for submission
from django.conf.urls import url
from . import views
# URL routes for the submission app.
# Fix: the edit/delete patterns had no '^' anchor, so they matched anywhere
# in the URL path; anchored to match the other routes.
urlpatterns = [
    url(r'^new/(?P<project_id>[0-9]+)/$', views.new_submission, name='new_submission'),
    url(r'^show/(?P<submission_id>[0-9]+)/$', views.show_submission, name='show_submission'),
    url(r'^(?P<submission_id>[0-9]+)/add_new_comment/', views.add_new_comment, name='add_new_comment'),
    url(r'^(?P<sub_id>[0-9]+)/edit_comment/(?P<comment_id>[0-9]+)$', views.edit_comment, name='edit_comment'),
    url(r'^(?P<sub_id>[0-9]+)/delete_comment/(?P<comment_id>[0-9]+)$', views.delete_comment, name='delete_comment'),
]
|
24,284 | e4d1356a717ab1594cbbcea92382acfd19b71a6c | import random
# Print one random integer in the inclusive range [100, 500].
print(random.randint(100,500))
|
24,285 | 8e6adfc5e07e61b5d7de520f38f0c63e23df1e5b | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: create the ``database_files_file`` table.

    Stores file blobs in the database: unique indexed name, size, text
    content (``content`` column) and an optional content hash for
    deduplication / integrity checks.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255, db_index=True)),
                ('size', models.PositiveIntegerField(db_index=True)),
                ('_content', models.TextField(db_column='content')),
                ('created_datetime', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created datetime', db_index=True)),
                ('_content_hash', models.CharField(db_index=True, max_length=128, null=True, db_column='content_hash', blank=True)),
            ],
            options={
                'db_table': 'database_files_file',
            },
        ),
    ]
|
24,286 | fd847b5746a7d2873926886ba1cf5c40b334d11c | from .nonce import NonceManager
|
def fun():
    """Read a number from stdin and print how many characters long it is."""
    a = input("请输入一个整数")
    # NOTE(review): len() counts every character, so a leading '-'/'+' sign
    # would be counted as a digit — confirm input is a plain positive integer.
    l = len(a)
    print("这是一个", l, "位数")


fun()
class Solution:
    def findUnsortedSubarray(self, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray which,
        if sorted, makes the whole array sorted (LeetCode 581).

        Two sweeps find the min/max of the "disordered" region, then two
        scans locate the first/last element out of place. O(n) time.
        """
        # Fix: with fewer than 2 elements the scan loops never run, leaving
        # `l`/`r` unbound (NameError on empty input in the original).
        if len(nums) < 2:
            return 0
        mini = math.inf
        maxi = -math.inf
        flag = False
        # Forward sweep: mini = smallest value seen after the first descent.
        for i in range(1, len(nums)):
            if nums[i] < nums[i - 1]:
                flag = True
            if flag:
                mini = min(mini, nums[i])
        flag = False
        # Backward sweep: maxi = largest value seen after the last ascent
        # (looking right-to-left).
        for i in reversed(range(len(nums) - 1)):
            if nums[i] > nums[i + 1]:
                flag = True
            if flag:
                maxi = max(maxi, nums[i])
        # l = first index whose value exceeds mini (must move right).
        for l in range(len(nums)):
            if nums[l] > mini:
                break
        # r = last index whose value is below maxi (must move left).
        for r, num in reversed(list(enumerate(nums))):
            if num < maxi:
                break
        return 0 if l >= r else r - l + 1
|
24,289 | 1006239a71cd89a6d38affac79173de93333a7b0 | from distance_movetime import space_calc
from dataaccess import DataAccess
import numpy as np
# Flatten the distances returned by DataAccess into a 5x3 matrix and print
# one element as a smoke check.
da = DataAccess()
x = da.get_distance()
a = np.array([])
for i in x:
    # append each row's values to the flat accumulator
    a = np.append(a, i, axis=0)
# assumes get_distance() yields exactly 15 values — TODO confirm
a = a.reshape(5, 3)
print(a[1, 0])
|
24,290 | 29949dc77b9292eb670e97765afd5a4a6f5a4211 | from setuptools import setup
setup(
    # package identity
    name='django_editorjs_parser',
    version='0.1.5',
    license='MIT',
    description='Parser for clean-blocks used by editor-js written in python',
    # author and project links
    author='giovkast',
    author_email='giovkast@gmail.com',
    url='https://github.com/giokast/python_editorjs_parser',
    download_url='https://github.com/giokast/django_editorjs_parser/archive/refs/tags/0.1.5.tar.gz',
    keywords='',
    # package contents — no runtime dependencies
    packages=['django_editorjs_parser'],
    py_modules=['django_editorjs_parser.src'],
    install_requires=[
    ],
    # PyPI trove classifiers: beta status, developer audience, MIT, py3.4-3.6
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    zip_safe=False,
)
24,291 | 77fbeea1e87308852ac9e0f7fac4968f15842bf7 | import sys
import os
import argparse
import numpy as np
import random
import PIL.Image as Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.models as models
import logging
from myDataset import myDataset
# Restrict training to GPU 1 only.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# ids = [0,1]
# Command-line options for the MIL tile-classifier training run.
parser = argparse.ArgumentParser(description='MIL-nature-medicine-2019 tile classifier training script')
parser.add_argument('--train_lib', type=str, default='', help='path to train MIL library binary')
parser.add_argument('--valid', type=bool, default=True, help='path to validation MIL library binary. If present.')
parser.add_argument('--output', type=str, default='./', help='name of output file')
parser.add_argument('--batch_size', type=int, default=256, help='mini-batch size (default: 512)')
parser.add_argument('--nepochs', type=int, default=100, help='number of epochs')
parser.add_argument('--workers', default=2, type=int, help='number of data loading workers (default: 4)')
parser.add_argument('--test_every', default=2, type=int, help='test on val every (default: 10)')
parser.add_argument('--weights', default=0.5, type=float, help='unbalanced positive class weight (default: 0.5, balanced classes)')
parser.add_argument('--k', default=1, type=int, help='top k tiles are assumed to be of the same class as the slide (default: 1, standard MIL)')
# File logger: INFO-level records appended to ./log.txt with timestamps.
logger = logging.getLogger(__name__)
logger.setLevel(level = logging.INFO)
handler = logging.FileHandler("./log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# Best validation balanced accuracy so far; updated inside main().
best_acc = 0
def main():
    """Train the MIL tile classifier.

    Each epoch: run inference over all tiles, keep the top-k most probable
    tiles per slide as training instances, train one epoch on them, and
    periodically validate at slide level, checkpointing the best model.
    """
    global args, best_acc
    args = parser.parse_args()
    # resnet-34 backbone; could be swapped for a lighter model
    model = models.resnet34(True)
    model.fc = nn.Linear(model.fc.in_features, 2)  # two-class head
    model.cuda()
    device_ids = range(torch.cuda.device_count())
    print(device_ids)
    # multi-GPU training when several devices are visible
    if len(device_ids) > 1:
        model = torch.nn.DataParallel(model)
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)
    cudnn.benchmark = True
    # input normalization
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.1, 0.1, 0.1])
    trans = transforms.Compose([transforms.ToTensor(), normalize])
    # data loaders; shuffle=False so batch order matches slideIDX bookkeeping
    train_dset = myDataset(csv_path='./coords/G_TwoTypes_Train.csv', transform=trans)
    train_loader = torch.utils.data.DataLoader(
        train_dset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=False)
    val_dset = myDataset(csv_path='./coords/G_TwoTypes_Test.csv', transform=trans)
    val_loader = torch.utils.data.DataLoader(
        val_dset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=False)
    # convergence log header
    fconv = open(os.path.join(args.output, 'convergence.csv'), 'w')
    fconv.write('epoch,metric,value\n')
    fconv.close()
    # loop through epochs
    for epoch in range(args.nepochs):
        # inference mode: probability for every tile in the training set
        train_dset.setmode(1)
        probs = inference(epoch, train_loader, model)
        print(probs.shape)
        # keep the k most probable tiles per slide (slide label -> tile label)
        topk = group_argtopk(np.array(train_dset.slideIDX), probs, args.k)
        train_dset.maketraindata(topk)
        train_dset.shuffletraindata()
        # training mode
        train_dset.setmode(2)
        loss = train(epoch, train_loader, model, criterion, optimizer)
        print('Training\tEpoch: [{}/{}]\tLoss: {}'.format(epoch+1, args.nepochs, loss))
        logger.info('Training\tEpoch: [{}/{}]\tLoss: {}'.format(epoch+1, args.nepochs, loss))
        fconv = open(os.path.join(args.output, 'convergence.csv'), 'a')
        fconv.write('{},loss,{}\n'.format(epoch+1, loss))
        fconv.close()
        torch.save(model.state_dict(), os.path.join(args.output, 'G_current_checkpoint.pth'))
        # periodic slide-level validation
        if (epoch) % args.test_every == 0:
            val_dset.setmode(1)
            probs = inference(epoch, val_loader, model)
            nan_num = np.isnan(probs).sum()
            if nan_num > 0:
                logger.info('NaN is in probs')
                print('######################################################################################')
            # slide prediction = max tile probability, thresholded at 0.5
            maxs = group_max(np.array(val_dset.slideIDX), probs, len(val_dset.targets))
            pred = [1 if x >= 0.5 else 0 for x in maxs]
            err, fpr, fnr = calc_err(pred, val_dset.targets)
            print('Validation\tEpoch: [{}/{}]\tError: {}\tFPR: {}\tFNR: {}'.format(epoch + 1, args.nepochs, err, fpr,
                                                                                   fnr))
            fconv = open(os.path.join(args.output, 'convergence.csv'), 'a')
            fconv.write('{},error,{}\n'.format(epoch + 1, err))
            fconv.write('{},fpr,{}\n'.format(epoch + 1, fpr))
            fconv.write('{},fnr,{}\n'.format(epoch + 1, fnr))
            fconv.close()
            # checkpoint best model by balanced accuracy (1 - mean(fpr, fnr))
            err = (fpr + fnr) / 2.
            if 1 - err >= best_acc:
                best_acc = 1 - err
                obj = {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict()
                }
                torch.save(obj, os.path.join(args.output, 'G_checkpoint_best.pth'))
def inference(run, loader, model):
    """Return the positive-class probability for every tile served by *loader*.

    Output is a 1-D numpy array aligned with the dataset's tile order.
    """
    model.eval()
    probs = torch.FloatTensor(len(loader.dataset))
    with torch.no_grad():
        for i, input in enumerate(loader):
            print('Inference\tEpoch: [{}/{}]\tBatch: [{}/{}]'.format(run+1, args.nepochs, i+1, len(loader)))
            input = input.cuda()
            output = F.softmax(model(input), dim=1)
            # column 1 = positive-class probability
            probs[i*args.batch_size:i*args.batch_size+input.size(0)] = output.detach()[:,1].clone()
    return probs.cpu().numpy()
def train(run, loader, model, criterion, optimizer):
    """Run one training epoch; return the dataset-averaged loss."""
    model.train()
    running_loss = 0.
    for i, (input, target) in enumerate(loader):
        input = input.cuda()
        target = target.cuda()
        output = model(input)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # weight the batch loss by batch size for a proper dataset average
        running_loss += loss.item()*input.size(0)
    return running_loss/len(loader.dataset)
def calc_err(pred, real):
    """Return (error rate, false-positive rate, false-negative rate).

    *pred* and *real* are parallel sequences of 0/1 labels. FPR divides by
    the number of true negatives, FNR by the number of true positives.
    """
    pred = np.array(pred)
    real = np.array(real)
    neq = np.not_equal(pred, real)
    err = float(neq.sum()) / pred.shape[0]
    fpr = float(np.logical_and(pred == 1, neq).sum()) / (real == 0).sum()
    fnr = float(np.logical_and(pred == 0, neq).sum()) / (real == 1).sum()
    return err, fpr, fnr
def group_argtopk(groups, data, k=1):
    """Return indices of the *k* largest values of *data* within each group.

    *groups* and *data* are parallel numpy arrays; groups need not be
    pre-sorted (lexsort orders by group, then value).
    """
    order = np.lexsort((data, groups))
    groups = groups[order]
    data = data[order]
    index = np.empty(len(groups), 'bool')
    # the last k entries of each group's sorted run are its top-k
    index[-k:] = True
    index[:-k] = groups[k:] != groups[:-k]
    return list(order[index])
def group_max(groups, data, nmax):
    """Return an array of length *nmax* with the max of *data* per group id.

    Groups with no entries are left as NaN; group ids index the output.
    """
    out = np.empty(nmax)
    out[:] = np.nan
    order = np.lexsort((data, groups))
    groups = groups[order]
    data = data[order]
    index = np.empty(len(groups), 'bool')
    # the last entry of each group's sorted run is its maximum
    index[-1] = True
    index[:-1] = groups[1:] != groups[:-1]
    out[groups[index]] = data[index]
    return out
# Script entry point.
if __name__ == '__main__':
    main()
|
24,292 | b9d761fba0912c5d1507ae5c408a0c92d9a48806 |
from cv2 import cv2
from threading import Thread
import playsound
import Apu
def avaaKamera():
    """Open the webcam, draw rectangles around detected faces and smiles,
    and forward smile detections to Apu.hoidaIlo; 'q' quits."""
    # Haar cascade models for faces and smiles
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')
    cap = cv2.VideoCapture(0)
    while (True):
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 2.1, 4)
        smiles = smile_cascade.detectMultiScale(gray, 3.5, 20)
        # hand smile handling off to the helper module
        Apu.hoidaIlo(smiles)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        for (x, y, w, h) in smiles:
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cv2.imshow('kuva', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
# Script entry point.
if __name__ == "__main__":
    avaaKamera()
|
24,293 | 38da4fffb8be93fee96e8eb4ea68bfd9db4008e1 | import numpy as np
import scipy.stats
from pgfa.math_utils import discrete_rvs, do_metropolis_hastings_accept_reject, log_normalize, log_sum_exp
def update_precision(model, variance=1):
    """Metropolis-Hastings update of the model precision.

    Proposes from a gamma moment-matched to the current value (mean=current,
    variance=*variance*); the reverse proposal is matched to the proposed
    value, giving the correct asymmetric-proposal correction.
    """
    old = model.params.precision
    a_new, b_new = get_gamma_params(old, variance)
    new = scipy.stats.gamma.rvs(a_new, scale=(1 / b_new))
    a_old, b_old = get_gamma_params(new, variance)
    # evaluate joint under the proposal ...
    model.params.precision = new
    log_p_new = model.joint_dist.log_p(model.data, model.params)
    log_q_new = scipy.stats.gamma.logpdf(new, a_new, scale=(1 / b_new))
    # ... and under the current value
    model.params.precision = old
    log_p_old = model.joint_dist.log_p(model.data, model.params)
    log_q_old = scipy.stats.gamma.logpdf(old, a_old, scale=(1 / b_old))
    if do_metropolis_hastings_accept_reject(log_p_new, log_p_old, log_q_new, log_q_old):
        model.params.precision = new
    else:
        model.params.precision = old
def update_V(model, variance=1):
    """Elementwise MH update of V with moment-matched gamma proposals.

    Visits every (k, d) entry in random order; each entry gets an
    independent accept/reject step against the data likelihood plus the
    gamma prior given by ``model.params.V_prior``.
    """
    params = model.params.copy()
    a_prior, b_prior = model.params.V_prior
    Ds = np.random.permutation(model.params.D)
    Ks = np.random.permutation(model.params.K)
    for d in Ds:
        for k in Ks:
            old = params.V[k, d]
            a, b = get_gamma_params(old, variance)
            new = scipy.stats.gamma.rvs(a, scale=(1 / b))
            # likelihood + prior + forward proposal density at the new value
            params.V[k, d] = new
            log_p_new = model.data_dist.log_p(model.data, params)
            log_p_new += scipy.stats.gamma.logpdf(new, a_prior, scale=(1 / b_prior))
            log_q_new = scipy.stats.gamma.logpdf(new, a, scale=(1 / b))
            # reverse proposal is matched to the new value
            a, b = get_gamma_params(new, variance)
            params.V[k, d] = old
            log_p_old = model.data_dist.log_p(model.data, params)
            log_p_old += scipy.stats.gamma.logpdf(old, a_prior, scale=(1 / b_prior))
            log_q_old = scipy.stats.gamma.logpdf(old, a, scale=(1 / b))
            if do_metropolis_hastings_accept_reject(log_p_new, log_p_old, log_q_new, log_q_old):
                params.V[k, d] = new
            else:
                params.V[k, d] = old
    model.params = params
def update_V_perm(model):
    """MH move that proposes a random permutation of each column of V.

    The permutation proposal is symmetric, so the proposal terms cancel
    (both passed as 0).
    """
    params = model.params.copy()
    for d in np.random.permutation(model.params.D):
        old = params.V[:, d].copy()
        new = params.V[np.random.permutation(params.K), d]
        params.V[:, d] = new
        log_p_new = model.data_dist.log_p(model.data, params)
        params.V[:, d] = old
        log_p_old = model.data_dist.log_p(model.data, params)
        if do_metropolis_hastings_accept_reject(log_p_new, log_p_old, 0, 0):
            params.V[:, d] = new
        else:
            params.V[:, d] = old
    model.params = params
def update_V_random_grid_pairwise(model, num_points=10):
    """Random-grid MH move on two randomly chosen rows of V.

    Proposes *num_points* equally spaced points along a random direction
    from the current value, samples one proportionally to sqrt(step) times
    its normalized probability, and accepts via a sum-over-grid MH ratio.
    Bails out early when every grid point has -inf/NaN probability or the
    categorical draw fails.
    """
    if model.params.K < 2:
        return
    ka, kb = np.random.choice(model.params.K, 2, replace=False)
    params = model.params.copy()
    old = params.V[[ka, kb]].flatten()
    D = params.D
    dim = 2 * D
    # random unit direction and gamma-distributed step size
    e = scipy.stats.multivariate_normal.rvs(np.zeros(dim), np.eye(dim))
    e /= np.linalg.norm(e)
    r = scipy.stats.gamma.rvs(1, 1)
    grid = np.arange(1, num_points + 1)
    # forward grid: old + i*r*e for i = 1..num_points
    ys = old[np.newaxis, :] + grid[:, np.newaxis] * r * e[np.newaxis, :]
    log_p_new = np.zeros(num_points)
    for i in range(num_points):
        params.V[[ka, kb]] = ys[i].reshape((2, D))
        log_p_new[i] = model.joint_dist.log_p(model.data, params)
    if np.all(np.isneginf(log_p_new)) or np.any(np.isnan(log_p_new)):
        return
    try:
        idx = discrete_rvs(np.exp(0.5 * np.log(grid) + log_normalize(log_p_new)))
    except ValueError:
        return
    new = ys[idx]
    # reverse grid: new - i*r*e
    xs = new[np.newaxis, :] - grid[:, np.newaxis] * r * e[np.newaxis, :]
    log_p_old = np.zeros(num_points)
    for i in range(num_points):
        params.V[[ka, kb]] = xs[i].reshape((2, D))
        log_p_old[i] = model.joint_dist.log_p(model.data, params)
    if do_metropolis_hastings_accept_reject(log_sum_exp(log_p_new), log_sum_exp(log_p_old), 0, 0):
        params.V[[ka, kb]] = new.reshape((2, D))
    else:
        params.V[[ka, kb]] = old.reshape((2, D))
    model.params = params
def update_V_random_grid(model, num_points=10):
    """Random-grid MH move on the full V matrix.

    Same scheme as the pairwise variant but moves all K rows at once.
    NOTE(review): unlike the pairwise version this has no -inf/NaN or
    ValueError guards around the categorical draw — confirm intended.
    """
    if model.params.K < 2:
        return
    params = model.params.copy()
    old = params.V.flatten()
    K, D = params.V.shape
    dim = K * D
    # random unit direction and gamma-distributed step size
    e = scipy.stats.multivariate_normal.rvs(np.zeros(dim), np.eye(dim))
    e /= np.linalg.norm(e)
    r = scipy.stats.gamma.rvs(1, 1)
    grid = np.arange(1, num_points + 1)
    ys = old[np.newaxis, :] + grid[:, np.newaxis] * r * e[np.newaxis, :]
    log_p_new = np.zeros(num_points)
    for i in range(num_points):
        params.V = ys[i].reshape((K, D))
        log_p_new[i] = model.joint_dist.log_p(model.data, params)
    idx = discrete_rvs(np.exp(0.5 * np.log(grid) + log_normalize(log_p_new)))
    new = ys[idx]
    xs = new[np.newaxis, :] - grid[:, np.newaxis] * r * e[np.newaxis, :]
    log_p_old = np.zeros(num_points)
    for i in range(num_points):
        params.V = xs[i].reshape((K, D))
        log_p_old[i] = model.joint_dist.log_p(model.data, params)
    if do_metropolis_hastings_accept_reject(log_sum_exp(log_p_new), log_sum_exp(log_p_old), 0, 0):
        params.V = new.reshape((K, D))
    else:
        params.V = old.reshape((K, D))
    model.params = params
def update_V_block(model, variance=1):
    """Row-wise blocked MH update of V.

    For each row k (random order), proposes a whole new row elementwise
    from moment-matched gammas and accepts/rejects the row as one block.
    """
    params = model.params.copy()
    a_prior, b_prior = model.params.V_prior
    for k in np.random.permutation(model.params.K):
        old = params.V[k].copy()
        new = np.zeros(params.D)
        log_p_new = 0
        log_q_new = 0
        log_p_old = 0
        log_q_old = 0
        for d in range(model.params.D):
            # forward proposal matched to the old value
            a, b = get_gamma_params(old[d], variance)
            new[d] = scipy.stats.gamma.rvs(a, scale=(1 / b))
            log_p_new += scipy.stats.gamma.logpdf(new[d], a_prior, scale=(1 / b_prior))
            log_q_new += scipy.stats.gamma.logpdf(new[d], a, scale=(1 / b))
            # reverse proposal matched to the new value
            a, b = get_gamma_params(new[d], variance)
            log_p_old += scipy.stats.gamma.logpdf(old[d], a_prior, scale=(1 / b_prior))
            log_q_old += scipy.stats.gamma.logpdf(old[d], a, scale=(1 / b))
        params.V[k] = new
        log_p_new += model.data_dist.log_p(model.data, params)
        params.V[k] = old
        log_p_old += model.data_dist.log_p(model.data, params)
        if do_metropolis_hastings_accept_reject(log_p_new, log_p_old, log_q_new, log_q_old):
            params.V[k] = new
        else:
            params.V[k] = old
    model.params = params
def update_V_block_dim(model, variance=1):
    """Column-wise blocked MH update of V (mirror of update_V_block)."""
    params = model.params.copy()
    a_prior, b_prior = model.params.V_prior
    for d in np.random.permutation(model.params.D):
        old = params.V[:, d].copy()
        new = np.zeros(params.K)
        log_p_new = 0
        log_q_new = 0
        log_p_old = 0
        log_q_old = 0
        for k in range(model.params.K):
            # forward proposal matched to the old value
            a, b = get_gamma_params(old[k], variance)
            new[k] = scipy.stats.gamma.rvs(a, scale=(1 / b))
            log_p_new += scipy.stats.gamma.logpdf(new[k], a_prior, scale=(1 / b_prior))
            log_q_new += scipy.stats.gamma.logpdf(new[k], a, scale=(1 / b))
            # reverse proposal matched to the new value
            a, b = get_gamma_params(new[k], variance)
            log_p_old += scipy.stats.gamma.logpdf(old[k], a_prior, scale=(1 / b_prior))
            log_q_old += scipy.stats.gamma.logpdf(old[k], a, scale=(1 / b))
        params.V[:, d] = new
        log_p_new += model.data_dist.log_p(model.data, params)
        params.V[:, d] = old
        log_p_old += model.data_dist.log_p(model.data, params)
        if do_metropolis_hastings_accept_reject(log_p_new, log_p_old, log_q_new, log_q_old):
            params.V[:, d] = new
        else:
            params.V[:, d] = old
    model.params = params
def get_gamma_params(mean, variance):
    """Return gamma shape/rate (a, b) with the given mean and variance.

    For Gamma(a, rate=b): mean = a/b and variance = a/b**2, so
    b = mean/variance and a = b*mean.
    """
    b = mean / variance
    a = b * mean
    return a, b
|
24,294 | e00afb9dc3333f7cf6dc60d57cd964096ba95acb | import discord
from vars.var import *
from vars.wepembed import *
# Help embeds shown by the bot's info commands. Each command gets one
# green "Commands Information" embed with its Italian description.
# NOTE(review): every add_field uses name='Comando reset' — likely a
# copy-paste leftover for the rank/f/vrole embeds; user-facing text left
# unchanged pending confirmation.
reset_value = f"Con questo comando comando si resettano i nickname di tutti i presenti nel server, riportandoli al loro nome usato dalla account \n esempio: \n nome account: \u200b \u200b \u200b \u200b \u200b \u200b \u200b \u200b nickname nel server:\n> Hik#9778 \u200b \u200b \u200b \u200b \u200b \u200b \u200b \u200b \u200b \u200b \u200b Izalith\n dopo il comando `.reset` il nick sul server diventerà: \n> Hik"
embed_reset_info = discord.Embed(title="Commands Information", color=discord.Color.green(), inline=True)
embed_reset_info.set_author(name="IMMORTAL BOT\n", icon_url=img)
embed_reset_info.add_field(name='Comando reset', value=reset_value)

rank_value = f"Con questo comando viene aggiornato il prefisso nel nickname basandosi sul ruolo corrente\n esempio:\nnickname: `[X]Hik`, ruolo:`[VII] Veterano` \n dopo il comando il nickname diventerà \n > [VII] Hik "
embed_rank_info = discord.Embed(title="Commands Information", color=discord.Color.green(), inline=True)
embed_rank_info.set_author(name="IMMORTAL BOT\n", icon_url=img)
embed_rank_info.add_field(name='Comando reset', value=rank_value)

f_value = f"Con questo comando si possono avere info sull'arma richiesta\n esempio:\n> `.f lancia`\n \u200b \nPer ulteriori informazioni scrivere\n> `.f info`"
embed_f_info = discord.Embed(title="Commands Information", color=discord.Color.green(), inline=True)
embed_f_info.set_author(name="IMMORTAL BOT\n", icon_url=img)
embed_f_info.add_field(name='Comando reset', value=f_value)

vrole_value = f"Con questo comando si può vedere la lista membri di un determinato ruolo\n esempio:\n> `.vrole`"
embed_vrole_info = discord.Embed(title="Commands Information", color=discord.Color.green(), inline=True)
embed_vrole_info.set_author(name="IMMORTAL BOT\n", icon_url=img)
# Bug fix: this embed used rank_value (copy-paste), showing the rank help
# text on the vrole command; it now uses its own vrole_value.
embed_vrole_info.add_field(name='Comando reset', value=vrole_value)
24,295 | 6fa1807fc28d8351676a139163f56b02de7bf2cb |
def vogal(letra):
    """Return True if *letra* is an unaccented vowel (either case), else False.

    Bug fix: the original used chained `or`s over string literals
    (`letra == 'A' or 'E' or ...`); every non-empty string literal is
    truthy, so the first condition was always True and the function
    returned True for ANY input.
    """
    return letra in 'AEIOUaeiou'
|
24,296 | 2eade48e15aa682cbcd17f209ac083caef1aa2be | # Latihan tanggal 16 Desember 2020
# Case 2:
# Kita membuat dua fungsi, dimana fungsi pertama adalah untuk mengecek apakah
# angka itu dapat dibagi oleh suatu angka (hasilnya true atau false)
# Sample inputs for the divisibility / exponent exercise below.
angka_1 = 15
angka_2 = 6
angka_3 = 2
def pembagian_dua_angka(angka_1, angka_2):
    """Return "TRUE" if angka_1 is evenly divisible by angka_2, else "FALSE".

    Returns string flags (not booleans) because the companion function
    proses_pangkat compares against the literal "TRUE".
    """
    if angka_1 % angka_2 == 0:
        hasil = "TRUE"
    else:
        hasil = "FALSE"
    return hasil
# Check whether angka_1 is divisible by angka_2 ("TRUE"/"FALSE" string).
hasil_bagi = pembagian_dua_angka(angka_1, angka_2)
print(hasil_bagi)
def proses_pangkat(hasil_bagi, angka_1, angka_3):
    """Return angka_1 ** angka_3 when *hasil_bagi* is the flag "TRUE".

    Otherwise returns an explanatory message string (the earlier
    divisibility check failed).
    """
    if hasil_bagi == "TRUE":
        hasil_pangkat = angka_1 ** angka_3
    else:
        hasil_pangkat = "Angka_1 tidak dapat dibagi dengan Angka_2"
    return hasil_pangkat
# Only exponentiates when the divisibility check above returned "TRUE".
hasil_pemangkatan = proses_pangkat(hasil_bagi, angka_1, angka_3)
print(hasil_pemangkatan)
|
def negate(x):
    """Return the additive inverse of x."""
    return -x
|
24,298 | e68e9e6d2718ffe40901cc4dbcedad41dc89b905 | import HPC_Paths as p
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import mir_eval
import librosa
import os
import sox
from mir_eval import util
def onset(unit, start_time, fmin_input, h_length, feature_input):
    """Compute an onset-strength envelope for one trimmed audio segment
    and save it to the unit's detections directory.

    The end time is derived from hard-coded per-unit segment boundaries;
    *feature_input* selects STFT or mel-spectrogram features.
    """
    start_time = int(start_time)
    if start_time == 0:
        end_time = 605
    # last start_time for units 1, 2, 3, 5, 7
    elif start_time == 38995:
        if unit == 1:
            end_time = 39285
        elif unit == 2:
            end_time = 39301
        elif unit == 3:
            end_time = 39355
        elif unit == 5:
            end_time = 39356
        elif unit == 7:
            end_time = 39295
    # last start_time for unit 10
    elif start_time == 29395 and unit == 10:
        end_time = 29483
    else:
        end_time = start_time + 610
    duration_length = end_time - start_time
    file = p.get_trimmed_audio(unit) + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + ".wav"
    y, sr = librosa.load(file, duration=duration_length)
    if feature_input == librosa.stft:
        S = feature_input(y, hop_length=h_length, n_fft=2 * h_length)
    elif feature_input == librosa.feature.melspectrogram:
        S = feature_input(y, sr=sr, hop_length=h_length, fmin=fmin_input)
    # NOTE(review): applied to both feature types in the flattened original —
    # confirm whether log-amplitude was meant for the mel branch only.
    S = librosa.logamplitude(S, ref=1.0)
    onset_env = librosa.onset.onset_strength(S=S)
    saveFile = p.get_detections(unit) + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + "_detections"
    # NOTE(review): checkFile is computed but never used — dead path?
    checkFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + "_detections.npy"
    np.save(saveFile, onset_env)
def peak_picking(unit, start_time, hop_duration):
    """Find local maxima in a saved onset envelope and save (value, time)
    pairs; times are frame index times *hop_duration* seconds."""
    start_time = int(start_time)
    if start_time == 0:
        end_time = 605
    # last start_time for units 1, 2, 3, 5, 7
    elif start_time == 38995:
        if unit == 1:
            end_time = 39285
        elif unit == 2:
            end_time = 39301
        elif unit == 3:
            end_time = 39355
        elif unit == 5:
            end_time = 39356
        elif unit == 7:
            end_time = 39295
    # last start_time for unit 10
    elif start_time == 29395 and unit == 10:
        end_time = 29483
    else:
        end_time = start_time + 610
    checkFile = p.get_filtered_detections(unit) + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + "_detections.npy"
    li = np.load(checkFile)
    peaks = []
    saveFile = p.get_peaks(unit) + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + "_peaks"
    for i in range(len(li)):
        # first sample: peak if the next value is lower
        if i - 1 < 0:
            if li[i + 1] < li[i]:
                peaks.append((li[i], (i * hop_duration)))
        # last sample: peak if the previous value is lower
        if (i + 1) == len(li):
            if li[i - 1] < li[i]:
                peaks.append((li[i], (i * hop_duration)))
        # interior samples: strictly greater than both neighbours.
        # NOTE(review): `i - 1 > 0` skips index 1 entirely (should
        # arguably be i >= 1) — behavior preserved, confirm intent.
        if (i - 1 > 0) and ((i + 1) != len(li)) and (li[i - 1] < li[i]) and (li[i + 1] < li[i]):
            peaks.append((li[i], (i * hop_duration)))
    np.save(saveFile, peaks)
def threshold(unit, start_time, thresh):
    """Score thresholded peak detections against ground-truth onsets.

    Keeps peaks with strength >= *thresh*, matches their times to the
    annotation file within a 50 ms window, and returns the raw
    (true positive, false positive, false negative) counts.
    """
    start_time = int(start_time)
    if start_time == 0:
        end_time = 605
    # last start_time for units 1, 2, 3, 5, 7
    elif start_time == 38995:
        if unit == 1:
            end_time = 39285
        elif unit == 2:
            end_time = 39301
        elif unit == 3:
            end_time = 39355
        elif unit == 5:
            end_time = 39356
        elif unit == 7:
            end_time = 39295
    # last start_time for unit 10
    elif start_time == 29395 and unit == 10:
        end_time = 29483
    else:
        end_time = start_time + 610
    checkFile = p.get_peaks(unit) + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + "_peaks.npy"
    threshold_peaks = []
    predicted = []
    groundValues = []
    # each saved peak is a (strength, time) pair
    for i in np.load(checkFile):
        if i[0] >= thresh:
            threshold_peaks.append(i)
            predicted.append(i[1])
    # ground-truth onset times, one float per line
    truth = p.get_trimmed_annotation(unit) + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + ".txt"
    for line in open(truth, 'r'):
        line = line.strip('\n')
        line = float(line)
        groundValues.append(line)
    groundValues = np.array(groundValues)
    predicted = np.array(predicted)
    # match within a 0.05 s tolerance window
    Tp = float(len(util.match_events(groundValues, predicted, 0.05)))
    Fp = float(len(predicted)) - float(len(util.match_events(groundValues, predicted, 0.05)))
    Fn = float(len(groundValues)) - float(len(util.match_events(groundValues, predicted, 0.05)))
    return Tp, Fp, Fn
|
24,299 | ab839ce960109a2e232691ae673768e81fd44805 | import shutil
import numpy as np
from cloudvolume.lib import generate_random_string
from chunkflow.lib.cartesian_coordinate import BoundingBox, Cartesian
from chunkflow.volume import PrecomputedVolume
def test_volume():
    """Round-trip test: save a random uint8 image as a PrecomputedVolume,
    cut out an interior bounding box, and check offset and contents."""
    print('test volume cutout...')
    # volume size in (z, y, x)
    size = (36, 448, 448)
    img = np.random.randint(0, 256, size=size)
    img = img.astype(np.uint8)
    # save the input to disk under a random path
    volume_path = 'file:///tmp/test/volume/' + \
        generate_random_string()
    vol = PrecomputedVolume.from_numpy(
        img,
        volume_path
    )
    # cut out a centered interior region
    offset = Cartesian(4, 64, 64)
    shape = (28, 320, 320)
    bbox = BoundingBox.from_delta(offset, shape)
    chunk = vol.cutout(bbox)
    assert offset == chunk.voxel_offset
    np.testing.assert_array_equal(chunk, img[4:-4, 64:-64, 64:-64])
    # clean up the temporary volume
    shutil.rmtree('/tmp/test')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.