index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,600 | d16c1132d1f6f2bc75bff7fe745f09a6a4762da0 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: notification_entry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import client_state_pb2 as client__state__pb2
import notification_data_pb2 as notification__data__pb2
# Generated FileDescriptor for notification_entry.proto (protoc output).
# Do not hand-edit; regenerate with the protocol buffer compiler.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='notification_entry.proto',
  package='notifications.proto',
  syntax='proto2',
  serialized_options=_b('H\003'),
  # serialized_pb: the compiled FileDescriptorProto for this .proto file.
  serialized_pb=_b('\n\x18notification_entry.proto\x12\x13notifications.proto\x1a\x12\x63lient_state.proto\x1a\x17notification_data.proto\"\x80\x01\n\x0eScheduleParams\x12>\n\x08priority\x18\x01 \x01(\x0e\x32,.notifications.proto.ScheduleParams.Priority\".\n\x08Priority\x12\x07\n\x03LOW\x10\x00\x12\x08\n\x04HIGH\x10\x01\x12\x0f\n\x0bNO_THROTTLE\x10\x02\"\xee\x01\n\x11NotificationEntry\x12\x36\n\x04type\x18\x01 \x01(\x0e\x32(.notifications.proto.SchedulerClientType\x12\x0c\n\x04guid\x18\x02 \x01(\t\x12\x13\n\x0b\x63reate_time\x18\x03 \x01(\x03\x12@\n\x11notification_data\x18\x04 \x01(\x0b\x32%.notifications.proto.NotificationData\x12<\n\x0fschedule_params\x18\x05 \x01(\x0b\x32#.notifications.proto.ScheduleParamsB\x02H\x03')
  ,
  dependencies=[client__state__pb2.DESCRIPTOR,notification__data__pb2.DESCRIPTOR,])
# Generated EnumDescriptor for ScheduleParams.Priority — do not hand-edit.
_SCHEDULEPARAMS_PRIORITY = _descriptor.EnumDescriptor(
  name='Priority',
  full_name='notifications.proto.ScheduleParams.Priority',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='LOW', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HIGH', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='NO_THROTTLE', index=2, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  # Byte offsets of this enum inside DESCRIPTOR.serialized_pb.
  serialized_start=177,
  serialized_end=223,
)
_sym_db.RegisterEnumDescriptor(_SCHEDULEPARAMS_PRIORITY)
# Generated Descriptor for the ScheduleParams message — do not hand-edit.
_SCHEDULEPARAMS = _descriptor.Descriptor(
  name='ScheduleParams',
  full_name='notifications.proto.ScheduleParams',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # optional .ScheduleParams.Priority priority = 1 (enum_type linked later).
    _descriptor.FieldDescriptor(
      name='priority', full_name='notifications.proto.ScheduleParams.priority', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _SCHEDULEPARAMS_PRIORITY,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message inside DESCRIPTOR.serialized_pb.
  serialized_start=95,
  serialized_end=223,
)
# Generated Descriptor for the NotificationEntry message — do not hand-edit.
_NOTIFICATIONENTRY = _descriptor.Descriptor(
  name='NotificationEntry',
  full_name='notifications.proto.NotificationEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # optional SchedulerClientType type = 1 (enum from client_state.proto, linked later).
    _descriptor.FieldDescriptor(
      name='type', full_name='notifications.proto.NotificationEntry.type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=-1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # optional string guid = 2.
    _descriptor.FieldDescriptor(
      name='guid', full_name='notifications.proto.NotificationEntry.guid', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # optional int64 create_time = 3.
    _descriptor.FieldDescriptor(
      name='create_time', full_name='notifications.proto.NotificationEntry.create_time', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # optional NotificationData notification_data = 4 (message type linked later).
    _descriptor.FieldDescriptor(
      name='notification_data', full_name='notifications.proto.NotificationEntry.notification_data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # optional ScheduleParams schedule_params = 5 (message type linked later).
    _descriptor.FieldDescriptor(
      name='schedule_params', full_name='notifications.proto.NotificationEntry.schedule_params', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message inside DESCRIPTOR.serialized_pb.
  serialized_start=226,
  serialized_end=464,
)
# Resolve cross-references that could not be expressed while the descriptors
# were being constructed, then register everything with the symbol database.
_SCHEDULEPARAMS.fields_by_name['priority'].enum_type = _SCHEDULEPARAMS_PRIORITY
_SCHEDULEPARAMS_PRIORITY.containing_type = _SCHEDULEPARAMS
_NOTIFICATIONENTRY.fields_by_name['type'].enum_type = client__state__pb2._SCHEDULERCLIENTTYPE
_NOTIFICATIONENTRY.fields_by_name['notification_data'].message_type = notification__data__pb2._NOTIFICATIONDATA
_NOTIFICATIONENTRY.fields_by_name['schedule_params'].message_type = _SCHEDULEPARAMS
DESCRIPTOR.message_types_by_name['ScheduleParams'] = _SCHEDULEPARAMS
DESCRIPTOR.message_types_by_name['NotificationEntry'] = _NOTIFICATIONENTRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Materialize the concrete message classes from the descriptors above.
ScheduleParams = _reflection.GeneratedProtocolMessageType('ScheduleParams', (_message.Message,), dict(
  DESCRIPTOR = _SCHEDULEPARAMS,
  __module__ = 'notification_entry_pb2'
  # @@protoc_insertion_point(class_scope:notifications.proto.ScheduleParams)
  ))
_sym_db.RegisterMessage(ScheduleParams)
NotificationEntry = _reflection.GeneratedProtocolMessageType('NotificationEntry', (_message.Message,), dict(
  DESCRIPTOR = _NOTIFICATIONENTRY,
  __module__ = 'notification_entry_pb2'
  # @@protoc_insertion_point(class_scope:notifications.proto.NotificationEntry)
  ))
_sym_db.RegisterMessage(NotificationEntry)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
988,601 | 6bfd8919aa9dfbbf561f71b4c7621977f346dee4 | num=input()
# Count the decimal digits in `num` (read from stdin above) and print the total.
a = sum(1 for ch in num if ch.isdigit())
print(a)
|
988,602 | a3dafd81bc492eff26576573fa6ae8a5b1d542d2 | # -*- coding: utf-8 -*-
"""Implements object data persistence with "Active Record".
$Id: record.py 953 2012-03-25 13:26:19Z anovgorodov $
"""
import zope.interface
import zope.schema
from zope.interface import implements
from rx.ormlite2 import dbop
from rx.ormlite2.interfaces import IRecord, IActiveRecord
from rx.ormlite2.schema import IORMField, IChoice, IReference
from rx.ormlite2.lob import ILOB
from rx.ormlite2.exc import PersistenceError, ModelError
# Set to enable compact record representation
COMPACT_RECORD_REPR = False
class MISSING_REPR:
    """Sentinel type whose repr marks an attribute that was never set."""
    def __repr__(self):
        return '{MISSING}'


# Shared sentinel instance used throughout this module.
MISSING = MISSING_REPR()
class _MissingMarker(object): pass
missing_marker = _MissingMarker()
class ObjectRef(object):
    """Lazy reference to a related record.

    Stores only the target's primary key (a tuple, or None); the referenced
    object is resolved on call through the bound field's vocabulary.
    """

    def __init__(self, owner, name, field, key=None):
        assert key is None or isinstance(key, tuple)
        self.owner = owner
        self.name = name
        self.field = field
        self.key = key

    def __str__(self):
        return str(self.key)

    def __unicode__(self):
        # Python 2 only: text form of the key.
        return unicode(self.key)

    def __repr__(self):
        return 'ObjectRef(%s{%s}, %r, %r)' % (self.owner.__class__.__name__, id(self.owner), self.field, self.key)

    def __call__(self):
        # Resolve the referenced object through the field's vocabulary.
        bound = self.field.bind(self.owner)
        return bound.vocabulary[self.key]

    def __eq__(self, other):
        # Equal iff both point through the very same field at the same key.
        return self.field is other.field and self.key == other.key

    def __ne__(self, ob):
        return not self.__eq__(ob)

    def set(self, value):
        """Point this reference at *value* (a record), or at the field's null key."""
        self.key = self.field.null() if value is None else IRecord(value).primary_key
class OrmMetaClass(type):
    """Metaclass that harvests ORM field metadata from the zope interfaces a
    class implements and stores it as p_* class attributes (attr/column maps,
    declaration order, primary-key lists)."""

    def __new__(cls, name, bases, attrs):
        new = super(OrmMetaClass, cls).__new__(cls, name, bases, attrs)
        cls.init_orm_metadata(new)
        return new

    @classmethod
    def init_orm_metadata(meta, cls):
        """Populate cls.p_attr2col / p_col2attr / p_attr_seq / p_col_seq /
        p_keys / p_key_fields / p_fields from the implemented interfaces."""
        # if hasattr(cls, '__class_initialized_%s_%s' % \
        #     (cls.__module__, cls.__name__)):
        #     return
        # Force the Zope interface machinery to run before we inspect the class.
        if '__implements_advice_data__' in cls.__dict__:
            interfaces, classImplements = cls.__dict__['__implements_advice_data__']
            #del cls.__implements_advice_data__
            classImplements(cls, *interfaces)
        fields = {}
        cls.p_attr2col = {}
        cls.p_col2attr = {}
        cls.p_attr_seq = []
        cls.p_col_seq = []
        cls.p_keys = []
        cls.p_key_fields = []
        seen = {}
        impl_fields = []
        # Collect each interface field once, preserving declaration order.
        for iface in zope.interface.implementedBy(cls):
            for name, field in zope.schema.getFieldsInOrder(iface):
                if name not in seen:
                    seen[name] = 1
                    impl_fields.append((name, field))
        for name, field in impl_fields:
            if IORMField.providedBy(field):
                # Each DB column may back at most one attribute.
                if field.db_column in cls.p_col_seq:
                    raise ModelError('DB column "%s" cannot be declared twice '
                        '(trying to declare for field "%s", already declared '
                        'for field "%s")' % (field.db_column, name,
                        cls.p_col2attr[field.db_column]))
                cls.p_attr2col[name] = field.db_column
                cls.p_col2attr[field.db_column] = name
                cls.p_attr_seq.append(name)
                cls.p_col_seq.append(field.db_column)
                fields[name] = field
                if field.primary_key:
                    cls.p_keys.append(name)
                    cls.p_key_fields.append(field.db_column)
        cls.p_fields = fields
        # setattr(cls, '__class_initialized_%s_%s' % \
        #     (cls.__module__, cls.__name__), True)
class Record(object):
    """Read-side "Active Record" base class.

    Attribute <-> column metadata (the p_* attributes) is produced by
    OrmMetaClass from the ORM fields declared on the zope interfaces the
    subclass implements.  Python 2 code (print statements elsewhere, has_key,
    xrange, __metaclass__).
    """
    __metaclass__ = OrmMetaClass  # Python 2 metaclass hook
    implements(IRecord)

    def __setstate__(self, dic):
        # unpickling support
        # NOTE(review): __class_init__ is not defined anywhere in this chunk —
        # presumably injected elsewhere; verify before relying on it.
        self.__class_init__()
        self.__dict__.update(dic)

    def __init__(self, **kw):
        self.__class_init__()
        super(Record, self).__init__()
        # Initialize field values from keyword arguments or field defaults.
        for name, field in self.p_fields.items():
            value = kw.get(name, field.default)
            if IChoice.providedBy(field):
                # Choice fields are wrapped in an ObjectRef pointing at the value.
                ref = self._object_ref(self, name, field)
                ref.set(value)
                setattr(self, name, ref)
                continue
            if value is None:
                value = field.null()
            setattr(self, name, value)

    @property
    def primary_key(self):
        """Tuple of primary-key attribute values, in declaration order."""
        return tuple([ getattr(self, name) for name in self.p_keys ])

    @classmethod
    def _object_ref(self, *args, **kw):
        # Factory hook so subclasses can substitute their own reference type.
        return ObjectRef(*args, **kw)

    def __repr__(self):
        """Compact one-liner or verbose multi-line dump, per COMPACT_RECORD_REPR."""
        if COMPACT_RECORD_REPR:
            parts = [ (key, getattr(self, key, MISSING)) \
                for key in self.p_keys ]
            r = '<%s.%s(%s) at 0x%X>' % (
                self.__class__.__module__,
                self.__class__.__name__,
                ', '.join(('%s=%r' % (key, value) for key, value in parts)),
                id(self))
            return r
        else:
            parts = [str(self.__class__) + ' {%s}' % id(self)]
            parts.append(" TableName = %s" % getattr(self, 'p_table_name', MISSING))
            for ob_attr, field in self.p_fields.items():
                if hasattr(self, ob_attr):
                    v = getattr(self, ob_attr)
                    if isinstance(v, ActiveRecord):
                        # Avoid recursing into related records.
                        v = "%s{%s}" % (v.__class__.__name__, id(v))
                    else:
                        v = repr(v)
                else:
                    v = "{MISSING}"
                parts.append(" %s = %s = %s" % \
                    (ob_attr, field.db_column, v))
            return "\n".join(parts)

    @classmethod
    def _selectExpr(klass):
        """Build the SELECT column list; LOB columns may be masked with NULL
        when the adapter's LOBIO disables record loading."""
        columns = list(klass.p_col_seq)
        dba = dbop.DbAdapter()
        if hasattr(dba, 'LOBIO'):
            LOBIO = dba.LOBIO
            if getattr(LOBIO, 'disableRecordLoading', False):
                lob_columns = [ c for c in columns \
                    if ILOB.providedBy(klass.p_fields[klass.p_col2attr[c]]) ]
                fix = lambda c, lob_columns=lob_columns: \
                    'NULL as %s' % c if c in lob_columns else c
                columns = [ fix(c) for c in columns ]
        return ','.join(columns)

    def _fetchFromDb(self):
        """Fetch this record's row, addressed by primary key; return the raw
        row sequence or None when no row matches."""
        where_dict = {}
        # for k in self.p_key_fields:
        #     where_dict[k] = getattr(self, self.p_col2attr[k])
        #TODO: tests are needed for the logic added below
        p_keys = self.p_keys
        key_field_items = [ (key, field) \
            for key, field in self.p_fields.items() if key in p_keys ]
        for ob_attr, field in key_field_items:
            bound_field = field.bind(self)
            db_column = field.db_column
            if IChoice.providedBy(bound_field):
                # Choices store the referenced key, not the ObjectRef itself.
                val = getattr(self, ob_attr).key
            else:
                val = getattr(self, ob_attr)
            if IReference.providedBy(bound_field) and val is not None:
                # References are stored by the target record's single-column key.
                assert len(val.p_keys) == 1
                val.save() # FIXME: why is save() needed here?
                bound_field = val.p_fields[val.p_keys[0]].bind(val)
                val = getattr(val, val.p_keys[0])
            elif IORMField.providedBy(bound_field):
                if val is not None:
                    val = bound_field.toDbType(val)
            where_dict[db_column] = val
        c = dbop.selectFrom(tabName = self.p_table_name,
            selectExpr = self._selectExpr(),
            whereDict = where_dict)
        return c.fetchone()

    @classmethod
    def load(klass, *args, **kw):
        """Construct a record from the given key arguments and load it from the DB."""
        assert issubclass(klass, Record)
        assert args or kw, "No argument supplied to load() - most likely this is an error"
        return klass(*args, **kw).reload()

    def reload(self):
        """Re-read this record's row; raises PersistenceError when no row exists."""
        r = self._fetchFromDb()
        if not r:
            raise PersistenceError('No data')
        self.__dict__.update(self._fromSequence(r))
        return self

    def _update(self, data=None, **kw):
        """Merge values from a record/dict and keywords; key attributes are never touched."""
        if data is None:
            data = dict()
        elif IRecord.providedBy(data):
            data = dict([(k, getattr(data, k)) for k in data.p_fields])
        data.update(kw)
        for ob_attr, field in self.p_fields.items():
            if ob_attr in data and ob_attr not in self.p_keys:
                setattr(self, ob_attr, data[ob_attr])

    def differsFromDb(self):
        """Return True when this instance's values differ from the stored row
        (or when no stored row exists)."""
        r = self._fetchFromDb()
        if not r:
            return True
        d = self._fromSequence(r)
        for attr, dbval in d.items():
            field = self.p_fields[attr]
            obval = getattr(self, attr)
            if IORMField.providedBy(field):
                if obval is not None:
                    # Compare in DB representation.
                    obval = field.toDbType(obval)
            if obval != dbval:
                return True
        return False

    def _fromDict(self, r):
        """Rebuild the column-ordered value sequence from a dict keyed by column name."""
        # NOTE(review): `or r.get(db_col.upper())` also fires for falsy stored
        # values (0, ''), silently retrying the uppercase key — confirm intended.
        seq = []
        for db_col in self.p_col_seq:
            seq.append(r.get(db_col) or r.get(db_col.upper()))
        return self._fromSequence(seq)

    def _fromSequence(self, r):
        """Convert a raw DB row (ordered as p_col_seq) into an attr -> value dict."""
        d = {}
        attr2col = self.p_attr2col
        tableName = self.p_table_name
        attr_seq = self.p_attr_seq
        fields = self.p_fields
        fromDbType = self._fromDbType
        valuesToSetKeys = []
        for i in range(len(self.p_col_seq)):
            attr = attr_seq[i]
            field = fields[attr]
            val = fromDbType(r[i], attr, field)
            if val is not missing_marker:
                # Hack to support LOB reloading: LOB values remember their
                # table/column so they can be re-fetched on demand.
                if ILOB.providedBy(field) and val is not None:
                    val._tableName = tableName
                    val._fieldName = attr2col[attr]
                    val._keys = {}
                    valuesToSetKeys.append(val)
                d[attr] = val
        # Hack to support LOB reloading: attach this row's key values to each LOB.
        for k in self.p_keys:
            for val in valuesToSetKeys:
                val._keys[attr2col[k]] = d[k]
        return d

    def _fromDbType(self, val, attr, field):
        """Convert one raw DB value into its attribute representation.

        Choice keys become ObjectRefs, references resolve via the vocabulary,
        plain ORM fields go through fromDbType; anything else (e.g. unknown
        columns from load_many, where field is None) passes through unchanged.
        """
        if IChoice.providedBy(field):
            return self._object_ref(self, attr, field, (val,))
        elif IReference.providedBy(field):
            if val is not None:
                bound_field = field.bind(self)
                return bound_field.vocabulary[val]
        elif IORMField.providedBy(field):
            bound_field = field.bind(self)
            val = bound_field.fromDbType(val)
        return val

    def copy(self):
        """Shallow copy; choice ObjectRefs are re-created so they point at the clone."""
        clone = self.__class__.__new__(self.__class__)
        clone.__dict__.update(self.__dict__)
        for name, field in self.p_fields.items():
            v = clone.__dict__.get(name)
            if IChoice.providedBy(field):
                clone.__dict__[name] = self._object_ref(clone, name, field, v.key)
        return clone

    @classmethod
    def load_many(klass, cursor):
        """Yield one record per cursor row.

        Columns unknown to the ORM mapping are set as plain attributes named
        after the lower-cased column name.
        """
        #klass.__class_init__()
        col2attr = klass.p_col2attr
        attr2col = klass.p_attr2col
        colindex2attr = []
        # Map each result column index to an attribute name.
        for d in cursor.description:
            col_name = d[0].lower()
            if col2attr.has_key(col_name):
                colindex2attr.append(col2attr[col_name])
            else:
                colindex2attr.append(col_name)
        for row in cursor:
            if not row:
                break
            ob = klass()
            valuesToSetKeys = []
            for i in xrange(len(row)):
                attr = colindex2attr[i]
                field = ob.p_fields.get(attr)
                val = ob._fromDbType(row[i], attr, field)
                if val is not missing_marker:
                    # Hack to support LOB reloading (see _fromSequence).
                    if ILOB.providedBy(field) and val is not None:
                        val._tableName = klass.p_table_name
                        val._fieldName = attr2col[attr]
                        val._keys = {}
                        valuesToSetKeys.append(val)
                    setattr(ob, attr, val)
            # Hack to support LOB reloading: attach the row's key values.
            for k in ob.p_keys:
                for val in valuesToSetKeys:
                    val._keys[attr2col[k]] = getattr(ob, k)
            yield ob

    @classmethod
    def load_all(klass):
        """Load every row of the table."""
        # NOTE(review): uses klass.p_table while the rest of the class uses
        # p_table_name — looks like a latent typo; verify before relying on it.
        cursor = dbop.dbquery("select * from %s" % klass.p_table)
        return klass.load_many(cursor)

    @classmethod
    def select(klass, where, *params, **kw):
        """Select a list of objects from the DB.

        Example: User.select("ROLE='ADMIN' or STATUS=%s", status)
        """
        assert not (params and kw)
        select_list = ','.join([ col for col in klass.p_col_seq ])
        where_clause = 'WHERE %s' % where if where else ''
        q = 'SELECT %s FROM %s %s' % (select_list, klass.p_table_name, where_clause)
        cursor = dbop.dbquery(q, *params, **kw)
        return klass.load_many(cursor)

    def __eq__(self, ob):
        """Field-by-field equality; the identity check skips costly __eq__ calls."""
        if not isinstance(ob, Record):
            return False
        if self.p_attr_seq != ob.p_attr_seq:
            #print 'self.p_attr_seq != ob.p_attr_seq'
            return False
        for name in self.p_attr_seq:
            if getattr(self, name) is not getattr(ob, name) and \
                getattr(self, name) != getattr(ob, name):
                #print '%r != %r' % (getattr(self, name), getattr(ob, name))
                return False
        return True

    def __ne__(self, ob):
        return not self.__eq__(ob)
class ActiveRecord(Record):
    """Record that can also write itself back: save (insert/replace) and delete."""
    implements(IActiveRecord)

    def save(self, replace=True):
        """Persist this record.

        Converts each attribute to its DB representation (saving referenced
        records first) and delegates to dbop.insert; with replace=True an
        existing row with the same key is replaced.
        """
        columns = {}
        for ob_attr, field in self.p_fields.items():
            bound_field = field.bind(self)
            db_column = field.db_column
            if IChoice.providedBy(bound_field):
                # Choices store the referenced key, not the ObjectRef itself.
                val = getattr(self, ob_attr).key
            else:
                val = getattr(self, ob_attr)
            if IReference.providedBy(bound_field) and val is not None:
                # Persist the referenced record first, then store its single-column key.
                assert len(val.p_keys) == 1
                val.save()
                bound_field = val.p_fields[val.p_keys[0]].bind(val)
                val = getattr(val, val.p_keys[0])
            elif IORMField.providedBy(bound_field):
                if val is not None:
                    val = bound_field.toDbType(val)
            columns[db_column] = val
        return dbop.insert(tabName = self.p_table_name,
            columns = columns,
            keyFields = self.p_key_fields,
            replace = replace)

    def delete(self):
        """Delete this record's row, addressed by primary key."""
        where_dict = {}
        # for k in self.p_key_fields:
        #     where_dict[k] = getattr(self, self.p_col2attr[k])
        #TODO: tests are needed for the logic added below
        p_keys = self.p_keys
        key_field_items = [ (key, field) \
            for key, field in self.p_fields.items() if key in p_keys ]
        for ob_attr, field in key_field_items:
            bound_field = field.bind(self)
            db_column = field.db_column
            if IChoice.providedBy(bound_field):
                val = getattr(self, ob_attr).key
            else:
                val = getattr(self, ob_attr)
            if IReference.providedBy(bound_field) and val is not None:
                assert len(val.p_keys) == 1
                val.save()
                bound_field = val.p_fields[val.p_keys[0]].bind(val)
                val = getattr(val, val.p_keys[0])
            elif IORMField.providedBy(bound_field):
                if val is not None:
                    val = bound_field.toDbType(val)
            where_dict[db_column] = val
        dbop.delete(tabName = self.p_table_name,
            whereDict = where_dict
            )

    @classmethod
    def getNewId(klass):
        """Ask the DB layer for a fresh surrogate id."""
        return dbop.get_new_id()
|
988,603 | eb1eff31635e4c83d8080bbdfa70e76c4e634313 | #!/usr/bin/python
from kafka import KafkaConsumer

# Kafka brokers of the SIT (test) PaaS environment.
kafkaHosts = [
    "kafka01.paas.longfor.sit:9092",
    "kafka02.paas.longfor.sit:9092",
    "kafka03.paas.longfor.sit:9092",
]

# auto_offset_reset semantics:
#   earliest - resume from committed offsets; when a partition has none, start from the beginning
#   latest   - resume from committed offsets; when a partition has none, consume only new records
#   none     - resume from committed offsets; raise if any partition has no committed offset
consumer = KafkaConsumer(
    bootstrap_servers=kafkaHosts,
    group_id='mdf_group',
    auto_offset_reset='latest',
)

# Tail the application-log topic and dump each raw message payload.
consumer.subscribe("testapplog_plm-prototype")
for msg in consumer:
    print(msg.value)
988,604 | 070483279a5249491b27fb1969490ce0e5e489d7 | # 파일 열기 => 작업 => 닫기
# Open file => work => close
# Modes: (w)rite = overwrite, (a)ppend = add to end, (r)ead
# 1. Write a file
file = open("E:/python/Chapter10/filetest/data0823.txt", "w", encoding="utf8")
file.write("1.인생은고통이다")
file.close()  # bug fix: was `file.close` (attribute access only — the file was never closed)
# 2. Append to the file
file = open("E:/python/Chapter10/filetest/data0823.txt", "a", encoding="utf8")
file.write("\n2.상위 5% 뺴면 시궁창이다.")
file.close()
# Read the file
# bug fix: was opened with mode "a" (append), which cannot be read from —
# file.read() would raise io.UnsupportedOperation
file = open("E:/python/Chapter10/filetest/data0823.txt", "r", encoding="utf8")
# 3.1 Read the whole file at once
data1 = file.read()
print(data1)
file.close()
# 3.2 Read the file line by line
# bug fix: the file was closed above; it must be reopened before readline()
file = open("E:/python/Chapter10/filetest/data0823.txt", "r", encoding="utf8")
while True:
    data1 = file.readline()
    # bug fix: check for EOF before printing, so the sentinel empty
    # string no longer produces a spurious blank line
    if data1 == "":
        break
    print(data1)
file.close()
988,605 | 37672c75ed5c712eb458d7e08c5fb3e78b89fc26 | # ! /usr/bin/env python
# - * - coding:utf-8 - * -
# __author__ : KingWolf
# createtime : 2018/11/2 0:46
import os
import time
from selenium import webdriver
from PIL import Image
from baidu_ai.baidu_ai_api_test import BaiduOCR
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
class Butian_proceing(object):
    """Selenium automation for butian.net.

    Logs in through a 360 account (captcha solved with the Baidu OCR API) and,
    on success, fills in and submits the vulnerability-report form.
    """

    def __init__(self):
        # Point Chrome at a persistent user-data-dir profile.
        chrome_option = webdriver.ChromeOptions()
        chrome_option.add_argument('user-data-dir=F:\data_profile')
        driver_path = os.path.join((os.path.dirname(__file__)), 'selenium_study_aotumation', 'chromedriver.exe')
        self.driver = webdriver.Chrome(options=chrome_option, executable_path=driver_path)
        self.driver.get('https://www.butian.net/')
        # Maximize the browser window.
        self.driver.maximize_window()
        time.sleep(5)

    def get_captcha_img(self,full_captcha,small_captcha):
        """Screenshot the login page and crop out the captcha image.

        full_captcha  -- path where the full-page screenshot is written
        small_captcha -- path where the cropped, grayscale captcha is written
        """
        # On the login page, take a full-page screenshot first.
        self.driver.get_screenshot_as_file(full_captcha)
        time.sleep(1.5)
        # Locate the captcha <img> element.
        captcha_img = self.driver.find_element_by_id('captchaImage')
        # Compute the captcha's bounding box coordinates.
        left = captcha_img.location['x']
        upper = captcha_img.location['y']
        right = captcha_img.size['width'] + left
        buttom = captcha_img.size['height'] + upper
        captcha_Coords = (left,upper,right,buttom)
        # Open the full screenshot.
        img = Image.open(full_captcha)
        # Convert to grayscale first, then crop out the captcha region.
        pic = img.convert('L')
        new_img = pic.crop(captcha_Coords)
        new_img.save(small_captcha)

    def baidu_ocr(self,full_captcha,small_captcha):
        """Crop the captcha and OCR it through the Baidu API; return the text."""
        # Produce the cropped captcha image first.
        self.get_captcha_img(full_captcha,small_captcha)
        time.sleep(2)
        code = BaiduOCR().basic_Accurate_options(filepath=small_captcha)
        return code

    def butian(self,full_captcha,small_captcha):
        """Drive the login flow; on success, hand off to submit_vulnerability()."""
        try:
            # Open the "white hat login" page.
            self.driver.find_element_by_link_text(u'白帽登录').click()
            time.sleep(3)
            # Close the "important notice" popup (it is a DOM modal, not a JS
            # alert, so switch_to.alert does not work — locate its button instead).
            # alert_window = driver.driver.switch_to.alert
            # alert_window.accept()
            # Click the popup's close button.
            alert_window = self.driver.find_element_by_xpath('//div[@id="mPrompt1"]//button[@class="btn-white"]').click()
            time.sleep(2)
            # Switch to the 360-account login.
            self.driver.find_element_by_link_text(u'使用360账号登录').click()
            time.sleep(2)
            # Solve the captcha via the Baidu OCR helper.
            captcha_code = self.baidu_ocr(full_captcha,small_captcha)
            time.sleep(3)
            # Fill in the 360 account credentials.
            # NOTE(review): credentials are hard-coded in source — move them to
            # configuration or environment variables.
            self.driver.find_element_by_id('username').clear()
            self.driver.find_element_by_id('username').send_keys('lccr777@163.com')
            time.sleep(0.5)
            self.driver.find_element_by_id('password').clear()
            self.driver.find_element_by_id('password').send_keys('922521dfxs5619')
            time.sleep(2)
            # Enter the recognized captcha text.
            self.driver.find_element_by_id('captcha').clear()
            self.driver.find_element_by_id('captcha').send_keys(captcha_code)
            # Click the login button.
            self.driver.find_element_by_id('button').click()
            time.sleep(3)
            # After login the top-right corner shows the user's nickname; use it
            # to decide whether the login succeeded.
            nickname = self.driver.find_element_by_xpath('//div[@class="whitehatName"]').text
            print(nickname)
            # Top-right account element (used by the commented-out logout flow).
            username = self.driver.find_element_by_xpath('//a[@class="navtoolsName"]')
            if nickname == "狼胸_book14":
                print('登录成功')
                # # Logout flow kept for reference: hover over the nickname in the
                # # top-right corner, then click "log out".
                # ActionChains(self.driver).move_to_element(username).perform()
                #
                # logut = self.driver.find_element_by_link_text(u'退出系统')
                # logut.click()
                # time.sleep(3)
                # self.driver.quit()
                # Logged in successfully — try submitting a vulnerability report.
                self.submit_vulnerability()
            else:
                print('登录失败')
        except Exception as e:
            print("错误的代码:",e)

    def submit_vulnerability(self):
        """Fill in and submit the vulnerability-report form with canned test data."""
        # Click "submit vulnerability".
        self.driver.find_element_by_id('btnSub').click()
        time.sleep(2)
        # Vendor name.
        self.driver.find_element_by_id('inputCompy').clear()
        self.driver.find_element_by_id('inputCompy').send_keys('test')
        # Domain name or IP.
        self.driver.find_element_by_name('host').clear()
        self.driver.find_element_by_name('host').send_keys('11177711')
        # Vulnerability category (a <select> drop-down) — choose "Web漏洞".
        selection = Select(self.driver.find_element_by_id('selCate'))
        selection.select_by_visible_text('Web漏洞')
        # Vulnerability title.
        self.driver.find_element_by_xpath('//input[@id="title"]').clear()
        self.driver.find_element_by_xpath('//input[@id="title"]').send_keys('2有SQL注入')
        # Vulnerable URL.
        self.driver.find_element_by_name('url[]').clear()
        self.driver.find_element_by_name('url[]').send_keys('http://12356.com')
        # Vulnerability type — SQL injection, medium severity, for batch reports.
        vulner_type = Select(self.driver.find_element_by_id('lootypesel2'))
        vulner_type.select_by_visible_text('SQL注入')
        # Severity level.
        vulner_level = Select(self.driver.find_element_by_name('level'))
        vulner_level.select_by_visible_text('中危')
        # Short description.
        self.driver.find_element_by_id('description').clear()
        self.driver.find_element_by_id('description').send_keys('777777777777777777777')
        # Detail section: the rich-text editor lives in an iframe, so switch into
        # it before typing.
        self.driver.switch_to.frame('ueditor_0')
        self.driver.find_element_by_xpath('//body[@class="view"]').clear()
        self.driver.find_element_by_xpath('//body[@class="view"]').send_keys('第一次测试调试!')
        # time.sleep(120)
        # Remediation suggestion.
        self.driver.find_element_by_id('repair_suggest').clear()
        self.driver.find_element_by_id('repair_suggest').send_keys('过滤相关的关键字')
        # Industry (usually "IT/计算机/互联网/通信").
        industry = Select(self.driver.find_element_by_id('industry1'))
        industry.select_by_visible_text('IT/计算机/互联网/通信')
        # Industry sub-category (default to the first option).
        self.driver.find_element_by_xpath('//p[@id="industry2"]//input[@id="19"]').click()
        # Region: three cascaded drop-downs for province / city / district.
        province_selection = Select(self.driver.find_element_by_id('selec1'))
        province_selection.select_by_visible_text('广东省')
        city_selection = Select(self.driver.find_element_by_id('selec2'))
        city_selection.select_by_visible_text('珠海市')
        country_selection = Select(self.driver.find_element_by_id('selec3'))
        country_selection.select_by_visible_text('市辖区')
        # Vendor contact details.
        self.driver.find_element_by_name('company_contact').clear()
        self.driver.find_element_by_name('company_contact').send_keys('Tel:0759-2222222')
        # Submit anonymously.
        self.driver.find_element_by_name('anonymous').click()
        # Click the submit button.
        self.driver.find_element_by_id('tijiao').click()
        # The slider / text captcha after submission still has to be done by hand.
        time.sleep(11)
if __name__ == "__main__":
    # Script entry point: log in and, on success, submit the vulnerability form.
    butian_ = Butian_proceing()
    butian_.butian(full_captcha=r"E:\selenium_pic\full_captcha.png",small_captcha=r"E:\selenium_pic\small_captcha.png")
|
988,606 | 5c663e6ceac57842d49338e6f3c13ae6899c90a1 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 tw=100 et ai si
import cv2
import imutils
import threading
from PIL import Image, ImageTk
class VideoStream:
    """This class handles obtaining a video stream from the IP camera."""

    def video_loop(self):
        """
        Loop which displays the video. Do not call without using Threading.
        Otherwise the program will softlock.
        """
        _, img = self.vs.read()
        img = imutils.resize(img, width=self.width)
        # OpenCV delivers BGR; convert to RGB before handing to PIL/Tk.
        image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image)
        image = ImageTk.PhotoImage(image)
        self.frame.configure(image=image)
        # Keep a reference on the widget so the PhotoImage is not garbage-collected.
        self.frame.photo = image
        # Re-schedule ourselves on the Tk event loop after self.fps milliseconds.
        self.top.after(self.fps, self.video_loop)

    def start(self):
        """Start asynchronous video stream."""
        # NOTE(review): the first frame is scheduled after a fixed 15 ms rather
        # than self.fps — confirm this is intentional.
        self.top.after(15, self.video_loop)

    def __init__(self, top, frame, url, width):
        """
        Init method for video stream.
        top - Tk root used for after() scheduling.
        frame - Tkinter Frame which will display the video stream.
        url - link to the video camera. This can also be a file.
        width - width (pixels) frames are resized to before display.
        """
        self.fps = int(1000/60)  # inter-frame delay in ms (~60 fps)
        self.top = top
        self.frame = frame
        self.width = width
        self.vs = cv2.VideoCapture(url)
        # Keep the capture buffer at one frame so the display stays near-live.
        self.vs.set(cv2.CAP_PROP_BUFFERSIZE, 1)
|
988,607 | 3dabc4590012dead6839a03bbc52d164ee14d0e1 | def pfreq(a, b, c):
dic,leng, conc, blank={},len(c), "", 0
for x in range(leng):
if len(c[x].replace("M", "")) != 0:
c[x]=c[x].replace("M", "")
tp=len(c[x].replace("A", "P"))
dp=len(c[x].replace("A", ""))
dic[b[x]]=((dp*100)/tp)
if dic[b[x]] < 75:
conc += ((" "*blank)+b[x])
blank = 1
print "%s" %conc
# Python 2 driver: read the test-case count, then for each case read the entry
# count, the names, and the attendance strings, and report low attendance.
n=int(input())
for x in range(n):
    a=int(input())
    b=raw_input().split()
    c=raw_input().split()
    pfreq(a, b, c)
|
988,608 | 6948a68c5ab73ee6af4466f33741ebe759b2cd5f | # create functions in python that will take an argument and will return 1 for True or 0 for False for the following checks:
# - if the entry is numberic
# - if the entry is alphanumberic (chars and numbers)
# - if the entry is chars only
# - if entry is lowwercase
# - if entry is uppercase
def is_numberic(a):
    """Return 1 when *a* is a number (int, float, numpy scalar, ...), else 0."""
    from pandas.core.dtypes.inference import is_number
    return int(bool(is_number(a)))
def is_lowecase(a):
    """Return 1 when the entry is lowercase, else 0.

    Bug fix: the original returned ``1 if is_lowecase(a) else 0`` — it called
    itself and recursed forever. Delegate to str.islower() instead.
    """
    return 1 if a.islower() else 0
def is_chars(a):
    """Return 1 when the entry consists of letters only, else 0.

    Bug fix: the original returned ``1 if is_chars(a) else 0`` — it called
    itself and recursed forever. Delegate to str.isalpha() instead.
    """
    return 1 if a.isalpha() else 0
def is_alfa(a):
    """Return 1 when *a* is alphanumeric (matches ^[a-zA-Z0-9]+$), else 0."""
    import re
    match = re.match('^[a-zA-Z0-9]+$', a)
    return 1 if match else 0
|
988,609 | fbab026fe8875f683b7771b589718ec4d029685e | import csv
def main():
    """Convert the semi-structured chessData.txt dump into two CSV files.

    Reads 'chessData.txt' from the working directory and writes
    'chessData_games.csv' (one row per game header block) and
    'chessData_moves.csv' (one row per move line).
    """
    game = {}
    mode = None  # 'game' while reading game headers, 'move' while reading moves
    gamefieldnames = ['white', 'black', 'date', 'halfmoves', 'moves', 'result', 'whiteelo', 'blackelo', 'gamenumber', 'event', 'site', 'eventdate', 'round', 'eco', 'opening']
    movefieldnames = ['movenumber', 'side', 'move', 'fen', 'gamenumber']
    # Renamed `input` -> `infile`: the original shadowed the builtin input().
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open('chessData.txt', 'r') as infile, \
            open('chessData_games.csv', 'w', newline='') as out_games, \
            open('chessData_moves.csv', 'w', newline='') as out_moves:
        gamewriter = csv.DictWriter(out_games, fieldnames=gamefieldnames)
        gamewriter.writeheader()
        movewriter = csv.DictWriter(out_moves, fieldnames=movefieldnames)
        movewriter.writeheader()
        for line in infile:
            if line == '=========================== Game ======================================================\n':
                mode = 'game'
                game = {}
            elif line == '--------------------------------------------------------- Game Moves ---------------------------------------------------------------------\n':
                gamewriter.writerow(game)
                mode = 'move'
            elif line == '======================================================================================\n':
                pass  # end-of-game marker carries no data
            else:
                if mode == 'game':
                    # Split only on the first ':' so values containing ':'
                    # (e.g. Site URLs) are no longer truncated.
                    key, _, value = line.partition(':')
                    game[key.strip().lower()] = value.strip()
                elif mode == 'move':
                    # Fresh dict per line so fields never leak between moves.
                    move = {}
                    for pair in line.split(','):
                        tokens = pair.split(':', 1)
                        move[tokens[0].strip().lower()] = tokens[1].strip()
                    movewriter.writerow(move)


if __name__ == '__main__':
    main()
988,610 | 07ee0e0fd3f71c07790297b6872f00b2fa1bed1a | """
visualize results for test image
"""
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.autograd import Variable
import transforms as transforms
from skimage import io
from skimage.transform import resize
from models import *
import dlib
from imutils import face_utils
import cv2
# Crop size fed to the network: ten 44x44 crops taken from the 48x48 face image.
cut_size = 44
transform_test = transforms.Compose([
    transforms.TenCrop(cut_size),
    # Stack the 10 crops into one tensor of shape (10, C, H, W).
    transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
])
def rgb2gray(rgb):
    """Collapse an RGB(A) image array to grayscale using ITU-R BT.601 luma weights."""
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(rgb[..., :3], luma_weights)
# Capture frames from the default webcam (device 0).
video_capture = cv2.VideoCapture(0)
# rect for frontal face detector and rect.rect for cnn face detector
# Actually, since dlib is built with CUDA support and GPU is used, frontal
# face detector runs slower than its cnn counterpart!
# face_detect = dlib.get_frontal_face_detector()
face_detect = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")
sp = dlib.shape_predictor("shape_predictor_5_face_landmarks.dat")
# FER2013 emotion labels, index-aligned with the network output units.
class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
# Load the VGG19 classifier trained on FER2013, move it to the GPU, eval mode.
net = VGG('VGG19')
checkpoint = torch.load(os.path.join('FER2013_VGG19', 'PrivateTest_model.t7'))
net.load_state_dict(checkpoint['net'])
net.cuda()
net.eval()
while True:
    ret, frame = video_capture.read()
    raw_img = frame
    # raw_img = io.imread('images/1.jpg')
    # raw_img = io.imread('/home/activreg/Tomasz_files/test_data/photos/Malakas.jpeg')
    # raw_img = io.imread('/home/activreg/Downloads/grief_cycle.jpeg')
    # Undesired for camera input, but required for io.imread images
    # raw_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
    rects = face_detect(raw_img, 1)
    for (i, rect) in enumerate(rects):
        (face_x, face_y, face_w, face_h) = face_utils.rect_to_bb(rect.rect)
        # Align and extract the face using the 5-landmark predictor.
        shape = sp(raw_img, rect.rect)
        face_chip = dlib.get_face_chip(raw_img, shape)
        #face_chip = raw_img[face_y:face_y+face_h,face_x:face_x+face_w] # Testing purposes only
        # Grayscale, resize to the 48x48 input the FER2013 network expects.
        gray_face_chip = rgb2gray(face_chip)
        gray_face_chip = resize(gray_face_chip, (48,48), mode='symmetric').astype(np.uint8)
        # Replicate the single gray channel into 3 channels for the VGG input.
        img = gray_face_chip[:, :, np.newaxis]
        img = np.concatenate((img, img, img), axis=2)
        img = Image.fromarray(img)
        inputs = transform_test(img)
        # TenCrop yields ncrops images; fold them into the batch dimension.
        ncrops, c, h, w = np.shape(inputs)
        inputs = inputs.view(-1, c, h, w)
        inputs = inputs.cuda()
        # NOTE(review): Variable(volatile=True) is the legacy (pre-0.4) way to
        # disable autograd; torch.no_grad() is the modern equivalent.
        inputs = Variable(inputs, volatile=True)
        outputs = net(inputs)
        outputs_avg = outputs.view(ncrops, -1).mean(0) # avg over crops
        #score = F.softmax(outputs_avg)
        _, predicted = torch.max(outputs_avg.data, 0)
        #print("The Expression is %s" %str(class_names[int(predicted.cpu().numpy())]))
        # Annotate the frame with the predicted label and the face box.
        cv2.putText(raw_img, (class_names[int(predicted.cpu().numpy())]),(face_x+face_w-10,face_y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.rectangle(raw_img, (face_x, face_y, face_w, face_h), (255, 0, 0), 2)
        cv2.imshow("Face chip", face_chip) # For testing purposes only! (A.k.a., to be ultimately commented out)
    cv2.imshow("Video", raw_img)
    # 'q' quits the loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
print("I don't like when core gets dumped!")
|
988,611 | 2324fb3e6bbea3f4d5117b1422d77fe296ee66a8 | #!/usr/bin/python3
import logging
from lulu_pcol_sim import sim
import sys # for argv
import time # for strftime()
import natsort # for natural sorting of alphabet (needed because the order of objects has to be B_0, B_1, B_2, B_10, B_11 and not B_0, B_1, B_10, B_11, ...)
def createInstanceHeader(pcol, path, originalFilename, nr_robots):
    """Create an instance of the passed P colony that is written as a header in C at the given path

    :pcol: The pcolony object that was read by lulu_pcol_sim
    :path: The path to the instance.h that will be written
    :originalFilename: name of the source P colony file, quoted in the generated banner
    :nr_robots: swarm size; wildcarded objects are expanded into one object per robot

    Side effect: pcol.A is extended with the expanded wildcard objects and
    re-sorted (natural order), so callers see the modified alphabet afterwards."""
    # becomes True as soon as one _W_ID / _W_ALL object is seen; controls
    # emission of the NEEDING_WILDCARD_EXPANSION define further down
    needsWildcardExpansion = False
    with open(path, "w") as fout:
        # banner + include guard of the generated C header
        fout.write("""// vim:filetype=c
/**
 * @file lulu_instance.h
 * @brief Lulu P colony simulator internal structure corresponding to the P colony defined in '%s'.
 * In this header we define the structure of the Pcolony that will power the simulated robot
 * This file was generated automatically by lulu_c.py on %s
 * @author Andrei G. Florea
 * @author Catalin Buiu
 * @date 2016-02-29
 */
#ifndef LULU_INSTANCE_H
#define LULU_INSTANCE_H
#include "lulu.h" """ % (originalFilename, time.strftime("%d %h %Y at %H:%M")))

        fout.write("\nenum objects {")
        # extend wildcard objects to _0, _1, ... _n where n = nr_robots
        # (iterate over a copy, pcol.A[:], because pcol.A is extended inside)
        for a in pcol.A[:]:
            # both $ and $id wildcards need extended objects
            if ("_W_ALL" in a or "_W_ID" in a):
                needsWildcardExpansion = True
                logging.debug("Extending %s wildcarded object" % a)
                # construct extended object list
                extension = [a.replace("W_ID", "%d" % i).replace("W_ALL", "%d" % i) for i in range(nr_robots)]
                # if this extension has not been previously added
                if (not set(extension).issubset(set(pcol.A))):
                    #add the extetendet object list to the alphabet
                    pcol.A.extend(extension)

        # sort objects naturally
        pcol.A = natsort.natsorted(pcol.A, key=lambda x: x.replace('_W_ID', '/').replace('_W_ALL', '.'))

        # emit one enum constant per alphabet object; e/f come from lulu.h
        for i, obj in enumerate(pcol.A):
            if (obj in ['e', 'f']):
                continue; # they are already defined in lulu.h
            if (i == 0):
                # NO_OBJECT = 0, OBJECT_ID_E = 1, OBJECT_ID_F = 2
                fout.write("\n OBJECT_ID_%s = 3," % obj.upper());
            else:
                fout.write("\n OBJECT_ID_%s," % obj.upper());
        fout.write("\n};")

        fout.write("\n\nenum agents {")
        for i, agent_name in enumerate(pcol.B):
            fout.write("\n AGENT_%s," % agent_name.upper());
        fout.write("\n};")

        if (needsWildcardExpansion):
            fout.write("""\n#define NEEDING_WILDCARD_EXPANSION //this ensures that the wildcard expansion code is included""")

        # feature defines: Lulu_kilobot compiles only the agent/object code
        # paths this particular P colony actually uses
        if ("motion" in pcol.B):
            fout.write("\n#define USING_AGENT_MOTION //this ensures that the code associated with the MOTION agent is included in Lulu_kilobot")
        if ("led_rgb" in pcol.B):
            fout.write("\n#define USING_AGENT_LED_RGB //this ensures that the code associated with the LED_RGB agent is included in Lulu_kilobot")
        if ("msg_distance" in pcol.B):
            fout.write("\n#define USING_AGENT_MSG_DISTANCE //this ensures that the code associated with the MSG_DISTANCE agent is included in Lulu_kilobot")
        if ("timer" in pcol.B):
            fout.write("\n#define USING_AGENT_TIMER //this ensures that the code associated with the TIMER agent is included in Lulu_kilobot")
        fout.write("\n")
        if ("d_all" in pcol.A):
            fout.write("""\n#define USING_OBJECT_D_ALL //this ensures that the code associated with processing D_ALL objects is included in Lulu_kilobot""")
        if ("d_next" in pcol.A):
            fout.write("""\n#define USING_OBJECT_D_NEXT //this ensures that the code associated with processing D_NEXT objects is included in Lulu_kilobot""")

        # check if using {IN,OUT}_EXTEROCEPTIVE rules (<I=> or <=O>)
        using_in_out_exteroceptive_rules = False
        for agent in pcol.agents.values():
            for program in agent.programs:
                for rule in program:
                    if (rule.type == sim.RuleType.in_exteroceptive or rule.type == sim.RuleType.out_exteroceptive or
                            rule.alt_type == sim.RuleType.in_exteroceptive or rule.alt_type == sim.RuleType.out_exteroceptive):
                        using_in_out_exteroceptive_rules = True
                        # break only leaves the rule loop; outer loops keep
                        # running, which is harmless (flag stays True)
                        break;
        if (using_in_out_exteroceptive_rules):
            fout.write("""\n#define USING_IN_OUT_EXTEROCEPTIVE_RULES //this ensures that the code associated with processing IN_EXTEROCEPTIVE (<I=>) or OUT_EXTEROCEPTIVE (<=O>) rules is included in Lulu_kilobot""")

        # extern declarations whose definitions land in instance.c
        fout.write("""\n\n//if building Pcolony simulator for PC
#ifdef PCOL_SIM
//define array of names for objects and agents for debug
extern char* objectNames[];
extern char* agentNames[];
#endif
/**
 * @brief The smallest kilo_uid from the swarm (is set in instance.c by lulu_c.py)
 */
extern const uint16_t smallest_robot_uid;
/**
 * @brief The number of robots that make up the swarm (is set in instance.c by lulu_c.py)
 */
extern const uint16_t nr_swarm_robots;""");

        # public API prototypes of the generated instance
        fout.write("""\n\n/**
 * @brief Initialises the pcol object and all of it's components
 *
 * @param pcol The P colony that will be initialized
 */
void lulu_init(Pcolony_t *pcol);
/**
 * @brief Destroys the pcol objects and all of it's components
 *
 * @param pcol The P colony that will be destroyed
 */
void lulu_destroy(Pcolony_t *pcol);
#ifdef NEEDING_WILDCARD_EXPANSION
/**
 * @brief Expands and replaces wildcarded objects with the appropriate objects
 * Objects that end with _W_ID are replaced with _i where i is the the id of the robot, provided with my_id parameter
 *
 * @param pcol The pcolony where the replacements will take place
 * @param my_id The kilo_uid of the robot
 * @return The symbolic id that corresponds to this robot (my_id - smallest_robot_uid)
 */
uint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id);
#endif
#endif""")
# end createInstanceHeader()
def createInstanceSource(pcol, path, nr_robots, smallest_robot_id):
    """Create an instance of the passed P colony that is written as a source file in C at the given path

    :pcol: The pcolony object that was read by lulu_pcol_sim
    :path: The path to the instance.c that will be written
    :nr_robots: swarm size, emitted as the C constant nr_swarm_robots
    :smallest_robot_id: smallest kilo_uid, emitted as smallest_robot_uid"""
    # prevent alphabet related bugs by including e and f objects in alphabet
    if ("e" not in pcol.A):
        pcol.A.append("e")
    if ("f" not in pcol.A):
        pcol.A.append("f")
    with open(path + ".c", "w") as fout:
        fout.write("""#include "%s.h"
#ifdef NEEDING_WILDCARD_EXPANSION
#include "wild_expand.h"
#endif
#ifdef PCOL_SIM""" % path.split("/")[-1]) #only filename

        # debug-only name tables (PC simulator build)
        fout.write("""\n char* objectNames[] = {[NO_OBJECT] = "no_object", """)
        for obj in pcol.A:
            fout.write("""[OBJECT_ID_%s] = "%s", """ % (obj.upper(), obj))
        fout.write("""};
char* agentNames[] = {""")
        for ag_name in pcol.B:
            fout.write("""[AGENT_%s] = "%s", """ % (ag_name.upper(), ag_name))
        fout.write("""};
#endif
//the smallest kilo_uid from the swarm
const uint16_t smallest_robot_uid = %d;
//the number of robots that make up the swarm
const uint16_t nr_swarm_robots = %d;
void lulu_init(Pcolony_t *pcol) {""" % (smallest_robot_id, nr_robots) )

        # call initPcolony()
        fout.write("""\n //init Pcolony with alphabet size = %d, nr of agents = %d, capacity = %d
initPcolony(pcol, %d, %d, %d);""" % (len(pcol.A), len(pcol.B), pcol.n, len(pcol.A), len(pcol.B), pcol.n))
        fout.write("""\n //Pcolony.alphabet = %s""" % pcol.A)

        # init environment
        fout.write("""\n\n //init environment""")
        counter = 0;
        for obj, nr in pcol.env.items():
            #replace %id and * with $id and $ respectively
            fout.write("""\n pcol->env.items[%d].id = OBJECT_ID_%s;""" % (counter, obj.upper()))
            fout.write("""\n pcol->env.items[%d].nr = %d;\n""" % (counter, nr))
            counter += 1
        fout.write("""\n //end init environment""")

        # global swarm environment; falls back to one 'e' object when the
        # colony has no parent swarm (or its global env is empty)
        fout.write("""\n\n //init global pswarm environment""")
        if (pcol.parentSwarm == None or len(pcol.parentSwarm.global_env) == 0):
            fout.write("""\n pcol->pswarm.global_env.items[0].id = OBJECT_ID_E;""")
            fout.write("""\n pcol->pswarm.global_env.items[0].nr = 1;""")
        else:
            counter = 0
            for obj, nr in pcol.parentSwarm.global_env.items():
                #replace %id and * with $id and $ respectively
                fout.write("""\n pcol->pswarm.global_env.items[%d].id = OBJECT_ID_%s;""" % (counter, obj.upper()))
                fout.write("""\n pcol->pswarm.global_env.items[%d].nr = %d;""" % (counter, nr))
                counter += 1
        fout.write("""\n //end init global pswarm environment""")

        # same pattern for the INPUT swarm environment
        fout.write("""\n\n //init INPUT global pswarm environment""")
        if (pcol.parentSwarm == None or len(pcol.parentSwarm.in_global_env) == 0):
            fout.write("""\n pcol->pswarm.in_global_env.items[0].id = OBJECT_ID_E;""")
            fout.write("""\n pcol->pswarm.in_global_env.items[0].nr = 1;""")
        else:
            counter = 0
            for obj, nr in pcol.parentSwarm.in_global_env.items():
                #replace %id and * with $id and $ respectively
                fout.write("""\n pcol->pswarm.in_global_env.items[%d].id = OBJECT_ID_%s;""" % (counter, obj.upper()))
                fout.write("""\n pcol->pswarm.in_global_env.items[%d].nr = %d;""" % (counter, nr))
                counter += 1
        fout.write("""\n //end init INPUT global pswarm environment""")

        # and for the OUTPUT swarm environment
        fout.write("""\n\n //init OUTPUT global pswarm environment""")
        if (pcol.parentSwarm == None or len(pcol.parentSwarm.out_global_env) == 0):
            fout.write("""\n pcol->pswarm.out_global_env.items[0].id = OBJECT_ID_E;""")
            fout.write("""\n pcol->pswarm.out_global_env.items[0].nr = 1;""")
        else:
            counter = 0
            for obj, nr in pcol.parentSwarm.out_global_env.items():
                #replace %id and * with $id and $ respectively
                fout.write("""\n pcol->pswarm.out_global_env.items[%d].id = OBJECT_ID_%s;""" % (counter, obj.upper()))
                fout.write("""\n pcol->pswarm.out_global_env.items[%d].nr = %d;""" % (counter, nr))
                counter += 1
        fout.write("""\n //end init OUTPUT global pswarm environment""")

        for ag_name in pcol.B:
            fout.write("""\n\n //init agent %s""" % ag_name)
            #fout.write("""\n\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);""" % (ag_name.upper(), len(pcol.agents[ag_name].programs)))
            # program count must also cover the copies that wildcard expansion
            # creates later (one per *other* robot, hence nr_robots - 1)
            fout.write("""\n\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);""" % (ag_name.upper(), getNrOfProgramsAfterExpansion(pcol.agents[ag_name], nr_robots- 1)))
            fout.write("""\n //init obj multiset""")
            counter = 0;
            for obj, nr in pcol.agents[ag_name].obj.items():
                #replace %id and * with $id and $ respectively
                for i in range(nr):
                    # multiplicity nr becomes nr consecutive items entries
                    fout.write("""\n pcol->agents[AGENT_%s].obj.items[%d] = OBJECT_ID_%s;""" % (ag_name.upper(), counter, obj.upper()))
                    counter += 1

            fout.write("""\n\n //init programs""")
            for prg_nr, prg in enumerate(pcol.agents[ag_name].programs):
                fout.write("""\n\n initProgram(&pcol->agents[AGENT_%s].programs[%d], %d);""" % (ag_name.upper(), prg_nr, getNrOfRulesWithoutRepetitions(prg)))
                fout.write("""\n //init program %d: < %s >""" % (prg_nr, prg.print()))
                # rule_index tracks emitted rules only (skipped e->e rules
                # leave no hole in the generated rules[] array)
                rule_index = 0
                for rule_nr, rule in enumerate(prg):
                    # skip rules that contain identical operands and thus have no effect
                    if (rule.lhs == rule.rhs and rule.lhs == 'e' and rule.main_type != sim.RuleType.conditional):
                        continue
                    fout.write("""\n //init rule %d: %s""" % (rule_nr, rule.print(toString=True)) )
                    if (rule.main_type != sim.RuleType.conditional):
                        fout.write("""\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_%s, OBJECT_ID_%s, OBJECT_ID_%s, NO_OBJECT, NO_OBJECT);""" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.lhs.upper(), rule.rhs.upper()))
                    else:
                        fout.write("""\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_CONDITIONAL_%s_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s);""" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.alt_type.name.upper(), rule.lhs.upper(), rule.rhs.upper(), rule.alt_lhs.upper(), rule.alt_rhs.upper()))
                    #increase rule_index
                    rule_index += 1
                fout.write("""\n //end init program %d
pcol->agents[AGENT_%s].init_program_nr++;""" % (prg_nr, ag_name.upper()))
            fout.write("""\n //end init programs""")
            fout.write("""\n //end init agent %s""" % ag_name)
        fout.write("""\n}""")

        fout.write("""\n\nvoid lulu_destroy(Pcolony_t *pcol) {
//destroys all of the subcomponents
destroyPcolony(pcol);
}""")
        # expand_pcolony(): runtime wildcard replacement, compiled only when
        # the header defined NEEDING_WILDCARD_EXPANSION
        fout.write("""\n
#ifdef NEEDING_WILDCARD_EXPANSION
uint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id) {
//used for a cleaner iteration through the P colony
//instead of using agents[i] all of the time, we use just agent
Agent_t *agent;
""")
        # list of _W_ID objects: each is replaced with this robot's own id
        fout.write("""\n uint8_t obj_with_id[] = {""")
        obj_with_id_size = 0
        for obj in pcol.A:
            if ("_W_ID" in obj):
                fout.write("OBJECT_ID_%s, " % obj.upper())
                obj_with_id_size += 1
        fout.write("""};
uint8_t obj_with_id_size = %d;""" % (obj_with_id_size))

        # list of _W_ALL objects + parallel 0/1 array telling whether each one
        # is immediately followed in the (sorted) alphabet by a _W_ID object
        fout.write("""\n uint8_t obj_with_any[] = {""")
        obj_with_any_size = 0
        is_obj_with_any_followed_by_id = []
        for i, obj in enumerate(pcol.A):
            if (obj.endswith("_W_ALL")):
                fout.write("OBJECT_ID_%s, " % obj.upper())
                # if we are at least 2 objects before the end of the list
                if (i < len(pcol.A) - 1):
                    # check if this _$ wildcarded object is followed by a _$id object
                    if ("_W_ID" in pcol.A[i+1]):
                        is_obj_with_any_followed_by_id.append(1)
                    else:
                        is_obj_with_any_followed_by_id.append(0)
                else:
                    # this (_$) object is the last one in the list
                    is_obj_with_any_followed_by_id.append(0)
                obj_with_any_size += 1
        fout.write("""};
uint8_t obj_with_any_size = %d;
uint8_t is_obj_with_any_followed_by_id[] = {%s};""" % (obj_with_any_size,
            str(is_obj_with_any_followed_by_id).replace("[", "").replace("]", "")))

        fout.write("""\n\n uint16_t my_symbolic_id = my_id - smallest_robot_uid;
//replace W_ID wildcarded objects with the object corresponding to the symbolic id
// e.g.: B_W_ID -> B_0 for my_symbolic_id = 0
replacePcolonyWildID(pcol, obj_with_id, obj_with_id_size, my_symbolic_id);
//expand each obj_with_any[] element into nr_swarm_robots objects except my_symbolic id.
// e.g.: B_W_ALL -> B_0, B_2 for nr_swarm_robots = 3 and my_symbolic_id = 1
expandPcolonyWildAny(pcol, obj_with_any, is_obj_with_any_followed_by_id, obj_with_any_size, my_symbolic_id, nr_swarm_robots);
return my_symbolic_id;
}
#endif""")
# end createInstanceSource()
def getNrOfProgramsAfterExpansion(agent, suffixListSize):
    """Returns the final number of programs that will result after all programs (within this agent)
    with * wildcard objects have been expanded

    :agent: The agent whose programs will checked
    :suffixListSize: The number of programs that result after expanding a program such as < X_* -> e, e->X_* >
        if suffixListSize = 2 then we obtain 2 new programs, < X_0 - > e ... > and < X_1 -> e ...> that replace the original one
    :returns: The final number of programs that will result after expansion """
    # collect the _W_ALL (i.e. *) wildcarded objects from the colony alphabet
    any_wild_objects = [obj for obj in agent.colony.A if obj.endswith("_W_ALL")]
    logging.info("wild_ANY objects = %s" % any_wild_objects)

    extra_programs = 0
    for program in agent.programs:
        program_has_wild = False
        for rule in program:
            for obj in any_wild_objects:
                # a program is "wildcarded" when any of its rules mentions a
                # _W_ALL object on either side (main or alternative)
                if obj in (rule.lhs, rule.rhs, rule.alt_lhs, rule.alt_rhs):
                    program_has_wild = True
                    logging.warning("wild_ANY object %s exists in program %s rule %s" % (obj, program.print(), rule.print(toString=True)))
                    break
        if program_has_wild:
            # the program gains suffixListSize expanded siblings
            extra_programs += suffixListSize
    return extra_programs + len(agent.programs)
# end getNrOfProgramsAfterExpansion()
def getNrOfRulesWithoutRepetitions(prg):
    """Returns the number of rules from this program that do not consist of operand repetitions such as e->e

    Note: conditional rules are included without checking because it is assumed that they were introduce to check a condition
    :returns: Number of rules that have lhs different from rhs"""
    effective_rules = 0
    for rule in prg:
        # an unconditional e->e rule is a no-op and is therefore not counted
        is_noop = (rule.main_type != sim.RuleType.conditional
                   and rule.lhs == rule.rhs == "e")
        if not is_noop:
            effective_rules += 1
    return effective_rules
# end getNrOfRulesWithoutRepetitions()
# MAIN
# usage: lulu_c.py INPUT_FILE [PCOLONY_NAME] NR_ROBOTS MIN_ROBOT_ID OUT_PATH
if (__name__ == "__main__"):
    # --debug switch raises verbosity from INFO to DEBUG
    logLevel = logging.INFO
    if ('--debug' in sys.argv):
        logLevel = logging.DEBUG

    try:
        import colorlog # colors log output

        formatter = colorlog.ColoredFormatter(
            "%(log_color)s%(levelname)-8s %(message)s %(reset)s",
            datefmt=None,
            reset=True,
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'red,bg_white',
            },
            secondary_log_colors={},
            style='%'
        )
        colorlog.basicConfig(stream = sys.stdout, level = logLevel)
        stream = colorlog.root.handlers[0]
        stream.setFormatter(formatter);
    # colorlog not available
    except ImportError:
        logging.basicConfig(format='%(levelname)s:%(message)s', level = logLevel)

    if (len(sys.argv) < 2):
        logging.error("Expected input file path as parameter")
        exit(1)
    if (len(sys.argv) < 3):
        logging.error("Expected the path to the file (without extensions) that will be generated")
        exit(1)
    if (len(sys.argv) < 4):
        logging.error("Expected the number of robots that make up the swarm")
        exit(1)
    if (len(sys.argv) < 5):
        logging.error("Expected the minimum robot id (kilo_uid) as the last parameter")
        exit(1)

    # read Pcolony from file
    pObj = sim.readInputFile(sys.argv[1])
    pcol = None
    # if the p object read from the input file is a Pswarm, an extra argument
    # (the colony name) shifts the remaining positional parameters by one
    if (type(pObj) == sim.Pswarm):
        if (len(sys.argv) < 3):
            logging.error("Expected the name of a Pcolony as parameter")
            exit(1)
        if (sys.argv[2] not in pObj.C):
            logging.error("Expected the name of a Pcolony as parameter")
            logging.info("Valid Pcolony names for this file are: %s" % pObj.C)
            exit(1)
        if (len(sys.argv) < 4):
            logging.error("Expected the path to the header (that will be generated) as the last parameter")
            exit(1)
        pcol = pObj.colonies[sys.argv[2]]
        nr_robots = int(sys.argv[3])
        min_robot_id = int(sys.argv[4])
        path = sys.argv[5]
    else:
        pcol = pObj
        nr_robots = int(sys.argv[2])
        min_robot_id = int(sys.argv[3])
        path = sys.argv[4]

    #replacing wildcarded marks * and %id with $ and $id respectively
    #in alphabet, all multisets and programs
    for i, val in enumerate(pcol.A):
        pcol.A[i] = val.replace("%id", "W_ID").replace("*", "W_ALL")

    # FIX: iterate over a snapshot of the keys (list(...)) -- deleting entries
    # from a dict while iterating it directly raises RuntimeError on Python 3
    for key in list(pcol.env):
        # if key contains wildcards
        if ("*" in key or "%id" in key):
            #copy value at wildcarded key at new $ key
            pcol.env[key.replace("%id", "W_ID").replace("*", "W_ALL")] = pcol.env[key];
            #delete the * key
            del pcol.env[key]

    #if this pcolony is part of swarm
    if (pcol.parentSwarm != None):
        # same snapshot fix as for pcol.env above
        for key in list(pcol.parentSwarm.global_env):
            # if key contains wildcards
            if ("*" in key or "%id" in key):
                #copy value at wildcarded key at new $ key
                pcol.parentSwarm.global_env[key.replace("%id", "W_ID").replace("*", "W_ALL")] = pcol.parentSwarm.global_env[key];
                #delete the * key
                del pcol.parentSwarm.global_env[key]

    for ag_name in pcol.B:
        # same snapshot fix for each agent's object multiset
        for key in list(pcol.agents[ag_name].obj):
            # if key contains wildcards
            if ("*" in key or "%id" in key):
                #copy value at wildcarded key at new $ key
                pcol.agents[ag_name].obj[key.replace("%id", "W_ID").replace("*", "W_ALL")] = pcol.agents[ag_name].obj[key];
                #delete the * key
                del pcol.agents[ag_name].obj[key]
        # end for key in obj
        for prg_nr, prg in enumerate(pcol.agents[ag_name].programs):
            for rule_nr, rule in enumerate(prg):
                rule.lhs = rule.lhs.replace("%id", "W_ID").replace("*", "W_ALL")
                rule.rhs = rule.rhs.replace("%id", "W_ID").replace("*", "W_ALL")
                rule.alt_lhs = rule.alt_lhs.replace("%id", "W_ID").replace("*", "W_ALL")
                rule.alt_rhs = rule.alt_rhs.replace("%id", "W_ID").replace("*", "W_ALL")

    logging.info("Generating the instance header (%s)" % (path + ".h"))
    createInstanceHeader(pcol, path + ".h", sys.argv[1].split("/")[-1], nr_robots)
    logging.info("Generating the instance source (%s)" % (path + ".c"))
    createInstanceSource(pcol, path, nr_robots, min_robot_id)
    pcol.print_colony_components()
|
988,612 | 3c4461998ab6672a739fd6d99695d96135777721 | from flask import Flask, request
app = Flask(__name__)
lista = ['Ambroży', 'Barnaba', 'Celina', 'Danuta', 'Eligiusz', 'Felicja']
@app.route('/osoby')
def odczyt_z_listy():
    """Return the person at list index given by the 'id' query parameter.

    Try http://127.0.0.1:5000/osoby?id=2 -- 'id=2' is the query string here.
    Without an id parameter, the whole list is returned.
    """
    indeks = request.args.get('id')
    if indeks:
        try:
            # request.args values are strings, so convert before indexing;
            # previously lista[indeks] raised TypeError (an uncaught 500),
            # never the IndexError this handler was written to catch.
            osoba = lista[int(indeks)]
            return f'Osoba pod indeksem {indeks} to {osoba}'
        except (IndexError, ValueError):
            return f'Nie ma indeksu {indeks} w liście'
    return f'Osoby na liście: {lista}'
if __name__ == "__main__":
app.run()
|
988,613 | 3c6940846c041888788e3b5462d82d8335aade2c | class Solution:
def isHappy(self, n: int) -> bool:
func = lambda x : sum(int(ch) ** 2 for ch in str(x))
slow = func(n);
fast = func(func(n));
while slow != fast:
slow = func(slow)
fast = func(func(fast))
if slow == 1:
return True
else:
return False
|
988,614 | 3e93fc509eecdbd745cab11854abd15be7f3db6b | #!/usr/bin/{{ pillar['pkgs']['python'] }}
import dns.query
import dns.resolver
import dns.reversename
import dns.update

# NOTE: this file is a Salt template -- the {{ ... }} placeholders are rendered
# by Salt (pillar/grains/salt lookups) before the script runs on the minion.
eth_ip = '{{ salt["network.interfaces"]()["eth0"]["inet"][0]["address"] }}'
server_addr = 'ns.skynet.hiveary.com'
fqdn = '{{ grains["host"] }}.skynet.hiveary.com.'
ttl = 7200

# forward (A record) dynamic update for this host
skynet_update = dns.update.Update('skynet.hiveary.com.')
# Remove the record if it already exists to prevent duplicates, then add the new address
skynet_update.delete(fqdn, 'A')
skynet_update.add(fqdn, ttl, 'A', eth_ip)

# Add the server to the DNS load balancing for its service
# The hostname is formatted as "IDENTIFIER-SERVICE-ENVIRONMENT"
service = '{{ grains["host"] }}'.split('-')[1]
current_addresses = dns.resolver.query(service)
addrs = [a.address for a in current_addresses]
if eth_ip not in addrs:
    # only append our address if it is not already in the round-robin set
    skynet_update.add(service, ttl, 'A', eth_ip)

# reverse (PTR) update; zone 37.13.in-addr.arpa covers addresses 13.37.x.x
ptr_update = dns.update.Update('37.13.in-addr.arpa.')
# Remove the record if it already exists to prevent duplicates, then add the new address
ptr_update.delete(dns.reversename.from_address(eth_ip), 'PTR')
ptr_update.add(dns.reversename.from_address(eth_ip), ttl, 'PTR', fqdn)

# send both dynamic updates to the authoritative server over TCP
dns.query.tcp(skynet_update, server_addr)
dns.query.tcp(ptr_update, server_addr)
|
988,615 | 592e5eaf156c8f1eeb498734f6f4213ac9da921f | from flask import jsonify
import db_helper
import api_utils
def list_customers():
    """Return a JSON object listing every customer together with its customer_id."""
    return jsonify({"customers": db_helper.get_all_customers()})
def show_accounts(customer_id):
    """Show a list of accounts for requested customer_ID

    :customer_id: id of the customer whose accounts are listed
    :returns: JSON {"accounts": [...]} on success, or a 404 error response
        when the customer has no accounts (or does not exist)"""
    customer_accounts = db_helper.get_customer_accounts(customer_id)
    if not customer_accounts:
        # NOTE: the backslash continuation splices the next source line's text
        # straight into the message string
        return api_utils.error("No accounts for customer with id \
number {} found".format(customer_id), 404)
    else:
        return jsonify({"accounts": customer_accounts})
def create_customer(data):
    """Creates a new customer for a customer name and mobile number

    :data: request payload; must contain 'customer_name' and 'mobile_number'
    :returns: JSON {'new_customer': ...} on success, an error response when a
        required parameter is missing or the mobile number is already taken"""
    mandatory_params = ['customer_name', 'mobile_number']
    result = api_utils.check_required_params(mandatory_params, data)
    if result:
        # a mandatory parameter is missing; result already is the error response
        return result
    mobile_number = db_helper.mobile_number_unique(data['mobile_number'])
    if not mobile_number:
        return api_utils.error("There already is a customer with \
mobile number {} found".format(data['mobile_number']), 404)
    # NOTE(review): 'mobile_number' passed below is the return value of
    # mobile_number_unique(), not data['mobile_number']. If that helper returns
    # a plain boolean, the wrong value gets stored -- confirm against db_helper.
    new_customer = db_helper.add_new_customer(data['customer_name'],
                                              mobile_number)
    return jsonify({'new_customer': new_customer})
|
988,616 | a7f72635e7196d1cc059aa8caa49ed1804d8912f | # coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.HIGGS.explore.tau_effect
import os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from config import SAVING_DIR
from config import SEED
from visual import set_plot_config
set_plot_config()
from problem.higgs.higgs_geant import load_data
from problem.higgs.higgs_geant import split_data_label_weights
from problem.higgs import get_parameter_class
from problem.higgs import get_higgsnll_class
from problem.higgs import get_config_class
from problem.higgs import get_generator_class
from problem.higgs import get_higgsloss_class
from problem.higgs import get_parameter_generator
TES = True
JES = False
LES = False
Parameter = get_parameter_class(TES, JES, LES)
NLLComputer = get_higgsnll_class(TES, JES, LES)
Config = get_config_class(TES, JES, LES)
GeneratorClass = get_generator_class(TES, JES, LES)
HiggsLoss = get_higgsloss_class(TES, JES, LES)
param_generator = get_parameter_generator(TES, JES, LES)
DATA_NAME = 'HIGGS'
BENCHMARK_NAME = DATA_NAME
DIRECTORY = os.path.join(SAVING_DIR, BENCHMARK_NAME, "explore")
def main():
    """Entry point: load the HIGGS GEANT dataset and run the tau-energy-scale
    minibatch-size exploration; output goes under DIRECTORY/tes_minibatch."""
    print('Hello world')
    os.makedirs(DIRECTORY, exist_ok=True)
    data = load_data()
    # fixed seed so repeated runs draw the same event sequences
    generator = GeneratorClass(data, seed=2)
    dirname = os.path.join(DIRECTORY, 'tes_minibatch')
    os.makedirs(dirname, exist_ok=True)
    minibatchsize(generator, dirname=dirname)
def minibatchsize(generator, dirname=DIRECTORY):
    """Print sampled means of the PRI_tau_pt feature for the calibrated
    tau-energy-scale (tes) parameter and for tes shifted up/down by DELTA.

    :param generator: event generator; reset() between parameter settings so
        every setting is evaluated on the same underlying event stream
    :param dirname: kept for interface compatibility (not used here)
    """
    DELTA = 0.03
    config = Config()
    nominal_param = config.CALIBRATED
    up_param = nominal_param.clone_with(tes=nominal_param.tes + DELTA)
    down_param = nominal_param.clone_with(tes=nominal_param.tes - DELTA)
    # Identical evaluation for nominal, up-shifted and down-shifted tes;
    # folds the previously triplicated code into one loop and drops an unused
    # timestamp local.
    for idx, param in enumerate((nominal_param, up_param, down_param)):
        if idx:
            # rewind so this parameter set sees the same events as the first
            generator.reset()
        mean_values = get_mean_pri_tau_pt_means(generator, param)
        for k, v in mean_values.items():
            print(f'{k} : {np.mean(v)} +/- {np.std(v)}')
def get_mean_pri_tau_pt_means(generator, param):
    """Repeatedly sample events at several sample sizes and collect the
    weighted mean of the feature at column 12 (PRI_tau_pt per feature_names).

    :param generator: event generator with feature_names and generate()
    :param param: parameter set, unpacked into generator.generate()
    :returns: dict mapping sample size -> list of 50 weighted-mean values
    """
    print(param, *param)
    n_repeats = 50
    feature_idx = 12
    print(generator.feature_names[feature_idx])
    mean_values = {}
    for sample_size in np.arange(10_000, 100_000, 10_000):
        print(f'processing with {sample_size} events ...')
        means_for_size = []
        for _ in range(n_repeats):
            X, y, w = generator.generate(*param, n_samples=sample_size, no_grad=True)
            feature = X[:, feature_idx]
            # event-weighted average of the feature column
            weighted_mean = (feature * w).sum() / w.sum()
            means_for_size.append(weighted_mean.detach().numpy())
        mean_values[sample_size] = means_for_size
    return mean_values
if __name__ == '__main__':
main()
|
988,617 | e33d0f6ffd29ec1ee7a55069fabbcc0fc6352ddd | from django import template
register=template.Library()
@register.filter(name='trunc')
def truncate_n(value, n):
    """Template filter 'trunc': return the first n elements of value (slice)."""
    return value[:n]
988,618 | 419189ba840f3baffd14eebd053d85a1aff8e3fb | #!/usr/bin/env python
import sys
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

# Fit a simple linear regression of sin_theta on circle_x from a CSV given as
# the first command-line argument, print the fitted parameters and show a
# scatter plot with the fitted line.
df = pd.read_csv(sys.argv[1])  # expects columns 'circle_x' and 'sin_theta'

lr = LinearRegression()
X = df[['circle_x']]  # 2-D (n, 1) design matrix, as sklearn expects
Y = df['sin_theta']
lr.fit(X, Y)
print('coef=', lr.coef_)
print('intercept=', lr.intercept_)

plt.scatter(X, Y)
plt.plot(X, lr.predict(X), color='red')
plt.xlabel('circle_x')
plt.ylabel('sin_theta')
plt.show()
|
988,619 | 24928c891ac1e8f096364baf460a2222fc3da2d6 | from admin import BaseNoteAdmin
from widgets import ReminderWidget
from forms import LockoutFormSetMixin, NoteFormSet, NoteForm
from models import Note, NotesField
|
988,620 | 572d3027de6f8fb05a799337686ef6f649abf2d4 | #Author: Molly Creagar
#coding the Perceptron algorithm (linear classifier) from scratch
import numpy as np
import matplotlib.pyplot as plt
def perceptron(Xpos, Xneg, t):
    """Train a perceptron separating the two point clouds and animate progress.

    :param Xpos: (nPos, 3) array of positive samples in homogeneous coords [x, y, 1]
    :param Xneg: (nNeg, 3) array of negative samples in homogeneous coords
    :param t: learning rate
    :returns: weight vector a; decision boundary is a.dot([x, y, 1]) = 0

    Fix over the original: the training set is derived from the arguments
    instead of the module-level globals X / numPos / numXi (same values, but
    the function is now self-contained). The plot window bounds still come
    from the module-level xMin/xMax/yMin/yMax consumed by plotLine().
    """
    # run algorithm 50000 times
    numEpochs = 50000
    # we want to see the boundary every 500 epochs
    boundaryVis = 500
    nPos = Xpos.shape[0]
    data = np.concatenate((Xpos, Xneg))  # positives first, then negatives
    nSamples = data.shape[0]
    # random starting weight vector
    a = np.random.randn(3)
    for epoch in range(numEpochs):
        # visit samples in a fresh random order each epoch (no replacement)
        for j in np.random.permutation(nSamples):
            xi = data[j, :]
            if j < nPos:
                # positive sample: we want a.dot(xi) > 0
                if a.dot(xi) < 0:
                    a = a + t * xi
            else:
                # negative sample: we want a.dot(xi) < 0
                if a.dot(xi) > 0:
                    a = a - t * xi
        # show the updates every 500 epochs
        if epoch % boundaryVis == 0:
            print("Epoch ", epoch)
            plt.gcf().clear()
            plt.scatter(Xpos[:, 0], Xpos[:, 1], c="purple")
            plt.scatter(Xneg[:, 0], Xneg[:, 1], c="green")
            # replot the points and line
            plotLine(a, xMin, xMax, yMin, yMax)
            plt.axis("equal")
            plt.pause(.05)
    plt.show()
    return a
def plotLine(a, xMin, xMax, yMin, yMax):
    """Draw the boundary a[0]*x + a[1]*y + a[2] = 0, clipped to the view box."""
    xs = np.linspace(xMin, xMax, 100)
    # solve a[0]*x + a[1]*y + a[2] = 0 for y at each sampled x
    ys = (-a[0] * xs - a[2]) / a[1]
    keep = np.where((ys >= yMin) & (ys <= yMax))
    plt.plot(xs[keep], ys[keep])
#create data
numPos = 100
numNeg = 100
np.random.seed(14)  # fixed seed for reproducible clouds
# positive cloud centred at (1, 1), negative at (-1, -1), both unit covariance
muPos = [1.0, 1.0]
covPos = np.array([[1.0,0.0],[0.0,1.0]])
muNeg = [-1.0, -1.0]
covNeg = np.array([[1.0,0.0],[0.0,1.0]])
# rows are homogeneous samples [x, y, 1] so the bias folds into weight a[2]
Xpos = np.ones((numPos,3))
for i in range(numPos):
    Xpos[i,0:2] = np.random.multivariate_normal(muPos, covPos)
Xneg = np.ones((numNeg,3))
for i in range(numNeg):
    Xneg[i,0:2] = np.random.multivariate_normal(muNeg,covNeg)
X = np.concatenate((Xpos,Xneg))
numPos = Xpos.shape[0]
numXi = X.shape[0]
# plotting window bounds used by plotLine()
xMin = -3.0
xMax = 3.0
yMin = -3.0
yMax = 3.0
t = .000001  # learning rate
a = perceptron(Xpos,Xneg,t)
|
988,621 | 3e020aa0a6ad9a7cb6ca3b2d424f534b76d5d0e6 | # -*- coding: utf-8 -*-
import os
import numbers
import filecmp
import warnings
from collections import defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
import matplotlib.patches as patches
from matplotlib.lines import Line2D
from matplotlib_venn import venn3
import seaborn as sns
import numpy as np
import pandas as pd
import scipy as sc
from statsmodels.sandbox.stats.multicomp import fdrcorrection0
from statsmodels.stats.proportion import proportions_ztest
from scipy.stats import linregress
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Helvetica']
# If fonts don't work, move fonts to /usr/share/matplotlib/mpl-data/fonts/ttf/
# or .local/lib/python2.7/site-packages/matplotlib/mpl-data/fonts/ttf/ and run:
#from matplotlib import font_manager
#font_manager._rebuild()
class Plotter(object):
linewidth = 0.75
    def __init__(self, output_path='results', page_size=7):
        """Prepare the output directory and set small global rc font sizes.

        :param output_path: directory where figures are saved (created if missing)
        :param page_size: figure scale factor; figure sizes passed to plot()
            are multiplied by this value (presumably inches -- TODO confirm)
        """
        self.output_path = output_path
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        self.page_size = page_size
        # global matplotlib defaults: 5pt ticks/labels, tight subplot margins
        plt.rc('xtick', labelsize=5)
        plt.rc('ytick', labelsize=5)
        plt.rc('axes', labelsize=5)
        # plt.rc('subplot', left=7)
        plt.rc('figure.subplot', left=0.15, bottom=0.15, top=0.95, right=0.95)
    def _legend(self,
        ax, plots=[], labels=[], position='right', legend_title=None, reverse=True, handletextpad=None,
        shift_right=0, shift_top=0, ncol=1, columnspacing=None, markerscale=None, handlelength=None,
        handler_map=None
    ):
        """Attach a consistently styled legend to *ax* and return it.

        :param ax: target axes
        :param plots, labels: explicit legend entries; when empty, matplotlib's
            auto-collected handles/labels are used
        :param position: 'right' (outside, upper left anchor), 'above', or
            'floatright' (inside the axes, framed)
        :param reverse: reverse entry order before laying out
        NOTE(review): mutable defaults ([]) are not mutated here (only
        rebound via slicing), so they are harmless but unconventional.
        """
        if reverse:
            plots = plots[::-1]
            labels = labels[::-1]
        if ncol == 2:
            # interleave entries so they read column-wise in a 2-column legend
            plots = plots[::2] + plots[1::2]
            labels = labels[::2] + labels[1::2]
        frameon = False
        borderpad = None
        labelspacing = 0.3
        bbox = None
        # NOTE(review): loc is only assigned inside the position branches;
        # an unknown position value would leave it unbound -- confirm callers
        # only pass the three supported values.
        if position == 'right':
            bbox = (1+shift_right, 1+shift_top)
            loc='upper left'
        if position == 'above':
            bbox = (shift_right, 1+shift_top)
            loc='lower left'
        if position == 'floatright':
            # framed legend floating inside the axes; tighter paddings
            loc='upper right'
            frameon = True
            borderpad = 0.5
            handletextpad = 0.5
            handlelength=0.8
        legend_props = {
            'bbox_to_anchor':bbox, 'loc': loc,
            'prop': {'size': 5}, 'frameon': frameon,
            'title': legend_title,
            'ncol': ncol,
            'borderpad': borderpad, 'handlelength': handlelength, 'handletextpad': handletextpad,
            'markerscale': markerscale,
            'columnspacing': columnspacing, 'labelspacing': labelspacing
        }
        if plots and labels:
            legend = ax.legend(plots, labels, handler_map=handler_map, **legend_props)
        else:
            # fall back to the handles/labels matplotlib collected automatically
            legend = ax.legend(handler_map=handler_map, **legend_props)
        if legend_title:
            legend.set_title(legend_title, prop={'size': 5})
            # private matplotlib attribute; left-aligns the title with entries
            legend._legend_box.align = "left"
        if position == 'floatright':
            legend.get_frame().set_linewidth(0.5)
        return legend
def plot(
self, plot_type, data, save='', show=True, size=0.5, colors=defaultdict(lambda: 'k'), save_scale=2,
y_label='', x_label='', xlim=None, ylim=None, no_x=False, no_y=False,
hline=None, vline=None, xticks=None, xticklabels=None, yticks=None, yticklabels=None,
hlines=None,
xpad=2, ypad=2, xtickpad=1, ytickpad=1, ylog=False, xlog=False,
xgrid=False, ygrid=False, xintersect=None, yintersect=None, border=None,
scalex=None, colorbar=None, margin={'left': 0.08, 'right': 0.02, 'top': 0.02, 'bottom': 0.08},
ticklength=3,
*args, **kwargs
):
plot_method = getattr(self, plot_type, None)
assert callable(plot_method), 'Plot type ' + plot_type + ' does not exist'
if show:
plt.ion()
else:
plt.ioff()
if isinstance(size, numbers.Number):
size = (size, size * 0.75)
size = np.array(size)
self.figure_size = np.copy(size)
size[0] += margin['left'] + margin['right']
size[1] += margin['top'] + margin['bottom']
fig = plt.figure(figsize=(size*self.page_size), dpi=150)
ax = fig.gca()
plt.subplots_adjust(
left=margin['left']/size[0],
right=1-margin['right']/size[0],
bottom=margin['bottom']/size[1],
top=1-margin['top']/size[1],
)
if not border:
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
if no_x:
ax.spines['bottom'].set_visible(False)
if no_y:
ax.spines['left'].set_visible(False)
for d in ('top', 'left', 'right', 'bottom'):
ax.spines[d].set_linewidth(border or Plotter.linewidth)
ax.tick_params(width=Plotter.linewidth, length=ticklength)
if no_x and no_y:
ax.tick_params(width=Plotter.linewidth, length=0)
ax.tick_params(axis='x', which='both', pad=xtickpad)
ax.tick_params(axis='y', which='both', pad=ytickpad)
if xgrid:
plt.grid(which='major', axis='x', linewidth=Plotter.linewidth, color='#eeeeee', clip_on=False, zorder=0)
if ygrid:
plt.grid(which='major', axis='y', linewidth=Plotter.linewidth, color='#eeeeee', clip_on=False, zorder=0)
if xlog:
ax.set_xscale('log')
if ylog:
ax.set_yscale('log')
if ylim:
if type(ylim) == tuple:
ax.set_ylim(ylim)
else:
ax.set_ylim(top=ylim)
if xlim:
ax.set_xlim(xlim)
# **********************************************************************
plot_method(ax, data, colors, *args, **kwargs)
# **********************************************************************
if plot_type == 'hist' and xlim:
ax.set_xlim(xlim)
ax.set_ylabel(y_label, labelpad=ypad)
ax.set_xlabel(x_label, labelpad=xpad)
if xticks is not None:
if str(xticks) == 'remove':
ax.set_xticks([])
ax.set_xticks([], minor=True)
else:
ax.set_xticks(xticks)
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
if yticks is not None:
if str(yticks) == 'remove':
ax.set_yticks([])
ax.set_xticks([], minor=True)
else:
ax.set_yticks(list(yticks))
if yticklabels is not None:
ax.set_yticklabels(yticklabels)
if hline is not None:
ax.axhline(hline, c='k', linewidth=Plotter.linewidth, zorder=-1)
if hlines:
for y in hlines:
ax.axhline(y, c='#eeeeee', linewidth=Plotter.linewidth/2.0, zorder=-1, clip_on=False)
if vline is not None:
ax.axvline(vline, color='#777777', linestyle='dotted', linewidth=Plotter.linewidth, zorder=-1)
if xintersect is not None:
ax.spines['bottom'].set_position(('data', xintersect))
if yintersect is not None:
ax.spines['left'].set_position(('data', yintersect))
if colorbar:
ax_bar = fig.add_axes([0.89, 0.41, 0.02, 0.4])
(r1, g1, b1), (r2, g2, b2) = colorbar
cmap = mpl.colors.ListedColormap(zip(*[np.linspace(r1, r2, 101), np.linspace(g1, g2, 101), np.linspace(b1, b2, 101)]))
cb = mpl.colorbar.ColorbarBase(ax_bar, cmap=cmap, orientation='vertical', ticks=[0, 1])
cb.set_ticklabels(['0%', '100%'])
ax_bar.set_title('Convergent\nsimulations', size=5)
# scale x-axis internally.
if scalex:
xticks = ax.get_xticks()
ax.set_xticklabels([x*scalex for x in xticks])
if save:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
png_path = os.path.join(self.output_path, save + '.png')
png_path_old = os.path.join(self.output_path, save + '__old.png')
pdf_path = os.path.join(self.output_path, save + '.pdf')
# Only update pdf if the graph changed, to prevent confusing git.
if os.path.exists(png_path):
os.rename(png_path, png_path_old)
plt.savefig(png_path, dpi=fig.dpi*save_scale)
graph_changed = True
if os.path.exists(png_path_old):
graph_changed = not(filecmp.cmp(png_path, png_path_old))
os.remove(png_path_old)
try:
if graph_changed:
plt.savefig(pdf_path) #, bbox_inches='tight')
except Exception:
print('Error while trying to save pdf')
print(f'Saved to `{os.path.join(self.output_path, save)}`:')
if show:
plt.show()
else:
plt.close()
def empty(self, ax, data, colors):
ax.plot([], [])
    def table(self, ax, columns, colors, width=1000, height=1000, header=None, row_names=None):
        """Draw a table of colored text labels using plain axes primitives.

        columns: list of columns; each column is a list of rows; each row is a
            list of item names, laid out n_per_col names per text line.
        colors: mapping from item name to text color.
        header: optional column headers drawn in a shaded strip at the top.
        row_names: optional labels to the right of the last column's rows.
        """
        if header is None:
            header = []
        if row_names is None:
            row_names = []
        ax.plot([], [])
        ax.set_xlim([0, width])
        ax.set_ylim([0, height])
        w = float(width)
        h = float(height)
        # Vertical separators, one per column boundary.
        x_coords = list(range(0, width, width // len(columns)))
        for x in x_coords:
            ax.axvline(x, color='k', lw=0.5, clip_on=False)
        for y in [0, height]:
            ax.axhline(y, xmax=x_coords[-1]/w, color='k', lw=0.5, clip_on=False)
        row_height = 10
        column_width = x_coords[1]
        n_per_col = 3
        if header:
            # Reserve a 20-unit shaded strip at the top for the header row.
            height -= 20
            ax.axhline(height, xmax=x_coords[-1]/w, color='k', lw=0.5, clip_on=False)
            ax.add_patch(patches.Rectangle((0, height), column_width*len(columns), 20, linewidth=0, facecolor='#cccccc'))
            for i, h in enumerate(header):
                ax.text(x_coords[i]+column_width*0.5, height+10, h, size=5,
                    verticalalignment='center', horizontalalignment='center')
        margin = 5
        y_heights = []
        for col_i, column in enumerate(columns):
            y_height = []
            n_y = 0
            for row_i, row in enumerate(column):
                n_x = 0
                if row_i != 0:
                    # Horizontal rule between consecutive rows of this column.
                    n_y += 1 + (margin*2.0)/row_height
                    ax.axhline(
                        height - (n_y*row_height),
                        xmin=x_coords[col_i]/w,
                        xmax=x_coords[col_i+1]/w,
                        color='k', lw=0.5, clip_on=False
                    )
                for n_i, n in enumerate(row):
                    # Wrap to a new text line after n_per_col entries.
                    if n_x >= n_per_col:
                        n_x = 0
                        n_y += 1
                    c = colors[n]
                    if n.startswith('BWM'):
                        # Strip the 'BWM' prefix plus separator for display.
                        n = n[4:]
                    ax.text(
                        x_coords[col_i]+n_x*(column_width/n_per_col) + margin,
                        height - (n_y*row_height + margin),
                        n, size=5, color=c, verticalalignment='top')
                    n_x += 1
                y_height.append(n_y+1)
            y_heights.append(y_height)
        # Row labels to the right, centered on each row span of the last column.
        for i in range(len(y_heights[-1])):
            end = y_heights[-1][i]
            start = y_heights[-1][i-1] if i else 0
            ax.text(
                x_coords[-1] + 10,
                height - (np.mean([start, end])*row_height+margin*2),
                row_names[i], size=5, verticalalignment='center'
            )
        # if row_names and col_i == len(columns)-1:
        #     ax.text(
        #         x_coords[-1],
        #         height - (n_y*row_height),
        #         row_names[row_i], size=5,
        #     )
def pie(self, ax, data, colors):
ax.pie(
data, startangle=90, counterclock=False, colors=colors,
wedgeprops={'edgecolor': (0.35, 0.25, 0), 'linewidth': 0.3}
)
    def xy_graph(self, ax, data, colors, cumulative=False, err=[], dotted=True, rev_legend=False, no_legend=False,
                 stats=None, larval_stages=True, linkpoints=True, pvalues=None, smooth=False, alpha=1, legendpos=None,
                 legend_shift_right=0, legend_shift_top=0, markersize=6, legendcol=None, clipon=False, dots=True,
                 linewidth=None):
        """
        Generates a simple line graph, where the x-axis is the developmental
        timeline of C. elegans, with larval stages labeled instead of hours post-
        hatching.

        data: (xs, ys) where ys is a list of (label, y_values) series sharing xs.
        cumulative: stack the series as filled areas instead of lines.
        err: per-series error-bar heights aligned with xs (0 suppresses a bar).
        stats: one of 'regression', 'regression_only', 'spearmanr', 'log' — fit
            each series and annotate FDR-corrected significance past x = 55.
        pvalues: (p_list, positions) pair for manual significance annotations.
        """
        xs, ys = data
        larval_stage_ends = [0, 16, 25, 34, 45]
        larval_stage_mids = [8, 20.5, 29.5, 39.5, 50]
        larval_stage_labels = ['L1', 'L2', 'L3', 'L4', ' Adult']
        # Set x-axis to larval stages.
        if larval_stages:
            ax.set_xlim([0, 55])
            ax.set_xticks([0])
            ax.tick_params(axis='x', labelbottom=False)
            # Stage names go on hidden minor ticks at the stage midpoints.
            ax.set_xticks(larval_stage_mids, minor=True)
            ax.set_xticklabels(larval_stage_labels, minor=True)
            ax.tick_params(axis='x', which='minor', bottom=False, pad=1)
            linestyle = 'dotted' if dotted else 'solid'
            for t in larval_stage_ends:
                plt.axvline(t, color='#999999', linestyle=linestyle, linewidth=Plotter.linewidth, zorder=-1)
        stats_values = []
        if stats:
            # Pre-compute one fit (and its annotation position) per series.
            for i, (l, y) in enumerate(ys):
                reg = linregress(xs, y)
                a = reg.slope
                b = reg.intercept
                assert stats in ('regression', 'spearmanr', 'log', 'regression_only')
                if stats == 'regression':
                    p = reg.pvalue
                if stats == 'regression_only':
                    p = -1
                elif stats in ('spearmanr', 'log'):
                    if len(set(y)) == 1:
                        # Spearman is undefined for a constant series.
                        p = -1
                    else:
                        p = sc.stats.spearmanr(xs, y).pvalue
                if stats == 'log':
                    # Avoid log(0). NOTE(review): this mutates the caller's xs.
                    xs[0] += 0.01
                    z = sc.optimize.curve_fit(lambda t, a, b: a+b*np.log(t), xs, y)
                    fit = lambda x: z[0][0] + z[0][1] * np.log(x)
                else:
                    fit = lambda x: b + a * x
                x_fit = np.arange(0.01, 55, 0.01)
                y_fit = [fit(x) for x in x_fit]
                h = (ax.get_ylim()[1] - ax.get_ylim()[0]) / 50
                text_x = 55 + 2
                text_y = fit(text_x)
                if p < 0.05:
                    # Drop the label slightly so stars sit on the fit line.
                    text_y -= h*0.5
                stats_values.append((x_fit, y_fit, text_x, text_y, p))
        # Running stack total for cumulative mode.
        y_total = np.zeros(len(ys[0][1]))
        for i, (l, y) in enumerate(ys):
            # Default palette cycles through 8 fixed colors.
            color_i = i % 8
            color = ['k', 'grey', 'm', 'r', 'b', 'orange', 'g', 'y'][color_i]
            edge_color = color
            if colors and type(colors) == list:
                color = colors[i]
                edge_color = color
            if colors and type(colors) == dict:
                if l in colors:
                    color = colors[l]
                    edge_color = colors[l+'_edge'] if l+'_edge' in colors else color
                else:
                    edge_color = 'k'
            if err and sum(err[i]):
                # Only draw error bars where the height is non-zero.
                err_xs, err_ys, err_hs = zip(*[(err_x, err_y, err_h) for err_x, err_y, err_h in zip(xs, y, err[i]) if err_h])
                e = ax.errorbar(err_xs, err_ys, yerr=err_hs, linestyle='None', elinewidth=Plotter.linewidth/1.5, color=color, capsize=0, capthick=Plotter.linewidth/1.5, clip_on=False)
                for b in e[2]:
                    b.set_clip_on(False)
            if cumulative:
                # Stacked-area mode: outlines use the '_edge' colors, fills the
                # base colors from the same dict.
                line_colors = {k[:-5]: v for k, v in colors.items() if k.endswith('_edge')}
                fill_colors = colors
                colors = fill_colors
                x_raw = list(xs)
                y_raw = list(y)
                y_before = y_total
                y_total = y_total + y_raw
                # NOTE(review): assumes at least two x samples; a duplicated
                # final x is averaged into one point for the outline.
                if x_raw[-1] == x_raw[-2]:
                    x_average = list(xs[:-2]) + [np.mean(xs[-2:])]
                    y_average = list(y_total[:-2]) + [np.mean(y_total[-2:])]
                    y_before_average = list(y_before[:-2]) + [np.mean(y_before[-2:])]
                else:
                    x_average = x_raw
                    y_average = y_total
                    y_before_average = y_before
                ax.plot(
                    list(x_average) + [60], list(y_average) + [y_average[-1]],
                    color=line_colors[l], label=l, linewidth=Plotter.linewidth, clip_on=clipon,
                    markersize=0, alpha=(0.0 if line_colors[l] == fill_colors[l] else 1)
                )
                ax.plot(
                    x_raw, y_total, color=line_colors[l], label=l, linewidth=0, clip_on=clipon,
                    marker='.', markeredgewidth=Plotter.linewidth/2,
                    markeredgecolor=line_colors[l], markersize=markersize,
                    alpha=(0.0 if line_colors[l] == fill_colors[l] else 1)
                )
                ax.fill_between(
                    list(x_average) + [60],
                    list(y_average) + [y_average[-1]],
                    list(y_before_average) + [y_before_average[-1]],
                    facecolor=fill_colors[l],
                    alpha=0.8
                )
                continue
            if stats:
                ax.plot(xs, y, markerfacecolor=color, markeredgecolor=edge_color, label=l, marker='.', linewidth=0, markersize=markersize, markeredgewidth=Plotter.linewidth/2.0, clip_on=clipon)
                x_fit, y_fit, text_x, text_y, p = stats_values[i]
                # FDR-correct across all series before reporting significance.
                ps = [v[4] for v in stats_values]
                ps_corrected = fdrcorrection0(ps)[1]
                p = ps_corrected[i]
                print('Corrected p-value:', p)
                if p == -1:
                    t = ''
                elif p > 0.05:
                    t = 'ns'
                elif p > 0.01:
                    t = '*'
                elif p > 0.001:
                    t = '**'
                else:
                    t = '***'
                ax.plot(x_fit, y_fit, color=edge_color, marker=None, linewidth=Plotter.linewidth/2.0, linestyle='dashed', clip_on=clipon)
                ax.text(text_x, text_y, t, ha='left', va='center', color='k', linespacing=20, size=5)
            if linkpoints:
                if dots:
                    ax.plot(xs, y, color=color, marker='.', linewidth=0, markersize=markersize, alpha=alpha, label='_nolegend_')
                # Take the mean of duplicate values.
                if type(y) == pd.Series:
                    y_line = y.groupby(y.index.name).mean()
                    xs_line = y_line.index
                else:
                    unique_y = defaultdict(list)
                    for j, x in enumerate(xs):
                        unique_y[x].append(y[j])
                    xs_line = sorted(set(xs))
                    y_line = [np.mean(unique_y[x]) for x in xs_line]
                linewidth = linewidth or Plotter.linewidth
                linestyle = 'solid'
                if smooth:
                    # 3-point moving average; endpoints are left untouched.
                    for j in range(len(y_line)):
                        if j == 0 or j == len(y_line)-1:
                            continue
                        y_line[j] = np.mean(y_line[j-1:j+2])
                ax.plot(xs_line, y_line, color=color, label=l, linewidth=linewidth, linestyle=linestyle, alpha=alpha)
            else:
                if not stats:
                    # NOTE(review): debug print — consider removing.
                    print(xs, y)
                ax.plot(xs, y, marker='.', markerfacecolor=color, markeredgewidth=Plotter.linewidth/2, markeredgecolor=color, linewidth=0, markersize=markersize)
        if pvalues:
            # Manual significance annotations at caller-supplied positions.
            for p, (text_x, text_y) in zip(*pvalues):
                if p > 0.05:
                    t = 'ns'
                elif p > 0.01:
                    t = '*'
                elif p > 0.001:
                    t = '**'
                else:
                    t = '***'
                ylim = ax.get_ylim()
                yrange = ylim[1]-ylim[0]
                if '*' in t:
                    text_y -= yrange*0.02
                # Clamp the label inside the axes.
                text_y = min(ylim[1]-yrange*0.04, text_y)
                text_y = max(ylim[0]+yrange*0.04, text_y)
                ax.text(text_x, text_y, t, ha='center', va='center', color='k')
        if len(ys) > 1 and not no_legend:
            if cumulative:
                ncol = 3
                columnspacing = 1.5
                handletextpad = 0.4
                handlelength = 0.7
                labels = [l for l, y in ys]
                legend_elements = [Patch(facecolor=colors[l], edgecolor=colors[l+'_edge'], label=l) for l in labels]
            else:
                ncol = 1
                columnspacing = None
                handletextpad = 0
                handlelength = None
                position='right'
                legend_elements, labels = ax.get_legend_handles_labels()
                if err:
                    ncol = 3
                    position='above'
                    columnspacing = 0.5
                    handlelength = 1.3
                if legendcol:
                    ncol = legendcol
                if ncol == 2:
                    columnspacing = -4
                    # columnspacing = -5.5
                    # ncol = 2
                    position='above'
            if legendpos:
                position = legendpos
            self._legend(
                ax, legend_elements, labels, reverse=rev_legend, handletextpad=handletextpad,
                ncol=ncol, columnspacing=columnspacing, position=position,
                shift_right=legend_shift_right, shift_top=legend_shift_top,
                handlelength=handlelength
            )
    def skeleton1d(self, ax, data, color, scale=5000, datasets=[], show_post=True):
        """Draw straightened skeletons as horizontal lines, one per dataset,
        with red arrows at the positions given in data[1] (presumably synapse
        sites — see the 'Synapse' legend arrow below).

        data: (max_dists, pre_dists[, post_dists]) in raw units, divided by
            *scale* for display.
        datasets: y-axis labels, one per line.
        The [] default for datasets is never mutated.
        NOTE(review): the `color` and `show_post` parameters are currently
        unused (the post_dists branch is commented out).
        """
        ax.axis('equal')
        ax.spines['left'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.set_xticks([])
        ax.tick_params(length=0, pad=10, labelsize=12)
        ax.axis([-1, 1, -1, 1])
        # Vertical spacing between consecutive skeleton lines.
        sep = 0.7
        max_dists = np.array(data[0])/scale
        pre_dists = [np.array(d)/scale for d in data[1]]
        # if show_post:
        #     post_dists = [np.array(d)/scale for d in data[2]]
        ax.set_xlim((0, max(max_dists)+0.1))
        y_ticks = []
        for i, dist in enumerate(max_dists):
            # Stack lines top to bottom, roughly centered vertically.
            y = -i * sep + (len(max_dists)/2.0)
            y_ticks.append(y)
            ax.plot((0, dist), (y, y), color='k', linewidth=Plotter.linewidth*2, solid_capstyle='round')
            # Small zig-zag at x=0 marking the start of the line.
            ax.plot((-0.02, 0.02, -0.02, 0.02), (y-0.15, y-0.05, y+0.05, y+0.15), color='k', linewidth=Plotter.linewidth/2, clip_on=False)
            for pre_dist in pre_dists[i]:
                ax.arrow(pre_dist, y-0.08, 0, -0.1, head_length=0.12, width=0.01, head_width=0.1, overhang=0.05, color='r')
        ax.set_yticks(y_ticks)
        ax.set_yticklabels(datasets)
        # 3 um scale bar below the last line; relies on the loop's final
        # `dist` and `y_ticks` values.
        scalebar_x = ((dist*scale-3000)/scale, dist)
        scalebar_y = (y_ticks[-1]-sep, y_ticks[-1]-sep)
        ax.plot(scalebar_x, scalebar_y, color='k', linewidth=Plotter.linewidth)
        ax.annotate(u'3 μm', xy=(np.mean(scalebar_x), np.mean(scalebar_y)-0.1), ha='center', va='top', fontsize=12)
        # Legend arrow: a labeled sample marker next to the first line.
        ax.arrow(dist-1, y_ticks[0]+0.11, 0, -0.1, head_length=0.12, width=0.01, head_width=0.1, overhang=0.05, color='r')
        ax.annotate(u'Synapse', xy=(dist-1+0.2, y_ticks[0]), ha='left', va='center', fontsize=12)
    def kde(self, ax, data, colors, fill=True, clear_x=True, linewidth=None):
        """Overlay kernel-density estimates, one per (label, samples) pair in
        *data*, clipped to [0, 1], with the legend above the axes."""
        for i, (label, x) in enumerate(data):
            sns.kdeplot(x=x, label=label, color=colors[i], fill=fill, clip=(0,1), linewidth=linewidth or self.linewidth)
        if clear_x:
            # Redraw the baseline so the x-axis reads as a clean line.
            ax.plot((0, 1), (0, 0), color='k')
        ax.tick_params(length=0)
        self._legend(ax, position='above')
    def hist(self, ax, data, colors, bins=20, fitbins=False, hist_range=None,
             cumulative=False, relative=False, testweight=None, stats=None,
             highlights=None):
        """Histogram(s): *data* is either one sample, or a tuple of
        (label, sample) pairs drawn as overlaid step histograms with a legend.

        fitbins: one bin per unit of the data range instead of *bins*.
        relative: weight each sample by 1/n so its bars sum to 1.
        cumulative: unfilled cumulative outlines instead of filled bars.
        stats: 'wilcoxon' annotates a one-sample Wilcoxon p-value.
        highlights: (label, (x, y)) pairs marked with an arrow plus text.
        """
        if hist_range and cumulative:
            # Extend the range so the cumulative outline runs past the data.
            hist_range = (hist_range[0], hist_range[1]*1.1)
        if isinstance(data, tuple):
            plots = []
            labels = []
            for i, (l, d) in enumerate(data):
                if relative:
                    weights = np.zeros_like(d) + 1. / len(d)
                    if testweight and len(testweight) == len(weights):
                        weights *= testweight
                else:
                    weights = np.zeros_like(d) + 1
                if cumulative:
                    # Transparent fill; legend entry is a line, not a patch.
                    facecolor = (0, 0, 0, 0)
                    edgecolor = colors[i]
                    plots.append(Line2D([0], [0], color=edgecolor, linewidth=Plotter.linewidth))
                else:
                    facecolor = colors[i]
                    # Outline in a darker shade of the fill color.
                    edgecolor = np.array(colors[i][:3])/2
                    plots.append(Patch(facecolor=facecolor, edgecolor=edgecolor, linewidth=Plotter.linewidth, label=l))
                ax.hist(d, bins=(int((max(d)-min(d))) if fitbins else bins), weights=weights, histtype='stepfilled', density=cumulative, cumulative=cumulative, range=hist_range, facecolor=facecolor, linewidth=Plotter.linewidth, edgecolor=edgecolor, clip_on=cumulative)
                labels.append(l)
            self._legend(ax, plots[::-1], labels[::-1])
        else:
            edgecolor = np.array(colors[0][:3])/2
            ax.hist(data, bins=bins, histtype='stepfilled', density=cumulative, cumulative=cumulative, range=hist_range, facecolor=colors[0], linewidth=Plotter.linewidth, edgecolor=edgecolor, clip_on=False)
        if hist_range:
            # Pad the x-limits slightly around the requested range.
            r = hist_range[1] - hist_range[0]
            ax.set_xlim(hist_range[0]-r*0.02, hist_range[1]+r*0.05)
            if cumulative:
                ax.set_xlim(hist_range[0], hist_range[1]/1.1*1.05)
        if stats == 'wilcoxon':
            ylim = ax.get_ylim()
            h = (ylim[1] - ylim[0]) / 50
            y = ylim[1] + h
            p = sc.stats.wilcoxon(data).pvalue
            p = 'p < 10$^{' + str(int(np.log10(p))) + '}$'
            self._stats(ax, y, [(-0.25, 0.25)], [p])
        if highlights:
            # NOTE(review): arrow offsets (500/200/6) are in data units and
            # look tuned to a specific figure — confirm before reuse.
            for (label, (x, y)) in highlights:
                ax.arrow(x, y+500, 0, -500, width=2, length_includes_head=True, head_width=6, head_length=200, linewidth=0, fc='k')
                ax.annotate(label, (x, y+500), ha='center', va='bottom')
def simple_violin(self, ax, data, color, labels=[], x_positions=[], vert=True):
x_positions = x_positions or range(1, len(labels)+1)
violins = ax.violinplot(data, showextrema=False, showmeans=True, positions=x_positions, vert=vert)
for i, violin in enumerate(violins['bodies']):
violin.set_facecolor(color[i])
violin.set_edgecolor('black')
violin.set_alpha(1)
violins['cmeans'].set_edgecolor('black')
violins['cmeans'].set_linewidth(self.linewidth)
if vert:
ax.set_xticks(x_positions)
ax.set_xticklabels([l.capitalize() for l in labels])
ax.tick_params(axis='x', length=0)
else:
ax.set_yticks(x_positions)
ax.set_yticklabels([l.capitalize() for l in labels])
ax.tick_params(axis='y', length=0)
def violin(self, ax, data, color, split=None, order=[], cut=2, inner=None):
df = pd.DataFrame(columns=('x', 'y', 'hue'))
df = df.append(data)
hue = None
if split:
hue = split
split = True
sns.violinplot(ax=ax, x='x', y='y', hue=hue, data=df, order=order, cut=cut, inner=inner, split=split, palette=color)
ax.set_xticklabels([l.capitalize() for l in order])
means = df.groupby('x')['y'].mean()
for i, item in enumerate(order):
ax.plot((i-0.14, i+0.14), (means[item], means[item]), c='k')
# plots = []
# labels = []
# for i, l in enumerate(color.keys()):
# plots.append(Patch(facecolor=color[l], label='Synaptic '+l))
# labels.append('Synaptic '+l)
#
# self._legend(ax, plots, labels)
def swarm(self, ax, data, color, dotsize=3, stats=None):
df = pd.DataFrame(columns=('x', 'y1', 'y2', 'y3'))
df = df.append(data)
x, y = 'x', 'y'
sns.swarmplot(ax=ax, x=x, y=y, data=df, palette=color, size=dotsize, hue='x')
sns.boxplot(ax=ax, x=x, y=y, data=df, showfliers=False, width=0.1, color='#444444', linewidth=Plotter.linewidth)
# Set boxplot line color.
for i,artist in enumerate(ax.artists):
col = artist.get_facecolor()
artist.set_edgecolor(col)
artist.set_facecolor('None')
for line in ax.lines:
line.set_color(col)
line.set_mfc(col)
line.set_mec(col)
# Remove legend title and border.
self._legend(ax, markerscale=0.7)
if stats == 'wilcoxon':
ylim = ax.get_ylim()
h = (ylim[1] - ylim[0]) / 50
y = ylim[1] + h*3
p = sc.stats.wilcoxon(df['y']).pvalue
p = 'p < 10$^{' + str(int(np.log10(p))) + '}$'
self._stats(ax, y, [(0, 0)], [p])
def bar_graph(self, ax, data, colors,
linewidth=None, width=0.8, table=None, table_widths=None, table_rows=None,
legend=True, legend_title=None, rotate_ticks=False, dots=False, customstats=None,
error=None, barerr=None, vlines=[], larval_stages=False
):
if linewidth == None:
linewidth = Plotter.linewidth/2,
categories = []
plots = []
for i, (category, (xs, ys)) in enumerate(data):
categories.append(category)
if dots:
if not error:
plots.append(plt.scatter(
xs, ys, color=colors[i], marker='o', zorder=1
))
if error:
for x, y, e, c in zip(xs, ys, error, colors[i]):
eb = plt.errorbar(x, y, e, lw=0, marker='o', markersize=3, color=c, elinewidth=2, clip_on=False)
for j, b in enumerate(eb[2]):
b.set_clip_on(False)
else:
plots.append(plt.bar(
xs, ys, width=width, color=colors, linewidth=linewidth, edgecolor='k', clip_on=False
))
if barerr:
eb = plt.errorbar(xs, ys, barerr[i], lw=0, color='k', elinewidth=1)
ax.tick_params(axis='x', pad=5, length=0)
for vline in vlines:
ax.axvline(vline, color='#EEEEEE', linestyle='dotted', linewidth=Plotter.linewidth, zorder=-1)
if table:
table = ax.table(
cellText=table, cellLoc='center', edges='open',
rowLabels=table_rows, colWidths=table_widths,
loc='bottom', fontsize=5
)
table.set_fontsize(5)
table.scale(1, 0.5)
for cell in table.properties()['children']:
if cell._text.get_text() == table_rows[0]:
cell.set_text_props(fontweight='bold')
cell.set_fontsize(12)
if legend:
self._legend(ax, plots, categories, legend_title=legend_title)
if customstats:
xs, ys, text_x, text_y, t = customstats
c = (0.5, 0.5, 0.5)
c = 'k'
if xs is not None:
ax.plot(xs, ys, c=c, linewidth=Plotter.linewidth/2.0, clip_on=False, linestyle='dashed', zorder=-1)
ax.text(text_x, text_y, t, ha='left', va='center', color='k')
if larval_stages:
larval_stage_ends = [0, 16, 25, 34, 45]
larval_stage_mids = [8, 20.5, 29.5, 39.5, 50]
larval_stage_labels = ['L1', 'L2', 'L3', 'L4', ' Adult']
ax.set_xlim([0, 55])
ax.set_xticks([0])
ax.tick_params(axis='x', labelbottom=False)
ax.set_xticks(larval_stage_mids, minor=True)
ax.set_xticklabels(larval_stage_labels, minor=True)
ax.tick_params(axis='x', which='minor', bottom=False, pad=1)
linestyle = 'dotted'
for t in larval_stage_ends:
plt.axvline(t, color='#999999', linestyle=linestyle, linewidth=Plotter.linewidth, zorder=-1)
    def stacked_bar_graph(self, ax, pie, colors, adjacent_bars=False,
            stats=None, fancyborder=False,
            horizontal=False, linewidth=None, relative=True, xlabels=[],
            nospacing=False, directlegend=False, legend=True, legend_title=None, lines=[],
            legend_shift_top=0, legendpos='above', legendcol=2, legendreverse=False,
        width=0.7
    ):
        """Stacked bar chart (optionally horizontal) built from category series.

        pie: list of (category_name, per-dataset values); categories stack in
            order, datasets run along the axis.
        relative: normalize each dataset's stack so it sums to 1.
        stats: 1-based (i, j) dataset pairs compared with a two-proportion
            z-test on the last category's counts, FDR-corrected.
        directlegend: write category names beside the last bar instead of a
            legend box.
        The [] defaults are never mutated.
        NOTE(review): adjacent_bars is currently unused.
        """
        categories = [n for n, ds in pie]
        pie = np.array([ds for n, ds in pie])
        # Keep the raw counts for the z-tests below.
        data = np.copy(pie)
        n_datasets = len(pie[0])
        if relative:
            pie = pie.astype(float)/pie.sum(axis=0)
        if nospacing:
            width = 1
        # Running bottom (or left) of the stack per dataset.
        cumulative = np.zeros(n_datasets)
        plots = []
        for i, ds in enumerate(pie):
            args = {
                'color': (colors[categories[i]] if colors else None),
                'edgecolor': (colors[categories[i]+'_edge'] if colors and categories[i]+'_edge' in colors else 'k'),
                'linewidth': linewidth if linewidth != None else Plotter.linewidth/2.0, 'clip_on': False,
            }
            if horizontal:
                plots.append(plt.barh(
                    range(n_datasets), ds, left=cumulative, height=width, **args
                ))
                if fancyborder:
                    # Thin per-dataset colored strips along both bar edges.
                    cs = [c[:3] for c in colors[categories[i]]]
                    plots.append(plt.barh(
                        [y+width/2 for y in range(n_datasets)], ds, left=cumulative, height=0.1, color=cs
                    ))
                    plots.append(plt.barh(
                        [y-width/2 for y in range(n_datasets)], ds, left=cumulative, height=0.1, color=cs
                    ))
                    if i == len(pie)-1:
                        # Cap the end of the stack with a colored edge.
                        plots.append(plt.barh(
                            range(n_datasets), [0]*n_datasets, left=cumulative+ds, height=width, linewidth=6, edgecolor=cs
                        ))
            else:
                plots.append(plt.bar(
                    range(n_datasets), ds, bottom=cumulative, width=width, **args
                ))
            cumulative += ds
        if lines:
            # Free-form connector lines in data coordinates.
            for (x1, y1), (x2, y2) in lines:
                ax.plot([x1, x2], [y1, y2], c='k', linewidth=Plotter.linewidth/2.0, alpha=0.3)
        if directlegend:
            # Label the stack segments directly, to the right of the last bar.
            ax.spines['left'].set_linewidth(0)
            ax.spines['bottom'].set_linewidth(0)
            ax.set_yticks([])
            ax.set_xlim(left=-width*0.5)
            y_bot = 0
            y_top = 0
            for t, vs in zip(categories, pie):
                y_top += vs[-1]
                ax.text(n_datasets - 0.3, np.mean([y_bot, y_top]), t, ha='left', va='center', color='k', size=5)
                y_bot = y_top
        elif legend:
            self._legend(ax, plots, categories,
                position=legendpos, legend_title=legend_title, shift_top=legend_shift_top,
                shift_right=0.014,
                handlelength=0.8, handletextpad=0.5, ncol=legendcol, columnspacing=-4.7, reverse=legendreverse
            )
        if horizontal:
            ax.set_xlim(left=0)
            plt.xlim((0, 1))
        else:
            ax.set_ylim(bottom=0)
            if relative:
                plt.ylim((0, 1))
        if xlabels:
            ax.set_xticks(range(len(xlabels)))
            ax.set_xticklabels(xlabels)
            ax.tick_params(axis='x', length=0)
        if stats:
            y = 1.06
            comparisons = stats
            # Two-proportion z-test: last category's counts vs stack totals.
            ps = [proportions_ztest([data[-1][x1-1], data[-1][x2-1]], [sum([ds[x1-1] for ds in data]), sum([ds[x2-1] for ds in data])])[1] for x1, x2 in comparisons]
            ps_corrected = fdrcorrection0(ps)[1]
            print(comparisons)
            print(ps_corrected)
            self._stats(ax, y, [(x1-1, x2-1) for x1, x2 in comparisons], ps_corrected)
def horizontal_bar_graph(self, ax, data, colors, pvalues=[], groups=[''], groupspacer=0.5, x_range=None):
labels, correlations, error_lower, error_upper = zip(*data)
y_pos = range(len(data))[::-1]
group_patches = []
if len(groups) > 1:
y_pos = [y + int(y/len(groups))*groupspacer for y in y_pos]
for i in range(len(groups)):
group_patches.append(Patch(facecolor=colors[i], label=groups[i]))
labels = labels[::len(groups)]
labels_pos = np.array(y_pos[::len(groups)]) - 0.5*(len(groups)-1)
error = [np.abs(np.array(error_lower) - correlations), np.abs(np.array(error_upper) - correlations)]
ax.barh(y_pos, correlations, xerr=error, align='center', color=colors, ecolor='black', error_kw={'elinewidth': 1})
ax.plot([0, 0], [min(y_pos)-1, max(y_pos)+1], color='k', linewidth=1)
ax.set_yticks(labels_pos)
ax.set_yticklabels(labels, size=5)
ax.tick_params(length=0, axis='y')
if x_range:
ax.plot(x_range, (-1, -1), c='k', linewidth=Plotter.linewidth, clip_on=False)
max_x = max(max(np.abs(error_lower)), max(np.abs(error_upper)))
if len(groups) > 1:
self._legend(ax, group_patches[::-1], groups[::-1], legend_title='Increase of:', handlelength=0.7, handletextpad=0.5, position='above')
# ax, plots=[], labels=[], position='right', legend_title=None, reverse=True, handletextpad=None,
# shift_right=0, shift_top=0, ncol=1, columnspacing=None, markerscale=None, handlelength=None,
# handler_map=None
if pvalues:
# sig05 = np.where(fdrcorrection0(pvalues, alpha=0.05/2)[0])[0]
# if len(sig05) > 0:
# max_x += 0.05
# for idx in sig05:
# if correlations[idx] >= 0:
# x = error_upper[idx] + 0.05
# else:
# x = error_lower[idx] - 0.05
# ax.annotate(u'∗', xy=(x, y_pos[idx]), ha='center', va='center', fontname={'DejaVu Sans'}, fontsize=5)
sig001 = np.where(fdrcorrection0(pvalues, alpha=0.001/2)[0])[0]
if len(sig001) > 0:
max_x += 0.15
for idx in sig001:
x = error_upper[idx] + 0.05
ax.annotate(u'∗∗∗', xy=(x, y_pos[idx]), ha='left', va='center', fontname={'DejaVu Sans'}, fontsize=5)
ax.set_ylim([-1, max(y_pos)+1])
def _stats(self, ax, y, comparisons, pvalues, ticks=True):
ylim = ax.get_ylim()
h = (ylim[1] - ylim[0]) / 50.0
if not ticks:
h = 0
for p, (x1, x2) in zip(pvalues, comparisons):
if type(p) == str:
t = p
elif p > 0.05:
t = 'ns'
elif p > 0.01:
t = '*'
elif p > 0.001:
t = '**'
else:
t = '***'
c = 'k'
if x1 != x2:
ax.plot([x1, x1, x2, x2], [y, y+h, y+h, y], c=c, linewidth=Plotter.linewidth/2.0, clip_on=False)
text_y = y if '*' in t else y + h
ax.text(np.mean([x1, x2]), text_y, t, ha='center', va='bottom', color='k', fontsize=5)
y += h*4
ax.set_ylim(ylim)
    def box_plot(self, ax, box, colors, ylim=None, show_outliers=True, stats=None, darkmedian=True, vert=True):
        """Boxplots with per-box face colors and optional significance tests.

        box: list of samples, one per box.
        stats: 'anova' runs one-way ANOVA across all boxes; otherwise a list of
            1-based (i, j) pairs tested with Mann-Whitney U, FDR-corrected.
        ylim: tuple for both limits, or a scalar for the top limit only.
        """
        boxprops = dict(linewidth=Plotter.linewidth/2.0, color='k')
        medianprops = dict(linewidth=Plotter.linewidth/2.0, color='#444444' if darkmedian else '#eeeeee')
        whiskerprops = dict(linewidth=Plotter.linewidth/2.0)
        bplot = ax.boxplot(
            box, patch_artist=True, showfliers=show_outliers,
            boxprops=boxprops, medianprops=medianprops,
            whiskerprops=whiskerprops, capprops=whiskerprops,
            vert=vert
        )
        if ylim:
            if type(ylim) == tuple:
                ax.set_ylim(ylim)
            else:
                ax.set_ylim(top=ylim)
        for i, patch in enumerate(bplot['boxes']):
            patch.set_facecolor(colors[i])
        if stats:
            ylim = ax.get_ylim()
            h = (ylim[1] - ylim[0]) / 50.0
            # Place annotations just above the highest whisker cap.
            y = max(max(cap.get_ydata()) for cap in bplot['caps']) + h*3
            if stats == 'anova':
                p = sc.stats.f_oneway(*box).pvalue
                self._stats(ax, y, [(1, len(box))], [p], ticks=False)
            else:
                comparisons = stats
                ps = [sc.stats.mannwhitneyu(box[x1-1], box[x2-1], alternative='two-sided').pvalue for x1, x2 in comparisons]
                ps_corrected = fdrcorrection0(ps)[1]
                print('Corrected p-values:', comparisons, ps_corrected)
                print('n:', [len(b) for b in box])
                self._stats(ax, y, comparisons, ps_corrected)
        if vert:
            ax.tick_params(axis='x', length=0)
        else:
            ax.tick_params(axis='y', length=0)
    def scatter(self, ax, data, colors=None, line=False, crop=False, legend='',
                legendpos='right', legendcol=1, marker='o', markersize=6, markeredgewidth=1,
                stats=None, alpha=0.8,
                rev_legend=False, legend_shift_right=0, legend_shift_top=0, legend_title=None,
                legendcolspace=1):
        """Scatter plot of one or more (x, y) series with optional Spearman or
        regression annotations and a legend.

        data: (xs, ys) of plain numbers, or ([xs1, xs2, ...], [ys1, ys2, ...]).
        stats: 'spearman_combined' (one fit over all points pooled),
            'spearman_individual' (one fit per series) or 'regression_only'
            (fit lines without p-value annotations).
        NOTE(review): the `line` parameter is currently unused.
        """
        plots = []
        # A single series of numbers is wrapped to look like a list of series.
        if isinstance(data[0][0], (int, float, np.float32, np.float64, np.int64)):
            data = ([data[0]], [data[1]])
            colors = [colors]
            if legend:
                legend = [legend]
        for x, y, c in zip(data[0], data[1], colors):
            plot = ax.scatter(
                x, y, marker='o', facecolors=(c if marker == '.' else 'none'), edgecolors=c,
                s=markersize, alpha=alpha, linewidth=(0 if marker == '.' else markeredgewidth),
                clip_on=crop,
            )
            plots.append(plot)
        # Flatten all series for the combined fit and x-extent bookkeeping.
        xs = [x for xs in data[0] for x in xs]
        ys = [y for ys in data[1] for y in ys]
        if stats == 'spearman_combined':
            data = ([xs], [ys])
        if stats in ('spearman_combined', 'spearman_individual', 'regression_only'):
            for x, y, c in zip(data[0], data[1], colors):
                if stats == 'spearman_combined':
                    c = 'k'
                reg = linregress(x, y)
                a, b = reg.slope, reg.intercept
                # Extend the fit line across the visible x-range.
                xlim = ax.get_xlim()
                min_x, max_x = min(xlim[0], min(xs)), max(xlim[1], max(xs))
                ax.plot([min_x, max_x], [min_x*a+b, max_x*a+b], marker=None, color=c, linewidth=Plotter.linewidth/2.0, linestyle='dashed')
                if stats == 'regression_only':
                    continue
                cor = sc.stats.spearmanr(x, y)
                r = cor.correlation
                # Bonferroni-style correction by the number of series.
                p = cor.pvalue*len(data[0])
                text_x = min(ax.get_xlim()[1], max_x)
                text_x *= 1.02
                t = ''
                if stats == 'spearman_combined':
                    t += u'r = {0:.2f}\n'.format(r)
                if p > 0.05:
                    t += 'ns'
                elif p > 0.01:
                    t += 'p < 0.05'
                elif p > 0.001:
                    t += 'p < 0.01'
                else:
                    t += 'p < 10$^{' + str(int(np.log10(p)) if p != 0 else -9) + '}$'
                print('p:', p, ', r:', r, ', n:', len(x))
                ax.text(text_x, text_x*a+b, t, ha='right', va='bottom', color='k', size=5)
        if legend:
            legend_obj = self._legend(
                ax, plots, legend, legend_title=legend_title, reverse=rev_legend,
                shift_right=legend_shift_right, shift_top=legend_shift_top, position=legendpos, handletextpad=-0.4,
                ncol=legendcol, columnspacing=legendcolspace
            )
            # Legend markers at full opacity even when points are translucent.
            for handles in legend_obj.legendHandles:
                handles.set_alpha(1)
    def adjacency_matrix(self, ax, data, color, borders=None, post_spacers=[], pre_spacers=[], text=None):
        """Square-marker adjacency matrix where marker area encodes
        data[col][row], on a light grey grid.

        color/borders/text: per-cell face colors, edge colors and overlay
            marker glyphs, indexed like *data*.
        pre_spacers/post_spacers: indices where the horizontal/vertical grid
            lines are broken to visually separate groups.
        The [] defaults are never mutated.
        """
        x = []
        y = []
        size = []
        c = []
        b = [] if borders else None
        data = np.array(data)
        cols, rows = data.shape
        # Marker area (points^2) for a full cell at the current figure size.
        col_size = (self.figure_size[0] * self.page_size * 64 / cols) ** 2
        text_xy = defaultdict(lambda: [[], []])
        for row in range(rows):
            for col in range(cols):
                x.append(col)
                y.append(row)
                size.append(data[col][row]*col_size+(0.5 if borders else 0))
                c.append(color[col][row])
                if borders is not None:
                    b.append(borders[col][row])
                if text is not None:
                    # Group cells by glyph so each glyph is one scatter call.
                    text_xy[text[col][row]][0].append(col)
                    text_xy[text[col][row]][1].append(row)
        ax.scatter(
            x, y, marker='s', clip_on=False, s=size, c=c,
            linewidths=0.2 if borders else 0, edgecolors=b
        )
        for marker in text_xy:
            ax.scatter(*text_xy[marker], marker=marker, color='w', s=1, linewidths=0.3)
        xmin = -0.5
        xmax = cols-0.5
        # Horizontal grid segments, interrupted around each pre_spacer.
        hlines = [xmin, xmax]
        for s in pre_spacers:
            if s < hlines[-1]:
                hlines.insert(-1, s-0.5)
                hlines.insert(-1, s+0.5)
        for i in range(rows+1):
            for xfrom, xto in zip(hlines[::2], hlines[1::2]):
                ax.plot([xfrom, xto], [i-0.5]*2, c=(0.75, 0.75, 0.75), linewidth=0.3, clip_on=False)
        ymin = -0.5
        ymax = rows-0.5
        # Vertical grid segments, interrupted around each post_spacer.
        vlines = [ymin, ymax]
        for s in post_spacers:
            if s < vlines[-1]:
                vlines.insert(-1, s-0.5)
                vlines.insert(-1, s+0.5)
        for i in range(cols+1):
            for yfrom, yto in zip(vlines[::2], vlines[1::2]):
                ax.plot([i-0.5]*2, [yfrom, yto], c=(0.75, 0.75, 0.75), linewidth=0.3, clip_on=False)
        ax.set_xlim((xmin, xmax))
        ax.set_ylim((ymin, ymax))
        # Column labels along the top, rotated vertically.
        ax.xaxis.tick_top()
        ax.xaxis.set_tick_params(rotation=90)
        ax.xaxis.set_label_position('top')
        ax.xaxis.label.set_size(7)
        ax.yaxis.label.set_size(7)
def venn(self, ax, data, color, labels=('Group1', 'Group2', 'Group3')):
d1, d2, d3 = data
v = venn3(subsets=(
len(d1-d2-d3),
len(d2-d1-d3),
len(d1.intersection(d2)-d3),
len(d3-d1-d2),
len(d1.intersection(d3)-d2),
len(d2.intersection(d3)-d1),
len(d1.intersection(d2).intersection(d3)),
), set_labels=labels)
v.get_patch_by_id('100').set_color('#aaaaaa')
v.get_patch_by_id('010').set_color('#aaaaaa')
v.get_patch_by_id('001').set_color('#aaaaaa')
v.get_patch_by_id('110').set_color('#777777')
v.get_patch_by_id('101').set_color('#777777')
v.get_patch_by_id('011').set_color('#777777')
v.get_patch_by_id('111').set_color('#222222')
for pid in ('100', '010', '001', '110', '101', '011', '111'):
v.get_patch_by_id(pid).set_linewidth(0)
print(v)
|
988,622 | 15eb05424d0b2679415f7d4867e0bf8f9d995371 | from fruits_db import session, Fruit
# Fetch every Fruit row from the database and print a small report:
# one blank-line-separated entry per fruit with its name and price.
fruits = session.query(Fruit).all()
for fruit in fruits:
    print()
    print(f'Fruit: { fruit.name }')
    print(f'Price: { fruit.price_cents } cents')
988,623 | 753606f1fc4f73c391fe4565e9b2831f0f53535f | '''
상담원으로 일하고 있는 백준이는 퇴사를 하려고 한다.
오늘부터 N+1일째 되는 날 퇴사를 하기 위해서, 남은 N일 동안 최대한 많은 상담을 하려고 한다.
백준이는 비서에게 최대한 많은 상담을 잡으라고 부탁을 했고, 비서는 하루에 하나씩 서로 다른 사람의 상담을 잡아놓았다.
각각의 상담은 상담을 완료하는데 걸리는 기간 Ti와 상담을 했을 때 받을 수 있는 금액 Pi로 이루어져 있다.
N = 7인 경우에 다음과 같은 상담 일정표를 보자.
1일 2일 3일 4일 5일 6일 7일
Ti 3 5 1 1 2 4 2
Pi 10 20 10 20 15 40 200
1일에 잡혀있는 상담은 총 3일이 걸리며, 상담했을 때 받을 수 있는 금액은 10이다. 5일에 잡혀있는 상담은 총 2일이 걸리며, 받을 수 있는 금액은 15이다.
상담을 하는데 필요한 기간은 1일보다 클 수 있기 때문에, 모든 상담을 할 수는 없다. 예를 들어서 1일에 상담을 하게 되면, 2일, 3일에 있는 상담은 할 수 없게 된다. 2일에 있는 상담을 하게 되면, 3, 4, 5, 6일에 잡혀있는 상담은 할 수 없다.
또한, N+1일째에는 회사에 없기 때문에, 6, 7일에 있는 상담을 할 수 없다.
퇴사 전에 할 수 있는 상담의 최대 이익은 1일, 4일, 5일에 있는 상담을 하는 것이며, 이때의 이익은 10+20+15=45이다.
상담을 적절히 했을 때, 백준이가 얻을 수 있는 최대 수익을 구하는 프로그램을 작성하시오.
입력
첫째 줄에 N (1 ≤ N ≤ 15)이 주어진다.
둘째 줄부터 N개의 줄에 Ti와 Pi가 공백으로 구분되어서 주어지며, 1일부터 N일까지 순서대로 주어진다. (1 ≤ Ti ≤ 5, 1 ≤ Pi ≤ 1,000)
출력
첫째 줄에 백준이가 얻을 수 있는 최대 이익을 출력한다.
'''
import sys
# Redirect stdin so input() reads the test file instead of the console.
sys.stdin = open('14501.txt')


def dfs(day=0, ans=0):
    """Brute-force every feasible consultation schedule from *day* onward.

    day: first 0-based day still available for a consultation.
    ans: profit accumulated so far; the best value seen is kept in the
         global *result*.
    """
    global result
    if ans > result:
        result = ans
    for i in range(day, N):
        # Consultation i takes Tlist[i] days; only take it if it finishes
        # no later than day N (the last day before resignation).
        if i + Tlist[i] <= N:
            dfs(i + Tlist[i], ans + Plist[i])


# Read N, then one (duration, pay) pair per day.
Tlist = []
Plist = []
N = int(input())
for n in range(N):
    T, P = map(int, input().split())
    Tlist.append(T)
    Plist.append(P)
result = 0
dfs()
print(result)
988,624 | 20b82fe1703534ef657140dab7e2e186d3155474 | from MF_bias import MF_bias
import numpy as np
# Tiny 4-user x 5-item rating matrix; 0 marks "not rated".
R = np.array([
    [1.0, 4.0, 5.0, 0, 3.0],
    [5.0, 1.0, 0, 5.0, 2.0],
    [4.0, 1.0, 2.0, 5.0, 0],
    [0, 3.0, 4.0, 0, 4.0]
])
# Matrix factorisation with bias terms: 2 latent features, learning
# rate beta, L2 regularisation lambda_value, 20k iterations.
mf = MF_bias(data=R, K_feature=2, beta=0.002, lambda_value=0.01, iterations=20000)
mf.train()
print(mf.gradient_descent())
988,625 | e2e0fbfc311d2b9fe40c76e0b02c64715a8a1d6f | import tensorflow as tf
import numpy as np
from feature_helpers.feature_generator import make_tfidf_combined_feature_5000, load_tfidf_y
seed = 12345  # global RNG seed for reproducible init and shuffles


def weight_variable(name, shape):
    """He (variance-scaling, fan-in) initialised weight variable."""
    return tf.get_variable(name=name, shape=shape,
    initializer=tf.contrib.layers.variance_scaling_initializer
    (factor=2.0, mode='FAN_IN', uniform=False, seed=seed))
#
def bias_variable(name, shape):
    """Bias variable initialised to the constant 1e-3."""
    initial = tf.constant(1e-3, shape=shape)
    return tf.Variable(initial, name=name)


# --- hyper-parameters and data locations --------------------------------
lr = 0.001              # base Adam learning rate
batch_size = 188
training_epoch = 1
# hidden = (362, 942, 1071, 870, 318, 912, 247)
hidden = (600, 600, 600, 600)   # widths of the four hidden layers
n_classes = 4
export_dir = '../tf_model/'
head_dir = '../pickled_model/tfidf_head_feature_train_holdout.pkl'
body_dir = '../pickled_model/tfidf_body_feature_train_holdout.pkl'
label_dir = '../pickled_model/tfidf_label_one_hot_train_holdout.pkl'
init_bias = 0.001
mode = 'train'          # switch to 'test' to run the evaluation branch below
# mode = 'test'
model_path = '../tf_model/ensemble_tfidf_5000_epoch70_n_fold_'
# TF-IDF features for headline+body, loaded once and reused for all folds.
X_data = make_tfidf_combined_feature_5000(head_dir, body_dir)
y_data = load_tfidf_y(label_dir)
n_fold = 2
# exit()
# X_train, y_train = tf.train.shuffle_batch([X_train, y_train], batch_size, len_X, 10000, seed=12345)
n_input = X_data.shape[1]
class Model:
    """Four-hidden-layer fully connected softmax classifier (TF1 graph mode).

    Each instance builds its variables under its own name scope so several
    models can be ensembled inside one session.
    """

    def __init__(self, sess, name):
        self.sess = sess
        self.name = name
        self._build_model()

    def _build_model(self):
        """Build placeholders, the MLP stack, loss, optimizer and accuracy."""
        with tf.variable_scope(self.name):
            self.X = tf.placeholder("float32", [None, n_input])
            self.Y = tf.placeholder("float32", [None, n_classes])
            self.learning_rate_tensor = tf.placeholder(tf.float32)
            self.momentum = tf.placeholder(tf.float32)
            self.learning_rate_output = ""
            layer1 = tf.nn.relu(tf.add(tf.matmul(self.X,
                                weight_variable(self.name+'w1', [n_input, hidden[0]])),
                                bias_variable(self.name+'b1', [hidden[0]])))
            layer2 = tf.nn.relu(tf.add(tf.matmul(layer1,
                                weight_variable(self.name+'w2', [hidden[0], hidden[1]])),
                                bias_variable(self.name+'b2', [hidden[1]])))
            layer3 = tf.nn.relu(tf.add(tf.matmul(layer2,
                                weight_variable(self.name+'w3', [hidden[1], hidden[2]])),
                                bias_variable(self.name+'b3', [hidden[2]])))
            layer4 = tf.nn.relu(tf.add(tf.matmul(layer3,
                                weight_variable(self.name + 'w4', [hidden[2], hidden[3]])),
                                bias_variable(self.name + 'b4', [hidden[3]])))
            # Raw (pre-softmax) scores. softmax_cross_entropy_with_logits_v2
            # applies softmax internally, so it must receive these raw scores;
            # the original fed it already-softmaxed probabilities (a double
            # softmax), which flattens gradients and skews the loss.
            raw_scores = tf.add(tf.matmul(layer4,
                                weight_variable(self.name+'out_w', [hidden[3], n_classes])),
                                bias_variable(self.name+'out_b', [n_classes]))
            # Keep .logits as probabilities — predict() callers sum these.
            self.logits = tf.nn.softmax(raw_scores)
            self.cost = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(logits=raw_scores, labels=self.Y))
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_tensor).minimize(self.cost)
            correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            self.saver = tf.train.Saver()

    def predict(self, x_test):
        """Return class probabilities for *x_test*."""
        return self.sess.run(self.logits, feed_dict={self.X: x_test})

    def get_accuracy(self, x_test, y_test):
        """Return top-1 accuracy against one-hot labels *y_test*."""
        return self.sess.run(self.accuracy, feed_dict={self.X: x_test, self.Y: y_test})

    def train(self, x_data, y_data, learning_rate, momentum):
        """Run one optimisation step; returns [cost, optimizer_op result]."""
        return self.sess.run([self.cost, self.optimizer],
                             feed_dict={self.X: x_data, self.Y: y_data, self.learning_rate_tensor: learning_rate,
                                        self.momentum: momentum})

    def save(self, save_path):
        # Bug fix: use this model's own session, not the module-global `sess`.
        self.saver.save(self.sess, save_path)
        print('save model '+save_path)

    def load(self, model_path):
        """Restore variables from a checkpoint at *model_path*."""
        self.saver.restore(self.sess, model_path)
split_num = 1
split_size = len(X_data) // 10
# NOTE(review): split_num is fixed at 1 and never advanced with `fold`, so
# every fold trains/tests on the identical 10% holdout — confirm whether
# split_num was meant to track the fold index.
for fold in range(n_fold):
    model_path = '../tf_model/ensemble_tfidf_5000_epoch70_n_fold_'+str(fold)
    # Hold out one contiguous 10% slice; train on the remainder.
    X_train = np.concatenate((X_data[:split_num*split_size], X_data[(split_num+1)*split_size:]), axis=0)
    y_train = np.concatenate((y_data[:split_num*split_size],y_data[(split_num+1)*split_size:]), axis=0)
    X_test = X_data[split_num*split_size : (split_num+1)*split_size]
    y_test = y_data[split_num*split_size : (split_num+1)*split_size]
    # Shuffle features and labels with the same seed so rows stay aligned.
    np.random.seed(seed)
    np.random.shuffle(X_train)
    np.random.seed(seed)
    np.random.shuffle(y_train)
    print(X_train[0])
    print(y_train[0])
    len_X = len(X_train)
    fold_count = len_X // 10
    # tf.reset_default_graph() returns None, so Session falls back to the
    # freshly reset default graph.
    with tf.Session(graph=tf.reset_default_graph()) as sess:
        if mode == 'train':
            models = []
            num_models = 5
            # NOTE(review): model construction is commented out, so `models`
            # stays empty — the training loop below is a no-op and the
            # per-epoch averaging divides by zero. Confirm intent.
            # for m in range(num_models):
            #     models.append(Model(sess, "model"+str(m)))
            sess.run(tf.global_variables_initializer())
            print('Learning Started!')
            for epoch in range(training_epoch):
                momentum_start = 0.5
                momentum_end = 0.99
                avg_cost_list = np.zeros(len(models))
                total_batch = len_X // batch_size
                # print('data shuffling...')
                # print('seed : ', epoch * 100 + 1)
                # np.random.seed(epoch)
                # np.random.shuffle(X_train)
                # np.random.seed(epoch)
                # np.random.shuffle(y_train)
                # print('data shuffling finished...')
                calc_learning_rate = lr
                i = 0
                # Linear momentum ramp from 0.5 towards 0.99 over the epochs.
                calc_momentum = momentum_start + (float((momentum_end - momentum_start) / training_epoch) * epoch)
                if epoch > 0 and (epoch == 20 or epoch == 35 or epoch == 45):
                    calc_learning_rate = float(calc_learning_rate / 10.0)
                # print(ep)
                while i < len(X_train):
                    start = i
                    end = i + batch_size
                    batch_x = np.array(X_train[start:end])
                    batch_y = np.array(y_train[start:end])
                    for m_idx, m in enumerate(models):
                        # NOTE(review): passes the constant `lr`, not
                        # calc_learning_rate — the decay schedule computed
                        # above is never applied.
                        c, _ = m.train(batch_x, batch_y, learning_rate=lr, momentum=calc_momentum)
                        avg_cost_list[m_idx] += c / total_batch
                        # print(epoch,', ', m_idx, 'accuracy : ', m.get_accuracy(X_test, y_test))
                    i += batch_size
                    if i % (batch_size*50) == 0:
                        print(i, '/', len(X_train))
                avg_accuracy = 0.0
                for ids, m in enumerate(models):
                    avg_accuracy += m.get_accuracy(X_test, y_test)
                    # m.save(model_path+"_epoch"+str(epoch)+"_model"+str(ids))
                print('avg accuracy : ', avg_accuracy/len(models))
                print('Epoch: ', epoch+1, ', cost = ', avg_cost_list)
            saver = tf.train.Saver()
            print('Training Finished!')
            # _, c, out, y = sess.run([optimizer, cost, output, Y], feed_dict={
            # X: batch_x, Y: batch_y, momentum : calc_momentum, learning_rate_tensor: calc_learning_rate
            # })
            # if i % (batch_size*50) == 0 and i != 0:
            # print('epoch : {}, epoch_loss : {}, processing : {}/{}'.format(ep, c, i, len_X))
            # # print(out[:20])
            # # print(y[:20])
            # epoch_loss += c
        if mode =='test':
            models = []
            num_models = 5
            # NOTE(review): model loading is commented out here as well, so
            # the ensemble evaluation below runs over an empty model list.
            # sess = saver.restore(sess, model_path+str(fold)+".ckpt")
            # for m in range(num_models):
            #     models.append(Model(sess, "model" + str(m)).load(model_path+"_epoch"+str(epoch)+"_model"+str(m)))
            print('model load finish!')
            # print('Epoch', ep + 1, 'completed out of', ep, 'loss:', epoch_loss, 'LR=',calc_learning_rate)
            predictions = np.zeros([fold_count, n_classes])
            print('fold', fold,'test')
            for m_idx, m in enumerate(models):
                print(m_idx, 'Accuracy: ', m.get_accuracy(X_test, y_test))
                p = m.predict(X_test)
                predictions += p
            # Ensemble vote: sum of per-model probabilities, argmax vs labels.
            ensemble_correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y_test, 1))
            ensemble_accuracy = tf.reduce_mean(tf.cast(ensemble_correct_prediction, tf.float32))
            print('Ensemble accuracy: ', sess.run(ensemble_accuracy))
|
988,626 | 94f77f1a6721ca5cb7e53d776ff1d201f4169107 | ## 012345678901234
frase = ' Roberto Mota'
## [start:stop:step]
# slice from index 1 up to (but not including) index 6
print(frase[1:6])
# from index 1 up to index 14, stepping 3 positions at a time
print(frase[1:14:3])
# from the start to the end, taking every second character
print(frase[::2])
# triple-quoted strings: print keeps the embedded line breaks
print(""" Texto Texto Texto Texto Texto Texto
Texto Texto Texto Texto Texto Texto Texto Texto
Texto Texto Texto Texto Texto Texto Texto Texto
Texto Texto Texto Texto Texto Texto Texto """)
# .count('char') counts how many of that character exist in the string
print(frase.count('t'))
# same as above, but upper-cases everything before counting
print(frase.upper().count('t'))
# same as above, but lower-cases everything before counting
print(frase.lower().count('t'))
# length of the string
print(len(frase))
# .strip() removes the spaces before and after the text
print(len(frase.strip()))
# .replace(old, new) simply swaps the substrings
# combined with strip to drop the leading/trailing spaces
# replace does not mutate the string; it only returns a new one
print(frase.strip().replace('Mota', 'Moota'))
# to actually change the string we use assignment
frase = frase.strip().replace('Mota', 'Mootaaaaaa')
print('Nova frase: {}'.format(frase))
print('Confirmacao de string alterada: {}'.format(frase))
frase = "Roberto Mota"
# membership test: looks for a word (or any substring) in the string,
# returning True or False
print('Mota' in frase)
# returns the position where the target starts (when it exists)
print(frase.find('Mota')) # 'Mota' starts at index 8
# .split() turns the string into a list of its words
print(frase.split())
dividido = frase.split()
print(dividido[1])
|
988,627 | 27f783c27712a2f301238664d16971d74c9a4cc2 | #!/usr/bin/env python3
#エクサウィザーズ2019 A
import sys
import math
import bisect
sys.setrecursionlimit(1000000000)
from heapq import heappush, heappop,heappushpop
from collections import defaultdict
from itertools import accumulate
from collections import Counter
from collections import deque
from operator import itemgetter
from itertools import permutations
mod = 10**9 + 7
inf = float('inf')


def I():
    """Read one integer from standard input."""
    return int(sys.stdin.readline())


def LI():
    """Read one line of whitespace-separated integers from standard input."""
    return list(map(int, sys.stdin.readline().split()))


# Print "Yes" exactly when all three values are equal.
a, b, c = LI()
print('Yes' if a == b == c else 'No')
|
988,628 | e1d68814a9193e343df1abc6b2d6bc12d779312b | #coding = 'utf-8'
import os
def origin_path():
    """Return the absolute path of the directory containing this file.

    Serves as the project root for building other paths.
    """
    return os.path.dirname(os.path.abspath(__file__))
988,629 | dcdea8c0e0572b53a34f30198a0ed1337b5e9398 | import pandas
import scipy.stats
import seaborn
import matplotlib.pyplot as pyplot
# Load the Outlook on Life survey and test whether age is associated with
# how often participants watch national news.
aData = pandas.read_csv('ool_pds.csv', low_memory=False)

# W1_A11: Frequency of watching national news (invalid data coded as -1)
# PPAGE: age
aSubData = aData[['PPAGE', 'W1_A11']]
aSubData = aSubData[(aSubData['W1_A11'] != -1)]

# Pearson correlation between age and news-watching frequency.
# (The original message said "Obama rate" — a copy/paste leftover: the
# variables actually correlated here are PPAGE and W1_A11.)
print('Association between the frequency of watching national news and the age of participants: ')
print(scipy.stats.pearsonr(aSubData['PPAGE'], aSubData['W1_A11']))

aPlot = seaborn.regplot(x='PPAGE', y='W1_A11', fit_reg=True, data=aSubData)
pyplot.xlabel('Age of participants')
pyplot.ylabel('Frequency of watching national news')
pyplot.title('''ScatterPlot for the association between the age
and the frequency of watching the national news''')
|
988,630 | dc8dcfb84929df26d1ac8a91aa82225e261e2b30 | def get_current_ranges(current_readings):
if len(current_readings) == 0:
return 'ERR_EMPTY_INPUT'
return True
|
988,631 | 94cce7fab62914b8f32fe5903aabd7e412762d68 | import torch.nn as nn
# # Parameters to define the model.
# params = {
# 'nc' : 3,# Number of channles in the training images. For coloured images this is 3.
# 'nz' : 100,# Size of the Z latent vector (the input to the generator).
# 'ngf' : 64,# Size of feature maps in the generator. The depth will be multiples of this.
# 'ndf' : 64, # Size of features maps in the discriminator. The depth will be multiples of this.
# }
#===============================================================================
# Shared convolution hyper-parameters. Because kernel_size == stride + 2*padding,
# each Conv2d below halves H and W and each ConvTranspose2d doubles them.
kernel_size = 4
stride = 2
padding = 1
# ABOUT DIMENSIONS -----------------------------------------------------------
# In nn.Conv2d(ndf * d_in, ndf * d_out, 4, 2, 1, bias=False)
# kernel_size = 4, stride = 2, padding = 1
# When kernel_size = stride + 2*padding (as it is here), the output image size is
# H_out = H_in / stride
# W_out = W_in / stride
#
# Likewise in nn.ConvTranspose2d(ngf * d_in, ngf * d_out, 4, 2, 1, bias=False))
# H_out = H_in * stride
# W_out = W_in * stride
#------------------------
def DisLayerSN_d(ndf, k):
    """Spectral-norm discriminator block with dropout.

    Maps ndf*2**k channels to ndf*2**(k+1) channels while halving the
    spatial resolution (kernel_size = stride + 2*padding).
    """
    in_ch = ndf * 2 ** k
    out_ch = ndf * 2 ** (k + 1)
    conv = nn.Conv2d(in_ch, out_ch, kernel_size, stride=stride, padding=padding, bias=False)
    return nn.Sequential(
        nn.utils.spectral_norm(conv),
        nn.Dropout2d(),
        nn.BatchNorm2d(out_ch),
        nn.LeakyReLU(0.2, inplace=True),
    )
#------------------------
def DisLayerSN(ndf, k):
    """Spectral-norm discriminator block (no dropout).

    Maps ndf*2**k channels to ndf*2**(k+1) channels while halving the
    spatial resolution.
    """
    in_ch = ndf * 2 ** k
    out_ch = ndf * 2 ** (k + 1)
    conv = nn.Conv2d(in_ch, out_ch, kernel_size, stride=stride, padding=padding, bias=False)
    return nn.Sequential(
        nn.utils.spectral_norm(conv),
        nn.BatchNorm2d(out_ch),
        nn.LeakyReLU(0.2, inplace=True),
    )
#------------------------
def DisLayer(ndf, k):
    """Plain discriminator block (no spectral norm).

    Maps ndf*2**k channels to ndf*2**(k+1) channels while halving the
    spatial resolution.
    """
    in_ch = ndf * 2 ** k
    out_ch = ndf * 2 ** (k + 1)
    return nn.Sequential(
        nn.Conv2d(in_ch, out_ch, kernel_size, stride=stride, padding=padding, bias=False),
        nn.BatchNorm2d(out_ch),
        nn.LeakyReLU(0.2, inplace=True),
    )
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
def GenLayerSN(ngf, k):
    """Spectral-norm generator block.

    Maps ngf*2**k channels to ngf*2**(k-1) channels while doubling the
    spatial resolution.
    """
    in_ch = ngf * 2 ** k
    out_ch = ngf * 2 ** (k - 1)
    deconv = nn.ConvTranspose2d(in_ch, out_ch, kernel_size, stride, padding, bias=False)
    return nn.Sequential(
        nn.utils.spectral_norm(deconv),
        nn.BatchNorm2d(out_ch),
        nn.ReLU(True),
    )
#------------------------
def GenLayer(ngf, k):
    """Plain generator block (no spectral norm).

    Maps ngf*2**k channels to ngf*2**(k-1) channels while doubling the
    spatial resolution.
    """
    in_ch = ngf * 2 ** k
    out_ch = ngf * 2 ** (k - 1)
    return nn.Sequential(
        nn.ConvTranspose2d(in_ch, out_ch, kernel_size, stride, padding, bias=False),
        nn.BatchNorm2d(out_ch),
        nn.ReLU(True),
    )
def GenLayerDropout(ngf, k):
    """Generator block with dropout between deconvolution and batch norm.

    Maps ngf*2**k channels to ngf*2**(k-1) channels while doubling the
    spatial resolution.
    """
    in_ch = ngf * 2 ** k
    out_ch = ngf * 2 ** (k - 1)
    return nn.Sequential(
        nn.ConvTranspose2d(in_ch, out_ch, kernel_size, stride, padding, bias=False),
        nn.Dropout2d(),
        nn.BatchNorm2d(out_ch),
        nn.ReLU(True),
    )
988,632 | 5260432940ddff25a052b1bddfc5e58250057c46 | import os
import sys
import zipfile
# Make sure we have the correct command line arguments
# NOTE: this script uses Python 2 print statements; run it with python2.
if len(sys.argv) != 3:
    print "Please provide command line arguments as follows:"
    print "'python ziptool.py <Directory> <Zip File>' to append to zip file"
    print "'python ziptool.py <Zip File> <Directory>' to extract from zip file"
    sys.exit(0)
# Decide direction from which argument exists on disk: an existing
# directory as argv[1] means "zip it up"; an existing file means argv[1]
# is the archive and argv[2] the extraction target.
if os.path.exists(sys.argv[1]):
    if os.path.isdir(sys.argv[1]):
        directory = sys.argv[1]
        zfilepath = sys.argv[2]
        zipmode = True
    else:
        directory = sys.argv[2]
        zfilepath = sys.argv[1]
        zipmode = False
else:
    # argv[1] does not exist: if argv[2] is not a directory either, treat
    # argv[1] as the source directory. NOTE(review): os.listdir below will
    # fail if that directory truly does not exist — confirm intended use.
    if not os.path.isdir(sys.argv[2]):
        directory = sys.argv[1]
        zfilepath = sys.argv[2]
        zipmode = True
    else:
        directory = sys.argv[2]
        zfilepath = sys.argv[1]
        zipmode = False
if zipmode:
    # DESTRUCTIVE: each file is deleted after being added to the archive,
    # and the directory itself is removed afterwards unless the archive
    # file lives inside it.
    with zipfile.ZipFile(zfilepath, 'a', zipfile.ZIP_DEFLATED) as zfile:
        for filename in os.listdir(directory):
            filepath = os.path.join(directory, filename)
            zfile.write(filepath, filename, zipfile.ZIP_DEFLATED)
            if filename != os.path.basename(zfilepath):
                os.remove(filepath)
    if not os.path.exists(os.path.join(directory, os.path.basename(zfilepath))):
        os.rmdir(directory)
else:
    # Extraction mode: unpack the whole archive into the target directory.
    with zipfile.ZipFile(zfilepath, "r") as zfile:
        zfile.extractall(directory)
|
988,633 | 44f5ce2fbe32b40468c5739d99e2a1c81d3175be | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MultiTaskNet eval"""
import ast
import argparse
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.utils.evaluate import test
from src.model.DenseNet import DenseNet121
from src.dataset.dataset import eval_create_dataset
# Command-line interface for MultiTaskNet evaluation.
parser = argparse.ArgumentParser(description='eval MultiTaskNet')
parser.add_argument('--device_target', type=str, default="Ascend")
parser.add_argument('--ckpt_path', type=str, default='')
parser.add_argument('--device_id', type=int, default=0)
parser.add_argument('--root', type=str, default='./data', help="root path to data directory")
parser.add_argument('-d', '--dataset', type=str, default='veri', help="name of the dataset")
parser.add_argument('--height', type=int, default=256, help="height of an image (default: 256)")
parser.add_argument('--width', type=int, default=256, help="width of an image (default: 256)")
parser.add_argument('--test-batch', default=100, type=int, help="test batch size")
parser.add_argument('--heatmapaware', type=ast.literal_eval, default=False, help="embed heatmaps to images")
parser.add_argument('--segmentaware', type=ast.literal_eval, default=False, help="embed segments to images")
args = parser.parse_args()
if __name__ == '__main__':
    # Configure the execution device and run MindSpore in graph mode.
    target = args.device_target
    device_id = args.device_id
    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False, device_id=device_id)
    train_dataset_path = args.root
    # Build query/gallery dataloaders plus dataset metadata (id/colour/type
    # counts and label mappings) for evaluation.
    query_dataloader, gallery_dataloader, num_train_vids, \
    num_train_vcolors, num_train_vtypes, _vcolor2label, \
    _vtype2label = eval_create_dataset(dataset_dir=args.dataset,
                                       root=train_dataset_path,
                                       width=args.width,
                                       height=args.height,
                                       keyptaware=True,
                                       heatmapaware=args.heatmapaware,
                                       segmentaware=args.segmentaware,
                                       train_batch=args.test_batch)
    # Instantiate the backbone with output heads sized for this dataset;
    # weights come from the checkpoint below, not from pretraining.
    _model = DenseNet121(pretrain_path='',
                         num_vids=num_train_vids,
                         num_vcolors=num_train_vcolors,
                         num_vtypes=num_train_vtypes,
                         keyptaware=True,
                         heatmapaware=args.heatmapaware,
                         segmentaware=args.segmentaware,
                         multitask=True,
                         is_pretrained=False)
    ckpt_path = args.ckpt_path
    print("ckpt_path is {}".format(ckpt_path))
    # Load the trained parameters and run the evaluation pass.
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(_model, param_dict)
    _distmat = test(_model, True, True, query_dataloader, gallery_dataloader,
                    _vcolor2label, _vtype2label, return_distmat=True)
|
988,634 | 6574f7c45da816be198d16ce51bf3ca45c36f705 | from django.shortcuts import render
from django.http import *
from .models import Quote
from .forms import *
def index(request):
    """Placeholder landing view for the quotation app."""
    body = "<h1>Quote Index</h1>"
    return HttpResponse(body)
def quoteout(request):
    """Render the list of every stored Quote."""
    quotes = Quote.objects.all()
    context = {'obj': quotes}
    return render(request, 'quotation/quoteout.html', context)
def quotein(request):
    """Show the quote form; on valid POST save it and redirect."""
    if request.method == 'POST':
        form = quote_form(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/quoteinput/')
        # invalid POST: fall through and re-render the bound form with errors
    else:
        form = quote_form()
    return render(request, 'quotation/quotein.html', {'form': form})
988,635 | 0e43d7e9c9879d4aa0268fcc9d9b27d40842655e | class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums_len = len(nums)
start_idx = 0
for curr_idx in range(nums_len):
if start_idx == curr_idx:
majority_element = nums[curr_idx]
count_majority = 1
else:
if nums[curr_idx] == majority_element:
count_majority += 1
else:
count_majority -= 1
if count_majority == 0:
start_idx = curr_idx+1
return majority_element
|
988,636 | 99f747016e0df5615465b81cd975ae1909643b9a | from os import walk
from LTM import *
from TRM import *
from HistBP import *
import cv2
import numpy as np
import matplotlib.pyplot as plt
import nltk
#from deep_activations import *
class vTE:
    """Visual task-relevance estimator.

    Dispatches to the backend matching the long-term memory's method
    ('HistogramBackprojection', 'SparseCoding' or 'DCNN') to compute a
    per-pixel relevance map for a keyword or sentence task.
    """

    def __init__(self, settings=""):
        self.settings = settings
        self.task_graph = None
        self.hists_mem = None
        self.weights_mem = None
        try:
            self.task_specification=settings.task_text
        except:
            # settings may be a plain string / lack task_text; fall back to None
            self.task_specification=None
        self.task_type=1 #sentence

    def get_relevance(self,LTM,img):
        """Compute a relevance map for the stored task specification."""
        if self.task_type==0: #keyword
            return self.get_kw(LTM,img,self.task_specification)
        else: #sentence
            return self.get_snt(LTM,img,self.task_specification)

    def get_kw(self,LTM,img,keyword):
        """Dispatch a keyword task to the backend matching LTM.method."""
        if LTM.method == 'HistogramBackprojection':
            return self.backproject_kw(LTM,img,keyword)
        elif LTM.method == 'SparseCoding':
            return self.maxcode_kw(LTM,img,keyword)
        elif LTM.method == 'DCNN':
            return self.activations_kw(LTM,img,keyword)

    def get_snt(self,LTM,img,sentence):
        """Dispatch a sentence task to the backend matching LTM.method."""
        if LTM.method == 'HistogramBackprojection':
            return self.backproject_snt(LTM,img,sentence)
        elif LTM.method == 'SparseCoding':
            return self.maxcode_snt(LTM,img,sentence)
        elif LTM.method == 'DCNN':
            # Bug fix: this branch referenced the undefined name `keyword`
            # (NameError at runtime); the sentence must be forwarded.
            return self.activations_snt(LTM,img,sentence)

    def activations_kw(self,LTM,img,keyword):
        """Relevance map from DCNN class activations for a single keyword."""
        relevance=keras_singleclass_heatmap2(img,keyword,LTM.keras_model_name,LTM.keras_dataset_name,LTM.keras_activation_layer)
        return relevance

    def activations_snt(self,LTM,img,sentence):
        """Relevance map for a sentence on the DCNN path.

        NOTE(review): this body mirrors maxcode_snt (sparse-coding feature
        maps) rather than using DCNN activations — presumably a placeholder;
        confirm intended implementation.
        """
        self.hists_mem,self.weights_mem=LTM.retrieve_snt(sentence)
        aimTemp=image2featuremaps(LTM.basis,img)
        #self.task_keywords,self.task_graph=text2graph(sentence)
        rels=np.zeros((aimTemp.shape[0],aimTemp.shape[1],len(self.weights_mem)))
        for w,weight in enumerate(self.weights_mem):
            self.hists_mem[w]=self.hists_mem[w]/255
            #plot_codes_hist(LTM.basis,self.hists_mem[w])
            rels[:,:,w]=featuremaps2rmap(aimTemp,self.hists_mem[w])*weight
        relevance=np.mean(rels,axis=(2))
        relevance = cv2.resize(relevance, (img.shape[1],img.shape[0]), 0, 0, cv2.INTER_AREA)
        return relevance

    def maxcode_kw(self,LTM,img,keyword):
        """Sparse-coding relevance map for a single keyword."""
        self.hists_mem,self.weights_mem=LTM.retrieve_kw(keyword)
        self.hists_mem=self.hists_mem/255
        #plot_codes_hist(LTM.basis,self.hists_mem)
        aimTemp=image2featuremaps(LTM.basis,img)
        relevance=featuremaps2rmap(aimTemp,self.hists_mem)
        relevance = cv2.resize(relevance, (img.shape[1],img.shape[0]), 0, 0, cv2.INTER_AREA)
        return relevance

    def maxcode_snt(self,LTM,img,sentence):
        """Sparse-coding relevance map: weighted mean over sentence keywords."""
        self.hists_mem,self.weights_mem=LTM.retrieve_snt(sentence)
        aimTemp=image2featuremaps(LTM.basis,img)
        #self.task_keywords,self.task_graph=text2graph(sentence)
        rels=np.zeros((aimTemp.shape[0],aimTemp.shape[1],len(self.weights_mem)))
        for w,weight in enumerate(self.weights_mem):
            self.hists_mem[w]=self.hists_mem[w]/255
            #plot_codes_hist(LTM.basis,self.hists_mem[w])
            rels[:,:,w]=featuremaps2rmap(aimTemp,self.hists_mem[w])*weight
        relevance=np.mean(rels,axis=(2))
        relevance = cv2.resize(relevance, (img.shape[1],img.shape[0]), 0, 0, cv2.INTER_AREA)
        return relevance

    def backproject_kw(self,LTM,img,keyword):
        """Histogram-backprojection relevance map for a single keyword."""
        self.hists_mem,self.weights_mem=LTM.retrieve_kw(keyword)
        hsvt = map2hsv(img)
        relevance=hbProp(hsvt,self.hists_mem)
        return relevance

    def backproject_snt(self,LTM,img,sentence):
        """Histogram-backprojection relevance: weighted mean over keywords."""
        self.hists_mem,self.weights_mem=LTM.retrieve_snt(sentence)
        hsvt = map2hsv(img)
        #self.task_keywords,self.task_graph=text2graph(sentence)
        rels=np.zeros((img.shape[0],img.shape[1],len(self.weights_mem)))
        for w,weight in enumerate(self.weights_mem):
            rels[:,:,w]=hbProp(hsvt,self.hists_mem[w])*weight
        relevance=np.mean(rels,axis=(2))
        return relevance

    #deprecated (AIM)
    def get_saliency(self,LTM,img):
        """Bottom-up saliency map from AIM feature maps (deprecated)."""
        aimTemp=image2featuremaps(LTM.basis,img)
        saliency = featuremaps2smap(aimTemp)
        saliency = cv2.resize(saliency, (img.shape[1],img.shape[0]), 0, 0, cv2.INTER_AREA)
        return saliency
|
988,637 | dd74951a9ca39fe3ad6cd4f50ca11a9299b1b910 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Benjamin Preisig
import logging
import sys

import lirc
from soco import SoCo
from soco.exceptions import SoCoException

import config
def is_playing(transport_info):
    """Return True iff the transport state reports active playback.

    The original returned None (not False) for states other than
    PLAYING / PAUSED_PLAYBACK / STOPPED (e.g. 'TRANSITIONING'); callers
    use the result in a boolean context, so any non-PLAYING state now
    explicitly yields False.
    """
    return transport_info['current_transport_state'] == 'PLAYING'
def run():
    """Main loop: poll the LIRC remote and map its buttons to Sonos actions."""
    sonos = SoCo(config.IP_ADDRESS)
    logging.info(u"Starting: {0}".format(sonos.player_name))
    while True:
        # NOTE(review): lirc.init is re-run on every iteration; it likely
        # only needs to be called once before the loop — confirm against
        # the python-lirc documentation.
        sockid = lirc.init("sore")
        val = lirc.nextcode()
        if val:
            try:
                button = val[0]
                logging.info("hello: {0}".format(button))
                if button == 'play':
                    # Toggle play/pause based on the current transport state.
                    if not is_playing(sonos.get_current_transport_info()):
                        sonos.play()
                    else:
                        sonos.pause()
                elif button == 'plus':
                    sonos.volume += 2
                elif button == 'minus':
                    sonos.volume -= 2
                elif button == 'next':
                    sonos.next()
                elif button == 'previous':
                    sonos.previous()
                elif button == 'menu':
                    # play radio station
                    # from sonos.get_favorite_radio_stations():
                    # {u'uri': 'x-sonosapi-stream:s44255?sid=254&flags=8224&sn=0', u'title': 'ORF - Radio Wien'}
                    sonos.play_uri(uri='x-sonosapi-stream:s44255?sid=254&flags=8224&sn=0', title='ORF - Radio Wien', start=True)
            except SoCoException as err:
                logging.error("SoCo Error: {0}".format(err))
                pass
            except:
                # Bare except: catches everything (including KeyboardInterrupt);
                # relies on `sys` being imported at the top of the file.
                logging.error("Error: {0}".format(sys.exc_info()[1]))
if __name__ == "__main__":
    # Script entry point: start the remote-control polling loop.
    # TODO: Logging
    # logging.basicConfig(filename="/home/pi/SonosRemote/sore.log", level=logging.INFO)
    run()
|
988,638 | 8afa8fea2a298e4e82e5c6cd9881e11615ac053c | from dataclasses import dataclass, field
from typing import List, Optional
from datexii.models.eu.datexii.v2.fuel_type2_enum import FuelType2Enum
from datexii.models.eu.datexii.v2.load_type2_enum import LoadType2Enum
from datexii.models.eu.datexii.v2.vehicle_type2_enum import VehicleType2Enum
from datexii.models.eu.datexii.v2.vehicle_usage2_enum import VehicleUsage2Enum
__NAMESPACE__ = "http://datex2.eu/schema/2/2_0"
# NOTE(review): xsdata-style generated model (field metadata mirrors the
# DATEX II 2.0 XSD namespace/name bindings) — presumably regenerated from
# the schema rather than hand-edited; field order defines the __init__
# argument order, so confirm before reordering.
@dataclass
class VehicleCharacteristicsExtended:
    """
    Extension point for 'VehicleCharacteristics' to support additional attributes
    and literals like additional fuel types, load types etc.
    :ivar emission_classification: The valid list of entries for this
        attribute has to be specified between the communication-
        partners. Usually it's some country specific classification code
        for emissions, which must be scored by vehicles to be valid.
    :ivar operation_free_of_emission: Only vehicles that do not produce
        emissions (e.g. electric driven). Hybrid driven cars are
        allowed, when they switch to emission free mode within the
        considered situation.
    :ivar load_type2: Loads currently not supported in 'LoadTypeEnum'.
    :ivar vehicle_type2: Vehicle types currently not supported in
        'VehicleTypeEnum'.
    :ivar fuel_type2: Fuel types currently not supported in
        'FuelTypeEnum'.
    :ivar vehicle_usage2: Usage types currently not supported in
        'VehicleUsageTypeEnum'.
    """
    # 0..n free-text classification codes; each capped at 1024 characters
    # per the schema's max_length restriction.
    emission_classification: List[str] = field(
        default_factory=list,
        metadata={
            "name": "emissionClassification",
            "type": "Element",
            "namespace": "http://datex2.eu/schema/2/2_0",
            "max_length": 1024,
        }
    )
    operation_free_of_emission: Optional[bool] = field(
        default=None,
        metadata={
            "name": "operationFreeOfEmission",
            "type": "Element",
            "namespace": "http://datex2.eu/schema/2/2_0",
        }
    )
    load_type2: Optional[LoadType2Enum] = field(
        default=None,
        metadata={
            "name": "loadType2",
            "type": "Element",
            "namespace": "http://datex2.eu/schema/2/2_0",
        }
    )
    vehicle_type2: Optional[VehicleType2Enum] = field(
        default=None,
        metadata={
            "name": "vehicleType2",
            "type": "Element",
            "namespace": "http://datex2.eu/schema/2/2_0",
        }
    )
    fuel_type2: Optional[FuelType2Enum] = field(
        default=None,
        metadata={
            "name": "fuelType2",
            "type": "Element",
            "namespace": "http://datex2.eu/schema/2/2_0",
        }
    )
    vehicle_usage2: Optional[VehicleUsage2Enum] = field(
        default=None,
        metadata={
            "name": "vehicleUsage2",
            "type": "Element",
            "namespace": "http://datex2.eu/schema/2/2_0",
        }
    )
|
988,639 | 37e8d738e9efa95193d324343b4f6435ae6fa34e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 10:02:07 2019
@author: BrunoAfonso
"""
from surprise import AlgoBase
from MovieLens import MovieLens
from surprise import PredictionImpossible
import math
import numpy as np
import heapq
class ContentBasedAlgorithm(AlgoBase):
    """Content-based recommender: item-item similarity from genres and years.

    Builds a dense item-similarity matrix at fit time and estimates a
    user's rating for an item as the similarity-weighted mean of the
    ratings the user already gave to that item's nearest neighbours.
    """

    def __init__(self, user_number):
        AlgoBase.__init__(self)
        # Raw id of the user to generate recommendations for.
        self.user_number = user_number

    def recommend(self):
        """Train, score every movie the user has not rated, print the top 10."""
        self.fit()
        testing_set = self.test_without_user(int(self.user_number))
        predictions = self.test(testing_set)
        recommendations = []
        print ("Recomendações:")
        # NOTE(review): debug print of the full prediction list — consider removing.
        print(predictions)
        for user_id, movie_id, actual_rating, estimated_rating, _ in predictions:
            intMovieID = int(movie_id)
            recommendations.append((intMovieID, estimated_rating))
        recommendations.sort(key=lambda x: x[1], reverse=True)
        for ratings in recommendations[:10]:
            print(self.loader.getMovieName(ratings[0]), ratings[1])

    def fit(self):
        """Load MovieLens data and precompute the item-item similarity matrix.

        O(n_items**2) in time and memory; each entry is the product of a
        genre (cosine) similarity and a year-proximity similarity.
        """
        self.loader = MovieLens()
        self.data = self.loader.loadMovieLensLatestSmall()
        self.trainset = self.data.build_full_trainset()
        print("Treino a iniciar....")
        AlgoBase.fit(self, self.trainset)
        genres = self.loader.getGenres()
        years = self.loader.getYears()
        self.similarities_matrix = np.zeros((self.trainset.n_items, self.trainset.n_items))
        for itemA in range(self.trainset.n_items):
            for itemB in range(itemA+1, self.trainset.n_items):
                itemAA = int(self.trainset.to_raw_iid(itemA))
                itemBB = int(self.trainset.to_raw_iid(itemB))
                year_similarity = self.compute_year_similarity(itemAA, itemBB, years)
                genre_similarity = self.calculate_genre_similarity(itemAA, itemBB, genres)
                # The similarity matrix is symmetric; fill both triangles.
                self.similarities_matrix[itemA, itemB] = genre_similarity*year_similarity
                self.similarities_matrix[itemB, itemA] = genre_similarity*year_similarity
        print("Treino terminado...")
        return self

    def calculate_genre_similarity(self, itemA, itemB, genres):
        """Cosine similarity between the two items' genre vectors.

        NOTE(review): divides by the product of the vector norms — an
        all-zero genre vector would raise ZeroDivisionError; confirm that
        every item has at least one genre in the dataset.
        """
        itemA = genres[itemA]
        itemB = genres[itemB]
        sumxx, sumxy, sumyy = 0, 0, 0
        for i in range(len(itemA)):
            x = itemA[i]
            y = itemB[i]
            sumxx += x * x
            sumyy += y * y
            sumxy += x * y
        return sumxy/math.sqrt(sumxx*sumyy)

    def compute_year_similarity(self, itemA, itemB, years):
        """Exponential decay in the release-year gap (10-year scale)."""
        diff = abs(years[itemA] - years[itemB])
        sim = math.exp(-diff / 10.0)
        return sim

    def estimate(self, u, i):
        """Predict user *u*'s rating of item *i* (inner ids).

        Weighted mean of the user's ratings over the 40 most similar
        already-rated items; raises PredictionImpossible when no
        neighbour has positive similarity.
        """
        #print("User: ", u)
        #print("Item: ", i)
        neighbors = []
        n_neighbors = 40
        for rating in self.trainset.ur[u]:
            genre_similarity = self.similarities_matrix[i,rating[0]]
            neighbors.append( (genre_similarity, rating[1]) )
        k_neighbors = heapq.nlargest(n_neighbors, neighbors, key=lambda t: t[0])
        similarity_total = 0
        sum_weight = 0
        for (temp_similarity, rating) in k_neighbors:
            if (temp_similarity > 0):
                similarity_total += temp_similarity
                sum_weight += temp_similarity * rating
        if (similarity_total == 0):
            raise PredictionImpossible('No neighbors')
        predicted_rating = sum_weight / similarity_total
        return predicted_rating

    def test_without_user(self, test_user):
        """Build a test set of (user, item, global_mean) triples covering
        every item the given user has NOT rated yet."""
        trainset = self.trainset
        fill = trainset.global_mean
        test_data = []
        u = trainset.to_inner_uid(str(test_user))
        user_items = []
        for rating in trainset.ur[u]:
            user_items.append(rating[0])
        #print(user_items)
        for i in trainset.all_items():
            if i not in user_items:
                test_data.append((trainset.to_raw_uid(u), trainset.to_raw_iid(i), fill))
        return test_data
|
988,640 | ebbb69984c2a5223d6f075e38c47d17e7b75e96a |
import ttg

# Build and print a truth table for p, q over four composite expressions.
table = ttg.Truths(['p', 'q'], ['p and q', 'p or q', 'p xor q', 'p = q'], ints=False)
print(table.as_prettytable())
# Fix: removed the stray non-Python text ("Ariels touch") that made this
# script a SyntaxError and prevented it from running at all.
|
988,641 | e714dc452f3cafa721717d9faf832de991e7b791 | from math import sqrt
# Area calculator: reads a shape name (in Russian) and its dimensions from
# stdin, then prints the area.
shape = input()  # fix: renamed from `type`, which shadowed the builtin
pi = 3.14  # kept at the original precision so printed results are unchanged
if shape == 'треугольник':
    # Triangle: Heron's formula from the three side lengths.
    a = float(input())
    b = float(input())
    c = float(input())
    p = (a + b + c) / 2  # semi-perimeter
    print(sqrt(p*(p-a)*(p-b)*(p-c)))
elif shape == 'прямоугольник':
    # Rectangle: width times height.
    a = float(input())
    b = float(input())
    print(a*b)
elif shape == 'круг':
    # Circle: pi * r^2.
    r = float(input())
    print(pi*r**2)
|
988,642 | 975916c0bba76a3750222474b78bd7c6270de0c2 | # Generated by Django 2.2.3 on 2019-10-30 11:01
import ckeditor.fields
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the past_memory app.

    Auto-generated by Django 2.2.3; creates the multilingual Post model
    (fa/en variants of name/content/short_content) and its related Picture.
    """

    initial = True
    dependencies = [
    ]
    operations = [
        # Blog-style post with per-language field variants and CKEditor
        # rich-text content.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('name_fa', models.CharField(blank=True, max_length=200, null=True)),
                ('name_en', models.CharField(blank=True, max_length=200, null=True)),
                ('content', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True)),
                ('content_fa', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True)),
                ('content_en', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True)),
                ('short_content', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('short_content_fa', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('short_content_en', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('lang', models.CharField(default='fa', max_length=10)),
            ],
        ),
        # Image attached to a Post; deleting the Post cascades to its pictures.
        migrations.CreateModel(
            name='Picture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='picture/')),
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='past_memory.Post')),
            ],
        ),
    ]
|
988,643 | 54ef5fe2459abfb9cc8d5be454836d4f8431e0b6 | import src.utilities.custom_logger as cl
import logging
from src.base.basepage import BasePage
class NavigationPage(BasePage):
    """Page object for the site-wide navigation bar."""

    log = cl.custom_logger(logging.DEBUG)

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    # Locators
    _main_page_logo = "//a[@class='navbar-brand header-logo']"
    _login_link = "Login"
    _my_courses = "My Courses"
    _all_courses = "All Courses"
    _practice = "Practice"
    _user_settings_icon = "//div[@id='navbar']//li[@class='dropdown']"
    _successful_login = "//a[contains(@class, 'open-my-profile-dropdown')]"

    def go_to_main_page(self):
        """Scroll to the top and click the header logo."""
        self.web_scroll(direction="up")
        self.element_click_(self._main_page_logo, locator_type="xpath")

    def navigate_to_all_courses(self):
        self.element_click_(locator=self._all_courses, locator_type="link")

    def navigate_to_my_courses(self):
        self.element_click_(locator=self._my_courses, locator_type="link")

    def navigate_to_practice(self):
        self.element_click_(locator=self._practice, locator_type="link")

    def navigate_to_user_settings(self):
        """Wait for the user-settings dropdown, then open it."""
        # Wait is for presence only; the click uses the locator directly.
        self.wait_for_element_(locator=self._user_settings_icon,
                               locator_type="xpath", poll_frequency=1)
        self.element_click_(locator=self._user_settings_icon, locator_type="xpath")

    def logout(self):
        """Open the profile dropdown, sign out, and wait for the Login link.

        Bug fix: the dropdown click previously passed the literal string
        "_successful_login" instead of the attribute's XPath value, so the
        element was never found.
        """
        self.element_click_(locator=self._successful_login, locator_type="xpath")
        self.element_click_(locator=".user-signout", locator_type="css")
        self.wait_for_element_(locator=self._login_link, locator_type='link')
|
988,644 | 4b80751fc23aa3156b05041350a1a96db68f4723 | from collections import namedtuple
Context = namedtuple('Context', ['id', 'folder'])
|
988,645 | a51cae2b51b91d2b8551ae9574aa11a48af4d47b | # Generated by Django 2.2.10 on 2020-04-09 06:43
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the misspelled ``OrederPlaced`` model to ``OrderPlaced``."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0019_auto_20200409_1202'),
    ]
    operations = [
        # Pure rename: Django keeps the data and only rewrites the table name
        # and model state.
        migrations.RenameModel(
            old_name='OrederPlaced',
            new_name='OrderPlaced',
        ),
    ]
|
988,646 | efec2260ce9b74071604b5d3a5e3a3f8b77b6a55 | import datetime as dt
import json
import sys
from . import __version__
from .config import is_dry
from .dry import dummy_function
from .dry import dryable
class Stats:
    """Class used to collect kb run statistics.

    Tracks tool versions, the CLI invocation, every shell command run (with
    its runtime), and overall wall-clock time; serializes to JSON/dict for
    report rendering.
    """

    def __init__(self):
        self.kallisto_version = None
        self.bustools_version = None
        self.start_time = None
        self.call = None
        self.commands = []
        self.runtimes = []
        self.end_time = None
        self.elapsed = None
        self.version = __version__

    def start(self):
        """Start collecting statistics.

        Sets the start time and command-line call, and clears any commands
        and runtimes recorded by a previous run.
        """
        self.start_time = dt.datetime.now()
        self.call = ' '.join(sys.argv)
        self.commands = []
        # Bug fix: runtimes must be reset together with commands, otherwise
        # the two parallel lists drift out of sync across repeated runs.
        self.runtimes = []

    def command(self, command, runtime=None):
        """Report a shell command was run.

        :param command: a shell command, represented as a list
        :type command: list
        :param runtime: elapsed seconds for the command, if measured
        :type runtime: float, optional
        """
        cmd = ' '.join(command)
        self.commands.append(cmd)
        # Bug fix: `runtime or ...` discarded a legitimate 0.0 runtime;
        # compare against None explicitly.
        self.runtimes.append(runtime if runtime is not None else 'not measured')

    def end(self):
        """End collecting statistics and compute the elapsed seconds."""
        self.end_time = dt.datetime.now()
        self.elapsed = (self.end_time - self.start_time).total_seconds()

    @dryable(dummy_function)
    def save(self, path):
        """Save statistics as JSON to path.

        :param path: path to JSON
        :type path: str
        :return: path to saved JSON
        :rtype: str
        """
        if not is_dry():
            with open(path, 'w') as f:
                json.dump(self.to_dict(), f, indent=4)
        return path

    def to_dict(self):
        """Convert statistics to dictionary, so that it is easily parsed
        by the report-rendering functions.

        Assumes start() and end() were both called (times are isoformatted).
        """
        return {
            'version': self.version,
            'start_time': self.start_time.isoformat(),
            'end_time': self.end_time.isoformat(),
            'elapsed': self.elapsed,
            'call': self.call,
            'commands': self.commands,
            'runtimes': self.runtimes,
        }


# Module-level singleton shared by the rest of the package.
STATS = Stats()
|
988,647 | 8500fbef8e71add23cccc704361ec78009f0b2f3 |
import pandas as pd

# Load the realty listings (headerless TSV) and the floor descriptions.
df_realty = pd.read_csv('train.tsv', sep='\t',
                        names=['price', 'num_of_rooms', 'area',
                               'num_of_floors', 'address', 'desc'])
df_desc = pd.read_csv('description.csv', header=0)

# Attach each listing's floor description, then drop the redundant join key.
# Fix: `.drop('liczba', 1)` passed the axis positionally, which raises a
# TypeError on pandas >= 2.0; use the explicit `columns=` keyword.
df_realty = pd.merge(df_realty, df_desc, left_on='num_of_floors',
                     right_on='liczba', how='left').drop(columns='liczba')

# Save the joined data.
df_realty.to_csv('out2.csv', index=False)
|
988,648 | c6fcb731b3876236831a718d4d7036ca83d0a4a7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup

# NOTE: this setup.py targets Python 2 / Trac 0.12 — the `ur"..."` string
# prefix below is valid only on Python 2. The $...$ markers are SVN keyword
# expansions filled in by the repository on checkout.
__url__ = ur"$URL$"[6:-2]
__author__ = ur"$Author$"[9:-2]
__revision__ = int("0" + ur"$Rev$"[6:-2])
__date__ = ur"$Date$"[7:-2]

extra = {}

# Optional localisation support: add Babel message-catalog commands when
# Trac's l10n distutils helpers are available.
from trac.util.dist import get_l10n_cmdclass
cmdclass = get_l10n_cmdclass()
if cmdclass:
    extra['cmdclass'] = cmdclass
# Where Babel should look for translatable strings.
extractors = [
    ('**.py', 'python', None),
    ('**/templates/**.html', 'genshi', None),
]
extra['message_extractors'] = {
    'tracwatchlist': extractors,
}

setup(
    name = 'TracWatchlistPlugin',
    version = '0.5',
    description = "Watchlist Plugin for Trac 0.12",
    keywords = 'trac watchlist wiki plugin',
    author = 'Martin Scharrer',
    author_email = 'martin@scharrer-online.de',
    url = 'http://www.trac-hacks.org/wiki/WatchlistPlugin',
    download_url = 'http://trac-hacks.org/svn/watchlistplugin/releases/',
    license = 'GPLv3',
    classifiers = ['Framework :: Trac'],
    install_requires = ['Babel>= 0.9.5', 'Trac >= 0.12dev'],
    packages = ['tracwatchlist'],
    # Static assets and compiled translations shipped inside the package.
    package_data = {
        'tracwatchlist' : [
            'htdocs/ico/*',
            'htdocs/css/style.css',
            'htdocs/css/dataTable.css',
            'htdocs/css/jquery.autocomplete.css',
            'htdocs/js/jquery.dataTables.min.js',
            'htdocs/js/jquery.autocomplete.js',
            'htdocs/js/dynamictables.js',
            'htdocs/js/autocomplete.js',
            'htdocs/js/watchlist.js',
            'locale/*/LC_MESSAGES/*.mo',
            'templates/*.html',
        ],
    },
    zip_safe = False,
    # Trac discovers the plugin's components via these entry points.
    entry_points = {'trac.plugins':
        [
            'tracwatchlist = tracwatchlist',
            'tracwatchlist.plugin = tracwatchlist.plugin',
            'tracwatchlist.db = tracwatchlist.db',
            'tracwatchlist.nav = tracwatchlist.nav',
        ]},
    **extra
)
|
988,649 | 1a152a61ece8b6e047c389fe9c05123eb649a4cb | from django.urls import path
from .views import cart_detail, add_to_cart, update_cart, remove_cart_item
# Shopping-cart routes; <item_id> is captured as a string path parameter and
# passed to each view.
urlpatterns = [
    path("cart/", cart_detail, name="cart"),
    path("cart/add/<item_id>/", add_to_cart, name="add_to_cart"),
    path("cart/update/<item_id>/", update_cart, name="update_cart"),
    path("cart/remove/<item_id>/", remove_cart_item, name="remove_cart_item"),
]
|
988,650 | 18777dc0e57cd6b507119058797d20be08d5b10e | import subprocess
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs: # pragma: no cover
raise ValueError('stdout argument not allowed, '
'it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, _ = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Replacement for subprocess.CalledProcessError on interpreters (2.6) where
# the exception lacks the `output` keyword/attribute.
class _CalledProcessError(Exception):
    """CalledProcessError clone that also carries the captured output."""

    def __init__(self, returncode, cmd, output=None):
        super(_CalledProcessError, self).__init__()
        self.returncode = returncode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        return "Command '%s' returned non-zero exit status %s" % (
            self.cmd, self.returncode)
# On Python 2.6, graft the backports onto the subprocess module so callers
# can use subprocess.check_output / CalledProcessError transparently.
if not hasattr(subprocess, 'check_output'):
    subprocess.check_output = _check_output
    subprocess.CalledProcessError = _CalledProcessError
|
988,651 | 697fb5e75d61275ac77e100c6837fc5900779ae4 | import os
import sys
import time
import argparse
import torch
from torchvision import utils
from model import Generator
from tqdm import tqdm
import numpy as np
def generate(args, g_ema, device):
    """Sample ``args.pics`` random latents and save one image per latent
    under sample/ as zero-padded PNGs."""
    with torch.no_grad():
        g_ema.eval()
        for idx in tqdm(range(args.pics)):
            latent = torch.randn(args.sample, args.latent, device=device)
            image, _ = g_ema([latent])
            utils.save_image(
                image,
                f'sample/{str(idx).zfill(6)}.png',
                nrow=1,
                normalize=True,
                range=(-1, 1),
            )
def generate_specified_samples(args, g_ema, device):
    """Generate 9 images, each from a latent with one random slot set.

    For each image a single random latent dimension gets a random integer
    magnitude; all other dimensions stay zero.
    """
    # Fix: the bare try/except around os.mkdir swallowed unrelated errors;
    # makedirs(exist_ok=True) only tolerates the directory already existing.
    os.makedirs('latent_sample_single_slot', exist_ok=True)
    with torch.no_grad():
        g_ema.eval()
        for i in range(1, 10):
            sample_z = torch.zeros(args.sample, args.latent, device=device)
            r_slot = np.random.randint(0, 512)
            v_slot = np.random.randint(low=1, high=100)
            sample_z[0][r_slot] = v_slot
            sample, _ = g_ema([sample_z])
            utils.save_image(
                sample,
                f'latent_sample_single_slot/{str(i).zfill(6)}.png',
                nrow=1,
                normalize=True,
                range=(-1, 1),
            )
def generate_larger_sample(args, g_ema, device):
    """Tile 100 generated samples into a mosaic image and save it.

    Each sample comes from a latent that is zero except for two fixed slots
    (13 and 46) set to random values. Samples are concatenated into rows of
    ten (the row's first sample is duplicated, and the final row is never
    appended — both quirks preserved from the original).
    """
    # Fix: replace the bare try/except around mkdir with exist_ok=True.
    os.makedirs('latent_sample_single_slot', exist_ok=True)
    with torch.no_grad():
        g_ema.eval()
        samples = []
        for i in range(100):
            sample_z = torch.zeros(args.sample, args.latent, device=device)
            sample_z[0][13] = np.random.rand(1)[0]
            sample_z[0][46] = np.random.random(1)[0] * 100
            sample, _ = g_ema([sample_z])
            sample = sample.detach().cpu().numpy()
            sample = np.squeeze(sample, axis=0)
            sample = np.einsum('kli->lik', sample)  # CHW -> HWC
            if i % 10 == 0:
                # Fix: `i is not 0` compared object identity with an int
                # literal (a SyntaxWarning on CPython >= 3.8); use !=.
                if i != 0:
                    samples.append(sampleb)
                sampleb = sample  # start a new row
            sampleb = np.concatenate((sampleb, sample), axis=1)
        image = samples[0]
        for s in range(1, len(samples)):
            image = np.concatenate((image, samples[s]), axis=0)
        image = np.einsum('kli->ikl', image)  # HWC -> CHW
        image = np.expand_dims(image, axis=0)
        image = torch.Tensor(image)
        print(image.shape)
        utils.save_image(
            image,
            f'latent_sample_single_slot/{str(i).zfill(6)}.png',
            normalize=True,
            range=(-1, 1),
        )
def generate_larger_overlaping_sample(args, g_ema, device):
    """Tile 100 centre-cropped samples into a mosaic image and save it.

    Same layout as generate_larger_sample (rows of ten, first tile
    duplicated, last row dropped — quirks preserved), but each generated
    image is centre-cropped by 156 px on every side before tiling, so
    neighbouring tiles come from overlapping generator outputs. Latent
    slots 10 and 55 carry the random values.
    """
    # Fix: replace the bare try/except around mkdir with exist_ok=True.
    os.makedirs('latent_sample_single_slot', exist_ok=True)
    with torch.no_grad():
        g_ema.eval()
        samples = []
        for i in range(100):
            sample_z = torch.zeros(args.sample, args.latent, device=device)
            sample_z[0][10] = np.random.rand(1)[0]
            sample_z[0][55] = np.random.random(1)[0] * 1
            sample, _ = g_ema([sample_z])
            sample = sample.detach().cpu().numpy()
            sample = np.squeeze(sample, axis=0)
            sample = np.einsum('kli->lik', sample)  # CHW -> HWC
            slice_in = 156
            sample = sample[slice_in:-slice_in, slice_in:-slice_in, :]
            if i % 10 == 0:
                # Fix: `i is not 0` compared identity, not value; use !=.
                if i != 0:
                    samples.append(sampleb)
                sampleb = sample  # start a new row
            sampleb = np.concatenate((sampleb, sample), axis=1)
        image = samples[0]
        for s in range(1, len(samples)):
            image = np.concatenate((image, samples[s]), axis=0)
        image = np.einsum('kli->ikl', image)  # HWC -> CHW
        image = np.expand_dims(image, axis=0)
        image = torch.Tensor(image)
        print(image.shape)
        utils.save_image(
            image,
            f'latent_sample_single_slot/overlapped_larger_sample_' + str(time.time()) + '.png',
            normalize=True,
            range=(-1, 1),
        )
def list_styles(args, g_ema, device):
    """Save one image per latent dimension (1..511), each generated from a
    latent where only that dimension is set, to visualise each slot.

    The torch seed is fixed per slot; note the slot's value itself comes
    from np.random, which is not seeded here.
    """
    # Fix: replace the bare try/except around mkdir with exist_ok=True.
    os.makedirs('style_samples', exist_ok=True)
    with torch.no_grad():
        g_ema.eval()
        for i in range(1, 512):
            print(i)
            torch.manual_seed(i)
            sample_z = torch.zeros(args.sample, args.latent, device=device)
            sample_z[0][i] = np.random.uniform(low=0.0, high=1.0)
            sample, _ = g_ema([sample_z])
            utils.save_image(
                sample,
                f'style_samples/style_{str(i).zfill(3)}.png',
                nrow=1,
                normalize=True,
                range=(-1, 1),
            )
if __name__ == '__main__':
    device = 'cuda'
    parser = argparse.ArgumentParser()
    parser.add_argument('--size', type=int, default=1024)      # output resolution
    parser.add_argument('--sample', type=int, default=1)       # latents per batch
    parser.add_argument('--pics', type=int, default=20)        # images for generate()
    parser.add_argument('--ckpt', type=str, default="stylegan2-ffhq-config-f.pt")
    parser.add_argument('--channel_multiplier', type=int, default=2)
    args = parser.parse_args()
    # StyleGAN2 latent width and mapping-network depth (fixed, not CLI options).
    args.latent = 512
    args.n_mlp = 8
    g_ema = Generator(
        args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
    ).to(device)
    # NOTE(review): this hard-coded path silently overrides the --ckpt
    # argument parsed above — confirm this is intentional before reuse.
    args.ckpt = '/home/sangwon/Work/NeuralNetworks/StyleGan/weights/stylegan2-1024-mult2-r1-10-asphalt-large/060000.pt'
    checkpoint = torch.load(args.ckpt)
    g_ema.load_state_dict(checkpoint['g_ema'])
    # generate_larger_sample(args, g_ema, device)
    generate_larger_overlaping_sample(args, g_ema, device)
    # generate_specified_samples(args, g_ema, device)
    # list_styles(args, g_ema, device)
    generate(args, g_ema, device)
|
988,652 | 629df16ed492254663479c6f34ea0f2e61efb781 | import os
import pprint
import argparse
import humanfriendly
import boto3
import botocore.utils
DEFAULT_CHUNK_SIZE = 67108864  # 64MB


class Upload(object):
    """Drives a multipart upload of one file to an AWS Glacier vault."""

    def __init__(self, args):
        self.vault = args.vault
        self.file = args.file
        self.description = args.description
        self.chunk_size = args.chunk_size
        self.verbose = args.verbose
        self.multipart_upload_resource = None
        # Maps chunk index -> tree-hash checksum returned by upload_part.
        self.part_checksums = {}

    def get_chunks(self):
        """Yield (bytes, start_byte, end_byte, index) for each file chunk."""
        start_byte = 0
        count = 0
        with open(self.file, 'rb') as f:
            while True:
                chunk = f.read(self.chunk_size)
                if not chunk:
                    return
                end_byte = start_byte + len(chunk) - 1
                yield chunk, start_byte, end_byte, count
                start_byte += len(chunk)
                count += 1
                # A short read means we just emitted the final chunk.
                if len(chunk) < self.chunk_size:
                    return

    def initiate_upload(self):
        """Create the Glacier multipart-upload resource for this file."""
        client = boto3.client('glacier')
        initiate_response = client.initiate_multipart_upload(
            vaultName=self.vault,
            archiveDescription=self.description,
            partSize=str(self.chunk_size))
        pprint.pprint(initiate_response)
        upload_id = initiate_response['uploadId']
        account_id = boto3.client('sts').get_caller_identity()['Account']
        glacier = boto3.resource('glacier')
        self.multipart_upload_resource = glacier.MultipartUpload(
            account_id, self.vault, upload_id)

    def upload_all_chunks(self):
        """Upload every chunk, recording the checksum Glacier returns."""
        for chunk_bytes, start_byte, end_byte, count in self.get_chunks():
            content_range = 'bytes {}-{}/*'.format(start_byte, end_byte)
            print('Uploading chunk {} ({})'.format(count, content_range))
            response = self.multipart_upload_resource.upload_part(
                range=content_range, body=chunk_bytes)
            # Bug fix: this was `self.part_checksums[chunk.count]`, an
            # AttributeError on a tuple; key by the unpacked chunk index.
            self.part_checksums[count] = response['checksum']

    def complete_upload(self):
        """Finish the multipart upload with the archive size and tree hash."""
        total_size = os.path.getsize(self.file)
        total_checksum = botocore.utils.calculate_tree_hash(open(self.file, 'rb'))
        completion_response = self.multipart_upload_resource.complete(
            archiveSize=str(total_size), checksum=total_checksum)
        print('Completing upload')
        pprint.pprint(completion_response)
        print('\33[32mUpload complete: {}\33[0m'.format(self.file))
def parse_args():
    """Parse and normalise the uploader's command-line arguments."""

    def parse_chunk_size():
        # -s arrives as a one-element list of human-readable text (e.g.
        # ['4MB']); the default is already an int and needs no parsing.
        raw = args.chunk_size
        size = humanfriendly.parse_size(raw[0], binary=True) \
            if isinstance(raw, list) else raw
        if not 1048576 <= size <= 4294967296:
            raise ValueError('Illegal chunk size: must be between 1 MiB and 4 GiB ({} b given)'.format(size))
        # Glacier only accepts a megabyte multiplied by a power of 2,
        # e.g. 2^4 MiB = 16 MiB.
        megabytes = size // 1048576
        if megabytes & (megabytes - 1):
            raise ValueError('Illegal chunk size: {} is not a power of 2'.format(megabytes))
        return size

    parser = argparse.ArgumentParser(description='Upload large files to AWS Glacier easily.')
    parser.add_argument('vault', nargs=1, type=str, help='Glacier vault to upload to')
    parser.add_argument('file', nargs=1, type=str, help='file to upload')
    parser.add_argument('-v', '--verbose', default=False, action='store_true', help='verbose output')
    parser.add_argument('-d', '--description', nargs=1, type=str, metavar='DESC',
                        help='file description - defaults to file name')
    parser.add_argument('-s', '--chunk-size', nargs=1, type=str, dest='chunk_size', default=DEFAULT_CHUNK_SIZE,
                        metavar='SIZE', help='chunk size, specified as number + scale, e.g. "4MB", "2GB" - '
                                             'must be a megabyte multiplied by a power of 2, max 4 GiB, use '
                                             'suffix MB/MiB/GB/GiB, etc. (defaults to 64 MiB)')
    args = parser.parse_args()
    # Unwrap the single-element nargs=1 lists and normalise the values.
    args.vault = args.vault[0]
    args.file = os.path.abspath(args.file[0])
    args.description = args.description if args.description else os.path.basename(args.file)
    args.chunk_size = parse_chunk_size()
    return args
def main():
    """Parse CLI arguments and run the multipart upload end to end."""
    upload = Upload(parse_args())
    upload.initiate_upload()
    upload.upload_all_chunks()
    upload.complete_upload()


if __name__ == '__main__':
    main()
|
988,653 | 4a82c7dfebb1662f0d95b0d63623e148fefd70a5 | # -*- coding: utf-8 -*-
"""
camplight.cli
~~~~~~~~~~~~~
This module implements the command-line interface to the Campfire API.
"""
import sys
import os
import optparse
from .api import *
from .exceptions import *
def die(msg):
    """Exit the process, printing ``error: <msg>`` on stderr."""
    sys.exit('error: {0}'.format(msg))
def main(argv=None):
    """CLI entry point: parse options, dispatch the command, print JSON.

    :param argv: argument list to parse (defaults to sys.argv via optparse)
    """
    usage = 'Usage: %prog [options] <command> [args]'
    parser = optparse.OptionParser(usage=usage)
    # Every option can also come from a CAMPFIRE_* environment variable.
    parser.add_option('-u', '--url',
                      help='set Campfire URL',
                      default=os.environ.get('CAMPFIRE_URL'))
    parser.add_option('-t', '--token',
                      help='set API token for authentication',
                      default=os.environ.get('CAMPFIRE_TOKEN'))
    parser.add_option('-r', '--room',
                      help='set Campfire room',
                      default=os.environ.get('CAMPFIRE_ROOM'))
    parser.add_option('-v', '--verbose',
                      help='be more verbose',
                      action='store_true',
                      default=os.environ.get('CAMPFIRE_VERBOSE'))
    opts, args = parser.parse_args(argv)
    if not opts.url:
        die('Campfire URL missing')
    if not opts.token:
        die('API token missing')
    if len(args) < 1:
        die('too few arguments')
    # When verbose, request tracing is written to stderr.
    verbose = sys.stderr if opts.verbose else None
    request = Request(opts.url, opts.token, verbose)
    campfire = Campfire(request)
    cmd = args.pop(0)
    # Account-level commands dispatch straight to the Campfire object...
    if cmd in ('account', 'rooms', 'user', 'presence', 'search'):
        func = getattr(campfire, cmd)
    # ...room-level commands need a room first (hyphens map to underscores).
    elif cmd in ('status', 'recent', 'transcript', 'uploads',
                 'join', 'leave', 'lock', 'unlock', 'speak',
                 'paste', 'play', 'set-name', 'set-topic'):
        if opts.room is None:
            die('Campfire room missing')
        try:
            room = campfire.room(opts.room)
        except (RequestException, CamplightException) as e:
            die('%s: %s' % (e.__class__.__name__, e))
        func = getattr(room, cmd.replace('-', '_'))
    else:
        die('invalid command')
    try:
        data = func(*args)
    except TypeError:
        # Wrong number of positional args for the chosen API method.
        die('invalid arguments')
    except (RequestException, CamplightException) as e:
        die('%s: %s' % (e.__class__.__name__, e))
    if data:
        # HACK re-encode json for pretty output
        import json
        print(json.dumps(data, indent=4))
988,654 | a87f49a1aaa039470c163b32b573e960c42b2b97 | import sys
import re

# Count non-overlapping occurrences of "you" in everything read from stdin.
text = sys.stdin.read()
print(len(re.findall("you", text)))
|
988,655 | 05c44b33f9f8e2aefdaa9b64b4aa12a046a11705 | """webapps URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from project import views
from project.views import ChatNoticeListView, ChatNoticeUpdateView
from django.urls import path, include
import notifications.urls
from django.contrib.auth.decorators import login_required
urlpatterns = [
    # user-related functions (auth, profiles, editing)
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    path('register', views.register, name='register'),
    path('login', views.loginAction, name='login'),
    path('logout', views.logoutAction, name='logout'),
    path('myprofile', views.get_myprofile, name='myprofile'),
    # path('profile', views.profile, name='profile'),
    path('other_profile/<int:id>', views.other_profile_action, name='other_profile'),
    path('picture/<int:id>', views.get_picture, name='picture'),
    path('edituser', views.edituser, name='edituser'),
    path('editlawyer', views.editlawyer, name='editlawyer'),
    path('changeedit', views.change_edit, name='changeedit'),
    # path('project/'),include('project.urls'),
    # Q&A functions (questions, answers, streams)
    path('qna', views.qna_stream, name='qna'),
    path('create_question', views.create_question, name='create_question'),
    path('delete_question/<int:question_id>', views.delete_question, name='delete_question'),
    path('submit_answer/<question_id>', views.submit_answer, name="submit_answer"),
    path('questionPage/<question_id>', views.questionPage, name='questionPage'),
    path('get-qna', views.get_qna_action, name='get_qna'),
    path('add-answer', views.add_answer_action, name='add_answer'),
    # search function
    path('search_keyword',views.search_keyword,name='search_keyword'),
    # lawyer list
    path('lawyer-list',views.lawyer_list,name='lawyer-list'),
    # chat functions (1:1 chat, contacts, messages, attachments)
    path('chat/<int:pk>', views.start_chat, name ='chat'),
    path('chatcontact', views.get_chatcontact, name ='chatcontact'),
    path('create_chatmessage/<int:pk>', views.create_chatmessage_action, name='create_chatmessage'),
    path('chat/get-chat/<int:pk>', views.get_chat_action, name='get_chat'),
    path('chat/get-image/<int:id>', views.get_msg_image, name='get-image'),
    # general and blogging functions (articles, comments, reactions)
    path('homepage', views.homepage, name='homepage'),
    path('blog/', views.blog, name='blog'),
    path('categories', views.categories, name='categories'),
    path('create_article', views.create_article, name='create_article'),
    path('delete_article/<int:article_id>', views.delete_article, name='delete_article'),
    path('articlePage/<article_id>', views.articlePage, name='articlePage'),
    path('submit_comment/<article_id>', views.submit_comment, name="submit_comment"),
    path('getImg/<int:type>/<int:id>', views.getImg, name='getImg'),
    path('demo', views.demo, name='demo'),
    path('collect/<int:id>/<int:type>', views.collect, name='collect'),
    path('like/<int:id>/<int:type>', views.like, name='like'),
    path('dislike/<int:id>/<int:type>', views.dislike, name='dislike'),
    path('searchByTag/', views.searchByTag, name='searchByTag'),
    # notification functions (django-notifications inbox + chat notices)
    path('inbox/notifications/', include(notifications.urls, namespace='notifications')),
    # path('notice/', include('notice.urls', namespace='notice')),
    path('notice_list/', login_required(views.ChatNoticeListView.as_view()), name='notice_list'),
    path('update/', login_required(views.ChatNoticeUpdateView.as_view()), name='notice_update'),
]
|
988,656 | a571055ce9ef24f9a915f99088fc9c0fd0d02c18 | # -*- encoding: utf-8 -*-
from . import FixtureTest
class HideEarlyNoAreaGardenTest(FixtureTest):
    """Checks that area-less garden-like features only appear as POIs at z16.

    Each test generates a minimal OSM fixture (node or way) and asserts the
    expected kind/min_zoom in the rendered tile layers.
    """

    def test_allotments_node(self):
        """An allotments node becomes a z16 POI."""
        import dsl
        z, x, y = (16, 32683, 21719)
        self.generate_fixtures(
            # https://www.openstreetmap.org/node/1271465901
            dsl.point(1271465901, (-0.4660196, 51.7557019), {
                'landuse': u'allotments',
                'name': u'Midland Hill Allotments',
                'source': u'openstreetmap.org',
            }),
        )
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 1271465901,
                'kind': u'allotments',
                'min_zoom': 16,
            })

    def test_allotments_way(self):
        """An allotments polygon yields both a z16 POI and a landuse polygon."""
        import dsl
        z, x, y = (16, 32748, 21779)
        self.generate_fixtures(
            # https://www.openstreetmap.org/way/32055218
            dsl.way(32055218, dsl.tile_box(z, x, y), {
                'landuse': u'allotments',
                'name': u'Arvon Road allotments',
                'source': u'openstreetmap.org',
            }),
        )
        # should have point in POIs
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 32055218,
                'kind': u'allotments',
                'min_zoom': 16,
            })
        # and polygon in landuse
        self.assert_has_feature(
            z, x, y, 'landuse', {
                'id': 32055218,
                'kind': u'allotments',
            })

    def test_garden_node(self):
        """A garden node becomes a z16 POI."""
        import dsl
        z, x, y = (16, 10473, 25332)
        self.generate_fixtures(
            # https://www.openstreetmap.org/node/2969748430
            dsl.point(2969748430, (-122.469992, 37.767533), {
                'leisure': u'garden',
                'name': u'South Africa Garden',
                'source': u'openstreetmap.org',
            }),
        )
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 2969748430,
                'kind': u'garden',
                'min_zoom': 16,
            })

    def test_university_node(self):
        """A university node becomes a z16 POI."""
        import dsl
        z, x, y = (16, 10484, 25327)
        self.generate_fixtures(
            # https://www.openstreetmap.org/node/4628353540
            dsl.point(4628353540, (-122.404460, 37.790842), {
                'amenity': u'university',
                'name': u'Academy of Arts University',
                'source': u'openstreetmap.org',
            }),
        )
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 4628353540,
                'kind': u'university',
                'min_zoom': 16,
            })
|
def remove_last_e(str):
    """Drop one trailing 'e' when the string ends in a double 'e'.

    Robustness fix: strings shorter than two characters (including "")
    previously raised IndexError; they are now returned unchanged.
    The parameter keeps its original name `str` for backward compatibility,
    even though it shadows the builtin.
    """
    if str.endswith("ee"):
        return str[:-1]
    return str
|
988,658 | e2be3ea3e2596d78c6fb70b570bd1f164bb28c5e | '''
You have an empty sequence, and you will be given queries. Each query is one of these three types:
1 x -Push the element x into the stack.
2 -Delete the element present at the top of the stack.
3 -Print the maximum element in the stack.
Input Format
The first line of input contains an integer, . The next lines each contain an above mentioned query. (It is guaranteed that each query is valid.)
Constraints
Output Format
For each type query, print the maximum element in the stack on a new line.
Sample Input
10
1 97
2
1 20
2
1 26
1 20
2
3
1 91
3
Sample Output
26
91
'''
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Read N queries; type "1 x" pushes x, "2" pops, "3" prints the current max.
# Perf fix: the original rescanned the whole stack with max() whenever the
# maximum was popped (O(n) per pop, O(n^2) worst case). Each entry now
# stores the running maximum alongside the value, making every op O(1).
# This also fixes a latent bug where all-negative stacks reported 0.
n = int(input())
stack = []  # entries are (value, max of stack up to and including this entry)
for _ in range(n):
    query = list(map(int, input().split()))
    if query[0] == 1:
        value = query[1]
        running_max = value if not stack else max(value, stack[-1][1])
        stack.append((value, running_max))
    elif query[0] == 2:
        stack.pop()
    elif query[0] == 3:
        # Empty stack prints 0, matching the original behaviour.
        print(stack[-1][1] if stack else 0)
|
988,659 | 55d1f3713016d87f86091c9fb8de33791bd3d325 | # Exercício Python 027: Faça um programa que leia o nome completo de uma pessoa, mostrando em seguida o primeiro e o
# Python exercise 027: read a person's full name and print the first and the
# last name separately, e.g. "Ana Maria de Souza" -> primeiro = Ana; último = Souza.
words = input('Digite seu nome: ').strip().title().split()
print('Muito prazer em te conhecer')
print('Seu primeiro nome é {}'.format(words[0]))
print('Seu segundo nome é {}'.format(words[-1]))
|
988,660 | ea536855ec460688c2b0a385e35baebfb603ba60 | #!/usr/bin/env python
"""GRR specific AFF4 objects."""
import re
import time
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.aff4_objects import standard
from grr.proto import flows_pb2
class SpaceSeparatedStringArray(rdfvalue.RDFString):
  """A special string which stores strings as space separated."""

  def __iter__(self):
    # Iterating the value yields each whitespace-separated token in turn.
    return iter(self._value.split())
class VersionString(rdfvalue.RDFString):
  """Version string exposing its leading numeric dotted components."""

  @property
  def versions(self):
    """Integers from the dotted prefix, stopping at the first non-integer.

    E.g. "1.2.3b" -> [1, 2]; a wholly non-numeric string yields [].
    """
    result = []
    for component in str(self).split("."):
      try:
        number = int(component)
      except ValueError:
        break
      result.append(number)
    return result
class VFSGRRClient(standard.VFSDirectory):
  """A Remote client."""

  # URN of the index for client labels.
  labels_index_urn = rdfvalue.RDFURN("aff4:/index/labels/clients")

  class SchemaCls(standard.VFSDirectory.SchemaCls):
    """The schema for the client."""

    # Index used by several searchable attributes below (hostname, MAC, IPs,
    # usernames) so clients can be looked up quickly.
    client_index = rdfvalue.RDFURN("aff4:/index/client")
    CERT = aff4.Attribute("metadata:cert", rdfvalue.RDFX509Cert,
                          "The PEM encoded cert of the client.")
    FILESYSTEM = aff4.Attribute("aff4:filesystem", rdfvalue.Filesystems,
                                "Filesystems on the client.")
    CLIENT_INFO = aff4.Attribute(
        "metadata:ClientInfo", rdfvalue.ClientInformation,
        "GRR client information", "GRR client", default="")
    LAST_BOOT_TIME = aff4.Attribute("metadata:LastBootTime",
                                    rdfvalue.RDFDatetime,
                                    "When the machine was last booted",
                                    "BootTime")
    FIRST_SEEN = aff4.Attribute("metadata:FirstSeen", rdfvalue.RDFDatetime,
                                "First time the client registered with us",
                                "FirstSeen")
    # Information about the host.
    HOSTNAME = aff4.Attribute("metadata:hostname", rdfvalue.RDFString,
                              "Hostname of the host.", "Host",
                              index=client_index)
    FQDN = aff4.Attribute("metadata:fqdn", rdfvalue.RDFString,
                          "Fully qualified hostname of the host.", "FQDN",
                          index=client_index)
    SYSTEM = aff4.Attribute("metadata:system", rdfvalue.RDFString,
                            "Operating System class.", "System")
    UNAME = aff4.Attribute("metadata:uname", rdfvalue.RDFString,
                           "Uname string.", "Uname")
    OS_RELEASE = aff4.Attribute("metadata:os_release", rdfvalue.RDFString,
                                "OS Major release number.", "Release")
    OS_VERSION = aff4.Attribute("metadata:os_version", VersionString,
                                "OS Version number.", "Version")
    # ARCH values come from platform.uname machine value, e.g. x86_64, AMD64.
    ARCH = aff4.Attribute("metadata:architecture", rdfvalue.RDFString,
                          "Architecture.", "Architecture")
    INSTALL_DATE = aff4.Attribute("metadata:install_date", rdfvalue.RDFDatetime,
                                  "Install Date.", "Install")
    # The knowledge base is used for storing data about the host and users.
    # This is currently a slightly odd object as we only use some of the fields.
    # The proto itself is used in Artifact handling outside of GRR (e.g. Plaso).
    # Over time we will migrate fields into this proto, but for now it is a mix.
    KNOWLEDGE_BASE = aff4.Attribute("metadata:knowledge_base",
                                    rdfvalue.KnowledgeBase,
                                    "Artifact Knowledge Base", "KnowledgeBase")
    GRR_CONFIGURATION = aff4.Attribute(
        "aff4:client_configuration", rdfvalue.Dict,
        "Running configuration for the GRR client.", "Config")
    USER = aff4.Attribute("aff4:users", rdfvalue.Users,
                          "A user of the system.", "Users")
    USERNAMES = aff4.Attribute("aff4:user_names", SpaceSeparatedStringArray,
                               "A space separated list of system users.",
                               "Usernames",
                               index=client_index)
    # This information is duplicated from the INTERFACES attribute but is done
    # to allow for fast searching by mac address.
    MAC_ADDRESS = aff4.Attribute("aff4:mac_addresses", rdfvalue.RDFString,
                                 "A hex encoded MAC address.", "MAC",
                                 index=client_index)
    KERNEL = aff4.Attribute("aff4:kernel_version", rdfvalue.RDFString,
                            "Kernel version string.", "KernelVersion")
    # Same for IP addresses.
    HOST_IPS = aff4.Attribute("aff4:host_ips", rdfvalue.RDFString,
                              "An IP address.", "Host_ip",
                              index=client_index)
    PING = aff4.Attribute("metadata:ping", rdfvalue.RDFDatetime,
                          "The last time the server heard from this client.",
                          "LastCheckin", versioned=False, default=0)
    CLOCK = aff4.Attribute("metadata:clock", rdfvalue.RDFDatetime,
                           "The last clock read on the client "
                           "(Can be used to estimate client clock skew).",
                           "Clock", versioned=False)
    CLIENT_IP = aff4.Attribute("metadata:client_ip", rdfvalue.RDFString,
                               "The ip address this client connected from.",
                               "Client_ip", versioned=False)
    # This is the last foreman rule that applied to us
    LAST_FOREMAN_TIME = aff4.Attribute(
        "aff4:last_foreman_time", rdfvalue.RDFDatetime,
        "The last time the foreman checked us.", versioned=False)
    LAST_INTERFACES = aff4.Attribute(
        "aff4:last_interfaces", rdfvalue.Interfaces,
        "Last seen network interfaces. Full history is maintained in the "
        "clientid/network object. Separated for performance reasons.",
        versioned=False)
    LAST_CRASH = aff4.Attribute(
        "aff4:last_crash", rdfvalue.ClientCrash,
        "Last client crash.", creates_new_object_version=False,
        versioned=False)
    VOLUMES = aff4.Attribute(
        "aff4:volumes", rdfvalue.Volumes,
        "Client disk volumes.")
    HARDWARE_INFO = aff4.Attribute(
        "aff4:hardware_info", rdfvalue.HardwareInfo,
        "Various hardware information.", default="")

  # Valid client ids
  CLIENT_ID_RE = re.compile(r"^C\.[0-9a-fA-F]{16}$")

  @property
  def age(self):
    """RDFDatetime at which the object was created."""
    # TODO(user) move up to AFF4Object after some analysis of how .age is
    # used in the codebase.
    aff4_type = self.Get(self.Schema.TYPE)
    if aff4_type:
      return aff4_type.age
    else:
      # If there is no type attribute yet, we have only just been created and
      # not flushed yet, so just set timestamp to now.
      return rdfvalue.RDFDatetime().Now()

  def Initialize(self):
    # Our URN must be a valid client.id.
    self.client_id = rdfvalue.ClientURN(self.urn)

  def Update(self, attribute=None, priority=None):
    # Refreshing "CONTAINS" (the directory listing) triggers a full
    # Interrogate flow; other attributes are ignored and return None.
    if attribute == "CONTAINS":
      flow_id = flow.GRRFlow.StartFlow(client_id=self.client_id,
                                       flow_name="Interrogate",
                                       token=self.token, priority=priority)
      return flow_id

  def OpenMember(self, path, mode="rw"):
    # Delegate directly to the AFF4Volume implementation.
    return aff4.AFF4Volume.OpenMember(self, path, mode=mode)

  # Maps each PathSpec.PathType to the AFF4 subtree it is stored under.
  AFF4_PREFIXES = {rdfvalue.PathSpec.PathType.OS: "/fs/os",
                   rdfvalue.PathSpec.PathType.TSK: "/fs/tsk",
                   rdfvalue.PathSpec.PathType.REGISTRY: "/registry",
                   rdfvalue.PathSpec.PathType.MEMORY: "/devices/memory"}

  @staticmethod
  def ClientURNFromURN(urn):
    # The client id is the first path component of any URN inside a client.
    return rdfvalue.ClientURN(rdfvalue.RDFURN(urn).Split()[0])

  @staticmethod
  def PathspecToURN(pathspec, client_urn):
    """Returns a mapping between a pathspec and an AFF4 URN.

    Args:
      pathspec: The PathSpec instance to convert.
      client_urn: A URN of any object within the client. We use it to find the
          client id.

    Returns:
      A urn that corresponds to this pathspec.

    Raises:
      ValueError: If pathspec is not of the correct type.
    """
    client_urn = rdfvalue.ClientURN(client_urn)
    if not isinstance(pathspec, rdfvalue.RDFValue):
      raise ValueError("Pathspec should be an rdfvalue.")
    # If the first level is OS and the second level is TSK its probably a mount
    # point resolution. We map it into the tsk branch. For example if we get:
    # path: \\\\.\\Volume{1234}\\
    # pathtype: OS
    # mount_point: /c:/
    # nested_path {
    #   path: /windows/
    #   pathtype: TSK
    # }
    # We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
    dev = pathspec[0].path
    if pathspec[0].HasField("offset"):
      # We divide here just to get prettier numbers in the GUI
      dev += ":" + str(pathspec[0].offset / 512)
    if (len(pathspec) > 1 and
        pathspec[0].pathtype == rdfvalue.PathSpec.PathType.OS and
        pathspec[1].pathtype == rdfvalue.PathSpec.PathType.TSK):
      result = [VFSGRRClient.AFF4_PREFIXES[rdfvalue.PathSpec.PathType.TSK],
                dev]
      # Skip the top level pathspec.
      pathspec = pathspec[1]
    else:
      # For now just map the top level prefix based on the first pathtype
      result = [VFSGRRClient.AFF4_PREFIXES[pathspec[0].pathtype]]
    for p in pathspec:
      component = p.path
      # The following encode different pathspec properties into the AFF4 path in
      # such a way that unique files on the client are mapped to unique URNs in
      # the AFF4 space. Note that this transformation does not need to be
      # reversible since we always use the PathSpec when accessing files on the
      # client.
      if p.HasField("offset"):
        component += ":" + str(p.offset / 512)
      # Support ADS names.
      if p.HasField("stream_name"):
        component += ":" + p.stream_name
      result.append(component)
    return client_urn.Add("/".join(result))

  def GetSummary(self):
    """Gets a client summary object.

    Returns:
      rdfvalue.ClientSummary
    """
    # Disable age-based filtering so we always read the latest values.
    self.max_age = 0
    summary = rdfvalue.ClientSummary(client_id=self.urn)
    summary.system_info.node = self.Get(self.Schema.HOSTNAME)
    summary.system_info.system = self.Get(self.Schema.SYSTEM)
    summary.system_info.release = self.Get(self.Schema.OS_RELEASE)
    summary.system_info.version = str(self.Get(self.Schema.OS_VERSION, ""))
    summary.system_info.kernel = self.Get(self.Schema.KERNEL)
    summary.system_info.fqdn = self.Get(self.Schema.FQDN)
    summary.system_info.machine = self.Get(self.Schema.ARCH)
    summary.system_info.install_date = self.Get(
        self.Schema.INSTALL_DATE)
    summary.users = self.Get(self.Schema.USER)
    summary.interfaces = self.Get(self.Schema.LAST_INTERFACES)
    summary.client_info = self.Get(self.Schema.CLIENT_INFO)
    summary.serial_number = self.Get(self.Schema.HARDWARE_INFO).serial_number
    summary.timestamp = self.age
    return summary
class UpdateVFSFileArgs(rdfvalue.RDFProtoStruct):
  """Arguments proto wrapper for the UpdateVFSFile flow below."""
  protobuf = flows_pb2.UpdateVFSFileArgs
class UpdateVFSFile(flow.GRRFlow):
  """A flow to update VFS file."""
  args_type = UpdateVFSFileArgs

  def Init(self):
    # State slot that will hold the URN of the flow started by Update().
    self.state.Register("get_file_flow_urn")

  @flow.StateHandler()
  def Start(self):
    """Calls the Update() method of a given VFSFile/VFSDirectory object."""
    self.Init()
    fd = aff4.FACTORY.Open(self.args.vfs_file_urn, mode="rw",
                           token=self.token)
    # Account for implicit directories.
    if fd.Get(fd.Schema.TYPE) is None:
      fd = fd.Upgrade("VFSDirectory")
    self.state.get_file_flow_urn = fd.Update(
        attribute=self.args.attribute,
        priority=rdfvalue.GrrMessage.Priority.HIGH_PRIORITY)
class VFSFile(aff4.AFF4Image):
  """A VFSFile object."""

  class SchemaCls(aff4.AFF4Image.SchemaCls):
    """The schema for AFF4 files in the GRR VFS."""
    STAT = standard.VFSDirectory.SchemaCls.STAT
    CONTENT_LOCK = aff4.Attribute(
        "aff4:content_lock", rdfvalue.RDFURN,
        "This lock contains a URN pointing to the flow that is currently "
        "updating this flow.")
    PATHSPEC = aff4.Attribute(
        "aff4:pathspec", rdfvalue.PathSpec,
        "The pathspec used to retrieve this object from the client.")
    FINGERPRINT = aff4.Attribute("aff4:fingerprint",
                                 rdfvalue.FingerprintResponse,
                                 "DEPRECATED protodict containing arrays of "
                                 " hashes. Use AFF4Stream.HASH instead.")

  def Update(self, attribute=None, priority=None):
    """Update an attribute from the client."""
    if attribute == self.Schema.CONTENT:
      # List the directory on the client
      currently_running = self.Get(self.Schema.CONTENT_LOCK)
      # Is this flow still active?
      if currently_running:
        flow_obj = aff4.FACTORY.Open(currently_running, token=self.token)
        if flow_obj.IsRunning():
          return
      # The client_id is the first element of the URN
      client_id = self.urn.Path().split("/", 2)[1]
      # Get the pathspec for this object
      pathspec = self.Get(self.Schema.STAT).pathspec
      flow_urn = flow.GRRFlow.StartFlow(
          client_id=client_id, flow_name="MultiGetFile", token=self.token,
          pathspecs=[pathspec], priority=priority)
      # Remember the running flow so concurrent updates are de-duplicated.
      self.Set(self.Schema.CONTENT_LOCK(flow_urn))
      self.Close()
      return flow_urn
class MemoryImage(VFSFile):
  """The server representation of the client's memory device."""

  class SchemaCls(VFSFile.SchemaCls):
    # Adds the memory geometry on top of the regular VFSFile schema.
    LAYOUT = aff4.Attribute("aff4:memory/geometry", rdfvalue.MemoryInformation,
                            "The memory layout of this image.")
class VFSMemoryFile(aff4.AFF4MemoryStream):
  """A VFS file under a VFSDirectory node which does not have storage."""

  class SchemaCls(aff4.AFF4MemoryStream.SchemaCls):
    """The schema for AFF4 files in the GRR VFS."""
    # Support also VFSFile attributes.
    STAT = VFSFile.SchemaCls.STAT
    HASH = VFSFile.SchemaCls.HASH
    PATHSPEC = VFSFile.SchemaCls.PATHSPEC
    CONTENT_LOCK = VFSFile.SchemaCls.CONTENT_LOCK
    FINGERPRINT = VFSFile.SchemaCls.FINGERPRINT
class VFSAnalysisFile(VFSFile):
  """A VFS file which has no Update method."""

  def Update(self, attribute=None, priority=None):
    """No-op override: analysis files cannot be refreshed from the client.

    The signature matches VFSFile.Update (which also accepts `priority`);
    the original override omitted `priority` and would raise TypeError when
    called polymorphically, e.g. by UpdateVFSFile.Start above.
    """
    pass
class GRRForeman(aff4.AFF4Object):
  """The foreman starts flows for clients depending on rules."""

  class SchemaCls(aff4.AFF4Object.SchemaCls):
    """Attributes specific to VFSDirectory."""
    RULES = aff4.Attribute("aff4:rules", rdfvalue.ForemanRules,
                           "The rules the foreman uses.",
                           default=rdfvalue.ForemanRules())

  def ExpireRules(self):
    """Removes any rules with an expiration date in the past."""
    rules = self.Get(self.Schema.RULES)
    new_rules = self.Schema.RULES()
    now = time.time() * 1e6  # rule timestamps are microseconds since epoch
    expired_session_ids = set()
    for rule in rules:
      if rule.expires > now:
        new_rules.Append(rule)
      else:
        # Collect the hunts referenced by expired rules so the worker can be
        # told to terminate them.
        for action in rule.actions:
          if action.hunt_id:
            expired_session_ids.add(action.hunt_id)
    if expired_session_ids:
      # Notify the worker to mark this hunt as terminated.
      manager = queue_manager.QueueManager(token=self.token)
      manager.MultiNotifyQueue(
          [rdfvalue.GrrNotification(session_id=session_id)
           for session_id in expired_session_ids])
    if len(new_rules) < len(rules):
      self.Set(self.Schema.RULES, new_rules)
      self.Flush()

  def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id):
    """Will return True if hunt's task was assigned to this client before."""
    for _ in aff4.FACTORY.Stat(
        [client_id.Add("flows/%s:hunt" %
                       rdfvalue.RDFURN(hunt_id).Basename())],
        token=self.token):
      return True
    return False

  def _EvaluateRules(self, objects, rule, client_id):
    """Evaluates the rules."""
    try:
      # Do the attribute regex first.
      for regex_rule in rule.regex_rules:
        path = client_id.Add(regex_rule.path)
        fd = objects[path]
        attribute = aff4.Attribute.NAMES[regex_rule.attribute_name]
        value = utils.SmartStr(fd.Get(attribute))
        if not regex_rule.attribute_regex.Search(value):
          return False
      # Now the integer rules.
      for integer_rule in rule.integer_rules:
        path = client_id.Add(integer_rule.path)
        fd = objects[path]
        attribute = aff4.Attribute.NAMES[integer_rule.attribute_name]
        try:
          value = int(fd.Get(attribute))
        except (ValueError, TypeError):
          # Not an integer attribute.
          return False
        op = integer_rule.operator
        if op == rdfvalue.ForemanAttributeInteger.Operator.LESS_THAN:
          if value >= integer_rule.value:
            return False
        elif op == rdfvalue.ForemanAttributeInteger.Operator.GREATER_THAN:
          if value <= integer_rule.value:
            return False
        elif op == rdfvalue.ForemanAttributeInteger.Operator.EQUAL:
          if value != integer_rule.value:
            return False
        else:
          # Unknown operator.
          return False
      return True
    except KeyError:
      # The requested attribute was not found.
      return False

  def _RunActions(self, rule, client_id):
    """Run all the actions specified in the rule.

    Args:
      rule: Rule which actions are to be executed.
      client_id: Id of a client where rule's actions are to be executed.

    Returns:
      Number of actions started.
    """
    actions_count = 0
    for action in rule.actions:
      try:
        # Say this flow came from the foreman.
        token = self.token.Copy()
        token.username = "Foreman"
        if action.HasField("hunt_id"):
          if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id):
            # BUG FIX: the arguments were previously swapped (client_id was
            # substituted for the hunt placeholder and vice versa).
            logging.info("Foreman: ignoring hunt %s on client %s: was started "
                         "here before", action.hunt_id, client_id)
          else:
            logging.info("Foreman: Starting hunt %s on client %s.",
                         action.hunt_id, client_id)
            flow_cls = flow.GRRFlow.classes[action.hunt_name]
            flow_cls.StartClients(action.hunt_id, [client_id])
            actions_count += 1
        else:
          flow.GRRFlow.StartFlow(
              client_id=client_id, flow_name=action.flow_name, token=token,
              **action.argv.ToDict())
          actions_count += 1
      # There could be all kinds of errors we don't know about when starting the
      # flow/hunt so we catch everything here.
      except Exception as e:  # pylint: disable=broad-except
        # BUG FIX: the message promises a client id but action.hunt_id was
        # passed; use client_id as advertised.
        logging.exception("Failure running foreman action on client %s: %s",
                          client_id, e)
    return actions_count

  def AssignTasksToClient(self, client_id):
    """Examines our rules and starts up flows based on the client.

    Args:
      client_id: Client id of the client for tasks to be assigned.

    Returns:
      Number of assigned tasks.
    """
    client_id = rdfvalue.ClientURN(client_id)
    rules = self.Get(self.Schema.RULES)
    if not rules:
      return 0
    client = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)
    try:
      last_foreman_run = client.Get(client.Schema.LAST_FOREMAN_TIME) or 0
    except AttributeError:
      last_foreman_run = 0
    latest_rule = max([rule.created for rule in rules])
    if latest_rule <= int(last_foreman_run):
      return 0
    # Update the latest checked rule on the client.
    client.Set(client.Schema.LAST_FOREMAN_TIME(latest_rule))
    client.Close()
    # For efficiency we collect all the objects we want to open first and then
    # open them all in one round trip.
    object_urns = {}
    relevant_rules = []
    expired_rules = False
    now = time.time() * 1e6
    for rule in rules:
      if rule.expires < now:
        expired_rules = True
        continue
      if rule.created <= int(last_foreman_run):
        continue
      relevant_rules.append(rule)
      for regex in rule.regex_rules:
        aff4_object = client_id.Add(regex.path)
        object_urns[str(aff4_object)] = aff4_object
      for int_rule in rule.integer_rules:
        aff4_object = client_id.Add(int_rule.path)
        object_urns[str(aff4_object)] = aff4_object
    # Retrieve all aff4 objects we need.
    objects = {}
    for fd in aff4.FACTORY.MultiOpen(object_urns, token=self.token):
      objects[fd.urn] = fd
    actions_count = 0
    for rule in relevant_rules:
      if self._EvaluateRules(objects, rule, client_id):
        actions_count += self._RunActions(rule, client_id)
    if expired_rules:
      self.ExpireRules()
    return actions_count
class GRRAFF4Init(registry.InitHook):
  """Ensure critical AFF4 objects exist for GRR."""

  # Must run after the AFF4 subsystem is ready.
  pre = ["AFF4InitHook"]

  def Run(self):
    try:
      # Make the foreman
      fd = aff4.FACTORY.Create("aff4:/foreman", "GRRForeman",
                               token=aff4.FACTORY.root_token)
      fd.Close()
    except access_control.UnauthorizedAccess:
      # NOTE(review): creation is best-effort; presumably this fires on
      # read-only/restricted data stores — confirm before changing.
      pass
class AFF4CollectionView(rdfvalue.RDFValueArray):
  """A view specifies how an AFF4Collection is seen."""
  # Marker subclass: adds no behaviour, only a distinct type name.
class RDFValueCollectionView(rdfvalue.RDFValueArray):
  """A view specifies how an RDFValueCollection is seen."""
  # Marker subclass: adds no behaviour, only a distinct type name.
class MRUCollection(aff4.AFF4Object):
  """Stores all of the MRU files from the registry."""

  class SchemaCls(aff4.AFF4Object.SchemaCls):
    # Single MRUFolder value holding the most-recently-used entries.
    LAST_USED_FOLDER = aff4.Attribute(
        "aff4:mru", rdfvalue.MRUFolder, "The Most Recently Used files.",
        default="")
class VFSFileSymlink(aff4.AFF4Stream):
  """A Delegate object for another URN.

  Read/Seek/Tell are forwarded to the AFF4 object named by the DELEGATE
  attribute; writing is rejected.
  """
  delegate = None

  class SchemaCls(VFSFile.SchemaCls):
    DELEGATE = aff4.Attribute("aff4:delegate", rdfvalue.RDFURN,
                              "The URN of the delegate of this object.")

  def Initialize(self):
    """Open the delegate object."""
    # NOTE(review): the delegate is only opened when our own mode includes
    # "r"; Seek/Tell on a write-only handle would hit a None delegate.
    if "r" in self.mode:
      delegate = self.Get(self.Schema.DELEGATE)
      if delegate:
        self.delegate = aff4.FACTORY.Open(delegate, mode=self.mode,
                                          token=self.token, age=self.age_policy)

  def Read(self, length):
    if "r" not in self.mode:
      raise IOError("VFSFileSymlink was not opened for reading.")
    return self.delegate.Read(length)

  def Seek(self, offset, whence):
    return self.delegate.Seek(offset, whence)

  def Tell(self):
    return self.delegate.Tell()

  def Close(self, sync):
    # Close ourselves first, then the delegate (if one was opened).
    super(VFSFileSymlink, self).Close(sync=sync)
    if self.delegate:
      return self.delegate.Close(sync)

  def Write(self):
    raise IOError("VFSFileSymlink not writeable.")
class AFF4RegexNotificationRule(aff4.AFF4NotificationRule):
  """AFF4 rule that matches path to a regex and publishes an event."""

  class SchemaCls(aff4.AFF4Object.SchemaCls):
    """Schema for AFF4RegexNotificationRule."""
    CLIENT_PATH_REGEX = aff4.Attribute("aff4:change_rule/client_path_regex",
                                       rdfvalue.RDFString,
                                       "Regex to match the urn.")
    EVENT_NAME = aff4.Attribute("aff4:change_rule/event_name",
                                rdfvalue.RDFString,
                                "Event to trigger on match.")
    NOTIFY_ONLY_IF_NEW = aff4.Attribute("aff4:change_rule/notify_only_if_new",
                                        rdfvalue.RDFInteger,
                                        "If True (1), then notify only when "
                                        "the file is created for the first "
                                        "time")

  def _UpdateState(self):
    # Cache the compiled regex and event name; both attributes are mandatory.
    regex_str = self.Get(self.Schema.CLIENT_PATH_REGEX)
    if not regex_str:
      raise IOError("Regular expression not specified for the rule.")
    self.regex = re.compile(utils.SmartStr(regex_str))
    self.event_name = self.Get(self.Schema.EVENT_NAME)
    if not self.event_name:
      raise IOError("Event name not specified for the rule.")

  def Initialize(self):
    if "r" in self.mode:
      self._UpdateState()

  def OnWriteObject(self, aff4_object):
    # Lazily (re)build state if Initialize did not run in read mode.
    if not self.event_name:
      self._UpdateState()
    client_name, path = aff4_object.urn.Split(2)
    # Only paths under a valid client id namespace are considered.
    if not aff4.AFF4Object.VFSGRRClient.CLIENT_ID_RE.match(client_name):
      return
    if self.regex.match(path):
      # TODO(user): maybe add a timestamp attribute to the rule so
      # that we get notified only for the new writes after a certain
      # timestamp?
      if (self.IsAttributeSet(self.Schema.NOTIFY_ONLY_IF_NEW) and
          self.Get(self.Schema.NOTIFY_ONLY_IF_NEW)):
        # More than one stored TYPE version means the object existed before
        # this write, so suppress the notification.
        fd = aff4.FACTORY.Open(aff4_object.urn, age=aff4.ALL_TIMES,
                               token=self.token)
        stored_vals = list(fd.GetValuesForAttribute(fd.Schema.TYPE))
        if len(stored_vals) > 1:
          return
      event = rdfvalue.GrrMessage(
          name="AFF4RegexNotificationRuleMatch",
          args=aff4_object.urn.SerializeToString(),
          auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED,
          source=client_name)
      flow.Events.PublishEvent(utils.SmartStr(self.event_name), event,
                               token=self.token)
class VFSBlobImage(aff4.BlobImage, aff4.VFSFile):
  """BlobImage with VFS attributes for use in client namespace."""

  class SchemaCls(aff4.BlobImage.SchemaCls, aff4.VFSFile.SchemaCls):
    # Union of both parent schemas; no additional attributes.
    pass
class AFF4RekallProfile(aff4.AFF4MemoryStream):
  """A Rekall profile in the AFF4 namespace."""

  class SchemaCls(aff4.AFF4MemoryStream.SchemaCls):
    # The profile payload itself, stored alongside the stream contents.
    PROFILE = aff4.Attribute("aff4:profile", rdfvalue.RekallProfile,
                             "A Rekall profile.")
|
988,661 | fb60cdc8f21c3aced00eb72caa4e1109318b609c | from diagrams import Cluster, Diagram, Edge
from diagrams.aws.compute import EC2, ECS
from diagrams.aws.network import ELB, Route53
from diagrams.aws.database import RDS
graph_attr = {
    "fontsize": "45",
    "bgcolor": "white"
}


def _clustered_environment(name):
    """Draw one full environment: DNS -> LB -> web cluster -> DB cluster.

    Production and Beta Testing were previously two identical copy-pasted
    12-line blocks; this helper keeps them in sync and renders the same
    nodes and edges.
    """
    with Cluster(name):
        dns = Route53("DNS")
        lb = ELB("Load Balancer")
        with Cluster("Web Cluster"):
            web1 = EC2("Server 1")
            web2 = EC2("Server 2")  # rendered but unconnected, as before
        with Cluster("Database Cluster"):
            db_master = RDS("Master")
            db_master - [RDS("Mirror 1"),
                         RDS("Mirror 2")]
        dns >> lb
        lb >> web1 >> db_master


with Diagram("Environment-Model", show=False, graph_attr=graph_attr):
    _clustered_environment("Production")
    _clustered_environment("Beta Testing")
    with Cluster("Quality Assurance"):
        EC2("Web Server") >> RDS("Database")
    with Cluster("Development"):
        EC2("Web Server") >> RDS("Database")
|
988,662 | f0c4e781bd6e8b61f8d7181d3404e254e8db0d5a | import os
import re
from maya import cmds as m
from fxpt.fx_refsystem.com import REF_ROOT_VAR_NAME, REF_ROOT_VAR_NAME_P, isPathRelative
from fxpt.fx_refsystem.transform_handle import TransformHandle
from fxpt.fx_utils.utils import cleanupPath
from fxpt.fx_utils.utils_maya import getLongName, getShape, getParent, parentAPI
from fxpt.fx_utils.watch import watch
# (shortName, longName, niceName) triples for the dynamic attributes added to
# reference locator shapes, as expected by maya.cmds.addAttr; the *_NAMES[0]
# aliases are the short names used for getAttr/setAttr access.
ATTR_REF_FILENAME_NAMES = ('refFilename', 'refFilename', 'Reference Filename')
ATTR_REF_FILENAME = ATTR_REF_FILENAME_NAMES[0]
ATTR_REF_NODE_MESSAGE_NAMES = ('refNodeMessage', 'refNodeMessage', 'Ref Node Message')
ATTR_REF_NODE_MESSAGE = ATTR_REF_NODE_MESSAGE_NAMES[0]
ATTR_REF_SOURCE_PATH_NAMES = ('refSource', 'refSource', 'Reference Source')
ATTR_REF_SOURCE_PATH = ATTR_REF_SOURCE_PATH_NAMES[0]
# Naming conventions for the nodes/groups created by the reference system.
REF_NODE_SUFFIX = '_refRN'
REF_LOCATOR_SUFFIX = '_refLoc'
REF_INST_NAME = 'refGeom'
REF_IMPORTED_GROUP = 'refImported'
INSTANCES_SOURCE_GROUP = 'refInstancesSource'
IMPORT_SOURCE_GROUP = 'refImportSource'
class RefHandle(object):
    """Handle around one reference locator and its associated Maya nodes.

    A "reference" here is a Maya scene file represented in the current scene
    by a locator shape carrying custom attributes (see ATTR_REF_* above).
    """

    def __init__(self):
        # All fields are populated by loadFromRefLocatorShape()/createNew().
        self.refFilename = None
        self.idString = None
        self.refShortName = None
        self.refLocator = None
        self.annotation = None
        self.refNode = None
        self.instanceSource = None
        self.importSource = None
        self.active = False

    def __str__(self):
        return 'refFilename={}, idString={}, refShortName={}, refLocator=[{}], annotation=[{}],' \
               'refNode={}, instanceSource={}, importSource={}, active={}' \
            .format(
                self.refFilename,
                self.idString,
                self.refShortName,
                self.refLocator,
                self.annotation,
                self.refNode,
                self.instanceSource,
                self.importSource,
                self.active,
            )

    def loadFromRefLocatorShape(self, refLocatorShape):
        # Derive every cached field from the attributes stored on the shape.
        self.refFilename = cleanupPath(m.getAttr('{}.{}'.format(refLocatorShape, ATTR_REF_FILENAME)))
        self.idString = self.generateIdString(self.refFilename)
        self.refShortName = self.generateShortName(self.refFilename)
        self.refLocator = TransformHandle(shape=refLocatorShape)
        self.setAnnotation(self.refShortName)
        self.refNode = self.idString + REF_NODE_SUFFIX
        self.instanceSource = '|{}|{}'.format(INSTANCES_SOURCE_GROUP, self.idString)
        self.importSource = '|{}|{}'.format(IMPORT_SOURCE_GROUP, self.idString)
        self.active = self.getActiveStateFromMaya()

    def createNew(self, refFilename):
        # Create a fresh locator, add the ref attributes and load state from it.
        refLocatorTr = m.spaceLocator(p=(0, 0, 0))[0]
        refLocatorTr = getLongName(m.rename(refLocatorTr, self.generateShortName(refFilename) + REF_LOCATOR_SUFFIX))
        refLocatorSh = getShape(refLocatorTr)
        m.addAttr(
            refLocatorSh,
            at='message',
            shortName=ATTR_REF_NODE_MESSAGE_NAMES[0],
            longName=ATTR_REF_NODE_MESSAGE_NAMES[1],
            niceName=ATTR_REF_NODE_MESSAGE_NAMES[2]
        )
        m.addAttr(
            refLocatorSh,
            dt='string',
            shortName=ATTR_REF_FILENAME_NAMES[0],
            longName=ATTR_REF_FILENAME_NAMES[1],
            niceName=ATTR_REF_FILENAME_NAMES[2]
        )
        m.setAttr('{}.{}'.format(refLocatorSh, ATTR_REF_FILENAME), refFilename, typ='string')
        self.loadFromRefLocatorShape(refLocatorSh)

    # noinspection PyMethodMayBeStatic
    def generateIdString(self, refFilename):
        # Build a Maya-safe node name from the filename; relative paths keep
        # the ref-root environment variable name as their prefix.
        s = os.path.splitext(refFilename)[0]
        if isPathRelative(refFilename):
            s = REF_ROOT_VAR_NAME + s[len(REF_ROOT_VAR_NAME_P):]
        return re.sub('[^0-9a-zA-Z_]+', '__', s).lower()

    # noinspection PyMethodMayBeStatic
    def generateShortName(self, longFilename):
        # Basename without extension, e.g. "/a/b/c.ma" -> "c".
        return os.path.splitext(os.path.basename(longFilename))[0]

    def getActiveStateFromMaya(self):
        # Active means the reference node's message attr is wired to our shape.
        if not m.objExists(self.refNode):
            return False
        return m.isConnected(self.refNode + '.message', self.refLocator.shape + '.' + ATTR_REF_NODE_MESSAGE)

    def getAnnotationTransformHandle(self):
        # Returns the handle only when exactly one annotation shape exists.
        if self.isValid():
            annotationShapes = self.refLocator.getChildren(allDescendants=True, typ='annotationShape')
            if len(annotationShapes) == 1:
                return TransformHandle(shape=annotationShapes[0])
            else:
                return None

    def isValid(self):
        return (self.refLocator is not None) and (self.refLocator.exists())

    def setAnnotation(self, text):
        # Lazily (re)create the annotation before writing the label text.
        if (self.annotation is None) or (not self.annotation.exists()):
            self.annotation = self.getAnnotationTransformHandle()
            if self.annotation is None:
                self.createAnnotation()
        m.setAttr(self.annotation.shape + '.text', text, typ='string')

    def createAnnotation(self):
        if not self.isValid():
            return
        # Remove any stale annotation shapes first.
        annotationShapes = self.refLocator.getChildren(allDescendants=True, typ='annotationShape')
        for s in annotationShapes:
            m.delete(getParent(s))
        annotationSh = m.annotate(self.refLocator.transform, p=(0, -0.5, 0))
        annotationTr = getParent(annotationSh)
        annotationTr = m.parent(annotationTr, self.refLocator.transform, relative=True)[0]
        lockTransformations(annotationTr)
        self.annotation = TransformHandle(transform=getLongName(annotationTr))
        m.setAttr(self.annotation.shape + '.displayArrow', False)
        m.setAttr(self.annotation.transform + '.overrideEnabled', True)
        m.setAttr(self.annotation.transform + '.overrideDisplayType', 2)

    def activate(self):
        # Instance the shared reference source under our locator and mark the
        # handle active by connecting the ref node's message attribute.
        if self.active:
            return
        if not self.refExists():
            m.warning('{}: {}: Reference does not exists. Activation skipped.'.format(self.refLocator.shape, self.refFilename))
            return
        if not m.objExists(self.instanceSource):
            self.createRefSource()
        m.instance(
            self.instanceSource,
            name=REF_INST_NAME
        )
        inst = '|{}|{}'.format(INSTANCES_SOURCE_GROUP, REF_INST_NAME)
        m.setAttr(inst + '.overrideEnabled', True)
        m.setAttr(inst + '.overrideDisplayType', 2)
        lockTransformations(inst, visibility=True)
        parentAPI(inst, self.refLocator.transform, absolute=False)
        m.connectAttr(self.refNode + '.message', self.refLocator.shape + '.refNodeMessage', force=True)
        self.active = True

    def createRefSource(self):
        # (Re)reference the file under the shared instances source group.
        if m.objExists(self.refNode):
            m.file(
                referenceNode=self.refNode,
                removeReference=True,
                force=True
            )
        fileType = 'mayaAscii' if self.refFilename.endswith('.ma') else 'mayaBinary'
        m.file(
            self.refFilename,
            reference=True,
            typ=fileType,
            referenceNode=self.refNode,
            groupReference=True,
            groupName=self.idString,
            mergeNamespacesOnClash=False,
            namespace=self.refShortName,
            options='v=0;',
        )
        createInvisibleGroup(INSTANCES_SOURCE_GROUP)
        m.parent('|' + self.idString, '|' + INSTANCES_SOURCE_GROUP)

    def importRef(self):
        # Replace the locator with a duplicated, fully imported copy of the
        # referenced content, tagged with the source path for traceability.
        if not self.refExists():
            m.warning('{}: {}: Reference does not exists. Import skipped.'.format(self.refLocator.shape, self.refFilename))
            return
        if not m.objExists(self.importSource):
            self.createRefImportSource()
        importedRefGroup = '{}_{}'.format(REF_IMPORTED_GROUP, self.refShortName)
        obj = m.duplicate(
            self.importSource,
        )
        m.rename(obj[0], importedRefGroup)
        impGroup = '|{}|{}'.format(IMPORT_SOURCE_GROUP, importedRefGroup)
        m.addAttr(
            impGroup,
            dt='string',
            shortName=ATTR_REF_SOURCE_PATH_NAMES[0],
            longName=ATTR_REF_SOURCE_PATH_NAMES[1],
            niceName=ATTR_REF_SOURCE_PATH_NAMES[2]
        )
        m.setAttr('{}.{}'.format(impGroup, ATTR_REF_SOURCE_PATH), self.refFilename, typ='string')
        impGroup = parentAPI(impGroup, self.refLocator.transform, absolute=False)
        refLocatorParents = self.refLocator.getParents()
        refLocatorParent = refLocatorParents[0] if refLocatorParents else None
        parentAPI(impGroup, refLocatorParent)
        m.delete(self.refLocator.transform)
        # m.parent(inst, self.refLocator.transform, relative=True)

    def createRefImportSource(self):
        # Import (not reference) the file once under the shared import group.
        fileType = 'mayaAscii' if self.refFilename.endswith('.ma') else 'mayaBinary'
        m.file(
            self.refFilename,
            i=True,
            typ=fileType,
            groupReference=True,
            groupName=self.idString,
            mergeNamespacesOnClash=False,
            namespace=self.refShortName,
            options='v=0;',
        )
        createInvisibleGroup(IMPORT_SOURCE_GROUP)
        m.parent('|' + self.idString, '|' + IMPORT_SOURCE_GROUP)

    def deactivate(self):
        # Delete our instanced geometry and break the active-state connection.
        refGeom = self.refLocator.getChildren()
        for rg in refGeom:
            if stripNamespaces(rg.split('|')[-1]).startswith(REF_INST_NAME):
                m.delete(rg)
        if self.getActiveStateFromMaya():
            m.disconnectAttr(self.refNode + '.message', self.refLocator.shape + '.refNodeMessage')
        self.active = False

    def setRefFilename(self, refFilename):
        m.setAttr('{}.{}'.format(self.refLocator.shape, ATTR_REF_FILENAME), refFilename, typ='string')
        self.loadFromRefLocatorShape(self.refLocator.shape)

    def getRefFilename(self):
        return cleanupPath(self.refFilename)

    def setRefLocator(self, transformHandle):
        self.refLocator = transformHandle

    def refExists(self):
        # Environment variables in the stored path are expanded before testing.
        return os.path.exists(os.path.expandvars(self.refFilename))
def lockTransformations(transform, visibility=True):
    """Lock the translate/rotate/scale channels of *transform*.

    By default the visibility channel is locked as well.
    """
    # Generates .tx .ty .tz .rx .ry .rz .sx .sy .sz in that order.
    channels = ['.%s%s' % (kind, axis) for kind in 'trs' for axis in 'xyz']
    if visibility:
        channels.append('.v')
    for channel in channels:
        m.setAttr(transform + channel, lock=True)
def stripNamespaces(name):
    """Return *name* with any leading Maya namespace qualifiers removed."""
    # rpartition keeps everything after the last ':' (or the whole string
    # when there is no namespace separator at all).
    return name.rpartition(':')[2]
def createInvisibleGroup(name):
    """Create a hidden, visibility-locked top-level group if it is missing."""
    fullName = '|' + name
    if m.objExists(fullName):
        # Already present: nothing to do.
        return
    m.createNode('unknownTransform', name=name, skipSelect=True)
    m.setAttr('{}.v'.format(fullName), False, lock=True)
|
988,663 | 94f5c543076a5d5287b2f63da7503203e47c61da | import smtplib, ssl
import os

port = 587  # For starttls
smtp_server = "smtp.gmail.com"
sender_email = "kryonps@gmail.com"
receiver_email = "kryonps@gmail.com"
# SECURITY FIX: the account password used to be hard-coded in this file.
# Credentials must never be committed to source control; read the password
# from the environment instead (set EMAIL_PASSWORD before running).
password = os.environ["EMAIL_PASSWORD"]
SUBJECT = "subject"
TEXT = "text"
# SMTP separates headers from the body with a blank line.
message = 'Subject: {}\n\n{}'.format(SUBJECT, TEXT)
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, port) as server:
    server.ehlo()  # Can be omitted
    server.starttls(context=context)
    server.ehlo()  # Can be omitted
    server.login(sender_email, password)
    server.sendmail(sender_email, receiver_email, message)
988,664 | 313dee84b7f618388e61fc75c1a4f73e23a2c6f2 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import deal_email_data
import os
import sys#sys.argv
import dnspod
import opendkim
import statistics
import iptable
import verifyemail
import verifyweb
import apiemail
# Python 2 interactive menu: print the available actions, read one action
# name from the operator and dispatch to the matching module.  The menu and
# prompts are the tool's user interface and are intentionally kept verbatim.
reload(sys)
sys.setdefaultencoding('utf8')
print('————发邮件准备步骤,从上到下依次完成!,打错字按crtl+删除键回删!————\n生成密匙 \n删除全部域名解析\n添加全部域名解析\n检查邮件\n检查域名\nip轮询')
print('————发邮件开始步骤————\n记录和查看进度\n群发邮件\n发件统计')
print('————windows多服务器开启api步骤(从上到下依次输入即可)————\n检查api客户邮件\n导入客户邮箱到api\n开启客户邮箱api接口')
input_words = raw_input("""\n\033[1;32;40m请输入执行动作名字:""")
if input_words == '记录和查看进度':
    deal_email_data.mail_record()
elif input_words == '群发邮件':
    deal_email_data.sendemail()
elif input_words == '检查邮件':
    verifyemail.verifyemail()
elif input_words == '检查域名':
    verifyweb.verifyweb()
elif input_words == '删除全部域名解析' or input_words == '添加全部域名解析':
    dnspod.dnspod(input_words)
elif input_words == '生成密匙':
    opendkim.opendkim()
elif input_words == '发件统计':
    statistics.statistics()
elif input_words == 'ip轮询':
    iptable.iptable()
# BUG FIX: the menu advertises '检查api客户邮件' but this branch previously
# only matched '检查api邮件', so typing the advertised action did nothing.
# Accept both spellings to stay backward compatible.
elif input_words in ('检查api客户邮件', '检查api邮件'):
    verifyemail.verifyapiemail()
elif input_words == '导入客户邮箱到api':
    apiemail.msq_insert('all')
elif input_words == '开启客户邮箱api接口':
    apiemail.start_useremail_api()
class Position:
    """ A class representing latitude and longitude coordinates """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return "{}, {}".format(self.x, self.y)

    def _shift(self, dx, dy):
        # Shared helper: apply a single-unit step on either axis.
        self.x += dx
        self.y += dy

    def move_north(self):
        self._shift(0, 1)

    def move_south(self):
        self._shift(0, -1)

    def move_east(self):
        self._shift(1, 0)

    def move_west(self):
        self._shift(-1, 0)
class Person:
    """A class that represents a human being with a name and a grid position."""

    def __init__(self, fname, lname, x_coord, y_coord):
        self.first_name = fname
        self.last_name = lname
        self.position = Position(x_coord, y_coord)

    def __str__(self):
        return "Person object: {} positioned at {}".format(self.full_name(), self.position)

    def full_name(self):
        return f"{self.first_name} {self.last_name}"

    def move(self, direction):
        # Dispatch on the compass letter; unknown directions are silently
        # ignored, exactly like the original if/elif chain with no else.
        handlers = {
            'N': self.position.move_north,
            'S': self.position.move_south,
            'E': self.position.move_east,
            'W': self.position.move_west,
        }
        handler = handlers.get(direction)
        if handler is not None:
            handler()
# Demo: create two Person instances and move them independently.
betty = Person("Betty", "Li", 0, 0)
print(betty)
print(betty.first_name)
betty.move('N')
betty.move('N')
betty.move('N')
print(betty)  # Betty is now at 0, 3
natalie = Person("Natalie", "Black", 1, 1)
print(natalie)
natalie.move('W')
natalie.move('S')
print(natalie)  # Natalie is now at 0, 0
|
988,666 | 4dc03cfc9ee2b5f1d20c69176dd1b9a98a875375 | import fileinput
from functools import reduce
# Advent of Code 2020 day 5: boarding passes are binary space partitions.
# BFFFBBFRRR: row 70, column 7, seat ID 567.
# FFFBBBFRRR: row 14, column 7, seat ID 119.
# BBFFBBFRLL: row 102, column 4, seat ID 820.
seats = [line.strip() for line in fileinput.input()]  # one pass code per line
def to_bits(code, zero, one):
    """Map a two-symbol string to a list of 0/1 ints (*zero* -> 0, *one* -> 1).

    Raises KeyError if *code* contains any other character. (Was a lambda
    assigned to a name, with a parameter shadowing the builtin ``str``.)
    """
    mapping = {zero: 0, one: 1}
    return [mapping[ch] for ch in code]


def bits_to_num(bits):
    """Interpret a list of 0/1 ints as a big-endian binary number."""
    return reduce(lambda acc, b: acc * 2 + b, bits, 0)


def mk_seat_id(row, column):
    """Combine row and column into a seat ID (row * 8 + column)."""
    return row * 8 + column


def process(seat):
    """Decode a pass like 'BFFFBBFRRR' into (row, column, seat_id).

    The first 7 characters (F/B) encode the row, the last 3 (L/R) the column.
    """
    row = bits_to_num(to_bits(seat[:-3], 'F', 'B'))
    column = bits_to_num(to_bits(seat[-3:], 'L', 'R'))
    return (row, column, mk_seat_id(row, column))
# Decode every boarding pass and report the highest seat ID (part 1 answer).
seats2 = list(map(process, seats))
print(seats2)
print(max(seat_id for (row, column, seat_id) in seats2))
|
988,667 | b6be9d22de8774058871deeab53c176c92f70fe1 | import json
import boto3
from datetime import datetime
RESOURCE = boto3.resource('dynamodb', region_name='us-east-1')
def lambda_handler(event, context):
    """AWS Lambda entry point for a two-step user signup flow.

    part "1": create the user record (rejected if the username is taken).
    part "2": add profile attributes to an existing record.
    Returns the DynamoDB response dict, the string "username already used",
    or "failed" when *event* matches neither part.
    """
    print(event)
    response = "failed"
    table = RESOURCE.Table('user')
    if event['part'] == "1":
        # Key lookup instead of the previous full-table scan: 'user_name' is
        # the table's partition key (see the update_item Key below), so
        # get_item is an equivalent but O(1) uniqueness check that doesn't
        # consume scan capacity on every signup.
        existing = table.get_item(Key={'user_name': event['username']})
        if 'Item' in existing:
            return "username already used"
        # NOTE(review): the password is persisted in plaintext; it should be
        # hashed (e.g. bcrypt/argon2) before storage.
        response = table.put_item(
            Item={
                'user_name': event['username'],
                'user_password': event['password'],
                'user_fname': event['fname'],
                'user_lname': event['lname'],
            })
    if event['part'] == '2':
        response = table.update_item(
            Key={
                'user_name': event['username']
            },
            UpdateExpression="set user_age = :r, user_height = :b, user_weight = :w, user_gender = :g",
            ExpressionAttributeValues={
                ':r': event['age'],
                ':b': event['height'],
                ':w': event['weight'],
                ':g': event['gender'],
            },
            ReturnValues="UPDATED_NEW"
        )
    return response
|
988,668 | 5e65848af4adeeb5c2867f72b356c3d8c664449b | import urllib2, urllib
import re
import base64
from mod_python import util
def index(req):
    """mod_python handler: proxy a Rose-Hulman schedule lookup, return XML.

    Expects form fields: 'infestor' (user to look up), 'templar' (term code),
    'scv' (HTTP auth username) and 'immortal' (base64-encoded password).
    Python 2 code (urllib2/urllib, mod_python).
    """
    req.content_type = "text/xml"
    theurl = 'https://prodweb.rose-hulman.edu/regweb-cgi/reg-sched.pl'
    # Search Data
    userToLookup = req.form.getfirst('infestor')
    term = req.form.getfirst('templar')
    view = 'table'
    bt1 = 'ID/Username'
    id4 = ''  # Needed for form to submit
    id5 = ''  # Same as above
    # Authentication Settings: HTTP Basic auth against the registrar CGI.
    username = req.form.getfirst('scv')
    password = base64.b64decode(req.form.getfirst('immortal'))
    passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
    passman.add_password(None, theurl, username, password)
    authhandler = urllib2.HTTPBasicAuthHandler(passman)
    opener = urllib2.build_opener(authhandler)
    urllib2.install_opener(opener)
    # The Schedule Lookup Part
    postData = urllib.urlencode([('termcode', term), ('id1', userToLookup), ('view', view), ('bt1', bt1), ('id4', id4), ('id5', id5)])
    # NOTE(review): rebinding 'req' shadows the mod_python request object
    # from here on — intentional but easy to trip over.
    req = urllib2.Request(theurl, postData)
    req.add_header("Content-type", "application/x-www-form-urlencoded")
    # Get the page.
    res = urllib2.urlopen(req)
    page = res.read()
    # pageFixed = page.replace("&", " and ");
    # Search for the table rows we need. The '<A' section removes useless information from the top.
    rgx = "<TR><TD><A.+?</TR>";
    regex = re.compile(rgx, re.DOTALL)
    scheduleRows = regex.findall(page)
    outString = "<schedule>"
    for listItem in scheduleRows:
        outString += parseClassData(listItem) + "\n"
    outString += "</schedule>"
    return outString;
def parseClassData(raw):
    """Convert one raw <TR> schedule row into a <class> XML fragment."""
    # removes double tags and newlines; note the '><' removal merges adjacent
    # tags (e.g. '<TD><A' -> '<TDA') before tags are collapsed to '|'.
    noDoubles = re.sub('><', '', str.replace(raw, '\n', ''));
    # NOTE(review): the inner replace strips every space — this looks like a
    # mangled '&nbsp;' replacement; confirm against the original source.
    clean = re.sub('<.+?>', '|', str.replace(str.replace(noDoubles, ' ', ''), '&', ' and '));
    detailsList = clean.split('|');
    roomList = detailsList[8].split(':');  # For classes that meet on a weird schedule
    # detailsList indices: 1 = Class Number, 3 = Class Name, 4 = Instructer,
    # 8 = Days/Hour/Room, 10 = Final Details.
    # meetingDetails indices: 0 = Days, 1 = Hour(s), 2 = Room.
    xml = '<number>' + detailsList[1] + '</number>';
    xml += '<name>' + detailsList[3] + '</name>';
    xml += '<instructer>' + detailsList[4] + '</instructer>';
    for meeting in roomList:  # Classes that meet on a weird schedule
        meetingDetails = meeting.split('/');
        if (len(meetingDetails) > 1):
            xml += '<meeting>'
            xml += '<days>' + meetingDetails[0] + '</days>';
            xml += '<hours>' + meetingDetails[1] + '</hours>';
            xml += '<room>' + meetingDetails[2] + '</room>';
            xml += '</meeting>'
        else:
            # Single-segment meeting: the same value is echoed into all three
            # fields — looks like an intentional fallback, TODO confirm.
            xml += '<meeting>'
            xml += '<days>' + meetingDetails[0] + '</days>';
            xml += '<hours>' + meetingDetails[0] + '</hours>';
            xml += '<room>' + meetingDetails[0] + '</room>';
            xml += '</meeting>'
    xml += '<finalData>' + detailsList[10] + ' </finalData>';
    return '<class>' + xml + '</class>';
988,669 | 3a9ddaa0d9b021e65beb1a4bf4cc9c22fe0df57e | from django.views.generic import ListView, TemplateView
from django.views.generic.edit import CreateView
from django.core.urlresolvers import reverse
from .models import Room, Message
class CreateRoom(CreateView):
    """Users can create chat rooms"""
    model = Room
    fields = ('name',)

    def get_success_url(self):
        # Bug fix: redirect to the room that was just created. CreateView
        # stores the saved instance on self.object; the previous
        # Room.objects.first() lookup could redirect to an unrelated room
        # depending on the model's default ordering.
        room = self.object
        return reverse('chat_room', kwargs={'room': room.name, 'pk': room.pk})
class ChatRoom(TemplateView):
    """Chat room users can send messages"""
    template_name = 'main/chatroom.html'

    def get_context_data(self, **kwargs):
        # Expose the room (looked up by the 'room' name in the URL) and up to
        # 50 of its messages. NOTE(review): the 'messages' context key shadows
        # Django's messages-framework context variable — confirm intended.
        context = super().get_context_data(**kwargs)
        context['messages'] = Message.objects.filter(room=self.kwargs['pk'])[:50]
        context['room'] = Room.objects.get(name=self.kwargs['room'])
        return context


class Rooms(ListView):
    """Page where users can chat"""
    # Plain ListView over all rooms; template/context names are Django defaults.
    model = Room
|
988,670 | 39a820b59010fc0accdf3a1b3e04b58f2a2613fb | import datetime
from twisted.internet import reactor
from twisted.internet.defer import ensureDeferred
from twisted.trial.unittest import TestCase
from twisted.web.client import Agent, HTTPConnectionPool
from .. import treq as gh_treq
from .. import sansio
import treq._utils
class TwistedPluginTestCase(TestCase):
    """Trial tests for the Twisted/treq gidgethub backend.

    NOTE: test__request and test_get hit the live GitHub API (network
    required); cleanup closes treq's global connection pool between tests.
    """

    @staticmethod
    def create_cleanup(gh):
        def cleanup(_):
            # We do this just to shut up Twisted.
            pool = treq._utils.get_global_pool()
            pool.closeCachedConnections()
            # We need to sleep to let the connections hang up.
            return ensureDeferred(gh.sleep(0.5))
        return cleanup

    def test_sleep(self):
        # gh.sleep(delay) must wait at least `delay` seconds of wall time.
        delay = 1
        start = datetime.datetime.now()
        gh = gh_treq.GitHubAPI("gidgethub")
        def test_done(ignored):
            stop = datetime.datetime.now()
            self.assertTrue((stop - start) > datetime.timedelta(seconds=delay))
        d = ensureDeferred(gh.sleep(delay))
        d.addCallback(test_done)
        return d

    def test__request(self):
        # Low-level _request should yield a response sansio can decode, and
        # the rate_limit payload should contain a "rate" key.
        request_headers = sansio.create_headers("gidgethub")
        gh = gh_treq.GitHubAPI("gidgethub")
        d = ensureDeferred(
            gh._request(
                "GET", "https://api.github.com/rate_limit", request_headers,
            )
        )
        def test_done(response):
            data, rate_limit, _ = sansio.decipher_response(*response)
            self.assertIn("rate", data)
        d.addCallback(test_done)
        d.addCallback(self.create_cleanup(gh))
        return d

    def test_get(self):
        # High-level getitem wraps _request plus response decoding.
        gh = gh_treq.GitHubAPI("gidgethub")
        d = ensureDeferred(gh.getitem("/rate_limit"))
        def test_done(response):
            self.assertIn("rate", response)
        d.addCallback(test_done)
        d.addCallback(self.create_cleanup(gh))
        return d
|
988,671 | dcdc719a4f187d550b516b2b4b653f35e1c43935 | import logging
from datetime import datetime, date, timedelta
from pathlib import Path
import requests
import os
from io import BytesIO
import zipfile
import csv
from pathlib import Path
requests.packages.urllib3.disable_warnings()
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'
class Gesundheitsministerium:
    """Downloader for the health-ministry COVID data-zip snapshots.

    Every download is extracted into a timestamp-named folder under
    ``data_root_path``; the (deprecated) helper methods walk those folders
    to locate the snapshot matching a reference time or calendar day.
    """

    def __init__(self, url, path, reference_time):
        # reference_time is an "HH:MM" string; only hour/minute are used.
        self.url = url
        self.data_root_path = Path(path)
        self.reference_time = datetime.strptime(reference_time, "%H:%M")

    def download_and_unzip(self):
        """Download the zip and extract it into a new timestamped folder."""
        spec_path = self.data_root_path / datetime.now().strftime('%Y%m%d%H%M%S')
        if not os.path.exists(spec_path):
            os.mkdir(spec_path)
        # verify=False plus the relaxed ciphers configured at module level:
        # the endpoint's TLS setup is non-standard.
        r = requests.get(self.url, verify=False, stream=True)
        with zipfile.ZipFile(BytesIO(r.content), 'r') as zip_file_object:
            zip_file_object.extractall(spec_path)
        logging.info('Downloaded from %s', self.url)
        logging.info('Saved to %s', spec_path)
        return spec_path

    def get_last_path(self):
        """
        deprecated
        Seems like a bad idea
        """
        # Folder names are timestamps, so a reverse lexical sort puts the
        # most recent download first.
        folders = os.listdir(self.data_root_path)
        folders.sort(reverse=True)
        spec_path = self.data_root_path / folders[0]
        logging.info('Last download folder was %s', spec_path)
        return spec_path

    def find_last_folder_with_ref_time(self, folder_before, date_before=datetime.today(), filename='Epikurve.csv'): # - timedelta(days=
        """
        deprecated
        Seems like a bad idea
        """
        # Walk newest-to-oldest; once past folder_before, return the first
        # folder whose first CSV data row is at or before the reference time.
        # NOTE(review): date_before default is evaluated once at import time;
        # 'ts' is unbound if the CSV contains only a header row.
        folders = os.listdir(self.data_root_path)
        folders.sort(reverse=True)
        reached = False
        ref_time = date_before.replace(hour=self.reference_time.hour, minute=self.reference_time.minute)
        __folder_before = str(folder_before).split('/')[-1]
        for folder in folders:
            if reached:
                path_csv = self.data_root_path / folder / filename
                with open(path_csv) as f:
                    first = True
                    for x in csv.reader(f, delimiter=';'):
                        if first:
                            first = False
                            continue
                        # First data row's timestamp is the snapshot time.
                        ts = datetime.strptime(x[2], '%Y-%m-%dT%H:%M:%S')
                        break
                if ts <= ref_time:
                    return folder
            else:
                if folder == __folder_before:
                    reached = True

    def get_first_of_day(self, folder_before=None, day=datetime.today(), filename='Epikurve.csv'):
        """
        deprecated
        Seems like a bad idea
        """
        # Same walk as above but matching on calendar day only.
        # NOTE(review): 'reached' starts True when folder_before is given
        # (so every folder is checked) and False otherwise, after which the
        # else-branch compares against str(None) — looks suspect; confirm.
        folders = os.listdir(self.data_root_path)
        folders.sort(reverse=True)
        reached = folder_before is not None
        __folder_before = str(folder_before).split('/')[-1]
        for folder in folders:
            if reached:
                path_csv = self.data_root_path / folder / filename
                with open(path_csv) as f:
                    first = True
                    for x in csv.reader(f, delimiter=';'):
                        if first:
                            first = False
                            continue
                        ts = datetime.strptime(x[2], '%Y-%m-%dT%H:%M:%S')
                        break
                if ts.date() <= day.date():
                    return folder
            else:
                if folder == __folder_before:
                    reached = True
988,672 | ef484f852e548d5cabef86970a65d4ea10a28c36 | # class Mobile:
# def __init__(self):
# print("Mobile Constructor Called")
# realme=Mobile()
class Mobile:
    """A phone whose model name is set to a default at construction time."""

    def __init__(self):
        # Every instance starts with its own 'model' attribute.
        self.model = "Realme X"

    def show_model(self):
        # Print (not return) the current model name.
        print(self.model)
# Demo: each instance gets its own 'model' attribute from __init__.
realme=Mobile()
redmi=Mobile()
oneplus=Mobile()
print(realme.model)
print(redmi.model)
print(oneplus.model)
print()
# Rebinding one instance's attribute does not affect the other instances.
redmi.model="Redmi 9pro"
print(realme.model)
print(redmi.model)
print(oneplus.model)
988,673 | 735a2627301fdbfe6cfbe6450725b2113fe802eb | from collections import deque
def add_bomb_to_pouch(bombs, val, bomb_pouch):
    """Credit one bomb of every type whose cost equals *val*; return the pouch.

    Mutates and returns *bomb_pouch* (cost values in *bombs* are unique in
    practice, so at most one type matches).
    """
    matching = (name for name, cost in bombs.items() if cost == val)
    for name in matching:
        bomb_pouch[name] += 1
    return bomb_pouch
def bomb_pouch_is_full(bomb_pouch):
    """Return True once every bomb type has reached the target count of 3.

    Idiom fix: replaces the `if cond: return True / return False` pattern
    with a direct boolean expression over the three required keys.
    """
    return all(
        bomb_pouch[kind] >= 3
        for kind in ('Datura Bombs', 'Cherry Bombs', 'Smoke Decoy Bombs')
    )
def print_output(bomb_effects, bomb_casings, bomb_pouch, is_full):
    """Build the end-of-game report as a single newline-terminated string."""
    lines = []
    if is_full:
        lines.append("Bene! You have successfully filled the bomb pouch!")
    else:
        lines.append("You don't have enough materials to fill the bomb pouch.")
    if bomb_effects:
        lines.append("Bomb Effects: {}".format(', '.join(str(el) for el in bomb_effects)))
    else:
        lines.append("Bomb Effects: empty")
    if bomb_casings:
        lines.append("Bomb Casings: {}".format(', '.join(str(el) for el in bomb_casings)))
    else:
        lines.append("Bomb Casings: empty")
    # Pouch counts are reported in alphabetical key order.
    for kind, count in sorted(bomb_pouch.items()):
        lines.append(f"{kind}: {count}")
    return "\n".join(lines) + "\n"
# Read the two material queues: effects are consumed from the front,
# casings from the back.
bomb_effects = deque([int(x) for x in input().split(', ')])
bomb_casings = [int(x) for x in input().split(', ')]
# Crafting table: an effect + casing pair makes the bomb whose cost it sums to.
bombs = {'Datura Bombs':40, 'Cherry Bombs': 60, 'Smoke Decoy Bombs': 120}
bomb_pouch ={"Datura Bombs":0, 'Cherry Bombs': 0, 'Smoke Decoy Bombs': 0}
is_full = False
while bomb_effects:
    current_bomb_effect = bomb_effects[0]
    if bomb_casings:
        current_bomb_casings = bomb_casings[-1]
        if current_bomb_casings+current_bomb_effect in bombs.values():
            # Craft: consume one effect (front) and one casing (back).
            bomb_pouch = add_bomb_to_pouch(bombs, current_bomb_casings+current_bomb_effect,bomb_pouch)
            bomb_effects.popleft()
            bomb_casings.pop()
            if bomb_pouch_is_full(bomb_pouch):
                is_full = True
                break
        else:
            # No match: weaken the last casing and retry the same pair.
            # NOTE(review): the casing shrinks by 5 with no lower bound, so a
            # sum that never lands on a bomb value loops forever — presumably
            # ruled out by the task's input constraints; verify.
            bomb_casings[-1]-=5
    else:
        break
print(print_output(bomb_effects, bomb_casings, bomb_pouch, is_full))
|
988,674 | 3c4cf1a538e26b03e44a78ef2c8cdcd86ccb9273 | # Generated by Django 3.2.7 on 2021-09-18 03:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for 'main'.

    Splits the servery weekend-opening flags into per-meal booleans:
    renames open_saturday/open_sunday and adds the missing lunch/breakfast/
    dinner fields with a default of True.
    """

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='servery',
            old_name='open_saturday',
            new_name='open_saturday_breakfast',
        ),
        migrations.RenameField(
            model_name='servery',
            old_name='open_sunday',
            new_name='open_saturday_dinner',
        ),
        migrations.AddField(
            model_name='servery',
            name='open_saturday_lunch',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='servery',
            name='open_sunday_breakfast',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='servery',
            name='open_sunday_dinner',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='servery',
            name='open_sunday_lunch',
            field=models.BooleanField(default=True),
        ),
    ]
|
988,675 | d9b1b3bd96312b4de4ebedbbf03509f1bba171c2 | import json
import os
# In-memory word-count store keyed by category, persisted as a single JSON
# file at `defaultDatabasePath`.
database = {}
defaultDatabasePath = 'data'


def load():
    """Populate the module-level database from disk, if the file exists."""
    global database
    if os.path.exists(defaultDatabasePath):
        with open(defaultDatabasePath, 'r', encoding='utf-8') as f:
            database = json.load(f)


def dump():
    """Write the module-level database to disk as JSON."""
    global database
    with open(defaultDatabasePath, 'w', encoding='utf-8') as f:
        json.dump(database, f)


def addCountedWords(resault, category):
    """Merge a {word: count} mapping into the given category's totals."""
    global database
    if category not in database:
        # First data for this category: adopt the mapping directly
        # (note: this stores a reference to the caller's dict, as before).
        database[category] = resault
        return
    bucket = database[category]
    for word in resault:
        bucket[word] = bucket.get(word, 0) + resault[word]
988,676 | f53716a199e5b3724663f6cc7a612a708540656e | import re
import pprint
# Markers for the start of an order and the price pattern
# (original: 标记订单起始、价格 — "order delivered" line starts a receipt).
TITLE_reg = re.compile(r'订单已送达')
PRICE_reg = re.compile(r'^-?¥?(?P<price>[0-9]{0,3}(?:\.[0-9]{1,2})?)$')
# Keywords marking discount and surcharge lines
# (original: 标记折扣、附加费关键字 — red packet / threshold / instant discounts;
# delivery fee / packaging fee surcharges).
DISCOUNT_wds = ['红包', '满减', '立减']
ADDITION_wds = ['配送费', '包装费']
def ocr():
    """Return canned OCR output for development/testing.

    Mimics the shape of a Baidu-OCR style response and returns only the
    'words_result' list of {'words': <line text>} dicts, as a real
    integration would.
    """
    resp = {
        'words_result': [
            {'words': '23:15'}, {'words': '19.0K/sH令〔50'},
            {'words': '订单已送达'}, {'words': '阿甘锅盔(合生汇店)'},
            {'words': 'R)风雨长歌'}, {'words': '酸辣粉'}, {'words': '¥18'},
            {'words': '发)发起人石敢当'}, {'words': '三姐妹套餐(牛肉+梅干菜+'},
            {'words': '¥42'}, {'words': '酸辣粉'},
            {'words': '¥18'}, {'words': '德谟克利希'},
            {'words': '三姐妹套餐(牛肉+梅干菜+'}, {'words': '¥42'},
            {'words': '其他'}, {'words': '店 铺满减'}, {'words': '20'},
            {'words': '首次光顾立减'}, {'words': '-¥4'},
            {'words': '店铺红包'}, {'words': '¥6'}, {'words': '配送费'},
            {'words': '¥5.4'}, {'words': '包装费'}, {'words': '¥4'},
            {'words': '③联系商家'}, {'words': '实付¥994'}, {'words': '配送信息'},
            {'words': '送达时间'}, {'words': '尽快送达'}
        ],
        'log_id': 1302606979516071936,
        'words_result_num': 31}
    return resp['words_result']
# ---- OCR-line classifiers -------------------------------------------------

def is_title(word):
    """True if this OCR line marks the start of an order (delivery notice)."""
    # search() instead of bool(findall()): same truth value, no list built.
    return TITLE_reg.search(word) is not None


def extract_price(word):
    """Parse a price line like '¥18' or '-¥4' into a float.

    Returns None when the line is not a price (no match, or the matched
    amount is empty, e.g. a bare '¥'). The old version returned an empty
    list in the no-match case; callers only truth-test the result, so
    returning None uniformly is backward compatible.
    """
    match = PRICE_reg.match(word)
    if match is None:
        return None
    try:
        return float(match.group('price'))
    except ValueError:  # the regex allows an empty amount
        return None


def is_discount(wd):
    """True if the line mentions any discount keyword (红包/满减/立减)."""
    return any(keyword in wd for keyword in DISCOUNT_wds)


def is_addition(wd):
    """True if the line mentions any surcharge keyword (配送费/包装费)."""
    return any(keyword in wd for keyword in ADDITION_wds)
# Classify the buffered line(s) preceding a price: discount, surcharge,
# continuation, or a guest name (original: 判msg列表中最后一个元素的信息).
def extract_attr(msg):
    """Pop and classify the most recent non-price line(s) from *msg*.

    Returns 'discount'/'addition' for fee lines, the guest-name string
    (second pop) for an order line, 'pre' when only one item was buffered
    (the price continues the previous guest's order), or None when the
    buffer is empty.
    """
    if not msg:
        return
    try:
        # Last buffered line is the label right before the price
        # (e.g. a dish name or a fee keyword).
        attr = msg.pop()
        if is_discount(attr):
            return 'discount'
        elif is_addition(attr):
            return 'addition'
        # Otherwise the next line back is the guest's name.
        return msg.pop()
    except IndexError:
        # Only one buffered item: treat the price as a continuation.
        return 'pre'
# Build and split the bill from the OCR lines (original: 生成账单).
def gen_accounts(words):
    """Split a delivery-app receipt among the guests.

    Walks the OCR lines, attributing each price to the preceding guest-name
    line, accumulating discounts and surcharges separately, then pro-rates
    the amount actually paid over each guest's original order total.
    Returns (and pretty-prints) the accounts dict.
    """
    order_msg = []      # buffer of non-price lines since the last price
    orders = []         # per-guest {'author', 'order_price'} entries
    title = None        # shop name: the line right after the delivery marker
    discount = 0
    addition = 0
    total = 0           # sum of every matched price, fees included
    title_flag = False
    for word in words:
        wd = word.get('words')
        if not wd:
            continue
        # Capture the shop name for this order (原文: 提取本次订单店名).
        if is_title(wd):
            title_flag = True
            continue
        elif title_flag:
            title = wd
            title_flag = False
            continue
        # If this line is an amount, classify it by the last buffered line:
        # surcharge, discount, continuation, or a guest's order
        # (原文: 如果检测是金额,就拿order_msg list中最后一个数据来判断).
        price = extract_price(wd)
        if price and order_msg:
            attr = extract_attr(order_msg)
            if not attr:
                continue
            if 'discount' == attr:
                discount += price
            elif 'addition' == attr:
                addition += price
            elif 'pre' == attr and orders:
                orders[-1]['order_price'] += price
            else:
                orders.append({'author': attr, 'order_price': price})
            total += price
            order_msg.clear()
        else:
            order_msg.append(wd)
    for order in orders:
        # Amount actually paid (实付款): total counted each discount once as
        # a positive price, so it is subtracted twice — once to cancel that
        # and once to apply the discount.
        total_actual = total - discount * 2
        # Original goods total + surcharges (商品原价+附加费).
        total_order = total - discount - addition
        order_price = order['order_price']
        # Pro-rate: each guest pays their share of the discounted total.
        actual_price = total_actual / total_order * order_price
        order['actual_price'] = '%.2f' % actual_price
    accounts = {
        "store": title,
        "total_order": total_order,
        "total_actual": total_actual,
        "addition": addition,
        "discount": discount,
        "guests": orders
    }
    pprint.pprint(accounts)
    return accounts
if __name__ == '__main__':
    # Demo entry point: run the splitter against the canned OCR output.
    words = ocr()
    gen_accounts(words)
|
988,677 | 731323fdca54e8b6fc0b7156dc54bac36fcc3a4d | # Parser for hosts file
import re
class ParseError(Exception):
    """Raised when a line cannot be parsed as a hosts(5) entry."""
    pass


entry_regex = re.compile(r"""^
    \s*
    (?:
        ([0-9a-fA-F.:]+)\s+ # rough IP
        ( # All HostNames
            (?:
                [a-zA-Z0-9:_.-]+
                \s? # For the case when hostname is followed by newline
            )+
        )
    )?
    \s* # When you have unlimited spaces!
    (\#.*)? # Comments always come last if there's any other content.
    $""", re.VERBOSE)


def parse_line(line):
    """Parse one hosts-file line into (ip, hostnames, comment).

    Any component may be None (blank line, comment-only line, ...).
    Hostnames are returned as a tuple of strings.
    Raises ParseError (now with a descriptive message; the old code raised
    it bare) when the line doesn't match the hosts grammar.
    """
    match = entry_regex.match(line)
    if not match:
        raise ParseError('invalid hosts line: {!r}'.format(line))
    # Bind the three groups once instead of calling match.groups() per field.
    ip, hosts, comment = match.groups()
    return (ip, tuple(hosts.split()) if hosts else None, comment)
def parse_file(f):
    '''Accepts a file-like object with data in hosts format, and yields (ip, (hosts,), comment) tuples'''
    # Lazily parse line by line; a ParseError propagates to the consumer
    # at the point of iteration.
    for line in f:
        yield parse_line(line)
def format_line(ip, hosts, comment):
    """Render (ip, hosts, comment) back into a single hosts-file line.

    Falsy components are omitted; the IP/hosts section and the comment are
    separated by a tab when both are present.
    """
    parts = []
    if ip:
        parts.append(ip + "\t" + " ".join(hosts))
    if comment:
        parts.append(comment)
    return "\t".join(parts)
|
# Deliberate no-op stand-ins for the stdlib `bisect` module API: every
# insert simply appends and every search returns 1.
# NOTE(review): presumably a stub for a restricted runtime or a test double
# — do not replace with real binary search without checking what callers
# actually rely on.
def insort_right(a, x, lo=0, hi=0):
    a.append(x)

def insort_left(a, x, lo=0, hi=0):
    a.append(x)

def insort(a, x, lo=0, hi=0):
    a.append(x)

def bisect_right(a, x, lo=0, hi=0):
    return 1

def bisect_left(a, x, lo=0, hi=0):
    return 1

def bisect(a, x, lo=0, hi=0):
    return 1
|
988,679 | 7b2c48a5fb5e232fc4017949dbee42ddd850c214 | from rest_framework import generics
from .models import listItem
from .serializers import TaskSerializer
class TasksList(generics.ListCreateAPIView):
    """
    List all tasks, or create a new task.
    """
    # GET -> list every listItem row; POST -> create one via TaskSerializer.
    queryset = listItem.objects.all()
    serializer_class = TaskSerializer


class TaskDetail(generics.RetrieveUpdateDestroyAPIView):
    """
    Retrieve, update or delete a Task.
    """
    # GET/PUT/PATCH/DELETE on a single listItem addressed by pk.
    queryset = listItem.objects.all()
    serializer_class = TaskSerializer
988,680 | 94197d701e2906fdb5ccafff053cf047d29ef113 | import flask_login
from app import app
# Single-user credential store built from the Flask app config.
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
username = app.config['USERNAME']
password = app.config['PASSWORD']
users = {username: {'pw': password}}


class User(flask_login.UserMixin):
    """Minimal user object; the identity is the email/username string."""
    pass


@login_manager.user_loader
def user_loader(email):
    # Session-based loading: map the stored id back to a User, or None
    # (an implicit None tells flask-login the session user is invalid).
    if email not in users:
        return
    user = User()
    user.id = email
    return user


@login_manager.request_loader
def request_loader(request):
    # Per-request loading from POSTed form credentials.
    email = request.form.get('email')
    if email not in users:
        return
    user = User()
    user.id = email
    # DO NOT ever store passwords in plaintext and always compare password
    # hashes using constant-time comparison!
    user.is_authenticated = request.form['pw'] == users[email]['pw']
    return user
|
988,681 | b81978781e835a5d81417ea7cdb1c3aba73323fd | import numpy as n
from brainpipe.feature.brainfir import fir_filt, fir_order, filtvec
from scipy.signal import hilbert
__all__ = [
'phase'
]
####################################################################
# - Get the phase either for an array or a matrix :
####################################################################
# def phase(x, N, fs, fc, winCenter=None, winLength=0, cycle=3):
# # If no center frequency vec is specified, then it will return all the timepoints :
# if winCenter is None : winCenter = n.arange(0, N, 1)
# else : winCenter = n.array(winCenter).astype(int)
# # Get size elements :
# x = n.matrix(x)
# fc = n.array(fc)
# ndim = len(x.shape)
# # Check size :
# if ndim == 1:
# npts, ncol = len(x), 1
# elif ndim == 2:
# rdim = n.arange(0,len(x.shape),1)[n.array(x.shape) == N]
# if len(rdim) != 0 : rdim = rdim[0]
# else: raise ValueError("None of x dimendion is "+str(N)+" length. [x] = "+str(x.shape))
# npts, ncol = x.shape[rdim], x.shape[1-rdim]
# if x.shape[0] != npts: x = x.T
# # Get the filter order :
# fOrder = fir_order(fs, npts, fc[0], cycle = cycle)
# # Compute the phase for each colums :
# xF = n.zeros((npts,ncol))
# for k in range(0,ncol):
# xF[:,k] = n.angle(hilbert(fir_filt(n.array(x[:,k]).T, fs, fc, fOrder)))
# # Define the window vector :
# winVec = n.vstack((winCenter-winLength/2,winCenter+winLength/2)).astype(int)
# nbWin = winVec.shape[1]
# # Bin the phase :
# if winLength == 0:
# xShape = xF[list(winCenter),:]
# elif winLength != 0:
# xShape = n.zeros((nbWin,ncol))
# for k in range(0,nbWin):
# print(winVec[0,k],winVec[1,k])
# xShape[k,:] = n.mean(xF[ winVec[0,k]:winVec[1,k], : ],0)
# return xShape
def phase(x, fs, fc, window=None, winCenter=None, winLength=0, **kwargs):
    """Extract (optionally time-binned) instantaneous phase of *x*.

    x        : 2-D array, unpacked below as (timeL, trials).
    fs       : sampling frequency, forwarded to filtvec.
    fc       : one (low, high) tuple or a list of them.
    window   : list of (start, stop) sample tuples for binning, or None.
    winCenter/winLength : alternative way to build *window* from centers.
    Returns the raw phase array from filtvec, or an (nfc, nwindows, trials)
    array of per-window means when binning is requested.
    """
    # -----------------------------------------------------------------------
    # Check input arguments :
    # -----------------------------------------------------------------------
    timeL, trials = x.shape
    # Number of frequencies :
    if type(fc)==tuple:fc=[fc]
    nfc = len(fc)
    # Phase bining (or not :D)
    # NOTE(review): if winLength is set while winCenter is None this raises
    # a TypeError; winLength/2 is a float under Py3 but is cast to int below.
    if (winCenter is not None) or (winLength!=0):
        window = [(k-winLength/2,k+winLength/2) for k in winCenter]
    if window is not None:
        window = [tuple(n.array((k[0],k[1])).astype(int)) for k in window]
    # -----------------------------------------------------------------------
    # Extract phase :
    # -----------------------------------------------------------------------
    xF = filtvec(x, fs, fc, 'phase', **kwargs)
    # -----------------------------------------------------------------------
    # Bin the phase : mean over the time axis inside each window.
    # -----------------------------------------------------------------------
    if (window is None) : xShape = xF
    else:
        nbWin = len(window)
        xShape = n.zeros((nfc, nbWin, trials))
        for k in range(0,nbWin):
            xShape[:,k,:] = n.mean(xF[ :, window[k][0]:window[k][1], : ],1)
    return xShape
988,682 | db9678b475476ca0ee7dd7e84e6f656ee03d12dc | from .models import Task
from rest_framework import serializers
class TaskSerializers(serializers.ModelSerializer):
    """DRF serializer exposing the listed Task fields for the API."""
    class Meta:
        model = Task
        fields = ("id", "task_name", "task_desc", "is_completed", "date_created")
988,683 | 1961a82599c124a22703f306a6ec648cda076685 | # https://docs.python.org/ja/3/library/socket.html
import socket
# Minimal blocking TCP server: accepts one client at a time, logs whatever
# it receives, and replies with a fixed UTF-8 greeting per message.
HOST = '127.0.0.1'
PORT = 31415
with socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM) as sock:
    sock.bind((HOST, PORT))
    sock.listen(1)
    while True:
        conn, client_addr = sock.accept()
        with conn:
            print(f'Connected by {client_addr}')
            while True:
                binary_data = conn.recv(1024)
                if not binary_data: break  # empty read: client closed
                data = binary_data.decode('utf-8')
                print(f'Received from {client_addr}: {data}')
                message = 'こんにちは、世界 🌏'
                binary_message = message.encode('utf-8')
                conn.sendall(binary_message)
|
988,684 | 054049b35e0969b533e8735f59944f71784297c7 | #!/usr/bin/python3
'''
First Common Ancestor:
Design an algorithm and write code to find the first common ancestor of two nodes in a binary tree.
Avoid storing additional nodes in a data structure.
NOTE: This is not necessarily a binary search tree.
'''
|
class Initializer:
    """Placeholder class; no behavior implemented yet."""
    pass
|
988,686 | a940765af20feb7ab221bd583c4876d5b86ed474 | palabra = "Un texto de varias palabras"
# Commented-out indexing / replace experiments kept from the lesson:
# print(palabra[0])
# print(palabra[1])
# print(palabra.replace("e","o",1))
#print(palabra[0])
# Slices: [::-1] steps backwards through the string, i.e. reverses it.
#slices
print(palabra[::-1])
# Exercise (translated): write a function that, given a text, tells whether
# it is a palindrome. Test cases follow in the original Spanish:
#Crear funcion que permita ingresando un texto, saber si algo es palindromo
#Ana
#Luz azul
#Anita lava la tina
988,687 | e22382340446cbb36db70ed82fc86e473a83a769 | import flask_wtf
from wtforms import StringField, validators, SubmitField, IntegerField
from larigira.formutils import AutocompleteStringField
class Form(flask_wtf.Form):
    """Form for adding a 'random songs from a directory' audio source."""
    nick = StringField('Audio nick', validators=[validators.required()],
                       description='A simple name to recognize this audio')
    path = AutocompleteStringField('dl-suggested-dirs',
                                   'Path', validators=[validators.required()],
                                   description='Full path to source directory')
    # Bug fix: the two implicitly-concatenated description literals had no
    # separating space and rendered as "...pickedfrom this dir...".
    howmany = IntegerField('Number', validators=[validators.optional()],
                           default=1,
                           description='How many songs to be picked '
                           'from this dir; defaults to 1')
    submit = SubmitField('Submit')

    def populate_from_audiospec(self, audiospec):
        """Fill the form fields from a stored audiospec dict; missing
        'howmany' falls back to 1."""
        if 'nick' in audiospec:
            self.nick.data = audiospec['nick']
        if 'paths' in audiospec:
            self.path.data = audiospec['paths'][0]
        if 'howmany' in audiospec:
            self.howmany.data = audiospec['howmany']
        else:
            self.howmany.data = 1
def receive(form):
    """Translate the submitted form into a 'randomdir' audio-spec dict."""
    howmany = form.howmany.data or 1  # optional field: default to one song
    return {
        'kind': 'randomdir',
        'nick': form.nick.data,
        'paths': [form.path.data],
        'howmany': howmany,
    }
|
class Insertion(object):
    """Insertion candidate"""

    def __init__(self, arc, start_position, end_position, hval=None):
        self.arc = arc
        self.hval = hval
        self.add_cost = 0
        # This is position of the first node of the arc in the node list
        self.start_position = start_position
        self.end_position = end_position

    def __repr__(self):
        # Bug fix: the old code called str(self.arc[0], self.arc[1]), which
        # is str(object, encoding) and raises TypeError. Render the arc's
        # endpoint pair instead.
        return str((self.arc[0], self.arc[1]))
class cal3():
    """Simple-interest calculator; prints its inputs on construction."""
    # Class-level defaults (shadowed by the instance attributes set below).
    p = 0
    t = 0
    r = 0

    def __init__(self, p, t, r):
        self.p = p
        self.t = t
        self.r = r
        print('The principal is', p)
        print('The time period is', t)
        print('The rate of interest is', r)

    def calinterst(self):
        # Simple interest = principal * time * rate / 100; printed, not returned.
        interst = (self.p * self.t * self.r) / 100
        print("The Simple Interest is : ", interst)
# Demo: construct one calculator and print its simple interest (8*6*8/100).
display = cal3(8,6,8)
display.calinterst()
|
988,690 | 7d7383b300f8a035749394262e1f5434c0cf2f86 | import typing
import inspect
from .model import CogCommandObject, CogSubcommandObject
from .utils import manage_commands
def cog_slash(
    *,
    name: str = None,
    description: str = None,
    guild_ids: typing.List[int] = None,
    options: typing.List[dict] = None,
    connector: dict = None
):
    """
    Decorator for Cog to add slash command.\n
    Almost same as :func:`.client.SlashCommand.slash`.
    Example:
    .. code-block:: python
        class ExampleCog(commands.Cog):
            def __init__(self, bot):
                self.bot = bot
            @cog_ext.cog_slash(name="ping")
            async def ping(self, ctx: SlashContext):
                await ctx.send(content="Pong!")
    :param name: Name of the slash command. Default name of the coroutine.
    :type name: str
    :param description: Description of the slash command. Default ``None``.
    :type description: str
    :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.
    :type guild_ids: List[int]
    :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.
    :type options: List[dict]
    :param connector: Kwargs connector for the command. Default ``None``.
    :type connector: dict
    """
    def wrapper(cmd):
        # Fall back to the coroutine's docstring for the description, and
        # auto-generate the option list from its signature if none was given.
        desc = description or inspect.getdoc(cmd)
        if options is None:
            opts = manage_commands.generate_options(cmd, desc, connector)
        else:
            opts = options
        _cmd = {
            "func": cmd,
            "description": desc,
            "guild_ids": guild_ids,
            "api_options": opts,
            "connector": connector,
            "has_subcommands": False,
        }
        # Command name defaults to the wrapped coroutine's name.
        return CogCommandObject(name or cmd.__name__, _cmd)
    return wrapper
def cog_subcommand(
    *,
    base,
    subcommand_group=None,
    name=None,
    description: str = None,
    base_description: str = None,
    base_desc: str = None,
    subcommand_group_description: str = None,
    sub_group_desc: str = None,
    guild_ids: typing.List[int] = None,
    options: typing.List[dict] = None,
    connector: dict = None
):
    """
    Decorator for Cog to add subcommand.\n
    Almost same as :func:`.client.SlashCommand.subcommand`.
    Example:
    .. code-block:: python
        class ExampleCog(commands.Cog):
            def __init__(self, bot):
                self.bot = bot
            @cog_ext.cog_subcommand(base="group", name="say")
            async def group_say(self, ctx: SlashContext, text: str):
                await ctx.send(content=text)
    :param base: Name of the base command.
    :type base: str
    :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.
    :type subcommand_group: str
    :param name: Name of the subcommand. Default name of the coroutine.
    :type name: str
    :param description: Description of the subcommand. Default ``None``.
    :type description: str
    :param base_description: Description of the base command. Default ``None``.
    :type base_description: str
    :param base_desc: Alias of ``base_description``.
    :param subcommand_group_description: Description of the subcommand_group. Default ``None``.
    :type subcommand_group_description: str
    :param sub_group_desc: Alias of ``subcommand_group_description``.
    :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.
    :type guild_ids: List[int]
    :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.
    :type options: List[dict]
    :param connector: Kwargs connector for the command. Default ``None``.
    :type connector: dict
    """
    # The *_desc parameters are short aliases; the long form wins when both
    # are supplied (the alias is only used as a fallback).
    base_description = base_description or base_desc
    subcommand_group_description = subcommand_group_description or sub_group_desc
    def wrapper(cmd):
        # Same fallbacks as cog_slash: docstring for description, generated
        # options from the coroutine signature when none were given.
        desc = description or inspect.getdoc(cmd)
        if options is None:
            opts = manage_commands.generate_options(cmd, desc, connector)
        else:
            opts = options
        _sub = {
            "func": cmd,
            "name": name or cmd.__name__,
            "description": desc,
            "base_desc": base_description,
            "sub_group_desc": subcommand_group_description,
            "guild_ids": guild_ids,
            "api_options": opts,
            "connector": connector,
        }
        return CogSubcommandObject(_sub, base, name or cmd.__name__, subcommand_group)
    return wrapper
|
988,691 | bc09fd8b0738c1cb15d5b7eb3eac37cfb48b7fb0 | # TASK: 12/27/2019
# Words like first, second, and third are referred to as ordinal numbers.
# They correspond to the cardinal numbers 1, 2, and 3.
# Write a function called "cardinalToOrdinal()" that takes an integer from 1 to 15 and
# returns a string containing the corresponding English ordinal number as
# its only result. It should return the string, "Integer out of range" if a
# value outside of this range is provided as a parameter.
# Include a main program that demonstrates your function by displaying each integer
# from 1 to 15 and its corresponding ordinal number (or the out of range message).
import random
# Ordinal names for the cardinal numbers 1..15, indexed by value - 1.
# ("eighth"/"ninth" fix the original's misspelled "eigth"/"nineth".)
_ORDINALS = (
    "first", "second", "third", "fourth", "fifth", "sixth", "seventh",
    "eighth", "ninth", "tenth", "eleventh", "twelfth", "thirteenth",
    "fourteenth", "fifteenth",
)


def cardinalToOrdinal(num):
    """Return the English ordinal ('first'..'fifteenth') for 1 <= num <= 15.

    Returns the string 'Integer out of range' for any other value, as the
    exercise statement at the top of the file requires. (The old version
    printed that message instead of returning it, and returned a 15-tuple
    of mostly-empty strings as "its only result".)
    """
    if 1 <= num <= 15:
        return _ORDINALS[num - 1]
    return "Integer out of range"


def main():
    """Demonstrate the function: print a table of 1..15 with their ordinals."""
    print("TABLE OF INTEGERS AND ORDINAL NUMBERS".center(80))
    print("-------------------------------------".center(80))
    for i in range(1, 16):
        print(format(i, "30d") + " " + cardinalToOrdinal(i))


main()
|
988,692 | 9eee1af2a2fb836660728f8638f5055db4209483 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import os
from TSF_Forth import *
def TSF_uri_Initwords(TSF_words): #TSF_doc: register the URL/file-path words (TSFAPI).
    # Each word gets an English name and a Japanese alias bound to the same handler.
    TSF_words["#TSF_mainfile"] = TSF_uri_mainfile
    TSF_words["#メインファイル名"] = TSF_uri_mainfile
    TSF_words["#TSF_fileext"] = TSF_uri_fileext
    TSF_words["#ファイルの拡張子"] = TSF_uri_fileext
    return TSF_words
def TSF_uri_mainfile(): #TSF_doc: [filepath] push the executing main file's name onto the stack.
    # Fetch the interpreter's main file name and push it; always returns None.
    TSF_Forth_pushthat(TSF_Forth_mainfile())
    return None
def TSF_uri_fileext(): #TSF_doc: [filepath] pop a path, push its file extension (including the dot).
    # Pop the path off the data stack, keep only the extension part, push it back.
    TSF_thepath = TSF_Forth_popthat()
    TSF_Forth_pushthat(os.path.splitext(TSF_thepath)[1])
    return None
def TSF_uri_debug(TSF_argvs): #TSF_doc: unit-test-style debug driver (original comment names "TSF/TSF_shuffle.py" — presumably copied from a sibling module; this file is TSF_uri).
    TSF_debug_log=""
    # Boot a fresh Forth environment with this module's words registered.
    TSF_Forth_init(TSF_argvs,[TSF_uri_Initwords])
    TSF_Forth_setTSF(TSF_Forth_1ststack(),"\t".join(["UTF-8","#TSF_encoding","TSF_fileexttest:","#TSF_this","0","#TSF_fin."]))
    # NOTE(review): `sys` is used here but no `import sys` appears in this file —
    # presumably it leaks in via `from TSF_Forth import *`; confirm.
    TSF_Forth_setTSF("TSF_uri.py:","\t".join(["Python{0.major}.{0.minor}.{0.micro}".format(sys.version_info),sys.platform,TSF_io_stdout]))
    TSF_Forth_setTSF("TSF_fileexttest:","\t".join(["debug/sample_quine.tsf","#TSF_fileext","1","#TSF_echoN"]))
    TSF_Forth_addfin(TSF_argvs)
    TSF_Forth_run()
    # Append a dump of every stack to the log text and return it.
    for TSF_thename in TSF_Forth_stackskeys():
        TSF_debug_log=TSF_Forth_view(TSF_thename,True,TSF_debug_log)
    return TSF_debug_log
if __name__=="__main__":
    from collections import OrderedDict
    # Fix: `sys` is used below (sys.exit) but never imported in this file.
    # Importing it here is harmless even if `from TSF_Forth import *` already
    # provides it.
    import sys
    print("")
    TSF_argvs=TSF_io_argvs()
    print("--- {0} ---".format(TSF_argvs[0]))
    TSF_debug_savefilename="debug/debug_uri.log"
    # Run the debug scenario, persist the log, then best-effort echo it.
    TSF_debug_log=TSF_uri_debug(TSF_argvs)
    TSF_io_savetext(TSF_debug_savefilename,TSF_debug_log)
    print("")
    try:
        print("--- {0} ---\n{1}".format(TSF_debug_savefilename,TSF_debug_log))
    except:
        # Deliberate best-effort: console encoding may not handle the log text.
        print("can't 'print(TSF_debug_savefilename,TSF_debug_log)'")
    finally:
        pass
    sys.exit()
# Copyright (c) 2017 ooblog
# License: MIT
# https://github.com/ooblog/TSF1KEV/blob/master/LICENSE
|
988,693 | d602439bc40c0ad8f208123df5485a7873951f0e | from django.db import models
from accounts.models import DiscordUser
# Create your models here.
import datetime, jwt, time
from skillbase_api import settings
from rest_framework.authtoken.models import Token
from asgiref.sync import sync_to_async
class Mod(models.Model):
    """An installable mod: a display name plus its uploaded code file."""
    name = models.CharField(max_length=128)
    # Uploaded file is stored under MEDIA_ROOT/mods_files/.
    code = models.FileField(upload_to="mods_files/")
    def __str__(self):
        return self.name
class HaveMod(models.Model):
    """Maps a Discord user to the set of mods they own."""
    # Deleting the user cascades to this ownership record.
    user = models.ForeignKey(DiscordUser, on_delete=models.CASCADE)
    mods = models.ManyToManyField(Mod, blank=True)
    def __str__(self):
        return f"{self.user.discord_id}"
def get_expire():
    """Return the expiry timestamp: one minute from the current local time.

    NOTE(review): uses a naive local datetime — confirm this matches the
    project's USE_TZ setting before relying on it for token expiry.
    """
    now = datetime.datetime.now()
    return now + datetime.timedelta(seconds=60)
|
988,694 | 9bfac9733043f6b713eba63713b03b3183ec6e73 | import requests
from bs4 import BeautifulSoup as bs
import urllib
#get source code to parse (Python 2 script: print statements, urllib.urlretrieve)
r = requests.get("http://www.co.pacific.wa.us/gis/DesktopGIS/WEB/index.html")
html = r.text
#parse through to get links to files
# NOTE(review): no parser specified; bs4 picks whichever parser is installed,
# which can vary between machines — consider bs(html, "html.parser").
soup = bs(html)
#link containers: h = all hrefs, j = just the .zip shapefile links
h, j = [], []
#gets all the links
for link in soup.find_all("a"):
    h.append(link.get("href"))
#narrows to just zip files (shpfiles)
# NOTE(review): link.get("href") is None for anchors without an href, and
# `"zip" in None` raises TypeError — confirm every <a> on the page has one.
for i in h:
    if "zip" in i:
        j.append(i)
url ="http://www.co.pacific.wa.us/gis/DesktopGIS/WEB/"
# Download each zip into a hard-coded local directory; i[0:len(i)-4] strips
# the ".zip" suffix before re-appending it.
for i in j:
    print "Saving " + url + i
    urllib.urlretrieve(url+i, "C:/Users/Derek/Documents/WWU/Thesis/PacCountyGIS/" + i[0:len(i)-4] + ".zip")
print "Success"
|
988,695 | 880e641a9a032710839c0cbc97de150e71c753fd | #!/usr/bin/env python3
from binascii import unhexlify
def xor_two_str(s1, s2):
    """XOR two equal-length hex strings nibble-by-nibble.

    Args:
        s1, s2: hex-encoded strings of identical length.

    Returns:
        Hex string of the digit-wise XOR (lowercase, no padding changes).

    Raises:
        ValueError: if the strings differ in length.
    """
    if len(s1) != len(s2):
        # Fix: the original `raise "..."` raises a TypeError under Python 3
        # (exceptions must derive from BaseException), masking the real error.
        raise ValueError("XOR EXCEPTION: Strings are not of equal length!")
    return ''.join(format(int(a, 16) ^ int(b, 16), 'x') for a, b in zip(s1, s2))
# Classic OTP key-reuse recovery: each captured hex blob is XORed with the
# previously recovered key to peel off the next one.
KEY1 = "a6c8b6733c9b22de7bc0253266a3867df55acde8635e19c73313"
KEY2 = xor_two_str("37dcb292030faa90d07eec17e3b1c6d8daf94c35d4c9191a5e1e", KEY1)
print("[-] KEY2: {}".format(KEY2))
KEY3 = xor_two_str("c1545756687e7573db23aa1c3452a098b71a7fbf0fddddde5fc1", KEY2)
print("[-] KEY3: {}".format(KEY3))
# The final keystream combines all three recovered keys.
KEY4 = xor_two_str(xor_two_str(KEY1, KEY2), KEY3)
print("[-] KEY4: {}\n".format(KEY4))
# XOR the flag ciphertext with the combined keystream; unhexlify it at print time.
FLAG = xor_two_str("04ee9855208a2cd59091d04767ae47963170d1660df7f56f5faf", KEY4)
print("[*] FLAG: {}".format(unhexlify(FLAG))) |
988,696 | 04eeab411d8c780cfcfd276eb14f202302e87b11 | from django.contrib.auth.models import User
from django.db.models import Avg
from app import models
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Django auth user: password accepted on write, never returned.

    NOTE(review): ModelSerializer's default .create() stores the raw
    password — confirm the view hashes it (e.g. set_password).
    """
    class Meta:
        model = User
        fields = ('id', 'username', 'password', 'email',
                  'first_name', 'last_name')
        read_only_fields = 'id',  # one-element tuple: id is server-assigned
        extra_kwargs = {'password': {'write_only': True}}
class UnitOfMeasureSerializer(serializers.ModelSerializer):
    """Flat pk/name representation of a measurement unit."""
    class Meta:
        model = models.UnitOfMeasure
        fields = ('pk', 'name')
class StepSerializer(serializers.ModelSerializer):
    """A single recipe step: its order (sequence) and instruction text."""
    class Meta:
        model = models.Step
        fields = ('pk', 'sequence', 'instruction')
class IngredientTypeSerializer(serializers.HyperlinkedModelSerializer):
    """Ingredient category with its picture; hyperlinked variant."""
    class Meta:
        model = models.IngredientType
        fields = ('pk', 'name', 'picture')
class IngredientSerializer(serializers.ModelSerializer):
    """Ingredient detail with its category nested read-only."""
    # Nested, read-only: ingredients are not re-categorized through this serializer.
    type = IngredientTypeSerializer(many=False, read_only=True)
    class Meta:
        model = models.Ingredient
        fields = ('pk', 'banner', 'icon', 'name', 'description', 'type')
class RecipeComponentSerializer(serializers.ModelSerializer):
    """One line of a recipe: quantity + unit + ingredient (both nested read-only)."""
    ingredient = IngredientSerializer(many=False, read_only=True)
    unit_of_measure = UnitOfMeasureSerializer(many=False, read_only=True)
    class Meta:
        model = models.RecipeComponent
        fields = ('pk', 'quantity', 'adjective', 'unit_of_measure',
                  'ingredient', 'extra')
class RecipeOverviewSerializer(serializers.ModelSerializer):
    """Lightweight recipe listing (no steps, components, or ratings)."""
    class Meta:
        model = models.Recipe
        # NOTE(review): 'url' on a plain ModelSerializer — presumably a model
        # field or resolved by the router; confirm against models.Recipe.
        fields = ('pk', 'url', 'name', 'description', 'banner', 'icon')
class RecipeTypeSerializer(serializers.HyperlinkedModelSerializer):
    """Recipe category with its picture; hyperlinked variant."""
    class Meta:
        model = models.RecipeType
        fields = ('pk', 'name', 'picture')
class RecipeSerializer(serializers.HyperlinkedModelSerializer):
    """Full recipe detail: nested categories, components, and steps, plus
    aggregate rating/review counts computed from the Rating table."""
    categories = RecipeTypeSerializer(many=True, read_only=True)
    recipe_components = RecipeComponentSerializer(many=True, read_only=True)
    steps = StepSerializer(many=True, read_only=True)
    rating = serializers.SerializerMethodField()
    reviews = serializers.SerializerMethodField()
    class Meta:
        model = models.Recipe
        fields = ('pk', 'name', 'rating', 'reviews', 'description', 'banner',
                  'icon', 'time_to_complete', 'default_serving_size',
                  'categories', 'recipe_components', 'steps')
    def get_rating(self, obj):
        """Average rating for this recipe, or None when it has no ratings."""
        ratings = models.Rating.objects.filter(recipe=obj)
        return ratings.aggregate(Avg('rating'))['rating__avg']
    def get_reviews(self, obj):
        """Number of ratings for this recipe.

        Uses QuerySet.count() (SQL COUNT) instead of len(queryset), which
        would fetch and materialize every Rating row just to count them.
        """
        return models.Rating.objects.filter(recipe=obj).count()
|
988,697 | a3a3caa4efe699665c6db00fcb4ef026f19bba74 | # !/usr/bin/env python
# coding=utf-8
"""
Make a graph for lecture 2, hippo digestion
"""
from __future__ import print_function
import sys
import numpy as np
from scipy import interpolate
from common import make_fig, GOOD_RET
__author__ = 'hbmayes'
def graph_alg_eq():
    """Plot the algebraic design equation for the hippo-digestion example."""
    fig_name = 'lect2_hippo'
    # Conversion axis: stop just short of X=1 so the denominator never hits zero.
    x_min, x_max, n_pts = 0.0, 0.999, 2001
    conversions = np.linspace(x_min, x_max, n_pts)
    # Design equation C_F0 / -r_F evaluated at every conversion value.
    residence_time = (1.00 + 16.5 * (1.00 - conversions)) / (1.75 * (1 - conversions))
    make_fig(fig_name, conversions, residence_time,
             x_label=r'conversion (X, unitless)', y_label=r'$\displaystyle\frac{C_{F0}}{-r_F}$ (hr)',
             x_lima=0.0, x_limb=1.0, y_lima=0.0, y_limb=24.0,
             )
def graph_points():
    """
    Plot the generic-example design-equation points with a smooth overlay.

    design_eq = F_A0 / -r_A with F_A0 = 2.0 at the four given conversions;
    an interpolated curve on a fine grid is drawn through them.
    :return: saves a file with the graph (via make_fig)
    """
    fig_name = 'lect2_num_solv'
    # given data
    x = np.array([0.0, 0.4, 0.6, 0.8])
    ra = np.array([0.01, 0.0080, 0.005, 0.002])
    design_eq = np.divide(2.0, ra)
    print("Generic example design equation points: {}".format(["{:0.1f}".format(x) for x in design_eq]))
    # fine grid for the smooth overlay
    x_new = np.linspace(0.0, 0.8, 101)
    # NOTE(review): the original comment said "cubic spline" but the
    # interpolation requested below is kind='quadratic' — confirm intent.
    y_interp = interpolate.interp1d(x, design_eq, kind='quadratic')
    make_fig(fig_name, x, design_eq, ls1='o', x2_array=x_new, y2_array=y_interp(x_new),
             x_label=r'conversion (X, unitless)', y_label=r'$\displaystyle\frac{F_{A0}}{-r_A} \left(L\right)$',
             x_lima=0.0, x_limb=0.8, y_lima=0.0, y_limb=1000,
             fig_width=4, color2='green',
             )
def graph_smooth_from_pts():
    """
    Plot the isomerization example: raw points plus two interpolated curves.

    design_eq = F_A0 / -r_A with F_A0 = 50.0; a B-spline and an interp1d
    curve are overlaid for comparison.
    :return: saves a file with the graph (via make_fig)
    """
    fig_name = 'lect2_isom'
    # given data
    x = np.array([0.0, 0.2, 0.4, 0.6, 0.65])
    ra = np.array([39.0, 53.0, 59.0, 38.0, 25.0])
    design_eq = np.divide(50.0, ra)
    print("Isom example design equation points: {}".format(design_eq))
    # B-spline through the points (splrep's default degree k=3 is cubic)
    tck = interpolate.splrep(x, design_eq, s=0)
    x_new = np.linspace(0.0, 0.7, 101)
    y_new = interpolate.splev(x_new, tck, der=0)
    # NOTE(review): legend labels below look swapped — the splrep curve
    # (cubic, k=3) is labeled "quadratic" while this kind='quadratic'
    # interp1d is named cubic_interp and labeled "cubic". Confirm intent.
    cubic_interp = interpolate.interp1d(x, design_eq, kind='quadratic', fill_value="extrapolate")
    make_fig(fig_name, x, design_eq, ls1='o', x2_array=x_new, y2_array=y_new,
             x3_array=x_new, y3_array=cubic_interp(x_new),
             y1_label="data", y2_label="quadratic", y3_label="cubic",
             x_label=r'conversion (X, unitless)', y_label=r'$\displaystyle\frac{F_{A0}}{-r_A} \left(m^3\right)$',
             x_lima=0.0, x_limb=0.7, y_lima=0.0, y_limb=2.5,
             )
def main():
    """Generate every lecture-2 figure, then report success."""
    for make_graph in (graph_alg_eq, graph_points, graph_smooth_from_pts):
        make_graph()
    return GOOD_RET  # success
if __name__ == '__main__':
    status = main()
    sys.exit(status)  # propagate GOOD_RET (0) to the shell
|
988,698 | dc62734db3e292c3b034a4313f9dddf2c407d1cb | import random
# menu options lists
appetizer = ["fries", "stuffed potatoes", "mozzerella sticks"]
main = ["shrimp", "pizza", "pasta"]
dessert = ["lava cake", "carrot cake", "cookie", "ice cream"]

def meal():
    """Print a randomly chosen main course."""
    # random.choice stays correct if the menu lists change length; the
    # original's hard-coded randint(0, 2) bounds had to track len(list)-1.
    print(random.choice(main))

# begin program output
print("The chef will choose your meal tonight, all you have to do is ask")
print('Do you want an appetizer or to go straight to the meal? Type appetizer or meal')
answer1 = input()
if answer1 == 'appetizer':
    print(random.choice(appetizer))
    meal()
else:
    meal()
print('Do you want dessert? Type yes or no')
answer2 = input()
if answer2 == 'yes':
    print(random.choice(dessert))
    print("We hope you enjoyed your meal!")
else:
    print("Maybe next time... Have a nice night!")
988,699 | dba10d47ce5624132fa973d1dde29e0ac64d8c13 |
def digit_sort(lst):
    """Sort integers so numbers with more digits come first; numbers of
    equal length are ordered by their decimal-string (lexicographic) value."""
    as_text = [str(number) for number in lst]
    # Composite key replaces the original's double sort: primary key is
    # descending length, secondary is ascending string order.
    as_text.sort(key=lambda s: (-len(s), s))
    return [int(s) for s in as_text]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.