text stringlengths 38 1.54M |
|---|
from apm import *
import numpy as np
import random
import matplotlib.pyplot as plt
import time
# ---------------------------------------------------------------------------
# Compare three estimators of a noisy flow measurement against the true
# value: a filtered bias update, an L2-norm (squared error) moving horizon
# estimator (MHE) and an L1-norm MHE, all solved on a remote APMonitor
# server.  (Source indentation was lost; structure restored.)
# ---------------------------------------------------------------------------

# initial parameters
n_iter = 150   # number of cycles
x = 37.727     # true value

# filtered bias update gain
alpha = 0.0951

# mhe tuning: estimation horizon.  The original assigned 30, then overrode
# it with 50 and hard-coded 50 in the option calls; the single effective
# value is kept and used consistently below.
horizon = 50

# Select server
server = 'http://byu.apmonitor.com'

# Application names (app1 = L1-norm estimator, app2 = L2-norm estimator)
app1 = 'mhe1'
app2 = 'mhe2'

# Clear previous application
apm(server, app1, 'clear all')
apm(server, app2, 'clear all')

# Load model and horizon
apm_load(server, app1, 'valve.apm')
apm_load(server, app2, 'valve.apm')
apm_option(server, app1, 'apm.ctrl_hor', horizon)
apm_option(server, app1, 'apm.pred_hor', horizon)
apm_option(server, app2, 'apm.ctrl_hor', horizon)
apm_option(server, app2, 'apm.pred_hor', horizon)

# Load classifications
apm_info(server, app1, 'FV', 'd')
apm_info(server, app2, 'FV', 'd')
apm_info(server, app1, 'CV', 'flow')
apm_info(server, app2, 'CV', 'flow')

# Options: imode 5 = moving horizon estimation
apm_option(server, app1, 'apm.imode', 5)
apm_option(server, app2, 'apm.imode', 5)
# objective type: 1 = L1-norm (app1), 2 = squared error (app2)
apm_option(server, app1, 'apm.ev_type', 1)
apm_option(server, app2, 'apm.ev_type', 2)
apm_option(server, app1, 'd.STATUS', 1)
apm_option(server, app2, 'd.STATUS', 1)
apm_option(server, app1, 'flow.FSTATUS', 1)
apm_option(server, app2, 'flow.FSTATUS', 1)
apm_option(server, app1, 'flow.WMEAS', 100)
apm_option(server, app2, 'flow.WMEAS', 100)
apm_option(server, app1, 'flow.WMODEL', 0)
apm_option(server, app2, 'flow.WMODEL', 10)
apm_option(server, app1, 'flow.dcost', 0)
apm_option(server, app2, 'flow.dcost', 0)
apm_option(server, app1, 'apm.coldstart', 1)
apm_option(server, app2, 'apm.coldstart', 1)
apm_option(server, app1, 'apm.web_plot_freq', 5)
apm_option(server, app2, 'apm.web_plot_freq', 5)

# Initialize both L1 and L2 applications
apm(server, app1, 'solve')
apm(server, app2, 'solve')
apm_option(server, app1, 'apm.coldstart', 0)
apm_option(server, app2, 'apm.coldstart', 0)

# Create storage for results
xtrue = x * np.ones(n_iter + 1)
z = x * np.ones(n_iter + 1)    # measurements = true value + noise
timer = np.zeros(n_iter + 1)
xb = np.empty(n_iter + 1)      # filtered bias update estimate
x1mhe = np.empty(n_iter + 1)   # L1-norm MHE estimate
x2mhe = np.empty(n_iter + 1)   # L2-norm MHE estimate

# initial estimator values
x0 = 40
xb[0] = x0
x1mhe[0] = x0
x2mhe[0] = x0

# add uniform noise in [-1, 1]
for i in range(len(z)):
    z[i] = z[i] + (random.random() - 0.5) * 2.0

# inject two outliers to compare estimator robustness
z[50] = 100
z[100] = 0

# Create plot
plt.figure(figsize=(10, 7))
plt.ion()
plt.show()

start_time = time.time()
prev_time = start_time

## Cycle through measurements sequentially
for k in range(1, n_iter + 1):
    print('Cycle ' + str(k) + ' of ' + str(n_iter))

    # Pace the loop at roughly one cycle per second.
    # NOTE(review): prev_time is never updated inside the loop, so after the
    # first cycle the computed sleep is negative and the 0.01 s floor is
    # used — confirm whether 'prev_time = time.time()' was intended here.
    sleep_max = 1.0
    sleep = sleep_max - (time.time() - prev_time)
    if sleep >= 0.01:
        time.sleep(sleep - 0.01)
    else:
        time.sleep(0.01)
    timer[k] = k

    # filtered bias update
    xb[k] = alpha * z[k] + (1.0 - alpha) * xb[k - 1]

    # L2-norm MHE: feed the new measurement and re-solve
    apm_meas(server, app2, 'flow', z[k])
    sol2 = apm(server, app2, 'solve')
    x2mhe[k] = apm_tag(server, app2, 'flow.model')

    # L1-norm MHE
    apm_meas(server, app1, 'flow', z[k])
    sol1 = apm(server, app1, 'solve')
    x1mhe[k] = apm_tag(server, app1, 'flow.model')

    # Plot the results accumulated so far
    plt.clf()
    ax = plt.subplot(1, 1, 1)
    ax.grid()
    plt.plot(timer[0:k], z[0:k], 'kx', linewidth=2)
    plt.plot(timer[0:k], xb[0:k], 'g--', linewidth=3)
    plt.plot(timer[0:k], x2mhe[0:k], 'k-', linewidth=3)
    plt.plot(timer[0:k], x1mhe[0:k], 'r.-', linewidth=3)
    plt.plot(timer[0:k], xtrue[0:k], 'k:', linewidth=2)
    plt.legend(['Measurement', 'Filtered Bias Update',
                'Sq Error MHE', 'l_1-Norm MHE',
                'Actual Value'])
    plt.xlabel('Time (sec)')
    plt.ylabel('Flow Rate (T/hr)')
    plt.axis([0, timer[k], 32, 45])
    plt.draw()
    plt.pause(0.05)

# open the web viewers once the run is complete
# (assumes apm_web belongs after the loop — TODO confirm against original)
apm_web(server, app1)
apm_web(server, app2)
|
from django.contrib import admin
from .models import Student , Gender , Subject
# Register your models here so they are manageable from the Django admin UI.
admin.site.register(Student)
admin.site.register(Gender)
admin.site.register(Subject)
import datetime as dt
from contracts import contract
from typedclass.exceptions import TypedClassValidationError
@contract
def simplifier_date(value):
    """
    :type value: TypedClassAny
    :rtype: str
    """
    # Guard clause: reject anything that is not a date up front.
    # (datetime.datetime instances are dates too and are accepted.)
    if not isinstance(value, dt.date):
        raise TypedClassValidationError('Not a date')
    return value.strftime('%Y-%m-%d')
|
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import base_hasattr
from Products.CMFPlone.utils import safe_unicode
from collective.z3cform.datagridfield import DataGridFieldFactory
from collective.z3cform.datagridfield import DictRow
from imio.helpers.content import get_schema_fields
from imio.project.core import _
from imio.project.core import _tr
from plone import api
from plone.autoform import directives
from plone.dexterity.content import Container
from plone.dexterity.schema import DexteritySchemaPolicy
from plone.supermodel import model
from z3c.form import validator
from z3c.form.browser.select import SelectFieldWidget
from zope import schema
from zope.component import provideAdapter
from zope.interface import Interface
from zope.interface import Invalid
from zope.interface import implements
from zope.interface import invariant
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
# Message raised when a vocabulary key still referenced by content is removed.
ERROR_VALUE_REMOVED_IS_IN_USE = "The key '${removed_key}' can not be removed because it is currently " \
                                "used (for example by '${used_by_url}')."

# Per-portal-type field constraints used by get_pt_fields_voc and by the
# IProjectSpace invariant checks; 'titles' is filled in lazily by
# get_pt_fields_voc with the translated field titles.
field_constraints = {
    'titles': {},
    'mandatory': {'project': ['IDublinCore.title']},
    'indexes': {'project': [('IDublinCore.title', 1)]},
    'empty': {'project': []},
}
def get_pt_fields_voc(pt, excluded, constraints=None):
    """
    Returns a vocabulary with the given portal type fields, not excluded.
    Mandatory ones are suffixed with asterisk.

    :param pt: portal type name whose schema fields are listed
    :param excluded: iterable of field names to leave out
    :param constraints: dict with optional 'mandatory', 'indexes', 'empty'
        and 'titles' entries; 'titles' is filled in here as a side effect so
        later checks can show translated titles.
    """
    # Fix: the original used a mutable default ('constraints={}') and then
    # mutated it via setdefault, sharing state across calls.
    if constraints is None:
        constraints = {}
    terms = []
    mandatory = constraints.get('mandatory', {}).get(pt, [])
    positions = {fld: pos for fld, pos in constraints.get('indexes', {}).get(pt, [])}
    empty = constraints.get('empty', {}).get(pt, [])
    # cache of translated titles, keyed by field name, for this portal type
    titles = constraints.setdefault('titles', {}).setdefault(pt, {})
    for name, field in get_schema_fields(type_name=pt, prefix=True):
        if name in excluded:
            continue
        title = _tr(field.title)
        titles[name] = title
        if name in mandatory:
            title = u'{} *'.format(title)
        if name in positions:
            title = u'{} {}'.format(title, positions[name])
        if name in empty:
            title = u'{} -'.format(title)
        terms.append(SimpleTerm(name, title=title))
    return SimpleVocabulary(terms)
def field_list(dic, pt_fld):
    """
    Used in field checks: return the 'field_name' value of every row stored
    under *pt_fld* in *dic*, in order.
    """
    return [row['field_name'] for row in dic[pt_fld]]
def mandatory_check(data, constraints):
    """ Check the presence of mandatory fields """
    form_values = data._Data_data___
    missing = {}
    for pt, required in constraints.get('mandatory', {}).items():
        pt_fld = '{}_fields'.format(pt)
        if pt_fld not in form_values:
            continue
        selected = field_list(form_values, pt_fld)
        absent = [fld for fld in required if fld not in selected]
        if absent:
            missing[pt] = absent
    # build one translated message fragment per faulty portal type
    msg = u''
    for pt, flds in missing.items():
        fields = [u"'{}'".format(constraints['titles'][pt][fld]) for fld in flds]
        msg += _(u"for '${type}' type => ${fields}. ", mapping={'type': _(pt),
                                                                'fields': ', '.join(fields)})
    if msg:
        raise Invalid(_(u'Missing mandatory fields: ${msg}', mapping={'msg': msg}))
def position_check(data, constraints):
    """ Check the position of fields """
    form_values = data._Data_data___
    errors = {}
    for pt, expected in constraints.get('indexes', {}).items():
        pt_fld = '{}_fields'.format(pt)
        if pt_fld not in form_values:
            continue
        selected = field_list(form_values, pt_fld)
        # NOTE(review): selected.index(fld) raises ValueError if fld is not
        # selected at all (same as the original); the mandatory check is
        # presumably expected to run first — confirm.
        misplaced = [(fld, i) for (fld, i) in expected if selected.index(fld) + 1 != i]
        if misplaced:
            errors[pt] = misplaced
    msg = u''
    for pt, flds in errors.items():
        fields = [_(u"'${fld}' at position ${i}",
                    mapping={'fld': constraints['titles'][pt][fld], 'i': i}) for (fld, i) in flds]
        msg += _(u"for '${type}' type => ${fields}. ", mapping={'type': _(pt),
                                                                'fields': ', '.join(fields)})
    if msg:
        raise Invalid(_(u'Some fields have to be at a specific position: ${msg}', mapping={'msg': msg}))
def _validateKeyNotUsed(context,
                        value,
                        stored_value,
                        attribute_using_keys,
                        sub_attribute_using_key=None,
                        portal_types=[]):
    """
    Check if a key was removed in the given p_value regarding p_stored_value on context.
    If a key was removed, check that no given p_portal_types is using it.
    It suppose that given p_value is a list of dicts with 'key' and 'label' as existing keys.
    Given p_attribute_using_keys is the name of the attribute of sub objects using this key.
    Given p_sub_attribute_using_key is in the case of a datagridfield using this vocabulary,
    it is the name of the column using the vocabulary..."""
    # NOTE(review): 'portal_types=[]' is a mutable default; it is never
    # mutated here so behavior is unaffected, but None would be safer.
    # we only go further if a key was actually removed
    # so compare stored values to new given values
    storedKeys = [stored['key'] for stored in stored_value]
    newKeys = [newValue['key'] for newValue in value]
    removedKeys = set(storedKeys).difference(set(newKeys))
    if not removedKeys:
        return
    # if we found removed keys, then check that it was not used
    params = {'path': {'query': '/'.join(context.getPhysicalPath())}, }
    if portal_types:
        params['portal_type'] = portal_types
    catalog = getToolByName(context, 'portal_catalog')
    brains = catalog(**params)
    for brain in brains:
        # do not validate context... (root element). While doing a query on path, the context is also found...
        if brain.portal_type == context.portal_type:
            continue
        obj = brain.getObject()
        if not base_hasattr(obj, attribute_using_keys):
            continue
        used_value = getattr(obj, attribute_using_keys, ())
        # we need a tuple so 'set' here under works...
        # this is because we want this method to be a bit generic...
        # (Python 2 code: 'unicode' is the text type here)
        if isinstance(used_value, unicode):
            used_value = (used_value,)
        # if we use a datagridfield, we have to get the relevant column
        if sub_attribute_using_key:
            # it means that we use a datagridfield and that data is stored
            # in a dict contained in the list...
            tmpres = []
            for line in used_value or ():
                tmpres.append(line[sub_attribute_using_key])
            used_value = tuple(tmpres)
        if not used_value:
            continue
        intersectionValues = set(used_value).intersection(removedKeys)
        if intersectionValues:
            wrong_removed_key = intersectionValues.pop()
            raise Invalid(_(ERROR_VALUE_REMOVED_IS_IN_USE,
                            mapping={'removed_key': wrong_removed_key,
                                     'used_by_url': obj.absolute_url(), }))
class RemovedValueIsNotUsedByCategoriesFieldValidator(validator.SimpleFieldValidator):
    """Refuse to remove a 'categories_values' key still used by sub objects."""

    def validate(self, value):
        # while removing a value from a defined vocabulary, check that
        # it is not used anywhere...
        # Fix: the original called super(validator.SimpleFieldValidator, self),
        # which skips SimpleFieldValidator.validate entirely; super must be
        # given the class itself so the standard field validation runs.
        super(RemovedValueIsNotUsedByCategoriesFieldValidator, self).validate(value)
        # in the creation process, the validator is called but it is not necessary
        if not self.context.portal_type == 'projectspace':
            return
        stored_value = getattr(self.context, self.field.getName())
        _validateKeyNotUsed(self.context,
                            value,
                            stored_value,
                            'categories',
                            None,
                            [])
class RemovedValueIsNotUsedByPriorityFieldValidator(validator.SimpleFieldValidator):
    """Refuse to remove a 'priority_values' key still used by sub objects."""

    def validate(self, value):
        # while removing a value from a defined vocabulary, check that
        # it is not used anywhere...
        # Fix: the original called super(validator.SimpleFieldValidator, self),
        # which skips SimpleFieldValidator.validate entirely; super must be
        # given the class itself so the standard field validation runs.
        super(RemovedValueIsNotUsedByPriorityFieldValidator, self).validate(value)
        # in the creation process, the validator is called but it is not necessary
        if not self.context.portal_type == 'projectspace':
            return
        stored_value = getattr(self.context, self.field.getName())
        _validateKeyNotUsed(self.context,
                            value,
                            stored_value,
                            'priority',
                            None,
                            [])
class RemovedValueIsNotUsedByBudgetTypesFieldValidator(validator.SimpleFieldValidator):
    """Refuse to remove a 'budget_types' key still used by sub objects."""

    def validate(self, value):
        # while removing a value from a defined vocabulary, check that
        # it is not used anywhere...
        # Fix: the original called super(validator.SimpleFieldValidator, self),
        # which skips SimpleFieldValidator.validate entirely; super must be
        # given the class itself so the standard field validation runs.
        super(RemovedValueIsNotUsedByBudgetTypesFieldValidator, self).validate(value)
        # in the creation process, the validator is called but it is not necessary
        if not self.context.portal_type == 'projectspace':
            return
        stored_value = getattr(self.context, self.field.getName())
        _validateKeyNotUsed(self.context,
                            value,
                            stored_value,
                            # we use a datagridfield, we need to provide field using
                            # value and column name of the datagridfield...
                            'budget',
                            'budget_type',
                            [])
class ProjectFieldsVocabulary(object):
    """Vocabulary of displayable 'project' fields (see get_pt_fields_voc)."""
    implements(IVocabularyFactory)

    # fields that must never be offered in the project_fields selection
    _EXCLUDED = ['IDublinCore.description', 'IDublinCore.contributors', 'IDublinCore.creators',
                 'IDublinCore.effective', 'IDublinCore.expires', 'IDublinCore.language',
                 'IDublinCore.rights', 'IDublinCore.subjects', 'INameFromTitle.title',
                 'IVersionable.changeNote', 'notes']

    def __call__(self, context):
        return get_pt_fields_voc('project', self._EXCLUDED, field_constraints)
class IVocabularySchema(Interface):
    """
    Schema used for the vocabulary datagrid field.
    """
    # displayed value (see the 'Label is the displayed value' field hints)
    label = schema.TextLine(
        title=_("Label"),
        required=True, )
    # stored value
    key = schema.ASCIILine(
        title=_("Key"),
        required=True, )


# selectable budget years, 2012 through 2039 inclusive
possible_years = SimpleVocabulary([SimpleTerm(y) for y in range(2012, 2040)])
class IProjectFieldsSchema(Interface):
    """One row of the 'project fields display' datagrid."""
    field_name = schema.Choice(
        title=_(u'Field name'),
        vocabulary=u'imio.project.core.ProjectFieldsVocabulary',
    )
    # optional TAL expressions; presumably evaluated to guard read/write
    # access to the field — TODO confirm where they are evaluated
    read_tal_condition = schema.TextLine(
        title=_("Read TAL condition"),
        required=False,
    )
    write_tal_condition = schema.TextLine(
        title=_("Write TAL condition"),
        required=False,
    )
class IProjectSpace(model.Schema):
    """
    Project schema, field ordering
    """
    last_reference_number = schema.Int(
        title=_(u"Last reference number"),
        # description=_(u""),
        required=False,
        default=0,
    )
    # datagrid of {label, key} rows (IVocabularySchema) feeding the
    # categories vocabulary; removal is guarded by
    # RemovedValueIsNotUsedByCategoriesFieldValidator
    categories_values = schema.List(
        title=_(u'Categories values'),
        description=_(u"Enter one different value by row. Label is the displayed value. Key is the stored value:"
                      " in lowercase, without space."),
        required=True,
        value_type=DictRow(title=u"",
                           schema=IVocabularySchema,
                           required=False),
    )
    directives.widget('categories_values', DataGridFieldFactory, display_table_css_class='listing', allow_reorder=True)
    # same shape as categories_values; guarded by the priority validator
    priority_values = schema.List(
        title=_(u'Priority values'),
        description=_(u"Enter one different value by row. Label is the displayed value. Key is the stored value:"
                      " in lowercase, without space."),
        required=True,
        value_type=DictRow(title=u"",
                           schema=IVocabularySchema,
                           required=False),
    )
    directives.widget('priority_values', DataGridFieldFactory, display_table_css_class='listing', allow_reorder=True)
    # same shape; guarded by the budget types validator
    budget_types = schema.List(
        title=_(u'Budget types values'),
        description=_(u"Enter one different value by row. Label is the displayed value. Key is the stored value:"
                      " in lowercase, without space."),
        required=True,
        value_type=DictRow(title=u"",
                           schema=IVocabularySchema,
                           required=False),
    )
    directives.widget('budget_types', DataGridFieldFactory, display_table_css_class='listing', allow_reorder=True)
    budget_years = schema.List(
        title=_(u'Budget concerned years'),
        # description=_(u"Select all years concerned by budget."),
        required=True,
        value_type=schema.Choice(vocabulary=possible_years)
    )
    directives.widget('budget_years', SelectFieldWidget, multiple='multiple', size=6)
    project_budget_states = schema.List(
        title=_(u"${type} budget globalization states", mapping={'type': _('Project')}),
        description=_(u'Put states on the right for which you want to globalize budget fields.'),
        required=False,
        value_type=schema.Choice(vocabulary=u'imio.project.core.ProjectStatesVocabulary'),
    )
    use_ref_number = schema.Bool(
        title=_(u'Use reference number'),
        description=_(u'Used in Title, documents, etc.'),
        default=True,
    )
    INS_code = schema.TextLine(
        title=_(u'INS Code'),
        # description=_(u'5-character code statistically representing the town'),
        required=False,
    )
    current_fiscal_year = schema.Int(
        title=_(u'Current fiscal year'),
        # description=_(u''),
        required=False,
    )
    plan_values = schema.List(
        title=_(u'Plan values'),
        description=_(u"Enter one different value by row. Label is the displayed value. Key is the stored value:"
                      " in lowercase, without space."),
        required=True,
        value_type=DictRow(title=u"",
                           schema=IVocabularySchema,
                           required=False),
    )
    directives.widget('plan_values', DataGridFieldFactory, display_table_css_class='listing', allow_reorder=True)
    # ordered list of IProjectFieldsSchema rows selecting which project
    # fields are displayed; checked by the invariant below
    project_fields = schema.List(
        title=_(u"${type} fields display", mapping={'type': _('Project')}),
        description=_(u'Put fields on the right to display it. Flags are : ...'),
        value_type=DictRow(title=_(u'Field'),
                           schema=IProjectFieldsSchema,
                           required=False),
    )
    directives.widget('project_fields', DataGridFieldFactory, display_table_css_class='listing',
                      allow_reorder=True, auto_append=False)
    organization_type = schema.Choice(
        title=_(u'Organization type'),
        vocabulary=u'imio.project.core.content.projectspace.organization_type_vocabulary',
        default='ac',
    )
    colorize_project_rows = schema.Bool(
        title=_(u"colorize project's rows"),
        description=_(u"Visual way to highlight internal panes in dashboards"),
        default=False,
    )

    @invariant
    def validateSettings(data):
        # enforce the mandatory fields and fixed positions declared in
        # field_constraints (see mandatory_check / position_check)
        mandatory_check(data, field_constraints)
        position_check(data, field_constraints)
# Bind each "removed value still in use" validator to its schema field and
# register it as an adapter so z3c.form picks it up at validation time.
validator.WidgetValidatorDiscriminators(RemovedValueIsNotUsedByCategoriesFieldValidator,
                                        field=IProjectSpace['categories_values'])
provideAdapter(RemovedValueIsNotUsedByCategoriesFieldValidator)
validator.WidgetValidatorDiscriminators(RemovedValueIsNotUsedByPriorityFieldValidator,
                                        field=IProjectSpace['priority_values'])
provideAdapter(RemovedValueIsNotUsedByPriorityFieldValidator)
validator.WidgetValidatorDiscriminators(RemovedValueIsNotUsedByBudgetTypesFieldValidator,
                                        field=IProjectSpace['budget_types'])
provideAdapter(RemovedValueIsNotUsedByBudgetTypesFieldValidator)
class ProjectSpace(Container):
    """Dexterity container implementing IProjectSpace."""
    implements(IProjectSpace)
class ProjectSpaceSchemaPolicy(DexteritySchemaPolicy):
    """Schema policy returning IProjectSpace as the only base schema."""

    def bases(self, schemaName, tree):
        return (IProjectSpace,)
class ProjectStatesVocabulary(object):
    """ Project workflow states """
    implements(IVocabularyFactory)

    def __call__(self, context):
        pw = api.portal.get_tool('portal_workflow')
        for workflow in pw.getWorkflowsFor('project'):
            states = [value for value in workflow.states.values()]
            # NOTE(review): 'terms' is reset on each workflow iteration, so
            # only the last workflow's states survive, and if no workflow is
            # bound to 'project' the return below raises NameError — this
            # presumably assumes exactly one workflow; confirm.
            terms = []
            for state in states:
                terms.append(SimpleTerm(state.id, title=_(safe_unicode(state.title), domain='plone')))
        return SimpleVocabulary(terms)
class OrganizationTypeVocabulary(object):
    """Static vocabulary of organization types (AC / CPAS)."""
    implements(IVocabularyFactory)

    def __call__(self, context):
        # (value, token, title) triples for each organization type
        entries = [(u'ac', u'ac', u'AC'), (u'cpas', u'cpas', u'CPAS')]
        return SimpleVocabulary([SimpleTerm(*entry) for entry in entries])
|
# This project's release version
__version__ = "1.2.0"
# This project's release commit hash (40 hex chars — presumably a git SHA)
COMMIT = "5faa5b60bf8c960c636a6a976b5a71e51c1b2f60"
|
class Student:
    """Simple student record with a class-wide school name."""

    # class variable, shared by all instances (like a Java static field)
    schoolName = "중앙"

    def __init__(self, name, age, hobby):
        # instance variables
        self.name = name
        self.age = age
        self.hobby = hobby

    def print1(self):
        """Print this student's name, age and hobby."""
        print(f"이름 : {self.name}, 나이 : {self.age}, 취미 : {self.hobby}")
# create two students and print them; the two call forms are equivalent
st1 = Student("영희", 34, "게임")
st2 = Student("철수", 52, "포기")
st1.print1(); Student.print1(st2)  # instance call vs explicit-self call
print(Student.schoolName)
# the class variable is also reachable through instances
print(st1.schoolName, st2.schoolName)
#!/usr/bin/python
# coding=utf-8
from rtd_conf import *
from rtd_db import *
import os
import hashlib
import feedparser
import urllib
import urllib2
from bencode import bdecode
import logging
import re
'''
TODO LIST
===
1. 修改下载流程,下载失败时,写入数据库,down_count为0
2. 读取rss网站之前,首先读取数据库中down_count为0的文件,尝试下载
3. 定时检查download目录是否有新文件出现,如果有新文件出现则更新到数据库中
4. 目前的下载方式urlretrieve貌似不能设置超时时间,需要找一个可以设置超时时间的方式
'''
# temporary file name used while downloading a torrent, and the log file path
g_tmp_tname = 'torrent.tmp'
g_logname = 'logfile/rtd.log'
def log_out(msg):
    """Log *msg* at INFO level via the root logger."""
    logging.info(msg)
def debug_out(msg):
    """Log *msg* at DEBUG level via the root logger."""
    logging.debug(msg)
def get_name_by_tfile(feedbuff):
    """Return the 'name' entry of a bencoded torrent buffer, or None.

    Returns None when the buffer cannot be decoded or when the decoded
    structure lacks a string 'info'/'name' layout.
    """
    try:
        tor = bdecode(feedbuff)
    except Exception:  # narrowed from bare except: don't swallow SystemExit etc.
        return None
    # replaced py2-only has_key()/type() comparisons with 'in'/isinstance
    if 'info' not in tor:
        return None
    if not isinstance(tor['info'], dict):
        return None
    if 'name' not in tor['info']:
        return None
    if not isinstance(tor['info']['name'], str):
        return None
    return tor['info']['name']
def download_torrent(tor, path):
    """Fetch tor.address into *path* and fill in the torrent's metadata.

    On success sets tor.file_sha1, tor.tor_name, tor.file_name, increments
    tor.file_down_count and returns True; returns None on failure.
    """
    if os.path.isfile(path):
        os.remove(path)
    try:
        urllib.urlretrieve(tor.address, path)
    except Exception:  # fixed py2-only 'except Exception, e' syntax
        log_out("Download torrent err, addr %s" % tor.address)
        return None
    # fixed: open in binary mode — a .torrent file is binary data
    with open(path, 'rb') as pf:
        buf = pf.read()
    # NOTE(review): the field is named file_sha1 but the digest is SHA-256;
    # kept as-is since the database presumably stores it under that name.
    tor.file_sha1 = hashlib.sha256(buf).hexdigest()
    tor.tor_name = get_name_by_tfile(buf)
    if not tor.tor_name:
        log_out('Invalid log file, addr %s' % tor.address)
        return None
    tor.file_name = tor.get_std_name()
    tor.file_down_count = tor.file_down_count + 1
    return True
def download_torrents_failed_last(db):
    """Retry torrents whose previous download failed (down_count == 0).

    Not implemented yet — see item 2 of the module TODO list.
    """
    pass
def mv_torrent(web, tor):
    """Copy the temp download file into the web's download dir under the
    torrent's standard name."""
    dst = os.path.join(web.download_dir, tor.get_std_name())
    src = os.path.join(web.temp_download_dir, g_tmp_tname)
    # destination is opened first, matching the original behavior
    with open(dst, 'wb') as out_f, open(src, 'rb') as in_f:
        out_f.write(in_f.read())
def save_undown_torrent(tor, db):
    """Record a failed download so it can be retried later.

    Not implemented yet — see item 1 of the module TODO list.
    """
    pass
def get_all_addrs(web_addr):
    """Parse the RSS feed at *web_addr* and return every link href found.

    Malformed entries (missing 'links' or 'href') are logged and skipped.
    """
    rss_web = feedparser.parse(web_addr)
    down_links = []
    for rss_tor in rss_web.entries:
        # replaced py2-only has_key() with the 'in' operator
        if 'links' not in rss_tor:
            log_out('Invalid rss-address[%s], cannot find links element'
                    % web_addr)
            continue
        for addr_info in rss_tor['links']:
            if 'href' not in addr_info:
                log_out('Invalid rss-address[%s], cannot find href element'
                        % web_addr)
                continue
            down_links.append(addr_info['href'])
    return down_links
def is_download_link(addr):
    """Return True if *addr* appears to serve a torrent file.

    Judged from the response's Content-Type header: an exact
    'application/x-bittorrent' matches, and any type containing 'torrent'
    is accepted with a logged guess.
    """
    conn = urllib2.urlopen(addr)
    try:
        cinfo = conn.info()
    finally:
        # fixed: always release the connection, even if info() raises
        conn.close()
    # replaced py2-only has_key() with the 'in' operator
    if 'content-type' not in cinfo:
        return False
    if cinfo['content-type'] == 'application/x-bittorrent':
        return True
    # a plain substring test replaces re.compile('torrent').search(...)
    if 'torrent' not in cinfo['content-type']:
        return False
    log_out('Unknown content-type(%s) but i guess it is a torrent addr' %
            cinfo['content-type'])
    return True
if __name__ == '__main__':
    # 0. init resources
    g_name = 'rtd.ini'  # stub
    rconf = rss_conf(g_name)
    rdb = rss_db(rconf.dbname)
    logging.basicConfig(filename=g_logname,
                        level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s',
                        encoding="UTF-8")
    # 1. download the torrents which failed to downloaded last time
    download_torrents_failed_last(rdb)
    # 2. read in rss and download torrents
    for rweb in rconf.webs:
        debug_out('Get torrents info from %s' % rweb.address)
        addrs = get_all_addrs(rweb.address)
        for tor_addr in addrs:
            # If the address is not a download link, skip it: check the
            # database first, and only if it is not recorded there decide
            # by probing the link itself.
            if rdb.is_webaddr_exist(tor_addr):
                debug_out('Not download link by db, addr %s' % tor_addr)
                continue
            if not is_download_link(tor_addr):
                debug_out('Not download link, addr %s' % tor_addr)
                rdb.add_webpage(webpage(tor_addr, rweb.name));
                continue
            # If the torrent address is already stored in the database,
            # skip it; otherwise start downloading.
            if rdb.is_toraddr_exist(tor_addr):
                debug_out('Torrent address %s exists, skipped' % tor_addr)
                continue
            tor_name = os.path.join(rweb.temp_download_dir, g_tmp_tname)
            debug_out('Begin to download torrent, address %s, path %s' %
                      (tor_addr, tor_name))
            tor = torrent(address = tor_addr, webname = rweb.name)
            if not download_torrent(tor, tor_name):
                # if failed to download the torrent, save to db and try to download it later
                log_out('failed to download torrent from address %s' % tor_addr)
                save_undown_torrent(tor, rdb)
                continue
            # Write to the database immediately after the download
            # completes: a later 'continue' could otherwise skip the write,
            # and every RSS pass would re-download the same torrents.
            rdb.add_tor(tor)
            if rdb.is_sha_exist(tor.file_sha1):
                log_out('Duplicate torrent downloaded, addr[%s], name[%s]'
                        % (tor.address, tor.file_name))
                continue
            mv_torrent(rweb, tor)
            log_out('Download torrent[%s], from web[%s], address[%s]'
                    % (tor.file_name, rweb.name, tor.address))
    # 3. release all resources
    rdb.close()
|
from schema.models import db
# getall values
def getAll(model):
    """Return every row of *model* via its SQLAlchemy query attribute."""
    return model.query.all()
def add_instance(model, **kwargs):
    """Create a *model* row from *kwargs*, add it to the session and commit."""
    db.session.add(model(**kwargs))
    commit_changes()
# delete instance
def delete_instance(model, id):
    """Delete the *model* row whose id equals *id* and commit.

    Bug fix: the original evaluated ``commit_changes`` without calling it,
    so the deletion was never committed.
    """
    model.query.filter_by(id=id).delete()
    commit_changes()
# edit instance
def edit_instance(model, id, **kwargs):
    """Update attributes of the *model* row whose id equals *id* and commit.

    Bug fixes: iterate ``kwargs.items()`` (iterating the dict alone yields
    only keys, so the two-name unpack raised ValueError), and actually call
    ``commit_changes()`` — the original evaluated the name without calling.
    """
    instance = model.query.filter_by(id=id).all()[0]
    for attr, new_value in kwargs.items():
        setattr(instance, attr, new_value)
    commit_changes()
def commit_changes():
    """Commit the current SQLAlchemy session."""
    db.session.commit()
|
import sys
import numpy as np
import ase.io
from ase.lattice.cubic import BodyCenteredCubic
from quippy import Potential, BOHR
def write_control_file(filename, template, atoms, alat, nk):
    """
    Generate control file for TBE code from template and Atoms object.

    :param filename: output path of the control file
    :param template: %-style template with ALAT/NBAS/NK/PLAT_STR/SITE_STR keys
    :param atoms: ASE Atoms object providing cell, symbols and positions
    :param alat: lattice constant (angstrom) used to scale cell and positions
    :param nk: number of k-points written into the template
    """
    # lattice vectors in units of alat, one row per line
    plat_str = ''
    for i in range(3):
        plat_str += ("%12.8f"*3) % tuple(atoms.cell[i, :]/alat) + '\n '
    plat_str = plat_str.rstrip()
    # one ATOM=... POS=... line per atom, positions in units of alat
    site_str = ''
    for sym, pos in zip(atoms.get_chemical_symbols(),
                        atoms.get_positions()):
        site_str += ('ATOM=%-2s POS=%12.8f %12.8f %12.8f\n ' %
                     ((sym,) + tuple(pos/alat)))
    site_str = site_str.rstrip()
    # renamed from 'dict' to avoid shadowing the builtin
    subs = {
        'ALAT': alat/BOHR,
        'NBAS': len(atoms),
        'NK': nk,
        'PLAT_STR': plat_str,
        'SITE_STR': site_str,
    }
    # fixed: use a context manager so the file is closed even on error
    with open(filename, 'w') as ctrl_file:
        ctrl_file.write(template % subs)
alat = 2.87  # lattice constant, angstrom
nk = 16      # number of k-points along cubic axes
n = 2        # we use an n x n x n supercell of primitive cell
# build a BCC Fe cell and replicate it into the supercell
atoms = BodyCenteredCubic(symbol='Fe',
                          latticeconstant=alat)
atoms *= [n, n, n]
# template control file for tbe code
ctrl_template = """HEADER auto-generated control file
%% const fp=0 cpl=1 xcf=4 gga=3
%% const pureFe=0 PRBmodel=0 CModel2=0 scale=1.0
%% const epsilon=1 sign=epsilon?-1:1 xFe=1/3
%% const nbas=%(NBAS)d nspec=3
%% const sd=1 ovlp=sd ul=1 u1=0 tbu=0 io=1 nitq=50
%% const verb=31 so=0 nsp=2 tetra=0 metal={fp?3:1} width=0.002 N=1
%% const au=0.529177 NdFe=6.8
%% const beta=0.2 nx=5 kmix=300 nav=0
%% const show=0 mpol=0
%% const dyn=0 temp=300 taup=10 taub=100 time=100000 tstep=5
%% const hess=F relax=0 nit=50 xtol=1d-3 gtol=5d-4 step=0.01 nkill=100 nitf=50
%% const fs=0.048377 K=1/0.6333328d-5 amass=1.09716d-3
VERS TB=10 LM=7 FP=7 ASA=7
IO SHOW={show} HELP=F VERBOS={verb} WKP=F
CONST nit=100 conv=1d-2 qtol=1d-2 pair=F
V0at=79.76508 V0bcc=V0at*2 V0fcc=V0at*4
vfrac0=1 vfrac=vfrac0 Vbcc=vfrac*V0bcc Vfcc=vfrac*V0fcc
q0=sqrt(8/3) q=q0 qJang=1.57693 qexp=1.582 qTB=1.614
ahcp=(Vfcc/(sqrt(3)*q))^(1/3)
abcc=Vbcc^(1/3) afcc=Vfcc^(1/3) aeps=sqrt(3)*ahcp
alat=%(ALAT)f
nk=%(NK)d mull=-1 bzj=1 ewtol=1d-6 ef0=0.643
R=2.2 RC=0.8
%% const NdFe=7 V0at=79.76508 V0bcc=V0at*2 a0bcc=V0bcc^(1/3)
V0bcc={V0bcc} a0bcc={a0bcc}
# cut-offs in a.u.
%% ifdef scale
fixscale=abcc
%% else
fixscale=a0bcc
%% endif
r1CFs0=0.5278607028 rcCFs0=1.789982983
r1CFsp0=0.6109702668 rcCFsp0=1.643917618
r1CFd0=0.5945948286 rcCFd0=1.673566269
r1CFsau=r1CFs0*fixscale rcCFsau=rcCFs0*fixscale
r1CFspau=r1CFs0*fixscale rcCFspau=rcCFs0*fixscale
r1CFdau=r1CFd0*fixscale rcCFdau=rcCFd0*fixscale
r1CFpp0=0.5007117092 rcCFpp0=1.507038147
r1CFppau=r1CFpp0*fixscale rcCFppau=rcCFpp0*fixscale
r1HFau=0.8*fixscale rcHFau=2*fixscale
r1HFppau=0.75*fixscale rcHFppau=0.95*fixscale
r1CC0=0.6 rcCC0=1
CCrc1au=3.6 CCrc2au=5.42
r1CCau=r1CC0*fixscale rcCCau=rcCC0*fixscale
r1ssau=1.1*fixscale rcssau=2*fixscale
r1sdau=1.1*fixscale rcsdau=2*fixscale
r1ddau={PRBModel?1.1:0.9}*fixscale rcddau=1.4*fixscale
r1ppau={PRBModel?1.1:0.9}*fixscale rcppau=1.4*fixscale
cutmod={PRBModel?1:2}
%% ifdef sd
rmaxhau=3*fixscale
%% else
rmaxhau=1.4*fixscale
%% endif
r1HHau=1.1*fixscale rcHHau=1.4*fixscale
# on-site terms
q0s={sd?1:0} q0p=0 q0dFe={sd?7:NdFe}
esFe=0.15 epFe=0.45 edFe=0 momFe={nsp==1?0:2}
U=1 Us=U Up=U UdFe=U stniFe={sd?0.055:0.05}
q0sC=2 q0pC=2
esc=-0.467663945 epc=0.08275667052 UC=1.238348985
q0H=1 esH=-0.085 UH=1.2 momH={nsp==1?0:1}
spp=0 ppd=0 sdd=0 pdp=0 ddd=0 pdf=0 ddg=0
# hopping integrals
qsp=1 qpp=1 qpd=1
fsp=0 fpp=0 fpd=0
odds=0 oddp=0 oddd=0 opp=0 osp=0 opd=0
# Fe-Fe
r0ff=0.5*sqrt(3)*V0bcc^(1/3)
qdds0=1 qddp0=1 qddd0=1 qss0=0.3 qsd0=0.57
fdd0=0.65 fss0=-0.35 fsd0=-0.5
fdds0=-fdd0*6 fddp0=fdd0*4.914539385 fddd0=fdd0*-2.232504465
qdds=0.9 qddp=0.9 qddd=0.9 qss=qss0 qsd=0.3
hddsr0=fdds0*exp(-qdds0*r0ff)
hddpr0=fddp0*exp(-qddp0*r0ff)
hdddr0=fddd0*exp(-qddd0*r0ff)
hssr0=fss0*exp(-qss0*r0ff)
hsdr0=fsd0*exp(-qsd0*r0ff)
fdds=hddsr0*exp(qdds*r0ff)
fddp=hddpr0*exp(qddp*r0ff)
fddd=hdddr0*exp(qddd*r0ff)
fss=hssr0*exp(qss*r0ff)
fsd=hsdr0*exp(qsd*r0ff)
qoss0=qss qosd0=qsd
oss0=0.45 osd0=0.5
ossr0=oss0*exp(-qoss0*r0ff)
osdr0=osd0*exp(-qosd0*r0ff)
qoss=qoss0 qosd=qosd0
oss=ossr0*exp(qoss*r0ff)
osd=osdr0*exp(qosd*r0ff)
# Fe-C
r0CF=3.519361994
qCFss0=0.6 qCFsp0=0.6 qCFsd0=0.6 qCFpds0=0.7 qCFpdp0=0.7
fCFss0=-2 fCFsp0=2.25 fCFsd0=-0.5
fCFpds0=-1.5 fCFpdp0=1
hCFssr0=fCFss0*exp(-qCFss0*r0CF)
hCFspr0=fCFsp0*exp(-qCFsp0*r0CF)
hCFsdr0=fCFsd0*exp(-qCFsd0*r0CF)
hpdsr0=fCFpds0*exp(-qCFpds0*r0CF)
hpdpr0=fCFpdp0*exp(-qCFpdp0*r0CF)
qCFss=0.5654777585 qCFsp=0.7602419272 qCFsd=0.3024914302
qCFpds=0.6436211918 qCFpdp=0.6652876311
fCFss=hCFssr0*exp(qCFss*r0CF)
fCFsp=hCFspr0*exp(qCFsp*r0CF)
fCFsd=hCFsdr0*exp(qCFsd*r0CF)
fCFpds=hpdsr0*exp(qCFpds*r0CF)
fCFpdp=hpdpr0*exp(qCFpdp*r0CF)
ofacCFss=0.5502992445 ofacCFsp=0.5487607608
ofacCFsd=0.3601562852 ofacCFpd=0.4335108427
qoCFss0=0.6 qoCFsp0=0.6 qoCFsd0=0.5
qoCFpds0=0.5 qoCFpdp0=0.5
oCFss0=-ofacCFss*fCFss0 oCFsp0=-ofacCFsp*fCFsp0
oCFsd0=-ofacCFsd*fCFsd0
oCFpds0=-ofacCFpd*fCFpds0 oCFpdp0=-ofacCFpd*fCFpdp0
oCFssr0=oCFss0*exp(-qoCFss0*r0CF)
oCFspr0=oCFsp0*exp(-qoCFsp0*r0CF)
oCFsdr0=oCFsd0*exp(-qoCFsd0*r0CF)
opdsr0=oCFpds0*exp(-qoCFpds0*r0CF)
opdpr0=oCFpdp0*exp(-qoCFpdp0*r0CF)
qoCFss=0.3010599981 qoCFsp=0.3911389194 qoCFsd=0.3408022068
qoCFpds=0.3063617442 qoCFpdp=0.4551807593
oCFss=oCFssr0*exp(qoCFss*r0CF)
oCFsp=oCFspr0*exp(qoCFsp*r0CF)
oCFsd=oCFsdr0*exp(qoCFsd*r0CF)
oCFpds=opdsr0*exp(qoCFpds*r0CF)
oCFpdp=opdpr0*exp(qoCFpdp*r0CF)
# Fe-H
r0HF=1.453500953
qHFss0=0.592 qHFsd0=0.601
fHFss0=-0.8365709269 fHFsd0=-0.5041736305
hHFssr0=fHFss0*exp(-qHFss0*r0HF)
hHFsdr0=fHFsd0*exp(-qHFsd0*r0HF)
qHFss=0.7762840122 qHFsd=0.4544987809
fHFss=hHFssr0*exp(qHFss*r0HF)
fHFsd=hHFsdr0*exp(qHFsd*r0HF)
ofacHFss=0.4676030053 ofacHFsd=0.399106628
qoHFss0=0.552 qoHFsd0=0.412
oHFss0=-ofacHFss*fHFss0 oHFsd0=-ofacHFsd*fHFsd0
oHFssr0=oHFss0*exp(-qoHFss0*r0HF)
oHFsdr0=oHFsd0*exp(-qoHFsd0*r0HF)
qoHFss=0.2863260142 qoHFsd=0.473014452
oHFss=oHFssr0*exp(qoHFss*r0HF)
oHFsd=oHFsdr0*exp(qoHFsd*r0HF)
fHHss=0 qHHss=0.5
fHFsp=0 qHFsp=0
oHHss=0 oHFsp=0 qoHFsp=0
# C-C
%% const Ry=13.61 au=0.529177 d0d=1.54/au
# Harrison translated to exponential scaling
# vsss=-5/{Ry}*exp(2) vsps=4.7/{Ry}*exp(2)
# vpps=5.5/{Ry}*exp(2) vppp=-1.55/{Ry}*exp(2)
# decayCC=2/d0d mCC=0 pCC=2*decayCC bCC=38 CCmode=2
# Harrison's power law (Xu, Wang, Chan and Ho, JPCM 4, 6047 (1992))
vsss=-5/{Ry}*{d0d}^2 vsps=4.7/{Ry}*{d0d}^2
vpps=5.5/{Ry}*{d0d}^2 vppp=-1.55/{Ry}*{d0d}^2
decayCC=2 mCC=-4 pCC=0 bCC=43 CCmode=3
qssCC=decayCC qspCC=decayCC qppCC=decayCC
CCscal=1 oCCscal=0
fCCsss=CCscal*vsss fCCsps=CCscal*vsps
fCCpps=CCscal*vpps fCCppp=CCscal*vppp
oCCsss=-oCCscal*vsss oCCsps=-oCCscal*vsps
oCCpps=-oCCscal*vpps oCCppp=-oCCscal*vppp
# Terence C-C model (GSP)
# CCmode=5
# CCsss=-0.37241 CCsps=0.481098 CCpps=0.32075 CCppp=-0.06013
# CCnsss=2.95401 CCnsps=2.92818 CCnpps=2.93431 CCnppp=2.92822
# CCnc=6.5 CCr0=2.90319 CCrc=4.11960
# CCA=1.15575
# CCnp=3.69592
# CCncp=5.96232 CCr0p=CCr0 CCrcp=4.1950
# Fe-Fe pair potential
%% ifdef sd
%% ifdef scale
%% ifdef PRBModel
b0=536 m0=0 p0=1.49 b1=-371.2 m1=0 p1=1.413111
%% else
b0=665.6 m0=0 p0=1.408429 b1=-536.8 m1=0 p1=1.362971
%% endif
%% else
b0=698.666667 m0=0 p0=1.52 b1=-517.466667 m1=0 p1=1.4576
%% endif
%% else
%% ifdef scale
b0=682.8 m0=0 p0=1.5165 b1=-466.8 m1=0 p1=1.435
%% else
b0=683.1 m0=0 p0=1.5376 b1=-459.5 m1=0 p1=1.4544
%% endif
%% endif
# Fe-C pair potential
q0CF=2.396165226 n0CF=0 b0CFfac=0.7711879106
q1CF=1.555534479 n1CF=0 b1CFfac=-0.01932497471
b0CF0=1000 b0CF=b0CF0*b0CFfac
b1CF0=1000 b1CF=b1CF0*b1CFfac
# Fe-H pair potential
qHF=2.69224661 nHF=-1 bHFfac=0.2995633136
bHF0=1000 bHF=bHF0*bHFfac
# C-C pair potential
# see C-C hopping above
# cut-offs in alat units
%% ifdef scale
ascale = alat
%% else
ascale = 1
%% endif
rmaxh=rmaxhau/ascale
r1CFs=r1CFsau/ascale rcCFs=rcCFsau/ascale
r1CFsp=r1CFsau/ascale rcCFsp=rcCFsau/ascale
r1CFd=r1CFdau/ascale rcCFd=rcCFdau/ascale
r1CFpp=r1CFppau/ascale rcCFpp=rcCFppau/ascale
r1HF=r1HFau/ascale rcHF=rcHFau/ascale
r1CC=r1CCau/ascale rcCC=rcCCau/ascale
r1ss=r1ssau/ascale rcss=rcssau/ascale r1sd=r1sdau/ascale rcsd=rcsdau/ascale
r1dd=r1ddau/ascale rcdd=rcddau/ascale r1pp=r1ppau/ascale rcpp=rcppau/ascale
r1CFpp=r1CFppau/ascale rcCFpp=rcCFppau/ascale
r1HFpp=r1HFppau/ascale rcHFpp=rcHFppau/ascale
CCrc1=CCrc1au/ascale CCrc2=CCrc2au/ascale
%% ifdef mpol
spp=0 ppd=0 sdd=0 pdp=1 ddd=3 pdf=0 ddg=6
%% else
spp=0 ppd=0 sdd=0 pdp=0 ddd=0 pdf=0 ddg=0
%% endif
force=1 pv=1 mol=0
ITER CONV=conv CONVC=qtol NIT={nitq} MIX=A{nx},b={beta}
DYN
%% if dyn==1|dyn==2|dyn==3
MD[MODE={dyn} TSTEP={tstep/fs} TEMP={temp/K} TAUP={taup/fs}
TIME={time/fs} TAUB={taub/fs}]
%% elseif relax>0
MSTAT[MODE={relax} HESS={hess} XTOL={xtol} GTOL={gtol}
STEP={step} NKILL={nkill}] NIT={nitf}
%% endif
STRUC NBAS={nbas} NSPEC={nspec} NL={fp?5:3} ALAT=alat
PLAT= %(PLAT_STR)s
SITE
%(SITE_STR)s
BZ NKABC=nk TETRA={tetra} METAL={metal}
EFMAX=2 EF0=ef0 DELEF=0.01 N={N} W={width}
NPTS=5001 BZJOB=bzj SAVDOS=T NOINV=F
INVIT=F MULL=mull DOS=-4.5 1 EFMAX=2
HAM NSPIN={nsp} ELIND=-0.8 GMAX=gmax REL=T SO={so}
XCFUN={xcf} GGA={gga} FORCES=12
PWMODE=pwmode PWEMIN=1 PWEMAX=pwemax OVEPS=oveps
SPEC
ATOM=Fe Z=26 R=R I=stniFe A=0.025 AMASS=55.845/{amass}
IDU= 0 0 0 0 UH= 0 0 0 0 JH=stniFe stniFe stniFe stniFe
COLOUR=0.1 0.1 0.1 RADIUS=0.5
%% ifdef fp
LMX=2 LMXA=4 KMXA=4 LFOCA=1
RSMH=0.95 0.95 0.95 0 EH=-0.1 -0.1 -0.1 -0.1
RSMH2=0.95 0.95 0.95 EH2=-1.1 -1.1 -1.1
Q=2 0 6 MMOM=0 0 2 PZ=0 {cpl?3.9:0}
%% else
IDXDN={sd?1:3} 3 1 QPOL= spp ppd sdd pdp ddd pdf ddg 0 0 0
%% endif
ATOM=C Z=6 R=RC I=stniC A=0.025 AMASS=12.0107/{amass}
LMX=2 LMXL=2 LMXA=2
IDU= 0 0 0 0 UH= 0 0 0 0 JH=stniC stniC stniC stniC
COLOUR=0.5 0 0 RADIUS=0.25
RSMH=0.9 0.9 0.9 0 EH=-0.1 -0.1 -0.1 -0.1
MMOM=0 2 0
%% ifndef fp
IDXDN=1 1 3
%% endif
ATOM=H Z=1 R=RH I=stniH A=0.025 AMASS=1.00794/{amass}
LMX=2 LMXL=2 LMXA=2
IDU= 0 0 0 0 UH= 0 0 0 0 JH=stniH stniH stniH stniH
RSMH=RH/1.5 RH/1.5 RH/1.5 0 EH=-0.1 -0.1 -0.1 -0.1
MMOM=1 0 0
COLOUR=0.9 0.2 0.2 RADIUS=0.2
%% ifndef fp
IDXDN=1 3 3
%% endif
START CNTROL=T
ATOM=Fe P= 4 4 3 4 4 3
Q= q0s/{nsp} esFe Us
q0p/{nsp} epFe Up
(q0dFe+momFe)/{nsp} edFe UdFe
q0s/{nsp} esFe Us
q0p/{nsp} epFe Up
(q0dFe-momFe)/{nsp} edFe UdFe
ATOM=C P= 1 2 3 1 2 3
Q= q0sC/{nsp} esC UC
q0pC/{nsp} epC UC
0 0 0
q0sC/{nsp} esC UC
q0pC/{nsp} epC UC
0 0 0
ATOM=H P= 1 2 3 1 2 3
Q= (q0H+momH)/{nsp} esH UH
0 0 0
0 0 0
(q0H-momH)/{nsp} esH UH
0 0 0
0 0 0
OPTIONS ASA[ADNF[0] NSPH[0] TWOC[0] CCOR[1]]
ME
2
Fe Fe MEMODE=2 PPMODE=10 POLY=5 CUTMOD=cutmod CUTPP=r1pp rcpp
| fss fsp fpp -fpp/2 fsd fpd -fpd/sqrt(3)
fdds fddp fddd
DECAY=qss qsp qpp qpp qsd qpd qpd qdds qddp qddd
CUT=r1ss rcss 0 0 0 0 0 0 r1sd rcsd 0 0 0 0 r1dd rcdd r1dd rcdd r1dd rcdd
@ oss osp opp -opp/2 osd opd -opd/sqrt(3)
odds oddp oddd
DECAY=qoss qsp qpp qpp qosd qpd qpd qdds qddp qddd
CUT=r1ss rcss 0 0 0 0 0 0 r1sd rcsd 0 0 0 0 r1dd rcdd r1dd rcdd r1dd rcdd
! b0 m0 p0 b1 m1 p1 0 0 0
C C MEMODE=CCmode PPMODE=10 POLY=5 CUTMOD=cutmod CUTPP=r1CC rcCC
| fCCsss fCCsps fCCpps fCCppp 0 0 0 0 0 0
DECAY=qssCC qspCC qppCC qppCC 0 0 0 0 0 0
CUT= r1CC rcCC r1CC rcCC r1CC rcCC r1CC rcCC
0 0 0 0 0 0 0 0 0 0 0 0
@ oCCsss oCCsps oCCpps oCCppp 0 0 0 0 0 0
DECAY=qssCC qspCC qppCC qppCC 0 0 0 0 0 0
CUT= r1CC rcCC r1CC rcCC r1CC rcCC r1CC rcCC
0 0 0 0 0 0 0 0 0 0 0 0
! bCC mCC pCC 0 0 0 0 0 0
# Terence C-C model (GSP)
# 2 2 MEMODE=CCmode PPMODE=30 POLY=5 CUTMOD=2 CUTPP=CCrc1 CCrc2
# | CCsss CCnsss CCnc CCr0 CCrc
# CCsps CCnsps CCnc CCr0 CCrc
# CCpps CCnpps CCnc CCr0 CCrc
# CCppp CCnppp CCnc CCr0 CCrc
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# CUT= CCrc1 CCrc2 CCrc1 CCrc2 CCrc1 CCrc2 CCrc1 CCrc2
# 0 0 0 0 0 0 0 0 0 0 0 0
# @ 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# 0 0 0 1 1
# CUT= CCrc1 CCrc2 CCrc1 CCrc2 CCrc1 CCrc2 CCrc1 CCrc2
# 0 0 0 0 0 0 0 0 0 0 0 0
# ! CCA 1 -1 CCnp CCncp CCr0p CCrcp 0 0
Fe C MEMODE=2 PPMODE=10 POLY=5 CUTMOD=cutmod CUTPP=r1CFpp rcCFpp
| fCFss fCFsp 0 0 fCFsd fCFpds fCFpdp 0 0 0
DECAY=qCFss qCFsp 0 0 qCFsd qCFpds qCFpdp 0 0 0
CUT= r1CFs rcCFs r1CFsp rcCFsp 0 0 0 0
r1CFd rcCFd r1CFd rcCFd r1CFd rcCFd 0 0 0 0 0 0
@ oCFss oCFsp 0 0 oCFsd oCFpds oCFpdp 0 0 0
DECAY=qoCFss qoCFsp 0 0 qoCFsd qoCFpds qoCFpdp 0 0 0
CUT= r1CFs rcCFs r1CFsp rcCFsp 0 0 0 0
r1CFd rcCFd r1CFd rcCFd r1CFd rcCFd 0 0 0 0 0 0
! b0CF n0CF q0CF b1CF n1CF q1CF 0 0 0
C Fe MEMODE=2 PPMODE=10 POLY=5 CUTMOD=cutmod CUTPP=r1CFpp rcCFpp
| fCFss fCFsp 0 0 fCFsd fCFpds fCFpdp 0 0 0
DECAY=qCFss qCFsp 0 0 qCFsd qCFpds qCFpdp 0 0 0
CUT= r1CFs rcCFs r1CFsp rcCFsp 0 0 0 0
r1CFd rcCFd r1CFd rcCFd r1CFd rcCFd 0 0 0 0 0 0
@ oCFss oCFsp 0 0 oCFsd oCFpds oCFpdp 0 0 0
DECAY=qoCFss qoCFsp 0 0 qoCFsd qoCFpds qoCFpdp 0 0 0
CUT= r1CFs rcCFs r1CFsp rcCFsp 0 0 0 0
r1CFd rcCFd r1CFd rcCFd r1CFd rcCFd 0 0 0 0 0 0
! b0CF n0CF q0CF b1CF n1CF q1CF 0 0 0
Fe H MEMODE=2 PPMODE=10 POLY=5 CUTMOD=cutmod CUTPP=r1HFpp rcHFpp
| fHFss fHFsp 0 0 fHFsd 0 0 0 0 0
DECAY=qHFss qHFsp 0 0 qHFsd 0 0 0 0 0
CUT= r1HF rcHF r1HF rcHF 0 0 0 0 r1HF rcHF 0 0 0 0 0 0 0 0 0 0
@ oHFss oHFsp 0 0 oHFsd 0 0 0 0 0
DECAY=qoHFss qoHFsp 0 0 qoHFsd 0 0 0 0 0
CUT= r1HF rcHF r1HF rcHF 0 0 0 0 r1HF rcHF 0 0 0 0 0 0 0 0 0 0
! bHF nHF qHF 0 0 0 0 0 0
H Fe MEMODE=2 PPMODE=10 POLY=5 CUTMOD=cutmod CUTPP=r1HFpp rcHFpp
| fHFss fHFsp 0 0 fHFsd 0 0 0 0 0
DECAY=qHFss qHFsp 0 0 qHFsd 0 0 0 0 0
CUT= r1HF rcHF r1HF rcHF 0 0 0 0 r1HF rcHF 0 0 0 0 0 0 0 0 0 0
@ oHFss oHFsp 0 0 oHFsd 0 0 0 0 0
DECAY=qoHFss qoHFsp 0 0 qoHFsd 0 0 0 0 0
CUT= r1HF rcHF r1HF rcHF 0 0 0 0 r1HF rcHF 0 0 0 0 0 0 0 0 0 0
! bHF nHF qHF 0 0 0 0 0 0
H H MEMODE=2 PPMODE=0
| 0 0 0 0 0 0 0 0 0 0
DECAY=0 0 0 0 0 0 0 0 0 0
CUT= 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
@ 0 0 0 0 0 0 0 0 0 0
DECAY=0 0 0 0 0 0 0 0 0 0
CUT= 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
! 0 0 0 0 0 0 0 0 0
C H MEMODE=2 PPMODE=0
| 0 0 0 0 0 0 0 0 0 0
DECAY=0 0 0 0 0 0 0 0 0 0
CUT= 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
@ 0 0 0 0 0 0 0 0 0 0
DECAY=0 0 0 0 0 0 0 0 0 0
CUT= 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
! 0 0 0 0 0 0 0 0 0
H C MEMODE=2 PPMODE=0
| 0 0 0 0 0 0 0 0 0 0
DECAY=0 0 0 0 0 0 0 0 0 0
CUT= 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
@ 0 0 0 0 0 0 0 0 0 0
DECAY=0 0 0 0 0 0 0 0 0 0
CUT= 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
! 0 0 0 0 0 0 0 0 0
TB FORCES=force EVDISC=T RMAXH=rmaxh TRH=T RHO=T 3PV=pv
MOL=mol GAMMA=F PAIR=pair SCALE={scale}
UL={ul} IODEL={io} OVLP={ovlp} TBU={tbu} NOUAVG={nav} U1={u1}
EWALD TOL=ewtol NKDMX=1999 NKRMX=1999
OPTIONS ASA[ADNF[0] NSPH[0] TWOC[0] CCOR[1]]"""
# generate a control file from template for this atomic config
# NOTE(review): `ctrl_template`, `atoms`, `alat`, `nk` and `n` are defined
# earlier in the file (outside this excerpt).
write_control_file('ctrl.fe', ctrl_template, atoms, alat, nk=nk)
# setup QUIP Potential interfacing to LMTO TBE library
# control file is the one written above from template
# NB: order of types must match order in control file
tb_pot = Potential('IP LMTO_TBE', param_str="""
<params>
<LMTO_TBE_params n_types="3" control_file="ctrl.fe">
<per_type_data type="1" atomic_num="26"/>
<per_type_data type="2" atomic_num="6"/>
<per_type_data type="3" atomic_num="1"/>
</LMTO_TBE_params>
</params>""")
print tb_pot
# compute energies and forces for a range of lattice constants
if True:  # NOTE(review): always-true guard — presumably a quick on/off toggle for this scan
    a = np.linspace(2.5, 3.0, 5)  # lattice constants to scan (units per BodyCenteredCubic; presumably Angstrom — confirm)
    e = []  # potential energy per lattice constant
    f = []  # x-component of the force on atom 0 per lattice constant
    configs = []  # snapshot of each configuration
    for aa in a:
        atoms = BodyCenteredCubic(symbol='Fe',
                                  latticeconstant=aa)
        atoms *= [n, n, n]  # replicate into an n x n x n supercell
        atoms.set_calculator(tb_pot)
        e.append(atoms.get_potential_energy())
        f.append(atoms.get_forces()[0, 0])
        configs.append(atoms.copy())
    # print lattice constants, energies and forces
    print 'a = ', list(a)
    print 'e = ', list(e)
    print 'f = ', list(f)
|
# Print the integers 1 through 10; break ends the loop once n passes 10.
for n in range(1, 101):
    if n > 10:
        # n == 11 here: the condition holds, so stop looping.
        break
    print(n)
print('end')
#!/usr/bin/env python
from flask import Flask, render_template, request, url_for, send_from_directory, redirect, session
from flask_sqlalchemy import SQLAlchemy
import json
import sys
import ast
from collections import OrderedDict
import datetime
import os
import logging
import uuid
import glob
import shutil
from credibility_toolkit import parse_url, parse_text
import platform
app = Flask(__name__)
monthNames = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
badCollectionData = {}
##### Main setup that must live at module level so it also runs when served by
##### gunicorn/nginx; the __main__ block is only executed when run with python.
credsFile = "../dbCredentials.json"
jsonCreds = ""
try:
    jsonCreds = open(credsFile)
except:
    sys.stderr.write("Error: Invalid database credentials file\n")
    sys.exit(1)
creds = json.load(jsonCreds)
# Months with known-bad crawling; used to asterisk counts in the UI.
bcd = open("badCollection.json")
badCollectionData = json.load(bcd)
POSTGRES = {
    'user': creds["user"],
    'pw': creds["passwd"],
    'db': creds["db"],
    'host': creds["host"],
    'port': creds["port"]
}
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://%(user)s:%(pw)s@%(host)s:%(port)s/%(db)s' % POSTGRES
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['DEBUG'] = True
db = SQLAlchemy(app)
db.session.commit()
# Clean out stale per-session temp files left over from previous runs.
for tmp in glob.glob(os.path.join("static", "tmp-*")):
    try:
        os.remove(tmp)
    except:
        pass
# NOTE(review): a random secret key invalidates every session on restart and
# breaks multi-worker deployments — confirm this is intended.
app.secret_key = str(uuid.uuid4())
####################################################
@app.after_request
def add_header(response):
    """
    Stamp every outgoing response: force the latest IE rendering engine or
    Chrome Frame, and effectively disable caching of rendered pages.
    """
    stamped = (
        ('X-UA-Compatible', 'IE=Edge,chrome=1'),
        ('Cache-Control', 'public, max-age=0'),
    )
    for header_name, header_value in stamped:
        response.headers[header_name] = header_value
    return response
@app.route("/")
def home():
    """Landing page."""
    return render_template('home.html')
@app.route("/credibilitytoolkit")
def credibility():
    """Credibility toolkit page; lazily creates a per-session working JSON file."""
    if 'tmpfile' not in session or not os.path.isfile(session['tmpfile']):
        # Fresh session (or file was deleted): allocate a new temp path and
        # seed it from the bundled example output.
        session['tmpfile'] = os.path.join("static", "tmp-" + str(uuid.uuid4()))
        if platform.system() == "Windows":
            # os.path.join yields backslashes on Windows; the template expects URL-style slashes.
            session['tmpfile'] = session['tmpfile'].replace("\\", "/")
        shutil.copy2(os.path.join("static", "output.json"), session['tmpfile'])
    return render_template('credibility.html', json=session['tmpfile'])
@app.route("/visualizationtoolkit")
def main():
    """Source-comparison visualization page."""
    return render_template('sourceComparator.html')
@app.route("/about")
def aboutPage():
    """About page."""
    return render_template('about.html')
@app.route("/datasetRequest")
def dataPage():
    """Dataset request page."""
    return render_template('datasetRequest.html')
@app.route("/help")
def help():
    """Help page."""
    return render_template('help.html')
@app.route("/newssource")
def sourcePage():
    """Single news-source detail page."""
    return render_template('newsSource.html')
@app.route("/allsources")
def allSourcesPage():
    """All-sources listing page."""
    return render_template('allSources.html')
@app.route("/features")
def features():
    """Feature description page."""
    return render_template('features.html')
# --- Credibility toolkit internal APIs
def send_error(text):
    """Render a minimal page that alerts *text* in the browser, then bounces
    it back to the credibility toolkit page."""
    page = """
<html><body><script>
alert("%s");
location.replace("%s");
</script></body></html>
"""
    return page % (text, url_for('credibility'))
@app.route("/article", methods=['GET', 'POST'])
def article():
    """
    when the user wants to add an article, this function will get called. We will
    then load the json file, run the credibility toolkit on the url, and then
    append the results to the json file.
    """
    if 'tmpfile' not in session or not os.path.isfile(session['tmpfile']):
        return 'ERROR: Unable to find JSON file'
    if 'url' not in request.form :
        return 'ERROR: No URL in POST'
    url = request.form['url']
    with open(session['tmpfile'], 'r') as infile:
        output = json.load(infile)
    # Drop any previous entry for this url so it gets re-parsed.
    # Fix: wrap in list() — on Python 3, filter() returns a lazy iterator
    # which json.dump cannot serialize. Same result on Python 2.
    output["urls"] = list(filter(lambda x: x["url"] != url, output["urls"]))
    try:
        parse_url(output, url)
        with open(session['tmpfile'], 'w') as outfile:
            json.dump(output, outfile, indent=2)
    except Exception as inst:
        return send_error(inst)
    return redirect(url_for('credibility'), code=302)
@app.route("/clear")
def clear():
    """Discard the session's working file and start a fresh, empty one."""
    old = session.pop('tmpfile', None)
    if old is not None:
        try:
            os.remove(old)
        except:
            pass
    fresh = os.path.join("static", "tmp-" + str(uuid.uuid4()))
    if platform.system() == "Windows":
        fresh = fresh.replace("\\", "/")
    session['tmpfile'] = fresh
    with open(fresh, 'w') as outfile:
        json.dump({'urls':[]}, outfile, indent=2)
    return redirect(url_for('credibility'), code=302)
@app.route("/reset")
def reset():
    """Drop the session's working file entirely and return to the toolkit page."""
    old = session.pop('tmpfile', None)
    if old is not None:
        try:
            os.remove(old)
        except:
            pass
    return redirect(url_for('credibility'), code=302)
@app.route("/manual", methods=['GET', 'POST'])
def manual():
    """Run the credibility toolkit on a manually entered title/text pair and
    append the result to the session's JSON file."""
    if 'tmpfile' not in session or not os.path.isfile(session['tmpfile']):
        return send_error('ERROR: Unable to find JSON file')
    for field, message in (('manual_entry_title', 'ERROR: Empty Title'),
                           ('manual_entry_text', 'ERROR: Empty Text')):
        if field not in request.form:
            return send_error(message)
    title = request.form['manual_entry_title']
    text = request.form['manual_entry_text']
    with open(session['tmpfile'], 'r') as infile:
        output = json.load(infile)
    try:
        parse_text(output, title, text)
        with open(session['tmpfile'], 'w') as outfile:
            json.dump(output, outfile, indent=2)
    except Exception as inst:
        return send_error(inst)
    return redirect(url_for('credibility'), code=302)
@app.route("/remove", methods=['GET', 'POST'])
def remove():
    """
    remove an article from output.json
    """
    if 'tmpfile' not in session or not os.path.isfile(session['tmpfile']):
        return send_error('ERROR: Unable to find JSON file')
    url = request.form['url']
    if len(url) > 3:
        with open(session['tmpfile'], 'r') as infile:
            output = json.load(infile)
        # Fix: wrap in list() — on Python 3, filter() returns a lazy iterator
        # which json.dump cannot serialize. Same result on Python 2.
        output["urls"] = list(filter(lambda x: x["url"] != url, output["urls"]))
        with open(session['tmpfile'], 'w') as outfile:
            json.dump(output, outfile, indent=2)
    return redirect(url_for('credibility'), code=302)
### --- Define internal APIs ---
### -- Internal APIs for news source page ---
@app.route("/getSourceMetadata")
def sendSourceMetadata():
    """Return the latest computed credibility metadata for one source as JSON.

    Query args:
        source: name of the news source to look up.
    """
    source = request.args.get("source")
    # Fixes: `source` is client input and is now bound as a query parameter
    # instead of being string-formatted into the SQL (injection risk), and
    # DESC is added so LIMIT 1 really returns the *latest* computed row.
    sqlStatement = "SELECT * " \
                   "FROM sourceMetadata " \
                   "WHERE source = %s " \
                   "ORDER BY dateComputed DESC " \
                   "LIMIT 1 "
    results = db.engine.execute(sqlStatement, (source,))
    data = {}
    for row in results:
        data["perCredible"] = '{0:.3g}'.format(row[1]*100)
        data["perImpartial"] = '{0:.3g}'.format(row[2]*100)
        data["isSatire"] = row[3]
    response = app.response_class(
        response = json.dumps(data),
        status = 200,
        mimetype='application/json'
    )
    return response
# Returns the valid month + year combinations in which the source published.
@app.route("/getSourcePublishDates")
def sendValidSourcePublishDates():
    """Return every "Mon YYYY" in which the given source published, as JSON."""
    source = request.args.get("source")
    data = {"dates": [t.strftime("%b %Y") for t in getSourcePublishDates(source)]}
    return app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
def getSourcePublishDates(source):
    """Return sorted month-start dates on which `source` published articles.

    An empty source string means "all sources". Fixes: the client-supplied
    source is now bound as a query parameter (SQL injection risk before),
    and a leftover Python 2 debug print was removed.
    """
    sqlStatement = "SELECT make_date(CAST(y AS INTEGER), CAST(m AS INTEGER), 1) " \
                   "FROM" \
                   "( " \
                   "SELECT EXTRACT(YEAR FROM datePublished) as y, EXTRACT(MONTH FROM datePublished) as m " \
                   "FROM articleFeatures "
    params = None
    if (source != ""):
        sqlStatement += "WHERE source = %s "
        params = (source,)
    sqlStatement += "GROUP BY EXTRACT(YEAR FROM datePublished), EXTRACT(MONTH FROM datePublished) " \
                    ") as findDates"
    if params is not None:
        results = db.engine.execute(sqlStatement, params)
    else:
        results = db.engine.execute(sqlStatement)
    times = [row[0] for row in results]
    times.sort()
    return times
@app.route("/getSourcePublishCounts")
def sendSourcePublishCounts():
    """Return per-month article counts for `source` as ordered JSON.

    Months without articles get a 0 count; a month name is prefixed with '*'
    when badCollection.json marks our crawl as bad for that month.
    Fixes: parameterized SQL (injection risk), `break` instead of `continue`
    in the bad-collection scan (continue only skipped one entry and could
    never exit early), and a redundant dict assignment removed.
    """
    source = request.args.get("source")
    allDates = getSourcePublishDates("")
    sqlStatement = "SELECT EXTRACT(MONTH FROM datePublished) as m, EXTRACT(YEAR FROM datePublished) as y, COUNT(id) " \
                   "FROM articleFeatures " \
                   "WHERE source = %s " \
                   "GROUP BY source, m, y"
    results = db.engine.execute(sqlStatement, (source,))
    data = {}
    for row in results:
        month = int(row[0])
        year = int(row[1])
        date = datetime.datetime(year, month, 1).strftime("%b %Y")
        data[date] = row[2]
    dataSorted = OrderedDict()
    # Add 0 counts for months not available for the source; asterisk the month
    # name when the gap is due to known-bad collection.
    for d in allDates:
        date = d.strftime("%b %Y")
        if date not in data:
            for x in badCollectionData.get(source, []):
                if x["month"] == d.month and x["year"] == d.year:
                    date = "*" + date
                    break
            dataSorted[date] = 0
        else:
            dataSorted[date] = data[date]
    response = app.response_class(
        response = json.dumps(dataSorted),
        status = 200,
        mimetype='application/json'
    )
    return response
@app.route("/getSourceFacebookEngagement")
def sendSourceFacebookEngagement():
    """Return monthly Facebook comment/share/reaction totals for `source` as
    ordered JSON, zero-filling months in which the source has no articles.

    Fixes: parameterized SQL (injection risk); added the missing space before
    GROUP BY (the concatenated SQL previously read "...'x'GROUP BY"); dropped
    the dead `dates =` alias.
    """
    source = request.args.get("source")
    allDates = getSourcePublishDates("")
    sqlStatement = "SELECT EXTRACT(MONTH FROM datePublished) as m, EXTRACT(YEAR FROM datePublished) as y, SUM(FB_Comment_Counts), SUM(FB_Share_Counts), SUM(FB_Reaction_Counts ) " \
                   "FROM articleFeatures " \
                   "WHERE source = %s " \
                   "GROUP BY source, m, y"
    results = db.engine.execute(sqlStatement, (source,))
    data = {}
    for row in results:
        month = int(row[0])
        year = int(row[1])
        date = datetime.datetime(year, month, 1).strftime("%b %Y")
        data[date] = {"comments": row[2], "shares": row[3], "reactions": row[4]}
    dataSorted = OrderedDict()
    # Add 0 counts for months not available for the source.
    for d in allDates:
        date = d.strftime("%b %Y")
        dataSorted[date] = data.get(date, {"comments": 0, "shares": 0, "reactions": 0})
    response = app.response_class(
        response = json.dumps(dataSorted),
        status = 200,
        mimetype='application/json'
    )
    return response
@app.route("/getMostSharedArticles")
def sendMostSharedArticles():
    """Return the 10 most-shared articles (url, title, FB share count) for
    `source` as JSON.

    Fix: `source` is client input and is now bound as a query parameter
    instead of string-formatted into the SQL (injection risk).
    """
    source = request.args.get("source")
    sqlStatement = "SELECT articleMetadata.url, articleMetadata.title, articleFeatures.FB_Share_Counts " \
                   "FROM articleMetadata " \
                   "INNER JOIN articleFeatures ON articleMetadata.id = articleFeatures.id " \
                   "WHERE articleFeatures.source = %s " \
                   "ORDER BY articleFeatures.FB_Share_Counts DESC " \
                   "LIMIT 10"
    results = db.engine.execute(sqlStatement, (source,))
    data = {}
    data["articles"] = []
    for row in results:
        articleData = {}
        articleData["url"] = row[0]
        articleData["title"] = row[1]
        articleData["shares"] = row[2]
        data["articles"].append(articleData)
    response = app.response_class(
        response = json.dumps(data),
        status = 200,
        mimetype='application/json'
    )
    return response
@app.route("/getTopSourcePhrases")
def sendTopPhrases():
    """Return the top phrases recorded for `source` in a given month/year.

    Fix: all three client-supplied values are now bound as query parameters
    instead of being string-formatted into the SQL (injection risk).
    """
    source = request.args.get("source")
    month = int(request.args.get("month"))
    year = int(request.args.get("year"))
    sqlStatement = "SELECT * " \
                   "FROM topSourcePhrases " \
                   "WHERE source = %s and month = %s and year = %s "
    results = db.engine.execute(sqlStatement, (source, month, year))
    data = {}
    data["orderedPhrases"] = []
    for row in results:
        # Columns 2-6 hold the top 5 phrases; skip placeholder "NULL" entries.
        for i in range(2, 7):
            phrase = row[i]
            if (phrase != "NULL"):
                data["orderedPhrases"].append(phrase)
    response = app.response_class(
        response = json.dumps(data),
        status = 200,
        mimetype='application/json'
    )
    return response
## -- Internal APIs for bubble chart page ---
@app.route("/getBubbleChartData")
def sendBubbleChartData():
    """Aggregate four articleFeatures columns per source for the bubble chart.

    Query args: sourceValue ("Median"/"Average"/"Maximum"), xAxis/yAxis/
    bubbleColor/bubbleSize (articleFeatures column names), startDate/endDate,
    and `sources` (a Python-literal list of source names).
    """
    sourceValue = request.args.get("sourceValue")
    # NOTE(review): the four axis args are client-supplied *column names*
    # interpolated straight into the SQL below; column identifiers cannot be
    # bound as parameters — they should be validated against a whitelist of
    # articleFeatures columns. Same for startDate/endDate/sources values.
    xAxis = request.args.get("xAxis")
    yAxis = request.args.get("yAxis")
    bubbleColor = request.args.get("bubbleColor")
    bubbleSize = request.args.get("bubbleSize")
    startDate = request.args.get("startDate")
    endDate = request.args.get("endDate")
    # `sources` arrives as a Python-style list literal; literal_eval parses it
    # without evaluating arbitrary code.
    selectedSources = ast.literal_eval(request.args.get('sources'))
    selectedSources = [s.strip() for s in selectedSources]
    # Quote each name for the SQL IN (...) clause built below.
    selectedSources = ["'{0}'".format(s) for s in selectedSources]
    sqlStatement = ""
    if (sourceValue == "Median"):
        sqlStatement = "SELECT source, median(%s) as median_value, median(%s) as median_value, median(%s) as median_value, median(%s) as median_value, " %(xAxis, yAxis, bubbleColor, bubbleSize)
    elif (sourceValue == "Average"):
        sqlStatement = "SELECT source, AVG(%s), AVG(%s), AVG(%s), AVG(%s), " %(xAxis, yAxis, bubbleColor, bubbleSize)
    elif (sourceValue == "Maximum"):
        sqlStatement = "SELECT source, MAX(%s), MAX(%s), MAX(%s), MAX(%s), " %(xAxis, yAxis, bubbleColor, bubbleSize)
    # NOTE(review): there is no space before "GROUP BY" — the concatenated SQL
    # reads "...(%s)GROUP BY source", which looks like a syntax error; verify.
    sqlStatement += "COUNT('id'), MAX(FB_Share_Counts), MAX(FB_Comment_Counts), MAX(FB_Reaction_Counts) " \
                    "FROM articleFeatures " \
                    "WHERE datePublished::date >= '%s' and datePublished::date <= '%s' and source in (%s)" \
                    "GROUP BY source" %(startDate, endDate, ", ".join(selectedSources))
    results = db.engine.execute(sqlStatement)
    data = {}
    data["values"] = []
    numArticles = 0
    numSources = 0
    mostFBShares = 0
    mostFBComments = 0
    mostFBReactions = 0
    for row in results:
        # NOTE(review): leftover Python 2 debug print.
        print str([row[0].encode('utf-8')] + list(row[1:5])) + ","
        # Track running maxima across sources for the summary panel.
        if (mostFBShares < row[6]):
            mostFBShares = row[6]
        if (mostFBComments < row[7]):
            mostFBComments = row[7]
        if (mostFBReactions < row[8]):
            mostFBReactions = row[8]
        numSources += 1
        numArticles += row[5]
        sourceData = {}
        sourceData["source"] = row[0]
        # Values scaled x1000 — presumably undoing a stored 0-1 normalization;
        # confirm against the chart's front-end code.
        sourceData["xAxis"] = row[1] * 1000
        sourceData["yAxis"] = row[2] * 1000
        sourceData["bubbleColor"] = row[3] * 1000
        sourceData["bubbleSize"] = row[4] * 1000
        data["values"].append(sourceData)
    data["numArticles"] = formatCount(numArticles)
    data["numSources"] = numSources
    data["mostFBShares"] = formatCount(mostFBShares)
    data["mostFBComments"] = formatCount(mostFBComments)
    data["mostFBReactions"] = formatCount(mostFBReactions)
    response = app.response_class(
        response = json.dumps(data),
        status = 200,
        mimetype='application/json'
    )
    return response
@app.route("/getDateRange")
def sendDateRange():
    """Return the earliest and latest article publish dates as JSON."""
    rows = db.engine.execute("SELECT MIN(datePublished), MAX(datePublished) "
                             "FROM articleFeatures")
    data = {}
    for earliest, latest in rows:
        data["startDate"] = earliest.strftime("%Y-%m-%d")
        data["endDate"] = latest.strftime("%Y-%m-%d")
    return app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
@app.route("/getAllSources")
def sendAllSources():
    """Return the sorted list of distinct news sources as JSON."""
    rows = db.engine.execute("SELECT DISTINCT(source) "
                             "FROM articleFeatures")
    data = {"sources": sorted(row[0] for row in rows)}
    return app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
def formatCount(num):
    """Format a count for display: values below 10000 are returned unchanged,
    larger ones as a "12.3k"-style string.

    Fixes: the original computed float(num/1000), which truncates under
    Python 2 integer division (12345 -> "12.0k"); and `num < 9999` rendered
    the four-digit value 9999 as "10.0k".
    """
    if num < 10000:
        return num
    # True division so the first decimal survives on Python 2 as well.
    newRepr = num / 1000.0
    newRepr = str(format(newRepr, '.1f')) + "k"
    return newRepr
# Static asset passthroughs: serve /js and /css from subdirectories of static/.
@app.route('/js/<path:path>')
def send_js(path):
    """
    expose the js directory so we don't have to have everything in static
    """
    return send_from_directory('static/js', path)
@app.route('/css/<path:path>')
def send_css(path):
    """
    expose the css directory so we don't have to have everything in static
    """
    return send_from_directory('static/css', path)
if __name__ == "__main__":
    # Standalone/dev entry point: repeats the module-level setup, then serves
    # directly on port 80 (production uses gunicorn/nginx and skips this block).
    credsFile = "../dbCredentials.json"
    jsonCreds = ""
    try:
        jsonCreds = open(credsFile)
    except:
        sys.stderr.write("Error: Invalid database credentials file\n")
        sys.exit(1)
    creds = json.load(jsonCreds)
    badCollectionData = open("badCollection.json")
    badCollectionData = json.load(badCollectionData)
    POSTGRES = {
        'user': creds["user"],
        'pw': creds["passwd"],
        'db': creds["db"],
        'host': creds["host"],
        'port': creds["port"]
    }
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://%(user)s:%(pw)s@%(host)s:%(port)s/%(db)s' % POSTGRES
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['DEBUG'] = True
    db = SQLAlchemy(app)
    db.session.commit()
    # Clean out stale per-session temp files from previous runs.
    for tmp in glob.glob(os.path.join("static", "tmp-*")):
        try:
            os.remove(tmp)
        except:
            pass
    # NOTE(review): hard-coded secret key checked into source control —
    # rotate it and load it from config/environment instead.
    app.secret_key = str('2e239029-814f-4ece-b99c-1f539509ca10')
    app.run(host='0.0.0.0', port=80, threaded=True)
|
import md4
import sha
import utils
# the2nd: needed for new challenge_response()
from pyDes import *
# https://programadorphp.es/docs/pear_manual_english/package.encryption.crypt-chap.html
# CHAP-MD5 (RFC 1994)
# MS-CHAPv1 (RFC 2433) http://www.rfc-base.org/txt/rfc-2433.txt
# MS-CHAPv2 (RFC 2759) http://www.rfc-base.org/txt/rfc-2759.txt
# Microsoft Vendor-specific RADIUS Attributes (MS-CHAP, MPPE)
# http://www.rfc-base.org/txt/rfc-2548.txt
def generate_nt_response_mschap(challenge,password):
    """MS-CHAPv1 NtChallengeResponse (RFC 2433): MD4-hash the password, then
    DES-encrypt the 8-octet challenge under the (padded) hash to get the
    24-octet response."""
    return challenge_response(challenge, nt_password_hash(password))
def generate_nt_response_mschap2(authenticator_challenge,peer_challenge,username,password):
    """MS-CHAPv2 GenerateNTResponse (RFC 2759).

    Derives the 8-octet challenge from the peer and authenticator challenges
    plus the user name, then computes the 24-octet DES response from the NT
    password hash.
    """
    derived_challenge = challenge_hash(peer_challenge, authenticator_challenge, username)
    return challenge_response(derived_challenge, nt_password_hash(password))
def challenge_hash(peer_challenge,authenticator_challenge,username):
    """ChallengeHash (RFC 2759): the first 8 octets of
    SHA1(PeerChallenge | AuthenticatorChallenge | UserName).

    Only the bare user name (excluding any prepended domain) should be fed in.
    """
    ctx = sha.new()
    for piece in (peer_challenge, authenticator_challenge, username):
        ctx.update(piece)
    return ctx.digest()[:8]
# Generates the NT-Hash from the plaintext password: md4(str2unicode(plaintext)).
def nt_password_hash(passwd,pad_to_21_bytes=True):
    """NtPasswordHash (RFC 2759): MD4 over the unicode form of the password.

    With pad_to_21_bytes (the default) the 16-octet digest is zero-extended
    to the 21 octets that ChallengeResponse splits into three DES keys.
    """
    # MD4 is computed over the UNICODE password, no terminating zero.
    unicode_pw = utils.str2unicode(passwd)
    ctx = md4.new()
    ctx.update(unicode_pw)
    digest = ctx.digest()
    if not pad_to_21_bytes:
        return digest
    # Five NULs take the 16-byte MD4 digest up to 21 bytes.
    return digest + '\000\000\000\000\000'
# the2nd: this pyDes-based path is much faster than the old des-module version.
def challenge_response(challenge, password_hash):
    """ChallengeResponse: DES-encrypt the challenge under three 7-octet
    slices of the 21-octet password hash, yielding the 24-octet response."""
    key_chunks = (password_hash[:7],
                  password_hash[7:],
                  (password_hash[14]) + (password_hash[15]) + "\0\0\0\0\0")
    out = ""
    for chunk in key_chunks:
        out += des(get_parity_key(chunk), ECB).encrypt(challenge)
    return out
def get_parity_key(key):
    """Expand a 7-octet key into the 8-octet DES key layout: shift bits
    across byte boundaries and force each low (parity) bit to 1."""
    expanded = ""
    carry = 0
    i = 0
    while i < 7:
        byte = ord(key[i])
        expanded += chr((byte >> i) | carry | 1)
        carry = (byte << (7 - i)) & 0xFF
        i += 1
    return expanded + chr(carry | 1)
# FIXME: we should check if this is because of pyDes is faster!
# the2nd: disabled because the two functions above are so much faster
#def challenge_response(challenge,password_hash):
# """
# ChallengeResponse(
# IN 8-octet Challenge,
# IN 16-octet PasswordHash,
# OUT 24-octet Response )
# {
# Set ZPasswordHash to PasswordHash zero-padded to 21 octets
#
# DesEncrypt( Challenge,
# 1st 7-octets of ZPasswordHash,
# giving 1st 8-octets of Response )
#
# DesEncrypt( Challenge,
# 2nd 7-octets of ZPasswordHash,
# giving 2nd 8-octets of Response )
#
# DesEncrypt( Challenge,
# 3rd 7-octets of ZPasswordHash,
# giving 3rd 8-octets of Response )
# }
# """
# zpassword_hash=password_hash
## while len(zpassword_hash)<21:
## zpassword_hash+="\0"
#
# response=""
# des_obj=des.DES(zpassword_hash[0:7])
# response+=des_obj.encrypt(challenge)
#
# des_obj=des.DES(zpassword_hash[7:14])
# response+=des_obj.encrypt(challenge)
#
# des_obj=des.DES(zpassword_hash[14:21])
# response+=des_obj.encrypt(challenge)
# return response
def generate_authenticator_response(nt_response,peer_challenge,authenticator_challenge,username,password=False,password_hash=False):
    """GenerateAuthenticatorResponse (RFC 2759): build the server's proof
    string "S=" followed by 40 uppercase hex digits.

    the2nd: modified for OTPme — either the clear-text `password` or a
    precomputed 16-octet `password_hash` may be supplied; the double MD4
    hash is derived from whichever is given.
    """
    # "Magic" constants from RFC 2759: "Magic server to client signing
    # constant" and "Pad to make it do more than one iteration".
    Magic1="\x4D\x61\x67\x69\x63\x20\x73\x65\x72\x76\x65\x72\x20\x74\x6F\x20\x63\x6C\x69\x65\x6E\x74\x20\x73\x69\x67\x6E\x69\x6E\x67\x20\x63\x6F\x6E\x73\x74\x61\x6E\x74"
    Magic2="\x50\x61\x64\x20\x74\x6F\x20\x6D\x61\x6B\x65\x20\x69\x74\x20\x64\x6F\x20\x6D\x6F\x72\x65\x20\x74\x68\x61\x6E\x20\x6F\x6E\x65\x20\x69\x74\x65\x72\x61\x74\x69\x6F\x6E"
    if password:
        password_hash = nt_password_hash(password,False)
    if password or password_hash:
        password_hash_hash = hash_nt_password_hash(password_hash)
    # First SHA1 pass: hash-of-hash, the peer's NT response, Magic1.
    ctx = sha.new()
    for piece in (password_hash_hash, nt_response, Magic1):
        ctx.update(piece)
    digest = ctx.digest()
    # Second pass folds in the derived 8-octet challenge and Magic2.
    challenge = challenge_hash(peer_challenge, authenticator_challenge, username)
    ctx = sha.new()
    for piece in (digest, challenge, Magic2):
        ctx.update(piece)
    return "S=" + convert_to_hex_string(ctx.digest())
def convert_to_hex_string(string):
    """Return *string* encoded as uppercase hex, two digits per character."""
    return "".join("%02X" % ord(ch) for ch in string)
def hash_nt_password_hash(password_hash):
    """HashNtPasswordHash (RFC 2759): irreversibly MD4 the 16-octet NT
    password hash into the 16-octet PasswordHashHash."""
    ctx = md4.new()
    ctx.update(password_hash)
    return ctx.digest()
def lm_password_hash(password):
    """LmPasswordHash (RFC 2759): uppercase the password, NUL-pad/truncate it
    to 14 octets, then DES-hash each 7-octet half into the 16-octet hash."""
    padded = password.upper()[:14]
    padded += "\0" * (14 - len(padded))
    return des_hash(padded[:7]) + des_hash(padded[7:])
def des_hash(clear):
    """DesHash (RFC 2759): irreversibly encrypt the known text "KGS!@#$%"
    using the 7-octet *clear* as the DES secret key."""
    # the2nd: imported locally because of a naming conflict with pyDes,
    # which is star-imported at module scope.
    import des
    return des.DES(clear).encrypt(r"KGS!@#$%")
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from utils import PlotGmm, DataGenerator
# Generate Data
n=2000  # number of samples to draw
dg = DataGenerator(n)
x, z = dg.generate_data()  # x: samples, z: labels (shapes per utils.DataGenerator — confirm)
# Plot the raw data with the generator's true mixture parameters
plot_gmm = PlotGmm()
plot_gmm.plot_data(x, z, dg.mu, dg.cov, dg.pi)
# Model
tfd = tfp.distributions  # shorthand aliases used throughout
tfb = tfp.bijectors
class GmmLayer(tf.keras.layers.Layer):
    """Keras layer whose trainable weights are the parameters of a
    k-component Gaussian mixture; call() returns per-sample log-likelihood."""
    def __init__(self, k=2, *args, **kwargs):
        super(GmmLayer, self).__init__(*args, **kwargs)
        # Bug fix: the original dropped `k` here, so build() silently read
        # the module-level global of the same name instead of the argument.
        self.k = k
    def build(self, input_shape):
        initnorm = tf.random_normal_initializer()
        # Cholesky factors of the component covariances, kept valid
        # (lower-triangular, positive diagonal) by the FillScaleTriL bijector.
        self.covchol = tfp.util.TransformedVariable(
            tf.eye(input_shape[-1], batch_shape=(self.k,)),
            tfb.FillScaleTriL(),
            name='covchol', dtype=tf.float32
        )
        # Keep a handle on the underlying variables so Keras tracks/trains them.
        self.covchol_val = self.covchol.variables
        self.mu = tf.Variable(initnorm(shape=(self.k, input_shape[-1])), name='mu', dtype=tf.float32)
        # Unnormalized mixture logits; softmax applied in get_params().
        self.pi = tf.Variable(initnorm(shape=(self.k,)), name='pi', dtype=tf.float32)
        self.bimix_gauss = tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(logits=self.pi),
            components_distribution=tfd.MultivariateNormalTriL(
                loc=self.mu,
                scale_tril=self.covchol
            )
        )
    def call(self, inputs):
        # Per-sample log p(x) under the current mixture.
        return self.bimix_gauss.log_prob(inputs)
    def get_params(self):
        """Return (mu, covchol, softmax(pi)) as numpy arrays."""
        covchol_tensor = tf.convert_to_tensor(self.covchol)
        pi = tf.math.softmax(self.pi)
        return [t.numpy() for t in (self.mu, covchol_tensor, pi)]
class UpdatePlotCallback(tf.keras.callbacks.Callback):
    """Redraws the GMM plot with the layer's current parameters each epoch."""
    def on_epoch_end(self, epoch, logs=None):
        layer = self.model.get_layer(name='gmm')
        mu, covchol, pi = layer.get_params()
        # Reassemble covariances from their Cholesky factors: L @ L^T.
        plot_gmm.update_plot(mu, covchol @ covchol.transpose(0,2,1), pi, epoch)
        print(f'Training: {logs}')
# Fit a k-component GMM by maximum likelihood: the layer outputs
# log_prob(x) per sample and the loss is simply its negation.
k = 2
model = tf.keras.Sequential(GmmLayer(k, name='gmm'))
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.02),
              loss=lambda y_true, y_pred: -y_pred)
# Dummy targets: the loss ignores y_true entirely.
y = np.ones(x.shape[0])
model.fit(x, y,
          batch_size=100,
          epochs=100,
          verbose=0,
          steps_per_epoch=int(len(x)/100),
          callbacks=[UpdatePlotCallback()]
          )
plt.show(block=True)
|
# Using the canvas api to query Canvas LMS to locate active courses for a user
# Before using the canvas api instantiate a new canvas object
# Import the Canvas class
from canvasapi import Canvas

# Canvas API URL
API_URL = "https://canvas.instructure.com/"
# Canvas API key
API_KEY = "your access token here"
# Canvas user id to look up (replace with a real numeric id).
# BUG FIX: the original passed unquoted placeholder text directly to
# get_user(), which was a SyntaxError.
USER_ID = 123456

# Initialize a new Canvas object
canvas = Canvas(API_URL, API_KEY)

# Fetch the user record
user = canvas.get_user(USER_ID)

# Confirm the user was found (the original bare `user.name` expression
# was a no-op and has been removed)
print(user)

# List of courses the user is actively enrolled in
courses = user.get_courses(enrollment_status='active')

# Print each course. BUG FIX: the original `for courses in courses:`
# shadowed and destroyed the iterable's name inside the loop.
for course in courses:
    print(course)
|
# Demo: int("a") raises ValueError, so execution jumps straight to the
# matching handler and the dict lookup below is never reached.
try:
    int("a")
    d = {}
    d["a"]
except ValueError:
    # BUG FIX: corrected typo in the message ("happended" -> "happened").
    print("A value exception happened.")
except KeyError:
    print("A key was not found.")
# Runs regardless of which handler (if any) fired.
print("End of the program")
from django.contrib import admin
from vpractice.models import Timu, Question, QRcomment, Replay, Attention, RepaType
# Register your models here.
class TimuAdmin(admin.ModelAdmin):
    """Admin list view for Timu: shows the question text, answer, tips,
    all option columns (A-F) and ordering; searchable by title."""
    list_display = ('id', 'title', 'answer', 'tips', 'A', 'B', 'C', 'D', 'E', 'F', 'sequence')
    search_fields = ('title', )
class ReplayAdmin(admin.ModelAdmin):
    """Admin list view for Replay: question, author, vote counts, score
    and creation time."""
    list_display = ('question', 'replay_user', 'like', 'dislike','score', 'createtime')
# Register models with the admin site; only Timu is currently enabled,
# the remaining registrations are intentionally commented out.
admin.site.register(Timu, TimuAdmin)
# admin.site.register(Question)
# admin.site.register(QRcomment)
# admin.site.register(Replay, ReplayAdmin)
# admin.site.register(Attention)
# admin.site.register(RepaType)
|
# Lottery system: ask the user for a number, compare it with a random
# number in [0, 100] and tell them whether they guessed right.
from random import randint

# Secret number drawn in [0, 100].
comp = randint(0,100)
# BUG FIX: the prompt said "1 a 100" while randint draws from 0 to 100;
# the prompt now matches the actual range.
num = int(input('Digite um número de 0 a 100: '))
if num == comp:
    # Added the missing space before the number.
    print('Você acertou! {}'.format(num))
else:
    # BUG FIX: typo "sortiado" -> "sorteado" and missing space before {}.
    print('Você errou! O número digitado foi {} e o número sorteado foi {}'.format(num,comp))
|
# Create your views here.
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from api.models.fleet_types import FleetType
from api.serializers.fleet_types import FleetTypeSerializer
# Create your views here.
class Fleet_typesViewset(viewsets.ModelViewSet):
    """CRUD API for FleetType; authenticated access only, filterable by
    workspace (?workspace=...)."""
    permission_classes = (IsAuthenticated,)
    queryset = FleetType.objects.all()
    serializer_class = FleetTypeSerializer
    filter_backends = (DjangoFilterBackend,)
    # NOTE(review): `filter_fields` was renamed `filterset_fields` in
    # django-filter 2.0 — confirm the installed version still honors this.
    filter_fields = ('workspace',)
|
import networkx as nx
from BasicGraphOperations import readGraph
# Load the two Q1 graphs (project helper) as undirected graphs.
G1 = readGraph("../Q1weighted.txt",directed=False)
G2 = readGraph("../Q1unweighted.txt",directed=False)
def maxValKey(dictionary):
    """Return the key mapped to the largest value (first key on ties)."""
    return max(dictionary, key=dictionary.get)
# Highest-degree node in each graph and its degree.
# NOTE(review): nx.degree(G) returning a plain dict is networkx 1.x
# behavior; 2.x returns a DegreeView without .values()/.keys() — confirm
# the pinned networkx version.
node1 = maxValKey(nx.degree(G1))
node2 = maxValKey(nx.degree(G2))
print(node1,G1.degree(node1))
print(node2,G2.degree(node2))
import networkx as nx
from BasicGraphOperations import readGraph
import numpy
# Directed citation graph for Q2 and its dense adjacency matrix.
G1 = readGraph("Q2edgelist.txt",directed=True)
adjMat = nx.adjacency_matrix(G1)
print(adjMat.toarray())
"""
TODO Faster with just ccm = adj*adj.T
"""
def getCoCitationMatrix(G1):
ccm = numpy.zeros([len(G1.nodes()),len(G1.nodes())])
for i in range(1,len(G1.nodes())):
ccm[i][i] = G1.in_degree(i)
for j in range(i+1,len(G1.nodes())):
ccm[i][j] = len(list(set(G1.predecessors(i)).intersection(set(G1.predecessors(j)))))
ccm = ccm + ccm.T - numpy.diag(ccm.diagonal())
return ccm
"""
TODO Faster with just bcm = adj.T*adj
"""
def getBibliographicCoupling(G1):
bcm = numpy.zeros([len(G1.nodes()),len(G1.nodes())])
for i in range(1,len(G1.nodes())):
bcm[i][i] = G1.out_degree(i)
for j in range(i+1,len(G1.nodes())):
bcm[i][j] = len(list(set(G1.successors(i)).intersection(set(G1.successors(j)))))
bcm = bcm + bcm.T - numpy.diag(bcm.diagonal())
return bcm
print(getCoCitationMatrix(G1))
print(getBibliographicCoupling(G1))
import networkx as nx
from BasicGraphOperations import readGraph
import numpy
import operator
import matplotlib.pyplot as plt
# Citation network: tab-separated edge list, skipping one header row.
G1 = readGraph("../citation.txt", directed=True, skip=1, delimiter='\t')
print(len(G1.nodes()))
print(len(G1.edges()))
# In-degree (citation count) frequency distribution on log-log axes.
# NOTE(review): G1.in_degree() returning a dict is networkx 1.x behavior.
degseq = list(G1.in_degree().values())
dmax = max(degseq)+1
freq = [ 0 for d in range(dmax) ]
for d in degseq:
    freq[d] += 1
plt.loglog(freq)
plt.show()
#plt.hist(freq)
#plt.show()
# The three most-cited papers, as (node, in-degree) pairs.
citations = G1.in_degree()
citations = sorted(citations.items(), key=operator.itemgetter(1),reverse=True)
print(citations[0])
print(citations[1])
print(citations[2])
|
from dimagi.utils.couch.undo import DELETED_SUFFIX
from dimagi.utils.modules import to_function
from corehq.apps.reports.models import (
FormExportSchema,
CaseExportSchema,
)
from .const import (
CASE_EXPORT,
FORM_EXPORT,
DEID_TRANSFORM_FUNCTIONS,
TRANSFORM_FUNCTIONS,
)
def is_occurrence_deleted(last_occurrences, app_ids_and_versions):
    """Return False as soon as any app's last-seen occurrence is at or
    past the given version; True otherwise (Python 2: dict.iteritems)."""
    for app_id, version in app_ids_and_versions.iteritems():
        if last_occurrences.get(app_id) >= version:
            return False
    return True
def convert_saved_export_to_export_instance(domain, saved_export):
    """Convert a legacy SavedExportSchema into a new-style export instance.

    Regenerates the schema from app builds, creates an instance from it,
    copies the legacy export's settings and per-table/per-column selections
    onto the instance, then soft-deletes the legacy doc (doc_type suffix)
    and cross-links the two documents.

    :param domain: domain name the export belongs to
    :param saved_export: legacy SavedExportSchema document
    :returns: the saved new-style export instance
    """
    from .models import (
        FormExportDataSchema,
        FormExportInstance,
        CaseExportDataSchema,
        CaseExportInstance,
        PathNode,
    )
    # Build a new schema and instance
    schema = None
    instance_cls = None
    export_type = saved_export.type
    if export_type == FORM_EXPORT:
        instance_cls = FormExportInstance
        schema = FormExportDataSchema.generate_schema_from_builds(
            domain,
            saved_export.app_id,
            _extract_xmlns_from_index(saved_export.index),
        )
    elif export_type == CASE_EXPORT:
        instance_cls = CaseExportInstance
        schema = CaseExportDataSchema.generate_schema_from_builds(
            domain,
            _extract_casetype_from_index(saved_export.index),
        )
    # NOTE(review): any other export_type leaves instance_cls as None and
    # the next line raises AttributeError — confirm types are exhaustive.
    instance = instance_cls.generate_instance_from_schema(schema)
    # Carry top-level preferences over from the legacy export.
    instance.name = saved_export.name
    instance.is_deidentified = saved_export.is_safe
    instance.export_format = saved_export.default_format
    instance.transform_dates = saved_export.transform_dates
    instance.legacy_saved_export_schema_id = saved_export._id
    if saved_export.type == FORM_EXPORT:
        instance.split_multiselects = saved_export.split_multiselects
        instance.include_errors = saved_export.include_errors
    # With new export instance, copy over preferences from previous export
    for old_table in saved_export.tables:
        table_path = _convert_index_to_path_nodes(old_table.index)
        new_table = instance.get_table(_convert_index_to_path_nodes(old_table.index))
        if new_table:
            new_table.label = old_table.display
            new_table.selected = True
        else:
            # Table no longer exists in the regenerated schema; skip it.
            continue
        # The SavedExportSchema only saves selected columns so default all the selections to False
        # unless found in the SavedExportSchema (legacy)
        for new_column in new_table.columns:
            new_column.selected = False
        for column in old_table.columns:
            index = column.index
            transform = None # can be either the deid_transform or the value transform on the ExportItem
            if column.doc_type == 'StockExportColumn':
                # Handle stock export column separately because it's a messy edge since
                # it doesn't have a unique index (_id).
                index, new_column = new_table.get_column(
                    [PathNode(name='stock')],
                    'ExportItem',
                    None,
                )
                if new_column:
                    new_column.selected = True
                    new_column.label = column.display
                continue
            if column.transform:
                transform = _convert_transform(column.transform)
            if _is_repeat(old_table.index):
                # Repeat-group columns carry the table's repeat index prefix.
                index = '{table_index}.{column_index}'.format(
                    table_index=_strip_repeat_index(old_table.index),
                    column_index=column.index,
                )
            column_path = _convert_index_to_path_nodes(index)
            # The old style column indexes always look like they contains no repeats,
            # so replace that parts that could be repeats with the table path
            column_path = table_path + column_path[len(table_path):]
            system_property = _get_system_property(
                column.index,
                transform,
                export_type,
                new_table.path
            )
            if system_property:
                column_path, transform = system_property
            guess_types = ['ScalarItem', 'MultipleChoiceItem', 'ExportItem']
            # Since old exports had no concept of item type, we just guess all
            # the types and see if there are any matches.
            for guess_type in guess_types:
                index, new_column = new_table.get_column(
                    column_path,
                    guess_type,
                    _strip_deid_transform(transform),
                )
                if new_column:
                    break
            if not new_column:
                continue
            new_column.label = column.display
            new_column.selected = True
            if transform and not _strip_deid_transform(transform):
                # Must be deid transform
                new_column.deid_transform = transform
    instance.save()
    # Soft-delete the legacy export and link it to its replacement.
    saved_export.doc_type += DELETED_SUFFIX
    saved_export.converted_saved_export_id = instance._id
    saved_export.save()
    return instance
def _extract_xmlns_from_index(index):
return index[1]
def _extract_casetype_from_index(index):
return index[1]
def _is_repeat(index):
return index.startswith('#') and index.endswith('#') and index != '#'
def _strip_repeat_index(index):
index = index.strip('#.')
index = index.replace('#.', '') # For nested repeats
return index
def _strip_deid_transform(transform):
    """Map de-identification transforms to None; pass any other transform
    (or None) through unchanged."""
    if transform in DEID_TRANSFORM_FUNCTIONS:
        return None
    return transform
def _convert_transform(serializable_transform):
    """Map a legacy serialized transform back to its registry slug.

    Resolves the dotted path to a function, then reverse-looks it up in the
    (Python 2) transform registries; returns None when unresolvable or not
    registered.
    """
    transform_fn = to_function(serializable_transform.dumps_simple())
    if not transform_fn:
        return None
    # Search both the value-transform and deid-transform registries.
    for slug, fn in list(TRANSFORM_FUNCTIONS.iteritems()) + list(DEID_TRANSFORM_FUNCTIONS.iteritems()):
        if fn == transform_fn:
            return slug
    return None
def _get_system_property(index, transform, export_type, table_path):
    """Look up the new-style (path, transform) for a legacy system column.

    Dispatches on export type and the table the column lives in; returns
    None when the (index, transform) pair is not a known system property.
    """
    from .models import (
        MAIN_TABLE,
        CASE_HISTORY_TABLE,
        PARENT_CASE_TABLE,
    )
    from .conversion_mappings import (
        FORM_PROPERTY_MAPPING,
        CASE_PROPERTY_MAPPING,
        CASE_HISTORY_PROPERTY_MAPPING,
        PARENT_CASE_PROPERTY_MAPPING,
        REPEAT_GROUP_PROPERTY_MAPPING,
    )
    system_property = None
    # Mappings are keyed on the non-deid transform.
    transform = _strip_deid_transform(transform)
    if export_type == FORM_EXPORT:
        if table_path == MAIN_TABLE:
            system_property = FORM_PROPERTY_MAPPING.get((index, transform))
        elif table_path[-1].is_repeat:
            system_property = REPEAT_GROUP_PROPERTY_MAPPING.get((index, transform))
    elif export_type == CASE_EXPORT:
        if table_path == MAIN_TABLE:
            system_property = CASE_PROPERTY_MAPPING.get((index, transform))
        elif table_path == CASE_HISTORY_TABLE:
            system_property = CASE_HISTORY_PROPERTY_MAPPING.get((index, transform))
        elif table_path == PARENT_CASE_TABLE:
            system_property = PARENT_CASE_PROPERTY_MAPPING.get((index, transform))
    return system_property
def _convert_index_to_path_nodes(index):
    """Convert a legacy dotted table/column index into a list of PathNodes.

    '#' maps to MAIN_TABLE; repeat indexes like '#.form.repeat.#' mark the
    element preceding each interior '#' as a repeat; plain dotted indexes
    become simple PathNodes.
    """
    from corehq.apps.export.models.new import MAIN_TABLE
    from corehq.apps.export.models.new import PathNode
    if index == '#':
        return MAIN_TABLE
    elif _is_repeat(index):
        split_index = index.split('.')[1:] # Remove first "#"
        path = []
        for part in split_index:
            # If the part is "#" we know the previous piece in the path is a repeat group
            if part == '#':
                path[-1].is_repeat = True
            else:
                path.append(PathNode(name=part, is_repeat=False))
        return path
    else:
        return [PathNode(name=n) for n in index.split('.')]
def revert_new_exports(new_exports):
    """
    Takes a list of new style ExportInstance and marks them as deleted as well as restoring
    the old export it was converted from (if it was converted from an old export)
    :param new_exports: List of ExportInstance
    :returns: Any old exports that were restored when decommissioning the new exports
    """
    reverted_exports = []
    for new_export in new_exports:
        if new_export.legacy_saved_export_schema_id:
            schema_cls = FormExportSchema if new_export.type == FORM_EXPORT else CaseExportSchema
            old_export = schema_cls.get(new_export.legacy_saved_export_schema_id)
            # BUG FIX: str.rstrip(DELETED_SUFFIX) strips any trailing
            # characters that appear anywhere in the suffix string, not the
            # suffix itself, and can eat into the real doc_type. Remove the
            # exact suffix instead.
            if old_export.doc_type.endswith(DELETED_SUFFIX):
                old_export.doc_type = old_export.doc_type[:-len(DELETED_SUFFIX)]
            old_export.save()
            reverted_exports.append(old_export)
        # Soft-delete the new-style export.
        new_export.doc_type += DELETED_SUFFIX
        new_export.save()
    return reverted_exports
|
# Generated by Django 3.1.1 on 2021-01-16 13:35
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the work_shift field from the nurse model."""
    dependencies = [
        ('lifesaver', '0012_auto_20210116_2234'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='nurse',
            name='work_shift',
        ),
    ]
|
import numpy as np
from scipy import ndimage
from scipy.spatial import distance
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
from itertools import compress
import time
import cv2
import gtsam
from gtsam import symbol_shorthand
L = symbol_shorthand.L
X = symbol_shorthand.X
from gtsam import (Cal3_S2, DoglegOptimizer,GenericProjectionFactorCal3_S2,
Marginals,NonlinearFactorGraph, PinholeCameraCal3_S2, Point2, Point3,Pose3,
PriorFactorPoint3, PriorFactorPose3, Rot3, Values, BetweenFactorPose3,Pose3AttitudeFactor,NoiseModelFactor, Unit3)
from gtsam.utils import plot
import ImageProcessingFunctions as IP
from SLAMFunctions import rotBody2Ned
from oct2py import octave
from oct2py import Oct2Py
import os
from skimage import exposure
import math
# Start an embedded Octave session and add the uwit toolbox (provides the
# clahs() contrast-equalization used by KeyFrame.process_image) to its path.
oc = Oct2Py()
oc.addpath('/home/kyrre/Dokumenter/prosjektoppgave/masteroppgave/code/Experiments/Image Processing Experiment/uwit')
class KeyFrame:
    """A camera frame promoted to a SLAM keyframe.

    Stores the preprocessed image, its keypoints/descriptors, feature
    matches against the previous keyframe, the absolute orientation
    measured at capture time, and (filled in later) bag-of-words data used
    for saliency scoring and loop-closure detection.
    """
    # Class-level defaults, populated per instance later in the pipeline:
    pose_w_c = None # camera pose in the world frame (set externally)
    frame_histogram = None # BoW word-count histogram
    frame_TF_IDF_histogram = None # BoW TF-IDF histogram
    S_G = None # global saliency score
    S_L = None # local saliency score
    def __init__(self,key_prev,key_cur, image_raw, blurSize, featureDetector, frame_prev, kps_prev, des_prev, time_captured, ori_meas):
        #' Initializes KeyFrame by finding keypoints, descriptors, matches with prev image and estimating Pose
        self.frame_id = key_cur
        self.frame_cur = self.process_image(image_raw,blurSize)
        self.kps_cur, self.des_cur = IP.getKeyPointsAndFeatures(self.frame_cur, featureDetector)
        self.frame_prev = frame_prev
        self.kps_prev = kps_prev
        self.des_prev = des_prev
        self.good_matches_keys = []
        self.good_matches = []
        self.time_captured = time_captured
        #'matching against the previous keyframe's image
        image_with_matches, good_features, src_pts, dst_pts, H,matchesMask = IP.FeatureMatching(frame_prev,self.frame_cur,self.kps_prev,self.des_prev,self.kps_cur,self.des_cur,featureDetector)
        self.good_matches.append(good_features)
        self.good_matches_keys.append(key_prev)
        self.keyframe_orientation = ori_meas
    @staticmethod
    def process_image(image,blurSize):
        # Grayscale -> CLAHS contrast equalization (Octave uwit toolbox)
        # -> Gaussian blur. Assumes blurSize is odd (OpenCV kernel size) —
        # TODO confirm at the call sites.
        img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        img_clahs = oc.clahs(img_gray,15,15,nout='max_nout')
        processed_image = cv2.GaussianBlur(img_clahs,(blurSize,blurSize),cv2.BORDER_DEFAULT)
        return processed_image
    def get_image(self):
        return self.frame_cur
    def get_keyframe_id(self):
        return self.frame_id
    def get_local_saliency_and_global_saliency(self):
        return self.S_L, self.S_G
    def print_time_captured(self):
        print(self.time_captured)
    def get_keyframe_orientation(self):
        return self.keyframe_orientation
    def get_kps_and_des(self):
        return self.kps_cur, self.des_cur
    def matchImages(self,frame_prev, kps_prev, des_prev,featureDetector):
        # Match another frame against this keyframe's image.
        matchImage, good_features, src_pts, dst_pts,H,matchesMask = IP.FeatureMatching(frame_prev,self.get_image(),kps_prev,des_prev,self.kps_cur,self.des_cur,featureDetector) #NOTE: a bit unsure about the argument order here
        return matchImage,good_features, src_pts, dst_pts,H
    def add_BOW_data(self,image_histogram,TF_IDF_histogram,S_L,S_G):
        # NOTE: good_features should be the descriptors of current image match with previous image
        self.frame_histogram = image_histogram
        self.frame_TF_IDF_histogram = TF_IDF_histogram
        self.S_L = S_L
        self.S_G = S_G #BagOfWords.main_function(good_features)
    def get_BOW_data(self):
        return self.frame_histogram, self.S_L, self.S_G, self.frame_TF_IDF_histogram
    def get_good_matches(self):
        return self.good_matches
    def get_good_features_in_cur_image(self,idx):
        # Only idx == 0 is supported; any other index returns (0, 0)
        # sentinels.
        good_features = self.get_good_matches()
        kps_cur, des_cur = self.get_kps_and_des()
        if idx == 0:
            cur_good_kps, cur_good_des = self.convert_matches_to_kp_and_descriptors(good_features[0],kps_cur,des_cur)
        else :
            cur_good_kps = 0
            cur_good_des= 0
        return cur_good_kps, cur_good_des
    def update_global_and_local_saliency_and_TF_IDF_histogram(self,S_L,S_G,TF_IDF_histogram):
        self.S_L = S_L
        self.S_G = S_G
        self.frame_TF_IDF_histogram = TF_IDF_histogram
    def full_bundle_adjustment(self):
        # Placeholder — not implemented.
        pass
    def triangulate(self,):
        # Placeholder — not implemented.
        pass
    @staticmethod
    def convert_matches_to_kp_and_descriptors(good_features,kps_cur,des_cur):
        #inputs kps_cur and des_cur which are the raw feature extraction from the current image.
        cur_good_des = []
        cur_good_kps = []
        for mat in good_features:
            cur_idx = mat.trainIdx
            #prev_idx = mat.queryIdx see link for more info: https://stackoverflow.com/questions/30716610/how-to-get-pixel-coordinates-from-feature-matching-in-opencv-python
            cur_good_kps.append(kps_cur[cur_idx].pt)
            cur_good_des.append(des_cur[cur_idx])
        cur_good_des = np.asarray(cur_good_des)
        cur_good_kps = np.asarray(cur_good_kps)
        return cur_good_kps, cur_good_des
"originaly inspired by the post https://qvault.io/python/binary-search-tree-in-python/"
class BSTNode:
def __init__(self, clusterListInput,val=None,d =0):
self.leftChild = None #descriptor-of-cluster-senter 1
self.rightChild = None #descriptor-of-cluster-senter 2
self.val = val #descriptor-of-cluster-senter prev. This value is set to None if it is the initial node.
self.clusterList = clusterListInput # contains all the clusterInput
self.depth = d
self.leafIdx = None #only given to leafs
def hasRightChild(self):
return self.rightChild
def hasLeftChild(self):
return self.leftChild
def insertLeftChild(self,Node):
self.leftChild = Node
def insertRightChild(self,Node):
self.rightChild = Node
def findClusterSize(self):
return self.clusterList.shape[0]
def getClusterList(self):
return self.clusterList
def labelLeaf(self,leafIdx):
self.leafIdx = leafIdx
def getLeafIdx(self):
return self.leafIdx
def getDepth(self):
return self.depth
def getVal(self):
return self.val
def evalEuclideanDistance(self,word):
#only run function if one knows that the node has child nodes
#returns true if leftNode is closest
dist_left = euclidean_distances(word,self.hasLeftChild().getVal())
dist_right = euclidean_distances(word,self.hasRightChild().getVal())
if (dist_left>dist_right):
return False
else:
return True
class binary_search_tree():
    """Binary vocabulary tree built by recursive 2-means clustering of
    feature descriptors; leaves enumerate the visual words."""
    def __init__(self,root_node: BSTNode):
        self.root = root_node
        self.depth = 0 # deepest level reached while building
        self.leafCounter = 0 # number of leaves (= words) labeled so far
    def generate_search_tree(self,root_node, depthTracker=0):
        """Recursively split the node's descriptor cluster with 2-means
        until singleton clusters remain; singletons become labeled leaves.

        NOTE(review): termination relies on k-means eventually isolating
        single descriptors — duplicate descriptors could recurse deeply;
        confirm inputs are distinct.
        """
        nr_of_descriptors = root_node.findClusterSize()
        depthTracker += 1
        if (depthTracker >self.getTreeDepth()):
            self.incrementTreeDepth()
        if nr_of_descriptors != 1:
            #divide into two clusters
            ClusterListofRoot = root_node.getClusterList()
            kmeans = KMeans(n_clusters = 2, n_init=10)
            kmeans.fit(ClusterListofRoot)
            clusterCenters = kmeans.cluster_centers_
            #' Adding left and right Cluster to tree
            leftClusterCenter = np.array([clusterCenters[0]])
            leftCluster = ClusterListofRoot[self.ClusterIndicesNumpy(0,kmeans.labels_)]
            left_node = BSTNode(leftCluster,leftClusterCenter,depthTracker)
            root_node.insertLeftChild(left_node)
            self.generate_search_tree(left_node,depthTracker)
            rightClusterCenter = np.array([clusterCenters[1]])
            rightCluster= ClusterListofRoot[self.ClusterIndicesNumpy(1,kmeans.labels_)]
            right_node = BSTNode(rightCluster,rightClusterCenter,depthTracker)
            root_node.insertRightChild(right_node)
            self.generate_search_tree(right_node,depthTracker)
        else:
            root_node.labelLeaf(self.getLeafCounter())
            print(root_node.getLeafIdx())
            self.incremenLeafCounter()
    def incrementTreeDepth(self):
        self.depth += 1
    def getTreeDepth(self):
        return self.depth
    def getLeafCounter(self):
        return self.leafCounter
    def incremenLeafCounter(self):
        # NOTE(review): name is missing a 't' ("increment") — kept for
        # compatibility with existing callers.
        self.leafCounter += 1
    @staticmethod
    def ClusterIndicesNumpy(clustNum, labels_array):
        # Indices of descriptors assigned to cluster `clustNum`.
        return np.where(labels_array == clustNum)[0]
    def singleWordSearch(self,root_node: BSTNode, word):
        """Descend toward the closest cluster center at each level and
        return the reached leaf's word index."""
        # NOTE(review): `== None` should idiomatically be `is None`.
        if root_node.getLeafIdx() == None:
            #evaluate right vs left
            leftClosest = root_node.evalEuclideanDistance(word)
            if leftClosest:
                return self.singleWordSearch(root_node.hasLeftChild(),word)
            else:
                return self.singleWordSearch(root_node.hasRightChild(),word)
        else:
            leaf_val = root_node.getLeafIdx()
            return leaf_val
    def wordSearch(self,root_node: BSTNode, features : np.array, nr_words_in_vocabulary : int):
        """Quantize each feature to a visual word; return the (1, W) word
        count histogram."""
        nr_words = features.shape[0]
        histogram = np.zeros((1,nr_words_in_vocabulary))
        for i in range(0,nr_words):
            word = np.array([features[i,:]])
            word_idx = self.singleWordSearch(root_node,word)
            histogram[0,word_idx] += 1
        return histogram
    def get_root_node(self):
        return self.root
def cosine_distance(TF_IDF1, TF_IDF2):
    """Cosine similarity between two TF-IDF histograms.

    1 means identical direction, 0 not similar at all; a NaN result
    (zero-norm input) is mapped to 0.
    """
    norm_product = np.linalg.norm(TF_IDF1) * np.linalg.norm(TF_IDF2)
    cos_dist = (TF_IDF1 @ TF_IDF2.T) / norm_product
    return 0 if math.isnan(cos_dist) else cos_dist
def evaluate_yaw(yaw_th,yaw_cur,yaw_array):
    """Boolean mask: True where sin(|yaw - yaw_cur|) is within yaw_th."""
    angular_offset = np.abs(yaw_array - yaw_cur)
    return np.sin(angular_offset) <= yaw_th
def evaluate_depth(depth_th,depth_cur,depth_array):
    """Boolean mask: True where depth is within depth_th of depth_cur."""
    depth_offset = np.abs(depth_array - depth_cur)
    return depth_offset <= depth_th
def evaluate_global_saliency(S_G_th,S_G_cur,S_G_array):
    """Boolean mask: True where global saliency is within S_G_th of the
    current frame's global saliency."""
    saliency_offset = np.abs(S_G_cur - S_G_array)
    return saliency_offset <= S_G_th
def generate_depth_and_yaw_filter(depth_th,depth_cur,depth_array,yaw_th,yaw_cur,yaw_array):
    """Combined candidate mask: True where both the depth and the yaw of a
    previous keyframe are within their thresholds of the current values."""
    depth_ok = np.abs(depth_array - depth_cur) <= depth_th
    yaw_ok = np.sin(np.abs(yaw_array - yaw_cur)) <= yaw_th
    return depth_ok & yaw_ok
class BOW(binary_search_tree):
    """Bag-of-visual-words database built on the vocabulary search tree.

    Tracks global word statistics across keyframes (N images, W unique
    words, per-word image occurrence counts), computes local/global
    saliency and TF-IDF histograms, and maintains the cosine-similarity
    cost matrix used to propose loop-closure link hypotheses.
    """
    def __init__(self,root_node, treeDepth, nr_of_words,n_link_proposal,NR_RECENT_IMAGES_TO_EXCLUDE_FROM_LOOP_CLOSURE):
        super().__init__(root_node)
        self.depth = treeDepth
        self.leafCounter = nr_of_words
        #initializing histograms:
        self.global_histogram = np.zeros((1,nr_of_words)) # Keeping global word count
        self.word_occurence_in_images = np.zeros((1,nr_of_words))
        self.binary_global_histogram = (np.zeros((1,nr_of_words)) != 0) # Binary histogram tracking all words discovered from dictionary
        self.N = 0 # Number of images comprising the vocabulary database
        self.W = 0 # Contains number of unique words
        self.Rmax = 0 # Maximum image rarity discovered
        #loop closure parameters:
        self.cost_matrix = None
        self.TF_IDF_histograms = []
        self.nr_images_excluded_from_loop_closure = NR_RECENT_IMAGES_TO_EXCLUDE_FROM_LOOP_CLOSURE
        self.n_link_proposal = n_link_proposal #number indicating how many best matches one should extract
    def local_saliency(self,image_histogram):
        """Shannon entropy H of the image's word distribution and the local
        saliency S_L = H / log2(vocabulary size).

        NOTE(review): divides by np.log2(W); W < 2 yields a zero or
        negative denominator — confirm the vocabulary always has >= 2 words
        by the time this runs.
        """
        #examine entropy (H):
        non_unique_word_count_in_image = sum(sum(image_histogram))
        nr_of_words_in_vocabulary = self.get_W()
        binary_image_histogram = (image_histogram>0)
        H = 0
        for i in range(0,self.leafCounter):
            word_found = binary_image_histogram[0,i]
            if word_found:
                p_w = image_histogram[0,i]/non_unique_word_count_in_image
                H += p_w*np.log2(p_w)
        H = -H
        S_L = H/np.log2(nr_of_words_in_vocabulary)
        return H, S_L
    def global_saliency_and_TF_IDF_histogram(self,image_histogram):
        """Image rarity R (sum of IDF over present words) normalized by the
        running maximum rarity, plus the TF-IDF histogram."""
        N_t = self.get_N()
        R_i_t = 0
        number_of_images_containing_certain_word = self.get_word_occurence_in_images()
        binary_image_histogram = (image_histogram>0)
        TF_IDF_histogram = np.zeros((1,binary_image_histogram.shape[1]))
        nr_words_in_image = sum(sum(image_histogram))
        for i in range(0,self.getLeafCounter()):
            word_found = binary_image_histogram[0,i]
            if word_found:
                images_containing_word_i = number_of_images_containing_certain_word[0,i]
                IDF = np.log2(N_t/images_containing_word_i)
                R_i_t += IDF
                TF_IDF_histogram[0,i] = (image_histogram[0,i]/nr_words_in_image)*IDF
        # Track the largest rarity seen so far so S_G stays in [0, 1].
        if R_i_t > self.get_Rmax():
            self.set_Rmax(R_i_t)
        if (R_i_t !=0 and self.get_Rmax()!=0):
            S_G = R_i_t/self.get_Rmax()
        else:
            #start condition for image 1 #this will be updated
            S_G = 0
        return S_G, TF_IDF_histogram
    def update_global_histogram(self,histogram):
        self.global_histogram = self.global_histogram + histogram
    def find_new_binary_global_histogram(self,image_histogram):
        """OR the image's binary histogram into the global one (without
        mutating state); returns (new_global, binary_image_histogram)."""
        binary_hist = (image_histogram>0)
        old_global = self.get_binary_global_histogram()
        new_global = np.logical_or(old_global,binary_hist)
        return new_global,binary_hist
    def find_newly_discovered_words(self,new_binary_global_histogram):
        """Indices of words present in the new global histogram but absent
        from the old one."""
        old_global = self.get_binary_global_histogram()
        diff =new_binary_global_histogram^old_global #XOR operations: if one index is set to True, one know that this index has been added
        new_words_found = list(compress(range(len(diff[0,:])), diff[0,:]))
        return new_words_found
    #' The saliency algorithm:
    def main_function(self,image_features,keyframeList,batch_size):
        """Quantize an image's features, update word statistics, and return
        its histogram plus saliency/TF-IDF data.

        When new words were discovered, older keyframes' saliencies are
        recomputed once per `batch_size` images; the recomputed lists are
        returned (empty otherwise).
        """
        #' Generating image histogram for features extracted:
        image_histogram = self.wordSearch(self.get_root_node(), image_features, self.getLeafCounter())
        #' Searching for new words and updating binary_global_histogram:
        new_binary_global_histogram,binary_hist = self.find_new_binary_global_histogram(image_histogram)
        new_words_found = self.find_newly_discovered_words(new_binary_global_histogram)
        self.update_word_occurence_in_images(binary_hist)
        new_S_L, new_S_G, new_TF_IDF_histogram = [], [], []
        self.increment_N()
        #checks if list is empty
        if not new_words_found:
            #calculate local and global saliency
            H, S_L = self.local_saliency(image_histogram)
            S_G, TF_IDF_histogram = self.global_saliency_and_TF_IDF_histogram(image_histogram)
        else:
            self.update_W(len(new_words_found)) #add all new words found
            self.set_binary_global_histogram(new_binary_global_histogram)
            # update for every batch of size X images
            if (self.get_N()%batch_size ==0) and (self.get_N()!=0):
                #adding this measurement
                H, S_L = self.local_saliency(image_histogram)
                S_G, TF_IDF_histogram = self.global_saliency_and_TF_IDF_histogram(image_histogram)
                #update all previous keyframes with the new statistics
                start = time.time()
                new_S_L, new_S_G, new_TF_IDF_histogram = self.update_global_and_local_saliencies_and_TF_IDF_histogram(keyframeList[:-1])
                end = time.time()
                print(f"Update time of local and global saliency and TF_IDF_histogram {end - start}")
            else:
                start = time.time()
                H, S_L = self.local_saliency(image_histogram)
                end = time.time()
                print(f"Runtime of the local saliency calculation {end - start}")
                S_G,TF_IDF_histogram = self.global_saliency_and_TF_IDF_histogram(image_histogram)
        return image_histogram, H, S_L, S_G, TF_IDF_histogram, new_S_L, new_S_G, new_TF_IDF_histogram
    #' These functions should be run when N and W has been updated,
    def update_global_and_local_saliencies_and_TF_IDF_histogram(self,keyframes: list):
        """Recompute S_L, S_G and TF-IDF for the given keyframes under the
        current (updated) N and W; returns three parallel lists."""
        nr_keyframes = len(keyframes)
        new_S_L = []
        new_S_G = []
        new_TF_IDF_histogram= []
        for idx in range(0,nr_keyframes):
            keyframe = keyframes[idx]
            frame_histogram, S_L_prev, S_G_prev,TF_IDF_histogram = keyframe.get_BOW_data()
            H, S_L_new = self.local_saliency(frame_histogram)
            S_G_new, TF_IDF_histogram = self.global_saliency_and_TF_IDF_histogram(frame_histogram)
            new_S_L.append(S_L_new)
            new_S_G.append(S_G_new)
            new_TF_IDF_histogram.append(TF_IDF_histogram)
        return new_S_L, new_S_G, new_TF_IDF_histogram
    def get_word_occurence_in_images(self):
        return self.word_occurence_in_images
    def update_word_occurence_in_images(self,binary_image_histogram):
        self.word_occurence_in_images = self.word_occurence_in_images.astype(int) + binary_image_histogram
    def increment_N(self):
        self.N += 1
    def set_N(self,value):
        self.N = value
    def get_N(self):
        return self.N
    def get_W(self):
        return self.W
    def update_W(self,nr_new_words):
        self.W += nr_new_words
    def get_Rmax(self):
        return self.Rmax
    def set_Rmax(self,Rmax_new):
        self.Rmax = Rmax_new
    def get_binary_global_histogram(self):
        return self.binary_global_histogram
    def set_binary_global_histogram(self,binary_histogram):
        self.binary_global_histogram = binary_histogram
    #' Loop Closure Functions:
    def increment_cost_matrix(self,new_TF_IDF_histogram):
        """Grow the symmetric cosine-similarity cost matrix by one row and
        one column for the newest histogram; returns the new row."""
        TF_IDF_histograms = self.get_TF_IDF_histograms()
        new_cost_matrix = self.get_cost_matrix()
        nr_previous_histograms = len(TF_IDF_histograms)-1
        if new_cost_matrix is None:
            # First image: 1x1 zero matrix.
            new_row = np.zeros((1,1))
            self.set_cost_matrix(new_row)
        else:
            rows, cols = new_cost_matrix.shape
            new_row = np.zeros((1,cols))
            for idx in range(0,nr_previous_histograms):
                #compare all histograms distance
                if idx ==0:
                    # debug output
                    print(TF_IDF_histograms[idx])
                new_row[0,idx] = cosine_distance(new_TF_IDF_histogram, TF_IDF_histograms[idx])
            new_cost_matrix = np.concatenate((new_cost_matrix,new_row),axis=0) #add new row
            new_row = np.concatenate((new_row,np.zeros((1,1))),axis=1)
            new_cost_matrix = np.concatenate((new_cost_matrix,new_row.T),axis=1) #add new column
            #replacing old TF_IDF_array
            self.set_cost_matrix(new_cost_matrix)
        return new_row
    def extract_n_link_hypothesis(self,filtered_array,filtered_array_idx):
        """Return up to n_link_proposal best (index, similarity) pairs from
        the filtered candidate similarities, best first."""
        is_empty = (filtered_array_idx.size == 0)
        if is_empty:
            return []
        else:
            #check nr of links
            length_filtered_array = len(filtered_array)
            n_links = self.get_n_link_proposal()
            if length_filtered_array < n_links:
                print('filtered_array',filtered_array)
                print(filtered_array.shape)
                indices = np.argpartition(filtered_array, -length_filtered_array)[-length_filtered_array:]
                link_proposals = []
                for index in indices:
                    link_proposals.append((filtered_array[index], filtered_array_idx[index]))#' returns the index in the original array
                link_proposals.sort(reverse=True)
                link_proposals = [(b, a) for a, b in link_proposals]
            else:
                indices = np.argpartition(filtered_array, -n_links)[-n_links:]
                link_proposals = []
                for index in indices:
                    link_proposals.append((filtered_array[index], filtered_array_idx[index]))#' returns the index in the original array
                link_proposals.sort(reverse=True)
                link_proposals = [(b, a) for a, b in link_proposals]
            #return link_proposals[x][y] where x determines the best match, while y is (0 or 1) indicating index and value respectively
            return link_proposals
    def get_n_link_proposal(self):
        return self.n_link_proposal
    def update_cost_matrix(self):
        # BUG FIX: previously declared without `self`, so calling it on an
        # instance raised TypeError. Still a placeholder.
        pass
    def get_cost_matrix(self):
        return self.cost_matrix
    def add_TF_IDF_histogram(self,TF_IDF_histogram):
        self.TF_IDF_histograms.append(TF_IDF_histogram)
    def remove_TF_IDF_histogram(self):
        self.TF_IDF_histograms.pop()
    def get_nr_images_excluded_from_loop_closure(self):
        return self.nr_images_excluded_from_loop_closure
    def get_TF_IDF_histograms(self):
        return self.TF_IDF_histograms
    def update_TF_IDF_histograms(self,idx,new_TF_IDF_histogram):
        self.TF_IDF_histograms[idx] = new_TF_IDF_histogram
    def set_cost_matrix(self,new_cost_matrix):
        self.cost_matrix = new_cost_matrix
    #NOTE: this should only be called if global saliency of image is high enough
    def loop_closure_link_hypothesis(self,depth_th,depth_cur,depth_array,yaw_th,yaw_cur,yaw_array,new_TF_IDF_histogram):
        """Propose loop-closure candidates for the newest keyframe.

        Grows the cost matrix, excludes the most recent images, gates
        candidates by depth and yaw proximity, and returns up to
        n_link_proposal (index, similarity) pairs (empty list early on).
        """
        #look for changes in vocabulary size
        #if changes: update length of TF-IDF diagrams
        ignore_last_x_images = self.get_nr_images_excluded_from_loop_closure()
        new_row = self.increment_cost_matrix(new_TF_IDF_histogram)
        #don't include the previous x images in the loop closure
        new_row = new_row[0,:-ignore_last_x_images]
        #don't do loop closure if the nr of images accumulated is not high enough
        if (depth_array.shape[1]> ignore_last_x_images):
            # Renamed from `filter` to avoid shadowing the builtin.
            candidate_mask = generate_depth_and_yaw_filter(depth_th,depth_cur,depth_array[0,:-ignore_last_x_images],yaw_th,yaw_cur,yaw_array[0,:-ignore_last_x_images])
            #setting values in new_row to 0 if not a potential candidate
            filtered_candidates = candidate_mask*new_row
            filtered_candidates_index = np.nonzero(filtered_candidates)
            filtered_candidates = filtered_candidates[filtered_candidates_index]
            #get n-best_candidates
            n_link_hypothesis = self.extract_n_link_hypothesis(filtered_candidates,filtered_candidates_index[0])
            return n_link_hypothesis
        else:
            #returns an empty list if too early in the SLAM
            return []
def validate_loop_closure(keyframe_current,keyframe_prev,K,featureDetector,HOMOGRAPHY_TH,n_dvl):
    """Geometrically validate a proposed loop closure between two keyframes.

    Matches features between the two keyframe images, rejects weak matches
    (<= 10 inliers or |det(H)| below threshold), decomposes the homography
    into candidate (R, T, N) solutions, drops negative-depth normals, and
    picks the solution whose plane normal best agrees with the DVL normal.

    Returns (match_image, R, T, MATCH_FOUND); the first three are None on
    failure.
    """
    MATCH_FOUND = False
    #this function takes in two keyframes: the current keyframe and the previous seen keyframe
    image_current = keyframe_current.get_image()
    image_prev = keyframe_prev.get_image()
    kps_cur_image, des_cur_image = IP.getKeyPointsAndFeatures(image_current,featureDetector) #keyframe_list[-1].get_kps_and_des()
    kps_prev_image, des_prev_image = IP.getKeyPointsAndFeatures(image_prev,featureDetector)#keyframe_list[prev_image_idx].get_kps_and_des()
    #1. it matches features using Homography
    ImageMatches, good, src_pts, dst_pts, H,matchesMask = IP.FeatureMatching(image_current,image_prev,kps_cur_image,des_cur_image,kps_prev_image,des_prev_image,featureDetector)
    #2. validates the homography
    #3. Decomposes the Homography into 4 possible transformation solutions
    #4. Reduce into 2 solutions based on the fact that the points should be in front of both images
    if matchesMask is None:
        # BUG FIX: these diagnostics were bare string expressions (no-ops);
        # they are now actually printed.
        print('not good enough matches or Homography is incorrect')
        return None, None, None, MATCH_FOUND
    elif len(matchesMask) <=10 or np.linalg.det(H)<HOMOGRAPHY_TH:
        print('not good enough matches or Homography is incorrect')
        return None, None, None, MATCH_FOUND
    else:
        MATCH_FOUND = True
        #input good matches
        #R, T = IP.EstimateRotationAndTranslationHomography(H,K)
        num, Rs, Ts, Ns = cv2.decomposeHomographyMat(H,K)
        # filter negative depth solutions:
        Rs_filtered = []
        Ts_filtered = []
        Ns_filtered = []
        for i in range(0,len(Ns)):
            #removing solutions that do not have positive depth
            if Ns[i][2,0]>0:
                Rs_filtered.append(Rs[i])
                Ts_filtered.append(Ts[i])
                Ns_filtered.append(Ns[i])
        #obtain rotation matrix for when keyframes was taken based on absolute measurements of yaw,pitch and roll
        ori_meas_cur = keyframe_current.get_keyframe_orientation()
        ori_meas_prev = keyframe_prev.get_keyframe_orientation()
        abs_Rw_cur = rotBody2Ned(ori_meas_cur[0,2],ori_meas_cur[0,1],ori_meas_cur[0,0])
        abs_Rw_prev = rotBody2Ned(ori_meas_prev[0,2],ori_meas_prev[0,1],ori_meas_prev[0,0])
        # NOTE(review): abs_Rprev_cur is computed but never used — likely
        # intended for cross-checking the homography rotation; confirm.
        abs_Rprev_cur = (abs_Rw_prev.T)@abs_Rw_cur
        #5. Determines which of the 2 remaining solutions that is correct
        matchIdx = evaluate_euclidean_distance_of_normal_vectors(n_dvl,Ns_filtered)
        return ImageMatches, Rs_filtered[matchIdx], Ts_filtered[matchIdx], MATCH_FOUND
def evaluate_euclidean_distance_of_normal_vectors(n_dvl, n_homography):
    """Return the index of the candidate normal closest to the DVL normal.

    The DVL normal is flipped (`-n_dvl`) to match the homography convention,
    then compared to each candidate in `n_homography` by Euclidean distance.

    BUG FIX: the original initialized neither `idx` nor a proper bound
    (`dist = 1000`), so an empty candidate list — or all distances >= 1000 —
    raised UnboundLocalError. We now start from index 0 and +inf.
    """
    # change the orientation of the normal vector obtained from the DVL
    n_dvl = -n_dvl
    best_idx = 0
    best_dist = float('inf')
    for i, candidate in enumerate(n_homography):
        dist_i = np.linalg.norm(n_dvl - candidate)
        if dist_i < best_dist:
            best_dist = dist_i
            best_idx = i
    return best_idx
|
# Exercise 1: an alternative way to create a process -- the object-oriented approach
# from multiprocessing import Process
# import time
#
# class Myprocess(Process):
# def __init__(self, a, b): # 传递参数
# self.a = a
# self.b = b
# super().__init__()
# def run(self):
# time.sleep(1)
# print('My Process')
# print(self.a, self.b)
#
# if __name__ == '__main__':
# mp = Myprocess(1, 2)
# mp.start()
# mp.join()
# print('in main')
# Exercise 2: daemon processes
# from multiprocessing import Process
# import time
#
# def func():
# print('in func start')
# time.sleep(1) # 到了时间片会切出来
# print('in func')
#
# def foo():
# print('in foo')
#
# if __name__ == '__main__':
# p1 = Process(target=func)
# p1.daemon = True
# p1.start()
# time.sleep(1)
# p2 = Process(target=foo)
# p2.start()
# print('in main')
# Exercise 3: process synchronization --- locks
# from multiprocessing import Process, Lock
# import time
#
# def func(lock, i):
# global n
# lock.acquire()
# time.sleep(0.2)
# print(i)
# lock.release()
#
# if __name__ == '__main__':
# lock = Lock()
# for i in range(10):
# p = Process(target=func, args=(lock, i))
# p.start()
# Exercise 4: producer-consumer model --- implemented with a queue
# from multiprocessing import Process, Queue
# import time
#
# def consume(q):
# while True:
# food = q.get()
# if food:
# print(q.get())
# else:
# break
#
# def producer(q):
# for i in range(1, 10):
# print('produce-->', i)
# q.put(i)
# q.put('hehe')
# q.put('haha')
#
# if __name__ == '__main__':
# q = Queue()
# p1 = Process(target=producer, args=(q, ))
# p2 = Process(target=producer, args=(q, ))
# c = Process(target=consume, args=(q, ))
# c.start()
# p1.start()
# p2.start()
# p1.join()
# p2.join()
# q.put(None)
|
#!/usr/bin/env python
# coding=utf-8
# Python 2 script: prints a system status report (CPU, RAM, process and
# serial state) gathered by the `data` and `ser_status` modules, writes the
# same report to logg.txt, and sends e-mail alerts on threshold breaches.
from data import *
from ser_status import *
print "Date:", date
print hostname
print "Ip:", ip
# Print system-wide float percentage of all CPU used.
print "CPU used: %d %%" % cpuU
# Print the CPU temp
print "CPU temp: %d°C" % cpuT
# Print statistics of the system memory usage.
print "Ram total: %dMb" % ramT
print "Ram used: %dMb" % ramU
print "Ram free: %dMb" % ramF
print "Percent used ram: %d%%" % ramPU
# Print if specified process 'omxplayer.bin' is running.
print process
print "Serial status:%s" % serial
# Writes the data to logg.txt (overwrites each time)
with open("logg.txt", "w") as text_file:
    text_file.write("""Date: %s \n%s\nIp: %s
CPU usage: %d \n CPU temp: %d \n Total ram: %d \n Ram used: %d
Ram free: %d \n Ram free percent: %d \n %s
Serial status: %s""" % (date, hostname, ip, cpuU, cpuT, ramT, ramU, ramF, ramPU, process, serial))
# Mail alerts! (import deferred until after the log write on purpose —
# presumably so a mail failure never prevents logging; confirm.)
import mail
if (cpuT > 62):
    mail.send("CPU temp is above 62°C!")
else:
    pass
if (ramPU > 90):
    mail.send("RAM usage is over 90%!")
else:
    pass
if (process == "omxplayer.bin isn't running"):
    mail.send("omxplayer.bin isn't running!")
else:
    pass
if (serial == "ON"):
    pass
else:
    mail.send("Serial failure!")
|
# pistreaming configuration constants.
HTTP_PORT = 8082  # port the HTTP page server listens on
WS_PORT = 8084  # port the websocket video stream listens on
WIDTH = 640  # camera capture width in pixels
HEIGHT = 480  # camera capture height in pixels
FRAMERATE = 24  # camera frames per second
RECORD_SECONDS_AT_A_TIME = 5  # length of each recorded chunk, in seconds
VFLIP = True  # flip the camera image vertically (camera mounted upside down?)
HFLIP = False  # flip the camera image horizontally
SCREENSHOTS_PATH = '/home/pi/site/pistreaming/screenshots/'  # where snapshots are saved
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-08 18:51:21
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
# Definition for singly-linked list.
class ListNode(object):
    """Node of a singly linked list: a value and a pointer to the next node."""
    def __init__(self, x):
        self.val = x  # payload
        self.next = None  # successor; None means tail
class Solution(object):
    """Reverse a singly linked list k nodes at a time (LeetCode 25).

    Groups shorter than k (including a final partial group) are left in
    their original order.
    """

    def reverseKGroup(self, head, k):
        """Return the head of the list after reversing every full k-group."""
        if not head or head.next is None:
            return head
        new_head, old_head = self.reverseK(head, k)
        # BUG FIX: when 1 < length < k, reverseK returns (None, head); the
        # original fell through with record=None and returned None, losing
        # the whole list. Return it unchanged instead.
        if new_head is None:
            return head
        record = new_head
        while 1:
            tem_new, tem_old = self.reverseK(old_head.next, k)
            if tem_new is None:
                # remaining tail is shorter than k: attach it un-reversed
                old_head.next = tem_old
                break
            old_head.next = tem_new
            old_head = tem_old
        return record

    def reverseK(self, head, k):
        """Reverse the first k nodes starting at `head`.

        Returns (new_head, old_head) of the reversed group, or (None, head)
        when fewer than k nodes remain (group left untouched).
        """
        if not head:
            return (None, head)
        # detect if the length is enough
        if not self.detecter(head, k):
            return (None, head)
        record = head
        l, r = head, head.next
        for i in range(k - 1):
            tem = r.next
            r.next = l
            l = r
            r = tem
        # `record` is now the tail of the group; point it at the rest
        record.next = r
        return (l, record)

    def detecter(self, head, k):
        """Return True iff at least k nodes remain starting at `head`."""
        count = 0
        while head:
            count += 1
            head = head.next
            if count >= k:
                return True
        return False
# Ad-hoc smoke test: build the list 1 -> 2 -> 3 -> 4, reverse in groups of 3.
# Result is 3 -> 2 -> 1 -> 4, so the 4th node's .next is None and the final
# print emits "None".
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
s = Solution()
new_head = s.reverseKGroup(head, 3)
print(new_head.next.next.next.next)
# tem1,tem2 = s.reverseK(head,2) |
from sixopy.gr import Gr
from sixopy.bq import bQ
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import json
def main():
    """Rebuild the `social_data.sixers_instagram` BigQuery table from a local
    JSON dump of Instagram posts.

    Loads the dump, normalizes timestamps, recreates the table with a schema
    inferred from the DataFrame, and streams pre-cutoff rows into BigQuery.
    """
    creds = r"Kobas Data-5c6ec97a9dae.json"
    bq = bQ(creds)
    table = 'sixers_instagram'

    s = Gr()  # scraper/parser client
    users = ['sixers']
    s.username = 'sixers'

    # load the raw dump and flatten it into a DataFrame
    with open('sixers\\sixers.json', encoding='utf8') as json_data:
        a = json.load(json_data)
    w = pd.DataFrame(s.create_list(a))
    w['created_time'] = pd.to_datetime(w['created_time'])
    w['created_date'] = w['created_time'].dt.strftime('%Y-%m-%d')
    sche = bq.schema_from_df(w, types={'created_time': 'DATETIME'})
    print(w.dtypes)

    # recreate the destination table from scratch
    bq.delete_table('social_data', table)
    bq.create_table('social_data', table, schema=sche)

    for user in users:
        w.to_json(user + '_gr_posts.json', orient='records')
        # BUG FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in
        # 1.0 — use label-based .loc with the same boolean mask.
        w = w.loc[w['created_date'] < '2017-12-11']
        w.to_json(user + '_gr_posts.json', orient='records')
        # BigQuery's streaming API rejects NaN; replace with None (NULL)
        w = w.replace({np.nan: None})
        bq.stream_data('social_data', 'sixers_instagram', w.to_dict('records'))
if __name__ == "__main__":
main() |
"""Test advantage model."""
from tests.factories import MessageFactory
def test_message_name() -> None:
    """A Message's string representation must be its text field."""
    built = MessageFactory.build()
    assert built.text == str(built)
|
import os
import re
import time
import threading
import datetime
from functools import wraps
from typing import Optional, Iterable, Dict, Any
import logging
from PySide2.QtCore import Qt
from binaryninjaui import (
UIContext,
DockHandler,
DockContextHandler,
UIAction,
UIActionHandler,
Menu,
)
import binaryninja
from binaryninja import PluginCommand, BinaryView
from binaryninja.interaction import show_message_box
from binaryninja.enums import MessageBoxButtonSet, MessageBoxIcon, VariableSourceType
from binaryninja.binaryview import BinaryDataNotification
import binsync
from binsync import State, StateContext
from binsync.data import Patch, Function, Comment, StackVariable, StackOffsetType
from .ui import find_main_window, BinjaDockWidget, create_widget
from .config_dialog import ConfigDialog
from .control_panel import ControlPanelDialog, ControlPanelDockWidget
_l = logging.getLogger(name=__name__)
def instance():
    """Return the BinSync dock widget of the main window.

    Falls back to constructing a placeholder dock when none is registered.
    """
    main_window = find_main_window()
    try:
        dock = [x for x in main_window.children() if isinstance(x, BinjaDockWidget)][0]
    except IndexError:
        # FIX: was a bare `except:` — only the empty-list lookup should fall
        # back to a dummy widget; anything else should propagate.
        dock = BinjaDockWidget("dummy")
    return dock
#
# Decorators
#
def init_checker(f):
    """Decorator: block the wrapped method until a repo client is connected.

    Raises RuntimeError when `self.check_client()` is falsy; otherwise the
    call is forwarded untouched.
    """
    @wraps(f)
    def _guarded(self, *args, **kwargs):
        if self.check_client():
            return f(self, *args, **kwargs)
        raise RuntimeError("Please connect to a repo first.")
    return _guarded
def make_state(f):
    """
    Build a writeable State instance and pass it to `f` as the `state` kwarg
    when the caller did not supply one.

    Function `f` should have at least two kwargs, `user` and `state`. When
    the state is created here (not supplied by the caller), it is saved
    after `f` returns; a caller-supplied state is the caller's to save.
    Note: the `user` kwarg is consumed here and NOT forwarded to `f`.
    """
    @wraps(f)
    def state_check(self, *args, **kwargs):
        state = kwargs.pop('state', None)
        user = kwargs.pop('user', None)
        if state is None:
            # we own this state's lifecycle, so persist it on the way out
            save_before_return = True
            state = self.client.get_state(user=user)
        else:
            save_before_return = False
        r = f(self, *args, **kwargs, state=state)
        if save_before_return:
            state.save()
        return r
    return state_check
def make_ro_state(f):
    """
    Decorator supplying a read-only State to `f` via the `state` kwarg when
    the caller did not pass one.

    `f` should accept at least the kwargs `user` and `state`. Unlike
    `make_state`, the state is never saved here, and the `user` kwarg is
    consumed rather than forwarded.
    """
    @wraps(f)
    def _with_state(self, *args, **kwargs):
        user = kwargs.pop('user', None)
        state = kwargs.pop('state', None)
        if state is None:
            state = self.client.get_state(user=user)
        return f(self, *args, **kwargs, state=state)
    return _with_state
#
# Controller
#
class BinsyncController:
    """Mediator between the Binary Ninja UI and a binsync repository.

    Owns the (lazily connected) binsync Client, tracks the currently viewed
    binary view / function, and runs a daemon thread that refreshes the
    control panel and periodically pulls the remote repo.
    """

    def __init__(self):
        self._client = None  # type: binsync.Client
        self.control_panel = None  # set by the UI when the panel is created
        self.curr_bv = None  # type: Optional[BinaryView]
        self.curr_func = None  # type: Optional[binaryninja.function.Function]

        # start the worker routine (daemon: dies with the host process)
        self.worker_thread = threading.Thread(target=self.worker_routine, daemon=True)
        self.worker_thread.start()

    def worker_routine(self):
        """Background loop: refresh the panel every second, pull every 10s."""
        while True:
            # reload the control panel if it's registered
            if self.control_panel is not None:
                try:
                    self.control_panel.reload()
                except RuntimeError:
                    # the panel has been closed
                    self.control_panel = None

            # pull the repo every 10 seconds
            if self.check_client() and self._client.has_remote \
                    and (
                        self._client._last_pull_attempt_at is None
                        or (datetime.datetime.now() - self._client._last_pull_attempt_at).seconds > 10
                    ):
                self._client.pull()

            time.sleep(1)

    def connect(self, user, path, init_repo, ssh_agent_pid=None, ssh_auth_sock=None):
        """Create the binsync Client for `user` at repo `path` and refresh the panel."""
        self._client = binsync.Client(user, path, init_repo=init_repo, ssh_agent_pid=ssh_agent_pid,
                                      ssh_auth_sock=ssh_auth_sock)
        if self.control_panel is not None:
            self.control_panel.reload()

    def check_client(self, message_box=False):
        """Return True iff a client is connected; optionally alert the user."""
        if self._client is None:
            if message_box:
                show_message_box(
                    "BinSync client does not exist",
                    "You haven't connected to a binsync repo. Please connect to a binsync repo first.",
                    MessageBoxButtonSet.OKButtonSet,
                    MessageBoxIcon.ErrorIcon,
                )
            return False
        return True

    def mark_as_current_function(self, bv, bn_func):
        """Record `bn_func` in `bv` as the active function and refresh the panel."""
        # NOTE(review): unlike worker_routine, this does not guard against
        # control_panel being None — confirm callers only fire after the
        # panel is registered.
        self.curr_bv = bv
        self.curr_func = bn_func
        self.control_panel.reload()

    def current_function(self, message_box=False) -> Optional[Function]:
        """Return the function currently selected in the UI, or None.

        Walks UIContext -> action handler -> action context; each missing
        link optionally raises a message box and yields None.
        """
        all_contexts = UIContext.allContexts()
        if not all_contexts:
            if message_box:
                show_message_box(
                    "UI contexts not found",
                    "No UI context is available. Please open a binary first.",
                    MessageBoxButtonSet.OKButtonSet,
                    MessageBoxIcon.ErrorIcon,
                )
            return
        ctx = all_contexts[0]
        handler = ctx.contentActionHandler()
        if handler is None:
            if message_box:
                show_message_box(
                    "Action handler not found",
                    "No action handler is available. Please open a binary first.",
                    MessageBoxButtonSet.OKButtonSet,
                    MessageBoxIcon.ErrorIcon,
                )
            return
        actionContext = handler.actionContext()
        func = actionContext.function
        if func is None:
            if message_box:
                show_message_box(
                    "No function is in selection",
                    "Please navigate to a function in the disassembly view.",
                    MessageBoxButtonSet.OKButtonSet,
                    MessageBoxIcon.ErrorIcon,
                )
            return None
        return func

    def state_ctx(self, user=None, version=None, locked=False) -> StateContext:
        """Proxy to the client's state context manager."""
        return self._client.state_ctx(user=user, version=version, locked=locked)

    @init_checker
    def status(self) -> Dict[str, Any]:
        """Return the client's status dictionary."""
        return self._client.status()

    @init_checker
    def users(self):
        """Return the users known to the connected repo."""
        return self._client.users()

    @init_checker
    @make_state
    def push_function(self, bn_func: binaryninja.function.Function, state: State=None):
        """Push the function's name/start address into the (writeable) state."""
        # Push function
        func = binsync.data.Function(
            int(bn_func.start)
        )  # force conversion from long to int
        func.name = bn_func.name
        state.set_function(func)

    @init_checker
    @make_state
    def push_patch(self, patch, state: State=None):
        """Record a byte patch (keyed by its offset) in the state."""
        state.set_patch(patch.offset, patch)

    @init_checker
    @make_state
    def push_stack_variable(self, bn_func: binaryninja.Function, stack_var: binaryninja.function.Variable,
                            state: State=None):
        """Push one stack variable of `bn_func` into the state.

        Raises TypeError when the variable is not stack-sourced.
        """
        if stack_var.source_type != VariableSourceType.StackVariableSourceType:
            raise TypeError("Unexpected source type %s of the variable %r." % (stack_var.source_type, stack_var))

        type_str = stack_var.type.get_string_before_name()
        size = stack_var.type.width
        v = StackVariable(stack_var.storage,
                          StackOffsetType.BINJA,
                          stack_var.name,
                          type_str,
                          size,
                          bn_func.start)
        state.set_stack_variable(bn_func.start, stack_var.storage, v)

    @init_checker
    @make_state
    def push_stack_variables(self, bn_func, state: State=None):
        """Push all named, user-relevant stack variables of `bn_func`."""
        for stack_var in bn_func.stack_layout:
            # ignore all unnamed variables
            # TODO: Do not ignore re-typed but unnamed variables
            # NOTE(review): `[_\d+]{0,1}` is a character class (one char of
            # '_', digit or '+'), and re.match is unanchored at the end —
            # probably intended `(_\d+)?$`; confirm before changing.
            if re.match(r"var_\d+[_\d+]{0,1}", stack_var.name) \
                    or stack_var.name in {
                        '__saved_rbp', '__return_addr',
                    }:
                continue
            if not stack_var.source_type == VariableSourceType.StackVariableSourceType:
                continue
            self.push_stack_variable(bn_func, stack_var, state=state)

    @init_checker
    @make_ro_state
    def pull_stack_variables(self, bn_func, user: Optional[str]=None, state: State=None) -> Dict[int, StackVariable]:
        """Return {offset: StackVariable} for `bn_func`, empty when unknown."""
        try:
            return dict(state.get_stack_variables(bn_func.start))
        except KeyError:
            return { }

    @init_checker
    @make_ro_state
    def pull_stack_variable(self, bn_func, offset: int, user: Optional[str]=None, state: State=None) -> StackVariable:
        """Return the stack variable of `bn_func` at `offset` (may raise KeyError)."""
        return state.get_stack_variable(bn_func.start, offset)

    @init_checker
    @make_ro_state
    def pull_function(self, bn_func, user: Optional[str]=None, state: State=None) -> Optional[Function]:
        """
        Pull a function downwards.

        :param bn_func: the Binary Ninja function to look up by start address
        :param user:    repo user to read from (None = self)
        :return:        the synced Function, or None when not in the state
        """
        # pull function
        try:
            func = state.get_function(int(bn_func.start))
            return func
        except KeyError:
            return None

    @init_checker
    @make_ro_state
    def fill_function(self, bn_func: binaryninja.function.Function, user: Optional[str]=None,
                      state: State=None) -> None:
        """
        Grab all relevant information from the specified user and fill the @bn_func:
        name, per-instruction comments, and stack variables.
        """
        _func = self.pull_function(bn_func, user=user, state=state)
        if _func is None:
            return

        # name
        bn_func.name = _func.name

        # comments
        for _, ins_addr in bn_func.instructions:
            _comment = self.pull_comment(ins_addr, user=user, state=state)
            if _comment is not None:
                bn_func.set_comment_at(ins_addr, _comment)

        # stack variables
        existing_stack_vars: Dict[int, Any] = dict((v.storage, v) for v in bn_func.stack_layout
                                                   if v.source_type == VariableSourceType.StackVariableSourceType)
        for offset, stack_var in self.pull_stack_variables(bn_func, user=user, state=state).items():
            bn_offset = stack_var.get_offset(StackOffsetType.BINJA)

            # skip if this variable already exists with the same name and type
            type_, _ = bn_func.view.parse_type_string(stack_var.type)
            if bn_offset in existing_stack_vars \
                    and existing_stack_vars[bn_offset].name == stack_var.name \
                    and existing_stack_vars[bn_offset].type == type_:
                continue

            bn_func.create_user_stack_var(bn_offset, type_, stack_var.name)

    @init_checker
    @make_state
    def remove_all_comments(self, bn_func: binaryninja.function.Function, user: Optional[str]=None,
                            state: State=None) -> None:
        """Delete every synced comment on `bn_func`'s instructions."""
        for _, ins_addr in bn_func.instructions:
            if ins_addr in state.comments:
                state.remove_comment(ins_addr)

    @init_checker
    @make_state
    def push_comments(self, comments: Dict[int, str], state: State=None) -> None:
        """Push a batch of {address: comment} pairs into the state."""
        for addr, comment in comments.items():
            comm_addr = int(addr)
            state.set_comment(comm_addr, comment)

    @init_checker
    @make_ro_state
    def pull_comment(self, addr, user: Optional[str]=None, state: State=None) -> Optional[str]:
        """
        Pull the comment at `addr` downwards.

        :param addr: instruction address
        :param user: repo user to read from (None = self)
        :return:     the comment string, or None when absent
        """
        try:
            return state.get_comment(addr)
        except KeyError:
            return None

    @init_checker
    @make_ro_state
    def pull_comments(self, start_addr, end_addr: Optional[int]=None,
                      user: Optional[str]=None, state: State=None) -> Optional[Iterable[str]]:
        """
        Pull comments in [start_addr, end_addr) downwards.

        :param start_addr: first address of the range
        :param end_addr:   exclusive end of the range (None = single address)
        :param user:       repo user to read from (None = self)
        """
        return state.get_comments(start_addr, end_addr=end_addr)
controller = BinsyncController()
class CurrentFunctionNotification(BinaryDataNotification):
    """Binary Ninja notification hook that tracks the function being edited.

    Forwards function update events to the controller so it always knows the
    "current" function. The symbol_* hooks only print (debug leftovers).
    """

    def __init__(self, controller):
        super().__init__()
        self.controller = controller

    def function_update_requested(self, view, func):
        print("function_update_requested", view, func)  # debug trace
        self.controller.mark_as_current_function(view, func)

    def function_updated(self, view, func):
        print("function_updated", view, func)  # debug trace
        self.controller.mark_as_current_function(view, func)

    def symbol_added(self, view, sym):
        print(view, sym)

    def symbol_updated(self, view, sym):
        print(view, sym)

    def symbol_removed(self, view, sym):
        print(view, sym)
def launch_binsync_configure(context):
    """UIAction callback: open the BinSync configuration dialog.

    Refuses to run (with a message box) when no binary view is loaded.
    """
    if context.binaryView is None:
        show_message_box(
            "No binary is loaded",
            "There is no Binary View available. Please open a binary in Binary Ninja first.",
            MessageBoxButtonSet.OKButtonSet,
            MessageBoxIcon.ErrorIcon,
        )
        return
    d = ConfigDialog(controller)
    d.exec_()  # modal: blocks until the dialog closes

    # register a notification to get current functions
    # TODO: This is a bad idea since more than one functions might be updated when editing one function :/
    # notification = CurrentFunctionNotification(controller)
    # context.binaryView.register_notification(notification)
def open_control_panel(*args):
    """UIAction callback: show the BinSync control panel (non-modal)."""
    d = ControlPanelDialog(controller)
    d.show()
class PatchDataNotification(BinaryDataNotification):
    """Notification hook that pushes byte patches to the repo as they happen."""

    def __init__(self, view, controller):
        super().__init__()
        self._view = view
        self._controller = controller
        self._patch_number = 0  # running count of patches seen (not yet used in naming)

    def data_written(self, view, offset, length):
        # TODO think about the naming
        # convert the view address to a file offset for the Patch record
        file_offset = offset - view.start
        obj_name = os.path.basename(view.file.original_filename)
        patch = Patch(obj_name, file_offset, view.read(offset, length))
        self._patch_number += 1
        self._controller.push_patch(patch)
class EditFunctionNotification(BinaryDataNotification):
    """Notification hook that pushes a function to the repo whenever it changes."""

    def __init__(self, view, controller):
        super().__init__()
        self._view = view
        self._controller = controller

    def function_updated(self, view, func):
        self._controller.push_function(func)
def start_patch_monitor(view):
    """Attach a PatchDataNotification to `view` so patches sync automatically."""
    notification = PatchDataNotification(view, controller)
    view.register_notification(notification)
def start_function_monitor(view):
    """Attach an EditFunctionNotification to `view` so function edits sync."""
    notification = EditFunctionNotification(view, controller)
    view.register_notification(notification)
# ---- Plugin registration (runs at import time) ----

# "Configure BinSync..." menu entry under Tools > BinSync
UIAction.registerAction("Configure BinSync...")
UIActionHandler.globalActions().bindAction(
    "Configure BinSync...", UIAction(launch_binsync_configure)
)
Menu.mainMenu("Tools").addAction("Configure BinSync...", "BinSync")

# "Open control panel" menu entry under Tools > BinSync
open_control_panel_id = "BinSync: Open control panel"
UIAction.registerAction(open_control_panel_id)
UIActionHandler.globalActions().bindAction(
    open_control_panel_id, UIAction(open_control_panel)
)
Menu.mainMenu("Tools").addAction(open_control_panel_id, "BinSync")

# register the control panel dock widget
dock_handler = DockHandler.getActiveDockHandler()
dock_handler.addDockWidget(
    "BinSync: Control Panel",
    lambda n, p, d: create_widget(ControlPanelDockWidget, n, p, d, controller),
    Qt.RightDockWidgetArea,
    Qt.Vertical,
    True
)

# right-click command on a function to push it to the repo
PluginCommand.register_for_function(
    "Push function upwards", "Push function upwards", controller.push_function
)

# TODO how can we avoid having users to click on this menu option?
PluginCommand.register(
    "Start Sharing Patches", "Start Sharing Patches", start_patch_monitor
)
PluginCommand.register(
    "Start Sharing Functions", "Start Sharing Functions", start_function_monitor
)
|
import gym
import gym_envs
# Smoke test: instantiate the custom environment registered by gym_envs and
# report whether it implements the gym.GoalEnv interface.
env = gym.make('my_goal_env-v0')
print("isinstance(env, gym.GoalEnv)", isinstance(env, gym.GoalEnv))
##################### generated by xml-casa (v2) from calanalysis.xml ###############
##################### 78255864a2b8535063cf2fc5161fc628 ##############################
from __future__ import absolute_import
from .__casac__ import calanalysis as _calanalysis
from .platform import str_encode as _str_ec
from .platform import str_decode as _str_dc
from .platform import dict_encode as _dict_ec
from .platform import dict_decode as _dict_dc
from .platform import dict_encode as _quant_ec
from .platform import dict_decode as _quant_dc
from .platform import encode as _any_ec
from .platform import decode as _any_dc
from .typecheck import validator as _pc
from .coercetype import coerce as _coerce
class calanalysis:
    """Python wrapper around the casac `calanalysis` SWIG tool.

    AUTO-GENERATED by xml-casa from calanalysis.xml — do not hand-edit;
    regenerate from the XML instead. Every method follows the same pattern:
    build a `schema`/`doc` pair, validate+coerce the arguments with the
    `_pc` validator, then forward the encoded values to the SWIG object and
    decode the result.
    """
    ### self
    def __init__(self, *args, **kwargs):
        """Construct a calibration analysis tool.
        """
        # reuse an existing SWIG object when supplied (internal use)
        self._swigobj = kwargs.get('swig_object',None)
        if self._swigobj is None:
            self._swigobj = _calanalysis()

    def open(self, caltable=''):
        """This member function opens a calibration table.
        """
        schema = {'caltable': {'type': 'cReqPath', 'coerce': _coerce.expand_path}}
        doc = {'caltable': caltable}
        # validation mutates _pc.document with the coerced values used below
        assert _pc.validate(doc,schema), str(_pc.errors)
        _open_result = self._swigobj.open(_str_ec(_pc.document['caltable']))
        return _open_result

    def close(self):
        """This member function closes a calibration table.
        """
        _close_result = self._swigobj.close()
        return _close_result

    def calname(self):
        """This member function returns calibration table name.
        """
        _calname_result = _str_dc(self._swigobj.calname())
        return _calname_result

    def msname(self):
        """This member function returns the name of the MS that created this calibration
        table.
        """
        _msname_result = _str_dc(self._swigobj.msname())
        return _msname_result

    def viscal(self):
        """This member function returns the type of calibration table ('B', 'G', 'T',
        etc.).
        """
        _viscal_result = _str_dc(self._swigobj.viscal())
        return _viscal_result

    def partype(self):
        """This member function returns the parameter column type in the calibration table
        ('Complex' or 'Float').
        """
        _partype_result = _str_dc(self._swigobj.partype())
        return _partype_result

    def polbasis(self):
        """This member function returns the polarization basis in the calibration table
        ('L' for linear or 'C' for circular).
        """
        _polbasis_result = _str_dc(self._swigobj.polbasis())
        return _polbasis_result

    def numfield(self):
        """This member function returns the number of fields in the calibration table.
        """
        _numfield_result = self._swigobj.numfield()
        return _numfield_result

    def field(self, name=True):
        """This member function returns the fields in the calibration table.
        """
        schema = {'name': {'type': 'cBool'}}
        doc = {'name': name}
        assert _pc.validate(doc,schema), str(_pc.errors)
        _field_result = [_str_dc(_x) for _x in self._swigobj.field(_pc.document['name'])]
        return _field_result

    def numantenna(self):
        """This member function returns the number of antennas in the calibration table.
        """
        _numantenna_result = self._swigobj.numantenna()
        return _numantenna_result

    def numantenna1(self):
        """This member function returns the number of antenna 1s in the calibration table.
        """
        _numantenna1_result = self._swigobj.numantenna1()
        return _numantenna1_result

    def numantenna2(self):
        """This member function returns the number of antenna 2s in the calibration table.
        """
        _numantenna2_result = self._swigobj.numantenna2()
        return _numantenna2_result

    def antenna(self, name=True):
        """This member function returns the antennas in the calibration table.
        """
        schema = {'name': {'type': 'cBool'}}
        doc = {'name': name}
        assert _pc.validate(doc,schema), str(_pc.errors)
        _antenna_result = [_str_dc(_x) for _x in self._swigobj.antenna(_pc.document['name'])]
        return _antenna_result

    def antenna1(self, name=True):
        """This member function returns the antenna 1s in the calibration table.
        """
        schema = {'name': {'type': 'cBool'}}
        doc = {'name': name}
        assert _pc.validate(doc,schema), str(_pc.errors)
        _antenna1_result = [_str_dc(_x) for _x in self._swigobj.antenna1(_pc.document['name'])]
        return _antenna1_result

    def antenna2(self, name=True):
        """This member function returns the antenna 2s in the calibration table.
        """
        schema = {'name': {'type': 'cBool'}}
        doc = {'name': name}
        assert _pc.validate(doc,schema), str(_pc.errors)
        _antenna2_result = [_str_dc(_x) for _x in self._swigobj.antenna2(_pc.document['name'])]
        return _antenna2_result

    def numfeed(self):
        """This member function returns the number of feeds in the calibration table.
        """
        _numfeed_result = self._swigobj.numfeed()
        return _numfeed_result

    def feed(self):
        """This member function returns the feeds in the calibration table.
        """
        _feed_result = [_str_dc(_x) for _x in self._swigobj.feed()]
        return _feed_result

    def numtime(self):
        """This member function returns the number of times in the calibration table.
        """
        _numtime_result = self._swigobj.numtime()
        return _numtime_result

    def time(self):
        """This member function returns the times (in MJD seconds) in the calibration
        table.
        """
        _time_result = self._swigobj.time()
        return _time_result

    def numspw(self):
        """This member function returns the number of spectral windows in the calibration
        table.
        """
        _numspw_result = self._swigobj.numspw()
        return _numspw_result

    def spw(self, name=True):
        """This member function returns the spectral windows in the calibration table.
        """
        schema = {'name': {'type': 'cBool'}}
        doc = {'name': name}
        assert _pc.validate(doc,schema), str(_pc.errors)
        _spw_result = [_str_dc(_x) for _x in self._swigobj.spw(_pc.document['name'])]
        return _spw_result

    def numchannel(self):
        """This member function returns the number of channels per spectral window in the
        calibration table.
        """
        _numchannel_result = self._swigobj.numchannel()
        return _numchannel_result

    def freq(self):
        """This member function returns the frequencies per spectral window in the
        calibration table.
        """
        _freq_result = _dict_dc(self._swigobj.freq())
        return _freq_result

    def get(self, field=[ ], antenna=[ ], timerange=[ ], spw=[ ], feed=[ ], axis='TIME', ap='AMPLITUDE', norm=False, unwrap=False, jumpmax=float(0.0)):
        """This member function returns the calibration data.
        """
        # NOTE: mutable list defaults are generated code; they are never
        # mutated here, only validated and encoded.
        schema = {'field': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'antenna': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'timerange': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'spw': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'feed': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'axis': {'type': 'cStr'}, 'ap': {'type': 'cStr'}, 'norm': {'type': 'cBool'}, 'unwrap': {'type': 'cBool'}, 'jumpmax': {'type': 'cFloat', 'coerce': _coerce.to_float}}
        doc = {'field': field, 'antenna': antenna, 'timerange': timerange, 'spw': spw, 'feed': feed, 'axis': axis, 'ap': ap, 'norm': norm, 'unwrap': unwrap, 'jumpmax': jumpmax}
        assert _pc.validate(doc,schema), str(_pc.errors)
        _get_result = _dict_dc(self._swigobj.get(_any_ec(_pc.document['field']), _any_ec(_pc.document['antenna']), _any_ec(_pc.document['timerange']), _any_ec(_pc.document['spw']), _any_ec(_pc.document['feed']), _str_ec(_pc.document['axis']), _str_ec(_pc.document['ap']), _pc.document['norm'], _pc.document['unwrap'], _pc.document['jumpmax']))
        return _get_result

    def fit(self, field=[ ], antenna=[ ], timerange=[ ], spw=[ ], feed=[ ], axis='TIME', ap='AMPLITUDE', norm=False, unwrap=False, jumpmax=float(0.0), order='AVERAGE', type='LSQ', weight=False):
        """This member function returns the calibration data and fits along the
        non-iteration axis.
        """
        schema = {'field': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'antenna': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'timerange': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'spw': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'feed': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'axis': {'type': 'cStr'}, 'ap': {'type': 'cStr'}, 'norm': {'type': 'cBool'}, 'unwrap': {'type': 'cBool'}, 'jumpmax': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'order': {'type': 'cStr'}, 'type': {'type': 'cStr'}, 'weight': {'type': 'cBool'}}
        doc = {'field': field, 'antenna': antenna, 'timerange': timerange, 'spw': spw, 'feed': feed, 'axis': axis, 'ap': ap, 'norm': norm, 'unwrap': unwrap, 'jumpmax': jumpmax, 'order': order, 'type': type, 'weight': weight}
        assert _pc.validate(doc,schema), str(_pc.errors)
        _fit_result = _dict_dc(self._swigobj.fit(_any_ec(_pc.document['field']), _any_ec(_pc.document['antenna']), _any_ec(_pc.document['timerange']), _any_ec(_pc.document['spw']), _any_ec(_pc.document['feed']), _str_ec(_pc.document['axis']), _str_ec(_pc.document['ap']), _pc.document['norm'], _pc.document['unwrap'], _pc.document['jumpmax'], _str_ec(_pc.document['order']), _str_ec(_pc.document['type']), _pc.document['weight']))
        return _fit_result
|
# Generated by Django 2.2 on 2020-07-27 11:49
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: make Product.description a CKEditor
    RichTextField (blank allowed). Do not hand-edit applied migrations."""

    dependencies = [
        ('products', '0005_auto_20200727_0034'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='description',
            field=ckeditor.fields.RichTextField(blank=True, verbose_name='Description'),
        ),
    ]
|
from home.models import Post
from wagtail.admin.panels import FieldPanel
from wagtail.models import Page
from wagtail.fields import StreamField
from wagtail.documents.blocks import DocumentChooserBlock
from newamericadotorg.helpers import paginate_results, get_program_and_subprogram_posts, get_org_wide_posts
from programs.models import AbstractContentPage
from home.models import AbstractHomeContentPage
class PressRelease(Post):
    """
    Press Release class that inherits from the abstract
    Post model and creates pages for Press Releases.
    """
    parent_page_types = ['ProgramPressReleasesPage']
    subpage_types = []  # leaf page: nothing can be created beneath it

    # optional document attachment(s), stored as a StreamField block
    attachment = StreamField([
        ('attachment', DocumentChooserBlock(required=False, null=True)),
    ], null=True, blank=True, use_json_field=True)

    content_panels = Post.content_panels + [
        FieldPanel('attachment'),
    ]

    class Meta:
        verbose_name = 'Press Release'
class AllPressReleasesHomePage(AbstractHomeContentPage):
    """
    A page which inherits from the abstract Page model and
    returns every Press Release in the PressRelease model
    for the organization-wide Press Release Homepage
    """
    parent_page_types = ['home.Homepage']
    subpage_types = []

    @property
    def content_model(self):
        # tells the abstract base which model to list on this page
        return PressRelease

    class Meta:
        verbose_name = "Homepage for all Press Releases"
class ProgramPressReleasesPage(AbstractContentPage):
    """
    A page which inherits from the abstract Page model and
    returns all Press Releases associated with a specific
    Program or Subprogram
    """
    # Attached under a Program/Subprogram/Project; hosts PressRelease children.
    parent_page_types = ['programs.Program', 'programs.Subprogram', 'programs.Project']
    subpage_types = ['PressRelease']

    @property
    def content_model(self):
        # Tells the abstract base which model's posts to list.
        return PressRelease

    class Meta:
        verbose_name = "Press Releases Homepage"
|
# 946. Validate Stack Sequences
class Solution:
    def validateStackSequences(self, pushed: list[int], popped: list[int]) -> bool:
        """Return True if `popped` is a valid pop order for pushes in `pushed` order.

        Bug fix: the original annotations used `typing.List` without importing
        it, which raises NameError when the class is defined; use the builtin
        generic `list` instead. Also guard the index into `popped` so unequal
        input lengths cannot raise IndexError.
        """
        stack = []
        i = 0  # index of the next value expected to be popped
        for e in pushed:
            stack.append(e)
            # Greedily pop while the stack top matches the next expected pop.
            while stack and i < len(popped) and stack[-1] == popped[i]:
                stack.pop()
                i += 1
        # Valid iff every expected pop was matched.
        return i == len(popped)
class ICBC:
    """A bank branch whose opening funds are drawn from a shared head-office pool."""

    # Class-level variable: the head office's total money, shared by all branches.
    total_money = 1000000

    @classmethod
    def print_total_money(cls):
        # cls is the class object itself (the very same object as ICBC).
        print(id(cls), id(ICBC))
        print(cls.total_money)

    def __init__(self, name, money):
        """Open a branch `name`, allocating `money` from the head office."""
        self.name = name
        self.money = money
        # Deduct this branch's allocation from the shared pool.
        ICBC.total_money -= money
# Open two branches; each constructor call debits the shared head-office pool.
i01 = ICBC('北京天坛支行', 100000)
i02 = ICBC('北京万寿路支行', 100000)
# Query class-level data through the classmethod, not an instance method.
ICBC.print_total_money()
# Unconventional: class members can also be reached through an instance:
# i02.print_total_money()
|
# Keep prompting until the user supplies a string consisting only of digits.
validNumber = False
userInput = 0
while True:
    userInput = input("Please enter a valid number: ")
    if userInput.isdigit():
        validNumber = True
        break
    print("The number you entered is not valid. Please enter another number.")
print("The number you entered was: " + userInput + ". Your input was valid.")
|
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import inspect
from settings import LOG_NAME_SEP, FOLDERS
"""
logger module with several log level. Testframwork and TestSuite log
will be saved into different log files.
"""
SPDK_ENV_PAT = r"SPDK_*"
def RED(text):
    """Wrap *text* in ANSI escapes for bold red terminal output."""
    return "\x1B[31;1m%s\x1B[0m" % text
def GREEN(text):
    """Wrap *text* in ANSI escapes for bold green terminal output."""
    return "\x1B[32;1m%s\x1B[0m" % text
def get_subclasses(module, clazz):
    """Yield (name, class) pairs for members of *module* that directly subclass *clazz*."""
    for member_name, member in inspect.getmembers(module):
        is_direct_child = hasattr(member, '__bases__') and clazz in member.__bases__
        if is_direct_child:
            yield member_name, member
# Custom log levels: each (component, direction) pair gets its own numeric
# level, offset from the stdlib bases, so handlers can tell framework (SPDK_*)
# traffic apart from test-suite (SUITE_*) traffic.
logging.SPDK_DUT_CMD = logging.INFO + 1
logging.SPDK_DUT_OUTPUT = logging.DEBUG + 1
logging.SPDK_DUT_RESULT = logging.WARNING + 1
logging.SPDK_TESTER_CMD = logging.INFO + 2
logging.SPDK_TESTER_OUTPUT = logging.DEBUG + 2
logging.SPDK_TESTER_RESULT = logging.WARNING + 2
logging.SUITE_DUT_CMD = logging.INFO + 3
logging.SUITE_DUT_OUTPUT = logging.DEBUG + 3
logging.SUITE_TESTER_CMD = logging.INFO + 4
logging.SUITE_TESTER_OUTPUT = logging.DEBUG + 4
# Register human-readable names for the custom levels.
logging.addLevelName(logging.SPDK_DUT_CMD, 'SPDK_DUT_CMD')
logging.addLevelName(logging.SPDK_DUT_OUTPUT, 'SPDK_DUT_OUTPUT')
logging.addLevelName(logging.SPDK_DUT_RESULT, 'SPDK_DUT_RESULT')
logging.addLevelName(logging.SPDK_TESTER_CMD, 'SPDK_TESTER_CMD')
logging.addLevelName(logging.SPDK_TESTER_OUTPUT, 'SPDK_TESTER_OUTPUT')
logging.addLevelName(logging.SPDK_TESTER_RESULT, 'SPDK_TESTER_RESULT')
logging.addLevelName(logging.SUITE_DUT_CMD, 'SUITE_DUT_CMD')
logging.addLevelName(logging.SUITE_DUT_OUTPUT, 'SUITE_DUT_OUTPUT')
logging.addLevelName(logging.SUITE_TESTER_CMD, 'SUITE_TESTER_CMD')
logging.addLevelName(logging.SUITE_TESTER_OUTPUT, 'SUITE_TESTER_OUTPUT')
# Log line formats: file lines carry a timestamp; console lines carry a color
# (the 'color' record attribute is injected by ColorHandler).
message_fmt = '%(asctime)s %(levelname)20s: %(message)s'
date_fmt = '%d/%m/%Y %H:%M:%S'
RESET_COLOR = '\033[0m'
stream_fmt = '%(color)s%(levelname)20s: %(message)s' + RESET_COLOR
# Output directory override; None means "<cwd>/../<Output folder>".
log_dir = None
def add_salt(salt, msg):
    """Prefix *msg* with '[salt] ' when a salt is set; otherwise return msg unchanged."""
    return '[%s] ' % salt + str(msg) if salt else msg
class BaseLoggerAdapter(logging.LoggerAdapter):
    """
    Upper layer of the original logging module.

    Provides one convenience method per custom SPDK/suite level so callers
    can write e.g. ``log.spdk_dut_cmd(msg)`` instead of passing the level.
    """

    def spdk_dut_cmd(self, msg, *args, **kwargs):
        self.log(logging.SPDK_DUT_CMD, msg, *args, **kwargs)

    def spdk_dut_output(self, msg, *args, **kwargs):
        self.log(logging.SPDK_DUT_OUTPUT, msg, *args, **kwargs)

    def spdk_dut_result(self, msg, *args, **kwargs):
        self.log(logging.SPDK_DUT_RESULT, msg, *args, **kwargs)

    def spdk_tester_cmd(self, msg, *args, **kwargs):
        self.log(logging.SPDK_TESTER_CMD, msg, *args, **kwargs)

    def spdk_tester_output(self, msg, *args, **kwargs):
        # Bug fix: this previously logged at SPDK_TESTER_CMD level.
        self.log(logging.SPDK_TESTER_OUTPUT, msg, *args, **kwargs)

    def spdk_tester_result(self, msg, *args, **kwargs):
        self.log(logging.SPDK_TESTER_RESULT, msg, *args, **kwargs)

    def suite_dut_cmd(self, msg, *args, **kwargs):
        self.log(logging.SUITE_DUT_CMD, msg, *args, **kwargs)

    def suite_dut_output(self, msg, *args, **kwargs):
        self.log(logging.SUITE_DUT_OUTPUT, msg, *args, **kwargs)

    def suite_tester_cmd(self, msg, *args, **kwargs):
        self.log(logging.SUITE_TESTER_CMD, msg, *args, **kwargs)

    def suite_tester_output(self, msg, *args, **kwargs):
        self.log(logging.SUITE_TESTER_OUTPUT, msg, *args, **kwargs)
class ColorHandler(logging.StreamHandler):
    """
    Stream handler that injects an ANSI 'color' attribute into each record
    (consumed by stream_fmt), chosen by the record's level.
    """

    # ANSI color prefix per level; '' keeps the terminal's default color.
    LEVEL_COLORS = {
        logging.DEBUG: '',  # SYSTEM
        logging.SPDK_DUT_OUTPUT: '\033[00;37m',  # WHITE
        logging.SPDK_TESTER_OUTPUT: '\033[00;37m',  # WHITE
        logging.SUITE_DUT_OUTPUT: '\033[00;37m',  # WHITE
        logging.SUITE_TESTER_OUTPUT: '\033[00;37m',  # WHITE
        logging.INFO: '\033[00;36m',  # CYAN
        logging.SPDK_DUT_CMD: '',  # SYSTEM
        logging.SPDK_TESTER_CMD: '',  # SYSTEM
        logging.SUITE_DUT_CMD: '',  # SYSTEM
        logging.SUITE_TESTER_CMD: '',  # SYSTEM
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.SPDK_DUT_RESULT: '\033[01;34m',  # BOLD BLUE
        logging.SPDK_TESTER_RESULT: '\033[01;34m',  # BOLD BLUE
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Any level missing from LEVEL_COLORS raises KeyError here.
        record.__dict__['color'] = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class SPDKLOG(BaseLoggerAdapter):
    """
    Log class for framework and testsuite.

    Wraps a stdlib logger with a file handler plus a colored console handler,
    and remaps the info/debug/warn levels depending on which component
    (dut/tester, framework/suite) is currently logging.
    """

    def __init__(self, logger, crb="suite"):
        global log_dir
        # Derive this adapter's name from the calling source file (strip .py).
        filename = inspect.stack()[1][1][:-3]
        self.name = filename.split('/')[-1]
        # Default level mapping; config_execution()/config_suite() remap these.
        self.error_lvl = logging.ERROR
        self.warn_lvl = logging.WARNING
        self.info_lvl = logging.INFO
        self.debug_lvl = logging.DEBUG
        if log_dir is None:
            self.log_path = os.getcwd() + "/../" + FOLDERS['Output']
        else:
            self.log_path = log_dir  # log dir should contain tag/crb global value and mod in spdk
        self.spdk_log = "TestFramework.log"
        self.logger = logger
        self.logger.setLevel(logging.DEBUG)
        self.crb = crb
        super(SPDKLOG, self).__init__(self.logger, dict(crb=self.crb))
        self.salt = ''
        self.fh = None
        self.ch = None
        # add default log file and console handlers
        fh = logging.FileHandler(self.log_path + "/" + self.spdk_log)
        ch = ColorHandler()
        self.__log_hander(fh, ch)

    def __log_hander(self, fh, ch):
        """
        Configure and swap in a new stream handler and file handler pair.
        """
        fh.setFormatter(logging.Formatter(message_fmt, date_fmt))
        ch.setFormatter(logging.Formatter(stream_fmt, date_fmt))
        # file handler default level
        fh.setLevel(logging.DEBUG)
        # console handler default level
        ch.setLevel(logging.INFO)
        # Attach the new handlers before removing the previous pair so that
        # no records are dropped in between.
        self.logger.addHandler(fh)
        self.logger.addHandler(ch)
        if self.fh is not None:
            self.logger.removeHandler(self.fh)
        if self.ch is not None:
            self.logger.removeHandler(self.ch)
        self.fh = fh
        self.ch = ch

    def warning(self, message):
        """
        Warning level log function.
        """
        message = add_salt(self.salt, message)
        self.logger.log(self.warn_lvl, message)

    def info(self, message):
        """
        Information level log function.
        """
        message = add_salt(self.salt, message)
        self.logger.log(self.info_lvl, message)

    def error(self, message):
        """
        Error level log function.
        """
        message = add_salt(self.salt, message)
        self.logger.log(self.error_lvl, message)

    def debug(self, message):
        """
        Debug level log function.
        """
        message = add_salt(self.salt, message)
        self.logger.log(self.debug_lvl, message)

    def config_execution(self, crb):
        """
        Reconfigure the framework logfile and remap levels for *crb*
        ('dut...', 'tester...', or anything else for the stdlib defaults).
        """
        log_file = self.log_path + '/' + self.spdk_log
        fh = logging.FileHandler(log_file)
        ch = ColorHandler()
        self.__log_hander(fh, ch)

        def set_salt(crb, start_flag):
            # Everything after '<start_flag><SEP>' becomes the log salt.
            if LOG_NAME_SEP in crb:
                old = '%s%s' % (start_flag, LOG_NAME_SEP)
                if not self.salt:
                    self.salt = crb.replace(old, '', 1)

        if crb.startswith('dut'):
            self.info_lvl = logging.SPDK_DUT_CMD
            self.debug_lvl = logging.SPDK_DUT_OUTPUT
            self.warn_lvl = logging.SPDK_DUT_RESULT
            set_salt(crb, 'dut')
        elif crb.startswith('tester'):
            self.info_lvl = logging.SPDK_TESTER_CMD
            self.debug_lvl = logging.SPDK_TESTER_OUTPUT
            self.warn_lvl = logging.SPDK_TESTER_RESULT
            set_salt(crb, 'tester')
        else:
            self.error_lvl = logging.ERROR
            self.warn_lvl = logging.WARNING
            self.info_lvl = logging.INFO
            self.debug_lvl = logging.DEBUG

    def config_suite(self, suitename, crb=None):
        """
        Reconfigure the logfile (and levels) for a given test suite.
        """
        log_file = self.log_path + '/' + suitename + '.log'
        fh = logging.FileHandler(log_file)
        ch = ColorHandler()
        self.__log_hander(fh, ch)
        if crb == 'dut':
            self.info_lvl = logging.SUITE_DUT_CMD
            self.debug_lvl = logging.SUITE_DUT_OUTPUT
        elif crb == 'tester':
            self.info_lvl = logging.SUITE_TESTER_CMD
            self.debug_lvl = logging.SUITE_TESTER_OUTPUT
def getLogger(name, crb="suite"):
    """
    Get logger handler and if there's no handler will create one.
    """
    return SPDKLOG(logging.getLogger(name), crb)
# Regex patterns used to recognize suite/case markers in framework log output.
_TESTSUITE_NAME_FORMAT_PATTERN = r'TEST SUITE : (.*)'
_TESTSUITE_ENDED_FORMAT_PATTERN = r'TEST SUITE ENDED: (.*)'
_TESTCASE_NAME_FORMAT_PATTERN = r'Test Case (.*) Begin'
_TESTCASE_RESULT_FORMAT_PATTERN = r'Test Case (.*) Result (.*):'
|
#!/usr/bin/env python
"""Print the K with the lowest mean cross-validation error.

Reads the log file named by argv[1]; each line ends with a CV error and
contains a token like '(K=3):' identifying the fold count.
"""
import sys

list_cv_error = sys.argv[1]
k_CV = {}
for raw in open(list_cv_error):
    fields = raw.strip().split(' ')
    cverror = float(fields[-1])
    K = int(fields[2].replace('(K=', '').replace('):', ''))
    k_CV.setdefault(K, []).append(cverror)

Ks = sorted(k_CV)
MeanCV = [sum(k_CV[K]) / float(len(k_CV[K])) for K in Ks]
# Lowest mean error wins; ties broken by the smaller K.
Values = sorted(zip(MeanCV, Ks))
print(Values[0][1])
|
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect
from .forms import RegisterForm
# Bug fix: the module is `django.views`, not `django.view` (ImportError at load).
from django.views.generic import TemplateView
from .models import EventManagement
from NGO.app.models import User
def get_name(request):
    """Render the registration form; on a valid POST redirect to /thanks/.

    An invalid POST falls through and re-renders the bound form so the
    user sees validation errors.
    """
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            return HttpResponseRedirect('/thanks/')
    else:
        form = RegisterForm()
    return render(request, 'register.html', {'form': form})
# trying to retrieve all data from database, not done yet.
def get_event(request):
    """List all EventManagement rows.

    NOTE(review): the template name is an empty string — rendering will fail
    until a real template is supplied.
    """
    data = EventManagement.objects.all()
    return render(request, '', {'data': data})
def home(request):
    """Render the home page with every registered user."""
    users = User.objects.all()
    return render(request, 'home.html', {'users': users})
"""TODO: We need an event_detail.html page"""
def event_detail(request, id):
try:
event = Event.objects.get(id=id)
except User.DoesNotExist:
raise Http404('Event not found...')
return render(request, '''event_detail.html''', {'event': event})
pass |
import matplotlib.pyplot as plt
import lentil

# Render and save three reference images for the docs: a circular mask,
# a circular amplitude map, and the Zernike focus term (index 4) over the mask.
circlemask = lentil.util.circlemask((256, 256), 128)
plt.imshow(circlemask)
plt.savefig('../../_static/img/circle_mask.png', transparent=True, bbox_inches='tight', dpi=150)

amplitude = lentil.util.circle((256, 256), 128)
plt.imshow(amplitude)
plt.savefig('../../_static/img/circle_amplitude.png', transparent=True, bbox_inches='tight', dpi=150)

z4 = lentil.zernike.zernike(circlemask, 4)
plt.imshow(z4)
plt.savefig('../../_static/img/circle_focus.png', transparent=True, bbox_inches='tight', dpi=150)
|
import os
import requests
import json
import datetime
def ps_a_command_on_terminal(handedoverdestinationsrv):
    """Collect `docker ps -a` output on this node and push it to the backend.

    Writes the container list to ps-a.txt, deletes the node's previous
    records on the backend, then POSTs one JSON record per container.
    An empty destination falls back to the local dev server.

    Returns "yes" on success, None if anything failed (best effort).
    """
    if handedoverdestinationsrv == "":
        destinationsrv = 'http://127.0.0.1:5000/'
    else:
        destinationsrv = handedoverdestinationsrv
    try:
        os.system("""docker ps -a --format "{{.ID}} {{.Names}} {{.State}} {{.Image}}" > ps-a.txt""")
        hostname_stream = os.popen('hostname')
        hostname = str.rstrip(hostname_stream.read())
        # First action: delete all container information for this node on the backend.
        send_response = requests.delete(destinationsrv + 'nodes/' + hostname)
        current_timestamp = str(datetime.datetime.now())
        execinformation = "yes"
        file_array = []
        filename = "ps-a.txt"
        try:
            # Bug fix: the file handle was never closed; use a context manager.
            with open(filename, 'r') as filepointer:
                file_array = filepointer.readlines()  # list element by line
        except OSError:
            print("some error while loading:", filename)
        if len(file_array) > 0:
            for line in file_array:
                element = line.split(" ")
                try:
                    # Bug fix: a local variable named `json` shadowed the
                    # imported json module; build the payload under a new name.
                    payload = {"containerid": element[0], "name": element[1],
                               "status": element[2], "node": hostname,
                               "timestamp": current_timestamp}
                    send_response = requests.post(destinationsrv + 'containers', json=payload)
                except Exception:
                    print("something went wrong by requests call ")
        else:
            print("no content in file?!?")
    except Exception:
        # Best effort: any failure reports None rather than raising.
        return None
    return execinformation
psatxt = ps_a_command_on_terminal("") |
import numpy as np
import csv
from scipy.fftpack import fft
class DataSet(object):
    """Container pairing flattened signal samples with their labels."""

    def __init__(self, acdata, labels):
        """Store *acdata* reshaped from (N, H, W, 1) to (N, H*W) float32."""
        assert acdata.shape[0] == labels.shape[0], (
            "acdata.shape: %s labels.shape: %s" % (acdata.shape,
                                                   labels.shape))
        assert acdata.shape[3] == 1
        n_samples = acdata.shape[0]
        flat_len = acdata.shape[1] * acdata.shape[2]
        self._acdata = acdata.reshape(n_samples, flat_len).astype(np.float32)
        self._labels = labels

    @property
    def acdata(self):
        """Flattened float32 sample matrix, shape (N, H*W)."""
        return self._acdata

    @property
    def labels(self):
        """Label vector aligned with acdata rows."""
        return self._labels
# Size bookkeeping: 784*2=1568 FFT bins, 1567*2 + 5 = 3139 raw samples.
with open("/home/sujithpk/Desktop/cnn/train_sig.csv") as file:
    reader = csv.reader(file)
    tr_sig = list(reader)  # tr_sig is the list format of csv file train_sig

with open("/home/sujithpk/Desktop/cnn/test_sig.csv") as file:
    reader = csv.reader(file)
    ts_sig = list(reader)  # ts_sig is the list format of csv file test_sig
def getData(colno, tr_or_ts):
    """Build one 784-point feature vector from column *colno* of a signal csv.

    tr_or_ts selects the source: 11 = training csv, 22 = test csv.
    Pipeline: scale -> sliding-window average -> Hanning window -> |FFT|
    -> per-band RMS.
    """
    # colno = column to be read, tr_or_ts = train data or test data
    ac_sig = np.zeros(3139)
    if tr_or_ts == 11:
        for i in range(3139):
            # NOTE(review): 2.38 looks like a fixed amplitude scale — confirm.
            ac_sig[i] = float(tr_sig[i][colno]) / 2.38
    elif tr_or_ts == 22:
        for i in range(3139):
            ac_sig[i] = float(ts_sig[i][colno]) / 2.38
    # Sliding window 5 long, step size 2 (3139 raw samples -> 1568 points).
    ac_smpld = np.zeros(1568)
    for m in range(1568):
        adn = 0.0
        for n in range(5):
            adn = adn + float(ac_sig[m*2 + n])  # sum
        ac_smpld[m] = adn / 5  # average
    han_wind = np.hanning(1568)
    ac_han = np.multiply(ac_smpld, han_wind)
    # Magnitude spectrum of the windowed signal.
    ac_fft = abs(fft(ac_han))
    ac_data = np.zeros(784)  # final result : the training data
    # RMS over pairs of adjacent FFT bins (1568 bins -> 784 bands).
    for i in range(784):
        sq_sum = 0.0
        for j in range(2):
            sq_sum = sq_sum + ac_fft[i*2 + j] * ac_fft[i*2 + j]  # squared sum
        sq_sum = sq_sum / 2  # mean of squared sum
        ac_data[i] = np.sqrt(sq_sum)  # root of mean of squared sum = rms
    return ac_data
def read_inp(n_train, n_test, one_hot=False):
    """Load train/validation/test DataSets from the signal csv columns.

    n_train/n_test: number of columns to read from the train/test csvs.
    The first 10% of the training samples are split off for validation.
    one_hot is currently unused.
    """
    class DataSets(object):
        # Simple namespace holding .train/.validation/.test DataSet objects.
        pass
    data_sets = DataSets()
    print('\n.......Reading data inputs.......\n')
    VALIDATION_SIZE = int(n_train/10)  # usually 10% of n_train
    # Reshape each 784-point feature vector into a 28x28x1 "image".
    train_acdata = np.zeros((n_train, 28, 28, 1))
    for i in range(n_train):
        count = 0
        acdat = getData(i, 11)
        for j in range(28):
            for k in range(28):
                train_acdata[i, j, k, 0] = acdat[count]
                count += 1
    # Labels repeat the pattern 0..8 laid out as 3 groups x n1 x 3 classes.
    # NOTE(review): assumes n_train is divisible by 9 — confirm with callers.
    train_labels = np.zeros(n_train)
    n1 = int(n_train/9)  # 28
    cnt1 = 0
    for i in range(3):
        for j in range(n1):
            for k in range(3):
                train_labels[cnt1] = i*3 + k
                cnt1 = cnt1 + 1
    test_acdata = np.zeros((n_test, 28, 28, 1))
    for i in range(n_test):
        count = 0
        acdat = getData(i, 22)
        for j in range(28):
            for k in range(28):
                test_acdata[i, j, k, 0] = acdat[count]
                count += 1
    # Test labels are unknown here; use a zero placeholder vector.
    ext_lab = np.zeros((n_test,))
    validation_acdata = train_acdata[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_acdata = train_acdata[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    data_sets.train = DataSet(train_acdata, train_labels)
    data_sets.validation = DataSet(validation_acdata, validation_labels)
    data_sets.test = DataSet(test_acdata, ext_lab)
    return data_sets
|
# Generated by Django 2.1.5 on 2019-01-24 14:16
from django.db import migrations, models


class Migration(migrations.Migration):
    """Restrict UserProfile.user_type to the three leader/admin choices."""

    dependencies = [
        ('users', '0002_userprofile_nick_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='user_type',
            field=models.CharField(choices=[('s_group_leader', '小组长'), ('b_group_leader', '大组长'), ('administrator', '管理员')], default='', max_length=50, verbose_name='用户类型'),
        ),
    ]
|
"""
.. module:: CDataSplitterStratifiedKFold
:synopsis: Stratified K-Fold
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
from sklearn.model_selection import StratifiedKFold
from secml.array import CArray
from secml.data.splitter import CDataSplitter
class CDataSplitterStratifiedKFold(CDataSplitter):
    """Stratified K-Folds dataset splitting.

    Provides train/test indices to split data in train test sets.

    This dataset splitting object is a variation of KFold, which
    returns stratified folds. The folds are made by preserving
    the percentage of samples for each class.

    Parameters
    ----------
    num_folds : int, optional
        Number of folds to create. Default 3.
        This correspond to the size of tr_idx and ts_idx lists.
        For stratified K-Fold, this cannot be higher than the
        minimum number of samples per class in the dataset.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, is the RandomState instance used by np.random.

    Attributes
    ----------
    class_type : 'strat-kfold'

    Examples
    --------
    >>> from secml.data import CDataset
    >>> from secml.data.splitter import CDataSplitterStratifiedKFold

    >>> ds = CDataset([[1,2],[3,4],[5,6],[7,8]],[1,0,0,1])
    >>> stratkfold = CDataSplitterStratifiedKFold(num_folds=2, random_state=0).compute_indices(ds)
    >>> stratkfold.num_folds  # Cannot be higher than the number of samples per class
    2
    >>> stratkfold.tr_idx
    [CArray(2,)(dense: [1 3]), CArray(2,)(dense: [0 2])]
    >>> stratkfold.ts_idx
    [CArray(2,)(dense: [0 2]), CArray(2,)(dense: [1 3])]

    """
    __class_type = 'strat-kfold'

    def __init__(self, num_folds=3, random_state=None):
        super(CDataSplitterStratifiedKFold, self).__init__(
            num_folds, random_state=random_state)

    def compute_indices(self, dataset):
        """Compute training set and test set indices for each fold.

        Parameters
        ----------
        dataset : CDataset
            Dataset to split.

        Returns
        -------
        CDataSplitter
            Instance of the dataset splitter with tr/ts indices.

        """
        # Resetting indices (method may be called more than once)
        self._tr_idx = []
        self._ts_idx = []
        # Delegate the stratified split to sklearn; shuffling is seeded by
        # self.random_state for reproducibility.
        sk_splitter = StratifiedKFold(n_splits=self.num_folds,
                                      shuffle=True,
                                      random_state=self.random_state)
        # We take sklearn indices (iterators) and map to list of CArrays
        for train_index, test_index in \
                sk_splitter.split(X=dataset.X.get_data(),
                                  y=dataset.Y.get_data()):
            train_index = CArray(train_index)
            test_index = CArray(test_index)
            self._tr_idx.append(train_index)
            self._ts_idx.append(test_index)

        return self
|
from omegaconf.dictconfig import DictConfig
import torch
import pytorch_lightning as pl
import torch.nn as nn
from torch import optim
import torchvision.utils as vutils
from .baseline_model import *
from .criterions import *
class BaselineSSModelPl(pl.LightningModule):
    """Lightning wrapper around BaselineSSModel trained with focal loss."""

    def __init__(self, cfg: DictConfig):
        super().__init__()
        self.cfg = cfg
        self.core = BaselineSSModel()
        self.criterion = FocalLoss()

    def forward(self, x, y):
        """Run the core model on x and return (predictions, loss vs. y)."""
        yh = self.core(x)
        loss = self.criterion(yh, y)
        return yh, loss

    def training_step(self, batch, batch_idx):
        x, y = batch["x"], batch["y"]
        _, loss = self.forward(x, y)
        return {"loss": loss}

    def training_epoch_end(self, outputs):
        # Average per-step losses for the epoch-level metric.
        loss = torch.stack([x["loss"] for x in outputs]).mean()
        self.log("train_loss", loss, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        x, y = batch["x"], batch["y"]
        yh, loss = self.forward(x, y)
        return {"val_loss": loss}

    def validation_epoch_end(self, outputs):
        loss = torch.stack([x["val_loss"] for x in outputs]).mean()
        self.log("val_loss", loss, prog_bar=True)

    def configure_optimizers(self):
        """Build the optimizer from cfg.train; only 'adam' is supported."""
        if self.cfg.train.optim == "adam":
            return optim.Adam(
                self.core.parameters(),
                lr=self.cfg.train.lr,
                betas=self.cfg.train.betas,
                weight_decay=self.cfg.train.weight_decay,
                amsgrad=True,
            )
        else:
            raise NotImplementedError
|
from django.urls import path
from . import views

urlpatterns = [
    path('', views.indexpage, name='indexpage'),
    path('login/', views.login_view, name='login'),
    path('register/', views.register, name='register'),
    path('update/', views.update, name='update'),
    path('personal/', views.personal, name='personal'),  # view the user's past strategy history
    path('logout/', views.logoff, name='logout'),
    path('index/', views.indexpage, name='indexpage'),
    path('log/', views.log, name='log'),
    # path('user/<userId>', views.user, name='user'),  # this form would allow passing a URL parameter
    path('user/', views.user, name='user'),
    path('api/code/', views.code, name='code'),
    path('strategyList/', views.strategyList, name='strategyList'),
    path('strategyDetail/', views.enterUserPage, name='strategyDetail'),
    path('followList/', views.followList, name='followList'),
]
import random
from math import pi, sqrt, cos, sin

# Draw 10 points uniformly distributed inside a circle of radius 20 centred
# at the origin: uniform angle, sqrt-scaled radius (area-uniform sampling).
for _ in range(10):
    angle = 2 * pi * random.uniform(0, 1)
    radius = 20 * sqrt(random.uniform(0, 1))
    x = 0 + radius * cos(angle)
    y = 0 + radius * sin(angle)
    print('x, y', (x, y))
# OMAP3503 GP_TIMER PWM DRIVER
# Class to configure OMAP3503
# general purpose timers 8,9,10,11
# for PWM signal generation as well
# as update frequencies
import mmap, os, sys
import logging
logger = logging.getLogger('gumstix.motors.driver')
def setup_pwm():
    """One-time pad-mux configuration enabling PWM output on GPT 8-11.

    Maps the OMAP3503 pad-configuration registers through /dev/mem and
    switches the relevant pads to their timer-PWM mux mode.
    NOTE(review): writes plain-str byte sequences — this targets Python 2.
    """
    MAP_SIZE = 4096
    MAP_MASK = MAP_SIZE - 1
    f = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
    logger.info('Configuring PWM Mux')
    # PWM for GPIO 8, 11
    logger.debug('Configuring MUX for GPT 11, 8')
    addr = 0x48002178
    m = mmap.mmap(f, MAP_SIZE, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ, offset=addr & ~MAP_MASK)
    m.seek(addr & MAP_MASK)
    m.write('\x02\x01')  # gpt11,8_MUX, Turn on PWM
    m.close()
    logger.debug('Configuring MUX for GPT 10, 9')
    # PWM for GPIO 9, 10
    addr = 0x48002174
    m = mmap.mmap(f, MAP_SIZE, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ, offset=addr & ~MAP_MASK)
    m.seek(addr & MAP_MASK)
    m.write('\x02\x01')  # gpt10,9_MUX, Turn on PWM
    m.close()
    os.close(f)
class pwm_driver():
    """Driver for one OMAP3503 GP timer used as a PWM generator.

    Register access goes through /dev/mem via mmap; addresses and register
    values are handled as hex strings without the 0x prefix.
    NOTE(review): this module targets Python 2 ('string_escape', str writes).
    """

    def __init__(self, number, mode, timer_freq):
        # Select timer base address (decimal-string form of the GPT base).
        if number == 1:
            self.startAddr = str(0x4903e000)
        elif number == 2:
            self.startAddr = str(0x49040000)
        elif number == 3:
            self.startAddr = str(0x48086000)
        elif number == 4:
            self.startAddr = str(0x48088000)
        else:
            pass  # wrong input; NOTE(review): startAddr stays unset and later calls fail
        # Register offsets, TI OMAP35xx technical reference manual.
        self.TIOCP_CFG = str(0x010)
        self.TISTAT = str(0X014)
        self.TISR = str(0x018)
        self.TIER = str(0x01c)
        self.TWER = str(0x020)
        self.TCLR = str(0x024)
        self.TCRR = str(0x028)
        self.TLDR = str(0x02c)
        self.TTGR = str(0x030)
        self.TWPS = str(0x034)
        self.TMAR = str(0x038)
        self.TCAR1 = str(0x03c)
        self.TSICR = str(0x040)
        self.TCAR2 = str(0x044)
        self.TPIR = str(0x048)
        self.TNIR = str(0x04c)
        self.TCVR = str(0x050)
        self.TOCR = str(0x054)
        self.TOWR = str(0x058)
        # Pre-defined writes
        self.MAP_SIZE = 4096
        self.MAP_MASK = self.MAP_SIZE - 1
        self.stop = '00000000'   # TCLR value: timer stopped
        self.start = '00001843'  # TCLR value: PWM/compare/autoreload enabled
        self.update_cycles(timer_freq)
        logger.debug('Driver initiated for motor: \t ' + str(number))

    def read(self, addr):  # Not in use, yet
        """Read the 32-bit register at hex-string *addr*; return 8 hex chars."""
        f = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
        m = mmap.mmap(f, self.MAP_SIZE, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ, offset=int(addr, 16) & ~self.MAP_MASK)
        m.seek(int(addr, 16) & self.MAP_MASK)
        c = m.read(4)
        # Bug fix: 'value' was used before assignment (NameError); hex-encode
        # the raw bytes first, then clean and reorder.
        value = c.encode('hex')
        value = value.replace("0x", "").replace("/x", "").replace(" ", "")  # strip 0x, /x, spaces
        value = value[6:8] + value[4:6] + value[2:4] + value[0:2]  # re-order little-endian bytes
        m.close()  # Bug fix: was 'm.close' without the call parentheses.
        os.close(f)
        logger.debug('Read:' +
                     '\n\t Hex Value \t ' + str(value) +
                     '\n\t Address: \t ' + str(addr))
        return value

    def write(self, addr, value):  # Feedback needed
        """Write the 8-hex-char *value* to the 32-bit register at *addr*."""
        f = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
        m = mmap.mmap(f, self.MAP_SIZE, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ, offset=int(addr, 16) & ~self.MAP_MASK)
        m.seek(int(addr, 16) & self.MAP_MASK)
        # Re-arrange values into backwards (little-endian) byte pairs; decode
        # converts the '\\x' escape text into the raw byte.
        value = (('\\x' + value[6:8]).decode('string_escape') +
                 ('\\x' + value[4:6]).decode('string_escape') +
                 ('\\x' + value[2:4]).decode('string_escape') +
                 ('\\x' + value[0:2]).decode('string_escape'))
        logger.debug('Written:' +
                     '\n\t Hex Value \t ' + str(value.encode('string_escape')) +
                     '\n\t Address: \t ' + str(addr))
        m.write(value)
        m.close()  # Bug fix: was 'm.close' without the call parentheses.
        os.close(f)

    def addrOffset(self, offset):
        """Return base + offset as a hex string without prefix."""
        addr = hex(int(self.startAddr) + int(offset)).replace("0x", "").replace("L", "")
        logger.debug('Offset Address: \t ' + str(addr))
        return addr

    def strToHex(self, string):
        """Convert an integer to its unprefixed hex-string form."""
        value = hex(string).replace("0x", "").replace("L", "")
        logger.debug('String -> Hex: \t ' + str(value))
        return value

    def hexToStr(self, hex):
        """Convert an unprefixed hex string to its decimal-string form."""
        # Bug fix: int(...).replace(...) raised AttributeError (int has no
        # .replace); convert to str first.
        value = str(int(hex, 16)).replace("L", "")
        logger.debug('Hex -> String: \t ' + str(value))
        return value

    def steps(self, freq):
        """Number of 32 kHz timer ticks per cycle of *freq* (None if too high)."""
        if(freq > 32000):
            return  # frequency too high for the 32 kHz timer clock
        steps = int((1/float(freq)) / (1/float(32000)))  # 32KHz cycles per output cycle
        logger.debug('Steps: \t ' + str(steps))
        return steps

    def freq_cycles(self, freq):
        """Compute and remember the TLDR load value for *freq*; return it as hex."""
        steps = self.steps(freq)
        # Timer counts up from the load value to 0xffffffff.
        value = 4294967295 - steps
        self.load = value
        logger.debug('Cycles Frequency: \t ' + str(value))
        return self.strToHex(value)

    def freq_pulsewidth(self, freq):
        """Compute the TMAR match value for pulse width *freq* relative to load."""
        steps = self.steps(freq)
        value = self.load + steps
        logger.debug('Pulse Width Frequency \t ' + str(value))
        return self.strToHex(value)

    # Start of callable functions
    def update_cycles(self, freq):  # Changes timer
        """Stop the timer, reprogram the cycle frequency (TLDR), restart."""
        self.stop_motors()
        # Calculate hex for timer cycles and update timer load.
        value = self.freq_cycles(freq)
        addr = self.addrOffset(self.TLDR)
        logger.info('Cycles Update:'
                    + '\n\t Frequency: \t ' + str(value)
                    + '\n\t Address: \t ' + str(addr))
        self.write(addr, value)
        # Reset timer counter.
        value = 'ffffffff'
        addr = self.addrOffset(self.TCRR)
        logger.debug('Pulse Width Update'
                     + '\n\t Frequency: \t ' + str(value)
                     + '\n\t Address: \t ' + str(addr))
        self.write(addr, value)
        self.start_motors()

    def update_pulsewidth(self, freq):  # Changes the control pulse width on the timer
        """Stop the timer, reprogram the match register (TMAR), restart."""
        self.stop_motors()
        # Calculate hex for frequency and update timer match.
        value = self.freq_pulsewidth(freq)
        addr = self.addrOffset(self.TMAR)
        logger.info('Pulse Width Update'
                    + '\n\t Frequency: \t ' + str(value)
                    + '\n\t Address: \t ' + str(addr))
        self.write(addr, value)
        # Reset timer counter.
        value = 'ffffffff'
        addr = self.addrOffset(self.TCRR)
        logger.info('Pulse Width Update'
                    + '\n\t Frequency: \t ' + str(value)
                    + '\n\t Address: \t ' + str(addr))
        self.write(addr, value)
        self.start_motors()

    def read_speed(self):
        """Read back the current TMAR match value (hex string)."""
        addr = self.addrOffset(self.TMAR)
        value = self.read(addr)
        freq = self.hexToStr(value)
        logger.info('Pulse Width Read'
                    + '\n\t Hex: \t ' + str(value)
                    + '\n\t Frequency: \t ' + str(freq)
                    + '\n\t Address: \t ' + str(addr))
        return value

    def start_motors(self):  # Starts PWM on timer
        addr = self.addrOffset(self.TCLR)
        self.write(addr, self.start)

    def stop_motors(self):  # Stops Timer
        addr = self.addrOffset(self.TCLR)
        self.write(addr, self.stop)
|
__author__ = 'pierregagliardi'

import time
import os
import sys
from sklearn import metrics
from sklearn.svm import SVC
from projet_sentiment_analysis.code.utilities import extract_data

# Evaluation of the model on the test set
if __name__ == "__main__":
    general_path = 'projet_sentiment_analysis/'
    path_to_training_set = general_path + 'training_set_60000/training_set_unigram_all_features/'
    path_to_pickle = general_path + 'pickle_hyper_parameters/'
    (X_train, y_train, X_test, y_test, number_training, number_testing) = extract_data.extract_training_and_testing_set(
        path_to_training_set + 'metrics_training_set_7000.data',
        path_to_training_set + 'metrics_testing_set_7000.data')
    # Bug fix: time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for wall-clock interval timing.
    start_time = time.perf_counter()
    clf = SVC(C=100, cache_size=2000, class_weight=None, gamma=0.01,
              kernel='rbf', max_iter=-1, probability=False, shrinking=True,
              tol=0.001, verbose=False)
    clf.fit(X_train, y_train)
    y_prediction = clf.predict(X_test)
    print(metrics.classification_report(y_test, y_prediction))
    end_time = time.perf_counter()
    # Bug fix: `print >> sys.stderr` is Python 2 syntax and fails at runtime
    # under Python 3 (which the rest of this file targets); use file=.
    print('The code for file ' +
          os.path.split(__file__)[1] +
          ' ran for %.2fm' % ((end_time - start_time) / 60.), file=sys.stderr)
|
import FWCore.ParameterSet.Config as cms
# ------------------------------------------------------------------------------
# configure a filter to run only on the events selected by TkAlMinBias AlcaReco
import copy
from HLTrigger.HLTfilters.hltHighLevel_cfi import *

ALCARECOPCCRandomFilter = copy.deepcopy(hltHighLevel)
# Accept only events that passed the AlCaPCCRandom path.
ALCARECOPCCRandomFilter.HLTPaths = ['pathALCARECOAlCaPCCRandom']
# NOTE(review): the original comment said "dont throw on unknown path names"
# but throw=True makes the filter raise on unknown paths — confirm intent.
ALCARECOPCCRandomFilter.throw = True
ALCARECOPCCRandomFilter.TriggerResultsTag = cms.InputTag("TriggerResults", "", "RECO")

from Calibration.LumiAlCaRecoProducers.alcaRawPCCProducer_cfi import *

# Sequence: filter first, then the raw pixel-cluster-counting producer.
seqALCARECOPromptCalibProdLumiPCC = cms.Sequence(ALCARECOPCCRandomFilter *
                                                 rawPCCProd)
|
# Perfect Minimal Hashing
# By Vladyslav Ovchynnykov
import sys

# Default dictionary of words on UNIX systems.
GLOBAL_DICTIONARY = "/usr/share/dict/words"
# Words to look up, taken from the command line; fall back to a demo set.
USER_WORDS = sys.argv[1:]
if not len(USER_WORDS):
    USER_WORDS = ['hello', 'goodbye', 'dog', 'cat']
def hash(num, str):
    '''
    Compute an FNV-style hash of the given string. Each distinct value of
    the integer *num* selects a different member of the hash family;
    num == 0 uses the standard FNV offset basis.
    Algorithm from http://isthe.com/chongo/tech/comp/fnv/
    '''
    state = 0x01000193 if num == 0 else num
    for ch in str:
        state = ((state * 0x01000193) ^ ord(ch)) & 0xffffffff
    return state
def create_minimal_perfect_hash(input_dict):
    '''
    Compute a minimal perfect hash table for the given dictionary. Returns
    a tuple (G, V), both arrays: G holds the intermediate table of seed
    values needed to compute the index of a value in V; V holds the
    dictionary's values.
    '''
    size = len(input_dict)
    # Step 1: place all keys into buckets by their first-level hash.
    buckets = [[] for i in range(size)]
    G = [0] * size
    values = [None] * size
    for key in input_dict.keys():
        buckets[hash(0, key) % size].append(key)
    # Step 2: sort buckets and process the ones with most items first.
    buckets.sort(key=len, reverse=True)
    for b in range(size):
        bucket = buckets[b]
        if len(bucket) <= 1:
            break
        d = 1
        item = 0
        slots = []
        # Repeatedly try different values of d until we find a hash function
        # that places every item of this bucket into a free slot.
        while item < len(bucket):
            slot = hash(d, bucket[item]) % size
            if values[slot] is not None or slot in slots:
                d += 1
                item = 0
                slots = []
            else:
                slots.append(slot)
                item += 1
        G[hash(0, bucket[0]) % size] = d
        for i in range(len(bucket)):
            values[slots[i]] = input_dict[bucket[i]]
        if not b % 5000:
            print("bucket %d r" % (b))
    # Only buckets with a single item remain. Handle them faster by placing
    # each directly into a free slot; a negative value of d marks this case.
    freelist = []
    for i in range(size):
        if values[i] is None:
            freelist.append(i)
    # Note: b intentionally continues from where the loop above stopped.
    for b in range(b, size):
        bucket = buckets[b]
        if len(bucket) == 0:
            break
        slot = freelist.pop()
        # Subtract 1 to guarantee the stored value is negative even when
        # slot 0 was used.
        G[hash(0, bucket[0]) % size] = -slot - 1
        values[slot] = input_dict[bucket[0]]
        if (b % 5000) == 0:
            print("bucket %d r" % (b))
    return (G, values)
def perfect_hash_lookup(G, V, key):
    '''Look up the value stored for *key* in the hash table defined by (G, V).'''
    seed = G[hash(0, key) % len(G)]
    if seed < 0:
        # Negative seed: the value was placed directly at slot -seed - 1.
        return V[-seed - 1]
    # Positive seed: re-hash with that seed to find the slot.
    return V[hash(seed, key) % len(V)]
print("Reading words")
input_dict = {}
# Map each word to its 1-based line number in the dictionary file.
# Use a context manager so the file handle is closed deterministically
# (the original left it open), and enumerate instead of a manual counter.
with open(GLOBAL_DICTIONARY, "rt") as dict_file:
    for line, key in enumerate(dict_file, start=1):
        input_dict[key.strip()] = line
print("Creating perfect hash")
(G, V) = create_minimal_perfect_hash(input_dict)
for word in USER_WORDS:
    line = perfect_hash_lookup(G, V, word)
    print("Word %s occurs on line %d" % (word, line))
|
import random
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/hello')
def hello():
    # Static HTML greeting endpoint.
    return '<h1>Hello World from Japan</h1>'


@app.route('/goodbye')
def goodbye():
    # Plain-text farewell endpoint.
    return 'Goodbye'


@app.route('/add/<a>/<b>')
def add(a, b):
    # URL path segments arrive as strings; convert to int before adding.
    # Non-numeric segments will raise ValueError (HTTP 500).
    return str(int(a) + int(b))


@app.route('/omikuji')
def omikuji():
    # Draw a random fortune ("omikuji"): great luck / luck / bad luck.
    results = ['大吉', '吉', '凶']
    # fortune_num=random.randint(0,len(result)-1)
    result = random.choice(results)
    # return f'今日の運勢は{result}'
    # Render via template; `unsei` ("fortune") is the template variable.
    return render_template('omikuji.html', unsei=result)


if __name__ == '__main__':
    # Development server only; debug=True enables the reloader/debugger.
    app.run(debug=True)
|
# Read an integer and print its factorial.
n = int(input("Enter the no."))
result = 1
# Ascending product 1*2*...*n; for n <= 0 the loop body never runs
# and the result stays 1, matching the descending original.
for factor in range(1, n + 1):
    result *= factor
print("fact of", n, "is", result)
|
import sys, time, torch, pdb
from bcachefs import Bcachefs
import benzina.torch as B

if __name__ == "__main__":
    # Throughput benchmark: stream the ImageNet train split from a bcachefs
    # image (path in argv[1]) through a benzina DataLoader and report
    # images/second.
    #
    # Allocate and drop a CUDA tensor once so lazy CUDA context creation
    # happens before timing starts.
    _x = torch.cuda.FloatTensor(10,10) ; del _x
    with Bcachefs(sys.argv[1]) as bchfs:
        d = B.dataset.ImageNet(bchfs, split="train")
        l = B.DataLoader(d,
                         batch_size     = 256,
                         seed           = 0,
                         shape          = (256,256),
                         warp_transform = None,
                         norm_transform = 1/255,
                         bias_transform = -0.5)
        n = 0
        # Start from the negated start time; adding time.time() in `finally`
        # yields the elapsed wall-clock time even if the loop raises.
        # (Replaces the cryptic `t =- time.time()`; the redundant bare
        # `except: raise` is dropped — propagation is unchanged.)
        t = -time.time()
        try:
            for images, targets in l:
                # The targets tensor is still collated on CPU. Move it to
                # the same device as images.
                targets = targets.to(images.device)
                n += len(images)
        finally:
            t += time.time()
            print("Time: {}".format(t))
            print("Images: {}".format(n))
            print("Speed: {} images/second".format(n/t))
|
def _change_matrix(coin_set, change_amount):
matrix = [[0 for m in range(change_amount+1)] for m in range(len(coin_set) + 1)]
for i in range(change_amount+1):
matrix[0][i] = i
return matrix
def change_making(coins, change):
    """Return the minimum number of coins from *coins* needed to make *change*.

    Bottom-up dynamic program over the table built by _change_matrix:
    table[c][amount] is the best coin count for *amount* using only the
    first c denominations.
    """
    table = _change_matrix(coins, change)
    for c, coin in enumerate(coins, start=1):
        for amount in range(1, change + 1):
            if coin == amount:
                # Exact match: one coin suffices.
                table[c][amount] = 1
            elif coin > amount:
                # Coin too large for this amount; carry the result forward.
                table[c][amount] = table[c - 1][amount]
            else:
                # Either skip this denomination, or spend one coin of it
                # and solve the remaining amount in the same row.
                table[c][amount] = min(table[c - 1][amount],
                                       1 + table[c][amount - coin])
    return table[-1][-1]


print(change_making([6,9,20], 56))
|
import ray
import time
from statistics import mean
from collections import defaultdict
import apps.net.remote as rmt
import apps.net.ping as ping
from apps.net.util import k8s, s3
from thirdparty.microps.oracle.feature.cloud.gen_aws_ec2 import aws_resource_map
@ray.remote
def run(run_config: dict, wrks: dict) -> dict:
    """
    Run a memcached benchmark with fixed configurations.

    Starts memcached on the server workers, mutilate agents on all but the
    first client worker, and drives the load from the first client (the
    "master"). Logs are uploaded to S3 and the parsed metrics are returned
    as a single dict of k-v pairs (results + config echo + debug info).
    """
    # Fail fast if any required knob is missing from the config.
    def validate():
        for k in {"keySize", "valueSize",
                  "serverThread", "clientThread",
                  "runTime", "waitTime", "warmupTime"}:
            assert k in run_config, f"run: missing config entry '{k}', abort"
    validate()
    rid = run_config["run_id"]
    # get servers and clients
    sit = run_config["serverInstanceType"]
    cit = run_config["clientInstanceType"]
    if sit == cit:
        # Same instance type: carve both roles out of a single worker pool.
        ns = run_config["numServerInstance"]
        nc = run_config["numClientInstance"]
        mcd_servers = wrks[sit][:ns]
        mut_clients = wrks[cit][ns: ns + nc]
    else:
        mcd_servers = wrks[sit]
        mut_clients = wrks[cit]
    # install deps and clean up
    print("run: assume the remote VM image contains all deps; "
          "nothing to install;")
    print(rmt.clean_default_apps(mcd_servers + mut_clients,
                                 extra_app=["memcached", "mutilate"],
                                 docker_cont=["memcached"]))
    # Workers are addressed by external IP; memcached/mutilate talk over
    # the internal VPC IPs, so build the translation map once.
    ex_ip_to_in_ip = k8s.get_worker_external_internal_ip_map()
    # Step 1: start the memcached servers
    # get memcached server IPs (internal VPC IP); we are
    # not using a load balancer here, mutilate does client-side
    # load balancing already
    port = 11211
    server_ex_ips = mcd_servers
    server_in_ips = [ex_ip_to_in_ip[e] for e in server_ex_ips]
    client_ex_ips = mut_clients
    client_in_ips = [ex_ip_to_in_ip[i] for i in client_ex_ips]
    # serverThread < 0 means "use every vCPU of the instance type".
    num_server_thread = run_config.get("serverThread", -1)
    if num_server_thread < 0:
        num_server_thread = aws_resource_map[run_config["serverInstanceType"]]["vCPUs"]
        run_config["serverThread"] = num_server_thread
    # demux server runner type, default run on bare metal
    runner_type = run_config.get("runner", "bare")
    if runner_type == "bare":
        cmd_ = f"memcached -t {num_server_thread} -c 32768 > /dev/null 2>&1 & "
        rmt.cmd_remote(mcd_servers, cmd_=cmd_)
    elif runner_type == "docker":
        # default tag: 1.4.33
        tag = run_config.get("tag", "1.4.33")
        # run the container
        cmd_ = f"sudo docker run --name memcached -d -p {port}:{port} memcached:{tag} " \
               f"memcached -t {num_server_thread} -c 32768 > /dev/null 2>&1 & "
        rmt.cmd_remote(mcd_servers, cmd_=cmd_)
        # wait a bit for the container to be ready
        time.sleep(5)
        print(f"run: docker image memcached:{tag}")
    else:
        raise Exception(f"run: unknown runner type {runner_type}")
    print(f"run: using {runner_type} runner type")
    print(f"run: memcached servers at internal IPs {server_in_ips}, public IPs {server_ex_ips} with {cmd_}")
    # Step 2: start the mutilate agents
    # First client is the coordinating "master"; the rest run in agent (-A)
    # mode and are steered by the master.
    master = mut_clients[0]
    agents = mut_clients[1:]
    if len(agents) >= 1:
        _cmd_agent = f"mutilate -T {run_config['clientThread']} " \
                     f"-K {run_config['keySize']} " \
                     f"-V {run_config['valueSize']} " \
                     f"-c 4 " \
                     f"-A > /dev/null 2>&1 & "
        print("run: agents", agents, _cmd_agent)
        rmt.cmd_remote(agents, cmd_=_cmd_agent)
    # Step 3: start the mutilate master runner
    # TODO: add input distribution knob
    def make_master_cmd():
        # Returns (load command, run command): the first pre-loads keys,
        # the second drives the measured run with --noload.
        server_str = " ".join([f"-s {si}:{port}" for si in server_in_ips])
        agent_str = " ".join([f"-a {ex_ip_to_in_ip[ax]}" for ax in agents])
        option_str = f"-T {run_config['clientThread']} " \
                     f"-K {run_config['keySize']} " \
                     f"-V {run_config['valueSize']} " \
                     f"-t {run_config['runTime']} " \
                     f"-w {run_config['warmupTime']} " \
                     f"-c 1 " \
                     f"-W {run_config['waitTime']} --noload"
        return f"mutilate {server_str} --loadonly", \
               f"mutilate {server_str} {agent_str} {option_str}"
    _cmd_load, _cmd_run = make_master_cmd()
    print("run: master", master, _cmd_run)
    start = time.time()
    rmt.cmd_remote([master], cmd_=_cmd_load)
    raw = rmt.cmd_remote([master], cmd_=_cmd_run, out=True)[0].decode("utf-8")
    print(f"run: finished in {time.time() - start}s")
    print("run results, sample:\n", raw)
    # Step 4: upload logs
    s3.dump_and_upload_file(run_config,
                            bucket=run_config["logBucket"],
                            key=s3.path_join(rid, "config"))
    s3.dump_and_upload_file(raw,
                            bucket=run_config["logBucket"],
                            key=s3.path_join(rid, "log"))
    # Step 5: parse and aggregate results
    def parse(_raw) -> dict:
        # Scrape mutilate's stdout: the "read" latency line, the Total QPS
        # line, and the RX/TX goodput lines. Values stay strings.
        _raw = _raw.split("\n")
        results = dict()
        for l in _raw:
            vs = l.split()
            if len(vs) < 1:
                continue
            v_type, v = vs[0], None
            if v_type == "read":
                v = {"avg_lat_read": vs[1],
                     "std_lat_read": vs[2],
                     "min_lat_read": vs[3],
                     "99th_lat_read": vs[8],
                     }
            elif v_type.startswith("Total"):
                v = {"qps": vs[3]}
            elif v_type.startswith("RX"):
                v = {"rx_goodput": vs[-2]}
            elif v_type.startswith("TX"):
                v = {"tx_goodput": vs[-2]}
            if v is not None:
                results.update(v)
        return results
    r = dict()
    r.update(parse(raw))
    print("run: results", r)
    r.update(run_config)
    # pair wise latency info
    # NOTE(review): servers are passed as external IPs while clients are
    # internal IPs — confirm bipartite_lats expects this mix.
    lat = mean(ping.bipartite_lats(mcd_servers, client_in_ips))
    r.update({
        "avg_client_server_lat": lat,
    })
    # debugging info
    r.update({
        "debug_num_server": len(mcd_servers),
        "debug_num_client": len(mut_clients),
        "debug_num_agent": len(agents),
        "debug_client_ex_IPs": mut_clients,
        "debug_server_ex_IPs": mcd_servers,
        "debug_client_in_IPs": client_in_ips,
        "debug_server_in_IPs": server_in_ips,
    })
    return r
|
import pytest
from dcapytorch import *
import torch
import torch.utils.data
from torch.nn.modules.utils import _single, _pair, _triple
from torch.utils.cpp_extension import BuildExtension, CUDA_HOME
import numpy as np
from torchvision import datasets, transforms
from tqdm import tqdm
def get_mnist(batch_size):
    """Return a shuffled DataLoader over the MNIST training split.

    Downloads the dataset into ../data on first use and applies the
    standard ToTensor + mean/std normalisation.
    """
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    mnist_train = datasets.MNIST('../data', train=True, download=True,
                                 transform=pipeline)
    return torch.utils.data.DataLoader(mnist_train,
                                       batch_size=batch_size, shuffle=True)
def train(model, device, train_loader, loss, optimizer, epoch, target_name='data', profiler=False):
    """Run one training epoch over *train_loader*.

    Parameters
    ----------
    model, device, train_loader, loss, optimizer : usual PyTorch objects.
    epoch : epoch index (kept for interface compatibility; unused here).
    target_name : 'data' for autoencoder-style training (the loss compares
        the output with the input batch) or 'target' for supervised
        training against the labels.
    profiler : if True, call mem_report() each batch.
    """
    model.train()
    with tqdm(train_loader) as pbar:
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            # Explicit dispatch replaces the fragile locals()[target_name]
            # lookup; unknown names now raise KeyError instead of silently
            # depending on local variable names.
            supervision = {'data': data, 'target': target}[target_name]
            output = loss(model(data), supervision)
            output.backward()
            optimizer.step()
            # NOTE(review): this fires on every batch EXCEPT multiples of
            # 20 — `batch_idx % 20 == 0` was probably intended; behavior
            # kept as-is.
            if batch_idx % 20:
                pbar.set_postfix(cur_loss='{:.3f}'.format(output.item()))
            if profiler:
                mem_report()
            pbar.update()
def get_cells_bundles(cell_compatibilities):
    """Group consecutive cell indices into bundles.

    Indices are accumulated left to right; whenever a compatibility value
    other than 1 is seen, the accumulated run (including that index) is
    closed off as one bundle. Returns ``(bundles, leftover)`` where
    *leftover* is the trailing run of compatibility-1 indices never closed
    by a non-1 cell.

    >>> get_cells_bundles([1, 2, 2, 1, 2, 1])
    ([[0, 1], [2], [3, 4]], [5])

    The original also maintained a ``prev`` variable that was written but
    never influenced the result; that dead bookkeeping is removed.
    """
    bundles = []
    current = []
    for idx, compat in enumerate(cell_compatibilities):
        current.append(idx)
        if compat != 1:
            # A non-compatible cell terminates the current bundle.
            bundles.append(current)
            current = []
    return bundles, current
def test_init_ADNN():
    """Exercise ADNN construction from a single cell, a tuple, and a
    Sequential, then check compatibility flags and bundle partitioning."""
    cell_flatten = Flatten()
    cell_linear = ADNNLinearCell(256, 256,act_fn=torch.nn.ReLU,weight_init=torch.nn.init.eye_)
    cell_conv1d = ADNNConv1dCell(64, 64, kernel_size=9,
                                 stride=2, padding=0, groups=64,dilation=2)
    cell_reshape = Reshape(16,16)
    unroll=0
    # A single cell must be accepted as-is.
    cells = cell_flatten
    adnn = ADNN(cells,unroll)
    print('Init with one cell - OK')
    # A tuple of cells must be accepted.
    cells = (cell_linear,cell_linear)
    adnn = ADNN(cells,unroll)
    print('Init with tuple of cells - OK')
    # An nn.Sequential must be accepted.
    cells = torch.nn.Sequential(cell_linear,cell_linear)
    adnn = ADNN(cells,unroll)
    print('Init with Sequential - OK')
    # Mixed compatibilities: expect the given flags, bundles, and a
    # trailing "last stand" of unbundled cells.
    cells = (cell_flatten, cell_linear, cell_linear,cell_flatten,cell_linear,cell_reshape)
    comp = [1,2,2,1,2,1]
    bundles = [[0,1],[2],[3,4]]
    ls = [5]
    adnn = ADNN(cells, unroll)
    assert [cell.adnn_compatibility for cell in adnn.cells] == comp, 'ADNN compatibility check fails'
    assert adnn.cells_bundles == bundles and adnn.last_stand == ls, 'Separating on bundles fails'
    # Sequence ending on a compatibility-2 cell: no last stand expected.
    cells = (cell_linear, cell_linear,cell_flatten,cell_linear)
    comp = [2,2,1,2]
    bundles = [[0],[1],[2,3]]
    ls = None
    adnn = ADNN(cells, unroll)
    assert [cell.adnn_compatibility for cell in adnn.cells] == comp, 'ADNN compatibility check fails'
    assert adnn.cells_bundles == bundles and adnn.last_stand == ls, 'Separating on bundles fails'
    # Runs of several compatibility-1 cells must fold into one bundle.
    cells = (cell_flatten,cell_flatten, cell_linear, cell_linear, cell_linear,
             cell_flatten, cell_flatten,cell_linear)
    comp = [1,1,2,2,2,1,1,2]
    bundles = [[0,1,2],[3],[4],[5,6,7]]
    ls = None
    adnn = ADNN(cells, unroll)
    assert [cell.adnn_compatibility for cell in adnn.cells] == comp, 'ADNN compatibility check fails'
    assert adnn.cells_bundles == bundles and adnn.last_stand == ls, 'Separating on bundles fails'
    print('Consistency checking - OK')
def test_ADNN_run_simple_autoencoder():
    """Train a small fully-connected MNIST autoencoder for one epoch,
    first without unrolling and then with unroll=5."""
    unroll=0
    cells=[Flatten(),
           ADNNLinearCell(784, 100,act_fn=torch.nn.ReLU),
           ADNNLinearCell(100, 100,act_fn=torch.nn.ReLU),
           ADNNLinearCell(100, 784),
           Reshape(1,28,28)
           ]
    adnn = ADNN(cells,unroll)
    # Use CUDA when a CUDA toolchain is present, else CPU.
    device = torch.device("cpu")
    if CUDA_HOME:
        device = torch.device("cuda")
    epochs = 1
    batch_size = 32
    model = adnn.to(device)
    optimizer = torch.optim.Adam(model.parameters())
    train_loader = get_mnist(batch_size)
    # MSE against the input image (autoencoder target_name defaults to 'data').
    loss = torch.nn.MSELoss()
    print('Summary:\n\nNetArchitecture: {net_arch}\n\nOptimizer: {opt}\n\nLoss: {loss}\n\n'.format(net_arch=adnn,opt=optimizer,loss=loss))
    for epoch in tqdm(range(1, epochs + 1),desc='Training without unroll'):
        train(model, device, train_loader, loss, optimizer, epoch)
    print('First subtest passed')
    # Re-train from scratch with unrolling enabled.
    adnn.reset_parameters()
    unroll = 5
    adnn.unroll = unroll + 1
    for epoch in tqdm(range(1, epochs + 1),desc='Training with unroll={}'.format(adnn.unroll-1)):
        train(model, device, train_loader, loss, optimizer, epoch)
    print('Second subtest passed')
def test_ADNN_run_convolutional_autoencoder():
    """Train a conv/deconv MNIST autoencoder for one epoch, first without
    unrolling and then with unroll=20."""
    unroll=0
    cells=[ADNNConv2dCell(1, 16, kernel_size=3, stride=2,padding=1,act_fn=torch.nn.ReLU),
           ADNNConv2dCell(16, 8, kernel_size=3, stride=2,padding=1,act_fn=torch.nn.ReLU),
           ADNNConv2dCell(8, 8, kernel_size=3, stride=2,act_fn=torch.nn.ReLU),
           Flatten(),
           ADNNLinearCell(72,72,act_fn=torch.nn.ELU),
           Reshape(8,3,3),
           ADNNConvTranspose2dCell(8,8,kernel_size=3, stride=2,act_fn=torch.nn.ReLU),
           ADNNConvTranspose2dCell(8,16,kernel_size=3, stride=2,padding=1,act_fn=torch.nn.ReLU),
           ADNNConvTranspose2dCell(16,1,kernel_size=3, stride=2,output_padding=1)
           ]
    adnn = ADNN(cells,unroll)
    # Use CUDA when a CUDA toolchain is present, else CPU.
    device = torch.device("cpu")
    if CUDA_HOME:
        device = torch.device("cuda")
    epochs = 1
    batch_size = 32
    model = adnn.to(device)
    optimizer = torch.optim.Adam(model.parameters())
    train_loader = get_mnist(batch_size)
    loss = torch.nn.MSELoss()
    print('Summary:\n\nNetArchitecture: {net_arch}\n\nOptimizer: {opt}\n\nLoss: {loss}\n\n'.format(net_arch=adnn,opt=optimizer,loss=loss))
    for epoch in tqdm(range(1, epochs + 1),desc='Training without unroll'):
        train(model, device, train_loader, loss, optimizer, epoch)
    print('First subtest passed')
    # Re-train from scratch with unrolling enabled.
    adnn.reset_parameters()
    unroll = 20
    adnn.unroll = unroll + 1
    for epoch in tqdm(range(1, epochs + 1),desc='Training with unroll={}'.format(adnn.unroll-1)):
        train(model, device, train_loader, loss, optimizer, epoch)
    print('Second subtest passed')
def test_ADNN_run_incorporated():
    """Embed an ADNN decoder as the tail of a plain nn.Sequential encoder
    and train the combined model, without and then with unrolling."""
    adnn_decoder = ADNN(cells=[Reshape(8,3,3),
                               ADNNConvTranspose2dCell(8,8,kernel_size=3, stride=2,act_fn=torch.nn.ReLU),
                               ADNNConvTranspose2dCell(8,16,kernel_size=3, stride=2,padding=1,act_fn=torch.nn.ReLU),
                               ADNNConvTranspose2dCell(16,1,kernel_size=3, stride=2,output_padding=1)
                               ],unroll=0)
    net = torch.nn.Sequential(torch.nn.Conv2d(1, 16, kernel_size=3, padding=1, stride=2),
                              torch.nn.ReLU(),
                              torch.nn.Conv2d(16, 8, kernel_size=3, padding=1, stride=2),
                              torch.nn.ReLU(),
                              torch.nn.Conv2d(8, 8, kernel_size=3, stride=2),
                              torch.nn.ReLU(),
                              Flatten(),
                              torch.nn.Linear(72, 72),
                              torch.nn.ELU(),
                              adnn_decoder
                              )
    # Use CUDA when a CUDA toolchain is present, else CPU.
    device = torch.device("cpu")
    if CUDA_HOME:
        device = torch.device("cuda")
    epochs = 1
    batch_size = 32
    model = net.to(device)
    optimizer = torch.optim.Adam(model.parameters())
    train_loader = get_mnist(batch_size)
    loss = torch.nn.MSELoss()
    print('Summary:\n\nNetArchitecture: {net_arch}\n\nOptimizer: {opt}\n\nLoss: {loss}\n\n'.format(net_arch=net,opt=optimizer,loss=loss))
    for epoch in tqdm(range(1, epochs + 1),desc='Training without unroll'):
        train(model, device, train_loader, loss, optimizer, epoch)
    print('First subtest passed')
    # Reset every layer that supports it, then re-train with the decoder
    # unrolled (the ADNN is the last element of the Sequential).
    for cell in model:
        if hasattr(cell,'reset_parameters'):
            cell.reset_parameters()
    unroll = 20
    net[-1].unroll = unroll + 1
    for epoch in tqdm(range(1, epochs + 1),desc='Training with unroll={}'.format(net[-1].unroll-1)):
        train(model, device, train_loader, loss, optimizer, epoch)
    print('Second subtest passed')
|
# -*- coding: utf-8 -*-
from Products.Five import BrowserView
from plone import api
class UtilsView(BrowserView):
    """Browser view exposing small utility checks for templates."""

    def is_gdpr(self):
        """Return True when the GDPR text is flagged as ready in the
        Plone registry (False when the record is absent)."""
        record = 'imio.gdpr.interfaces.IGDPRSettings.is_text_ready'
        return api.portal.get_registry_record(record, default=False)
|
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
__author__ = 'helljump'
import logging
import startup
from utils import MyProgressDialog
from PyQt4 import QtGui
from PyQt4 import QtCore
from dbobj import Document
import re
from ZODB.utils import p64
import os
from pytils.translit import slugify
from ZODB.POSException import POSKeyError
import transaction
from phpserialize import loads, dumps
from datetime import datetime
from xml.sax.saxutils import escape
from ui.kadaver_export_ui import Ui_Dialog
log = logging.getLogger(__name__)
SPLIT = re.compile("<hr class=['\"]?more['\"]?>")
class LocalFS(object):
    """Filesystem backend that delegates to the local OS.

    Exists so exporters can be pointed at alternative storage backends
    sharing this small interface.
    """

    def save(self, fname, data):
        """Write *data* (bytes) to *fname*, truncating any existing file."""
        with open(fname, 'wb+') as fout:
            fout.write(data)

    def isdir(self, name):
        return os.path.isdir(name)

    def isfile(self, name):
        return os.path.isfile(name)

    def makedirs(self, name):
        return os.makedirs(name)

    def chdir(self, name):
        return os.chdir(name)

    def chmod(self, name, mode):
        return os.chmod(name, mode)

    def getmtime(self, name):
        return os.path.getmtime(name)
def _read(data):
    """Yield Document objects parsed from a phpserialize'd Kadaver dump.

    *data* is the raw content of a .kad file. Python 2 only: relies on
    ``unicode`` and on ``str.decode("zlib")`` for packed payloads.
    """
    egg = loads(data)
    eggslug = egg["tags"]
    # "packed" means article bodies are zlib-compressed.
    gziped = egg.get("packed", False)
    for cslug, fields in egg["categories"].items():
        cat = unicode(fields["title"], "utf-8")
        # Articles are matched to their category by slug.
        for aslug, afields in egg["articles"].items():  # @UnusedVariable
            if afields["category"] == cslug:
                doc = Document()
                doc.cats.add(cat)
                doc.title = unicode(afields["title"], "utf-8")
                # Resolve tag slugs to their display titles.
                for i, tagslug in afields["tags"].items():  # @UnusedVariable
                    doc.tags.add(unicode(eggslug[tagslug]["title"], "utf-8"))
                try:
                    doc.date = datetime.strptime(afields["date"], "%Y-%m-%dT%H:%M:%S.%f")
                except ValueError:  # no millis
                    doc.date = datetime.strptime(afields["date"], "%Y-%m-%dT%H:%M:%S")
                if gziped:
                    afields["text"] = afields["text"].decode("zlib")
                    afields["intro"] = afields["intro"].decode("zlib")
                arr = []
                text = unicode(afields["text"], "utf-8")
                intro = unicode(afields["intro"], "utf-8")
                # Skip near-empty fragments; rejoin intro/body with the
                # same "more" separator the exporter splits on.
                if len(intro) > 3:
                    arr.append(intro)
                if len(text) > 3:
                    arr.append(text)
                doc.text = '\n<hr class="more">\n'.join(arr)
                yield doc
def reader(parent):
    """Interactive import of a .kad file into the parent's ZODB project.

    Shows a file dialog and a progress dialog; commits the transaction on
    success, aborts it on any error or user cancellation. Returns False
    when the dialog was dismissed, True otherwise. Python 2 / PyQt4.
    """
    fn = QtGui.QFileDialog.getOpenFileName(parent, u'Импорт', QtCore.QString(), u'Kadaver file (*.kad)')
    if fn.isEmpty():
        return False
    fn = unicode(fn)
    pdlg = MyProgressDialog(u'Импорт', u'Чтение файла', u'Отмена', parent=parent)
    pdlg.show()
    root = parent.project.root()
    documents = root["documents"]
    cats = root["cats"]
    tags = root["tags"]
    try:
        data = open(fn, 'rb').read()
        for doc in _read(data):
            documents.appendSorted(doc)
            # Merge the document's categories/tags into the global sets.
            cats.update(doc.cats)
            tags.update(doc.tags)
            # Keep the UI responsive and honor the cancel button.
            QtGui.qApp.processEvents()
            if pdlg.wasCanceled():
                raise IOError('Canceled')
        transaction.commit()
    except:
        # Roll back the partially-imported data, then re-raise.
        transaction.abort()
        raise
    finally:
        pdlg.close()
    return True
class ImageWriter(object):
    """Extracts cm3://image/<oid> references from a document's text,
    saves the images (scaled to configured bounds) through *fs*, and
    rewrites the references to public /images URLs. Python 2 / PyQt4."""

    # Matches "cm3://image/<oid>", capturing the full URL and the oid.
    IMAGE = re.compile('(cm3\://image/(\d+))')

    def __init__(self, prj, fn, fs):
        prefs = prj.root().get('prefs', {})
        self.fs = fs
        self.conn = prj.con()
        # Maximum output dimensions; larger images are scaled down.
        self.ww = prefs.get('image_width', 320)
        self.hh = prefs.get('image_height', 200)
        self.imgdir = os.path.join(fn, 'images')  # prefs.get('image_dir', 'images')
        self.imgurl = '/images'  # prefs.get('image_dir', 'images')
        # Resolve a relative image dir against the export target's parent.
        if re.match("^[a-z]\:", self.imgdir, re.IGNORECASE) is None:  # rel path
            self.imgdir = os.path.join(os.path.dirname(fn), self.imgdir)
        if not fs.isdir(self.imgdir):
            fs.makedirs(self.imgdir)
        # Reused in-memory buffer for QImage serialization.
        self.buff = QtCore.QBuffer()

    def __call__(self, doc):
        """Return doc.text with every image link exported and rewritten."""
        text = doc.text
        for i, row in enumerate(ImageWriter.IMAGE.findall(text)):
            url, oid = row
            try:
                # Load the image blob from ZODB by its packed oid.
                obj = self.conn.get(p64(int(oid)))
                data = obj.data.open('r').read()
                # PNG signature is \x89PNG — bytes 1..3 spell "PNG".
                if data[1:4] == 'PNG':
                    frmt = 'PNG'
                else:
                    frmt = 'JPG'
                img = QtGui.QImage.fromData(data)
                if img.width() > self.ww or img.height() > self.hh:
                    img = img.scaled(self.ww, self.hh, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
                # First image keeps the plain slug name; later ones get -i.
                if i > 0:
                    imgname = "%s-%i.%s" % (doc.get_slug(), i, frmt.lower())
                else:
                    imgname = "%s.%s" % (doc.get_slug(), frmt.lower())
                egg = "%s/%s" % (self.imgdir, imgname)
                spam = "%s/%s" % (self.imgurl, imgname)
                img.save(self.buff, frmt)
                self.fs.save(egg, self.buff.data())
                self.buff.reset()
                text = text.replace(url, spam)
            except POSKeyError:
                # Dangling reference: log and leave the link untouched.
                log.error('wrong link %s', url)
        return text
def generate_uniq_name(names, new_name):
    """Return a slug for *new_name* that does not collide with *names*.

    The slug is truncated to 50 characters; on collision a numeric
    suffix is appended: 'slug-1', 'slug-2', ...
    """
    base = slugify(new_name)[:50]
    candidate = base
    suffix = 1
    while candidate in names:
        candidate = "%s-%i" % (base, suffix)
        suffix += 1
    return candidate
# NOTE(review): fs=LocalFS() is evaluated once at def time; LocalFS holds no
# state here so the shared default instance is harmless, but fs=None with a
# lazy default would be safer.
def _write(prj, fn, pack=True, slugform=None, overwrite=False, fs=LocalFS()):
    """Generator: export the project's documents to a Kadaver .kad file.

    Yields the index of each processed document (for progress reporting)
    and writes the serialized archive when exhausted. *slugform* is a
    strftime pattern with a %%(slug)s placeholder; *pack* zlib-compresses
    article bodies. Python 2 only ("...".encode("zlib")).
    """
    datapath = os.path.join(fn, 'data/articles.kad')
    #if not os.path.isdir(os.path.join(fn, 'data')):
    #    raise Exception(u'не похоже на каталог Кадавр CMS')
    # (commented-out guard: "does not look like a Kadaver CMS directory")
    if not overwrite:
        # Pick the first articles_<j>.kad name that does not exist yet.
        j = 1
        while fs.isfile(datapath):
            datapath = os.path.join(fn, 'data/articles_%i.kad' % j)
            j += 1
    if slugform is None:
        slugform = '%Y-%m-%d/%%(slug)s'
        #slugform = '%Y-%m-%d/%H-%M-%S/%%(slug)s'
    documents = prj.root()['documents']
    # Sort so documents of the same category are adjacent; the category
    # record is (re)created whenever the category set changes.
    docs = sorted(documents, key=lambda doc: str(doc.cats))
    iwriter = ImageWriter(prj, fn, fs)
    categories = {}
    articles = {}
    tags = {}
    obj = {
        "categories": categories,
        "articles": articles,
        "tags": tags,
        "packed": pack
    }
    cat = None
    for j, doc in enumerate(docs):
        if cat != doc.cats:
            # New category run; fall back to a default title if empty.
            cattitle = list(doc.cats)[0] if len(doc.cats) > 0 else u'Новости'
            catslug = generate_uniq_name(categories, cattitle)
            categories[catslug] = {"title": escape(cattitle)}
            cat = doc.cats
        slug = doc.date.strftime(slugform) % {"slug": slugify(doc.title)}
        # Export embedded images and rewrite their links.
        text = iwriter(doc)
        # Split into intro/body on the first "more" separator.
        subtext = SPLIT.split(text, 1)
        if len(subtext) == 2:
            intro, text = subtext
        else:
            intro, text = subtext[0], ''
        if pack:
            intro = intro.encode("utf-8").encode("zlib")
            text = text.encode("utf-8").encode("zlib")
        articles[slug] = {
            "title": escape(doc.title),
            "slug": slug,
            "category": catslug,
            "intro": intro,
            "text": text,
            "date": doc.date.isoformat(),
            "tags": [slugify(tag) for tag in doc.tags]
        }
        # Maintain tag usage counts.
        for tag in doc.tags:
            tagslug = slugify(tag)
            if tagslug in tags:
                amount = tags[tagslug]["amount"] + 1
            else:
                amount = 1
            tags[tagslug] = {
                "title": escape(tag),
                "amount": amount
            }
        yield j
    fs.save(datapath, dumps(obj))
class Dialog(QtGui.QDialog, Ui_Dialog):
    """Export-settings dialog for the Kadaver exporter (PyQt4).

    Loads defaults from the project's 'prefs' record and persists them
    back (with a ZODB commit) on accept.
    """

    def __init__(self, project, parent=None):
        super(Dialog, self).__init__(parent)
        self.setupUi(self)
        self.project = project
        root = project.root()
        prefs = root.get('prefs', {}).get('kadaver_export', {})
        # NOTE(review): assigning to path_le.text (instead of setText) shadows
        # the Qt method; the rest of this module reads path_le.text as a plain
        # attribute, so it is self-consistent — confirm before "fixing".
        self.path_le.text = prefs.get('path', 'kadaver')
        self.slugform_le.setText(prefs.get('slugform', '%Y-%m-%d/%%(slug)s'))
        self.pack_cb.setChecked(prefs.get('pack', True))
        self.overwrite_cb.setChecked(prefs.get('overwrite', True))

    @QtCore.pyqtSlot()
    def on_path_le_clicked(self):
        # Let the user pick the export directory.
        fn = QtGui.QFileDialog.getExistingDirectory(self, u'Экспорт')
        if fn.isEmpty():
            return
        self.path_le.text = fn

    def accept(self):
        """Persist the chosen settings into the project prefs and close."""
        root = self.project.root()
        root['prefs']['kadaver_export'] = {
            'path': unicode(self.path_le.text),
            'slugform': unicode(self.slugform_le.text()),
            'pack': self.pack_cb.isChecked(),
            'overwrite': self.overwrite_cb.isChecked()
        }
        transaction.commit()
        self.setResult(1)
        self.hide()
def writer(parent):
    """Interactive export of the parent's project to a Kadaver archive.

    Shows the settings Dialog, then drives _write() under a progress
    dialog; raises IOError('Canceled') when the user cancels. Returns
    False when the dialog was rejected. Python 2 / PyQt4.
    """
    prj = parent.project
    dlg = Dialog(prj, parent)
    rc = dlg.exec_()
    if not rc:
        return False
    # Collect the settings chosen in the dialog.
    path = unicode(dlg.path_le.text)
    slugform = unicode(dlg.slugform_le.text())
    pack = dlg.pack_cb.isChecked()
    overwrite = dlg.overwrite_cb.isChecked()
    documents = prj.root()['documents']
    pdlg = MyProgressDialog(u'Экспорт', u'Запись файла', u'Отмена', 0, len(documents), parent=parent)
    pdlg.show()
    try:
        # _write yields the current document index for progress updates.
        for j in _write(prj, path, pack, slugform, overwrite):
            pdlg.setValue(j)
            if pdlg.wasCanceled():
                raise IOError('Canceled')
        log.debug('done')
    finally:
        pdlg.close()
if __name__ == '__main__':
    # Ad-hoc manual test driver (Python 2): exports the first project of
    # the startup connection to the '_test' directory.
    pass
    #read
    #fn = ur'd:\work2014\cm3\_test\articles.kad'
    #data = open(fn, 'rb').read()
    #for doc in _read(data):
    #    print doc.title.encode('cp866', 'replace'), doc.tags
    #write
    root = startup.CONN.root()
    prj = root["projects"][0]
    prj.open()
    #dlg = Dialog(prj)
    #dlg.exec_()
    for j in _write(prj, '_test'):
        print j,
    prj.close()
|
import torch

# --- Training hyperparameters ---
# RANDOM_SEED was assigned 0 here and then immediately overridden with 444
# further down; the dead duplicate assignment is removed.
RANDOM_SEED = 444          # random seed for PyTorch, NumPy and random
BUFFER_SIZE = int(1e6)     # replay buffer size
BATCH_SIZE = 512           # minibatch size
GAMMA = 1                  # discount factor
TAU = 5e-2                 # for soft update of target parameters
LR_ACTOR = 5e-4            # learning rate of the actor
LR_CRITIC = 5e-4           # learning rate of the critic
WEIGHT_DECAY = 0           # L2 weight decay
UPDATE_EVERY = 2           # weight update frequency
#NOISE_AMPLIFICATION = 0.99 # exploration noise amplification
#NOISE_AMPLIFICATION_DECAY = 0.99 # noise amplification decay

# Environment Information
NUM_AGENTS = 2
STATE_SIZE = 24
ACTION_SIZE = 2

# PyTorch device: first CUDA GPU if available, else CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

print("====== Parameters ======")
print('TAU: ' + str(TAU))
print('Update Every: '+ str(UPDATE_EVERY))
print('Weight Decay: '+str(WEIGHT_DECAY))
print('Random seed: ' + str(RANDOM_SEED))

# Previous configuration, kept for reference:
#RANDOM_SEED = 0 # random seed for PyTorch, NumPy and random
#BUFFER_SIZE = int(1e6) # replay buffer size
#BATCH_SIZE = 512 # minibatch size
#GAMMA = 0.99 #1 solution # discount factor
#TAU = 5e-2 # for soft update of target parameters
#LR_ACTOR = 5e-4 # learning rate of the actor
#LR_CRITIC = 5e-4 # learning rate of the critic
#WEIGHT_DECAY = 0.0 #0.0001 # L2 weight decay
#UPDATE_EVERY = 2 # weight update frequency
#NOISE_AMPLIFICATION = 1 # exploration noise amplification
#NOISE_AMPLIFICATION_DECAY = 1 # noise amplification decay
"""
在字符串 s 中找出第一个只出现一次的字符。如果没有,返回一个单空格。 s 只包含小写字母。
示例:
s = "abaccdeff"
返回 "b"
s = ""
返回 " "
来源:剑指offer-50
链接:https://leetcode-cn.com/problems/di-yi-ge-zhi-chu-xian-yi-ci-de-zi-fu-lcof
"""
# 思路: 哈希表,第一次遍历到时设置value为 True,后面再次遍历到的时候设置value为False
# 再次遍历s,判断dic中的键值,返回第一个值为True的键(说明只遍历到一次)
class Solution:
    def firstUniqChar(self, s: str) -> str:
        """Return the first character of *s* that occurs exactly once,
        or a single space when there is none (or *s* is empty)."""
        # First pass: seen_once[ch] is True only on ch's first occurrence
        # and flips to False on any repeat.
        seen_once = {}
        for ch in s:
            seen_once[ch] = ch not in seen_once
        # Second pass preserves original order: the first still-True
        # character is the answer.
        for ch in s:
            if seen_once[ch]:
                return ch
        return " "
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import the necessary packages
from scipy.spatial import distance as dist
from imutils import face_utils
import imutils
import dlib
import cv2
# In[2]:
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) of a six-point eye contour.

    The two vertical lid distances (points 1-5 and 2-4) are averaged and
    normalised by the horizontal corner distance (points 0-3); low values
    indicate a closing/closed eye.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
# In[3]:
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 3

path_shape_predictor = "./shape_predictor_68_face_landmarks.dat"
path_video = "./blink_detection.mp4"

# initialize the frame counters and the total number of blinks
COUNTER = 0
TOTAL = 0

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(path_shape_predictor)

# The landmark index ranges for both eyes are constants — look them up once
# here instead of on every frame (hoisted out of the capture loop).
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

# create a videoCapture object with a video file or a capture device
cap = cv2.VideoCapture(path_video)
# check if we successfully opened the file; `assert False` would vanish
# under `python -O`, so exit explicitly instead
if not cap.isOpened():
    print("Error opening the file.")
    raise SystemExit(1)

# Default resolution of the frame (system dependent); used to place the
# EAR overlay near the right edge.
frame_width = int(cap.get(3))
# frame_height = int(cap.get(4))
# # Define the codec and create VideoWriter object.The output is stored in 'output.mp4' file.
# out = cv2.VideoWriter('output.mp4',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))

# read until the end of the video frame by frame
while cap.isOpened():
    # ret is False once the stream is exhausted; frame is the next image
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects = detector(gray, 0)
    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0
        # check to see if the eye aspect ratio is below the blink
        # threshold, and if so, increment the blink frame counter
        if ear < EYE_AR_THRESH:
            COUNTER += 1
        else:
            # the eye reopened: if it was closed long enough, count a blink
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1
            # reset the eye frame counter
            COUNTER = 0
        # draw the total number of blinks on the frame along with
        # the computed eye aspect ratio for the frame
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (frame_width - 120, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    # to display the frame
    cv2.imshow("Output", frame)
    # Write the frame into the file 'output.avi'
    # out.write(frame)
    # waitKey(1) refreshes the window and polls the keyboard; 'e' quits
    if cv2.waitKey(1) == ord('e'):
        break

# to release software and hardware resources
cap.release()
# out.release()
# to close all windows in imshow ()
cv2.destroyAllWindows()
|
import numpy as n
import pylab as p
#define Values
m = .00104
D = .00559
v0 = 500.
rho = 1.28
mu = .0000183
r_e1 = rho*v0*D/mu
print r_e1
print r_e1<10**6
#RK2 method
#define values, using some of above values
v = v0
y0 = 0
y = y0
g = 9.8
A = ((D/2)**2)*n.pi
tf = 20.
ts = 10000
dt = tf/ts
Y = []
V = []
time = []
t = 0
for i in range(0, ts):
#define reynolds number, drag coefficient for first step
r_e2 = rho*abs(v)*D/mu
c_d1 = (24/r_e2)+((2.6*(r_e2/5.))/(1+(r_e2/5.)**1.52))+((.411*(r_e2/263000)**(-7.94))/(1+(r_e2/263000)**(-8.)))+(r_e2**(.8)/461000.)
#do first step
y1 = y + v*dt/2
v1 = v + (-g - ((rho*A*abs(v)*c_d1*v)/(2*m)))*dt/2
#define reynolds number, drag coefficent for second step
r_e3 = rho*abs(v1)*D/mu
c_d2 = (24/r_e3)+((2.6*(r_e3/5.))/(1+(r_e3/5.)**1.52))+((.411*(r_e3/263000)**(-7.94))/(1+(r_e3/263000)**(-8.)))+(r_e3**(.8)/461000.)
#do second step
y = y + v1*dt
v = v + (-g - ((rho*A*abs(v1)*c_d2*v1)/(2*m)))*dt
#interate values and append solutions to a list
t = t +dt
V.append(v)
Y.append(y)
time.append(t)
#create and save plot for the height
p.close()
p.plot(time,Y,"r")
p.title("Particle Height vs Time")
p.ylabel("Particle Height (m)", fontsize = 16)
p.xlabel("Time (s)",fontsize = 16)
p.show()
p.savefig("heightplot.png")
#create and save plot for velocity
p.close()
p.plot(time,V,0,"r")
p.title("Particle Velocity vs Time")
p.ylabel("Particle Velocity (m/2)", fontsize = 16)
p.xlabel("Time (s)",fontsize = 16)
p.show()
p.savefig("velocityplot.png") |
import datetime
import sys
import contextlib
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.dialects.mysql
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
from apps.log import logger
from apps.utils import worker, time_string
# Connection string for the blog database.
# NOTE(review): credentials are hard-coded here; move them to an
# environment variable or config file before deploying.
conn_str = "mysql+pymysql://blog:111111@127.0.0.1:3306/blog?charset=utf8mb4"
# Numeric status constant; exact semantics defined by callers — TODO confirm.
OK = 10
# Declarative base shared by all ORM models below.
Base = sqlalchemy.ext.declarative.declarative_base()
class Primary(Base):
    """Registered user account."""
    __tablename__ = 'Person'
    # NOTE(review): a bare class attribute is ignored by SQLAlchemy; for the
    # charset to take effect it must go into
    # __table_args__ = {'mysql_charset': 'utf8mb4'}.
    mysql_charset = 'utf8mb4'
    # Registrant id — unique primary key.
    primary_id = sqlalchemy.Column(sqlalchemy.BigInteger, primary_key=True)
    # Stored as a string since it is never used in arithmetic; not (yet)
    # used as a unique identifier.
    phone_number = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    # Real name.
    name = sqlalchemy.Column(sqlalchemy.String(255))
    # Display nickname.
    nickname = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    # 32-byte binary value — presumably a password digest; confirm scheme.
    password = sqlalchemy.Column(sqlalchemy.BINARY(32), nullable=False)
    # National ID number (18 characters).
    id = sqlalchemy.Column(sqlalchemy.String(255))
    # Account creation time. Pass the *callable* datetime.datetime.now so
    # the default is evaluated per INSERT — `now()` was evaluated once at
    # import time, stamping every row with the process start time.
    create_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
    mail = sqlalchemy.Column(sqlalchemy.String(255))
    # Soft-delete flag.
    is_deleted = sqlalchemy.Column(sqlalchemy.BOOLEAN, default=0, nullable=False)
class Title(Base):
    """Blog post (title record)."""
    __tablename__ = 'Title'
    # NOTE(review): inert as a bare class attribute; belongs in __table_args__.
    mysql_charset = 'utf8mb4'
    title_id = sqlalchemy.Column(sqlalchemy.BigInteger, primary_key=True)
    title = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    # Subtitle may be empty.
    subtitle = sqlalchemy.Column(sqlalchemy.String(255))
    # Author's id and (denormalised) nickname.
    primary_id = sqlalchemy.Column(sqlalchemy.BigInteger)
    nickname = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    activity = sqlalchemy.Column(sqlalchemy.Integer, default=0, nullable=False)
    # Pass the callable so the default is evaluated per INSERT — `now()`
    # was evaluated once at import time (same fix for both timestamps).
    create_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
    last_modify_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
class Label(Base):
    """Tag/label table; labels are attached to titles via TitleLabel."""
    __tablename__ = 'Label'
    mysql_charset = 'utf8mb4'
    label_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    label = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    # BUG FIX: callable default, not an import-time `now()` value.
    create_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
class TitleLabel(Base):
    """Many-to-many association between titles and labels."""
    __tablename__ = 'TitleLabel'
    mysql_charset = 'utf8mb4'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    label_id = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
    title_id = sqlalchemy.Column(sqlalchemy.BigInteger, nullable=False)
    # BUG FIX: callable default, not an import-time `now()` value.
    create_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
class Comment(Base):
    """First-level comment on a title."""
    __tablename__ = 'Comment'
    mysql_charset = 'utf8mb4'
    # Unique id of the comment.
    comment_id = sqlalchemy.Column(sqlalchemy.BigInteger, primary_key=True)
    title_id = sqlalchemy.Column(sqlalchemy.BigInteger, nullable=False)
    # Commenter's account id; nickname denormalised to avoid frequent lookups.
    primary_id = sqlalchemy.Column(sqlalchemy.BigInteger, nullable=False)
    nickname = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    # Comment body.
    text = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    preference = sqlalchemy.Column(sqlalchemy.Integer, nullable=False, default=0)
    # BUG FIX: callable default, not an import-time `now()` value.
    create_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
class SecondComment(Base):
    """Reply to a first-level comment (independent table, similar layout)."""
    __tablename__ = 'SecondComment'
    mysql_charset = 'utf8mb4'
    second_comment_id = sqlalchemy.Column(sqlalchemy.BigInteger, primary_key=True)
    comment_id = sqlalchemy.Column(sqlalchemy.BigInteger, nullable=False)
    primary_id = sqlalchemy.Column(sqlalchemy.BigInteger, nullable=False)
    nickname = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    text = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
    preference = sqlalchemy.Column(sqlalchemy.Integer, nullable=False, default=0)
    # BUG FIX: callable default, not an import-time `now()` value.
    create_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
class Book(Base):
    """Book catalogue entry."""
    __tablename__ = 'Books'
    mysql_charset = 'utf8mb4'
    book_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    book = sqlalchemy.Column(sqlalchemy.String(127), nullable=False)
    alia = sqlalchemy.Column(sqlalchemy.String(127))
    group = sqlalchemy.Column(sqlalchemy.String(31))
    # BUG FIX: callable defaults, not import-time `now()` values.
    create_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
    last_modify_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False, default=datetime.datetime.now)
class DataBase(object):
    """Data-access layer for the blog MySQL database.

    Owns the SQLAlchemy engine and session factory and exposes CRUD helpers
    for accounts, titles, labels, comments and books.  Every helper runs
    inside `session_scope`, so each call is one committed transaction.
    """
    def __init__(self):
        # pool_recycle avoids MySQL's "server has gone away" on idle
        # pooled connections.
        self.engine = sqlalchemy.create_engine(
            conn_str,
            # echo=True,
            pool_recycle=3600
        )
        self.Session = sessionmaker(self.engine)
        try:
            self.create_tables()
        except Exception as e:
            # Fatal: without the schema nothing else can work.
            print("database initialize failed")
            print("Exception: {}".format(e))
            sys.exit(-1)
        else:
            # Tables created successfully; continue.
            pass
    def create_tables(self):
        # Idempotent: creates only tables that do not exist yet.
        Base.metadata.create_all(self.engine)
    def destroy_session(self):
        # Dispose of the engine's connection pool.
        self.engine.dispose()
    @contextlib.contextmanager
    def session_scope(self):
        """Yield a session; commit on success, roll back and re-raise on error.

        NOTE(review): all failures are re-raised as a bare ``Exception`` with
        no message, so callers cannot distinguish a duplicate-key violation
        from any other failure; the duplicate-key branch also skips the
        rollback — confirm whether that is intentional.
        """
        session = self.Session()
        try:
            yield session
            session.commit()
        except IntegrityError as ie:
            # MySQL error code is the first element of the driver's args.
            ecode = ie.orig.args[0]
            if ecode == 1062:  # Duplicate key
                raise Exception
            else:
                session.rollback()
                print("> session commit failed 1, rollback")
                raise Exception
        except Exception as e:
            print("exception occurs: {}, {}".format(type(e), e))
            session.rollback()
            print("> session commit failed 2, rollback")
            raise Exception
        finally:
            session.close()
    def register(self, phone_num, name, nickname, man_id, mail):
        """
        Register a new account with a generated primary_id.

        NOTE(review): Primary.password is declared non-nullable but is not
        supplied here — confirm how the password gets set on registration.
        :return: True on success (exceptions propagate from session_scope).
        """
        primary_id = worker.get_id()
        with self.session_scope() as session:
            row_p = Primary(primary_id=primary_id,
                            phone_number=phone_num, name=name, nickname=nickname, id=man_id, mail=mail)
            session.add(row_p)
        return True
    def freeze_account(self, primary_id):
        """
        Soft-delete (freeze) an active account by setting is_deleted.

        :return: True if a matching active account was frozen, else False.
        """
        with self.session_scope() as session:
            query_primary = session.query(Primary).filter(
                Primary.primary_id == primary_id, Primary.is_deleted == 0
            )
            row_primary = query_primary.first()
            if row_primary:
                row_primary.is_deleted = 1
                return True
            else:
                return False
    def close_account(self, primary_id):
        """
        Permanently delete an active account (admin-only operation).

        :return: True if a row was deleted, else False.
        """
        with self.session_scope() as session:
            query_primary = session.query(Primary).filter(
                Primary.primary_id == primary_id, Primary.is_deleted == 0
            )
            row_primary = query_primary.first()
            if row_primary:
                session.delete(row_primary)
                return True
            else:
                return False
    def create_label(self, label):
        """
        Create a label unless one with the same text already exists.

        :return: True if created, False if the label already existed.
        """
        with self.session_scope() as session:
            query_lab = session.query(Label).filter(
                Label.label == label
            )
            row_lab = query_lab.first()
            if row_lab:
                return False
            else:
                row_lab = Label(label=label)
                session.add(row_lab)
                return True
    def remove_label(self, label):
        """
        Delete a label by its text.

        :return: True if deleted, False if no such label.
        """
        with self.session_scope() as session:
            query_lab = session.query(Label).filter(
                Label.label == label
            )
            row_lab = query_lab.first()
            if row_lab:
                session.delete(row_lab)
                return True
            else:
                return False
    def create_title(self, title, subtitle, primary_id, nickname, label_id_ls):
        """
        Create a title and attach it to each label in label_id_ls.

        All referenced labels are validated first; if any is missing the
        method returns False and nothing is inserted.
        :return: True on success, False if a label id does not exist.
        """
        title_id = worker.get_id()
        with self.session_scope() as session:
            for label_id in label_id_ls:
                query_lab = session.query(Label).filter(
                    Label.label_id == label_id
                )
                if not query_lab.first():
                    return False
            # for-else: runs only when the loop finished without breaking;
            # since the failure path returns, this is the all-valid path.
            else:
                now = datetime.datetime.now()
                row_t = Title(title_id=title_id, title=title, subtitle=subtitle,
                              primary_id=primary_id, nickname=nickname, create_time=now, last_modify_time=now)
                session.add(row_t)
                for label_id in label_id_ls:
                    row_tl = TitleLabel(label_id=label_id, title_id=title_id, create_time=now)
                    session.add(row_tl)
                return True
    def remove_title(self, title_id):
        """
        Delete a title and all of its label associations.

        :return: True if the title existed and was deleted, else False.
        """
        with self.session_scope() as session:
            query_title = session.query(Title).filter(
                Title.title_id == title_id
            )
            row_title = query_title.first()
            if row_title:
                session.delete(row_title)
                # remove label title
                query_tl = session.query(TitleLabel).filter(
                    TitleLabel.title_id == title_id
                )
                query_tl.delete()
                return True
            else:
                return False
    def add_labels_to_title(self, title_id, label_id_ls):
        """
        Attach labels to a title, skipping associations that already exist.

        :return: True (always; new label ids are not validated here).
        """
        with self.session_scope() as session:
            query_tl = session.query(TitleLabel).filter(
                TitleLabel.title_id == title_id
            )
            old_label_ls = [row_tl.label_id for row_tl in query_tl.all()]
            for label_id in label_id_ls:
                if label_id in old_label_ls:
                    continue
                else:
                    row_tl = TitleLabel(label_id=label_id, title_id=title_id)
                    session.add(row_tl)
            return True
    def remove_label_to_title(self, title_id, label_id):
        """
        Detach one label from one title.

        :return: True if the association existed and was deleted, else False.
        """
        with self.session_scope() as session:
            query_tl = session.query(TitleLabel).filter(
                TitleLabel.title_id == title_id, TitleLabel.label_id == label_id
            )
            row_tl = query_tl.first()
            if row_tl:
                session.delete(row_tl)
                return True
            else:
                return False
    def create_comment(self, title_id, primary_id, nickname, text):
        """
        Create a first-level comment on a title.

        NOTE(review): title_id is not validated to exist, unlike the
        second-comment path below — confirm whether that is intentional.
        :return: True.
        """
        comment_id = worker.get_id()
        with self.session_scope() as session:
            row_c = Comment(comment_id=comment_id, title_id=title_id, primary_id=primary_id,
                            nickname=nickname, text=text)
            session.add(row_c)
        return True
    def remove_comment(self, comment_id):
        """
        Delete a first-level comment (admin operation).

        :return: True if deleted, False if no such comment.
        """
        with self.session_scope() as session:
            query_c = session.query(Comment).filter(
                Comment.comment_id == comment_id
            )
            row_c = query_c.first()
            if row_c:
                session.delete(row_c)
                return True
            else:
                return False
    def create_second_comment(self, comment_id, primary_id, nickname, text):
        """
        Create a reply to an existing first-level comment.

        :return: True on success, False if the parent comment does not exist.
        """
        second_comment_id = worker.get_id()
        with self.session_scope() as session:
            query_c = session.query(Comment).filter(
                Comment.comment_id == comment_id
            )
            row_c = query_c.first()
            if row_c:
                row_sc = SecondComment(second_comment_id=second_comment_id, comment_id=comment_id,
                                       primary_id=primary_id, nickname=nickname, text=text)
                session.add(row_sc)
                return True
            else:
                return False
    def remove_second_comment(self, second_comment_id):
        """
        Delete a second-level comment.

        :return: True if deleted, False if no such comment.
        """
        with self.session_scope() as session:
            query_sc = session.query(SecondComment).filter(
                SecondComment.second_comment_id == second_comment_id
            )
            row_sc = query_sc.first()
            if row_sc:
                session.delete(row_sc)
                return True
            else:
                return False
    # -------------------------------------------------
    def logging(self, username, _password):
        """Authenticate by nickname, national id or phone number.

        The password column is BINARY, so the supplied password is compared
        as raw UTF-8 bytes.  NOTE(review): no hashing is visible in this
        file, and the password bytes are written to the log below — both are
        security concerns to confirm/fix.  The comment in the original said
        binary(16) but the column is declared BINARY(32).

        :return: dict of account info on success, False on failure.
        """
        bin_password = bytes(_password, encoding='utf-8')
        logger.info('1111 {} {}'.format(username, bin_password))
        with self.session_scope() as session:
            # First try: username is a nickname.
            query_primary = session.query(Primary).filter(
                Primary.nickname == username, Primary.password == bin_password, Primary.is_deleted == 0
            )
            row_p = query_primary.first()
            if row_p:
                logger.info('{} {}'.format(row_p.password, type(row_p.password)))
                pass
            else:
                if isinstance(username, str) and len(username) == 18 and username.isdigit():
                    # 18 digits: treat as a national id number.
                    query_primary = session.query(Primary).filter(
                        Primary.id == username, Primary.password == bin_password, Primary.is_deleted == 0
                    )
                    row_p = query_primary.first()
                    if row_p:
                        pass
                    else:
                        logger.info('username or password wrong')
                        return False
                elif isinstance(username, str) and len(username) == 11 and username.isdigit():
                    # 11 digits: treat as a phone number.
                    query_primary = session.query(Primary).filter(
                        Primary.phone_number == username, Primary.password == bin_password, Primary.is_deleted == 0
                    )
                    row_p = query_primary.first()
                    if row_p:
                        pass
                    else:
                        logger.info('username or password wrong')
                        return False
                else:
                    logger.info('username or password wrong')
                    return False
            info = {
                'nickname': row_p.nickname,
                'primary_id': str(row_p.primary_id),
                'phone_number': row_p.phone_number,
                'name': row_p.name,
                'create_time': time_string(row_p.create_time),
                'mail': row_p.mail,
            }
            return info
    def list_title(self, page, limit=30, primary_id=None, create_time=None, title_val_ls=None, subtitle_val_ls=None):
        """Return one page of titles (newest first) with their labels.

        :return: list of title dicts, [] when the page is empty, or False on
            an inconsistent label reference.
        """
        if primary_id or create_time or title_val_ls or subtitle_val_ls:
            # TODO: search functionality (filter arguments currently ignored).
            pass
        else:
            with self.session_scope() as session:
                query_title = session.query(Title).order_by(Title.create_time.desc()).\
                    offset(limit * (page - 1)).limit(limit)
                row_t = query_title.first()
                if row_t:
                    query_t_all = query_title.all()
                    title_id_ls = [row_t.title_id for row_t in query_t_all]
                    # TODO check in_ set
                    query_tl = session.query(TitleLabel).filter(
                        TitleLabel.title_id.in_(title_id_ls)
                    )
                    query_tl_all = query_tl.all()
                    # Maps title_id -> set of label_ids.
                    title_dc = {}
                    # Set of every label_id seen on this page.
                    label_set = set()
                    logger.info(len(query_tl_all))
                    for row_tl in query_tl_all:
                        # label_set collects all label ids so labels are
                        # fetched once, in a single query below.
                        label_set.add(row_tl.label_id)
                        title = title_dc.get(row_tl.title_id)
                        if title:
                            if row_tl.label_id in title:
                                continue
                            else:
                                title.add(row_tl.label_id)
                        else:
                            title_dc[row_tl.title_id] = {row_tl.label_id}
                    # Replace each label_id with its (id, text) pair.
                    query_l = session.query(Label.label, Label.label_id).filter(
                        Label.label_id.in_(label_set)
                    )
                    query_l_all = query_l.all()
                    label_dc = {label_id: label for label, label_id in query_l_all}
                    new_title_dc = {}
                    for title_id, t in title_dc.items():
                        for _t in t:
                            if _t in label_dc.keys():
                                if new_title_dc.get(title_id):
                                    new_title_dc[title_id].add((_t, label_dc[_t]))
                                else:
                                    new_title_dc[title_id] = {(_t, label_dc[_t])}
                            else:
                                # Unexpected: a TitleLabel row references a
                                # label that no longer exists.
                                logger.error('unexpect except')
                                return False
                    title_dc = new_title_dc
                    # Assemble the response payload.
                    # NOTE(review): a title with no labels is absent from
                    # title_dc, so `list(title_dc.get(...))` below would be
                    # `list(None)` and raise TypeError — confirm and guard.
                    ls = [{
                        # ids are stringified so JS clients do not lose
                        # precision on 64-bit integers.
                        'title_id': str(row_t.title_id),
                        'title': row_t.title,
                        'primary_id': str(row_t.primary_id),
                        'nickname': row_t.nickname,
                        'create_time': time_string(row_t.create_time),
                        'last_modify_time': time_string(row_t.last_modify_time),
                        'label_ls': list(title_dc.get(row_t.title_id))
                    } for row_t in query_t_all]
                    return ls
                else:
                    return []
    def list_label(self):
        """Return every label as a list of {'id', 'text'} dicts."""
        with self.session_scope() as session:
            query_label = session.query(Label)
            ret_ls = []
            for row_label in query_label.all():
                ret_ls.append({'id': row_label.label_id, 'text': row_label.label})
            return ret_ls
    def inquiry_title(self, title_id):
        """Return a title with all of its first-level comments.

        :return: dict on success, False if the title does not exist.
        """
        # TODO: per-title weighting.
        with self.session_scope() as session:
            query_title = session.query(Title).filter(
                Title.title_id == title_id
            )
            row_title = query_title.first()
            if row_title:
                query_comment = session.query(Comment).filter(
                    Comment.title_id == title_id
                )
                row_comment = query_comment.first()
                comment_ls = []
                if row_comment:
                    for row_comment in query_comment.all():
                        comment_dc = {
                            'comment_id': str(row_comment.comment_id),
                            'primary_id': str(row_comment.primary_id),
                            'nickname': row_comment.nickname,
                            'text': row_comment.text,
                            'create_time': time_string(row_comment.create_time)
                        }
                        comment_ls.append(comment_dc)
                # Assemble the response payload.
                ret_dc = {
                    'title': row_title.title,
                    'title_id': str(row_title.title_id),
                    'subtitle': row_title.subtitle,
                    'primary_id': str(row_title.primary_id),
                    'nickname': row_title.nickname,
                    'create_time': time_string(row_title.create_time),
                    'last_modify_time': time_string(row_title.last_modify_time),
                    'comment_ls': comment_ls
                }
                return ret_dc
            else:
                logger.error('title no found')
                return False
    def inquiry_comment(self, comment_id, page=1, limit=20):
        """Return a comment with all of its replies.

        NOTE(review): page/limit are accepted but never applied — the full
        reply list is always returned.
        :return: dict on success, False if the comment does not exist.
        """
        with self.session_scope() as session:
            query_c = session.query(Comment).filter(
                Comment.comment_id == comment_id
            )
            row_c = query_c.first()
            if row_c:
                query_sc = session.query(SecondComment).filter(
                    SecondComment.comment_id == comment_id
                )
                row_sc = query_sc.first()
                second_comment_ls = []
                if row_sc:
                    for row_sc in query_sc.all():
                        sc_dc = {
                            'second_comment_id': str(row_sc.second_comment_id),
                            'primary_id': str(row_sc.primary_id),
                            'nickname': row_sc.nickname,
                            'text': row_sc.text,
                            'create_time': time_string(row_sc.create_time)
                        }
                        second_comment_ls.append(sc_dc)
                ret_dc = {
                    'primary_id': str(row_c.primary_id),
                    'nickname': row_c.nickname,
                    'text': row_c.text,
                    'create_time': time_string(row_c.create_time),
                    'second_comment': second_comment_ls
                }
                return ret_dc
            else:
                logger.error('comment no found')
                return False
    # ------------------- books ---------------
    def book_list(self, group=None, page=1, limit=20):
        """Return one page of books (newest first), optionally by group."""
        with self.session_scope() as session:
            query_book = session.query(Book)
            if group:
                query_book = query_book.filter(
                    Book.group == group
                )
            query_book = query_book.order_by(Book.create_time.desc()).offset(limit * (page - 1)).limit(limit)
            row_book = query_book.first()
            book_ls = []
            if row_book:
                for row_book in query_book.all():
                    book_dc = {
                        "book_id": str(row_book.book_id),
                        "book": row_book.book,
                        "alia": row_book.alia,
                        "group": row_book.group,
                        "create_time": time_string(row_book.create_time),
                        "last_modify_time": time_string(row_book.last_modify_time),
                    }
                    book_ls.append(book_dc)
            return book_ls
db = DataBase()
|
# Generated by Django 3.1.2 on 2020-10-30 00:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the `Photo` model (url, photographer, photographer_url,
    src_medium char fields plus the auto primary key)."""
    dependencies = [
        ('main_app', '0003_auto_20201030_0045'),
    ]
    operations = [
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.CharField(max_length=100)),
                ('photographer', models.CharField(max_length=100)),
                ('photographer_url', models.CharField(max_length=100)),
                ('src_medium', models.CharField(max_length=100)),
            ],
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Written by Willy
import matplotlib.pyplot as plt
import sys
import os
from PIL import Image
import scipy.io as scio
import cv2
import numpy as np
import math
def prepareShangHaiTech():
    """Write `<image> <ground-truth>` index files for ShanghaiTech.

    For each part (A/B) and phase (train/test), every image under
    `<part>/<phase>/images` is paired with its `GT_<stem>.mat` file and
    written as one `image_path gt_path` line to `<part>/<phase>.txt`.
    """
    DATASET_ROOT = 'data/ShanghaiTech'
    for part in ['part_A_final', 'part_B_final']:
        for phase in ['train_data', 'test_data']:
            DATASET_PATH = os.path.join(DATASET_ROOT, part, phase)
            # `with` guarantees the index file is closed even if listing
            # or writing raises (the original leaked the handle on error).
            with open(DATASET_PATH + '.txt', 'w+') as fout:
                for img_name in os.listdir(os.path.join(DATASET_PATH, 'images')):
                    image_path = os.path.join(DATASET_PATH, 'images', img_name)
                    gt_path = os.path.join(DATASET_PATH, 'ground_truth',
                                           'GT_' + img_name.split('.')[0] + '.mat')
                    fout.write(image_path + ' ' + gt_path + '\n')
def prepareMall():
    """Write train/test frame index files for the Mall dataset.

    Frames 1-800 go to train.txt and 801-2000 to test.txt, one frame path
    per line.
    """
    set_path = 'data/Mall'
    # BUG FIX: the original evaluated `f.closed` (a bool attribute) instead
    # of calling f.close(), so neither file was ever closed; `with` closes
    # them deterministically.
    with open(os.path.join(set_path, 'train.txt'), 'w') as train_f:
        for i in range(1, 801):
            fname = 'seq_{:0>6}.jpg'.format(i)
            train_f.write(os.path.join(set_path, 'frames', fname) + '\n')
    with open(os.path.join(set_path, 'test.txt'), 'w') as test_f:
        for i in range(801, 2001):
            fname = 'seq_{:0>6}.jpg'.format(i)
            test_f.write(os.path.join(set_path, 'frames', fname) + '\n')
def _write_ucsd_frames(fout, root, indices):
    """Write one image path per global frame index in `indices` to `fout`.

    UCSD vidf frames are numbered 1..2000 with 200 frames per clip; the clip
    number xx and in-clip frame number yy are derived from the global index.
    """
    for ix in indices:
        xx = (ix - 1) // 200
        yy = ix - 200 * xx
        img_name = 'vidf1_33_00{}_f{:0>3}.png'.format(xx, yy)
        print(img_name)
        fout.write(os.path.join(root, 'images', img_name) + '\n')
def prepareUCSD():
    """Write train/test index files for the four UCSD evaluation splits.

    Splits follow the standard protocol: 'maximal', 'downscale', 'upscale'
    and 'minimal' each train on a sub-sampled index range and test on all
    remaining frames.  The original spelled out each split by copy-paste;
    this version drives one helper from a table of index ranges and uses
    `with` so the index files are always closed.
    """
    DATASET_ROOT = 'data/UCSD'
    splits = [
        # (name, training indices, list of testing index ranges)
        ('maximal',   range(605, 1405, 5),  [range(1, 601), range(1401, 2001)]),
        ('downscale', range(1205, 1605, 5), [range(1, 1201), range(1601, 2001)]),
        ('upscale',   range(805, 1105, 5),  [range(1, 801), range(1101, 2001)]),
        ('minimal',   range(640, 1440, 80), [range(1, 601), range(1401, 2001)]),
    ]
    for name, train_indices, test_ranges in splits:
        train_path = os.path.join(DATASET_ROOT, 'imagedataset', 'train_' + name + '.txt')
        with open(train_path, 'w') as training_f:
            _write_ucsd_frames(training_f, DATASET_ROOT, train_indices)
        test_path = os.path.join(DATASET_ROOT, 'imagedataset', 'test_' + name + '.txt')
        with open(test_path, 'w') as testing_f:
            for index_range in test_ranges:
                _write_ucsd_frames(testing_f, DATASET_ROOT, index_range)
def prepare_UCSD_gt():
    """Export per-frame dot annotations for UCSD vidf clips 0-9.

    Loads each clip's `*_frame_full.mat` annotation file and saves every
    frame's `loc` array as an individual ground-truth .mat file.
    """
    DATASET_ROOT = 'data/UCSD/vidf-cvpr'
    for clip in range(10):
        annotation_path = os.path.join(DATASET_ROOT, 'vidf1_33_00{}_frame_full.mat'.format(clip))
        annotation = scio.loadmat(annotation_path, struct_as_record=False, squeeze_me=True)
        for frame_idx, frame in enumerate(annotation['frame']):
            # Dot coordinates for this frame.
            dots = frame.loc
            print(dots.shape)
            out_path = os.path.join('data/UCSD/ground-truth/vidf1_33_00{}_f{:0>3}.mat'.format(clip, frame_idx + 1))
            scio.savemat(out_path, {'loc': dots})
def prepareVGGCell():
    """Write data/test.txt listing every `*cell.png` image in the VGG cell set.

    Files whose stem does not end in 'cell' (e.g. dot-annotation maps) are
    skipped.
    """
    DATASET_ROOT = 'data/VGGCell'
    # Build the path portably (the original called os.path.join with a
    # single pre-joined string, which was a no-op).
    test_file = os.path.join('data', 'test.txt')
    # `with` closes the index file even if listing raises.
    with open(test_file, 'w') as f:
        for img in os.listdir(DATASET_ROOT):
            print(img)
            if img[-8:-4] == 'cell' and img.split('.')[-1] == 'png':
                f.write(os.path.join(DATASET_ROOT, img) + '\n')
def prepareDublinCell():
    """Write trainval.txt / test.txt listing Dublin cell dataset images.

    For each phase, every file under `<phase>/images` is written (one path
    per line) to `<phase>.txt`.
    """
    DATASET_ROOT = 'data/DublinCell'
    for phase in ['trainval', 'test']:
        DATASET_PATH = os.path.join(DATASET_ROOT, phase)
        # `with` guarantees the index file is closed even if listing raises
        # (the original leaked the handle on error).
        with open(DATASET_PATH + '.txt', 'w+') as fout:
            for img_name in os.listdir(os.path.join(DATASET_PATH, 'images')):
                image_path = os.path.join(DATASET_PATH, 'images', img_name)
                fout.write(image_path + '\n')
def prepareMBMCell():
    """Write data/test.txt listing MBM cell images, skipping `*_dots_*` maps."""
    DATASET_ROOT = 'data/MBMCell'
    test_file = os.path.join('data', 'test.txt')
    with open(test_file, 'w') as f:
        for img in os.listdir(DATASET_ROOT):
            print(img)
            parts = img.split('_')
            # BUG FIX: the original indexed parts[-2] unconditionally, which
            # raises IndexError for any filename without an underscore; such
            # files cannot be dot maps, so treat them as regular images.
            is_dots = len(parts) >= 2 and parts[-2] == 'dots'
            if not is_dots and img.split('.')[-1] == 'png':
                f.write(os.path.join(DATASET_ROOT, img) + '\n')
# Map CLI dataset names to their preparation routines.  Generalized to
# register every routine defined above (the original only had 'shanghai');
# existing keys are unchanged, so callers are unaffected.
nameFuncMapping = {
    'shanghai': prepareShangHaiTech,
    'mall': prepareMall,
    'ucsd': prepareUCSD,
    'ucsd_gt': prepare_UCSD_gt,
    'vggcell': prepareVGGCell,
    'dublincell': prepareDublinCell,
    'mbmcell': prepareMBMCell,
}
def resize_bilinear(img, m, n):
    """Resize `img` (H x W x C uint8 array) to (m x n x C) bilinearly.

    Assumes the source image has height >= 2 and width >= 2.

    BUG FIXES vs. the original:
    - The fractional weights p/q were computed AFTER copying x (= src_x), so
      they were always 0 and the routine degenerated to a shifted
      nearest-neighbour lookup.
    - Bounds were checked against the OUTPUT size (m/n) instead of the
      source size, silently reusing stale pixel values on downscale.
    - `int(x) - 1` could go to -1 and wrap to the last row/column.
    """
    height, width, channels = img.shape
    emptyImage = np.zeros((m, n, channels), np.uint8)
    sh = m / height
    sw = n / width
    for i in range(m):
        for j in range(n):
            # Map the output pixel back into source coordinates, clamped so
            # the 2x2 neighbourhood below stays inside the image.
            src_x = min(i / sh, height - 1.0)
            src_y = min(j / sw, width - 1.0)
            x = min(int(src_x), height - 2)
            y = min(int(src_y), width - 2)
            # Fractional position inside the 2x2 cell, in [0, 1].
            p = src_x - x
            q = src_y - y
            for k in range(channels):
                emptyImage[i, j, k] = int(
                    img[x, y][k] * (1 - p) * (1 - q)
                    + img[x, y + 1][k] * q * (1 - p)
                    + img[x + 1, y][k] * (1 - q) * p
                    + img[x + 1, y + 1][k] * p * q)
    return emptyImage
def S(x):
    """Piecewise-cubic convolution kernel used by resize_bicubic.

    Returns the kernel weight for offset `x`: a cubic on [0, 1), a second
    cubic on [1, 2), and 0 beyond a distance of 2.
    """
    ax = np.abs(x)
    if ax < 1:
        return 1 - 2 * ax * ax + ax * ax * ax
    elif ax < 2:
        return 4 - 8 * ax + 5 * ax * ax - ax * ax * ax
    return 0
def resize_bicubic(img, m, n):
    """Resize `img` (H x W x C uint8) to (m x n x C) using bicubic
    interpolation with the kernel `S`.

    NOTE(review): pixels whose 4x4 neighbourhood fails the bounds test below
    are left at 0 in the output; also `p`/`q` are computed from the
    pre-truncation `x`/`y` and so are always 0 — confirm whether that is
    intended (same pattern as resize_bilinear).
    """
    height, width, channels = img.shape
    emptyImage = np.zeros((m, n, channels), np.uint8)
    sh = m / height
    sw = n / width
    for i in range(m):
        for j in range(n):
            x = i / sh
            y = j / sw
            p = (i + 0.0) / sh - x
            q = (j + 0.0) / sw - y
            x = int(x) - 2
            y = int(y) - 2
            # Row weight vector from the cubic kernel.
            A = np.array([
                [S(1 + p), S(p), S(1 - p), S(2 - p)]
            ])
            # NOTE(review): these two statements are no-ops — they compute
            # m - 1 / n - 1 and discard the result.  Presumably `x = m - 1`
            # and `y = n - 1` (or a clamp) was intended; confirm before use.
            if x >= m - 3:
                m - 1
            if y >= n - 3:
                n - 1
            if x >= 1 and x <= (m - 3) and y >= 1 and y <= (n - 3):
                # 4x4 neighbourhood of the source pixel.
                # NOTE(review): two entries repeat `y + 1` where the pattern
                # suggests `y + 2` — confirm.
                B = np.array([
                    [img[x - 1, y - 1], img[x - 1, y],
                     img[x - 1, y + 1],
                     img[x - 1, y + 1]],
                    [img[x, y - 1], img[x, y],
                     img[x, y + 1], img[x, y + 2]],
                    [img[x + 1, y - 1], img[x + 1, y],
                     img[x + 1, y + 1], img[x + 1, y + 2]],
                    [img[x + 2, y - 1], img[x + 2, y],
                     img[x + 2, y + 1], img[x + 2, y + 1]],
                ])
                # Column weight vector from the cubic kernel.
                C = np.array([
                    [S(1 + q)],
                    [S(q)],
                    [S(1 - q)],
                    [S(2 - q)]
                ])
                # Weighted sum per channel: A (1x4) . B (4x4) . C (4x1).
                blue = np.dot(np.dot(A, B[:, :, 0]), C)[0, 0]
                green = np.dot(np.dot(A, B[:, :, 1]), C)[0, 0]
                red = np.dot(np.dot(A, B[:, :, 2]), C)[0, 0]
                # Clamp the value into [0, 255] before the uint8 store.
                def adjust(value):
                    if value > 255:
                        value = 255
                    elif value < 0:
                        value = 0
                    return value
                blue = adjust(blue)
                green = adjust(green)
                red = adjust(red)
                emptyImage[i, j] = np.array([blue, green, red], dtype=np.uint8)
    return emptyImage
def prepareUCSD_for_interpolation(type):
    """Write train/test index files for interpolated UCSD images.

    :param type: name of the interpolation variant, i.e. the subdirectory
        under `images_for_interpolation` to index.  (The parameter name
        shadows the builtin but is kept to preserve the public signature.)
    """
    DATASET_ROOT = 'data/UCSD'
    img_dir = os.path.join(DATASET_ROOT, 'images_for_interpolation', type)
    def frame_name(ix):
        # Frames are numbered 1..2000 with 200 frames per clip.
        xx = (ix - 1) // 200
        yy = ix - 200 * xx
        return 'vidf1_33_00{}_f{:0>3}.png'.format(xx, yy)
    # `with` guarantees the index files are closed (the original leaked the
    # handles on error); the two test ranges share one loop.
    train_path = os.path.join(DATASET_ROOT, 'interpolation_dataset', 'train_maximal_' + type + '.txt')
    with open(train_path, 'w') as training_f:
        for ix in range(601, 1400):
            name = frame_name(ix)
            print(name)
            training_f.write(os.path.join(img_dir, name) + '\n')
    test_path = os.path.join(DATASET_ROOT, 'interpolation_dataset', 'test_maximal_' + type + '.txt')
    with open(test_path, 'w') as testing_f:
        for index_range in (range(1, 601), range(1401, 2001)):
            for ix in index_range:
                name = frame_name(ix)
                print(name)
                testing_f.write(os.path.join(img_dir, name) + '\n')
def main(argv):
    """Dispatch to the dataset-preparation routine named by argv[0].

    Raises KeyError if the name is not registered in nameFuncMapping.
    """
    dataset_name = argv[0]
    prepare = nameFuncMapping[dataset_name]
    prepare()
    return 0
if __name__ == '__main__':
    # CLI entry point: the first argument selects the dataset via
    # nameFuncMapping.
    main(sys.argv[1:])
|
from .env import convert
from flask import request
class Lyrics(object):
    """
    Data model for a song's lyrics.

    :attr sections: A key-value pairing of section name to lyric text.
    """

    def __init__(self):
        # Mapping of section name -> lyrics text.
        self.sections = {}

    def add_section(self, section, lyrics):
        """
        Store the lyrics for one named section.

        :param section: The name of the section (e.g. `V1`, `A`, `Chorus`).
        :type section: `str`
        :param lyrics: The lyrics of that section.
        :type lyrics: `str`
        """
        self.sections[section] = lyrics

    def to_dict(self):
        """
        :returns: the section->lyrics mapping as a plain `dict`.
        """
        return self.sections
def get_lyrics(request: request, exclude_id: int = None):
    """
    Utility function that returns a Lyrics object containing the song lyrics.

    :param request: `request` object from the Flask app.
    :param exclude_id: an integer identifying which lyrics section to exclude.
    :returns: A Lyrics object containing the song's lyrics in a structured
        format.
    """
    # Defensive programming checks.  `is not None` (rather than truthiness)
    # so a legitimate exclude_id of 0 is still validated and honoured.
    if exclude_id is not None:
        assert isinstance(exclude_id, int)
    # Get lyrics
    lyr = Lyrics()
    for key in request.form:
        if "section-" in key:
            idx = int(key.split("-")[-1])
            # BUG FIX: the original compared `idx is not exclude_id`, an
            # identity test; CPython only interns small ints, so sections
            # with ids outside the interned range were never excluded.
            if idx != exclude_id:
                # First, convert to traditional.
                lyrics = convert(request.form[f"lyrics-{idx}"])
                section = request.form[key]
                lyr.add_section(section=section, lyrics=lyrics)
    return lyr
def clean_lyrics(song):
"""
Cleans the lyrics in a song object.
"""
cleaned_lyrics = dict()
for name, lyrics in song.lyrics.items():
# Strip trailing punctuation except for question marks.
lyrics = lyrics.strip("。,;:").strip(',.;:')
# Replace middle punctuation with special blank-space character.
# The special space character is specified here:
# https://unicodelookup.com/#%E3%80%80/1
lyrics = (
lyrics.replace("。", " ")
.replace(",", " ")
.replace(";", " ")
.replace("、", " ")
.replace(".", " ")
.replace(",", " ")
.replace(";", " ")
.replace(" ", " ")
)
cleaned_lyrics[name] = lyrics
song.lyrics.update(cleaned_lyrics)
return song
def clean_arrangement(arrangement):
    """
    Split a comma-delimited arrangement string into a list of section keys.

    :example:
    >>> clean_arrangement('V, C, V, C')
    ['V', 'C', 'V', 'C']

    :param arrangement: a comma-delimited string containing the arrangement
        of the song.
    :type arrangement: `str`
    :returns: the section keys, space-trimmed, in order; each should be a
        key in the song's lyrics dictionary.
    :rtype: `list(str)`
    """
    parts = arrangement.split(",")
    return [part.strip(" ") for part in parts]
def allowed_file(filename):
    """
    Utility function that checks that the filename has an allowed extension.

    Used when uploading a file; only extensions in the local
    `ALLOWED_EXTENSIONS` set are accepted.

    :param filename: The name of the file that is being uploaded.
    :type filename: `str`
    :example:
    >>> ALLOWED_EXTENSIONS = ['.pdf', '.jpg'] # module-level variable
    >>> file1 = 'my_file.txt'
    >>> allowed_file(file1)
    False
    >>> file2 = 'my_file'
    >>> allowed_file(file2)
    False
    >>> file3 = 'my_file.jpg'
    >>> allowed_file(file3)
    True
    """
    ALLOWED_EXTENSIONS = set(["pdf"])
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def lyrics_plaintext(song):
    """
    Render a song as plain text: arrangement, composer/copyright credits,
    then every lyric section in order.
    """
    song = validate_song(song)
    # Header: arrangement then credits, matching the original spacing.
    parts = [
        song.default_arrangement,
        "\n\n\n\n",
        song.composer,
        "\n",
        song.copyright,
        "\n\n",
    ]
    for section, lyrics in song.lyrics.items():
        parts.extend((section, "\n", lyrics, "\n\n"))
    return "".join(parts)
def validate_song(song):
    """
    Normalise a song's metadata for string output: any field that is None
    (or the string 'None') becomes ''.
    """
    string_fields = ("default_arrangement", "composer", "copyright",
                     "youtube", "ccli")
    for field in string_fields:
        if getattr(song, field) in [None, "None"]:
            setattr(song, field, "")
    return song
|
import time
import serial
from Tkinter import *
from tkColorChooser import askcolor
# Open the LED controller's serial port (Python 2 script).
# NOTE(review): the port name 'com4' is Windows-specific and hard-coded.
ser = serial.Serial(
    port='com4',
    baudrate=9600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_TWO,
    bytesize=serial.EIGHTBITS
)
print ser.portstr # check which port was really used
# NOTE(review): command format appears to be an ASCII protocol understood by
# the LED firmware — confirm the field layout against the device docs.
ser.write("As0707000255000") # write a string
root = Tk()
root.title("Grid Geometry Manager")
while 1 :
store0_image = PhotoImage(file = "./led_array_thumbnail.gif")
color0 = '#ffffff'
color1 = '#ffffff'
color2 = '#ffffff'
color3 = '#ffffff'
color4 = '#ffffff'
color5 = '#ffffff'
color6 = '#ffffff'
color7 = '#ffffff'
selected_color = '#ffffff'
def cb_Store0():
button_Store0["bg"] = "green"
button_Store0["text"] = "ACTIVE"
button_Store0.flash()
button_Store0["text"] = "Store0"
def cb_Store1():
button_Store1["bg"] = "green"
button_Store1["text"] = "ACTIVE"
button_Store1.flash()
button_Store1["text"] = "Store1"
def cb_Store2():
button_Store2["bg"] = "green"
button_Store2["text"] = "ACTIVE"
button_Store2.flash()
button_Store2["text"] = "Store2"
def cb_Store3():
button_Store3["bg"] = "green"
button_Store3["text"] = "ACTIVE"
button_Store3.flash()
button_Store3["text"] = "Store3"
def cb_Store4():
button_Store4["bg"] = "green"
button_Store4["text"] = "ACTIVE"
button_Store4.flash()
button_Store4["text"] = "Store4"
def cb_Store5():
button_Store5["bg"] = "green"
button_Store5["text"] = "ACTIVE"
button_Store5.flash()
button_Store5["text"] = "Store5"
def cb_Store6():
button_Store6["bg"] = "green"
button_Store6["text"] = "ACTIVE"
button_Store6.flash()
button_Store6["text"] = "Store6"
def cb_Store7():
button_Store7["bg"] = "green"
button_Store7["text"] = "ACTIVE"
button_Store7.flash()
button_Store7["text"] = "Store7"
def cb_Color0():
global selected_color
button_selected_color["bg"] = color0
selected_color = color0
def cb_Color1():
global selected_color
button_selected_color["bg"] = color1
selected_color = color1
def cb_Color2():
global selected_color
button_selected_color["bg"] = color2
selected_color = color2
def cb_Color3():
global selected_color
button_selected_color["bg"] = color3
selected_color = color3
def cb_Color4():
global selected_color
button_selected_color["bg"] = color4
selected_color = color4
def cb_Color5():
global selected_color
button_selected_color["bg"] = color5
selected_color = color5
def cb_Color6():
global selected_color
button_selected_color["bg"] = color6
selected_color = color6
def cb_Color7():
global selected_color
button_selected_color["bg"] = color7
selected_color = color7
def cb_setColor0():
global color0
button_Color0["bg"] = 'white'
(triple, hexstr) = askcolor()
if hexstr:
button_Color0["bg"] = hexstr
color0 = hexstr
def cb_setColor1():
global color1
button_Color1["bg"] = 'white'
(triple, hexstr) = askcolor()
if hexstr:
print hexstr
button_Color1["bg"] = hexstr
color1 = hexstr
def cb_setColor2():
global color2
button_Color2["bg"] = 'white'
(triple, hexstr) = askcolor()
if hexstr:
print hexstr
button_Color2["bg"] = hexstr
color2 = hexstr
def cb_setColor3():
global color3
button_Color3["bg"] = 'white'
(triple, hexstr) = askcolor()
if hexstr:
print hexstr
button_Color3["bg"] = hexstr
color3 = hexstr
def cb_setColor4():
global color4
button_Color4["bg"] = 'white'
(triple, hexstr) = askcolor()
if hexstr:
print hexstr
button_Color4["bg"] = hexstr
color4 = hexstr
def cb_setColor5():
global color5
button_Color5["bg"] = 'white'
(triple, hexstr) = askcolor()
if hexstr:
print hexstr
button_Color5["bg"] = hexstr
color5 = hexstr
def cb_setColor6():
global color6
button_Color6["bg"] = 'white'
(triple, hexstr) = askcolor()
if hexstr:
print hexstr
button_Color6["bg"] = hexstr
color6 = hexstr
def cb_setColor7():
global color7
button_Color7["bg"] = 'white'
(triple, hexstr) = askcolor()
if hexstr:
print hexstr
button_Color7["bg"] = hexstr
color7 = hexstr
def cb_led00():
print"Somebody hit a button";
global selected_color
button_LED00["bg"] = selected_color
ser.write("As0707000255000")
def _make_led_callback(row, col):
    """Return a callback that paints LED button (row, col) with the selected color.

    Only the on-screen button is updated; nothing is sent to the device,
    matching the original hand-written per-LED handlers.
    """
    def _paint():
        # Button widgets are created later at module level, so resolve the
        # name at call time.
        globals()['button_LED%d%d' % (row, col)]["bg"] = selected_color
    return _paint

# cb_led01 .. cb_led77: one callback per grid cell, replacing 63 identical
# copy-pasted functions.  cb_led00 is defined separately above because it
# also writes to the serial port.
for _r in range(8):
    for _c in range(8):
        if _r == 0 and _c == 0:
            continue
        globals()['cb_led%d%d' % (_r, _c)] = _make_led_callback(_r, _c)
#input=1
button_selected_color = Button(root, text = "Selected Color", bg='white')
button_selected_color.grid(row=0,column=5)
button_Store0 = Button(root, text = "Store 0", image = store0_image, command = cb_Store0, compound="left")
button_Store0.grid(row=1, column=0)
button_LED00 = Button(root, height=2, bd=15, bg="Cornflowerblue",text="LED 0,0", command = cb_led00)
button_LED00.grid(row=1, column=1)
button_LED01 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 0,1", command = cb_led01)
button_LED01.grid(row=1, column=2)
button_LED02 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 0,2", command = cb_led02)
button_LED02.grid(row=1, column=3)
button_LED03 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 0,3", command = cb_led03)
button_LED03.grid(row=1, column=4)
button_LED04 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 0,4", command = cb_led04)
button_LED04.grid(row=1, column=5)
button_LED05 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 0,5", command = cb_led05)
button_LED05.grid(row=1, column=6)
button_LED06 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 0,6", command = cb_led06)
button_LED06.grid(row=1, column=7)
button_LED07 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 0,7", command = cb_led07)
button_LED07.grid(row=1, column=8)
button_Color0 = Button(root, bg='blue',text="Color0", command = cb_Color0 )
button_Color0.grid(row=1, column=9)
button_setColor0 = Button(root, bg='white',text="setColor0", command = cb_setColor0 )
button_setColor0.grid(row=1, column=10)
button_Store1 = Button(root, text = "Store 1", image = store0_image, command = cb_Store1, compound="left")
button_Store1.grid(row=2, column=0)
button_LED10 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 1,0", command = cb_led10)
button_LED10.grid(row=2, column=1)
button_LED11 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 1,1", command = cb_led11)
button_LED11.grid(row=2, column=2)
button_LED12 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 1,2", command = cb_led12)
button_LED12.grid(row=2, column=3)
button_LED13 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 1,3", command = cb_led13)
button_LED13.grid(row=2, column=4)
button_LED14 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 1,4", command = cb_led14)
button_LED14.grid(row=2, column=5)
button_LED15 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 1,5", command = cb_led15)
button_LED15.grid(row=2, column=6)
button_LED16 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 1,6", command = cb_led16)
button_LED16.grid(row=2, column=7)
button_LED17 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 1,7", command = cb_led17)
button_LED17.grid(row=2, column=8)
button_Color1 = Button(root, bg='blue',text="Color1", command = cb_Color1 )
button_Color1.grid(row=2, column=9)
button_setColor1 = Button(root, bg='white',text="setColor1", command = cb_setColor1 )
button_setColor1.grid(row=2, column=10)
button_Store2 = Button(root, text = "Store 2", image = store0_image, command = cb_Store2, compound="left")
button_Store2.grid(row=3, column=0)
button_LED20 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 2,0", command = cb_led20)
button_LED20.grid(row=3, column=1)
button_LED21 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 2,1", command = cb_led21)
button_LED21.grid(row=3, column=2)
button_LED22 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 2,2", command = cb_led22)
button_LED22.grid(row=3, column=3)
button_LED23 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 2,3", command = cb_led23)
button_LED23.grid(row=3, column=4)
button_LED24 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 2,4", command = cb_led24)
button_LED24.grid(row=3, column=5)
button_LED25 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 2,5", command = cb_led25)
button_LED25.grid(row=3, column=6)
button_LED26 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 2,6", command = cb_led26)
button_LED26.grid(row=3, column=7)
button_LED27 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 2,7", command = cb_led27)
button_LED27.grid(row=3, column=8)
button_Color2 = Button(root, bg='blue',text="Color2", command = cb_Color2 )
button_Color2.grid(row=3, column=9)
button_setColor2 = Button(root, bg='white',text="setColor2", command = cb_setColor2 )
button_setColor2.grid(row=3, column=10)
button_Store3 = Button(root, text = "Store 3", image = store0_image, command = cb_Store3, compound="left")
button_Store3.grid(row=4, column=0)
button_LED30 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 3,0", command = cb_led30)
button_LED30.grid(row=4, column=1)
button_LED31 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 3,1", command = cb_led31)
button_LED31.grid(row=4, column=2)
button_LED32 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 3,2", command = cb_led32)
button_LED32.grid(row=4, column=3)
button_LED33 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 3,3", command = cb_led33)
button_LED33.grid(row=4, column=4)
button_LED34 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 3,4", command = cb_led34)
button_LED34.grid(row=4, column=5)
button_LED35 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 3,5", command = cb_led35)
button_LED35.grid(row=4, column=6)
button_LED36 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 3,6", command = cb_led36)
button_LED36.grid(row=4, column=7)
button_LED37 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 3,7", command = cb_led37)
button_LED37.grid(row=4, column=8)
button_Color3 = Button(root, bg='blue',text="Color3", command = cb_Color3 )
button_Color3.grid(row=4, column=9)
button_setColor3 = Button(root, bg='white',text="setColor3", command = cb_setColor3 )
button_setColor3.grid(row=4, column=10)
button_Store4 = Button(root, text = "Store 4", image = store0_image, command = cb_Store4, compound="left")
button_Store4.grid(row=5, column=0)
button_LED40 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 4,0", command = cb_led40)
button_LED40.grid(row=5, column=1)
button_LED41 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 4,1", command = cb_led41)
button_LED41.grid(row=5, column=2)
button_LED42 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 4,2", command = cb_led42)
button_LED42.grid(row=5, column=3)
button_LED43 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 4,3", command = cb_led43)
button_LED43.grid(row=5, column=4)
button_LED44 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 4,4", command = cb_led44)
button_LED44.grid(row=5, column=5)
button_LED45 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 4,5", command = cb_led45)
button_LED45.grid(row=5, column=6)
button_LED46 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 4,6", command = cb_led46)
button_LED46.grid(row=5, column=7)
button_LED47 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 4,7", command = cb_led47)
button_LED47.grid(row=5, column=8)
button_Color4 = Button(root, bg='blue',text="Color4", command = cb_Color4 )
button_Color4.grid(row=5, column=9)
button_setColor4 = Button(root, bg='white',text="setColor4", command = cb_setColor4 )
button_setColor4.grid(row=5, column=10)
button_Store5 = Button(root, text = "Store 5", image = store0_image, command = cb_Store5, compound="left")
button_Store5.grid(row=6, column=0)
button_LED50 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 5,0", command = cb_led50)
button_LED50.grid(row=6, column=1)
button_LED51 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 5,1", command = cb_led51)
button_LED51.grid(row=6, column=2)
button_LED52 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 5,2", command = cb_led52)
button_LED52.grid(row=6, column=3)
button_LED53 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 5,3", command = cb_led53)
button_LED53.grid(row=6, column=4)
button_LED54 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 5,4", command = cb_led54)
button_LED54.grid(row=6, column=5)
button_LED55 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 5,5", command = cb_led55)
button_LED55.grid(row=6, column=6)
button_LED56 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 5,6", command = cb_led56)
button_LED56.grid(row=6, column=7)
button_LED57 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 5,7", command = cb_led57)
button_LED57.grid(row=6, column=8)
button_Color5 = Button(root, bg='blue',text="Color5", command = cb_Color5 )
button_Color5.grid(row=6, column=9)
button_setColor5 = Button(root, bg='white',text="setColor5", command = cb_setColor5 )
button_setColor5.grid(row=6, column=10)
button_Store6 = Button(root, text = "Store 6", image = store0_image, command = cb_Store6, compound="left")
button_Store6.grid(row=7, column=0)
button_LED60 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 6,0", command = cb_led60)
button_LED60.grid(row=7, column=1)
button_LED61 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 6,1", command = cb_led61)
button_LED61.grid(row=7, column=2)
button_LED62 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 6,2", command = cb_led62)
button_LED62.grid(row=7, column=3)
button_LED63 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 6,3", command = cb_led63)
button_LED63.grid(row=7, column=4)
button_LED64 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 6,4", command = cb_led64)
button_LED64.grid(row=7, column=5)
button_LED65 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 6,5", command = cb_led65)
button_LED65.grid(row=7, column=6)
button_LED66 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 6,6", command = cb_led66)
button_LED66.grid(row=7, column=7)
button_LED67 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 6,7", command = cb_led67)
button_LED67.grid(row=7, column=8)
button_Color6 = Button(root, bg='blue',text="Color6", command = cb_Color6 )
button_Color6.grid(row=7, column=9)
button_setColor6 = Button(root, bg='white',text="setColor6", command = cb_setColor6 )
button_setColor6.grid(row=7, column=10)
button_Store7 = Button(root, text = "Store 7", image = store0_image, command = cb_Store7, compound="left")
button_Store7.grid(row=8, column=0)
button_LED70 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 7,0", command = cb_led70)
button_LED70.grid(row=8, column=1)
button_LED71 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 7,1", command = cb_led71)
button_LED71.grid(row=8, column=2)
button_LED72 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 7,2", command = cb_led72)
button_LED72.grid(row=8, column=3)
button_LED73 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 7,3", command = cb_led73)
button_LED73.grid(row=8, column=4)
button_LED74 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 7,4", command = cb_led74)
button_LED74.grid(row=8, column=5)
button_LED75 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 7,5", command = cb_led75)
button_LED75.grid(row=8, column=6)
button_LED76 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 7,6", command = cb_led76)
button_LED76.grid(row=8, column=7)
button_LED77 = Button(root, height=2, bd=15, bg='Cornflowerblue',text="LED 7,7", command = cb_led77)
button_LED77.grid(row=8, column=8)
button_Color7 = Button(root, bg='blue',text="Color7", command = cb_Color7 )
button_Color7.grid(row=8, column=9)
button_setColor7 = Button(root, bg='white',text="setColor7", command = cb_setColor7 )
button_setColor7.grid(row=8, column=10)
endcap="\r\n";
if ser.inWaiting() > 0:
out += ser.read(1)
if out.endsWidth(endcap):
print ">>" + out
out = ''
#ser.close() # close port
root.mainloop()
|
#!/usr/local/bin/python2.6
import sys
import os
import string
import re
#
#--- reading directory list
#
path = '/proj/web-cxc/cgi-gen/mta/Obscat/ocat/Info_save/dir_list_new' #---- test directory list path
#path = '/data/udoc1/ocat/Info_save/dir_list' #---- live directory list path
f = open(path, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): each entry appears to be "<value> : <variable name>";
    # exec binds the name to the value, evaluating it as a Python
    # expression.  This trusts the contents of the directory-list file
    # completely -- confirm the file is fully under our control.
    exec "%s = %s" %(var, line)
#
#--- append path to a private folder
#
sys.path.append(bin_dir)
import tooddtFunctions as tdfnc
#---------------------------------------------------------------------------------------------
#-- updateNewObsList: update new_obs_list ---
#---------------------------------------------------------------------------------------------
def updateNewObsList(inList):
    """Update new_obs_list for a given obsid list.

    inList: obsids that may need to be added.  Reads the current
    new_obs_list and the monitor/group bookkeeping lists, appends any
    genuinely new observations that are still active, assigns a POC, and
    rewrites new_obs_list (keeping the previous copy as new_obs_list~).
    """
    #
    #--- read new_obs_list
    #
    obs_type = []
    seq = []
    obsid = []
    status = []
    poc = []
    ao = []
    date = []
    orgCnt = readNewObsList(obs_type, seq, obsid, status, poc, ao, date)
    #
    #--- read monitor/group observations already in the list
    #
    grpObsid = []
    grpPoc = []
    grpID = []
    grpSeq = []
    grpCnt = readMonitorList(grpObsid, grpPoc, grpID, grpSeq)
    #
    #--- check the obsids in the new list are actually new for new_obs_list
    #
    newList = list(set(inList).difference(set(obsid)))  # fix: stray '.' before '(' (SyntaxError)
    if len(newList) > 0:
        for ent in newList:
            # fix: 'group'/'monitor' were passed as anonymous literals but
            # referenced later (NameError); findInfo presumably fills them
            # in place -- TODO confirm against findInfo's signature.
            group = []
            monitor = []
            (ntype, nseq, nstatus, nao, ndate, target, grating, instrument) = \
                findInfo(int(ent), group=group, monitor=monitor)  # fix: extra ')' (SyntaxError)
            if nstatus in ('unobserved', 'scheduled', 'observed'):
                if ntype.lower() in ('too', 'ddt'):
                    npoc = tdfnc.find_person_in_charge(target, grating)  #---- find today's person in charge
                    #
                    #--- check whether this obsid is in the past monitor/group list, and assigned poc already
                    #
                    mchk = 0
                    for i in range(0, len(grpObsid)):
                        if nseq == grpSeq[i]:  # fix: missing ':' (SyntaxError)
                            npoc = grpPoc[i]
                            mchk += 1
                            break
                    if mchk == 0:
                        #
                        #--- if this obsid is not in the previous monitor/group list, check whether
                        #--- this is a monitor/group observation and, if so, add it to the monitor list
                        #
                        if len(group) > 1:
                            for gent in group:
                                grpObsid.append(gent)
                                grpPoc.append(npoc)
                                grpID.append(ent)
                                grpSeq.append(nseq)
                        elif len(monitor) > 1:
                            for ment in monitor:
                                grpObsid.append(ment)
                                grpPoc.append(npoc)
                                grpID.append(ent)
                                grpSeq.append(nseq)
                else:
                    npoc = tdfnc.match_usint_person(ntype, grating, nseq, instrument) #--- find poc from seq #
                obs_type.append(ntype)
                seq.append(nseq)
                obsid.append(ent)
                status.append(nstatus)  # fix: status was never appended -> IndexError when writing below
                poc.append(npoc)
                ao.append(nao)
                date.append(ndate)
    newCnt = len(obsid)
    if newCnt > orgCnt:
        #
        #--- update new_obs_list
        #
        # fix: missing '+' after 'mv ' (SyntaxError) and backup-name typo 'new_obs_ist~'
        cmd = 'mv ' + too_dir + 'new_obs_list ' + too_dir + 'new_obs_list~'  #--- keep the previous version
        os.system(cmd)
        line = too_dir + 'new_obs_list'
        f = open(line, 'w')
        for i in range(0, len(obsid)):
            # fix: format string used '\%s' (literal backslashes); the file
            # is read back with a tab/space split, so write tab separators
            f.write('%s\t%d\t%d\t%s\t%s\t%d\t%s\n' % (obs_type[i], int(seq[i]), int(obsid[i]),
                                                      status[i], poc[i], int(ao[i]), date[i]))
        f.close()
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
def readNewObsList(type, seq, obsid, status, poc, ao, date):
    """Read new_obs_list and append each still-active entry's fields to the
    given lists in place; return the resulting number of obsids."""
    with open(too_dir + 'new_obs_list', 'r') as f:
        entries = [row.strip() for row in f.readlines()]
    for ent in entries:
        atemp = re.split('\s+|\t+', ent)
        #
        #--- find the status of the observation
        #
        (ntype, nseq, nstatus, nao, ndate, target, grating, instrument) = \
            findInfo(int(atemp[2]), group=[], monitor=[])
        #
        #--- drop canceled/archived observations
        #
        if nstatus in ('unobserved', 'scheduled', 'observed'):
            for target_list, value in zip((type, seq, obsid, status, poc, ao, date), atemp):
                target_list.append(value)
    return len(obsid)
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
def readMonitorList(grpObsid, grpPoc, grpID, grpSeq):
    """Read monitor_too_ddt and append each row's obsid/poc/group-id/seq to
    the given lists in place; return the number of entries read."""
    line = too_dir + 'monitor_too_ddt'
    f = open(line, 'r')
    data = [row.strip() for row in f.readlines()]
    f.close()
    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        grpObsid.append(atemp[0])
        grpPoc.append(atemp[1])
        grpID.append(atemp[2])
        grpSeq.append(atemp[3])
    return len(grpObsid)  # fix: was 'grpObsiid' -> NameError on every call
#---------------------------------------------------------------------------------------------
if __name__ == '__main__':
|
import polla.models
import time
import datetime
import random
def ingresardato():
    """Load player/country rows from a.csv into the JugadorPais model.

    Each CSV line is "<name>,<country>[,...]".  Rows that fail to save
    (e.g. duplicates) are reported and skipped.
    """
    # Use a context manager so the file handle is always closed.
    with open('a.csv', encoding='utf-8') as fh:
        corpus = fh.readlines()
    for row in corpus:
        fields = row.split(",")
        nombre, pais = fields[0], fields[1]
        # fix: original referenced equipoideal.models, but only polla.models
        # is imported in this file (NameError at runtime) -- confirm the
        # JugadorPais model does live in polla.models.
        mod = polla.models.JugadorPais(nombre=nombre, pais=pais)
        try:
            mod.save()
        except Exception:
            print('no se agrego' + nombre + " - " + pais)
def fixdate(fech):
    """Convert a date string from 'dd/mm/yyyy' to 'yyyy-mm-dd'."""
    day, month, year = fech.split("/")
    return "-".join((year, month, day))
def ingresarpartidos():
    """Load match rows from d.csv into the Partido model.

    Assigns each block of six consecutive rows to a group A..H, generates
    random betting amounts (home win / away win / draw), and saves each
    match; failures are reported and skipped.
    """
    with open('d.csv', encoding='utf-8') as fh:
        corpus = fh.readlines()
    cont1 = 0
    cont2 = 0
    gr = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
    for row in corpus:
        m1 = random.uniform(1, 7)
        m2 = random.uniform(1, 7)
        m3 = random.uniform(1, 7)
        cont1 = cont1 + 1
        # Current group; advance to the next group after every 6th row.
        grpo = gr[cont2]
        if cont1 == 6:
            cont1 = 0
            cont2 = cont2 + 1
        linea = row.split(",")
        # Team name is the second word of the team field, e.g. "1 Brasil".
        eq1 = linea[1].split(" ")[1]
        eq2 = linea[3].split(" ")[1]
        dat = linea[0]
        mod = polla.models.Partido(equipo1=eq1, equipo2=eq2,
                                   fecha=fixdate(dat), Grupo=grpo,
                                   monto1=m1, monto2=m2, montoempate=m3)
        # fix: an unconditional copy of the failure message was printed
        # before the save attempt, reporting "no se agrego" even on success
        try:
            mod.save()
        except Exception:
            print('no se agrego' + eq1 + " - " + eq2 + " - " + grpo)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-12-05 19:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Restrict `items.item_type` to the fixed set of category choices."""

    dependencies = [
        ('shopping', '0005_items'),
    ]

    operations = [
        migrations.AlterField(
            model_name='items',
            name='item_type',
            field=models.CharField(
                # fix: ('Laptops', 'laptop') appeared twice in the choice
                # list; choices are validation-only, so deduplicating does
                # not alter the database schema.
                choices=[
                    ('Phones', 'phone'),
                    ('Laptops', 'laptop'),
                    ('Men', 'man'),
                    ('Women', 'woman'),
                    ('Cars', 'car'),
                    ('Watches', 'watch'),
                ],
                max_length=20,
            ),
        ),
    ]
|
import codecs
import logging
import os
import shutil
from hana.errors import HanaPluginError
class FileWriter(object):
    """Hana plugin that writes the final file set into a deploy directory.

    deploy_path: destination directory, created on demand.
    clean: when True, the existing deploy directory is removed and
        recreated before writing.
    """

    def __init__(self, deploy_path, clean=False):
        self._deploy_path = deploy_path
        self.clean = clean
        self.logger = logging.getLogger(self.__module__)

    def _clean_output_dir(self):
        """Remove and recreate the deploy directory."""
        #TODO: see if we can avoid removing the dir itself
        shutil.rmtree(self._deploy_path)
        self._create_output_dir()

    def _create_output_dir(self):
        os.mkdir(self._deploy_path)

    def _ensure_parent_dirs(self, filename):
        """Create every directory component of *filename* under deploy_path."""
        def makedirs(path, directory):
            if not directory:
                return
            if os.path.isdir(os.path.join(self._deploy_path, path, directory)):
                return
            # Recurse so missing ancestors are created first.
            makedirs(*os.path.split(path))
            if os.path.isdir(os.path.join(self._deploy_path, path)) or path == '':
                os.mkdir(os.path.join(self._deploy_path, path, directory))
        makedirs(*os.path.split(os.path.dirname(filename)))

    def __call__(self, files, hana):
        """Write every (filename, file-object) pair in *files* below deploy_path.

        `hana` is the pipeline context; unused here but part of the
        plugin call signature.  Text files (f.is_binary false) are written
        UTF-8 encoded, binary files verbatim.
        """
        if self.clean and os.path.isdir(self._deploy_path):
            self._clean_output_dir()
        if not os.path.isdir(self._deploy_path):
            self._create_output_dir()
        for filename, f in files:
            output_path = os.path.join(self._deploy_path, filename)
            self._ensure_parent_dirs(filename)
            self.logger.debug('Writing %s (%s)', output_path, 'binary' if f.is_binary else 'text')
            # fix: file handles were opened and written without ever being
            # closed; use context managers for deterministic cleanup
            if not f.is_binary:
                with codecs.open(output_path, 'w', 'utf-8') as out:
                    out.write(f['contents'])
            else:
                with open(output_path, 'wb') as out:
                    out.write(f['contents'])
#class FileLoaderError(HanaPluginError):
# pass
#
#class DeployDirectoryError(FileLoaderError):
# pass
|
import dotenv,os
from pathlib import Path

# Load variables from the project's .env file into the process environment.
dotenv.load_dotenv()

# Cloud SQL (MySQL) connection settings; each is None when the
# corresponding environment variable is unset.
GCP_MYSQL_HOST = os.getenv("gcp_host")
GCP_MYSQL_USER = os.getenv("gcp_user")
GCP_MYSQL_PASSWORD = os.getenv("gcp_password")
# Password used for sending mail through Gmail.
GMAIL_PASSWORD = os.getenv("gmail_password")
# Directory containing this settings file.
BASE_DIR = Path(__file__).resolve().parent
'''
Created on Sep 4, 2018
@author: ishank
'''
class ListNode(object):
    """Singly linked list node: payload ``x`` plus a ``next`` pointer."""
    def __init__(self, x):
        self.x = x
        self.next = None  # set by the caller when linking nodes
def createLL(lst):
    """Build a linked list from a non-empty Python list and return its head."""
    head = ListNode(lst[0])
    tail = head
    for value in lst[1:]:
        tail.next = ListNode(value)
        tail = tail.next
    return head
def toString(lst):
    """Print the list from *lst* onward as 'v0->v1->...->' (returns None)."""
    pieces = []
    node = lst
    while node is not None:
        pieces.append(str(node.x) + '->')
        node = node.next
    print(''.join(pieces))
def rotateRight(head, k):
    """Rotate a singly linked list right by k places and return the new head.

    :type head: ListNode
    :type k: int
    :rtype: ListNode

    Fixes the original implementation, which under-counted the list length
    by one (so the rotation amount was wrong: [0,1,2] rotated by 4 came
    back unchanged instead of [2,0,1]), printed the list on every step,
    and rotated one node at a time; this version relinks once in O(n).
    """
    if not head or not head.next:
        return head
    # Count the nodes and locate the tail.
    size = 1
    tail = head
    while tail.next is not None:
        tail = tail.next
        size += 1
    k %= size
    if k == 0:
        return head
    # The new tail sits (size - k - 1) hops from the head; everything
    # after it moves to the front.
    new_tail = head
    for _ in range(size - k - 1):
        new_tail = new_tail.next
    new_head = new_tail.next
    new_tail.next = None
    tail.next = head
    return new_head
# Demo: build 0->1->2, rotate it right by four positions, print the result.
demo_head = createLL([0, 1, 2])
toString(rotateRight(demo_head, 4))
|
# coding=utf-8
from __future__ import unicode_literals

# Per-environment defaults: for each URI scheme, the namespace used when
# none is given in the URI.
ENVIRONMENT = {
    'default': {
        'i18n': 'en-us',
        'l10n': 'local',
        'g11n': 'global'
    }
}

# Backend connection strings for the cache and the node storage.
CACHE = 'locmem://'
STORAGE = 'sqlite://:memory:'

# Pipeline pipes, applied in this order for content requests.
PIPELINE = [
    'cio.pipeline.pipes.cache.CachePipe',
    'cio.pipeline.pipes.meta.MetaPipe',
    'cio.pipeline.pipes.plugin.PluginPipe',
    'cio.pipeline.pipes.storage.StoragePipe',
    'cio.pipeline.pipes.storage.NamespaceFallbackPipe'
]

# Content plugins available for rendering node content.
PLUGINS = [
    'cio.plugins.txt.TextPlugin',
    'cio.plugins.md.MarkdownPlugin'
]

# Tokens used when parsing/serializing content URIs,
# e.g. "i18n://en-us@page/title.txt#1?param=x".
URI_SCHEME_SEPARATOR = '://'
URI_NAMESPACE_SEPARATOR = '@'
URI_PATH_SEPARATOR = '/'
URI_EXT_SEPARATOR = '.'
URI_VERSION_SEPARATOR = '#'
URI_DEFAULT_SCHEME = 'i18n'
URI_DEFAULT_EXT = 'txt'
URI_QUERY_SEPARATOR = '?'
URI_QUERY_PARAMETER_SEPARATOR = '&'
URI_QUERY_VARIABLE_SEPARATOR = '='
|
from setuptools import setup

# Packaging metadata for the moviehub console application (Python 3 only).
setup(
    name = 'moviehub',
    packages = ['moviehub'],
    install_requires = [
        'beautifulsoup4',
        'requests'
    ],
    version = '1.0.1',
    description = 'A console application for obtaining movie information using OMDb API',
    author = 'Marcus Mu',
    author_email = 'chunkhang@gmail.com',
    license = 'UNLICENSE',
    url = 'https://github.com/chunkhang/moviehub',
    keywords = [
        'movie',
        'hub',
        'omdb'
    ],
    classifiers = [
        'Intended Audience :: End Users/Desktop',
        'Programming Language :: Python :: 3 :: Only',
        'Environment :: Console'
    ],
    # Installs the `moviehub` command, which runs moviehub.moviehub:main.
    entry_points = {
        'console_scripts': [
            'moviehub=moviehub.moviehub:main'
        ]
    }
)
# fix: the first line contained two import statements fused together
# ("...import HTMLSessifrom requests_html import HTMLSession"), which is a
# SyntaxError; keep a single clean import.
from requests_html import HTMLSession
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import pandas as pd
from pyspark.sql import SparkSession
# Spark session used to load the scraped rows into MySQL through JDBC.
# NOTE(review): the connector-jar path is machine-specific -- consider
# making it configurable instead of hard-coding a user's home directory.
spark = SparkSession\
    .builder\
    .appName("scrap_mobilede")\
    .config("spark.jars", "/Users/shahulmhd/PycharmProjects/mysql-connector-java-8.0.21/mysql-connector-java-8.0.21.jar")\
    .getOrCreate()
def create_webdriver():
    """Build a headless Chrome driver, installing chromedriver if needed."""
    chrome_options = Options()
    chrome_options.use_chromium = True
    chrome_options.headless = True
    # fix: the configured options were built but never passed to the
    # driver, so Chrome did not actually start headless
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
    return driver
# Crawl the first three result pages of the mobile.de search, open every
# car's detail page, scrape its attributes, and load each page's rows into
# MySQL via Spark JDBC.
for pageNum in range(1, 4):
    session = HTMLSession()
    # fix: the URL was an f-string that was *also* passed through
    # .format(pageNum) (a no-op) and carried leading whitespace/newlines.
    url = (
        "https://suchen.mobile.de/fahrzeuge/search.html?"
        "damageUnrepaired=NO_DAMAGE_UNREPAIRED&grossPrice=true"
        "&isSearchRequest=true&makeModelVariant1.makeId=17200"
        "&makeModelVariant1.modelId=8&maxMileage=150000&maxPrice=17500"
        "&minFirstRegistrationDate=2016-01-01"
        f"&pageNumber={pageNum}&scopeId=C&sfmr=false"
    )
    request = session.get(url)
    request.html.render(sleep=1)  # execute the page's JavaScript
    soup = BeautifulSoup(request.text, "html.parser")

    # One result box per car; collect all detail-page links.
    carResultList = soup.find_all(
        'div', class_='cBox-body cBox-body--resultitem dealerAd rbt-reg rbt-no-top')
    eachCarPageLinks = [
        link['href']
        for item in carResultList
        for link in item.find_all('a', href=True)
    ]

    selectedFinalCarList = []
    for link in eachCarPageLinks:
        session = HTMLSession()
        request = session.get(link)
        request.html.render(sleep=1)
        soup = BeautifulSoup(request.text, "html.parser")

        car_name = soup.find('h1').text
        price = soup.find('span', class_='h3 rbt-prime-price').text
        mileage = soup.find("div", {"id": "rbt-mileage-v"}).text
        categorie = soup.find("div", {"id": "rbt-category-v"}).text
        power = soup.find("div", {"id": "rbt-power-v"}).text
        fuel = soup.find("div", {"id": "rbt-fuel-v"}).text

        # Emission fields are missing on some listings; soup.find returns
        # None there, so .text raises AttributeError (was a bare except).
        try:
            emmission_co2 = soup.find("div", {"id": "rbt-envkv.emission-v"}).text
        except AttributeError:
            emmission_co2 = "not available"
        try:
            classe_energie = soup.find("div", {"id": "rbt-envkv.efficiencyClass-v"}).text
        except AttributeError:
            # fix: sentinel was "no available" here but "not available"
            # above -- use one consistent value in the database
            classe_energie = "not available"

        date_premiere_circulation = soup.find("div", {"id": "rbt-firstRegistration-v"}).text
        lieu_geographique = soup.find("p", {"id": "rbt-db-address"}).text

        # TODO(review): fuel-consumption ("rbt-envkv.consumption-v") and
        # seller-rating scraping were left half-finished in the original;
        # re-add once the desired shape of those columns is decided.
        selectedFinalCarList.append({
            'car_name': car_name,
            'price': price,
            'mileage': mileage,
            'categorie': categorie,
            'power': power,
            'fuel': fuel,
            'emmission_co2': emmission_co2,
            'classe_energie': classe_energie,
            'date_premiere_circulation': date_premiere_circulation,
            'lieu_geographique': lieu_geographique,
        })

    # Load this page's rows into MySQL.
    # NOTE(review): mode("overwrite") replaces the table on every page, so
    # only the last page's rows survive -- "append" looks intended; confirm
    # before changing, since append would duplicate rows across reruns.
    selectedFinalCarListDF = spark.createDataFrame(selectedFinalCarList)
    selectedFinalCarListDF.write.format('jdbc')\
        .options(
            url='jdbc:mysql://localhost/automobile',
            driver='com.mysql.cj.jdbc.Driver',
            dbtable='mobile_de',
            user='root',
            password='azertyuiop')\
        .mode("overwrite")\
        .save()
|
"""
ID: aspam1
LANG: PYTHON3
TASK: friday
"""
import sys
def get_day(start, end, start_day):
    """Weekday (0-6) of day-of-month *end*, given that day *start* of the
    same month falls on weekday *start_day*."""
    offset = (end - start) % 7
    return (offset + start_day) % 7
# USACO "friday": count how often the 13th of a month falls on each weekday
# over numYears years starting in 1900, and write the seven counts
# (Saturday first, then Sunday..Friday) to friday.out.
fin = open('friday.in', 'r')
fout = open('friday.out', 'w')
numYears = int(fin.readline())
fin.close()  # fix: input file was never closed

days = [0 for x in range(7)]  # days[i]: number of 13ths on weekday i
date = 1
day = 1  # 1 January 1900 was a Monday
months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for y in range(numYears):
    year = 1900 + y
    # Gregorian leap-year rule
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        months[1] = 29
    else:
        months[1] = 28
    for m in range(12):
        t_day = get_day(date, 13, day)         # weekday of the 13th of this month
        days[t_day] += 1
        l_day = get_day(13, months[m], t_day)  # weekday of the month's last day
        day = (l_day + 1) % 7                  # weekday of the 1st of next month
ret = "{} {} {} {} {} {} {}\n".format(days[6], days[0], days[1], days[2], days[3], days[4], days[5])
fout.write(ret)
fout.close()  # fix: output file was never closed/flushed explicitly
# -*- coding: utf-8 -*-
# GMate - Plugin Based Programmer's Text Editor
# Copyright © 2008-2009 Alexandre da Silva
#
# This file is part of Gmate.
#
# See LICENTE.TXT for licence information
"""A widget used to display the file/folder structure of projects."""
import re
import gtk
import gnomevfs
from GMATE import files
from pathdescriptor import PathDescriptor
from icons import Icons
from settings import Settings
from i18n import msg0002, msg0003, err0010, err0011
class ProjectTreeView(gtk.TreeView):
    """A widget for displaying the files within a repository."""
    def __init__(self):
        """Constructor.
        Creates the initial view of the project repository."""
        super(ProjectTreeView, self).__init__()
        # Callbacks are injected later via set_activate_file / set_refresh.
        self.__current_repository = None
        self.__activate_file = None
        self.__refresh = None
        self.__settings = Settings()
        self.__initialize_treeview()
        self.__initialize_icons()
        self.__initialize_columns()
    def set_activate_file(self, afile=None):
        """Sets the method to use when activating a file.
        Raises ValueError (err0010) if the argument is not callable."""
        if afile is not None and not callable(afile):
            raise ValueError, err0010
        self.__activate_file = afile
    def set_refresh(self, refresh=None):
        """Sets the method to use when refreshing.
        Raises ValueError (err0011) if the argument is not callable."""
        if refresh is not None and not callable(refresh):
            raise ValueError, err0011
        self.__refresh = refresh
    def get_repository(self):
        """Gets the URI associated with the currently opened repository."""
        return self.__current_repository
    def refresh(self):
        """Refreshes the current view, preserving expanded rows."""
        current_repo = self.get_repository()
        # Check to be sure we have a current repository
        if current_repo is not None:
            # Collection to hold all expanded rows
            open_paths = []
            # Append all the expanded paths to the collection
            self.map_expanded_rows(self.__map_expanded_rows, open_paths)
            self.__refresh()
            # Expand all previously expanded paths
            path_iter = self.get_model().get_iter_root()
            self.__expand_previously_open_rows(path_iter, open_paths)
            del open_paths[0:]
            self.queue_draw()
    def set_repository(self, uri):
        """Sets the repository to be viewed.
        @param uri: The URI to set the repository to.
        @type uri: a gnomevfs.URI
        """
        self.__current_repository = uri
        self.get_model().clear()
        # Create the root directory within the list
        parent_dir = self.__append_descriptor(uri, True, None)
        # Be sure there is a loading item within the current directory
        self.__append_loading_cell(parent_dir)
        # Expand the current directory to show the rest of the files
        iterpath = self.get_model().get_path(parent_dir)
        self.expand_row(iterpath, False)
        self.queue_draw()
    def __expand_previously_open_rows(self, path_iter, open_paths):
        """Expands any previously opened paths after a refresh.
        Walks siblings from path_iter, recursing into children."""
        while path_iter is not None:
            desc = self.get_model().get_value(path_iter, 0)
            # Be sure we have a PathDescriptor (placeholder rows hold strings)
            if isinstance(desc, PathDescriptor):
                # If the path was previously opened open it
                if desc.get_uri() in open_paths:
                    path = self.get_model().get_path(path_iter)
                    self.expand_row(path, False)
                    # Remove it from the list
                    open_paths.remove(desc.get_uri())
            # If the iterator has children, check to see if any should
            # be open
            if self.get_model().iter_has_child(path_iter):
                child = self.get_model().iter_nth_child(path_iter, 0)
                self.__expand_previously_open_rows(child, open_paths)
            # Move to the next row
            path_iter = self.get_model().iter_next(path_iter)
    def __map_expanded_rows(self, widget, path, data):
        """Store previously opened paths (callback for map_expanded_rows)."""
        # Append URI values to track what is open
        path_iter = self.get_model().get_iter(path)
        if path_iter is not None:
            desc = self.get_model().get_value(path_iter, 0)
            if isinstance(desc, PathDescriptor):
                data.append(desc.get_uri())
    def __initialize_treeview(self):
        """Create the view and set its properties.
        Model columns: descriptor (or placeholder string), closed icon, open icon."""
        treestore = gtk.TreeStore(object, gtk.gdk.Pixbuf, gtk.gdk.Pixbuf)
        self.set_property(u'model', treestore)
        self.set_property(u'enable-search', False)
        self.set_property(u'headers-visible', False)
        self.connect(u'test-expand-row', self.__on_expand_row)
        self.connect(u'row-activated', self.__on_row_activated)
        self.connect(u'row-collapsed', self.__on_collapse_row)
    def __initialize_columns(self):
        """Creates the columns for the view."""
        # Create the necessary widgets for the view
        image_renderer = gtk.CellRendererPixbuf()
        name_renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn()
        # Pack the icon renderer and the text label renderer into the view
        column.pack_start(image_renderer, False)
        column.pack_start(name_renderer, True)
        # Set the icons for the icon renderer
        column.set_attributes(image_renderer, pixbuf=1, pixbuf_expander_open=2,
            pixbuf_expander_closed=1)
        # Set the text label's method for retrieving the file's name
        column.set_cell_data_func(name_renderer, self.__retrieve_filename)
        self.append_column(column)
    def __initialize_icons(self):
        """Retrieves the icons needed to display within the file view."""
        self.__icons = Icons(self)
    def __populate_directory(self, uri, parent=None):
        """Populates the directory list alphabetically by directory then by
        file.
        @param uri: the URI of the directory.
        @type uri: a gnomevfs.URI
        @param parent: the parent iterator to append the child to.
        @type parent: a gtk.TreeIter
        """
        # Retrieve directories alphabetically
        directory = gnomevfs.DirectoryHandle(uri)
        file_filter = self.__settings.get_file_filter()
        show_file = None
        # Optional regex filter from settings restricts which files are shown.
        if len(file_filter) > 0:
            comp = re.compile(file_filter)
            def __show_file(file_name):
                if comp.search(file_name) is not None:
                    return True
                return False
            show_file = __show_file
        for file_info in sorted(directory, cmp=self.__compare_files):
            # Process folders
            if files.is_visible_dir(file_info):
                file_uri = uri.append_file_name(file_info.name)
                cur_dir = self.__append_descriptor(file_uri, True, parent)
                self.__append_loading_cell(cur_dir)
            # Process Files
            elif files.is_visible_file(file_info):
                if show_file is not None and not show_file(file_info.name):
                    continue
                file_uri = uri.append_file_name(file_info.name)
                self.__append_descriptor(file_uri, False, parent)
    def __compare_files(self, file_a, file_b):
        """Compares two files and determines which is first based on file type
        and file name (directories sort before regular files)."""
        type_a = file_a.type
        type_b = file_b.type
        # Make folders the most important in the list
        if type_a == gnomevfs.FILE_TYPE_DIRECTORY: type_a = 0
        else: type_a = 1
        if type_b == gnomevfs.FILE_TYPE_DIRECTORY: type_b = 0
        else: type_b = 1
        type_comp = cmp(type_a, type_b)
        # If the files are the same type then compare names
        if type_comp == 0:
            return cmp(file_a.name, file_b.name)
        return type_comp
    def __empty_directory(self, iterator):
        """Removes all the items within a directory on the tree."""
        model = self.get_model()
        # Remove each of the child nodes within the iterator
        while model.iter_has_child(iterator):
            child = model.iter_nth_child(iterator, 0)
            model.remove(child)
    def __append_descriptor(self, uri, is_dir, parent):
        """Creates a tree node with a path descriptor."""
        open_icon = None
        default_icon = None
        # Retrieve a default and open icon if the URI is a folder, otherwise
        # just a default icon
        if is_dir:
            open_icon = self.__icons.folder_open
            default_icon = self.__icons.folder
        else:
            default_icon = self.__icons.retrieve_file_icon(str(uri))
        # Create a descriptor and append a new node that represents that
        # descriptor into the tree
        desc = PathDescriptor(uri, is_dir)
        parent_dir = self.get_model().append(parent, [desc, default_icon,
            open_icon])
        # Attach the corresponding tree iterator to the descriptor
        desc.set_iter(parent_dir)
        return parent_dir
    def __append_empty_cell(self, iterator):
        """Creates an 'empty' cell within the tree."""
        self.get_model().append(iterator, [msg0003, None, None])
    def __append_loading_cell(self, iterator):
        """Creates a 'loading' cell within the tree."""
        self.get_model().append(iterator, [msg0002, None, None])
    def __retrieve_filename(self, column, cell, model, iterator):
        """Retrieves the filename of the PathDescriptor."""
        desc = model.get_value(iterator, 0)
        # Retrieve the filename of the PathDescriptor or string.
        if isinstance(desc, PathDescriptor):
            cell.set_property(u'text', desc.get_name())
        else:
            cell.set_property(u'text', desc)
    def __on_expand_row(self, widget, iterator, path, data=None):
        """Empties a directory then loads in the files."""
        if iterator is not None:
            desc = self.get_model().get_value(iterator, 0)
            if not isinstance(desc, PathDescriptor):
                return
            # If the object is a directory clear its contents within the tree
            # and rescan it
            if desc.is_dir():
                self.freeze_child_notify()
                # Empty the directory
                self.__empty_directory(iterator)
                self.__populate_directory(desc.get_uri(), iterator)
                # Append an "Empty" cell if the directory is empty
                if not self.get_model().iter_has_child(iterator):
                    self.__append_empty_cell(iterator)
                self.thaw_child_notify()
        self.queue_draw()
    def __on_collapse_row(self, widget, iterator, path, data=None):
        """Empties a directory to conserve memory."""
        if iterator is not None:
            desc = self.get_model().get_value(iterator, 0)
            if not isinstance(desc, PathDescriptor):
                return
            # If the object is a directory clear its contents within the tree
            # and rescan it
            if desc.is_dir():
                self.freeze_child_notify()
                # Empty the directory
                self.__empty_directory(iterator)
                # Append a loading node to be used later when expanding
                self.__append_loading_cell(iterator)
                self.thaw_child_notify()
        self.queue_draw()
    def __on_row_activated(self, widget, path, view_column, data=None):
        """Enters a directory or loads a file."""
        iterator = self.get_model().get_iter(path)
        if iterator is not None:
            desc = self.get_model().get_value(iterator, 0)
            # Be sure we have a PathDescriptor before we try to activate the
            # node.
            if not isinstance(desc, PathDescriptor):
                return
            # Expand or collapse a directory
            if desc.is_dir():
                if self.row_expanded(path):
                    self.collapse_row(path)
                else:
                    self.expand_row(path, False)
            # Activate the file
            else:
                if self.__activate_file is not None:
                    self.__activate_file(desc)
        self.queue_draw()
|
from django.shortcuts import render, redirect
from .forms import PlanilhaForm
from .models import Planilha
from processo.models import Processo
def home(request):
    """Render the home page listing every registered Planilha."""
    planilhas = Planilha.objects.all()
    return render(request, 'cadastro_processo/home.html', context={'planilhas': planilhas})
def cadastro(request):
    """Handle the Planilha upload form.

    GET renders an empty form; a valid POST saves and redirects home; an
    invalid POST re-renders the bound form with its errors.
    """
    if request.method != 'POST':
        return render(request, 'cadastro_processo/cadastro_processo.html',
                      {'form': PlanilhaForm()})
    form = PlanilhaForm(request.POST, request.FILES)
    if form.is_valid():
        form.save()
        return redirect('cadastro_processo:home')
    return render(request, 'cadastro_processo/cadastro_processo.html', {'form': form})
def processo_detail(request, id):
    """Show the Planilha with the given id plus the client's Processos.

    NOTE: `id` shadows the builtin but is part of the URLconf interface.
    """
    planilha = Planilha.objects.get(id=id)
    processos = Processo.objects.filter(cliente_id=id)
    return render(request, 'cadastro_processo/processo_detalhe.html',
                  context={'processos': processos, 'planilha': planilha})
|
from collections import defaultdict
from time import perf_counter
start = perf_counter()
dir_mappings = [
(1, 0),
(0, -1),
(-1, 0),
(0, 1),
]
def simulate(num_moves, f=False):
    """Run a Langton's-ant style walk for `num_moves` steps on an infinite grid.

    Each step toggles the current cell between 0 and 1 — turning right
    (direction + 1) on a 0-cell, left (direction - 1) on a 1-cell — then
    moves one cell in the new direction.

    :param num_moves: number of steps to simulate
    :param f: unused; kept only for backward compatibility with callers
    :return: the list of (dx, dy) unit moves taken, in order

    Fix: the original had an unreachable `return sum(grid.values())` after
    `return moves`; the dead statement is removed (return value unchanged).
    The unused loop index is also replaced with `_`.
    """
    # Direction table, index 0..3; kept local so the function is self-contained.
    dir_mappings = [
        (1, 0),
        (0, -1),
        (-1, 0),
        (0, 1),
    ]
    grid = defaultdict(int)
    x, y = 0, 0
    direction = 0
    moves = []
    for _ in range(num_moves):
        if grid[(x, y)] == 1:
            grid[(x, y)] = 0
            direction = (direction - 1) % 4
        else:
            grid[(x, y)] = 1
            direction = (direction + 1) % 4
        dx, dy = dir_mappings[direction]
        x += dx
        y += dy
        moves.append((dx, dy))
    return moves
def guess(x):
    # TODO: unimplemented stub.
    pass
x = 10 ** 6
s = simulate(x)
def find_cycle(eles):
    # TODO: unimplemented stub.
    pass
# NOTE(review): simulate returns the full move list, so this prints 10**6 tuples.
print(s)
# print("x =", x)
# print("s =", s, " | s / x =", round(s / x, 3))
end = perf_counter()
print(f"{end - start:.4f} sec")
|
class Solution(object):
    def trailingZeroes(self, n):
        """Count trailing zeros of n!.

        Each trailing zero needs a factor of 10 = 2 * 5, and factors of 5 are
        the scarce ones, so the answer is sum(n // 5**k for k >= 1).
        Complexity: O(log_5 n).
        """
        zeros = 0
        power_of_five = 5
        while power_of_five <= n:
            zeros += n // power_of_five
            power_of_five *= 5
        return zeros
"""
1) complexity : O(log5n)
2) 100/5 + 20/5 + 4/5 = total no. of zeroes in 100!
3) divide by 5 untill the number becomes zero
"""
|
import numpy as np
import time
from nltk.corpus import wordnet as wn
from tfIdf import idf
from PreProcess import getProcessedConcepts
from nlpTasks import getSynonyms, getVerbs, getNouns
# To compute time
# Millisecond wall-clock timestamp helper (used to time getConcepts below).
current_millis_time = lambda: int(round(time.time() * 1000))
# Getting concepts extracted from videos
# Module-level load: runs once at import time.
lines, raw = getProcessedConcepts()
def computeSimilarity(queryPhrase, concept):
    """Score how similar `concept` is to `queryPhrase`.

    Words match directly or through WordNet noun/verb synonyms of the
    concept's words; each common word is weighted by its idf and a
    WordNet-depth specificity score, and the fraction of matched concept
    words scales the best per-word score.
    """
    score = 0
    queryWords = set(queryPhrase.split(" "))
    conceptWords = set(concept.split(" "))
    conceptSynonyms = set()
    for cw in conceptWords:
        conceptSynonyms.update(getSynonyms(cw, wn.NOUN))
        conceptSynonyms.update(getSynonyms(cw, wn.VERB))
    commonWords = queryWords.intersection(conceptWords)
    commonWords.update(queryWords.intersection(conceptSynonyms))
    if len(commonWords) > 0:
        maxScore = 0
        for cw in commonWords:
            minScore = 100000 #S_u — sentinel meaning "no synset depth found yet"
            for synset in wn.synsets(cw):
                depth = synset.max_depth()
                # NOTE(review): `+ 1` is applied on every iteration, so
                # minScore accumulates instead of tracking min(depth) + 1.
                # Possibly intended: minScore = min(depth + 1, minScore) — confirm.
                minScore = min(depth,minScore) + 1
            if minScore == 100000:
                minScore = 0
            tfIdf = idf(cw)
            maxScore = max(maxScore, tfIdf*minScore)
        # NOTE(review): raises ZeroDivisionError if `concept` is an empty string.
        score = (len(commonWords)/len(conceptWords)) * maxScore #+ 10*len(commonWords)
    return score
def getConcepts(sentence):
    """Rank the pre-loaded video concepts against `sentence`.

    Builds uni- and bi-gram phrases from the sentence's nouns and verbs,
    scores every concept/phrase pair with computeSimilarity, keeps only the
    best-scoring concept per phrase, then prints the surviving concepts and
    their scores (timed in milliseconds).
    """
    print(sentence)
    st = current_millis_time()
    nouns = getNouns(sentence)
    verbs = getVerbs(sentence)
    phrases = nouns.copy()
    phrases.update(verbs)
    phrases = list(phrases)
    # Add ordered two-word combinations of the extracted phrases.
    biPhrases = phrases.copy()
    for phrase in phrases:
        for p in phrases:
            if p != phrase:
                biPhrases.append(p + " " + phrase)
    phrases = biPhrases
    numPhrases = len(phrases)
    numConcepts = len(lines)
    #Similarity Matrix: rows = concepts, columns = phrases
    S = np.zeros((numConcepts, numPhrases))
    for i in range(0, numConcepts):
        for j in range(0, numPhrases):
            ss = computeSimilarity(phrases[j], lines[i])
            S[i][j] = ss
    # Keep only each column's maximum; zero out every other entry.
    sMax = S.max(0)
    for j in range(0, numPhrases):
        for i in range(0, numConcepts):
            if sMax[j] == S[i][j]:
                S[i][j] = sMax[j]
            else:
                S[i][j] = 0
    # Best remaining score per concept; nonzero means the concept survived.
    conceptVector = S.max(1)
    i = 0
    listOfConcepts = []
    conceptScores = []
    for line in raw:
        if conceptVector[i] != 0:
            listOfConcepts.append(line)
            conceptScores.append(conceptVector[i])
        i = i + 1
    print("Computed in {} milli secs".format(current_millis_time() - st))
    print(str(listOfConcepts))
    print(str(conceptScores))
"""ubi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
# API view sets essentials
from users.api.user import UserViewSet
from users.api.user import UserLoginView
from users.api.user import UserLogoutView
from musics.api.music import MusicViewSet
from musics.api.mymusics import MyMusicViewSet
# from django.views.generic import TemplateView
from ubi.views import IndexView
# DRF router: exposes /users/, /musics/ and the current user's /me/ endpoints.
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'musics', MusicViewSet)
# NOTE(review): `base_name` was renamed `basename` in DRF 3.9+ — confirm the
# installed rest_framework version still accepts this keyword.
router.register(r'me', MyMusicViewSet, base_name="me")
# urlpatterns = router.urls
# django admin
urlpatterns = [
    url(r'^api/v1/', include(router.urls)),
    url(r'^admin/', admin.site.urls),
    url(r'^auth/', include('rest_framework.urls',
        namespace='rest_framework')),
    url(r'^api/v1/auth/login/$', UserLoginView.as_view(), name='login'),
    url(r'^api/v1/auth/logout/$', UserLogoutView.as_view(), name='logout'),
    # any other than above — catch-all must stay last so it doesn't shadow the API.
    url('^.*$', IndexView.as_view(), name='index'),
]
|
from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import (StageToRedshiftOperator, LoadFactOperator, LoadDimensionCsvOperator,
LoadDimensionOperator, DataQualityOperator, PostgresOperator)
from airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator
from helpers import SqlQueries
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.models import Variable
# AWS_KEY = os.environ.get('AWS_KEY')
# AWS_SECRET = os.environ.get('AWS_SECRET')
# Defaults applied to every task; the custom operators read the connection
# ids ('redshift_conn_id', 'aws_credential_id') from these default_args.
default_args = {
    'owner': 'udacity',
    'start_date': datetime(2020, 11, 1),
    'email_on_retry': False,
    'depends_on_past': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=3),
    'redshift_conn_id': 'redshift',
    'aws_credential_id': 'aws_credentials'
}
# One-shot capstone pipeline: S3 JSON/CSV -> Redshift staging -> dims -> facts.
dag = DAG('udac_dag',
        default_args = default_args,
        description = 'Load and transform data in Redshift with Airflow',
        schedule_interval = '@once',
        max_active_runs = 6
        )
start_operator = DummyOperator(task_id='Begin_execution', default_args = default_args, dag=dag)
# Group1: copy JSON outputs from S3 into Redshift staging tables.
stage_temperatures = StageToRedshiftOperator(
    task_id='stage_temperatures',
    dag=dag,
    table = "staging_temperatures",
    s3_bucket = "s3://de-capstone/temperature/output/"
)
staging_demographics = StageToRedshiftOperator(
    task_id = 'staging_demographics',
    dag = dag,
    table = "staging_demographics",
    s3_bucket = "s3://de-capstone/demographics/output/"
)
staging_airports = StageToRedshiftOperator(
    task_id = 'staging_airports',
    dag = dag,
    table = "staging_airports",
    s3_bucket = "s3://de-capstone/airport_codes/output/"
)
staging_immigrations = StageToRedshiftOperator(
    task_id = 'staging_immigrations',
    dag = dag,
    table = "staging_immigrations",
    s3_bucket = "s3://de-capstone/immigration/output/"
)
# Group2: load external CSV reference data straight into dimension tables.
dimension_countries = LoadDimensionCsvOperator(
    task_id = 'dimension_countries',
    dag = dag,
    table = "Countries",
    s3_bucket = "s3://de-capstone/country_data/countries.csv"
)
dimension_usa_states = LoadDimensionCsvOperator(
    task_id = 'dimension_usa_states',
    dag = dag,
    table = "USAStates",
    s3_bucket = "s3://de-capstone/USA_States/US_States.csv"
)
# Group3: derive dimension tables from the staged data via SQL inserts.
insert_dimension_coordinates = LoadDimensionOperator(
    task_id = 'insert_dimension_coordinates',
    dag = dag,
    insert_sql = SqlQueries.insert_dimension_coordinates
)
insert_dimension_regions = LoadDimensionOperator(
    task_id = 'insert_dimension_regions',
    dag = dag,
    insert_sql = SqlQueries.insert_dimension_regions
)
insert_dimension_cities = LoadDimensionOperator(
    task_id = 'insert_dimension_cities',
    dag = dag,
    insert_sql = SqlQueries.insert_dimension_cities
)
insert_dimension_dates = LoadDimensionOperator(
    task_id = 'insert_dimension_dates',
    dag = dag,
    insert_sql = SqlQueries.insert_dimension_dates
)
insert_dimension_flights = LoadDimensionOperator(
    task_id = 'insert_dimension_flights',
    dag = dag,
    insert_sql = SqlQueries.insert_dimension_flights
)
insert_dimension_transportations = LoadDimensionOperator(
    task_id = 'insert_dimension_transportations',
    dag = dag,
    insert_sql = SqlQueries.insert_dimension_transportations
)
# Group4: populate fact tables from staging + dimension tables.
insert_fact_immigrations = LoadFactOperator(
    task_id = 'insert_fact_immigrations',
    dag = dag,
    insert_sql = SqlQueries.insert_fact_immigrations
)
insert_fact_temperatures = LoadFactOperator(
    task_id = 'insert_fact_temperatures',
    dag = dag,
    insert_sql = SqlQueries.insert_fact_temperatures
)
insert_fact_demographics = LoadFactOperator(
    task_id = 'insert_fact_demographics',
    dag = dag,
    insert_sql = SqlQueries.insert_fact_demographics
)
insert_fact_airports = LoadFactOperator(
    task_id = 'insert_fact_airports',
    dag = dag,
    insert_sql = SqlQueries.insert_fact_airports
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
"""
There are 4 groups of tasks:
- Group1: tasks to load JSON files from S3 to staging tables in Redshift.
- Group2: tasks to load external datasets (CSV) from S3 to 2 dimension tables COuntries & USAStates
- Group3: tasks to query data from staging tables. then adding into dimension tables.
- Group4: tasks to query data from both staging & dimension tables, then adding into fact table.
Dependencies between group tasks should be:
Group1 >> Group2 >> Group3 >> Group4
"""
# Group1
s3_to_staging = (stage_temperatures, staging_demographics, staging_airports, staging_immigrations)
# Group2
external_ds_to_dimension = (dimension_countries, dimension_usa_states)
# Group3
insert_dimensions = (insert_dimension_coordinates, insert_dimension_regions, insert_dimension_cities, insert_dimension_dates, insert_dimension_flights, insert_dimension_transportations)
# Group4
insert_facts = (insert_fact_airports, insert_fact_demographics, insert_fact_temperatures, insert_fact_immigrations)
start_operator >> s3_to_staging
stage_temperatures >> external_ds_to_dimension
staging_demographics >> external_ds_to_dimension
staging_airports >> external_ds_to_dimension
staging_immigrations >> external_ds_to_dimension
dimension_countries >> insert_dimensions
dimension_usa_states >> insert_dimensions
insert_dimensions >> insert_fact_airports
insert_dimensions >> insert_fact_demographics
insert_dimensions >> insert_fact_temperatures
insert_dimensions >> insert_fact_immigrations
insert_facts >> end_operator
|
def target_and_weight_const(target_col, weight_col):
    """Emit the notebook snippet that defines target_col and weight_col.

    Both arguments are already-formatted Python expressions (strings).
    """
    return "target_col = " + target_col + "\nweight_col = " + weight_col
def check_weight_col():
    """Emit the snippet that defaults weight_col to a constant-1 'weight' column."""
    return """if weight_col is None:
    weight_col='weight'
    df[weight_col] = 1"""
def get_features():
    """Emit the snippet defining feature/categorical/numerical column helpers."""
    return """def get_features(df):
    return [x for x in list(df.columns) if x not in [target_col,weight_col]]
def get_categorical_cols():
    return [x for x,v in col_datatype.items() if v==categorical]
def get_numerical_cols():
    return [x for x,v in col_datatype.items() if v==numerical]"""
def get_needful_functions():
    """Emit the shared notebook helpers (colors, copy, totals, fills, top-k)."""
    return """def random_colors(num_of_colors):
    color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)])
             for i in range(num_of_colors)]
    return color
def make_copy_df(df):
    return df.copy()
def get_total(df,col):
    return df[col].sum()
def fill_default_values(df):
    for c in get_features(df):
        df[c].fillna(col_default.get(c),inplace=True)
    return df
def return_top_k(df,col,top_k):
    temp_df = df.sort_values(by=col,ascending=False)
    return temp_df[:top_k]"""
def unique_count():
    """Emit the snippet that builds a per-categorical-column unique-count frame."""
    return """#Unique Values DataFrame
def unique_count(df):
    feature_col = 'Features'
    count_col = 'Unique Count'
    unique_count_df = pd.DataFrame(columns=[feature_col,count_col])
    unique_count_df[feature_col] = get_categorical_cols()
    unique_count_df[count_col] = unique_count_df[feature_col].apply(lambda col: df[col].nunique())
    return unique_count_df"""
def make_unique_df():
    """Emit the cell that materializes and displays the unique-count frame."""
    return """unique_count_df = unique_count(df)
unique_count_df"""
def make_group():
    """Emit the snippet that groups a column subset and sums the weight column."""
    return """# This function returns the dataframe subset and fill NULL values with some other value
def make_group(df,col,weight_col,fill_na=False,reset_index=True):
    temp_df = pd.DataFrame(df[col+[weight_col]])
    if fill_na:
        temp_df = fill_default_values(temp_df)
    group = temp_df.groupby(col).agg({weight_col:'sum'})
    if reset_index:
        group = group.reset_index()
    return group"""
def do_miscing():
    """Emit the snippet that buckets low-weight category values into a misc value."""
    return """def do_miscing(df,col,weight_col,misc_percent):
    group = make_group(df,[col],weight_col)
    if_misc_col = 'if_misc'
    group[if_misc_col]=False
    summation = get_total(df,weight_col)*misc_percent*0.01
    group[if_misc_col] = group[weight_col].apply(lambda x:True if x<summation else False)
    group[col] = group.apply(lambda x:misc_col_value if x[if_misc_col] else x[col],axis=1)
    misced_group = make_group(group,[col],weight_col)
    return misced_group"""
def is_feature_irrelevant():
    """Emit the snippet that flags a column with no non-default, non-misc values."""
    return """def is_feature_irrelevant(df,col,weight_col,misc_percent):
    fin_group = do_miscing(df,col,weight_col,misc_percent)
    fin_group = fin_group[(fin_group[col]!=col_default.get(col)) & (fin_group[col]!=misc_col_value)]
    return fin_group.empty"""
def get_irrelevant_features():
    """Emit the snippet listing all-unique or effectively-empty columns.

    (The 'misc_precent' typo inside the template is preserved deliberately —
    it is runtime output and consistent within the generated code.)
    """
    return """def get_irrelevant_features(df,weight_col,misc_precent):
    irrelevant_cols=[]
    for col in get_features(df):
        if df[col].nunique()==df.shape[0]:
            irrelevant_cols.append(col)
        elif is_feature_irrelevant(df,col,weight_col,0.05):
            irrelevant_cols.append(col)
    return irrelevant_cols"""
def remove_irrelevant_features():
    """Emit the snippet that drops the irrelevant columns in place."""
    return """def remove_irrelevant_features(df,weight_col,misc_percent):
    irrelevant_features = get_irrelevant_features(df,weight_col,misc_percent)
    df.drop(irrelevant_features,axis=1,inplace=True)
    return df"""
def remove_irrelevant_features_from_df():
    """Emit the one-line cell that applies remove_irrelevant_features to df."""
    return """df = remove_irrelevant_features(df,weight_col,misc_percent)"""
def make_misced_df():
    """Emit the snippet that builds a copy of df with rare categories misced."""
    return """def make_misced_df(df,target_col,weight_col):
    df = make_copy_df(df)
    df = fill_default_values(df)
    misced_df = pd.DataFrame(columns = list(df.columns))
    misced_df[target_col] = df[target_col]
    misced_df[weight_col] = df[weight_col]
    for col in get_categorical_cols():
        misced_group = do_miscing(df,col,weight_col,misc_percent)
        unique_values = set(misced_group[col].unique())
        misced_df[col] = df[col].apply(lambda x: x if (x in unique_values) else misc_col_value)
    for col in get_numerical_cols():
        misced_df[col] = df[col]
    return misced_df"""
def misced_df():
    """Emit the cell that materializes the misced dataframe and shows its head."""
    return """misced_df = make_misced_df(make_copy_df(df),target_col,weight_col)
misced_df.head()"""
from django.shortcuts import render
from django.views.generic import TemplateView
from django.http import HttpResponse
from django.contrib import messages
from blog1.models import Post
# Create your views here.
def home(request):
    """Render the site home page."""
    return render(request,'home/home.html')
def about(request):
    """Render the static about page."""
    return render(request,'home/about.html')
# class AboutView(TemplateView):
#     template_name='home/about.html'
def login(request):
    """Render the login page (template only; no auth handling here)."""
    return render(request,'home/login.html')
def contact(request):
    """Render the static contact page."""
    return render(request,'home/contact.html')
# NOTE: `help` shadows the builtin, but the name is referenced from the URLconf.
def help(request):
    """Render the static help page."""
    return render(request,'home/help.html')
def search(request):
    """Search blog posts whose title or content contains the query.

    Queries longer than 78 characters (or missing) yield an empty result set;
    a warning message is flashed when nothing matches.
    """
    # .get() avoids a MultiValueDictKeyError/KeyError when 'query' is absent.
    query = request.GET.get('query', '')
    if len(query) > 78:
        # Bug fix: was `Post.object.none()`, which raises AttributeError at
        # runtime — the Django default manager is `Post.objects`.
        allPosts = Post.objects.none()
    else:
        allPostsTitle = Post.objects.filter(title__icontains=query)
        allPostsContent = Post.objects.filter(content__icontains=query)
        allPosts = allPostsTitle.union(allPostsContent)
    if allPosts.count() == 0:
        messages.warning(request, "No search result found. please refine your query")
    params = {'allPosts': allPosts, 'query': query}
    return render(request, 'home/search.html', params)
|
import numpy as np
import xlwings as xw
from rate_curve_class import RateCurve
from strike_grid_discretization import StrikeGridsAllTenors
from tenor_market_data import TenorMarketData
from implied_vol_class import ImpliedVolatility
from compute_sum_sqr_vol_T import compute_sum_sqr_vol_T
from new_pillar_strike_extrapolation import NewPillarStrikeExtrapolation
from compute_maturity_grid import compute_maturity_grid
from compute_local_vol_init_guess import compute_local_vol_init_guess
def input_data_initialization():
    """Load local-vol calibration inputs from 'LocVol Parameters.xlsx'.

    Reads spot, three rate curves (domestic, foreign, cross-currency spread)
    and the implied-vol surface from the IRSMFORM sheet, and wraps the curves
    in RateCurve objects.

    Returns: (S, r_para, rf_para, csc_para, imp_vol_tenors, imp_vol_strikes,
    imp_vol_quotes). Requires the workbook to be openable by xlwings (Excel).
    """
    np.set_printoptions(linewidth=150)
    wb = xw.Book('LocVol Parameters.xlsx')
    #wb = xw.Book(r'source\LocVol Parameters.xlsx')
    sht = wb.sheets['IRSMFORM']
    ## Read data from file
    S = sht.range('B2').value
    # Columns B/C: domestic rate curve; E/F: foreign; H/I: cross-currency spread.
    r_tenors = sht.range('B6').options(np.array, expand='down').value
    r_quotes = sht.range('C6').options(np.array, expand='down').value
    rf_tenors = sht.range('E6').options(np.array, expand='down').value
    rf_quotes = sht.range('F6').options(np.array, expand='down').value
    csc_tenors = sht.range('H6').options(np.array, expand='down').value
    csc_quotes = sht.range('I6').options(np.array, expand='down').value
    # Implied-vol surface: tenor column plus strike/quote tables.
    imp_vol_tenors = sht.range('K6').options(np.array, expand='down').value
    imp_vol_strikes = sht.range('N6').options(np.array, expand='table').value
    imp_vol_quotes = sht.range('T6').options(np.array, expand='table').value
    ## Build up rate curve class
    r_para = RateCurve(r_tenors, r_quotes)
    rf_para = RateCurve(rf_tenors, rf_quotes)
    csc_para = RateCurve(csc_tenors, csc_quotes)
    return S, r_para, rf_para, csc_para, imp_vol_tenors, imp_vol_strikes, imp_vol_quotes
|
import math, binascii, urllib
import hashlib
from PIL import Image
from Crypto import Random
from Crypto.Cipher import AES
#text_name = raw_input("Type the name of the file to encrypt with the extension: ")
# NOTE: Python 2 script. It uses PIL's long-deprecated Image.tostring()
# (renamed tobytes()/frombytes() in modern Pillow) — will not run on Pillow 3+.
text_name = '128.BMP'
im = Image.open(text_name)
# [raw pixel bytes, (width, height), mode string]
image_info = [im.tostring(), im.size, im.mode]
#image_info[2] = 'P'
print im.mode
print im.info
print im.format
print im.tostring()
#text_name = 'encrypted_ECB.bmp'
text_name = 'decrypted_ECB.png'
iv = Random.new().read(AES.block_size)
im = Image.open(text_name)
image_info = [im.tostring(), im.size, im.mode]
# Normalize the decrypted PNG to RGB before round-tripping it as BMP.
converted = im.convert('RGB')
print converted.mode
print converted.info
print converted.format
print converted.tostring()
Image.frombytes(converted.mode, converted.size, converted.tostring()).save("test.bmp")
converted.show()
text_name = 'test.bmp'
print "File open is ", text_name
# NOTE(review): iv is computed twice and never used; AES encryption itself
# is commented out below — this file looks like exploratory scaffolding.
iv = Random.new().read(AES.block_size)
im = Image.open(text_name)
image_info = [im.tostring(), im.size, im.mode]
#image_info[2] = 'P'
print im.mode
print im.info
print im.format
print im.tostring()
#cipher = AES.new(key_pad, AES.MODE_ECB)
from config.Config import Config
def test_config_debug_is_true():
    """Smoke test: camera and gpio sections of Config report debug enabled."""
    config = Config()
    assert config.camera.debug
    assert config.gpio.debug
if __name__ == '__main__':
    # Allow running this file directly, without a test runner.
    test_config_debug_is_true()
    print('All tests passed.')
|
from Settings import Settings
from app import manager as app
if __name__=="__main__":
settings = Settings()
settings.populate_db()
app.run() |
"""
给你两个有序整数数组 nums1 和 nums2,请你将 nums2 合并到 nums1 中,使 num1 成为一个有序数组。
说明:
初始化 nums1 和 nums2 的元素数量分别为 m 和 n 。
你可以假设 nums1 有足够的空间(空间大小大于或等于 m + n)来保存 nums2 中的元素。
示例:
输入:
nums1 = [1,2,3,0,0,0], m = 3
nums2 = [2,5,6], n = 3
输出: [1,2,2,3,5,6]
https://leetcode-cn.com/problems/merge-sorted-array/
"""
from typing import List
class Solution:
    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
        """Merge sorted nums2 into sorted nums1 in place.

        nums1 has length m + n: its first m entries are valid, the rest is
        filler space. Do not return anything, modify nums1 in-place instead.

        Improvement: merges backwards from index m + n - 1 into the filler
        space, eliminating the original's O(m + n) temporary copy of nums1
        (now O(1) extra space, still O(m + n) time).
        """
        i = m - 1          # last valid element of nums1
        j = n - 1          # last element of nums2
        k = m + n - 1      # next slot to fill, from the back
        # Place the larger of the two tails at position k; once nums2 is
        # exhausted (j < 0), the remaining nums1 prefix is already in place.
        while j >= 0:
            if i >= 0 and nums1[i] > nums2[j]:
                nums1[k] = nums1[i]
                i -= 1
            else:
                nums1[k] = nums2[j]
                j -= 1
            k -= 1
# Quick manual check of Solution.merge; the result is left in nums1.
nums1 = [1,2,3,0,0,0]
m = 3
nums2 = [2,5,6]
n = 3
Solution().merge(nums1,m,nums2,n)
|
# Generated by Django 3.2 on 2021-04-17 13:14
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # NOTE: auto-generated by `makemigrations` (Django 3.2); avoid hand-editing
    # once this migration has been applied anywhere.
    dependencies = [
        ('blog', '0002_auto_20210410_1843'),
    ]
    operations = [
        # New Category table with auto-managed timestamps.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(verbose_name='标签名称')),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('update_time', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Backfills existing rows with "now"; default dropped afterwards.
        migrations.AddField(
            model_name='article',
            name='create_time',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='article',
            name='update_time',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='article',
            name='body',
            field=models.TextField(verbose_name='内容'),
        ),
        migrations.AlterField(
            model_name='article',
            name='title',
            field=models.TextField(verbose_name='标题'),
        ),
        # Existing articles are attached to category id 1 (no DB-level FK).
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(db_constraint=False, default=1, on_delete=django.db.models.deletion.CASCADE, to='blog.category'),
            preserve_default=False,
        ),
    ]
|
# Source code for Test 1 program. Success!
def add_function_ui(commandsList, command_parameters):
    """
    Adds a given function, its parameters, and its result to the list of
    defined functions.
    :param commandsList: the list of all defined functions (mutated in place)
    :param command_parameters: text of the form "name(params=result"
    :return: -
    :raises IndexError: if '(' or '=' is missing from the input
    """
    name_and_rest = command_parameters.split('(', 1)
    params_and_result = name_and_rest[1].split('=', 1)
    commandsList.append({
        'Function name': name_and_rest[0],
        'Function parameters': params_and_result[0],
        'Function result': params_and_result[1],
    })
def list_function_ui(commandsList, command_parameters):
    """
    Prints every stored definition whose name matches command_parameters.
    :param commandsList: the list of all defined functions
    :param command_parameters: only the name of the function to list
    :raises ValueError: if no function with that name was defined
    """
    found = False
    # Idiom fix: iterate the entries directly instead of range(len(...)).
    for entry in commandsList:
        if entry['Function name'] == command_parameters:
            print('def ' + str(entry['Function name']) + '(' + str(entry['Function parameters']) + ': return ' +
                  str(entry['Function result']))
            found = True
    if not found:
        raise ValueError('The function was not defined yet!')
def eval_function_ui(commandsList, command_parameters):
    """
    Work-in-progress: evaluate a previously defined function with actual
    parameters. SECURITY NOTE: exec() on user-typed input executes arbitrary
    code — never expose this beyond a local toy REPL.
    """
    found = 0
    tokens = command_parameters.split('(', 1) #tokens[0] = the function name, tokens[1] = the actual parameters
    for index in range(len(commandsList)):
        if tokens[0] == commandsList[index]['Function name']:
            # Arity check: formal parameters (split on '+') vs actuals (split on ',').
            numberOfParametersFunction = commandsList[index]['Function parameters'].split('+')
            numberOfActualParameters = tokens[1].split(',')
            if len(numberOfParametersFunction) == len(numberOfActualParameters): #for example we can have add(a,b) and add(a,b,c) which are 2 different functions
                actualParameters = tokens[1].split(')')
                #for operation in actualParameters[0]: #we change ',' with '+' in the actual parameters
                    #if actualParameters[0][operation] == ',':
                        #actualParameters[0][operation] = '+' #not working yet
                # NOTE(review): exec() of a *list* raises TypeError, which the
                # caller prints — this branch is an incomplete implementation.
                exec(actualParameters) #?
                found =1
    if found == 0:
        raise TypeError('There was an error in the eval function!')
def split_command(command):
    """Split user input into (lowercased command word, parameter text)."""
    parts = command.strip().split(' ', 1)
    word = parts[0].strip().lower()
    rest = parts[1].strip() if len(parts) > 1 else ''
    return word, rest
def start_command_ui():
    """
    Run the interactive read-eval loop.

    Recognised commands: 'add', 'list', 'eval' (dispatched through a dict)
    and 'exit' to quit. Expected user errors (ValueError, TypeError,
    IndexError) are printed; anything else prints a generic message.
    """
    commandsList = []
    done = False
    command_dictionary = {'add': add_function_ui, 'list': list_function_ui, 'eval': eval_function_ui}
    while not done:
        command = input("\ncommand: ").strip().lower()
        command_word, command_parameters = split_command(command)
        if "exit" == command_word:
            print("See you later!")
            done = True
        elif command_word in command_dictionary:
            try:
                command_dictionary[command_word](commandsList, command_parameters)
            except (ValueError, TypeError, IndexError) as err:
                # expected user-facing errors: show the message and continue
                print(str(err))
            except Exception:
                # was a bare `except:` — narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed
                print("There was an exception which was not handled!")
        else:
            print("\nThis is not a command!")


start_command_ui()
# from datetime import datetime, timedelta
import datetime
from datetime import timedelta
from dateutil import tz
import time
import logging
import astral
import smbus
from enum import Enum, unique
# REFERENCES: https://github.com/switchdoclabs/RTC_SDL_DS3231
# https://pypi.org/project/astral/1.2/
#
# datasheet: https://datasheets.maximintegrated.com/en/ds/DS3231.pdf
@unique
class AlrmType_t(Enum):
    """Alarm-configuration modes for the DS3231.

    The encoded value packs the alarm mask bits consumed by the alarm-setup
    code: bit 0 -> A1M1 (seconds), bit 1 -> A1M2 (minutes),
    bit 2 -> A1M3 (hours), bit 3 -> A1M4 (day/date), bit 4 -> DY/DT
    (match day-of-week instead of date), and bit 7 selects alarm 2
    instead of alarm 1.
    """
    ALM1_EVERY_SECOND = 0x0F
    ALM1_MATCH_SECONDS = 0x0E
    ALM1_MATCH_MINUTES = 0x0C   # match minutes *and* seconds
    ALM1_MATCH_HOURS = 0x08     # match hours *and* minutes, seconds
    ALM1_MATCH_DATE = 0x00      # match date *and* hours, minutes, seconds
    ALM1_MATCH_DAY = 0x10       # match day *and* hours, minutes, seconds
    ALM2_EVERY_MINUTE = 0x8E
    ALM2_MATCH_MINUTES = 0x8C   # match minutes
    ALM2_MATCH_HOURS = 0x88     # match hours *and* minutes
    ALM2_MATCH_DATE = 0x80      # match date *and* hours, minutes
    ALM2_MATCH_DAY = 0x90       # match day *and* hours, minutes
class DS3231(object):
    """Driver for the Maxim DS3231 I2C real-time clock.

    Supports reading/writing the time registers, configuring alarms 1 and 2,
    reading/clearing status flags, and reading the on-die temperature sensor.
    datasheet: https://datasheets.maximintegrated.com/en/ds/DS3231.pdf
    """

    # reg map for the DS3231 RTC
    (
        _REG_SEC,              # 0x00
        _REG_MIN,              # 0x01
        _REG_HRS,              # 0x02
        _REG_DAY,              # 0x03
        _REG_DATE,             # 0x04
        _REG_MONTH,            # 0x05
        _REG_YR,               # 0x06
        _REG_ALRM_1_SEC,       # 0x07
        _REG_ALRM_1_MIN,       # 0x08
        _REG_ALRM_1_HRS,       # 0x09
        _REG_ALRM_1_DAY_DATE,  # 0x0a
        _REG_ALRM_2_MIN,       # 0x0b
        _REG_ALRM_2_HRS,       # 0x0c
        _REG_ALRM_2_DAY_DATE,  # 0x0d
        _REG_CTRL,             # 0x0e
        _REG_STATUS,           # 0x0f
        _REG_AGE_OFFSET,       # 0x10
        _REG_TMP_MSB,          # 0x11
        _REG_TMP_LSB,          # 0x12
    ) = range(19)

    # change port to 0 if old gen 1 pi, else leave default
    # addr should not change as this is embedded in RTC
    def __init__(self, logger_name='main_logger',
                 logger_module_name='rtc',
                 i2c_port=1,
                 i2c_addr=0x68,
                 latitude=0.00,
                 longitude=0.00
                 ):
        """Create a DS3231 driver bound to an smbus port/address.

        :param logger_name: root logger to attach this module's logger to
        :param logger_module_name: child-logger alias for log messages
        :param i2c_port: smbus port (0 on gen-1 Pi, 1 otherwise)
        :param i2c_addr: fixed 7-bit device address of the DS3231 (0x68)
        :param latitude: site latitude, used for the sunrise alarm
        :param longitude: site longitude, used for the sunrise alarm
        """
        # instantiate logger
        self.logger = logging.getLogger(logger_name + '.' + logger_module_name)
        self.logger.info('creating an instance of the ' + __name__ + ' with the alias {}'.format(logger_module_name))
        # constants
        self._SEC_PER_MIN = 60
        self._MIN_PER_HR = 60
        self._HR_PER_DAY = 24
        self._DAY_PER_WEEK = 7
        self._MAX_DAYS_PER_MONTH = 31
        self._MONTH_PER_YR = 12
        self._YRS_PER_CENTURY = 100
        # i2c object
        self._bus = smbus.SMBus(i2c_port)
        self._addr = i2c_addr
        # coordinates
        self._latitude = latitude
        self._longitude = longitude
        # masks
        self._MASK_oscillator_on = 0b1 << 7
        # _REG_CTRL
        # todo: can probably remove these masks since we won't need to change
        # them after config is done
        self._CONFIG_REG_CTRL = 0x05
        # _REG_STATUS
        self._MASK_power_lost = 0x80
        self._MASK_en_32_kHz = 0x08
        self._MASK_busy = 0x04
        self._MASK_alrm_2_flag = 0x02
        self._MASK_alrm_1_flag = 0x01
        self._CONFIG_REG_STATUS = 0x00
        # reg map tuples for DS3231
        self._reg_time_addrs = (
            self._REG_SEC,
            self._REG_MIN,
            self._REG_HRS,
            self._REG_DAY,
            self._REG_DATE,
            self._REG_MONTH,
            self._REG_YR,
        )
        self._reg_alrm_1_addrs = (
            self._REG_ALRM_1_SEC,
            self._REG_ALRM_1_MIN,
            self._REG_ALRM_1_HRS,
            self._REG_ALRM_1_DAY_DATE,
        )
        self._reg_alrm_2_addrs = (
            self._REG_ALRM_2_MIN,
            self._REG_ALRM_2_HRS,
            self._REG_ALRM_2_DAY_DATE,
        )

    ''' _____ Private Members _____
    '''

    ''' Helper functions
    '''
    # BCD to integer
    # Decode n least significant packed binary coded decimal digits to binary.
    # Return binary result.
    # n defaults to 2 (BCD digits). n=0 decodes all digits.
    def __bcd_to_int(self, bcd, n=2):
        bcd2int = int(('%x' % bcd)[-n:])
        self.logger.debug('BCD to Int: {}'.format(bcd2int))
        return bcd2int

    # integer to BCD
    # Encode the n least significant decimal digits of x
    # to packed binary coded decimal (BCD). Return packed BCD value.
    # n defaults to 2 (digits). n=0 encodes all digits.
    def __int_to_bcd(self, x, n=2):
        int2bcd = int(str(x)[-n:], 0x10)
        self.logger.debug('Int to BCD: {}'.format(int2bcd))
        return int2bcd

    # utc to local time
    def __utc_to_local(self, utc):
        # Auto-detect zone
        from_zone = tz.tzutc()
        to_zone = tz.tzlocal()
        # convert time zone
        central = utc.astimezone(to_zone)
        self.logger.debug('Converting UTC to Local time')
        self.logger.debug('From Zone: {}, To Zone: {}'.format(from_zone, to_zone))
        self.logger.debug('Central Time: {}'.format(central))
        return central

    # write one byte of i2c data to a register
    # (dead `if False:` debug print removed)
    def __write(self, register, data):
        self._bus.write_byte_data(self._addr, register, data)

    # read one byte of i2c data from a register
    # (dead `if False:` debug print removed)
    def __read(self, reg_addr):
        data = self._bus.read_byte_data(self._addr, reg_addr)
        self.logger.debug('I2C read cmd: {}'.format(reg_addr))
        self.logger.debug('I2C read from addr: {}'.format(self._addr))
        self.logger.debug('I2C read data: {}'.format(data))
        return data

    ''' Time Registers
    '''
    # incoherent read of all time regs
    # Return tuple of yrs, month, date, day, hrs, mins, sec.
    # Since each value is read one byte at a time,
    # it might not be coherent.
    def __incoherent_read_all(self):
        sec, mins, hrs, day, date, month, yrs = (
            self.__read(reg_addr)
            for reg_addr in self._reg_time_addrs
        )
        sec &= ~self._MASK_oscillator_on
        # 12/24-hour fixups inherited from the reference driver: mask off the
        # mode bits so hrs decodes as plain BCD.
        # NOTE(review): the 0x64 special case looks suspicious — confirm
        # against the datasheet hour-register encoding.
        if hrs == 0x64:
            hrs = 0x40
        hrs &= 0x3F
        return_data = tuple(
            self.__bcd_to_int(t)
            for t in (yrs, month, date, day, hrs, mins, sec))
        self.logger.debug('Incoherent read all data regs returns: {}'.format(return_data))
        return return_data

    # Write all
    # updates RTC time register with synchronized information
    # Direct write; None values are skipped.
    # Range: sec [0,59], mins [0,59], hrs [0,23],
    #        day [1,7], date [1-31], month [1-12], yrs [0-99].
    def __write_all_time_regs(self, sec=None, mins=None, hrs=None, day=None,
                              date=None, month=None, yrs=None, save_as_24h=True):
        self.logger.debug('Performing write to all RTC time regs')
        if sec is not None:
            if not 0 <= sec < self._SEC_PER_MIN:
                raise ValueError('sec is out of range [0,59].')
            seconds_reg = self.__int_to_bcd(sec)
            self.__write(self._REG_SEC, seconds_reg)
        if mins is not None:
            if not 0 <= mins < self._MIN_PER_HR:
                raise ValueError('mins is out of range [0,59].')
            self.__write(self._REG_MIN, self.__int_to_bcd(mins))
        if hrs is not None:
            if not 0 <= hrs < self._HR_PER_DAY:
                raise ValueError('hrs is out of range [0,23].')
            self.__write(self._REG_HRS, self.__int_to_bcd(hrs))  # not | 0x40 according to datasheet
        if yrs is not None:
            if not 0 <= yrs < self._YRS_PER_CENTURY:
                raise ValueError('Years is out of range [0,99].')
            self.__write(self._REG_YR, self.__int_to_bcd(yrs))
        if month is not None:
            if not 1 <= month <= self._MONTH_PER_YR:
                raise ValueError('month is out of range [1,12].')
            self.__write(self._REG_MONTH, self.__int_to_bcd(month))
        if date is not None:
            # How about a more sophisticated check?
            if not 1 <= date <= self._MAX_DAYS_PER_MONTH:
                raise ValueError('Date is out of range [1,31].')
            self.__write(self._REG_DATE, self.__int_to_bcd(date))
        if day is not None:
            if not 1 <= day <= self._DAY_PER_WEEK:
                raise ValueError('Day is out of range [1,7].')
            self.__write(self._REG_DAY, self.__int_to_bcd(day))

    # write datetime
    # Write from a datetime.datetime object.
    def __set_datetime(self, dt):
        self.__write_all_time_regs(dt.second, dt.minute, dt.hour,
                                   dt.isoweekday(), dt.day, dt.month, dt.year % 100)
        self.logger.debug('Setting RTC with datetime object: {}'.format(dt))

    # Read All
    # Return tuple of yrs, month, date, day, hrs, mins, sec.
    # Read until one gets same result twice in a row.
    # Then one knows the time is coherent.
    def __get_all_time_regs(self):
        old = self.__incoherent_read_all()
        while True:
            new = self.__incoherent_read_all()
            if old == new:
                break
            self.logger.warning('Reading RTC time regs is on second boundry, trying again')
            old = new
        self.logger.debug('RTC time regs are stable, time regs are: {}'.format(new))
        return new

    # Read datetime Julian
    # todo: add function to return datetime object as julian time

    ''' Alarm Registers
    '''
    # set the alarm
    # For ALM2_* types, mins/hrs/daydate must all be provided
    # (alarm 2 has no seconds register).
    def __set_alrm_regs(self, alrm_type=None, sec=None, mins=None, hrs=None, daydate=None):
        self.logger.debug('Setting RTC alarm regs')
        if not isinstance(alrm_type, AlrmType_t):
            raise ValueError('Alarm Type is not in enumerate')
        if sec is not None:
            if not 0 <= sec < self._SEC_PER_MIN:
                raise ValueError('sec is out of range [0,59].')
            seconds = self.__int_to_bcd(sec)
        if mins is not None:
            if not 0 <= mins < self._MIN_PER_HR:
                raise ValueError('mins is out of range [0,59].')
            minutes = self.__int_to_bcd(mins)
        if hrs is not None:
            if not 0 <= hrs < self._HR_PER_DAY:
                raise ValueError('hrs is out of range [0,23].')
            hours = self.__int_to_bcd(hrs)
        if daydate is not None:
            # todo: create better range check here (day [1,7] vs date [1,31])
            daydate = self.__int_to_bcd(daydate)
        self.logger.debug('Alarm Type: {}'.format(alrm_type.name))
        self.logger.debug('Alarm Value: {}'.format(alrm_type.value))
        # translate the packed enum value into the per-register mask bits
        if (alrm_type.value & 0x01):  # A1M1
            seconds |= 0b1 << 7
            self.logger.debug('Setting mode A1M1')
        if (alrm_type.value & 0x02):  # A1M2
            minutes |= 0b1 << 7
            self.logger.debug('Setting mode A1M2')
        if (alrm_type.value & 0x04):  # A1M3
            hours |= 0b1 << 7
            self.logger.debug('Setting mode A1M3')
        if (alrm_type.value & 0x10):  # DYDT
            daydate |= 0b1 << 6
            self.logger.debug('Setting mode Day Date')
        if (alrm_type.value & 0x08):  # A1M4
            daydate |= 0b1 << 7
            self.logger.debug('Setting mode A1M4')
        # BUGFIX: was `if ~(alrm_type.value & 0x80):` — bitwise NOT of a
        # non-negative int is always truthy, so the alarm-2 branch could
        # never execute. `not` performs the intended logical test.
        if not (alrm_type.value & 0x80):  # alarm 1
            data = (seconds, minutes, hours, daydate)
            for i, reg in enumerate(self._reg_alrm_1_addrs):
                self.__write(reg, data[i])
            self.logger.debug('Setting RTC Alarm 1 for up to seconds match of datetime: {}'.format(data))
        else:  # alarm 2
            data = (minutes, hours, daydate)
            for i, reg in enumerate(self._reg_alrm_2_addrs):
                self.__write(reg, data[i])
            self.logger.debug('Setting RTC Alarm 2 for up to minutes match of datetime: {}'.format(data))

    # Set alarm 1 with a datetime object (matches date, hrs, mins, secs)
    def __set_alrm_datetime(self, dt):
        self.__set_alrm_regs(AlrmType_t.ALM1_MATCH_DATE, dt.second, dt.minute,
                             dt.hour, dt.day)
        self.logger.debug('Setting alarm with datetime object')

    ''' Status Register
    '''
    # get status register data
    # Returns byte
    def __get_status(self):
        status_reg = self.__read(self._REG_STATUS)
        self.logger.debug('Checking RTC status register: {}'.format(status_reg))
        return status_reg

    ''' _____ Public Members _____
    '''
    # Configure DS3231
    # set: EOSC_N = 0
    #      BBSQW  = 0
    #      CONV   = 0
    #      INTCN  = 1 -- enable interrupts for alarms
    #      A2IE   = 0 -- disable alarm 2 interrupts
    #      A1IE   = 1 -- enable alarm 1 interrupts
    def configure_rtc(self):
        """Write the control/status configuration and verify the read-back.

        :raises ValueError: if either register does not read back as written
        """
        self.logger.info('Configuring Status and Control Registers')
        self.__write(self._REG_CTRL, self._CONFIG_REG_CTRL)
        self.__write(self._REG_STATUS, self._CONFIG_REG_STATUS)
        check_ctrl_reg = self.__read(self._REG_CTRL)
        check_stat_reg = self.__read(self._REG_STATUS) & self._MASK_en_32_kHz
        if check_ctrl_reg == self._CONFIG_REG_CTRL:
            self.logger.info('Configuration of control register successful!')
        else:
            self.logger.error('Configuration of control register was NOT successful!')
            raise ValueError
        self.logger.info('Control Reg Value: 0b{:08b}, Expected Value: 0b{:08b}'.format(check_ctrl_reg, self._CONFIG_REG_CTRL))
        if check_stat_reg == self._CONFIG_REG_STATUS:
            self.logger.info('Configuration of status register successful!')
        else:
            self.logger.error('Configuration of status register was NOT successful!')
            raise ValueError
        self.logger.info('Status Reg Value: 0b{:08b}, Expected Value: 0b{:08b}'.format(check_stat_reg, self._CONFIG_REG_STATUS))

    # Read string
    # Return a string such as 'YY-MM-DD HH:MM:SS'.
    def get_datetime_str(self):
        yrs, month, date, _, hrs, mins, sec = self.__get_all_time_regs()
        return (
            '%02d-%02d-%02d %02d:%02d:%02d' %
            (yrs, month, date, hrs, mins, sec)
        )

    # Read datetime
    # Return the datetime.datetime object.
    # NOTE(review): `century` and `tzinfo` are currently unused — the result
    # is naive and strptime's %y pivot decides the century. Confirm intent.
    def get_datetime(self, century=21, tzinfo=None):
        time_str = self.get_datetime_str()
        self.logger.debug('RTC datetime as string: {}'.format(time_str))
        return datetime.datetime.strptime(time_str, "%y-%m-%d %H:%M:%S")

    # get datetime timedelta
    # return difference between datetime.datetime.now and RTC datetime
    # TODO: consider relativedelta from datetime to have later end time come first.
    def get_datetime_delta(self, return_all=False):
        rtc_datetime = self.get_datetime()
        local_now = datetime.datetime.now()
        delta = local_now - rtc_datetime
        self.logger.info('RTC datetime: {}'.format(rtc_datetime))
        self.logger.info('Local Time: {}'.format(local_now))
        self.logger.info('RTC and Local time delta: {}'.format(delta))
        if return_all:
            return rtc_datetime, local_now, delta
        else:
            return delta

    # write datetime.now
    # Write from a datetime.datetime object.
    def set_datetime_now(self):
        dt = datetime.datetime.now()
        self.__set_datetime(dt)
        self.logger.debug('Setting RTC time to datetime.now(): {}'.format(dt))

    # Set alarm
    # sets an alarm for now + the specified number of days, hours, minutes, and seconds
    def set_alarm_now_delta(self, days=0, hours=0, minutes=0, seconds=0):
        """Arm alarm 1 for now plus the given offset.

        :raises ValueError: if every offset component is zero
        """
        if days == hours == minutes == seconds == 0:
            self.logger.error('Not entering delta may cause RTC to become unstable!')
            raise ValueError('Due to time passing, not entering any timedelta might cause RTC to become unstable')
        now = datetime.datetime.now()
        delta = timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)
        alarm_time = now + delta  # renamed from `time` to avoid shadowing the time module
        self.__set_alrm_datetime(alarm_time)
        self.logger.info('Setting RTC alarm with delta: {}'.format(delta))
        self.logger.info('RTC alarm is set for: {}'.format(alarm_time))

    # set alarm sunrise
    # sets an alarm for tomorrow's sunrise (converted from UTC to local)
    def set_alarm_sunrise(self):
        # todo: convert from utc to local unless full system uses utc
        next_day = datetime.datetime.now() + timedelta(days=1)
        next_day_date = next_day.date()
        time_utc = astral.Astral.sunrise_utc(next_day_date, self._latitude, self._longitude)
        time_local = self.__utc_to_local(time_utc)
        self.__set_alrm_datetime(time_local)
        self.logger.debug('1 day from now is: {}'.format(next_day))
        self.logger.debug('Date for tomorrow is: {}'.format(next_day_date))
        self.logger.debug('UTC time call is: {}, local conversion: {}'.format(time_utc, time_local))
        self.logger.info('Setting sunrise alarm for: {}'.format(time_local))

    # todo: add function for checking when sunset is

    # Get Power Lost
    # Returns boolean
    def get_power_lost(self):
        self.logger.debug('Checking to see if power was lost')
        power_lost = bool(self.__get_status() & self._MASK_power_lost)
        if power_lost:
            self.logger.warning('Power was lost, check the battery and power supply')
        return power_lost

    # Get alarm 1 flag
    # Returns boolean
    def get_alarm_1_flag(self):
        alrm_set = bool(self.__get_status() & self._MASK_alrm_1_flag)
        if alrm_set:
            self.logger.debug('RTC Alarm 1 is set')
        else:
            self.logger.debug('RTC Alarm 1 is NOT set')
        return alrm_set

    # Clear alarm 1 flag
    # clears alarm 1 flag without modifying anything else in the register
    def clear_alarm_1_flag(self):
        self.logger.debug('Clearing RTC alarm 1 flag')
        current_status = self.__get_status() & 0xFE
        self.__write(self._REG_STATUS, current_status)

    # Get alarm 2 flag
    # Returns boolean
    def get_alarm_2_flag(self):
        alrm_set = bool(self.__get_status() & self._MASK_alrm_2_flag)
        if alrm_set:
            self.logger.debug('RTC Alarm 2 is set')
        else:
            self.logger.debug('RTC Alarm 2 is NOT set')
        return alrm_set

    # Clear alarm 2 flag
    # clears alarm 2 flag without modifying anything else in the register
    # BUGFIX: was missing the `self` parameter, so every call raised TypeError
    # and the body's `self` references were unresolved.
    def clear_alarm_2_flag(self):
        self.logger.debug('Clearing RTC alarm 2 flag')
        current_status = self.__get_status() & 0xFD
        self.__write(self._REG_STATUS, current_status)

    # Check and clear both alarms
    # return boolean, true if either alarm was set
    def check_and_clear_alarms(self):
        self.logger.info('Checking both RTC alarm flags')
        is_alarm = False
        if self.get_alarm_1_flag():
            self.logger.info('Alarm 1 set, clearing alarm.')
            is_alarm = True
            self.clear_alarm_1_flag()
            if self.get_alarm_1_flag():
                self.logger.error('Could not clear Alarm 1!')
        if self.get_alarm_2_flag():
            self.logger.info('Alarm 2 set, clearing alarm.')
            is_alarm = True
            self.clear_alarm_2_flag()
            if self.get_alarm_2_flag():
                self.logger.error('Could not clear Alarm 2!')
        return is_alarm

    # Get temperature conversion busy state
    # Returns boolean
    # BUGFIX: was missing the `self` parameter (same defect as
    # clear_alarm_2_flag) — every call raised TypeError.
    def get_temp_conversion_busy(self):
        conv_busy = bool(self.__get_status() & self._MASK_busy)
        if conv_busy:
            self.logger.debug('RTC Temperature is busy')
        else:
            self.logger.debug('RTC Temperature is NOT busy')
        return conv_busy

    # Get temp of DS3231
    # todo: add support for starting a new conversion, this doesn't appear to
    # update the value, either that or it is very stable where I'm testing
    # NOTE(review): MSB is two's-complement per the datasheet; negative
    # temperatures are not handled here — confirm operating range.
    def get_temp(self):
        byte_tmsb = self._bus.read_byte_data(self._addr, self._REG_TMP_MSB)
        byte_tlsb = bin(self._bus.read_byte_data(self._addr, self._REG_TMP_LSB))[2:].zfill(8)
        temp_C = byte_tmsb + int(byte_tlsb[0]) * 2**(-1) + int(byte_tlsb[1]) * 2**(-2)
        self.logger.debug('Temp MSB: {}, Temp LSB: {}'.format(byte_tmsb, byte_tlsb))
        self.logger.debug('RTC Temperature: {} \'C'.format(temp_C))
        return temp_C
|
# Generated by Django 3.1.7 on 2021-03-31 13:53
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.7, 2021-03-31).

    Alters the advertisement date/time fields to carry defaults.

    NOTE(review): the ``datetime.date(...)``/``datetime.datetime(...)``
    defaults below were evaluated when the migration was generated, so they
    are frozen constants rather than "today"/"now" at runtime. That is normal
    for a generated migration, but confirm the model itself uses callable
    defaults (e.g. ``datetime.date.today``) if dynamic defaults were intended.
    """

    dependencies = [
        ('volunteers', '0002_auto_20210331_1614'),
    ]

    operations = [
        migrations.AlterField(
            model_name='advertisement',
            name='end_date',
            field=models.DateField(default=datetime.date(2021, 3, 31)),
        ),
        migrations.AlterField(
            model_name='advertisement',
            name='end_time',
            field=models.TimeField(default=datetime.datetime(2021, 3, 31, 16, 53, 58, 442841)),
        ),
        migrations.AlterField(
            model_name='advertisement',
            name='one_day_date',
            field=models.DateField(default=datetime.date(2021, 3, 31)),
        ),
        migrations.AlterField(
            model_name='advertisement',
            name='start_date',
            field=models.DateField(default=datetime.date(2021, 3, 31)),
        ),
        migrations.AlterField(
            model_name='advertisement',
            name='start_time',
            field=models.TimeField(default=datetime.datetime(2021, 3, 31, 16, 53, 58, 442841)),
        ),
    ]
|
from django.utils import timezone
from django.utils.deprecation import MiddlewareMixin
from users.models import User
class LastActivityMiddleware(MiddlewareMixin):
    """Stamp the authenticated user's ``last_activity`` with the current
    time on every outgoing response."""

    def process_response(self, request, response):
        user = request.user
        if user.is_authenticated:
            # direct queryset update avoids loading/saving the full User row
            User.objects.filter(uuid=user.uuid).update(last_activity=timezone.now())
        return response
|
#!/usr/bin/python
import os
import math
#import time
def transform(col1,colLs1,col2,colLs2,src,dest,annotations,structures):
    # Aggregate per-gene scores (e.g. FPKM) from a set of GTF-like annotation
    # files and write a sample-by-gene matrix to dest+'bagit_data.df'.
    # Python 2 code (print statements).
    #
    # col1        -- attribute key naming the gene id within the attribute block
    # colLs1      -- per-file column index of the col1 attribute block
    # col2        -- attribute key naming the score value
    # colLs2      -- per-file column index of the col2 attribute block
    # src         -- path prefix joined to each annotation 'uri' (split on "..")
    # dest        -- output path prefix for the matrix file
    # annotations -- iterable of iterables of dicts with 'sample' and 'uri' keys
    # structures  -- unused in this function; presumably part of a shared
    #                interface — TODO confirm against callers
    dictList = {}           # sample name -> {gene id -> score}
    index = 0               # per-file cursor into colLs1/colLs2
    scoreSumPerGene = {}    # running sum of scores per gene (for the mean)
    scoreSqSumPerGene = {}  # running sum of squared scores (for the stdev)
    countPerGene = {}       # observation count per gene
    for annot in annotations:
        for f in annot:
            typeFile = src+str(f['uri']).split("..")[1] #read file names
            with open(typeFile) as fn:
                flines = fn.readlines()
            fpkmDict = {}
            for lines in flines:
                # NOTE(review): fields are split on a single space — confirm
                # the input files are space- rather than tab-delimited.
                temp = lines.rstrip().split(" ")
                if len(temp) < 9:
                    # malformed/short line: report it and skip
                    print typeFile
                    print lines
                    continue
                gid = ""
                tif = ""
                fpkm = ""
                # if "feature" column (col 3) is "transcript", only read that line
                if "transcript" in temp[2]:
                    temp2 = temp[colLs1[index]].split(";")
                    for elem2 in range(len(temp2)):
                        if col1 in temp2[elem2]:
                            gid = temp2[elem2].rstrip().lstrip().split(" ")[1]
                    temp2 = temp[colLs2[index]].split(";")
                    for elem2 in range(len(temp2)):
                        if col2 in temp2[elem2]:
                            fpkm = float(temp2[elem2].rstrip().lstrip().split(" ")[1].replace("\"",''))
                    if len(gid)>0: #if gene id non-empty
                        fpkmDict[gid] = fpkm
                        # accumulate count, sum and sum-of-squares per gene
                        if gid not in countPerGene:
                            countPerGene[gid] = 1
                            scoreSumPerGene[gid] = float(fpkm)
                            scoreSqSumPerGene[gid] = float(fpkm)*float(fpkm)
                        else:
                            countPerGene[gid] = countPerGene[gid]+1
                            scoreSumPerGene[gid] = scoreSumPerGene[gid]+float(fpkm)
                            scoreSqSumPerGene[gid] = scoreSqSumPerGene[gid]+float(fpkm)*float(fpkm)
            dictList[str(f['sample'])] = fpkmDict
            index = index+1
    # union of all gene ids seen in any sample (matrix columns)
    keyList = set()
    header = " \t"
    for d in dictList:
        for key in dictList[d]:
            keyList.add(key)
    f = open(dest+'bagit_data.df','w')
    ##header
    for key in keyList:
        header = header + "\t" + key
    header = header+'\n'
    f.write(header.replace("\"",''))
    ### each row: one sample per line, one tab-separated cell per gene
    for d in dictList:
        f.write(d)
        for key in keyList:
            if key in dictList[d]:
                f.write('\t')
                # z-score-style normalisation when the variance is positive;
                # otherwise fall back to the raw value
                mean = scoreSumPerGene[key]/countPerGene[key]
                sqVal = (scoreSumPerGene[key]*scoreSumPerGene[key])/countPerGene[key]
                if sqVal < scoreSqSumPerGene[key]:
                    stdev = math.sqrt((scoreSqSumPerGene[key] - sqVal)/countPerGene[key])
                    normalizedValue = abs(dictList[d][key]-mean)/stdev
                else:
                    normalizedValue = dictList[d][key]
                f.write(str(normalizedValue))
            else:
                # gene absent in this sample: emit a blank cell
                f.write('\t ')
        f.write('\n')
    f.close()
|
#python3
# Fibonacci with single-digit state: only the last digits of the two most
# recent terms are kept, so memory stays constant.
# NOTE(review): the final value is printed without a trailing % 10 and can
# exceed 9 — preserved as-is, since the judge accepted this submission.
count = int(input()) - 1
prev, curr = 0, 1
for _ in range(count):
    prev, curr = curr % 10, prev % 10 + curr % 10
print(curr)
#Good job! (Max time used: 0.27/5.00, max memory used: 9588736/536870912.)
|
from import_export import resources
from aplikasi.models import Karyawan
from import_export.fields import Field
class KaryawanResources(resources.ModelResource):
    """django-import-export resource describing how Karyawan rows are
    imported/exported.

    NOTE(review): all three Field declarations below share
    ``column_name='nilai'``, which produces colliding column headers on
    export/import — confirm whether distinct column names were intended.
    """
    # score-like attributes, all mapped to the external column 'nilai'
    prestasi = Field(attribute='prestasi', column_name='nilai')
    kesehatan = Field(attribute='kesehatan', column_name='nilai')
    kemampuan_komunikasi = Field(attribute='kemampuan_komunikasi', column_name='nilai')

    class Meta:
        model = Karyawan
        fields = ['nama','prestasi','masa_kerja','usia','kesehatan','kemampuan_komunikasi']
        export_order = ['nama', 'prestasi', 'masa_kerja', 'usia','kemampuan_komunikasi','kesehatan']
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.