text stringlengths 38 1.54M |
|---|
import serial
import rospy
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion, Point
import math
class RallyCar:
    """ROS node logic that drives a car along a list of waypoints.

    Subscribes to "cur_pos" for the car's position, advances through
    `way_points` as each one is reached, and sends Ackermann-style
    commands over a serial link.
    """

    def __init__(self, way_points):
        rospy.Subscriber("cur_pos", Point, self.get_cur_pos)
        self.cur_x = 0
        self.cur_y = 0
        # Distance (same units as the pose) below which a waypoint
        # counts as reached.
        self.thread_hold = 1
        self.rate = rospy.Rate(150)
        self.way_points = way_points
        self.way_point_idx = 0
        self.way_point = self.way_points[self.way_point_idx]
        self.dist = None
        # PD-controller state.  The original get_velocity/get_steering_angle
        # read `speed`, `prev_error`, `angle`, `prev_angle` as undefined
        # locals, which raised NameError on the first call.
        self.speed = 0.0
        self.prev_speed_error = 0.0
        self.angle = 0.0
        self.prev_angle = 0.0
        self.prev_angle_error = 0.0
        # Serial port handle, opened lazily once (not per send).
        self.serial_port = None

    def dist_to_goal(self):
        """Euclidean distance from the current position to the active waypoint."""
        return math.sqrt((self.cur_x - self.way_point[0]) ** 2 + (self.cur_y - self.way_point[1]) ** 2)

    def update_way_point(self):
        """Advance to the next waypoint once the current one is reached.

        The original reset the index to 0 here, so the car forever
        chased the first waypoint; we advance and clamp at the last one.
        """
        if self.dist_to_goal() < self.thread_hold:
            if self.way_point_idx < len(self.way_points) - 1:
                self.way_point_idx += 1
            self.way_point = self.way_points[self.way_point_idx]

    def drive_car(self):
        """Main control loop: update the target and send commands at the
        configured rate until ROS shuts down."""
        while not rospy.is_shutdown():
            self.update_way_point()
            speed, _ = self.get_velocity()
            angle, _ = self.get_steering_angle()
            # The original called send_serial() with no arguments, which
            # raised TypeError (the method requires angle and pedal).
            self.send_serial(angle, speed)
            self.rate.sleep()

    def get_cur_pos(self, data):
        """Subscriber callback: cache the latest (x, y) position."""
        self.cur_x = data.x
        self.cur_y = data.y

    def get_velocity(self):
        """One PD step toward the target speed; returns (speed, error)."""
        k_p = 10
        k_d = 80
        v_target = 200
        error = v_target - self.speed
        self.speed = k_p * error + k_d * (error - self.prev_speed_error)
        self.prev_speed_error = error
        return self.speed, error

    def get_steering_angle(self):
        """One PD step on the heading; returns (angle, error).

        NOTE(review): mirrors the original control law, but the original
        read undefined locals; the target-heading computation from the
        active waypoint still needs to be wired in.
        """
        k_p = 10
        k_d = 80
        error = self.angle - self.prev_angle
        control_angle = k_p * error + k_d * (error - self.prev_angle_error)
        self.prev_angle = self.angle
        self.prev_angle_error = error
        self.angle = self.angle + control_angle
        return self.angle, error

    def send_serial(self, steering_angle, gas_pedal):
        """Send one Ackermann command frame, e.g. "A+0100+0200".

        TODO: consider the sign of negative values; %04d keeps the
        field width but the '+' separator is hard-coded.
        """
        serial_signal = "A+{}+{}".format("%04d" % steering_angle, "%04d" % gas_pedal)
        if self.serial_port is None:
            # Open the port once; the original re-opened it on every call.
            self.serial_port = serial.Serial("/dev/ttyACM0", baudrate=115200)
        # construct and send the Ackermann steering commands to Car
        self.serial_port.write(serial_signal)

    def run(self):
        """Hand control to ROS; subscriber callbacks keep state fresh."""
        rospy.spin()
if __name__ == "__main__":
    # Start the ROS node and hand it the pre-recorded track waypoints.
    rospy.init_node('drive_rally_car', anonymous=True)
    # Waypoints (x, y) in map coordinates, captured along the track.
    way_points = [
        [1.00882411003, -0.110736370087],
        [4.99138975143, -0.115155220032],
        [9.00156021118, -0.103336811066],
        [12.9005899429, -0.0280865430832],
        [18.3351726532, 0.169030070305],
        [23.3436813354, 0.330986976624],
        [29.8973789215, 0.398444473743]
    ]
    rally_car = RallyCar(way_points=way_points)
    # NOTE(review): only run()/rospy.spin() is invoked here; drive_car()
    # (the command loop) is never started -- confirm this is intended.
    rally_car.run()
|
from django.conf.urls import url
from .views import DailyCountExportCRUDL, IncomingPerDayChart, MostUsedLabelsChart, RepliesPerMonthChart

# CRUDL-generated daily-count export URLs, plus the three chart endpoints.
urlpatterns = DailyCountExportCRUDL().as_urlpatterns()
urlpatterns += [
    url(r"^incoming_chart/$", IncomingPerDayChart.as_view(), name="statistics.incoming_chart"),
    url(r"^replies_chart/$", RepliesPerMonthChart.as_view(), name="statistics.replies_chart"),
    url(r"^labels_pie_chart/$", MostUsedLabelsChart.as_view(), name="statistics.labels_pie_chart"),
]
|
import segmentation_models_pytorch as smp

# U-Net with a ResNet-34 encoder, 3 output classes, softmax activation.
model = smp.Unet('resnet34', classes=3, activation='softmax')
# Collect input lines until the user types "done" (case-insensitive),
# then print the collected items and how many there were.
stuff = []
# The original looped on `while x is True` with a flag that was never
# changed; `while True` + break expresses the same control flow directly.
while True:
    y = input("")
    if y.lower() == 'done':
        print(stuff)
        print(len(stuff))
        break
    stuff.append(y)
|
import torch
from base import utils as ut
from base.models import nns
from torch import nn
from torch.nn import functional as F
import numpy as np
class MultipleTimestepLSTM(nn.Module):
    """Longitudinal classifier: a shared (V)AE encoder extracts one
    feature vector per timestep, a small FC layer projects it, an RNN
    (LSTM or GRU) runs over timesteps, and a linear head produces the
    per-timestep outputs.

    Variable-length timelines are handled via `mask` and packed
    sequences when skip_missing=True.
    """

    def __init__(self, in_num_ch=1, img_size=(64, 64, 64), z_dim=512, inter_num_ch=16, fc_num_ch=16, lstm_num_ch=16, kernel_size=3, name='LSTM',
                 conv_act='relu', requires_grad=True, fc_act='tanh', num_cls=2, num_timestep=5, skip_missing=True, init_lstm=False, rnn_type='GRU', fe_arch='AE', vae=None):
        super(MultipleTimestepLSTM, self).__init__()
        self.name = name
        self.z_dim = z_dim
        self.fe_arch = fe_arch
        # Both branches use the externally supplied (V)AE as the feature
        # extractor; its latent size is hard-coded to 512 here.
        if fe_arch == 'AE':
            self.feature_extractor = vae
            num_feat = 512
        elif fe_arch == 'VAE':
            self.feature_extractor = vae
            num_feat = 512
        if fc_act == 'tanh':
            fc_act_layer = nn.Tanh()
        elif fc_act == 'relu':
            fc_act_layer = nn.ReLU()
        else:
            raise ValueError('No implementation of ', fc_act)
        # NOTE(review): no submodules have been registered yet at this
        # point, so this loop freezes nothing -- presumably it was meant
        # to freeze the feature extractor; confirm.
        if requires_grad == False:
            for p in self.parameters():
                p.requires_grad = False
        # Binary classification (num_cls==2) and regression (num_cls==0)
        # both use a single output unit.
        if num_cls == 2 or num_cls == 0:
            num_output = 1
        else:
            num_output = num_cls
        self.num_cls = num_cls
        self.dropout_rate = 0.1
        self.skip_missing = skip_missing
        # Project the 512-d feature down to fc_num_ch before the RNN.
        self.fc1 = nn.Sequential(
            nn.Linear(num_feat, fc_num_ch),
            fc_act_layer)
        if rnn_type == 'LSTM':
            self.lstm = nn.LSTM(input_size=fc_num_ch, hidden_size=lstm_num_ch, num_layers=1,
                                batch_first=True)
        elif rnn_type == 'GRU':
            self.lstm = nn.GRU(input_size=fc_num_ch, hidden_size=lstm_num_ch, num_layers=1,
                               batch_first=True)
        else:
            raise ValueError('No RNN Layer!')
        self.fc3 = nn.Linear(lstm_num_ch, num_output)
        if init_lstm:
            self.init_lstm()

    def init_lstm(self):
        """Initialize RNN weights: biases to 1, input-to-hidden weights
        Xavier-normal, hidden-to-hidden weights orthogonal."""
        for name, param in self.lstm.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 1.0)
            elif 'weight_ih' in name:
                nn.init.xavier_normal_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)

    def forward(self, x, mask):
        """Run the full pipeline.

        x: (bs, ts, ...) stack of per-timestep volumes; mask: (bs, ts)
        with nonzero entries for present timesteps -- shapes per the
        inline comments below; TODO confirm against callers.

        Returns [last_output, per_timestep_output, fc_features] when
        skip_missing, else [last_output, per_timestep_output].
        """
        bs, ts = x.shape[0], x.shape[1]
        # Fold the batch and time axes together for the encoder.
        x = torch.cat([x[b, ...] for b in range(bs)], dim=0)  # (bs,ts,32,64,64) -> (bs*ts,32,64,64)
        x = x.unsqueeze(1)  # add channel dim: (bs*ts,1,32,64,64)
        if self.fe_arch == 'AE':
            out_z = self.feature_extractor.enc.encode_nkl(x)  # (bs*ts,512)
        elif self.fe_arch == 'VAE':
            z_param_m, z_param_v = self.feature_extractor.enc.encode(x)
            # Use the posterior mean rather than sampling.
            out_z = z_param_m
        fc1 = self.fc1(out_z)
        fc2_concat = fc1.view(bs, ts, -1)  # back to (bs, ts, 16)
        if self.skip_missing:
            # Packed-sequence path: sort by sequence length, pack, run
            # the RNN, unpack, then restore the original batch order.
            num_ts_list = mask.sum(1)
            # NOTE(review): `pdb` is not imported in this file, so this
            # debug hook raises NameError if a subject has 0 timesteps.
            if (num_ts_list == 0).sum() > 0:
                pdb.set_trace()
            _, idx_sort = torch.sort(num_ts_list, dim=0, descending=True)
            _, idx_unsort = torch.sort(idx_sort, dim=0)
            num_ts_list_sorted = num_ts_list.index_select(0, idx_sort)
            fc2_concat_sorted = fc2_concat.index_select(0, idx_sort)
            fc2_packed = torch.nn.utils.rnn.pack_padded_sequence(fc2_concat_sorted, num_ts_list_sorted, batch_first=True)
            lstm_packed, _ = self.lstm(fc2_packed)
            lstm_sorted, _ = torch.nn.utils.rnn.pad_packed_sequence(lstm_packed, batch_first=True)
            lstm = lstm_sorted.index_select(0, idx_unsort)
        else:
            lstm, _ = self.lstm(fc2_concat)  # lstm: (bs, ts, 16)
        # pad_packed_sequence only pads up to the longest present
        # sequence; right-pad with zeros so the output always covers ts.
        if lstm.shape[1] != ts:
            pad = torch.zeros(bs, ts - lstm.shape[1], lstm.shape[-1])
            lstm = torch.cat([lstm, pad.cuda()], dim=1)
        output = self.fc3(lstm)
        if self.num_cls == 0:
            # Regression mode: constrain predictions to be non-negative.
            output = F.relu(output)
        if self.skip_missing:
            # Pick each subject's last *valid* timestep as the final output.
            tpm = [output[i, num_ts_list[i].long() - 1, :].unsqueeze(0) for i in range(bs)]
            output_last = torch.cat(tpm, dim=0)
            return [output_last, output, fc2_concat]
        else:
            return [output[:, -1, :], output]
class Siamese_Network_v2(nn.Module):
    """Siamese wrapper over a (V)AE: reconstructs both images of a pair
    and adds a cosine-based regularizer between their latent codes."""

    def __init__(self, vae, relu_loss, name="SiameseNet_weight_AE", lambda_relu=2, z_dim=16, device='cpu'):
        super().__init__()
        self.name = name
        self.z_dim = z_dim
        self.device = device
        self.name = name
        self.vae = vae
        # Callable producing the latent-distance term between two codes.
        self.relu_loss = relu_loss
        # Weight of the latent regularizer relative to the AE loss.
        self.lambda_cos = 1

    def loss_nkl(self, x):
        """Paired AE loss plus the latent regularizer.

        x holds the pair along dim 1, i.e. (batch, 2, C, H, W) --
        presumably; confirm against callers.  Returns (total loss,
        summaries for the first element, detached regularizer loss,
        detached reconstruction loss).
        """
        x1 = torch.unsqueeze(x[:, 0, :, :, :], axis=1)
        x2 = torch.unsqueeze(x[:, 1, :, :, :], axis=1)
        vae_loss_x1, summaries_1, z1 = self.vae.loss_nkl(x1)
        vae_loss_x2, summaries_2, z2 = self.vae.loss_nkl(x2)
        # relu_loss appears to return a negated cosine similarity, so
        # 1 + (...) shifts it to be non-negative -- verify against the
        # relu_loss implementation.
        ncos12 = self.relu_loss(z1, z2)
        loss_cos = 1 + (ncos12)
        print('loss_cos:', loss_cos)  # debug trace
        vae_loss = vae_loss_x1 + vae_loss_x2
        reg_loss = self.lambda_cos * loss_cos.mean()
        print('vae_loss:', vae_loss)
        print('reg_loss:', reg_loss)
        loss = vae_loss + reg_loss
        return loss, summaries_1, reg_loss.detach(), vae_loss.detach()
''' Please look at loss_nkl and negatove_elbo_bound_ae'''
class VAE3d(nn.Module):
    """3D (variational) autoencoder with a pluggable encoder/decoder
    architecture chosen by name from base.models.nns.

    Two objectives are provided:
      * loss / negative_elbo_bound_cos -- VAE ELBO with a KL weight.
      * loss_nkl / negative_elbo_bound_ae -- plain AE (no-KL) objective.
    """

    def __init__(self, nn='v8', name='vae3d', z_dim=16, device='cpu', lambda_kl=0.01):
        super().__init__()
        self.name = name
        self.z_dim = z_dim
        # The `nn` parameter shadows the torch.nn import locally: it is
        # the name of an architecture module inside base.models.nns.
        nn = getattr(nns, nn)
        self.enc = nn.Encoder(self.z_dim, device=device)
        self.dec = nn.Decoder(self.z_dim, device=device)
        # Weight of the KL term in the ELBO (beta-VAE style).
        self.lambda_kl = lambda_kl
        # Set prior as fixed parameter attached to Module
        self.z_prior_m = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.z_prior_v = torch.nn.Parameter(torch.ones(1), requires_grad=False)
        self.z_prior = (self.z_prior_m, self.z_prior_v)

    def negative_elbo_bound_cos(self, x):
        """
        Computes the Evidence Lower Bound, KL and, Reconstruction costs
        Args:
            x: tensor: (batch, dim): Observations
        Returns:
            nelbo: tensor: (): Negative evidence lower bound
            kl: tensor: (): ELBO KL divergence to prior
            rec: tensor: (): ELBO Reconstruction term
        """
        z_param_m, z_param_v = self.enc.encode(x)
        # Reparameterized sample from the approximate posterior.
        z = ut.sample_gaussian(z_param_m, z_param_v)
        xhat = self.dec.decode(z)
        rec = ut.mseloss(x, xhat)
        kl = ut.kl_normal(z_param_m, z_param_v, self.z_prior_m, self.z_prior_v)
        # KL is down-weighted by lambda_kl.
        nelbo = rec + self.lambda_kl * kl
        nelbo = nelbo.mean()
        kl = kl.mean()
        rec = rec.mean()
        print(nelbo, kl, rec)  # debug trace
        return nelbo, kl, rec, z_param_m, z_param_v

    def negative_elbo_bound_ae(self, x):
        """
        Using in loss_nkl
        Args:
            x: tensor: (batch, dim): Observations
        Returns:
            nelbo: tensor: (): Negative evidence lower bound
            kl: tensor: (): ELBO KL divergence to prior
            rec: tensor: (): ELBO Reconstruction term
            z: tensor: (): encoded representation
        """
        # Deterministic (no-KL) encoding path.
        z = self.enc.encode_nkl(x)
        xhat = self.dec.decode(z)
        rec = ut.mseloss(x, xhat)
        nelbo = rec
        nelbo = nelbo.mean()
        # No KL term in the AE objective; zeros keep the return shape
        # consistent with the VAE path.
        kl = 0
        z_param_m = z_param_v = 0
        print(rec)  # debug trace
        rec = rec.mean()
        print(nelbo, rec)
        return nelbo, kl, rec, z

    def loss(self, x):
        """VAE training loss plus a summaries dict for logging."""
        nelbo, kl, rec, z_param_m, z_param_v = self.negative_elbo_bound_cos(x)
        loss = nelbo
        summaries = dict((
            ('train/loss', nelbo),
            ('gen/elbo', -nelbo),
            ('gen/kl_z', kl),
            ('gen/rec', rec),
        ))
        return loss, summaries, z_param_m, z_param_v

    def loss_nkl(self, x):
        """AE (no-KL) training loss plus a summaries dict for logging."""
        nelbo, kl, rec, z = self.negative_elbo_bound_ae(x)
        loss = nelbo
        summaries = dict((
            ('train/loss', nelbo),
            ('gen/elbo', -nelbo),
            ('gen/kl_z', kl),
            ('gen/rec', rec),
        ))
        return loss, summaries, z
|
import plotly as py  # import plotly and alias it as py
import plotly.graph_objs as go
# -------------pre def
# Shortcut for offline (local-HTML) plotting.
pyplt = py.offline.plot
import pandas as pd

# Load Apple OHLC price data, indexed by parsed dates.
df = pd.read_csv(r'dat/appl.csv', index_col=['date'], parse_dates=['date'])
trace = go.Ohlc(
    x=df.index,
    open=df.open,
    high=df.high,
    low=df.low,
    close=df.close,
)
data = [trace]
# Render the OHLC chart to a standalone HTML file.
pyplt(data, filename=r'tmp/first_ohlc.html')
|
# -*- coding:utf-8 -*-
import errno
import os
from random import randint
from const import *
def make_static_dir(path):
    """Create *path* (and any missing parents) if it does not exist.

    An already-existing directory is silently accepted; every other
    OSError propagates to the caller.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        already_there = err.errno == errno.EEXIST and os.path.isdir(path)
        if not already_there:
            raise
def getSourceId(source):
    """Return the LINE identifier for a message source.

    Dispatches on source.type ('user' / 'group' / 'room') to the
    matching id attribute; any other type raises NotFoundSourceError.
    """
    attr_by_type = {
        'user': 'user_id',
        'group': 'group_id',
        'room': 'room_id',
    }
    attr = attr_by_type.get(source.type)
    if attr is None:
        raise NotFoundSourceError()
    return getattr(source, attr)


class NotFoundSourceError(Exception):
    """Raised when a message source has an unrecognized type."""
    pass
# ImageMagick -geometry offsets ("+x+y") for the 12 vote cells, laid
# out as a 4-column by 3-row grid on the background template.
entry = {
    '0': '+75+75',
    '1': '+313+75',
    '2': '+551+75',
    '3': '+789+75',
    '4': '+75+304',
    '5': '+313+304',
    '6': '+551+304',
    '7': '+789+304',
    '8': '+75+533',
    '9': '+313+533',
    '10': '+551+533',
    '11': '+789+533',
}
def generate_voting_result_image(data):
    """Render the voting-result image for *data* into a fresh temp dir.

    Composites the 12 vote cells one after another (each step draws on
    the previous intermediate result), then shrinks and sharpens the
    final image in place.  Returns the random directory number that
    identifies the rendered result.
    """
    number, path = _tmpdir()
    for i in range(0, 12):
        cmd = _generate_cmd(i, data, path)
        os.system(cmd)
    # result_11.png is the final composite produced by the loop above.
    resize_cmd = 'mogrify -resize 50% -unsharp 2x1.4+0.5+0 -colors 65 -quality 100 -verbose ' + path + '/result_11.png'
    os.system(resize_cmd)
    return number
def _generate_cmd(position, data, tmp):
    """Build the ImageMagick ``composite`` command for one vote cell.

    position: cell index 0-11; cell 0 composites onto the background
    template, later cells onto the previous intermediate image.
    data: mapping of cell index (as str) to the vote-count string.
    tmp: working directory that holds the intermediate result images.
    """
    # Use ==, not "is": identity comparison on ints relies on CPython's
    # small-int cache and is not a correctness guarantee.
    if position == 0:
        bg_file = BG_FILE_PATH
    else:
        bg_file = os.path.join(tmp, 'result_' + str(position - 1) + '.png')
    out_file = os.path.join(tmp, 'result_' + str(position) + '.png')
    # dict.has_key() is Python-2-only; "in" works on both 2 and 3.
    value = data[str(position)] if str(position) in data else str(0)
    cmd = []
    cmd.append('composite -gravity northwest -geometry')
    cmd.append(entry[str(position)])
    cmd.append('-compose over')
    cmd.append(os.path.join(IMG_PATH, 'vote_' + value + '.png'))
    cmd.append(bg_file)
    # out_file already includes tmp; the original joined tmp in a second
    # time, duplicating the directory whenever tmp was a relative path.
    cmd.append(out_file)
    return ' '.join(cmd)
def _tmpdir():
    """Create a random 4-digit working directory under TMP_ROOT_PATH.

    Returns (number, path) where number is the directory's name.
    """
    number = str(randint(1000, 9999))
    path = os.path.join(TMP_ROOT_PATH, number)
    make_static_dir(path)
    return (number, path)
|
1. Let _envRec_ be the function Environment Record for which the method was invoked.
1. If _envRec_.[[ThisBindingStatus]] is ~lexical~, return *false*.
1. If _envRec_.[[HomeObject]] has the value *undefined*, return *false*; otherwise, return *true*. |
import os
import datetime
import json
from markupsafe import Markup
import requests
from flask import render_template, redirect, request
from app import app
# The node with which our application interacts, there can be multiple
# such nodes as well.
CONNECTED_NODE_ADDRESS = "http://127.0.0.1:" + str(os.environ.get('CONNECTED_NODE_PORT'))

# Locally cached, timestamp-sorted transactions; refreshed by fetch_posts().
posts = []
def fetch_posts():
    """
    Function to fetch the chain from a blockchain node, parse the
    data and store it locally.
    """
    get_chain_address = "{}/chain".format(CONNECTED_NODE_ADDRESS)
    response = requests.get(get_chain_address)
    if response.status_code == 200:
        content = []
        chain = json.loads(response.content)
        for block in chain["chain"]:
            for tx in block["transactions"]:
                # Tag every transaction with its block's index and the
                # previous block's hash for display purposes.
                tx["index"] = block["index"]
                tx["hash"] = block["previous_hash"]
                content.append(tx)
        # Newest transactions first.
        global posts
        posts = sorted(content, key=lambda k: k['timestamp'],
                       reverse=True)
@app.route('/')
def index():
    """Home page: refresh the cached chain and render the post list."""
    fetch_posts()
    return render_template('index.html',
                           title='Linked Data Blockchain - ' + str(os.environ.get('APPLICATION_PORT')),
                           posts=posts,
                           node_address=CONNECTED_NODE_ADDRESS,
                           readable_time=timestamp_to_string)
@app.route('/visual')
def visual():
    """Visualization page: refresh the cached chain and render it."""
    fetch_posts()
    return render_template('visual.html',
                           title='Linked Data Blockchain',
                           posts=posts)
def check_ids(d_or_l, keys_list, value_list):
    """
    Recursively verify that every JSON-LD object carries a unique ID.

    Walks nested dicts/lists, collecting "@id"/"id" values into
    value_list and their keys into keys_list; the module-level ``count``
    global tracks how many dict objects were visited (callers must reset
    it to 0 first).  Returns True when all collected IDs are unique and
    every visited object supplied one.
    """
    global count
    if isinstance(d_or_l, dict):
        count += 1
        for key, val in iter(sorted(d_or_l.items())):
            if isinstance(val, (list, dict)):
                check_ids(val, keys_list, value_list)
            elif key in ("@id", "id"):
                keys_list.append(key)
                value_list.append(val)
    elif isinstance(d_or_l, list):
        for item in d_or_l:
            if isinstance(item, (list, dict)):
                check_ids(item, keys_list, value_list)
    # Unique IDs, and exactly one ID per visited object.
    return (len(value_list) == len(set(value_list))) and (count == len(keys_list))
@app.route('/submit', methods=['POST'])
def submit_textarea():
    """
    Endpoint to create a new transaction via our application.

    Reads the action/agent/reference form fields plus an uploaded
    JSON-LD file, validates that every object in the file carries a
    unique ID, and posts the assembled transaction to the connected
    node.
    """
    # WARNING: Insecure, all fields need to be checked: action type must be in "allowed list", etc.
    # Context: a form value like "req_..." selects request vs result.
    action = request.form["action_type"].split("_")
    context = "request" if action[0] == "req" else "result"
    # Action
    action_type = action[-1]
    action_name = request.form["action_name"]
    action_desc = request.form["action_desc"]
    # Linked Data
    file = request.files["data"]
    if file:
        data = json.loads(file.read())
        # Init the UID checker (check_ids counts dicts via this global).
        keys_list = []
        value_list = []
        global count
        count = 0
        # Check that every object has a UID
        if not check_ids(data, keys_list, value_list):
            return "Some objects in the JSON-LD file are missing unique IDs. IDs found: " + ", ".join([i for i in value_list])
    else:
        return "No data were send to the Blockchain. Please, upload a file."
    # Agent: Person
    person_name = request.form["person_name"]
    person_org = request.form["person_org"]
    # Reference
    ref_tx = request.form["ref_transaction"]
    ref_object = request.form["ref_object"]
    post_object = {
        "@context": ["DSP/web-ledger", "DSP/" + context],
        "@id": None,
        "@type": "StorageBlock",
        "action": {
            "@type": action_type,
            "name": action_name,
            "description": action_desc,
            "agent": {
                "@type": "Person",
                "name": person_name,
                "organization": person_org,
            },
            "transaction_id": ref_tx,
            "object_id": ref_object,
        },
        "data": data
    }
    # Submit a transaction
    new_tx_address = "{}/new_transaction".format(CONNECTED_NODE_ADDRESS)
    requests.post(new_tx_address,
                  json=post_object,
                  headers={'Content-type': 'application/json'})
    # return post_object # Debug post object
    return redirect('/')
def timestamp_to_string(epoch_time):
    """Format a Unix timestamp as e.g. '01. 06. 2020 at 12:00' (local time)."""
    moment = datetime.datetime.fromtimestamp(epoch_time)
    return moment.strftime('%d. %m. %Y at %H:%M')
def to_pretty_json(value):
    """Render *value* as 4-space-indented JSON wrapped in a Markup object
    (so Jinja does not escape it)."""
    pretty = json.dumps(value, sort_keys=False,
                        indent=4, separators=(',', ': '))
    return Markup(pretty)
app.jinja_env.filters['tojson_pretty'] = to_pretty_json
|
class Node:
    """A binary-tree node: a value plus left/right children and an
    optional level/column tag used by the traversal helpers."""

    def __init__(self, info):
        self.info = info
        self.left = None
        self.right = None
        self.level = None

    def __str__(self):
        return str(self.info)


class BinarySearchTree:
    """A plain unbalanced BST; duplicate values are silently ignored."""

    def __init__(self):
        self.root = None

    def create(self, val):
        """Insert *val*, walking from the root down to a free slot."""
        if self.root is None:
            self.root = Node(val)
            return
        node = self.root
        while True:
            if val < node.info:
                if node.left is None:
                    node.left = Node(val)
                    return
                node = node.left
            elif val > node.info:
                if node.right is None:
                    node.right = Node(val)
                    return
                node = node.right
            else:
                # Duplicate value: nothing to do.
                return
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.info (the value of the node)
"""
def level_order(root):
    """Return the node values of the tree rooted at *root* in BFS
    (level) order."""
    queue = [root]
    order = []
    while queue:
        node = queue.pop(0)
        if node.left is not None:
            queue.append(node.left)
        if node.right is not None:
            queue.append(node.right)
        order.append(node.info)
    return order
def vertical_order(root):
    """Group node values by horizontal column, visiting in BFS order.

    Keys are stringified column offsets: a left child sits one column
    left of its parent (-1), a right child one column right (+1).  Each
    node's .level attribute is stamped with its column as a side effect.
    """
    queue = [root]
    root.level = 0
    columns = {"0": [root.info]}
    while queue:
        node = queue.pop(0)
        col = node.level
        # Left child first, then right -- preserving BFS append order.
        for child, offset in ((node.left, -1), (node.right, 1)):
            if child is None:
                continue
            queue.append(child)
            child.level = col + offset
            columns.setdefault(str(child.level), []).append(child.info)
    return columns
def topView(root):
    """Print the tree's top view: for each vertical column, left to
    right, the value visible from above.

    Python 2 code: the trailing comma after print(...) suppresses the
    newline (in Python 3 the same syntax builds a one-element tuple and
    prints a newline per value).
    """
    #Write your code here
    lvl_order = level_order(root)
    vertical_order_map = vertical_order(root)
    # Columns sorted numerically, leftmost first.
    for key in sorted(map(int, vertical_order_map.keys())):
        if len(vertical_order_map[str(key)]) > 1:
            # Several nodes share this column: the first one that appears
            # in BFS order is the topmost.  NOTE(review): every node's
            # value is in lvl_order, so this membership test always
            # matches the column's first entry -- confirm intent.
            for elem in vertical_order_map[str(key)]:
                if elem in lvl_order:
                    print(elem),
                    break
        else:
            print(vertical_order_map[str(key)][0]),
    print("")
if __name__ == "__main__":
    # Python 2 entry point (raw_input/xrange): read the node count and
    # the values, build the BST, then print its top view.
    tree = BinarySearchTree()
    t = int(raw_input())
    arr = list(map(int, raw_input().split()))
    for i in xrange(t):
        tree.create(arr[i])
    topView(tree.root)
# Copyright (c) 2013 Michael Bitzi
# Licensed under the MIT license http://opensource.org/licenses/MIT
import string
import re
import time
import pwm.windows
from pwm.config import config
from pwm.ffi.xcb import xcb
from pwm.ffi.cairo import cairo
import pwm.color
import pwm.keybind
import pwm.xdg
import pwm.spawn
import pwm.bar
import pwm.root
# True while the launcher overlay is visible and owns the keyboard.
active = False
# X window and drawing resources, created in setup(), freed in destroy().
_window = None
_height = 0
_width = 0
_pixmap = None
_gc = None
_surface = None
_ctx = None
# Application list plus the current filter/selection/input state.
_applications = None
_filtered = None
_selection = 0
_typed = ""
def setup():
    """Create the launcher window and all drawing resources (pixmap,
    GC, cairo surface/context), sized like the bar on the primary
    screen."""
    global _width
    _width = xcb.screen.width_in_pixels
    global _height
    _height = pwm.bar.calculate_height()
    global _window
    # Override-redirect keeps the WM from managing this popup window.
    mask = [(xcb.CW_OVERRIDE_REDIRECT, 1),
            (xcb.CW_BACK_PIXEL, pwm.color.get_pixel(config.bar.background)),
            (xcb.CW_EVENT_MASK, xcb.EVENT_MASK_EXPOSURE)]
    _window = pwm.windows.create(pwm.bar.primary.x, pwm.bar.primary.y,
                                 _width, _height, xcb.mask(mask))
    global _pixmap
    # All drawing happens on this off-screen pixmap; _draw() copies it
    # to the window when finished.
    _pixmap = xcb.core.generate_id()
    xcb.core.create_pixmap(
        xcb.screen.root_depth,
        _pixmap,
        _window,
        _width,
        _height)
    global _gc
    _gc = xcb.core.generate_id()
    xcb.core.create_gc(_gc, xcb.screen.root,
                       *xcb.mask([(xcb.GC_FOREGROUND, xcb.screen.white_pixel),
                                  (xcb.GC_BACKGROUND, xcb.screen.black_pixel),
                                  (xcb.GC_GRAPHICS_EXPOSURES, 0)]))
    global _surface
    _surface = cairo.xcb_surface_create(
        xcb.conn,
        _pixmap,
        xcb.aux_find_visual_by_id(xcb.screen, xcb.screen.root_visual),
        _width,
        _height)
    global _ctx
    _ctx = cairo.create(_surface)
    # Use the same font as the bar.
    _ctx.select_font_face(config.bar.font.face,
                          cairo.FONT_SLANT_NORMAL,
                          cairo.FONT_WEIGHT_NORMAL)
    _ctx.set_font_size(config.bar.font.size)
def destroy():
    """Free all X and cairo resources created by setup()."""
    xcb.core.destroy_window(_window)
    cairo.surface_destroy(_surface)
    _ctx.destroy()
    xcb.core.free_pixmap(_pixmap)
    xcb.core.free_gc(_gc)
def show():
    """Show the launcher: reset the typed query, load the application
    list, map the window, draw, and grab the keyboard."""
    global active
    if active:
        return
    global _typed
    _typed = ""
    global _applications
    _applications = pwm.xdg.applications()
    _filter_applist()
    xcb.core.map_window(_window)
    _draw()
    try:
        _grab_keyboard()
    except:
        # Roll back to the hidden state, then propagate the error.
        _hide()
        raise
    active = True
def _grab_keyboard():
    """Try to grab the keyboard."""
    # Try (repeatedly, if necessary) to grab the keyboard. We might not
    # get the keyboard at the first attempt because of the keybinding
    # still being active when started via a wm's keybinding.
    for _ in range(1000):
        reply = xcb.core.grab_keyboard(True, _window, xcb.CURRENT_TIME,
                                       xcb.GRAB_MODE_ASYNC,
                                       xcb.GRAB_MODE_ASYNC).reply()
        if reply != xcb.ffi.NULL:
            return
        # Retry at ~1ms intervals, i.e. give up after about one second.
        time.sleep(1.0/1000.0)
    raise Exception("Cannot grab keyboard")
def _ungrab_keyboard():
    """Ungrab the keyboard."""
    xcb.core.ungrab_keyboard(xcb.CURRENT_TIME)
def _hide():
    """Hide the launcher window and release the keyboard grab."""
    global active
    active = False
    xcb.core.unmap_window(_window)
    _ungrab_keyboard()
def _filter_applist():
    """Rebuild _filtered from _applications using a fuzzy match on _typed.

    Fuzzy scoring rules:
      1. The pattern is expected to be only a few characters.
      2. A match means the pattern's characters appear in the same
         order within the candidate name.
      3. Matches near the beginning of a name score higher.
      4. Tighter matches (characters close together) score higher than
         spread-out ones.
      5. All characters are weighted equally.
    """
    global _selection
    _selection = 0
    global _filtered
    # Build a pattern like "a.*?b.*?c": query characters in order, with
    # anything (lazily) in between.
    pattern = re.compile(
        ".*?".join(re.escape(char) for char in _typed.lower()))
    matches = []
    for idx, app in enumerate(_applications):
        found = pattern.search(app["name"].lower())
        if not found:
            continue
        span = found.end() - found.start() + 1
        score = 100 / ((1 + found.start()) * span)
        # Tuple layout for sorting:
        #   inverted score  -> best matches first
        #   name            -> stable order on score ties (empty query)
        #   index           -> tiebreak for identical names
        #   app dict        -> the payload, never compared
        matches.append((-score, app["name"], idx, app))
    matches.sort()
    _filtered = matches
def _draw():
    """Render the typed query and the filtered application list onto
    the off-screen pixmap, then copy it to the window."""
    # Clear everything with the bar background color.
    _ctx.set_source_rgb(*pwm.color.get_rgb(config.bar.background))
    _ctx.set_operator(cairo.OPERATOR_SOURCE)
    _ctx.paint()
    # The typed query followed by a "|" caret.
    text = _typed+"|"
    font_ext = cairo.ffi.new("cairo_font_extents_t*")
    _ctx.font_extents(font_ext)
    # Vertically center the text baseline in the window.
    pos_y = _height/2 - font_ext.descent + font_ext.height/2
    text_extents = cairo.ffi.new("cairo_text_extents_t*")
    _ctx.text_extents(text, text_extents)
    _ctx.move_to(0, pos_y)
    _ctx.set_source_rgb(*pwm.color.get_rgb(config.bar.foreground))
    _ctx.show_text(_typed+"|")
    # Application entries start after a fixed-minimum-width query area.
    left = max(font_ext.max_x_advance*20, text_extents.width+10)
    for idx, (_, _, _, app) in enumerate(_filtered):
        _ctx.text_extents(app["name"], text_extents)
        if idx == _selection:
            # Make the selected entry a bit nicer
            _ctx.set_source_rgb(*pwm.color.get_rgb(
                config.bar.active_workspace_background))
            _ctx.rectangle(left-5, 0, text_extents.width+10, _height)
            _ctx.fill()
            _ctx.set_source_rgb(*pwm.color.get_rgb(
                config.bar.active_workspace_foreground))
        else:
            _ctx.set_source_rgb(*pwm.color.get_rgb(
                config.bar.inactive_workspace_foreground))
        _ctx.move_to(left, pos_y)
        _ctx.show_text(app["name"])
        _ctx.text_extents(app["name"], text_extents)
        left += text_extents.width + 10
    # Blit the finished pixmap to the visible window.
    xcb.core.copy_area(_pixmap, _window, _gc, 0, 0, 0, 0, _width, _height)
def handle_key_press_event(event):
    """Translate a key press into launcher actions: cancel (Escape),
    confirm (Return), navigate (Tab / Shift-Tab), edit (BackSpace or a
    printable character), then redraw."""
    sym = pwm.keybind.get_keysym(event.detail, event.state)
    symstr = pwm.keybind.get_keysym_string(sym)
    if not symstr:
        return
    global _typed
    global _selection
    if symstr == "Escape":
        _hide()
        return
    elif symstr == "Return":
        if _filtered:
            # Strip out the placeholders some Exec values might have.
            pwm.spawn.spawn(
                _filtered[_selection][-1]["exec"].split(" ", 1)[0])
        _hide()
        return
    elif symstr == "Tab":
        # Cycle the selection forward, wrapping at the end.
        _selection += 1
        if _selection >= len(_filtered):
            _selection = 0
    elif symstr == "ISO_Left_Tab":
        # Shift-Tab: cycle backward, wrapping at the start.
        _selection -= 1
        if _selection < 0:
            _selection = len(_filtered)-1
    elif symstr == "BackSpace":
        _typed = _typed[:-1]
        _filter_applist()
    else:
        try:
            sym = chr(sym)
        except ValueError:
            # some keys have no usable values
            return
        if sym not in string.printable:
            return
        _typed += sym
        _filter_applist()
    _draw()
|
from pettingzoo.utils.deprecated_module import DeprecatedModule

# Stubs for retired environment versions: each maps an old version name
# to the currently supported one, so importing an outdated module points
# users at the replacement.
prison_v0 = DeprecatedModule("prison", "v0", "v3")
prison_v1 = DeprecatedModule("prison", "v1", "v3")
prison_v2 = DeprecatedModule("prison", "v2", "v3")
prospector_v0 = DeprecatedModule("prospector", "v0", "v4")
prospector_v1 = DeprecatedModule("prospector", "v1", "v4")
prospector_v2 = DeprecatedModule("prospector", "v2", "v4")
prospector_v3 = DeprecatedModule("prospector", "v3", "v4")
pistonball_v0 = DeprecatedModule("pistonball", "v0", "v4")
pistonball_v1 = DeprecatedModule("pistonball", "v1", "v4")
pistonball_v2 = DeprecatedModule("pistonball", "v2", "v4")
pistonball_v3 = DeprecatedModule("pistonball", "v3", "v4")
cooperative_pong_v0 = DeprecatedModule("cooperative_pong", "v0", "v3")
cooperative_pong_v1 = DeprecatedModule("cooperative_pong", "v1", "v3")
cooperative_pong_v2 = DeprecatedModule("cooperative_pong", "v2", "v3")
knights_archers_zombies_v0 = DeprecatedModule("knights_archers_zombies", "v0", "v7")
knights_archers_zombies_v1 = DeprecatedModule("knights_archers_zombies", "v1", "v7")
knights_archers_zombies_v2 = DeprecatedModule("knights_archers_zombies", "v2", "v7")
knights_archers_zombies_v3 = DeprecatedModule("knights_archers_zombies", "v3", "v7")
knights_archers_zombies_v4 = DeprecatedModule("knights_archers_zombies", "v4", "v7")
knights_archers_zombies_v5 = DeprecatedModule("knights_archers_zombies", "v5", "v7")
knights_archers_zombies_v6 = DeprecatedModule("knights_archers_zombies", "v6", "v7")
|
# Build a country -> city mapping from user input, in two rounds of two
# entries each, printing the accumulated mapping after every round.
# (The original duplicated the loop verbatim; it is factored into a
# helper so the two rounds cannot drift apart.)
country = {}


def _ask_round(mapping, entries=2):
    """Prompt for *entries* country/city pairs and store them in *mapping*."""
    for _ in range(entries):
        key = input('Введите страну:\n')
        value = input('Введите город этой страны\n')
        mapping[key] = value


_ask_round(country)
print(country)
_ask_round(country)
print(country)
|
from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec, CommandLineInputSpec, CommandLine, traits, File, TraitedSpec
from nipype.interfaces.matlab import MatlabCommand
#==================================================================================================
# Denoising with non-local means
# This function is based on the example in the Dipy preprocessing tutorial:
# http://nipy.org/dipy/examples_built/denoise_nlmeans.html#example-denoise-nlmeans
class DipyDenoiseInputSpec(BaseInterfaceInputSpec):
    # 4D diffusion-weighted volume to denoise.
    in_file = File(exists=True, desc='diffusion weighted volume for denoising', mandatory=True)


class DipyDenoiseOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc="denoised diffusion-weighted volume")
class DipyDenoise(BaseInterface):
    """Nipype interface: non-local-means denoising of a 4D DWI volume.

    Based on the example in the Dipy preprocessing tutorial:
    http://nipy.org/dipy/examples_built/denoise_nlmeans.html#example-denoise-nlmeans
    """
    input_spec = DipyDenoiseInputSpec
    output_spec = DipyDenoiseOutputSpec

    def _run_interface(self, runtime):
        # Denoise each 3D volume independently and write the result
        # next to the working directory as <base>_denoised.nii.
        import nibabel as nib
        import numpy as np
        from dipy.denoise.nlmeans import nlmeans
        from nipype.utils.filemanip import split_filename

        fname = self.inputs.in_file
        img = nib.load(fname)
        data = img.get_data()
        affine = img.get_affine()
        # Rough brain mask from the first (b0) volume; intensity > 80 is
        # an empirical threshold -- TODO confirm it suits the scanner.
        mask = data[..., 0] > 80
        a = data.shape
        denoised_data = np.ndarray(shape=data.shape)
        for image in range(0, a[3]):
            # Progress counter.  (The original printed a[3] + 1 as the
            # total, overstating the number of volumes by one.)
            print(str(image + 1) + '/' + str(a[3]))
            dat = data[..., image]
            # Standard deviation of the background voxels = noise estimate.
            sigma = np.std(dat[~mask])
            den = nlmeans(dat, sigma=sigma, mask=mask)
            denoised_data[:, :, :, image] = den
        _, base, _ = split_filename(fname)
        nib.save(nib.Nifti1Image(denoised_data, affine), base + '_denoised.nii')
        return runtime

    def _list_outputs(self):
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["out_file"] = os.path.abspath(base + '_denoised.nii')
        return outputs
#==================================================================================================
# Denoising with non-local means
# This function is based on the example in the Dipy preprocessing tutorial:
# http://nipy.org/dipy/examples_built/denoise_nlmeans.html#example-denoise-nlmeans
class T1DipyDenoiseInputSpec(BaseInterfaceInputSpec):
    # NOTE(review): desc says "diffusion weighted" but this interface is
    # used for T1 volumes -- confirm and fix the description upstream.
    in_file = File(exists=True, desc='diffusion weighted volume for denoising', mandatory=True)
    in_mask = File(exists=True, desc='binary brain mask file', mandatory=True)


class T1DipyDenoiseOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc="denoised diffusion-weighted volume")
class T1DipyDenoise(BaseInterface):
    """Nipype interface: non-local-means denoising of a 3D volume,
    using an externally supplied binary brain mask.

    Based on the Dipy preprocessing tutorial:
    http://nipy.org/dipy/examples_built/denoise_nlmeans.html#example-denoise-nlmeans
    """
    input_spec = T1DipyDenoiseInputSpec
    output_spec = T1DipyDenoiseOutputSpec

    def _run_interface(self, runtime):
        import nibabel as nib
        import numpy as np
        from dipy.denoise.nlmeans import nlmeans
        from nipype.utils.filemanip import split_filename

        fname = self.inputs.in_file
        img = nib.load(fname)
        data = img.get_data()
        affine = img.get_affine()

        mask_fname = self.inputs.in_mask
        # (The original line ended with a stray ']' -- a syntax error.)
        mask_img = nib.load(mask_fname)
        mask = mask_img.get_data()

        # Noise level estimated from voxels outside the brain mask.
        # NOTE(review): ~mask assumes the mask loads as a boolean array;
        # confirm the mask file's dtype.
        sigma = np.std(data[~mask])
        denoised_data = nlmeans(data, sigma=sigma, mask=mask)
        # (Removed a leftover "denoised_data[:,:,:,image] = den" line
        # copied from the 4D DWI variant; both names were undefined here.)
        _, base, _ = split_filename(fname)
        nib.save(nib.Nifti1Image(denoised_data, affine), base + '_denoised.nii')
        return runtime

    def _list_outputs(self):
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["out_file"] = os.path.abspath(base + '_denoised.nii')
        return outputs
#==================================================================================================
# Fitting the Tensor models with RESTORE
class DipyRestoreInputSpec(BaseInterfaceInputSpec):
    in_file = File(exists=True, desc='diffusion weighted volume', mandatory=True)
    bval = File(exists=True, desc='FSL-style b-value file', mandatory=True)
    bvec = File(exists=True, desc='FSL-style b-vector file', mandatory=True)


class DipyRestoreOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc="fitted FA file")
class DipyRestore(BaseInterface):
    """Fit a diffusion tensor model with the RESTORE fit method and
    save the resulting FA map next to the working directory."""

    input_spec = DipyRestoreInputSpec
    output_spec = DipyRestoreOutputSpec

    def _run_interface(self, runtime):
        import dipy.reconst.dti as dti
        import dipy.denoise.noise_estimate as ne
        from dipy.core.gradients import gradient_table
        from nipype.utils.filemanip import split_filename
        import nibabel as nib

        in_fname = self.inputs.in_file
        image = nib.load(in_fname)
        dwi_data = image.get_data()
        dwi_affine = image.get_affine()

        # Gradient table straight from the FSL-style bval/bvec files.
        gtab = gradient_table(self.inputs.bval, self.inputs.bvec)

        # RESTORE needs a noise estimate; derive it from the data.
        noise_sigma = ne.estimate_sigma(dwi_data)
        tensor_model = dti.TensorModel(gtab, fit_method='RESTORE', sigma=noise_sigma)
        tensor_fit = tensor_model.fit(dwi_data)
        fa = tensor_fit.fa

        _, base, _ = split_filename(in_fname)
        nib.save(nib.Nifti1Image(fa, dwi_affine), base + '_FA.nii')
        return runtime

    def _list_outputs(self):
        import os
        from nipype.utils.filemanip import split_filename

        outputs = self._outputs().get()
        _, base, _ = split_filename(self.inputs.in_file)
        outputs["out_file"] = os.path.abspath(base + '_FA.nii')
        return outputs
#==================================================================================================
# Deterministic tracking based on the CSD model
class CSDdetInputSpec(BaseInterfaceInputSpec):
    """Inputs for deterministic CSD-based tractography."""
    in_file = File(exists=True, desc='diffusion weighted volume', mandatory=True)
    bval = File(exists=True, desc='FSL-style b-value file', mandatory=True)
    bvec = File(exists=True, desc='FSL-style b-vector file', mandatory=True)
    FA_file = File(exists=True, desc='FA map', mandatory=True)
    # desc previously read 'FA map' -- a copy-paste slip from the line
    # above; this trait carries the brain mask.
    brain_mask = File(exists=True, desc='binary brain mask', mandatory=True)


class CSDdetOutputSpec(TraitedSpec):
    """Outputs for deterministic CSD-based tractography."""
    out_file = File(exists=True, desc="streamline trackfile")
class CSDdet(BaseInterface):
    """Deterministic whole-brain tractography: CSA peaks provide the
    stopping criterion (GFA), a CSD fit provides directions, and local
    tracking is seeded from a white-matter mask (FA >= 0.2)."""

    input_spec = CSDdetInputSpec
    output_spec = CSDdetOutputSpec

    def _run_interface(self, runtime):
        import numpy as np
        import nibabel as nib
        from dipy.io import read_bvals_bvecs
        from dipy.core.gradients import gradient_table
        from nipype.utils.filemanip import split_filename
        # Loading the data
        fname = self.inputs.in_file
        img = nib.load(fname)
        data = img.get_data()
        affine = img.get_affine()
        FA_fname = self.inputs.FA_file
        FA_img = nib.load(FA_fname)
        fa = FA_img.get_data()
        # NOTE(review): the DWI affine is overwritten with the (rounded)
        # FA affine for seeding/tracking -- confirm both images are
        # aligned in the same space.
        affine = FA_img.get_affine()
        affine = np.matrix.round(affine)
        mask_fname = self.inputs.brain_mask
        mask_img = nib.load(mask_fname)
        mask = mask_img.get_data()
        bval_fname = self.inputs.bval
        bvals = np.loadtxt(bval_fname)
        bvec_fname = self.inputs.bvec
        bvecs = np.loadtxt(bvec_fname)
        # Transpose the 3xN FSL b-vectors into the Nx3 layout dipy expects.
        bvecs = np.vstack([bvecs[0,:],bvecs[1,:],bvecs[2,:]]).T
        gtab = gradient_table(bvals, bvecs)
        # Creating a white matter mask
        fa = fa*mask
        white_matter = fa >= 0.2
        # Creating a seed mask: 2x2x2 seeds per white-matter voxel.
        from dipy.tracking import utils
        seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2], affine=affine)
        # Fitting the CSA model (used only for its GFA / stopping mask)
        from dipy.reconst.shm import CsaOdfModel
        from dipy.data import default_sphere
        from dipy.direction import peaks_from_model
        csa_model = CsaOdfModel(gtab, sh_order=8)
        csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                     relative_peak_threshold=.8,
                                     min_separation_angle=45,
                                     mask=white_matter)
        # Streamlines terminate where generalized FA drops below 0.25.
        from dipy.tracking.local import ThresholdTissueClassifier
        classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)
        # CSD model
        from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel, auto_response)
        response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        csd_fit = csd_model.fit(data, mask=white_matter)
        from dipy.direction import DeterministicMaximumDirectionGetter
        det_dg = DeterministicMaximumDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                                                  max_angle=45.,
                                                                  sphere=default_sphere)
        # Tracking
        from dipy.tracking.local import LocalTracking
        streamlines = LocalTracking(det_dg, classifier, seeds, affine,
                                    step_size=.5, maxlen=200, max_cross=1)
        # Compute streamlines and store as a list.
        streamlines = list(streamlines)
        # Saving the trackfile
        from dipy.io.trackvis import save_trk
        _, base, _ = split_filename(fname)
        save_trk(base + '_CSDdet.trk', streamlines, affine, fa.shape)
        return runtime

    def _list_outputs(self):
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["out_file"] = os.path.abspath(base + '_CSDdet.trk')
        return outputs
#==================================================================================================
# Probabilistic tracking based on the CSD model
class CSDprobInputSpec(BaseInterfaceInputSpec):
    """Inputs for probabilistic CSD-based tractography (CSDprob)."""
    in_file = File(exists=True, desc='diffusion weighted volume', mandatory=True)
    bval = File(exists=True, desc='FSL-style b-value file', mandatory=True)
    bvec = File(exists=True, desc='FSL-style b-vector file', mandatory=True)
    FA_file = File(exists=True, desc='FA map', mandatory=True)
    # BUG FIX: desc previously said 'FA map' (copy-paste); this is the brain mask.
    brain_mask = File(exists=True, desc='binary brain mask', mandatory=True)
class CSDprobOutputSpec(TraitedSpec):
    """Outputs for probabilistic CSD-based tractography (CSDprob)."""
    out_file = File(exists=True, desc="streamline trackfile")
class CSDprob(BaseInterface):
    """Probabilistic whole-brain tractography based on the CSD model.

    A CSA model supplies the GFA-based stopping criterion, a CSD model
    supplies the fibre-orientation distribution, and probabilistic local
    tracking is seeded from a white matter mask (masked FA >= 0.2).
    Streamlines are saved to ``<base>_CSDprob.trk``.
    """
    # BUG FIX: this class previously pointed at CSDdetInputSpec/CSDdetOutputSpec;
    # its own (field-identical) spec classes were defined but unused.
    input_spec = CSDprobInputSpec
    output_spec = CSDprobOutputSpec

    def _run_interface(self, runtime):
        import numpy as np
        import nibabel as nib
        from dipy.io import read_bvals_bvecs
        from dipy.core.gradients import gradient_table
        from nipype.utils.filemanip import split_filename
        # Loading the data
        fname = self.inputs.in_file
        img = nib.load(fname)
        data = img.get_data()
        affine = img.get_affine()
        FA_fname = self.inputs.FA_file
        FA_img = nib.load(FA_fname)
        fa = FA_img.get_data()
        # NOTE(review): the dwi affine is overwritten by the (rounded) FA
        # affine; both images are presumably in the same space -- confirm.
        affine = FA_img.get_affine()
        affine = np.matrix.round(affine)
        mask_fname = self.inputs.brain_mask
        mask_img = nib.load(mask_fname)
        mask = mask_img.get_data()
        bval_fname = self.inputs.bval
        bvals = np.loadtxt(bval_fname)
        bvec_fname = self.inputs.bvec
        bvecs = np.loadtxt(bvec_fname)
        # FSL stores b-vectors as three rows; dipy expects one row per volume.
        bvecs = np.vstack([bvecs[0,:],bvecs[1,:],bvecs[2,:]]).T
        gtab = gradient_table(bvals, bvecs)
        # Creating a white matter mask (FA restricted to the brain mask)
        fa = fa*mask
        white_matter = fa >= 0.2
        # Creating a seed mask: 2x2x2 seeds per white-matter voxel
        from dipy.tracking import utils
        seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2], affine=affine)
        # Fitting the CSA model (used only for the GFA stopping criterion)
        from dipy.reconst.shm import CsaOdfModel
        from dipy.data import default_sphere
        from dipy.direction import peaks_from_model
        csa_model = CsaOdfModel(gtab, sh_order=8)
        csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                     relative_peak_threshold=.8,
                                     min_separation_angle=45,
                                     mask=white_matter)
        from dipy.tracking.local import ThresholdTissueClassifier
        # Tracking stops where GFA drops below 0.25
        classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)
        # CSD model: response estimated from a central ROI with high FA
        from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel, auto_response)
        response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        csd_fit = csd_model.fit(data, mask=white_matter)
        from dipy.direction import ProbabilisticDirectionGetter
        prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                                            max_angle=45.,
                                                            sphere=default_sphere)
        # Tracking
        from dipy.tracking.local import LocalTracking
        streamlines = LocalTracking(prob_dg, classifier, seeds, affine,
                                    step_size=.5, maxlen=200, max_cross=1)
        # Compute streamlines and store as a list.
        streamlines = list(streamlines)
        # Saving the trackfile
        from dipy.io.trackvis import save_trk
        _, base, _ = split_filename(fname)
        save_trk(base + '_CSDprob.trk', streamlines, affine, fa.shape)
        return runtime

    def _list_outputs(self):
        """Report the absolute path of the generated .trk file."""
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["out_file"] = os.path.abspath(base + '_CSDprob.trk')
        return outputs
#==================================================================================================
# Moving tracts to a different space
class trk_CoregInputSpec(CommandLineInputSpec):
    """Inputs for DTK's ``track_transform`` command (see trk_Coreg)."""
    in_file = File(exists=True, desc='whole-brain tractography in .trk format',
                   mandatory=True, position = 0, argstr="%s")
    output_file = File("coreg_tracks.trk", desc="whole-brain tractography in coregistered space",
                       position=1, argstr="%s", usedefault=True)
    FA_file = File(exists=True, desc='FA file in the same space as the .trk file',
                   mandatory=True, position = 2, argstr="-src %s")
    reference = File(exists=True, desc='Image that the .trk file will be registered to',
                     mandatory=True, position = 3, argstr="-ref %s")
    # NOTE(review): the trait name is misspelled ('transfomation') but is part
    # of the public interface -- renaming would break existing workflows.
    transfomation_matrix = File(exists=True, desc='FSL matrix with transform form original to new space',
                                mandatory=True, position = 4, argstr="-reg %s")
class trk_CoregOutputSpec(TraitedSpec):
    """Outputs for DTK's ``track_transform`` command (see trk_Coreg)."""
    transformed_track_file = File(exists=True, desc="whole-brain tractography in new space")
class trk_Coreg(CommandLine):
    """Move a .trk file into a different space via DTK's ``track_transform``."""
    input_spec = trk_CoregInputSpec
    output_spec = trk_CoregOutputSpec
    _cmd = "track_transform"

    def _list_outputs(self):
        """Expose the coregistered track file written by track_transform."""
        from os.path import abspath
        result = self.output_spec().get()
        result['transformed_track_file'] = abspath(self.inputs.output_file)
        return result
#==================================================================================================
# Extract b0
class Extractb0InputSpec(BaseInterfaceInputSpec):
    """Inputs for Extractb0."""
    in_file = File(exists=True, desc='diffusion-weighted image (4D)', mandatory=True)
class Extractb0OutputSpec(TraitedSpec):
    """Outputs for Extractb0."""
    out_file = File(exists=True, desc="First volume of the dwi file")
class Extractb0(BaseInterface):
    """Extract the first volume of a 4D dwi image and save it as <base>_b0.nii.gz.

    NOTE(review): assumes the first volume of the series is the b0 image --
    confirm against the acquisition's b-value ordering.
    """
    input_spec = Extractb0InputSpec
    output_spec = Extractb0OutputSpec
    def _run_interface(self, runtime):
        import nibabel as nib
        img = nib.load(self.inputs.in_file)
        data = img.get_data()
        affine = img.get_affine()
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        # Volume 0 of the 4D series, saved with the original affine
        nib.save(nib.Nifti1Image(data[...,0],affine),os.path.abspath(base + '_b0.nii.gz'))
        return runtime
    def _list_outputs(self):
        """Report the absolute path of the extracted b0 volume."""
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["out_file"] = os.path.abspath(base + '_b0.nii.gz')
        return outputs
#==================================================================================================
# FA connectome construction
class FAconnectomeInputSpec(BaseInterfaceInputSpec):
    """Inputs for FAconnectome."""
    trackfile = File(exists=True, desc='whole-brain tractography in .trk format', mandatory=True)
    ROI_file = File(exists=True, desc='image containing the ROIs', mandatory=True)
    # Typo fixes in the help text only ('soace' -> 'space', 'conenction' -> 'connection')
    FA_file = File(exists=True, desc='fractional anisotropy map in the same space as the track file', mandatory=True)
    output_file = File("FA_matrix.txt", desc="Adjacency matrix of ROIs with FA as connection weight", usedefault=True)
class FAconnectomeOutputSpec(TraitedSpec):
    """Outputs for FAconnectome: three ROI-by-ROI adjacency matrices."""
    FA_matrix = File(exists=True, desc="connectivity matrix of FA between each pair of ROIs")
    density_matrix = File(exists=True, desc="connectivity matrix with number of streamlines between ROIs as weight")
    density_corrected_matrix = File(exists=True, desc="connectivity matrix with number of streamlines between ROIs as weight corrected for the sum of volumes of both ROIs")
class FAconnectome(BaseInterface):
    """Build FA-weighted and streamline-density connectomes from a trackfile.

    For every ROI pair in ``ROI_file`` connected by at least 10 streamlines,
    three symmetric matrices are written next to the trackfile: mean FA along
    the connecting streamlines, streamline count, and streamline count
    normalised by the summed volume of both ROIs.
    """
    input_spec = FAconnectomeInputSpec
    output_spec = FAconnectomeOutputSpec
    def _run_interface(self, runtime):
        # Loading the ROI file
        import nibabel as nib
        import numpy as np
        from dipy.tracking import utils
        import os
        import pandas as pd
        img = nib.load(self.inputs.ROI_file)
        data = img.get_data()
        affine = img.get_affine()
        # Getting ROI volumes if they haven't been generated
        # NOTE(review): hard-coded, site-specific cache path.
        if not os.path.isfile('/imaging/jb07/CALM/DWI/FA_connectome/Atlas_volumes.csv'):
            import subprocess
            # BUG FIX: this branch previously referenced an undefined name
            # (``ROI_file``) and a non-existent input (``self.inputs.atlas_file``),
            # and reused ``data``/``affine``, clobbering the label volume that
            # connectivity_matrix needs below. Distinct local names fix both.
            atlas_file = self.inputs.ROI_file
            atlas_affine = nib.load(atlas_file).get_affine()
            volumes = pd.DataFrame()
            atlas_labels = np.unique(nib.load(atlas_file).get_data())
            for atlas_label in atlas_labels:
                # Binarise one label at a time and measure its volume with fslstats
                label_data = nib.load(atlas_file).get_data()
                label_data[label_data != atlas_label] = 0
                label_data[label_data == atlas_label] = 1
                nib.save(nib.Nifti1Image(label_data, atlas_affine), 'temp.nii.gz')
                volumes.set_value(atlas_label, 'volume', subprocess.check_output(os.environ['FSLDIR'] + '/bin/fslstats temp.nii.gz -V', shell=True).split(' ')[0])
                os.remove('temp.nii.gz')
            volumes.to_csv('/imaging/jb07/CALM/DWI/FA_connectome/Atlas_volumes.csv')
        ROI_volumes = pd.read_csv('/imaging/jb07/CALM/DWI/FA_connectome/Atlas_volumes.csv')
        # Getting the FA file
        img = nib.load(self.inputs.FA_file)
        FA_data = img.get_data()
        FA_affine = img.get_affine()
        # Loading the streamlines
        from nibabel import trackvis
        streams, hdr = trackvis.read(self.inputs.trackfile,points_space='rasmm')
        streamlines = [s[0] for s in streams]
        streamlines_affine = trackvis.aff_from_hdr(hdr,atleast_v2=True)
        # Checking for negative values: drop streamlines whose endpoints map
        # outside the volume (negative voxel indices)
        from dipy.tracking._utils import _mapping_to_voxel, _to_voxel_coordinates
        endpoints = [sl[0::len(sl)-1] for sl in streamlines]
        lin_T, offset = _mapping_to_voxel(affine, (1.,1.,1.))
        inds = np.dot(endpoints, lin_T)
        inds += offset
        negative_values = np.where(inds <0)[0]
        for negative_value in sorted(negative_values, reverse=True):
            del streamlines[negative_value]
        # Constructing the streamlines matrix; pairs with fewer than 10
        # streamlines are treated as unconnected
        matrix,mapping = utils.connectivity_matrix(streamlines=streamlines,label_volume=data,affine=streamlines_affine,symmetric=True,return_mapping=True,mapping_as_streamlines=True)
        matrix[matrix < 10] = 0
        # Constructing the FA matrix
        dimensions = matrix.shape
        FA_matrix = np.empty(shape=dimensions)
        density_matrix = np.empty(shape=dimensions)
        density_corrected_matrix = np.empty(shape=dimensions)
        for i in range(0,dimensions[0]):
            for j in range(0,dimensions[1]):
                if matrix[i,j]:
                    # Mean FA over all voxels crossed by streamlines of this pair
                    dm = utils.density_map(mapping[i,j], FA_data.shape, affine=streamlines_affine)
                    FA_matrix[i,j] = np.mean(FA_data[dm>0])
                    if np.sum(dm > 0) > 0:
                        density_matrix[i,j] = np.sum(dm[dm > 0])
                        density_corrected_matrix[i,j] = float(np.sum(dm[dm > 0]))/np.sum([ROI_volumes.iloc[i].values.astype('float'), ROI_volumes.iloc[j].values.astype('float')])
                    else:
                        density_matrix[i,j] = 0
                        density_corrected_matrix[i,j] = 0
                else:
                    FA_matrix[i,j] = 0
                    density_matrix[i,j] = 0
                    density_corrected_matrix[i,j] = 0
        # Symmetrise: zero the lower triangle (incl. diagonal) then mirror
        FA_matrix[np.tril_indices(n=len(FA_matrix))] = 0
        FA_matrix = FA_matrix.T + FA_matrix - np.diagonal(FA_matrix)
        density_matrix[np.tril_indices(n=len(density_matrix))] = 0
        density_matrix = density_matrix.T + density_matrix - np.diagonal(density_matrix)
        density_corrected_matrix[np.tril_indices(n=len(density_corrected_matrix))] = 0
        density_corrected_matrix = density_corrected_matrix.T + density_corrected_matrix - np.diagonal(density_corrected_matrix)
        from nipype.utils.filemanip import split_filename
        _, base, _ = split_filename(self.inputs.trackfile)
        np.savetxt(base + '_FA_matrix.txt',FA_matrix,delimiter='\t')
        np.savetxt(base + '_density_matrix.txt',density_matrix,delimiter='\t')
        np.savetxt(base + '_volume_corrected_density_matrix.txt',density_corrected_matrix,delimiter='\t')
        return runtime
    def _list_outputs(self):
        """Report the absolute paths of the three matrix text files."""
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.trackfile
        _, base, _ = split_filename(fname)
        outputs["FA_matrix"] = os.path.abspath(base + '_FA_matrix.txt')
        outputs["density_matrix"] = os.path.abspath(base + '_density_matrix.txt')
        outputs["density_corrected_matrix"] = os.path.abspath(base + '_volume_corrected_density_matrix.txt')
        return outputs
#==================================================================================================
# Convert an adjacency matrix in txt format to NetworkX pck format
class TXT2PCKInputSpec(BaseInterfaceInputSpec):
    """Inputs for TXT2PCK."""
    in_file = File(exists=True, desc='adjacency matrix in txt format', mandatory=True)
class TXT2PCKOutputSpec(TraitedSpec):
    """Outputs for TXT2PCK."""
    out_file = File(exists=True, desc="NetworkX file in pck format")
class TXT2PCK(BaseInterface):
    """Convert a text adjacency matrix to a NetworkX graph pickle (<base>.pck)."""
    input_spec = TXT2PCKInputSpec
    output_spec = TXT2PCKOutputSpec

    def _run_interface(self, runtime):
        """Load the matrix, build the graph, and write it as a gpickle."""
        import numpy as np
        import networkx as nx
        from nipype.utils.filemanip import split_filename
        adjacency = np.loadtxt(self.inputs.in_file)
        graph = nx.from_numpy_matrix(adjacency)
        _, base, _ = split_filename(self.inputs.in_file)
        nx.write_gpickle(graph, path=base + '.pck')
        return runtime

    def _list_outputs(self):
        """Report the absolute path of the pickled graph."""
        import os
        from nipype.utils.filemanip import split_filename
        outputs = self._outputs().get()
        _, base, _ = split_filename(self.inputs.in_file)
        outputs["out_file"] = os.path.abspath(base + '.pck')
        return outputs
#==================================================================================================
# Calling fsl_anat on T1 files
class FSLANATInputSpec(BaseInterfaceInputSpec):
    """Inputs for FSLANAT."""
    in_file = File(exists=True, desc='input structural image', mandatory=True)
    out_directory = File(exist=True, desc="output directory")
class FSLANATOutputSpec(TraitedSpec):
    """Outputs for FSLANAT."""
    fsl_anat_directory = traits.Directory(exists=True, desc="folder with processed T1 files")
class FSLANAT(BaseInterface):
    """Run ``fsl_anat`` (registration and segmentation disabled) on a T1 image.

    The subject ID is taken as the part of the input filename before the
    first underscore; output goes to ``<out_directory><subject>.anat/``.
    NOTE(review): ``out_directory`` is concatenated without a path separator,
    so it is presumably expected to end with '/' -- confirm with callers.
    """
    input_spec = FSLANATInputSpec
    output_spec = FSLANATOutputSpec
    def _run_interface(self, runtime):
        from subprocess import call
        # '<subject>_...' -> '<subject>' from the basename
        subject = self.inputs.in_file.split('/')[-1].split('.')[0].split('_')[0]
        cmd = "fsl_anat --noreg --nononlinreg --noseg --nosubcortseg -i " + self.inputs.in_file + ' -o ' + self.inputs.out_directory + subject
        call(cmd,shell=True)
        return runtime
    def _list_outputs(self):
        """Report the .anat directory created by fsl_anat."""
        import os
        outputs = self.output_spec().get()
        subject = self.inputs.in_file.split('/')[-1].split('.')[0].split('_')[0]
        outputs['fsl_anat_directory'] = os.path.abspath(self.inputs.out_directory + subject + '.anat/')
        return outputs
#==================================================================================================
# Wavelet Despiking
import os
from string import Template
class WaveletDespikeInputSpec(BaseInterfaceInputSpec):
    """Inputs for WaveletDespike (BrainWavelet MATLAB toolbox)."""
    in_file = File(exists=True, mandatory=True)
    subject_id = traits.String(mandatory=True)
    out_folder = File(mandatory=True)
class WaveletDespikeOutputSpec(TraitedSpec):
    """Outputs for WaveletDespike."""
    out_file = File(exists=True)
    out_noise = File(exists=True)
class WaveletDespike(BaseInterface):
    """Run MATLAB WaveletDespike on a 4D time series via nipype's MatlabCommand.

    NOTE(review): ``out_folder`` is placed in the template dict but the
    template itself never uses ``$out_folder`` -- outputs land in the
    working directory; confirm this is intended.
    NOTE(review): ``MatlabCommand`` is presumably imported earlier in this
    file (not visible in this chunk).
    """
    input_spec = WaveletDespikeInputSpec
    output_spec = WaveletDespikeOutputSpec
    def _run_interface(self, runtime):
        d = dict(in_file=self.inputs.in_file,
                 out_folder=self.inputs.out_folder,
                 subject_id=self.inputs.subject_id)
        # 'LimitRAM',4 caps WaveletDespike's memory use (GB)
        script = Template("""
        WaveletDespike('$in_file','$subject_id','LimitRAM',4)""").substitute(d)
        mlab = MatlabCommand(script=script,
                             mfile=True,
                             nodesktop=True,
                             nosplash=True,
                             logfile='/imaging/jb07/matlab_log.txt')
        result = mlab.run()
        return result.runtime
    def _list_outputs(self):
        """Report the despiked and noise volumes named after the subject prefix."""
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["out_file"] = os.path.abspath(base.split('_')[0] + '_wds.nii.gz')
        outputs["out_noise"] = os.path.abspath(base.split('_')[0] + '_noise.nii.gz')
        return outputs
#==================================================================================================
# Calling ANTs Quick Registration with SyN
class ants_QuickSyNInputSpec(CommandLineInputSpec):
    """Inputs for antsRegistrationSyNQuick.sh."""
    fixed_image = File(exists=True, desc='Fixed image or source image or reference image',
                       mandatory=True, argstr="-f %s")
    moving_image = File(exists=True, desc="Moving image or target image",
                        mandatory=True, argstr="-m %s")
    # BUG FIX: the script (and this trait's own desc) accepts 2 or 3;
    # the enum previously offered 1 and 3.
    image_dimensions = traits.Enum(2,3,exists=True, desc='ImageDimension: 2 or 3 (for 2 or 3 dimensional registration of single volume)',
                                   mandatory=True, argstr="-d %d")
    output_prefix = traits.Str(exists=True, desc='OutputPrefix: A prefix that is prepended to all output files',
                               mandatory=True, argstr="-o %s_")
    transform_type = traits.Str("s", desc='transform type',
                                mandatory=False, argstr="-t %s", usedefault=True)
class ants_QuickSyNOutputSpec(TraitedSpec):
    """Outputs for antsRegistrationSyNQuick.sh."""
    deformation_warp_image = File(desc="Outputs deformation warp image")
    inverse_deformation_warp_image = File(desc="Outputs inverse deformation warp image")
    warped_image = File(desc="Outputs warped images")
    inverse_warped_image = File(desc="Outputs the inverse of the warped image")
    transform_matrix = File(desc="Outputs affine transform matrix")
class ants_QuickSyN(CommandLine):
    """Quick SyN registration via ANTs' antsRegistrationSyNQuick.sh.

    The output files follow the fixed naming scheme
    ``<output_prefix>_<suffix>`` produced by the script (the '-o %s_'
    argstr appends the underscore).
    """
    input_spec = ants_QuickSyNInputSpec
    output_spec = ants_QuickSyNOutputSpec
    _cmd = "antsRegistrationSyNQuick.sh"

    def _list_outputs(self):
        """Map each output trait to its fixed filename suffix.

        Removes a dead ``split_filename`` import and collapses five
        copy-paste lines into one suffix table.
        """
        import os
        suffixes = {
            'deformation_warp_image': '_1Warp.nii.gz',
            'inverse_deformation_warp_image': '_1InverseWarp.nii.gz',
            'warped_image': '_Warped.nii.gz',
            'inverse_warped_image': '_InverseWarped.nii.gz',
            'transform_matrix': '_0GenericAffine.mat',
        }
        outputs = self._outputs().get()
        prefix = self.inputs.output_prefix
        for trait_name, suffix in suffixes.items():
            outputs[trait_name] = os.path.abspath(prefix + suffix)
        return outputs
#==================================================================================================
# Regressing signal within a mask
# This is intended for regressing the signal within a CSF or ventricle mask
class RegressMaskInputSpec(BaseInterfaceInputSpec):
    """Inputs for RegressMask."""
    in_file = File(exists=True, desc='4D time series volume', mandatory=True)
    mask_filename = File(exists=True, desc='Binary brain mask', mandatory=True)
    atlas_filename = File(exists=True, desc='3D atlas segmentation file', mandatory=True)
    atlas_key = traits.Float(desc='Number associated with the ROI', mandatory=True)
class RegressMaskOutputSpec(TraitedSpec):
    """Outputs for RegressMask."""
    out_file = File(exists=True, desc="4D time series volume of residuals")
class RegressMask(BaseInterface):
    """Regress the mean signal of an atlas ROI out of a 4D time series.

    Intended for removing CSF/ventricle signal. The residual time series
    is saved as ``<base>_regressed.nii.gz``.
    NOTE(review): ``atlas_key`` is declared as an input but never read here;
    the confounds slice ``time_series[...,0]`` appears to take only the
    first atlas label's signal -- confirm this matches the intended ROI.
    """
    input_spec = RegressMaskInputSpec
    output_spec = RegressMaskOutputSpec
    def _run_interface(self, runtime):
        from nilearn.input_data import NiftiMasker, NiftiLabelsMasker
        from nipype.utils.filemanip import split_filename
        import nibabel as nib
        import os
        functional_filename = self.inputs.in_file
        atlas_filename = self.inputs.atlas_filename
        mask_filename = self.inputs.mask_filename
        # Extracting the ROI signals (one standardized, detrended series per label)
        masker = NiftiLabelsMasker(labels_img=atlas_filename,
                                   background_label = 0,
                                   standardize=True,
                                   detrend = True,
                                   verbose = 1
                                   )
        time_series = masker.fit_transform(functional_filename)
        # Removing the ROI signal from the time series (as a confound regressor)
        nifti_masker = NiftiMasker(mask_img=mask_filename)
        masked_data = nifti_masker.fit_transform(functional_filename, confounds=time_series[...,0])
        masked_img = nifti_masker.inverse_transform(masked_data)
        # Saving the result to disk
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        nib.save(masked_img, os.path.abspath(base + '_regressed.nii.gz'))
        return runtime
    def _list_outputs(self):
        """Report the absolute path of the residual time series."""
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["out_file"] = os.path.abspath(base + '_regressed.nii.gz')
        return outputs
#==================================================================================================
# Wrapper for the mat2det function from ENIGMA (http://enigma.ini.usc.edu/protocols/imaging-protocols/protocol-for-brain-and-intracranial-volumes/)
class MAT2DETInputSpec(BaseInterfaceInputSpec):
    """Inputs for MAT2DET."""
    in_matrix = File(exists=True, desc='input transfomration matrix', mandatory=True)
    subject_id = traits.String(desc='output file with the intracranial value', mandatory=True)
class MAT2DETOutputSpec(TraitedSpec):
    """Outputs for MAT2DET."""
    out_file = File(desc="output value of intracranial volume")
class MAT2DET(BaseInterface):
    """Wrapper for ENIGMA's ``mat2det`` (intracranial volume from an affine).

    Runs ``mat2det`` on the input matrix and captures its stdout in
    ``<subject_id>_ICV.txt`` in the working directory.
    """
    input_spec = MAT2DETInputSpec
    output_spec = MAT2DETOutputSpec

    def _run_interface(self, runtime):
        """Invoke mat2det and redirect its output to <subject_id>_ICV.txt."""
        from subprocess import call
        cmd = "mat2det " + self.inputs.in_matrix + ' > ' + self.inputs.subject_id + '_ICV.txt'
        call(cmd,shell=True)
        return runtime

    def _list_outputs(self):
        """Report the path of the ICV file written by ``_run_interface``.

        BUG FIX: previously prepended the matrix basename (``base +
        subject_id + '_ICV.txt'``), which never matches the
        '<subject_id>_ICV.txt' file the command actually writes.
        """
        import os
        outputs = self.output_spec().get()
        outputs['out_file'] = os.path.abspath(self.inputs.subject_id + '_ICV.txt')
        return outputs
#==================================================================================================
# Function to generate a grey matter density image from antsCorticalThickness output
class GM_DENSITYInputSpec(BaseInterfaceInputSpec):
    """Inputs for GM_DENSITY."""
    in_file = File(exists=True, desc='input brain image file in subject space', mandatory=True)
    mask_file = traits.String(desc='input file of GM matter segmentation posterior', mandatory=True)
class GM_DENSITYOutputSpec(TraitedSpec):
    """Outputs for GM_DENSITY."""
    out_file = File(desc="GM density image")
class GM_DENSITY(BaseInterface):
    """Mask a brain image with a binarised GM posterior to get a GM density map.

    The segmentation posterior is thresholded at 0.1 and multiplied into the
    brain image; the result is saved as ``<base>_gm_density.nii.gz``.
    """
    input_spec = GM_DENSITYInputSpec
    output_spec = GM_DENSITYOutputSpec
    def _run_interface(self, runtime):
        import nibabel as nib
        from nipype.utils.filemanip import split_filename
        import os
        brain_image = nib.load(self.inputs.in_file)
        brain = brain_image.get_data()
        affine = brain_image.get_affine()
        segmentation_mask = nib.load(self.inputs.mask_file)
        mask = segmentation_mask.get_data()
        # Binarise the posterior at 0.1.
        # NOTE(review): voxels exactly equal to 0.1 match neither comparison
        # and keep their 0.1 value -- probably one test should be inclusive.
        mask[mask > 0.1] = 1
        mask[mask < 0.1] = 0
        GM_density = brain*mask
        # Saving the result to disk
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        nib.save(nib.Nifti1Image(GM_density, affine), os.path.abspath(base + '_gm_density.nii.gz'))
        return runtime
    def _list_outputs(self):
        """Report the absolute path of the GM density image."""
        from nipype.utils.filemanip import split_filename
        import os
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["out_file"] = os.path.abspath(base + '_gm_density.nii.gz')
        return outputs
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging, time
from rest_framework import serializers
from django.contrib.auth.models import User, Group
from django.db import transaction
from natrix.common.natrix_views.serializers import NatrixSerializer
from natrix.common import exception as natrix_exception
from sentinel.models import Alarm, Notification
from sentinel.models.alarm_models import NOTIFICATION_CHOICE
from sentinel.backends.deepmonitor import DeepMonitorNotificationManagement
logger = logging.getLogger(__name__)
choice_filter = lambda x: (x.get('name'), x.get('verbose_name'))
class NotifySerializer(NatrixSerializer):
    """Create/update alarm notifications and mirror them to DeepMonitor.

    ``validate_alarm_id``/``validate_users`` cache the resolved model
    instances on ``self``; ``frequency`` is accepted in minutes and stored
    in seconds.
    """
    alarm_id = serializers.IntegerField(help_text=u'告警ID', required=False)
    description = serializers.CharField(max_length=255, help_text=u'通知描述', allow_blank=True)
    notify_type = serializers.ChoiceField(choices=NOTIFICATION_CHOICE, help_text=u'通知类型')
    users = serializers.ListField()
    is_recovery = serializers.BooleanField(help_text=u'是否恢复通知')
    frequency = serializers.IntegerField(help_text=u'频率(分钟)')
    start_time = serializers.TimeField(help_text=u'工作开始时间')
    end_time = serializers.TimeField(help_text=u'工作结束时间')
    def validate_alarm_id(self, value):
        # The alarm must exist and belong to the caller's group.
        try:
            alarm = Alarm.objects.get(pk=value, group=self.group)
            self.alarm = alarm
        except Alarm.DoesNotExist:
            raise serializers.ValidationError('The alarm({}) is not exist!'.format(value))
        return value
    def validate_users(self, value):
        #TODO: Validate users
        # All listed primary keys must resolve to existing users.
        user_list = User.objects.filter(pk__in=value)
        if len(user_list) != len(value):
            raise serializers.ValidationError('There are non-exist user!')
        self.user_list = user_list
        return value
    def is_valid(self):
        """Standard validation plus: alarm_id required on create, and the
        working window must end after it starts.

        NOTE(review): the ``hasattr(self, 'instance')`` guard presumably
        distinguishes create from update -- confirm against NatrixSerializer,
        since plain DRF serializers always set ``instance`` in __init__.
        """
        flag = super(NotifySerializer, self).is_valid()
        if not hasattr(self, 'instance'):
            if not hasattr(self, 'alarm'):
                self._errors['alarm_id'] = ['"alarm_id" is required!']
                return False
        if not flag:
            return flag
        start_time = time.strptime(self.initial_data.get('start_time'), '%H:%M')
        end_time = time.strptime(self.initial_data.get('end_time'), '%H:%M')
        if end_time < start_time:
            self._errors['work_time'] = ['end_time must more than start_time']
            return False
        return flag
    def create(self, validated_data):
        """Create the Notification, attach users, and register it with DeepMonitor."""
        description = validated_data.get('description')
        notify_type = validated_data.get('notify_type')
        is_recovery = validated_data.get('is_recovery')
        # Stored in seconds; the API accepts minutes.
        frequency = validated_data.get('frequency') * 60
        start_time = validated_data.get('start_time')
        end_time = validated_data.get('end_time')
        notify = Notification.objects.create(alarm=self.alarm,
                                             description=description,
                                             notify_type = notify_type,
                                             is_recovery=is_recovery,
                                             frequency=frequency,
                                             start_time=start_time,
                                             end_time=end_time,
                                             owner=self.user,
                                             group=self.group)
        for u in self.user_list:
            notify.users.add(u)
        # Mirror to the DeepMonitor backend and persist its identifiers.
        notify_backend = DeepMonitorNotificationManagement(notify)
        deepmonitor_uuid, deepmonitor_operation = notify_backend.add()
        notify.deepmonitor_uuid = deepmonitor_uuid
        notify.deepmonitor_operation = deepmonitor_operation
        notify.save()
        return notify
    def update(self, instance, validate_data):
        """Update all mutable fields atomically and push the change to DeepMonitor."""
        description = validate_data.get('description')
        with transaction.atomic():
            # Blank description keeps the existing one.
            instance.description = description if description else instance.description
            instance.notify_type = validate_data.get('notify_type')
            instance.is_recovery = validate_data.get('is_recovery')
            instance.frequency = validate_data.get('frequency') * 60
            instance.start_time = validate_data.get('start_time')
            instance.end_time = validate_data.get('end_time')
            instance.users.clear()
            for u in self.user_list:
                instance.users.add(u)
            notify_backend = DeepMonitorNotificationManagement(instance)
            notify_backend.modify()
            instance.save()
        return instance
class NotifySearchSerializer(NatrixSerializer):
    """Validate an (alarm_id, notify_id) pair and present the notification.

    ``validate_*`` hooks cache the matching ``Alarm``/``Notification``
    instances; ``is_valid`` additionally checks they belong together.
    """
    alarm_id = serializers.IntegerField()
    notify_id = serializers.IntegerField()

    def validate_alarm_id(self, value):
        try:
            alarm = Alarm.objects.get(pk=value)
            self.alarm = alarm
        except Alarm.DoesNotExist:
            raise serializers.ValidationError('The alarm({}) is not exist!'.format(value))
        return value

    def validate_notify_id(self, value):
        try:
            notification = Notification.objects.get(pk=value)
            self.notification = notification
        except Notification.DoesNotExist:
            raise serializers.ValidationError('The notification({}) is not exist!'.format(value))
        return value

    def is_valid(self, raise_exception=False):
        flag = super(NotifySearchSerializer, self).is_valid()
        if not flag:
            return flag
        # Cross-field check: the notification must belong to the alarm.
        if self.notification.alarm != self.alarm:
            self._errors['notify_id'] = ['notify_id and alarm_id is not matched!']
            return False
        return flag

    def presentation(self):
        """Return the notification as a dict with frequency in minutes.

        BUG FIX: the assertion messages previously referenced the wrong
        method names ('.isvalid()' and '.process()').
        """
        assert hasattr(self, '_errors'), (
            'You must call `.is_valid()` before calling `.presentation()`'
        )
        assert not self.errors, (
            'You can not call `.presentation()` on a serializer with invalid data.'
        )
        notify = self.notification.represent()
        # Stored in seconds; presented in minutes.
        notify['frequency'] /= 60
        return notify
class NotificationListSerializer(NatrixSerializer):
    """Validate list-query parameters for an alarm's notifications."""
    alarm_id = serializers.IntegerField()
    # Whether to paginate results; pagenum selects the page when paginating.
    is_paginate = serializers.NullBooleanField(required=True)
    pagenum = serializers.IntegerField(min_value=1, required=False)
    def validate_alarm_id(self, value):
        # Listing requires a group context; the alarm must belong to it.
        if not isinstance(self.group, Group):
            raise natrix_exception.ClassInsideException(
                u'The user must join a group when query unfollowed task!')
        try:
            alarm = Alarm.objects.get(pk=value, group=self.group)
            self.alarm = alarm
        except Alarm.DoesNotExist as e:
            raise serializers.ValidationError('The alarm({}) is not exist!'.format(value))
        return value
class NotificationOperationSerializer(NatrixSerializer):
    """Switch a notification on or off, mirroring the change to DeepMonitor."""
    notify_id = serializers.IntegerField(help_text=u'通知ID')
    operation = serializers.ChoiceField(choices=(('on', u'开启'), ('off', u'关闭')),
                                        help_text=u'操作')

    def validate_notify_id(self, value):
        # The notification must exist and belong to the caller's group.
        try:
            notification = Notification.objects.get(pk=value, group=self.group)
            self.instance = notification
        except Notification.DoesNotExist as e:
            raise serializers.ValidationError('The notification({}) is not exist!'.format(value))
        return value

    def process(self):
        """Toggle the backend switch and persist the new status.

        BUG FIX: the first assertion message previously said '.isvalid()'.
        """
        assert hasattr(self, '_errors'), (
            'You must call `.is_valid()` before calling `.process()`'
        )
        assert not self.errors, (
            'You can not call `.process()` on a serializer with invalid data.'
        )
        operation = self.validated_data.get('operation')
        # The backend toggle is stateless here; the local status records the result.
        notify_backend = DeepMonitorNotificationManagement(self.instance)
        notify_backend.switch()
        if operation == 'on':
            self.instance.status = True
        elif operation == 'off':
            self.instance.status = False
        self.instance.save()
|
__author__ = 'frankhe'
import time
import numpy as np
import data_sets
class QLearning(object):
    def __init__(self, pid, network, flags, comm, share_comm):
        """Set up a Q-learning worker.

        pid        -- worker process id (used when reporting to the monitor)
        network    -- Q-network exposing train()/choose_action()/summaries
        flags      -- run configuration (epsilon schedule, phi_length, ...)
        comm       -- MPI-style communicator for monitor messages
        share_comm -- communicator shared by the replay datasets
        """
        self.pid = pid
        self.network = network
        self.flags = flags
        self.comm = comm
        self.share_comm = share_comm
        self.train_data_set = data_sets.DataSet(flags, share_comm)
        # Test buffer only needs enough frames to assemble one state (phi)
        self.test_data_set = data_sets.DataSet(flags, share_comm, max_steps=flags.phi_length * 2)
        self.network.add_train_data_set(self.train_data_set)
        self.epsilon = flags.ep_st
        # Linear epsilon annealing from ep_st to ep_min over ep_decay steps
        if flags.ep_decay != 0:
            self.epsilon_rate = (flags.ep_st - flags.ep_min) / flags.ep_decay
        else:
            self.epsilon_rate = 0
        # global attributes:
        self.epoch_start_time = time.time()
        self.start_index = 0
        self.terminal_index = None
        self.steps_sec_ema = 0  # add to tensorboard per epoch
        self.epoch_time = 0  # add to tensorboard per epoch
        self.state_action_avg_val = 0  # add to tensorboard per epoch
        self.testing_data = None
        self.trained_batch_counter = 0
        self.global_step_counter = 0
        # episode attributes:
        self.step_counter = 0
        self.episode_reward = 0
        self.loss_averages = None
        self.start_time = None
        self.last_action = None
        self.last_img = None
        self.testing = False
        self.episode_counter = 0
        self.total_reward = 0  # add to tensorboard per epoch
        self.reward_per_episode = 0  # add to tensorboard per epoch
def start_episode(self, observation):
"""
This method is called once at the beginning of each episode.
No reward is provided, because reward is only available after
an action has been taken.
Arguments:
observation - height x width numpy array
Returns:
An integer action
"""
self.step_counter = 0
self.episode_reward = 0 # only useful when testing
# We report the mean loss for every episode.
self.loss_averages = []
self.start_time = time.time()
action = np.random.randint(0, self.flags.num_actions) # could be changed to NN.choose_action
self.last_action = action
self.last_img = observation
return action
    def end_episode(self, reward, terminal):
        """
        :param terminal:
            training:
                terminal = True: game over or lost a life
                terminal = False: game is not over but not enough steps
            testing:
                terminal = True: game over
                terminal = False: game is not over but not enough steps
        :param reward: received reward
        """
        self.step_counter += 1
        self.global_step_counter += 1
        episode_time = time.time() - self.start_time
        if self.testing:
            self.episode_reward += reward
            # we do not count this episode if agent running out of steps
            # we do count if episode is finished or this episode is the only episode
            if terminal or self.episode_counter == 0:
                self.episode_counter += 1
                self.total_reward += self.episode_reward
        else:
            # Record the final transition with terminal=True
            self.train_data_set.add_sample(self.last_img, self.last_action, reward, True,
                                           start_index=self.start_index)
        """
        post end episode functions
        """
        self._post_end_episode(terminal)
        # Exponential moving average of steps/second with decay 0.98
        rho = 0.98
        self.steps_sec_ema = rho * self.steps_sec_ema + (1.0 - rho) * (self.step_counter / episode_time)
        # print 'PID:', self.pid, 'steps/second current:{:.2f}, avg:{:.2f}'.format(self.step_counter/episode_time,
        #                                                                          self.steps_sec_ema)
        # NOTE(review): dest=self.flags.threads is presumably the monitor rank -- confirm.
        message = [self.pid, 'speed', [int(self.step_counter / episode_time), int(self.steps_sec_ema)]]
        self.comm.send(message, dest=self.flags.threads)
        if self.loss_averages:  # if not empty
            self.network.episode_summary(np.mean(self.loss_averages))
def _post_end_episode(self, x):
    """Hook invoked at the end of every episode; ``x`` is the terminal flag.

    No-op in the base class; subclasses override it (e.g. to back-propagate
    returns through the replay buffer).
    """
    pass
def choose_action(self, data_set, img, epsilon, reward_received):
    """Record the previous transition, then pick an action epsilon-greedily.

    :param data_set: replay dataset (train or test) that receives the
        (last_img, last_action, reward_received) transition
    :param img: current observation frame
    :param epsilon: exploration probability
    :param reward_received: reward obtained for the previous action
    :return: integer action index
    """
    data_set.add_sample(self.last_img, self.last_action, reward_received, False, start_index=self.start_index)
    if np.random.rand() < epsilon:
        return np.random.randint(0, self.flags.num_actions)
    phi = data_set.phi(img)
    if self.step_counter < self.flags.phi_length:
        # Not enough history yet: pad the stacked state with copies of the
        # current frame.
        # NOTE(review): this fills phi[0:phi_length-1] with the *current*
        # frame only, not the frames seen so far -- confirm intended.
        phi[0:self.flags.phi_length - 1] = np.stack([img for _ in xrange(self.flags.phi_length - 1)], axis=0)
    action = self.network.choose_action(phi)
    return action
def _train(self):
    """Run one training step on the network and return the batch loss."""
    return self.network.train()
def step(self, reward, observation):
    """Take one environment step: store the transition and choose an action.

    Arguments:
        reward - Real valued reward.
        observation - A height x width numpy array uint8
    Returns:
        An integer action.
    """
    self.step_counter += 1
    self.global_step_counter += 1
    # TESTING---------------------------
    if self.testing:
        self.episode_reward += reward
        # Near-greedy evaluation policy (fixed epsilon = 0.01).
        action = self.choose_action(self.test_data_set, observation, .01, reward)
    else:
        # Training--------------------------
        if len(self.train_data_set) > self.flags.train_st:
            # Replay buffer is warm: linearly anneal epsilon down to ep_min,
            # then clamp it to 0.01 after ep_decay_b trained batches.
            self.epsilon = max(self.flags.ep_min, self.epsilon - self.epsilon_rate)
            if self.trained_batch_counter > self.flags.ep_decay_b:
                self.epsilon = 0.01
            action = self.choose_action(self.train_data_set, observation, self.epsilon, reward)
            # Train once every train_fr global steps.
            if self.global_step_counter % self.flags.train_fr == 0:
                loss = self._train()
                self.trained_batch_counter += 1
                self.loss_averages.append(loss)
        else:
            # Still filling the replay buffer: act, but do not train yet.
            action = self.choose_action(self.train_data_set, observation, self.epsilon, reward)
    self.last_action = action
    self.last_img = observation
    return action
def finish_epoch(self, epoch):
    """Checkpoint the model (when enabled) and record the epoch duration.

    :param epoch: epoch number used to tag the saved model
    """
    # save model epoch parameters
    if self.flags.ckpt:
        self.network.epoch_model_save(epoch)
    self.epoch_time = time.time() - self.epoch_start_time
def start_testing(self):
    """Switch the agent into evaluation mode and clear the test statistics."""
    self.testing = True
    self.episode_counter = 0
    self.total_reward = 0
def finish_testing(self, epoch):
    """Leave evaluation mode and emit the epoch's testing statistics.

    :param epoch: current epoch number (used for summaries)
    """
    self.testing = False
    # Fixed held-out batch of states, sampled once from the replay buffer,
    # used to track the mean max-Q value across epochs.
    test_data_size = 3200
    if self.testing_data is None and len(self.train_data_set) > test_data_size:
        imgs, _, _, _ = self.train_data_set.random_batch(test_data_size)
        # Drop the last frame of each stack to obtain the "state" part.
        self.testing_data = imgs[:, :-1, ...]
    if self.testing_data is not None:
        self.state_action_avg_val = np.mean(np.max(self.network.get_action_values(self.testing_data), axis=1))
    # NOTE(review): relies on end_episode's "only episode" rule to guarantee
    # episode_counter >= 1, otherwise this divides by zero.
    self.reward_per_episode = self.total_reward / float(self.episode_counter)
    message = 'PID:{:d} epoch:{:d} total_reward={:.1f} reward_per_episode={:.1f} mean q={:.1f}'.format(
        self.pid, epoch, self.total_reward, self.reward_per_episode, self.state_action_avg_val)
    self.comm.send([-1, 'print', message], dest=self.flags.threads)
    self.network.epoch_summary(epoch, self.epoch_time, self.state_action_avg_val, self.total_reward,
                               self.reward_per_episode)
    self.epoch_start_time = time.time()
def finish_everything(self):
    """Shut down cleanly: stop the feeding thread, notify the master rank,
    and wait on the final barrier."""
    self.network.stop_feeding()
    self.comm.send([self.pid, 'END', ''], dest=self.flags.threads)
    self.comm.Barrier()
class OptimalityTigheningAgent(QLearning):
    """Q-learning agent using optimality-tightening upper/lower bounds.

    NOTE(review): "Tightening" is misspelled in the class name; kept as-is
    for compatibility with existing callers.
    """

    def __init__(self, pid, network, flags, comm, share_comm):
        super(OptimalityTigheningAgent, self).__init__(pid, network, flags, comm, share_comm)
        # Replay dataset that additionally stores discounted returns and the
        # terminal index of each step, needed for the bound-based targets.
        self.train_data_set = data_sets.OptimalityTighteningDataset(flags)
        self.network.add_train_data_set(self.train_data_set)

    def _post_end_episode(self, terminal):
        """Back-propagate discounted returns through the finished episode.

        Walks the circular replay buffer backwards from the episode's last
        step, filling in return_value and terminal_index for every step.
        """
        if self.testing:
            return
        if terminal:
            q_return = 0.0
        else:
            # Episode was cut off before a true terminal: bootstrap the tail
            # return from the network's value of the last observation.
            phi = self.train_data_set.phi(self.last_img)
            phi = np.expand_dims(phi, 0)
            q_return = np.mean(self.network.get_action_values(phi))
        self.start_index = self.train_data_set.top
        self.terminal_index = index = (self.start_index - 1) % self.train_data_set.max_steps
        # Accumulate the discounted return backwards until the previous
        # episode's terminal (or the buffer bottom) is reached.
        while True:
            q_return = q_return * self.flags.discount + self.train_data_set.rewards[index]
            self.train_data_set.return_value[index] = q_return
            self.train_data_set.terminal_index[index] = self.terminal_index
            index = (index - 1) % self.train_data_set.max_steps
            if self.train_data_set.terminal[index] or index == self.train_data_set.bottom:
                break

    def deprecated_train(self):
        """Deprecated training step that builds targets from forward (upper)
        and backward (lower) optimality bounds around each sampled frame.

        :returns: the loss from one network training call
        """
        if self.flags.close2:
            self.train_data_set.random_batch_with_close_bounds(self.flags.batch)
        else:
            pass
        # Evaluate the target network on all forward and backward neighbour
        # frames in a single batched call.
        target_q_imgs = np.concatenate((self.train_data_set.forward_imgs, self.train_data_set.backward_imgs), axis=1)
        target_q_imgs = np.reshape(target_q_imgs, (-1,) + target_q_imgs.shape[2:])
        """here consider center image's target too as a lower bound"""
        target_q_table = self.network.get_action_values_old(target_q_imgs)
        target_q_table = np.reshape(target_q_table, (self.flags.batch, -1) + (target_q_table.shape[-1], ))
        q_values = self.network.get_action_values_given_actions(self.train_data_set.center_imgs, self.train_data_set.center_actions)
        states1 = np.zeros((self.flags.batch, self.flags.phi_length, self.flags.input_height, self.flags.input_width), dtype='uint8')
        actions1 = np.zeros((self.flags.batch, ), dtype='int32')
        targets1 = np.zeros((self.flags.batch, ), dtype='float32')
        """
        0 1 2 3* 4 5 6 7 8 V_R
        0 1 2 4 5 6 7 8 V_R
        V0 = r3 + y*Q4; V1 = r3 +y*r4 + y^2*Q5
        Q2 -r2 = Q3*y; Q1 - r1 - y*r2 = y^2*Q3
        V-1 = (Q2 - r2) / y; V-2 = (Q1 - r1 - y*r2)/y^2; V-3 = (Q0 -r0 -y*r1 - y^2*r2)/y^3
        r1 + y*r2 = R1 - y^2*R3
        Q1 = r1+y*r2 + y^2*Q3
        """
        for i in xrange(self.flags.batch):
            q_value = q_values[i]
            center_position = int(self.train_data_set.center_positions[i])
            if self.train_data_set.terminal.take(center_position, mode='wrap'):
                # Terminal frame: the target is simply the stored return.
                states1[i] = self.train_data_set.center_imgs[i]
                actions1[i] = self.train_data_set.center_actions[i]
                targets1[i] = self.train_data_set.center_return_values[i]
                continue
            forward_targets = np.zeros(self.flags.nob, dtype=np.float32)
            backward_targets = np.zeros(self.flags.nob, dtype=np.float32)
            for j in xrange(self.flags.nob):
                if j > 0 and self.train_data_set.forward_positions[i, j] == center_position + 1:
                    forward_targets[j] = q_value
                else:
                    forward_targets[j] = self.train_data_set.center_return_values[i] - \
                                         self.train_data_set.forward_return_values[i, j] * \
                                         self.train_data_set.forward_discounts[i, j] + \
                                         self.train_data_set.forward_discounts[i, j] * \
                                         np.max(target_q_table[i, j])
                if self.train_data_set.backward_positions[i, j] == center_position + 1:
                    backward_targets[j] = q_value
                else:
                    backward_targets[j] = (-self.train_data_set.backward_return_values[i, j] +
                                           self.train_data_set.backward_discounts[i, j] *
                                           self.train_data_set.center_return_values[i] +
                                           target_q_table[i, self.flags.nob + j,
                                                          self.train_data_set.backward_actions[i, j]]) / \
                                          self.train_data_set.backward_discounts[i, j]
            # The stored return itself is an additional lower bound.
            forward_targets = np.append(forward_targets, self.train_data_set.center_return_values[i])
            v0 = v1 = forward_targets[0]
            v_max = np.max(forward_targets[1:])
            v_min = np.min(backward_targets)
            # Only correct the target when the current Q-value violates one
            # of the (margin 0.1) bounds.
            if v_max - 0.1 > q_value > v_min + 0.1:
                v1 = v_max * 0.5 + v_min * 0.5
            elif v_max - 0.1 > q_value:
                v1 = v_max
            elif v_min + 0.1 < q_value:
                v1 = v_min
            states1[i] = self.train_data_set.center_imgs[i]
            actions1[i] = self.train_data_set.center_actions[i]
            # Blend the one-step target (v0) with the bound correction (v1).
            targets1[i] = v0 * self.flags.pw + (1 - self.flags.pw) * v1
        return self.network.train(states1, actions1, targets1)
|
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Landing page that lists every weekday webtoon title.
URL_TOTAL_LIST = 'https://comic.naver.com/webtoon/weekday.nhn'

response = requests.get(URL_TOTAL_LIST)
soup = BeautifulSoup(response.text, 'html.parser')

# Every title link on the page carries class="title".
class_title_a_list = soup.select('a.title')

title_list = []
for a in class_title_a_list:
    a_href = a.get('href')  # a['href']; relative path such as /webtoon/list?...
    a_text = a.get_text()
    # urljoin avoids the double slash produced by the previous naive
    # concatenation ('https://comic.naver.com/' + '/webtoon/...').
    result = f'{a_text} ({urljoin(URL_TOTAL_LIST, a_href)})'
    title_list.append(result)

print('\n'.join(title_list))
|
# Copyright 2018 www.privaz.io Valletech AB
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import environ

# Check if test stub features are enabled via the PYONE_TEST_FIXTURE
# environment variable (accepts "1", "yes" or "true", case-insensitively;
# anything else, or an unset variable, disables the fixture).
_test_fixture = (environ.get("PYONE_TEST_FIXTURE", "False").lower() in ["1", "yes", "true"])

if _test_fixture is False:
    from . import OneServer
else:
    # Swap in the recording/playback test double under the same name, so
    # importers of OneServer are unaware of the substitution.
    from pyone.tester import OneServerTester as OneServer
|
# -*- coding: utf-8 -*-
import Ouroboros
import math
import Math
import time
import random
from OURODebug import *
import GlobalDefine
import data_entities
class Motion:
    """
    Movement related package.

    Mixin that drives an entity around the map: random wandering, moving to
    a position or another entity, and translating engine movement callbacks
    into motion-state (and animation-state) changes.
    """

    def __init__(self):
        # Earliest time (epoch seconds) the next random walk may start.
        self.nextMoveTime = int(time.time() + random.randint(5, 15))
        # Default walk speed (declared in the .def file).
        self.moveSpeed = self.getDatas()['moveSpeed']
        entityData = data_entities.data.get(self.uid)
        # Per-entity walk speed overrides the default when configured.
        if entityData != None:
            self.moveSpeed = entityData['moveSpeed']
        self.lastPosition = None

    def stopMotion(self):
        """
        Stop moving.
        """
        if self.isMoving:
            # INFO_MSG("%i stop motion." % self.id)
            self.cancelController("Movement")
            self.isMoving = False
            self.onMotionStateChanged(GlobalDefine.ENTITY_MOTION_STATE_STATIONARY)

    def randomWalk(self, basePos):
        """
        Randomly move the entity around basePos.

        :param basePos: center point of the wandering area
        :returns: True when a new walk was started, False otherwise
        """
        if self.isMoving:
            return False
        if time.time() < self.nextMoveTime:
            return False
        while True:
            # The moving radius is within 30 meters.
            if self.canNavigate():
                destPos = self.getRandomPoints(basePos, 30.0, 1, 0)
                if len(destPos) == 0:
                    self.nextMoveTime = int(time.time() + random.randint(5, 15))
                    return False
                destPos = destPos[0]
            else:
                # No navmesh: pick a random polar offset around basePos.
                # NOTE(review): one random sample is reused for both radius
                # and angle, so destinations lie on a spiral instead of being
                # uniformly distributed -- confirm whether that is intended.
                rnd = random.random()
                a = 30.0 * rnd  # radius within 30 meters
                b = 360.0 * rnd  # random angle
                x = a * math.cos(b)
                z = a * math.sin(b)
                destPos = (basePos.x + x, basePos.y, basePos.z + z)
            if self.position.distTo(destPos) < 2.0:
                continue  # destination too close, pick another point
            self.gotoPosition(destPos)
            self.isMoving = True
            self.onMotionStateChanged(GlobalDefine.ENTITY_MOTION_STATE_WALK)
            self.nextMoveTime = int(time.time() + random.randint(5, 15))
            break
        return True

    def resetSpeed(self):
        """Re-read the walk speed: default first, then per-entity override."""
        # Default walk speed (declared in the .def file).
        walkSpeed = self.getDatas()["moveSpeed"]
        # Bug fix: the per-entity speed used to be read *before* the default
        # and was therefore always overwritten. Mirror __init__'s order so
        # the entity-specific value wins when present.
        entityData = data_entities.data.get(self.uid)
        if entityData != None:
            walkSpeed = entityData['moveSpeed']
        if walkSpeed != self.moveSpeed:
            self.moveSpeed = walkSpeed

    def backSpawnPos(self):
        """
        virtual method.
        Run back to the spawn position.
        """
        INFO_MSG("%s::backSpawnPos: %i, pos=%s, speed=%f." % \
                 (self.getScriptName(), self.id, self.spawnPos, self.moveSpeed))
        self.resetSpeed()
        self.onMotionStateChanged(GlobalDefine.ENTITY_MOTION_STATE_RUN)
        self.gotoPosition(self.spawnPos)

    def gotoEntity(self, targetID, dist=0.0):
        """
        virtual method.
        Move to the entity location.

        :param targetID: id of the entity to walk to
        :param dist: stop once within this distance of the target
        """
        if self.isMoving:
            self.stopMotion()
        entity = Ouroboros.entities.get(targetID)
        if entity is None:
            # Bug fix: the format string has two placeholders but only one
            # argument was supplied, raising TypeError instead of logging.
            DEBUG_MSG("%s::gotoEntity: not found targetID=%i" % (self.getScriptName(), targetID))
            return
        if entity.position.distTo(self.position) <= dist:
            return  # already close enough
        self.isMoving = True
        self.onMotionStateChanged(GlobalDefine.ENTITY_MOTION_STATE_WALK)
        self.moveToEntity(targetID, self.moveSpeed, dist, None, True, False)

    def gotoPosition(self, position, dist=0.0):
        """
        virtual method.
        Move to a location.

        :param position: destination point
        :param dist: stop this far short of the destination
        """
        if self.isMoving:
            self.stopMotion()
        if self.position.distTo(position) <= 0.05:
            return  # effectively already there
        self.isMoving = True
        self.onMotionStateChanged(GlobalDefine.ENTITY_MOTION_STATE_WALK)
        speed = self.moveSpeed
        DEBUG_MSG("speed %f %f" % (speed, self.moveSpeed))
        if self.canNavigate():
            self.navigate(Math.Vector3(position), speed, dist, speed, 512.0, 1, 0, None)
        else:
            if dist > 0.0:
                # Stop ``dist`` short of the target along the approach vector.
                destPos = Math.Vector3(position) - self.position
                destPos.normalise()
                destPos *= dist
                destPos = position - destPos
            else:
                destPos = Math.Vector3(position)
            self.moveToPoint(destPos, speed, 0, None, 1, False)

    def getStopPoint(self, yaw=None, rayLength=100.0):
        """
        Cast a ray ahead of the entity and return the hit point, or the
        ray's end point when nothing is hit.
        """
        if yaw is None:
            yaw = self.yaw
        # NOTE(review): halving the yaw before building the direction vector
        # looks suspicious -- confirm against the engine's yaw convention.
        yaw = (yaw / 2)
        vv = Math.Vector3(math.sin(yaw), 0, math.cos(yaw))
        vv.normalise()
        vv *= rayLength
        lastPos = self.position + vv
        pos = Ouroboros.raycast(self.spaceID, self.layer, self.position, vv)
        if pos == None:
            pos = lastPos
        return pos

    # --------------------------------------------------------------------------------------------
    # Custom Moving
    # --------------------------------------------------------------------------------------------
    def onTimer(self, tid, userArg):
        # Periodic tick: refresh isMoving from the observed position.
        # (Removed a stray debug ``print('i')`` left over from development.)
        self.moveCheck()

    def moveCheck(self):
        """Derive isMoving by comparing the position against the last tick."""
        # (Removed a stray debug print of the positions.)
        self.isMoving = self.position != self.lastPosition
        self.lastPosition = self.position

    # --------------------------------------------------------------------------------------------
    # Callbacks
    # --------------------------------------------------------------------------------------------
    def onMove(self, controllerId, userarg):
        """
        Ouroboros method.
        Use any of the engine's mobile-related interfaces to call this interface once the entity has completed a move
        """
        self.isMoving = True

    def onMoveFailure(self, controllerId, userarg):
        """
        Ouroboros method.
        Called by the engine when a movement request fails.
        """
        DEBUG_MSG("%s::onMoveFailure: %i controllerId =%i, userarg=%s" % \
                  (self.getScriptName(), self.id, controllerId, userarg))
        self.isMoving = False
        self.onMotionStateChanged(GlobalDefine.ENTITY_MOTION_STATE_STATIONARY)

    def onMoveOver(self, controllerId, userarg):
        """
        Ouroboros method.
        Use any of the engine's mobile-related interfaces to call this interface at the end of the entity move
        """
        self.isMoving = False
        self.onMotionStateChanged(GlobalDefine.ENTITY_MOTION_STATE_STATIONARY)

    # --------------------------------------------------------------------------------------------
    # State handling
    # --------------------------------------------------------------------------------------------
    def onMotionStateChanged(self, toState):
        # Record the new motion state and derive the matching animation.
        self.motionState = toState
        self.onCalculateAnimationMove(toState)

    def onCalculateAnimationMove(self, motionState):
        """Map a motion state to the corresponding animation state."""
        if motionState == GlobalDefine.ENTITY_MOTION_STATE_STATIONARY:
            self.currentState = GlobalDefine.ENTITY_ANIMATION_STATE_IDLE
        elif motionState == GlobalDefine.ENTITY_MOTION_STATE_WALK:
            self.currentState = GlobalDefine.ENTITY_ANIMATION_STATE_WALK
        elif motionState == GlobalDefine.ENTITY_MOTION_STATE_RUN:
            self.currentState = GlobalDefine.ENTITY_ANIMATION_STATE_RUN
|
"""
Utility functions
=================
This module defines some utility functions that can be used in multiple
unrelated parts of the code, such as error reporting.
"""
from __future__ import print_function
import sys
import functools
def terminate_program(err_msg, ret_code=1):
    """Terminates the current program.

    Prints a fatal-error banner and the given message to stderr, then
    exits with the given return code.

    :param err_msg: The error message to be printed out
    :param ret_code: The return code for the termination of the program
    """
    banner = '*' * 80
    for line in ('', banner, 'FATAL ERROR!', banner, '', err_msg, ''):
        print(line, file=sys.stderr)
    sys.exit(ret_code)
def format_vector(vec, float_format='%11.6f'):
    """Formats a vector into the pov-ray format.

    :param vec: A triple of floating point numbers
    :param float_format: The printf-style format used for each component
    :returns: A string for the formatted vector, e.g. ``<  1.0,   2.0>``-style
    """
    # ``range`` (not the Python-2-only ``xrange``) keeps this working on
    # both Python 2 and Python 3; only the first three components are used.
    return '<' + ', '.join(float_format % vec[i] for i in range(3)) + '>'
|
import olca_schema as o
import unittest
class EnumConvTest(unittest.TestCase):
    """Round-trip checks for olca-schema enum <-> string conversion."""

    def test_flow_type(self):
        raw: str = o.FlowType.ELEMENTARY_FLOW.value
        self.assertEqual('ELEMENTARY_FLOW', raw)
        # Both index access and the get() helper resolve the string back.
        self.assertEqual(o.FlowType.ELEMENTARY_FLOW, o.FlowType[raw])
        self.assertEqual(o.FlowType.ELEMENTARY_FLOW, o.FlowType.get(raw))
        # A dict round trip keeps the enum under its camelCase JSON field.
        flow = o.Flow(flow_type=o.FlowType.ELEMENTARY_FLOW)
        as_dict = flow.to_dict()
        self.assertEqual(raw, as_dict['flowType'])
        restored = o.Flow.from_dict(as_dict)
        self.assertEqual(o.FlowType.ELEMENTARY_FLOW, restored.flow_type)

    def test_process_type(self):
        raw: str = o.ProcessType.UNIT_PROCESS.value
        self.assertEqual('UNIT_PROCESS', raw)
        self.assertEqual(o.ProcessType.UNIT_PROCESS, o.ProcessType[raw])
        self.assertEqual(o.ProcessType.UNIT_PROCESS,
                         o.ProcessType.get(raw))
        # A dict round trip keeps the enum under its camelCase JSON field.
        process = o.Process(process_type=o.ProcessType.UNIT_PROCESS)
        as_dict = process.to_dict()
        self.assertEqual(raw, as_dict['processType'])
        restored = o.Process.from_dict(as_dict)
        self.assertEqual(o.ProcessType.UNIT_PROCESS, restored.process_type)
|
"""Module about the main application."""
import enum
import numpy as np
import vtk
from qtpy import QtCore
from qtpy.QtCore import QSize
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import (QToolButton, QButtonGroup,
QFileDialog)
from .utils import DefaultFunction
from .element import ElementId
from .selector import Symmetry, SymmetrySelector
from .grid import Grid
from .plane import Plane
from .block import Block
from .intersection import Intersection
from .interactive_plotter import InteractivePlotter
from .setting import SettingDialog, ColorButton
from .help import HelpDialog
@enum.unique
class BlockMode(enum.Enum):
    """List the modes available in MainPlotter."""

    # Explicit values match what enum.auto() assigned (1, 2).
    BUILD = 1    # place blocks under the cursor
    DELETE = 2   # remove blocks under the cursor
@enum.unique
class Action(enum.Enum):
    """List the actions available in MainPlotter."""

    # Explicit values match what enum.auto() assigned (1..5).
    RESET = 1
    IMPORT = 2
    EXPORT = 3
    SETTING = 4
    HELP = 5
@enum.unique
class Toggle(enum.Enum):
    """List the toggles available in MainPlotter."""

    # Explicit values match what enum.auto() assigned (1, 2).
    AREA = 1    # rectangular area selection
    EDGES = 2   # block edge visibility
class MainPlotter(InteractivePlotter):
    """Main application."""

    def __init__(self, params, parent=None, testing=False):
        """Initialize the MainPlotter.

        :param params: nested configuration dict (unit, dimensions,
            block/builder settings, ...)
        :param parent: optional parent widget
        :param testing: forwarded to InteractivePlotter (CI support)
        """
        super().__init__(params, parent=parent, testing=testing)
        self.unit = self.params["unit"]
        self.dimensions = self.params["dimensions"]
        self.default_block_color = self.params["block"]["color"]
        self.icon_size = self.params["builder"]["toolbar"]["icon_size"]
        self.button_pressed = False
        self.button_released = False
        self.area_selection = False
        self.floor = None
        self.ceiling = None
        self.icons = None
        self.toolbar = None
        self.current_block_mode = None
        self.mode_functions = None
        self.set_dimensions(self.dimensions)
        # configuration
        self.show()
        self.load_elements()
        self.add_elements()
        self.load_block_modes()
        self.load_icons()
        self.load_toolbar()
        self.load_dialogs()
        self.selector.hide()
        self.update_camera()
        self.render_scene()

    def update_camera(self):
        """Update the internal camera."""
        # Keep the camera aimed at the grid center before the base update.
        self.set_focal_point(self.grid.center)
        super().update_camera()

    def move_camera(self, update, inverse=False):
        """Trigger a pick when moving the camera."""
        super().move_camera(update, inverse)
        # Re-pick under the cursor so the selector follows the new view.
        x, y = self.interactor.GetEventPosition()
        self.picker.Pick(x, y, 0, self.renderer)
        self.render_scene()

    def translate_camera(self, tr):
        """Translate the camera."""
        # The grid moves with the camera so the working plane stays in view.
        self.grid.translate(tr)
        self.set_focal_point(self.grid.center)
        super().translate_camera(tr)

    def on_mouse_move(self, vtk_picker, event):
        """Process mouse move events."""
        x, y = vtk_picker.GetEventPosition()
        self.picker.Pick(x, y, 0, self.renderer)

    def on_mouse_wheel_forward(self, vtk_picker, event):
        """Process mouse wheel forward events."""
        # Raise the working plane by one unit, clamped at the ceiling.
        tr = np.array([0., 0., self.unit])
        if self.grid.origin[2] < self.ceiling:
            self.translate_camera(tr)
        self.render_scene()

    def on_mouse_wheel_backward(self, vtk_picker, event):
        """Process mouse wheel backward events."""
        # Lower the working plane by one unit, clamped at the floor.
        tr = np.array([0., 0., -self.unit])
        if self.grid.origin[2] > self.floor:
            self.translate_camera(tr)
        self.render_scene()

    def on_mouse_left_press(self, vtk_picker, event):
        """Process mouse left button press events."""
        x, y = vtk_picker.GetEventPosition()
        self.button_pressed = True
        self.picker.Pick(x, y, 0, self.renderer)

    def on_mouse_left_release(self, vtk_picker, event):
        """Process mouse left button release events."""
        x, y = vtk_picker.GetEventPosition()
        self.button_released = True
        self.picker.Pick(x, y, 0, self.renderer)
        self.button_pressed = False

    def on_pick(self, vtk_picker, event):
        """Process pick events."""
        # Dispatch to use_build_mode / use_delete_mode via the mode table.
        func = self.mode_functions.get(self.current_block_mode, None)
        func(vtk_picker)

    def load_block_modes(self):
        """Load the block modes."""
        self.set_block_mode(BlockMode.BUILD)
        # Map each mode to its handler by naming convention
        # (BlockMode.BUILD -> use_build_mode, ...).
        self.mode_functions = dict()
        for mode in BlockMode:
            func_name = "use_{}_mode".format(mode.name.lower())
            self.mode_functions[mode] = getattr(self, func_name)

    def add_element(self, element):
        """Add an element to the scene."""
        actor = self.add_mesh(**element.plotting)
        # Cross-link actor and element so picks can be resolved back.
        element.actor = actor
        actor.element_id = element.element_id

    def add_elements(self):
        """Add all the default elements to the scene."""
        self.add_element(self.block)
        self.add_element(self.grid)
        self.add_element(self.plane)
        self.add_element(self.selector)
        self.add_element(self.selector.selector_x)
        self.add_element(self.selector.selector_y)
        self.add_element(self.selector.selector_xy)

    def remove_element(self, element):
        """Remove an element from the scene."""
        self.renderer.RemoveActor(element.actor)
        element.actor = None

    def remove_elements(self):
        """Remove all the default elements of the scene."""
        self.remove_element(self.block)
        self.remove_element(self.grid)
        self.remove_element(self.plane)
        self.remove_element(self.selector)
        self.remove_element(self.selector.selector_x)
        self.remove_element(self.selector.selector_y)
        self.remove_element(self.selector.selector_xy)

    def load_elements(self):
        """Load the default elements."""
        self.block = Block(self.params, self.dimensions)
        self.grid = Grid(self.params, self.dimensions)
        self.plane = Plane(self.params, self.dimensions)
        self.selector = SymmetrySelector(self.params, self.dimensions)

    def load_icons(self):
        """Load the icons.

        The resource configuration file ``blockbuilder/icons/blockbuilder.qrc``
        describes the location of the resources in the filesystem and
        also defines aliases for their use in the code.

        To automatically generate the resource file in ``blockbuilder/icons``:
            pyrcc5 -o resources.py blockbuilder.qrc
        """
        self.icons = dict()
        for category in (BlockMode, Toggle, Symmetry, Action):
            for element in category:
                # Icon aliases match the lowercase enum member names.
                icon_resource = ":/{}.svg".format(element.name.lower())
                self.icons[element] = QIcon(icon_resource)

    def _add_toolbar_group(self, group, func, default_value):
        # Add a mutually-exclusive group of checkable buttons, one per
        # member of ``group``; ``func`` is called with the member on toggle.
        button_group = QButtonGroup(parent=self.toolbar)
        for element in group:
            icon = self.icons.get(element, None)
            button = QToolButton()
            button.setFixedSize(QSize(*self.icon_size))
            button.setIcon(icon)
            button.setCheckable(True)
            if default_value is not None and element is default_value:
                button.setChecked(True)
            # DefaultFunction binds ``element`` as the callback argument.
            button.toggled.connect(DefaultFunction(func, element))
            button_group.addButton(button)
            self.toolbar.addWidget(button)

    def _add_toolbar_actions(self):
        # One push button per Action, wired by naming convention
        # (Action.RESET -> action_reset, ...).
        for action in Action:
            icon = self.icons.get(action, None)
            button = QToolButton()
            button.setFixedSize(QSize(*self.icon_size))
            button.setIcon(icon)
            func_name = "action_{}".format(action.name.lower())
            func = getattr(self, func_name, None)
            button.clicked.connect(func)
            self.toolbar.addWidget(button)

    def _add_toolbar_toggles(self):
        # One checkable button per Toggle, initialized from the params and
        # wired by naming convention (Toggle.AREA -> toggle_area, ...).
        for toggle in Toggle:
            icon = self.icons.get(toggle, None)
            button = QToolButton()
            button.setFixedSize(QSize(*self.icon_size))
            button.setIcon(icon)
            button.setCheckable(True)
            toggle_name = toggle.name.lower()
            default_value = self.params["builder"]["toggles"][toggle_name]
            func_name = "toggle_{}".format(toggle_name)
            func = getattr(self, func_name, None)
            assert callable(func)
            button.toggled.connect(func)
            button.setChecked(default_value)
            # Apply the default immediately in case setChecked did not fire.
            func(default_value)
            self.toolbar.addWidget(button)

    def _add_toolbar_color_button(self):
        # Color picker that drives the color of newly-built blocks.
        self.color_button = ColorButton()
        self.color_button.setFixedSize(QSize(*self.icon_size))
        self.color_button.colorChanged.connect(self.set_block_color)
        self.toolbar.addWidget(self.color_button)
        self.set_block_color(self.default_block_color, is_int=False)
        self.color_button.setColor(self.default_block_color, is_int=False)

    def load_toolbar(self):
        """Initialize the toolbar."""
        self.toolbar = self.addToolBar("toolbar")
        toolbar_areas = self.params["builder"]["toolbar"]["area"]["range"]
        toolbar_area = self.params["builder"]["toolbar"]["area"]["value"]
        self.addToolBar(
            _get_toolbar_area(toolbar_area, toolbar_areas),
            self.toolbar,
        )
        self.toolbar.setIconSize(QSize(*self.icon_size))
        self._add_toolbar_color_button()
        self.toolbar.addSeparator()
        self._add_toolbar_group(
            group=BlockMode,
            func=self.set_block_mode,
            default_value=BlockMode.BUILD,
        )
        self.toolbar.addSeparator()
        self._add_toolbar_toggles()
        self.toolbar.addSeparator()
        self._add_toolbar_group(
            group=Symmetry,
            func=self.set_symmetry,
            default_value=Symmetry.SYMMETRY_NONE,
        )
        self.toolbar.addSeparator()
        self._add_toolbar_actions()

    def load_dialogs(self):
        """Load the dialogs."""
        # export dialog
        self.export_dialog = QFileDialog(self)
        self.export_dialog.setWindowTitle("Export")
        self.export_dialog.setNameFilter("Blockset (*.vts *.vtk)")
        self.export_dialog.setWindowIcon(self.icons[Action.EXPORT])
        # XXX: Fails on CI if modal
        # self.export_dialog.setModal(True)
        # import dialog
        self.import_dialog = QFileDialog(self)
        self.import_dialog.setNameFilter("Blockset (*.vts *.vtk)")
        self.import_dialog.setWindowTitle("Import")
        self.import_dialog.setWindowIcon(self.icons[Action.IMPORT])
        # XXX: Fails on CI if modal
        # self.import_dialog.setModal(True)
        # setting dialog
        self.setting_dialog = SettingDialog(self.params, self)
        self.setting_dialog.setWindowIcon(self.icons[Action.SETTING])
        # help dialog: descriptions are listed in the same order as the
        # icon categories (BlockMode, Toggle, Symmetry, Action).
        short_desc = [
            "Build mode",
            "Delete mode",
            "Area selection",
            "Edge visibility",
            "Symmetry Off",
            "Symmetry X",
            "Symmetry Y",
            "Symmetry XY",
            "Reset",
            "Import",
            "Export",
            "Setting",
            "Help",
        ]
        long_desc = [
            "Enable the build mode",
            "Enable the delete mode",
            "Toggle the area selection",
            "Toggle the edge visibility",
            "Disable the symmetry",
            "Enable symmetry along the X axis",
            "Enable symmetry along the Y axis",
            "Enable symmetry along X and Y axis",
            "Reset the scene",
            "Import a blockset",
            "Export a blockset",
            "Open the setting dialog",
            "Open the help dialog",
        ]
        self.help_dialog = HelpDialog(self.icons, self.icon_size, short_desc,
                                      long_desc, self)
        self.help_dialog.setWindowIcon(self.icons[Action.HELP])

    def set_dimensions(self, dimensions):
        """Set the current dimensions."""
        self.dimensions = np.asarray(dimensions)
        self.floor = 0.
        self.ceiling = (self.dimensions[2] - 2) * self.unit
        # Camera distance and its allowed range scale with the scene size.
        self.distance = np.max(self.dimensions) * 2 * self.unit
        self.distance_rng = [4 * self.unit, 2 * self.distance]

    def set_symmetry(self, value):
        """Set the current symmetry."""
        self.selector.set_symmetry(value)

    def set_block_mode(self, value=None):
        """Set the current block mode.

        Called with ``value=None`` to re-apply the current mode (e.g. after
        the elements have been reloaded).
        """
        if value is None:
            value = self.current_block_mode
        else:
            self.current_block_mode = value
        self.grid.set_block_mode(value)
        self.selector.set_block_mode(value)
        self.render_scene()

    def set_block_color(self, value=None, is_int=True):
        """Set the current block color.

        NOTE(review): ``is_int`` is accepted for signature compatibility
        with ColorButton.colorChanged but is not forwarded to
        ``Block.set_color`` here -- confirm whether that is intended.
        """
        self.block.set_color(value)

    def use_delete_mode(self, vtk_picker):
        """Use the delete mode."""
        self._build_or_delete(vtk_picker, self.block.remove)

    def use_build_mode(self, vtk_picker):
        """Use the build mode."""
        self._build_or_delete(vtk_picker, self.block.add)

    def _build_or_delete(self, vtk_picker, operation):
        # Shared implementation of build/delete: resolve the pick to grid
        # coordinates, then apply ``operation`` to the selected cell(s).
        intersection = Intersection(vtk_picker)
        if not intersection.exist():
            # Nothing under the cursor: hide the selector and bail out.
            self.selector.hide()
            self.selector.reset_area()
            self.render_scene()
            return
        if not intersection.element(ElementId.GRID):
            return
        # Convert the picked world point into integer grid coordinates on
        # the current working plane.
        grid_ipoint = intersection.point(ElementId.GRID)
        coords = np.floor(grid_ipoint / self.unit)
        coords[2] = self.grid.origin[2] / self.unit
        self.coords = coords
        self.selector.select(coords)
        self.selector.show()
        if self.area_selection:
            if self.button_released:
                # Drag finished: apply the operation to the whole area when
                # both corners were set, otherwise to the single selection.
                first_set = self.selector.get_first_coords() is not None
                last_set = self.selector.get_last_coords() is not None
                if first_set and last_set:
                    for area in self.selector.selection_area():
                        operation(area)
                elif first_set and not last_set:
                    for coords in self.selector.selection():
                        operation(coords)
                self.selector.reset_area()
                self.button_released = False
            elif self.button_pressed:
                # Dragging: the first pick fixes one corner, subsequent
                # picks move the opposite corner.
                if self.selector.get_first_coords() is None:
                    self.selector.set_first_coords(coords)
                else:
                    self.selector.set_last_coords(coords)
                    area = (
                        self.selector.get_first_coords(),
                        self.selector.get_last_coords(),
                    )
                    self.selector.select_area(area)
        else:
            # Single-cell mode: apply the operation while the button is down.
            if self.button_pressed:
                for coords in self.selector.selection():
                    operation(coords)
        self.render_scene()

    def action_reset(self, unused):
        """Reset the block properties."""
        del unused
        self.block.remove_all()
        self.set_block_color(self.default_block_color, is_int=False)
        self.color_button.setColor(self.default_block_color, is_int=False)
        self.render_scene()

    def action_import(self, value=None):
        """Import an external blockset.

        :param value: a bool (from the button click) opens the file dialog;
            a str is treated as the filename directly.
        """
        def _import(filename):
            if len(filename) == 0:
                raise ValueError("The input filename string is empty")
            reader = vtk.vtkXMLStructuredGridReader()
            reader.SetFileName(filename)
            reader.Update()
            mesh = reader.GetOutput()
            dimensions = mesh.GetDimensions()
            imported_block = Block(self.params, dimensions, mesh)
            if all(np.equal(dimensions, self.dimensions)):
                # Same size: merge directly.
                self.block.merge(imported_block)
            else:
                # Different size: grow the scene to the element-wise max of
                # both dimensions when needed, then merge.
                final_dimensions = [
                    self.block.dimensions,
                    imported_block.dimensions
                ]
                final_dimensions = np.max(final_dimensions, axis=0)
                if all(np.equal(self.dimensions, final_dimensions)):
                    self.block.merge(imported_block)
                else:
                    # Rebuild all elements at the new dimensions.
                    self.remove_elements()
                    old_block = self.block
                    self.set_dimensions(final_dimensions)
                    self.load_elements()
                    self.add_elements()
                    # restore edge visibility
                    self.block.toggle_edges(old_block.show_edges)
                    # restore block mode
                    self.set_block_mode()
                    self.block.merge(old_block)
                    self.block.merge(imported_block)
            self.selector.hide()
            self.update_camera()
            self.render_scene()
        if isinstance(value, bool):
            self.import_dialog.fileSelected.connect(_import)
            self.import_dialog.show()
        elif isinstance(value, str):
            _import(value)
        else:
            raise TypeError("Expected type for ``filename``is ``str``"
                            " but {} was given.".format(type(value)))

    def action_export(self, value=None):
        """Export the internal blockset.

        :param value: a bool (from the button click) opens the file dialog;
            a str is treated as the filename directly.
        """
        def _export(filename):
            if len(filename) == 0:
                raise ValueError("The output filename string is empty")
            writer = vtk.vtkXMLStructuredGridWriter()
            writer.SetFileName(filename)
            writer.SetInputData(self.block.mesh)
            writer.Write()
        if isinstance(value, bool):
            self.export_dialog.fileSelected.connect(_export)
            self.export_dialog.show()
        elif isinstance(value, str):
            _export(value)
        else:
            raise TypeError("Expected type for ``filename``is ``str``"
                            " but {} was given.".format(type(value)))

    def action_setting(self, value=None):
        """Open the settings menu."""
        del value
        self.setting_dialog.show()

    def action_help(self, value=None):
        """Display the help menu."""
        del value
        self.help_dialog.show()

    def toggle_area(self, value):
        """Toggle area selection."""
        self.area_selection = value

    def toggle_edges(self, value):
        """Toggle the edge visibility."""
        self.block.toggle_edges(value)
        self.render_scene()
def _get_toolbar_area(area, areas):
if not isinstance(area, str):
raise TypeError("Expected type for ``area`` is ``str`` but {}"
" was given.".format(type(area)))
if area not in areas:
raise ValueError("Expected value for ``area`` in"
" {} but {} was given.".format(areas, area))
area = list(area)
area[0] = area[0].upper()
area = ''.join(area)
area = area + 'ToolBarArea'
return getattr(QtCore.Qt, area)
|
from rpgplayer import Player
import random

# The single player this demo script operates on.
p = Player('Ricardo')
class item:
    """A generic inventory item.

    :param name: display name
    :param cost: purchase cost
    :param weight: carry weight
    :param type: optional item category string; defaults to None
    """

    def __init__(self, name, cost, weight, type=None):
        self.name = name
        self.cost = cost
        self.weight = weight
        # Bug fix: the original had no ``type`` parameter, so this line
        # stored the *builtin* ``type`` function on every item.
        self.type = type
class weapon(item):
    """An equippable item that additionally carries an attack value.

    :param attack: attack bonus granted while equipped
    :param type: weapon category code (e.g. 'p')
    """

    def __init__(self, name, cost, weight, attack, type):
        # Reuse the base initializer for the shared fields instead of
        # duplicating the assignments.
        super().__init__(name, cost, weight)
        self.attack = attack
        self.type = type  # overrides whatever the base class stored
class inventory:
    """A player's inventory: one weapon slot, other slots, and storage.

    NOTE(review): ``wslot`` starts as a dummy ``weapon(None, ...)`` rather
    than ``None``; ``equip`` therefore unequips that dummy on first use and
    appends it to ``storage`` -- confirm whether that is intended.
    """

    def __init__(self, player):
        self.wslot = weapon(None, None, None, None, None)  # equipped weapon slot
        self.aslot = [None]  # presumably an armour slot -- unused in visible code
        self.tslots = []     # presumably tool slots -- unused in visible code
        self.storage = []    # items not currently equipped
        self.player = player
        # Show the (empty) inventory immediately on creation.
        self.view()

    def view(self):
        """Print the contents of the player's storage."""
        print("Welcome to" + self.player.name + "'s' inventory, these are the items in their inventory: ")
        print("In storage: ")
        for item in self.storage:
            # Weapons additionally display their attack value.
            if type(item) is weapon:
                print(item.name, item.cost, item.weight, item.attack)
            else:
                print(item.name, item.cost, item.weight)

    def add(self, ita):
        # Put an item into storage.
        self.storage.append(ita)

    def unequip(self, itu):
        # Move the equipped weapon back into storage (matched by name).
        # NOTE(review): the attack bonus appended by ``equip`` is never
        # removed from ``player.attacks`` here.
        if itu.name == self.wslot.name:
            self.storage.append(itu)
            self.wslot = None

    def equip(self, ifs):
        # Equip a weapon from storage, unequipping any current weapon first.
        if self.wslot != None:
            self.unequip(self.wslot)
        if ifs in self.storage:
            if type(ifs) is weapon:
                self.wslot = ifs
                self.player.attacks.append(ifs.attack)
                self.storage.pop(self.storage.index(ifs))
# Demo: create an inventory for the player and stock it.
i = inventory(p)
sword = weapon('sword',30,10,10,'p')
camp = item('camp',30,10)
i.add(sword)
i.add(camp)
i.view()
# Simple interactive loop for equipping items.
eou = input("do you want to equip or unequip? (e for equip u for unequip): ")
exit = False  # NOTE(review): shadows the ``exit`` builtin
while not exit:
    # NOTE(review): ``exit`` is never set to True and the e/u prompt is only
    # asked once before the loop, so any answer other than equipping the
    # sword loops forever — confirm this is the intended behaviour.
    if eou == 'e':
        itte = input('what item to equip: ')
        if itte=='sword':
            i.equip(sword)
            print('Sword Equippied!')
|
from pyspark.sql import SparkSession
# Create (or reuse) a local SparkSession for this sample job.
spark = SparkSession.builder.master('local').appName('DataFrames Sample').getOrCreate()
# READING THE CSV FILE TO DATAFRAME AND GIVING SCHEMA FOR THAT DATA
# NOTE(review): the input path is a machine-specific absolute Windows path;
# consider parameterising it.
ordersCSV = spark.read.csv('E:\\Spark2usingPython3\\data-master\\retail_db\\orders')\
    .toDF('order_id','order_date','order_customer_id','order_status')
from pyspark.sql.types import IntegerType
# AFTER READING THE DATA TO DATAFRAMES THE COLUMNS WILL BE OF TYPE STRING
# TO CHANGE THE COLUMNS TYPE USE WITHCOLUMN API
orders=ordersCSV.\
    withColumn('order_id',ordersCSV.order_id.cast(IntegerType())).\
    withColumn('order_customer_id',ordersCSV.order_customer_id.cast(IntegerType()))
orders.printSchema() |
import object_storage
import timeit
def list_objects():
    """Print each container's name followed by the objects it holds."""
    for container in sl_storage.containers():
        print(container.name)
        print(sl_storage[container.name].objects())
def upload_objects():
    """Create every container in ``containers`` and upload one text object."""
    for name in containers:
        container = sl_storage[name]
        container.create()
        # send() creates the object and uploads its content in one step.
        container[filename].send('Plain-Text Content')
def read_objects():
    """Read back every object from every container."""
    for container in sl_storage.containers():
        for obj in sl_storage[container.name].objects():
            print("... reading obj", obj)
            obj.read()
def delete_objects():
    """Delete the test object from each container, then the container itself."""
    for container in sl_storage.containers():
        sl_storage[container.name][filename].delete()
        sl_storage[container.name].delete()
# preps the softlayer storage objects
# SECURITY NOTE(review): API credentials are hard-coded below; they should be
# loaded from the environment or a secrets store, and these values rotated.
sl_storage = object_storage.get_client('SLOS1432883-2:SL1432883', '674c1cddcc2458f3400ba9f5f0066db87c056c5cff8bb1fcf85a5a0eec0b2ffd', datacenter='dal05')
containers = ["container1", "container2", "container3"]
filename = "file.txt"
# lists current containers
print("")
print("current containers")
list_objects()
# uploads objects
print("")
print("uploading objects")
# BUG FIX: timeit.timeit() defaults to number=1000000, which would have
# created and uploaded the objects a million times; run the workload once.
timeit.timeit(upload_objects, number=1)
# lists new containers
print("")
print("new containers")
list_objects()
# reads objects
print("")
print("reading objects")
#timeit.timeit(read_objects, number=1)
# cleans up objects
print("")
print("deleting objects")
delete_objects()
# lists containers after deletion
print("")
print("containers after deletion")
list_objects()
import unittest
import namegenerator.markovchain
class TestMarkovChain(unittest.TestCase):
    """Tests for ``namegenerator.markovchain.MarkovStateMachine``."""

    def setUp(self):
        # A chain trained on a single repeated name can only ever
        # reproduce that name.
        self.markov_chain = namegenerator.markovchain.MarkovStateMachine()
        self.markov_chain.analyze_text(["mark", "mark", "mark"])

    def test_get_name_with_single_name_input(self):
        # NOTE(review): assumes get_name() capitalises its output and that
        # the argument is a length limit — confirm against the implementation.
        self.assertEqual("Mark", self.markov_chain.get_name(4))
|
# -*- coding: utf-8 -*-
# Author: Tom Bresee <tbresee@umich.edu>
#
# License: BSD 3 clause
from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import pandas as pd
def naive_bayes_Bernoulli(*args, **kwargs):
    """
    Build a ``sklearn.naive_bayes.BernoulliNB`` classifier; all arguments
    are forwarded unchanged.

    This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
    """
    return BernoulliNB(*args, **kwargs)


def naive_bayes_multinomial(*args, **kwargs):
    """
    Build a ``sklearn.naive_bayes.MultinomialNB`` classifier; all arguments
    are forwarded unchanged.

    This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
    """
    return MultinomialNB(*args, **kwargs)


def naive_bayes_Gaussian(*args, **kwargs):
    """
    Build a ``sklearn.naive_bayes.GaussianNB`` classifier; all arguments
    are forwarded unchanged.

    This function is used when X are continuous variables.
    """
    return GaussianNB(*args, **kwargs)
class _naive_bayes_demo():
    """Base class for the naive Bayes demos.

    Subclasses load their dataset via ``getdata()``; this class provides
    the shared pipeline-building helpers and the train/test attributes.
    """

    def __init__(self):
        # Dataset and derived artifacts; populated by the subclass.
        self.X = None
        self.y = None
        self.y_classes = None
        self.test_size = 0.25
        self.classifier_grid = None
        self.random_state = 123
        self.X_train = None
        self.X_test = None
        self.y_train = None
        self.y_test = None
        self.y_pred = None
        self.y_pred_score = None

    def build_naive_bayes_Gaussian_pipeline(self):
        """Build, grid-search and fit a StandardScaler + GaussianNB pipeline
        on ``self.X_train`` / ``self.y_train``."""
        # create pipeline
        from sklearn.preprocessing import StandardScaler
        from sklearn.pipeline import Pipeline
        pipeline = Pipeline(steps=[('scaler',
                                    StandardScaler(with_mean=True, with_std=True)),
                                   ('classifier',
                                    naive_bayes_Gaussian()),
                                   ])
        # pipeline parameters to tune
        hyperparameters = {
            'scaler__with_mean': [True],
            'scaler__with_std': [True],
        }
        grid = GridSearchCV(
            pipeline,
            hyperparameters,  # parameters to tune via cross validation
            refit=True,  # fit using all data, on the best detected classifier
            n_jobs=-1,
            scoring='accuracy',
            cv=5,
        )
        # train
        print(
            "Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
        self.classifier_grid = grid.fit(self.X_train, self.y_train)
        print(
            f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found as following:\n"
            f"Step1: scaler: StandardScaler(with_mean={repr(self.classifier_grid.best_params_['scaler__with_mean'])}, with_std={repr(self.classifier_grid.best_params_['scaler__with_std'])}).\n")

    def _lemmas(self, X):
        """Lower-case ``X`` and return the lemma of each word (TextBlob)."""
        words = TextBlob(str(X).lower()).words
        return [word.lemma for word in words]

    def _tokens(self, X):
        """Tokenize ``X`` into words (TextBlob)."""
        return TextBlob(str(X)).words

    def build_naive_bayes_multinomial_pipeline(self):
        """Build, grid-search and fit a CountVectorizer + TfidfTransformer +
        MultinomialNB text-classification pipeline."""
        # create pipeline
        pipeline = Pipeline(steps=[('count_matrix_transformer',
                                    CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
                                   ('count_matrix_normalizer',
                                    TfidfTransformer(use_idf=True)),
                                   ('classifier',
                                    naive_bayes_multinomial()),
                                   ])
        # pipeline parameters to tune
        hyperparameters = {
            'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
            'count_matrix_transformer__analyzer': ('word', self._tokens, self._lemmas),
            'count_matrix_normalizer__use_idf': (True, False),
        }
        grid = GridSearchCV(
            pipeline,
            hyperparameters,  # parameters to tune via cross validation
            refit=True,  # fit using all data, on the best detected classifier
            n_jobs=-1,
            scoring='accuracy',
            cv=5,
        )
        # train
        print(
            "Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
        import nltk
        #nltk.download('punkt', quiet=True)
        #nltk.download('wordnet', quiet=True)
        #from ..datasets import public_dataset
        #import os
        #os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
        # see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
        # count_vect.fit_transform() in training vs. count_vect.transform() in testing
        self.classifier_grid = grid.fit(self.X_train, self.y_train)
        print(
            f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found as following:\n"
            f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
            f"Step2: Transforming from occurrences to frequency: TfidfTransformer(use_idf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_idf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
    """Demo: classify SMS messages as ham or spam with multinomial NB."""

    def __init__(self):
        super().__init__()
        self.y_classes = ('ham (y=0)', 'spam (y=1)')

    def getdata(self):
        """Load the SMS_spam dataset and split it into train/test sets."""
        from ..datasets import public_dataset
        data = public_dataset(name='SMS_spam')
        n_spam = data.loc[data.label == 'spam', 'label'].count()
        n_ham = data.loc[data.label == 'ham', 'label'].count()
        print(
            f"---------------------------------------------------------------------------------------------------------------------\n"
            f"This demo uses a public dataset of SMS spam, which has a total of {len(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
            f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
        self.X = data['message']
        self.y = data['label']
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.y, test_size=self.test_size, random_state=self.random_state)

    def show_model_attributes(self):
        """Print the terms with the highest predicted spam probability."""
        count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
        vocabulary_dict = count_vect.vocabulary_
        # clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
        # NOTE(review): predict_proba() is fed the vocabulary dict directly
        # (iterating a dict yields its keys, i.e. the terms) — confirm intended.
        term_proba_df = pd.DataFrame({'term': list(
            vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
        term_proba_df = term_proba_df.sort_values(
            by=['proba_spam'], ascending=False)
        top_n = 10
        df = pd.DataFrame.head(term_proba_df, n=top_n)
        print(
            f"The top {top_n} terms with highest probability of a message being a spam (the classification is either spam or ham):")
        for term, proba_spam in zip(df['term'], df['proba_spam']):
            print(f"   \"{term}\": {proba_spam:4.2%}")

    def evaluate_model(self):
        """Plot the confusion matrix and ROC/PR curves on the test split."""
        self.y_pred = self.classifier_grid.predict(self.X_test)
        self.y_pred_score = self.classifier_grid.predict_proba(self.X_test)
        from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
        plot_confusion_matrix(y_true=self.y_test, y_pred=self.y_pred,
                              y_classes=self.y_classes)
        plot_ROC_and_PR_curves(fitted_model=self.classifier_grid, X=self.X_test,
                               y_true=self.y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label='spam', model_name='Multinomial NB')

    def application(self):
        """Classify one hard-coded example message and print the result."""
        custom_message = "URGENT! We are trying to contact U. Todays draw shows that you have won a 2000 prize GUARANTEED. Call 090 5809 4507 from a landline. Claim 3030. Valid 12hrs only."
        custom_results = self.classifier_grid.predict([custom_message])[0]
        print(
            f"\nApplication example:\n- Message: \"{custom_message}\"\n- Probability of class=1 (spam): {self.classifier_grid.predict_proba([custom_message])[0][1]:.2%}\n- Classification: {custom_results}\n")

    def run(self):
        """
        This function provides a demo of selected functions in this module using the SMS spam dataset.
        Required arguments:
            None
        """
        # Get data
        self.getdata()
        # Create and train a pipeline
        self.build_naive_bayes_multinomial_pipeline()
        # model attributes
        self.show_model_attributes()
        # model evaluation
        self.evaluate_model()
        # application example
        self.application()
        # return classifier_grid
        # return self.classifier_grid
# import numpy as np
# from sklearn.utils import shuffle
# True Positive
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_samples=1, random_state=1234)[0] ] ]]
# False Negative
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_samples=1, random_state=1234)[0] ] ]]
# False Positive
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_samples=1, random_state=1234)[0] ] ]]
# True Negative
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_samples=1, random_state=123)[0] ] ]]
class _naive_bayes_demo_20newsgroups(_naive_bayes_demo):
    """Demo: classify 20newsgroups posts into three selected categories."""

    def __init__(self):
        super().__init__()
        # Kept sorted: fetch_20newsgroups targets follow alphabetical order.
        self.y_classes = sorted(
            ['soc.religion.christian', 'comp.graphics', 'sci.med'])

    def getdata(self):
        """Download the train and test splits for the selected categories."""
        print(
            f"-------------------------------------------------------------------------------------------------------------------------------------\n"
            f"This demo uses a public dataset of 20newsgroup and uses {len(self.y_classes)} categories of them: {repr(self.y_classes)}.\n"
            f"The goal is to use 'term frequency in document' to predict which category a document belongs to.\n")
        from sklearn.datasets import fetch_20newsgroups
        #from ..datasets import public_dataset
        twenty_train = fetch_20newsgroups(  # data_home=public_dataset("scikit_learn_data_path"),
            subset='train', categories=self.y_classes, random_state=self.random_state)
        twenty_test = fetch_20newsgroups(  # data_home=public_dataset("scikit_learn_data_path"),
            subset='test', categories=self.y_classes, random_state=self.random_state)
        self.X_train = twenty_train.data
        self.y_train = twenty_train.target
        self.X_test = twenty_test.data
        self.y_test = twenty_test.target

    def show_model_attributes(self):
        """Print the highest-probability terms for each category."""
        # model attributes
        count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
        vocabulary_dict = count_vect.vocabulary_
        # clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
        for i in range(len(self.y_classes)):
            term_proba_df = pd.DataFrame({'term': list(
                vocabulary_dict), 'proba': self.classifier_grid.predict_proba(vocabulary_dict)[:, i]})
            term_proba_df = term_proba_df.sort_values(
                by=['proba'], ascending=False)
            top_n = 10
            df = pd.DataFrame.head(term_proba_df, n=top_n)
            print(
                f"The top {top_n} terms with highest probability of a document being a {repr(self.y_classes[i])}:")
            for term, proba in zip(df['term'], df['proba']):
                print(f"   \"{term}\": {proba:4.2%}")

    def evaluate_model(self):
        """Plot the confusion matrix for the test split."""
        # model evaluation
        self.y_pred = self.classifier_grid.predict(self.X_test)
        from ..model_evaluation import plot_confusion_matrix
        # the y_classes are in an alphabetic order
        plot_confusion_matrix(y_true=self.y_test,
                              y_pred=self.y_pred, y_classes=self.y_classes)

    def application(self):
        # No application example for this demo.
        pass

    def run(self):
        """
        This function provides a demo of selected functions in this module using the 20 newsgroup dataset.
        It models after the tutorial https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
        Required arguments:
            None
        """
        # Get data
        self.getdata()
        # Create and train a pipeline
        self.build_naive_bayes_multinomial_pipeline()
        # model attributes
        self.show_model_attributes()
        # model evaluation
        self.evaluate_model()
        # application example
        self.application()
        # return classifier_grid
        # return self.classifier_grid
class _naive_bayes_demo_Social_Network_Ads(_naive_bayes_demo):
    """Demo: predict ad purchases from age and salary with Gaussian NB."""

    def __init__(self):
        super().__init__()
        self.y_classes = ['not_purchased (y=0)', 'purchased (y=1)']

    def getdata(self):
        """Load the Social_Network_Ads dataset and split it 75/25."""
        from ..datasets import public_dataset
        data = public_dataset(name='Social_Network_Ads')
        self.X = data[['Age', 'EstimatedSalary']].to_numpy()
        self.y = data['Purchased'].to_numpy()
        from sklearn.model_selection import train_test_split
        # NOTE(review): literal 0.25 / 123 are used instead of self.test_size /
        # self.random_state (currently the same values) — confirm intended.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.y, test_size=0.25, random_state=123)

    def show_model_attributes(self):
        # No per-term attributes for the Gaussian pipeline.
        pass

    def evaluate_model(self):
        """Plot confusion matrix, ROC/PR curves and decision boundaries."""
        # model evaluation
        self.y_pred = self.classifier_grid.predict(self.X_test)
        self.y_pred_score = self.classifier_grid.predict_proba(self.X_test)
        from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves, visualize_classifier_decision_boundary_with_two_features
        plot_confusion_matrix(y_true=self.y_test,
                              y_pred=self.y_pred, y_classes=self.y_classes)
        plot_ROC_and_PR_curves(fitted_model=self.classifier_grid, X=self.X_test,
                               y_true=self.y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label=1, model_name="Gaussian NB")
        visualize_classifier_decision_boundary_with_two_features(
            self.classifier_grid, self.X_train, self.y_train, self.y_classes, title=f"Gaussian Naive Bayes / training set", X1_lab='Age', X2_lab='Estimated Salary')
        visualize_classifier_decision_boundary_with_two_features(
            self.classifier_grid, self.X_test, self.y_test, self.y_classes, title=f"Gaussian Naive Bayes / testing set", X1_lab='Age', X2_lab='Estimated Salary')

    def application(self):
        # No application example for this demo.
        pass

    def run(self):
        """
        This function provides a demo of selected functions in this module using the Social_Network_Ads dataset.
        Required arguments:
            None
        """
        # Get data
        self.getdata()
        # Create and train a pipeline
        self.build_naive_bayes_Gaussian_pipeline()
        # model attributes
        self.show_model_attributes()
        # model evaluation
        self.evaluate_model()
        # application example
        self.application()
        # return classifier_grid
        # return self.classifier_grid
def demo(dataset="SMS_spam"):
    """
    This function provides a demo of selected functions in this module.
    Required arguments:
        dataset: A string. Possible values: "SMS_spam", "20newsgroups", "Social_Network_Ads"
    """
    # Pick the demo runner matching the requested dataset name.
    if dataset == "SMS_spam":
        runner = _naive_bayes_demo_SMS_spam()
    elif dataset == "20newsgroups":
        runner = _naive_bayes_demo_20newsgroups()
    elif dataset == "Social_Network_Ads":
        runner = _naive_bayes_demo_Social_Network_Ads()
    else:
        raise TypeError(f"dataset [{dataset}] is not defined")
    return runner.run()
|
import pygame
from pygame.locals import *
import math
from py3de import *
# Setup pygame
pygame.init()
pygame.font.init()
width, height = 800, 800
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Nick\'s 3D Engine')
# Engine and a cube placed 2 units in front of the camera.
engine = Engine3D(width, height)
cube = Cube(engine, 0, 0, 2, 0, 0, 1)
# Begin game loop
running = True
while running:
    # Rotate the cube a little each frame.
    cube.xr += 0.1
    # Check for user input
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    # Clear screen
    pygame.draw.rect(screen, (0, 0, 0), Rect(0, 0, width, height))
    # Draw triangles
    # NOTE(review): no frame-rate cap (pygame.time.Clock) — the loop runs as
    # fast as the CPU allows; confirm that is acceptable.
    for triangle in cube.ToTriangleList():
        pygame.draw.polygon(screen, (255,255,255), [
            (triangle.vectors[0].x, triangle.vectors[0].y),
            (triangle.vectors[1].x, triangle.vectors[1].y),
            (triangle.vectors[2].x, triangle.vectors[2].y)
        ], 2)
    # Update the screen
    pygame.display.update()
from sqlalchemy import BigInteger, Column, Integer, Numeric, String, Table
from src.models.base import Base
"""
Trending Params aggregate the paramters used to calculate trending track scores
"""
# Materialized view
# Read-only SQLAlchemy mapping of the ``trending_params`` materialized view,
# which aggregates the per-track counters used to compute trending scores.
t_trending_params = Table(
    "trending_params",
    Base.metadata,
    Column("track_id", Integer, index=True),
    Column("genre", String),
    Column("owner_id", Integer),
    Column("play_count", BigInteger),
    Column("owner_follower_count", BigInteger),
    Column("repost_count", Integer),
    Column("save_count", Integer),
    Column("repost_week_count", BigInteger),
    Column("repost_month_count", BigInteger),
    Column("repost_year_count", BigInteger),
    Column("save_week_count", BigInteger),
    Column("save_month_count", BigInteger),
    Column("save_year_count", BigInteger),
    # Aggregate reputation score; stored as an arbitrary-precision numeric.
    Column("karma", Numeric),
)
|
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QPixmap, QIcon, QFontMetricsF
from PyQt5.QtWidgets import QWidget, QPushButton, QHBoxLayout, QLabel, QLineEdit, QTextEdit, QVBoxLayout, QScrollArea, \
QGroupBox, QFormLayout
from src.widget.answer import Answer
from src.widget.header import Header
# Survey status code <-> Korean display label ("종료" = closed, "진행" = open).
ENCODE_STATUS = {
    0: "종료",
    1: "진행"
}
# Inverse mapping of ENCODE_STATUS.
DECODE_STATUS = {
    "종료": 0,
    "진행": 1
}
# Icon shown on the answer-upload button.
UPLOAD_ICON_PATH = "images/answer-upload.png"
class Survey(QWidget):
    """Widget showing one survey (title and contents) with its answer thread.

    central: main-window object exposing ``realtimeDB``.
    uuid: key of the survey record in the realtime database.
    """

    def __init__(self, central, uuid):
        QWidget.__init__(self)
        self.central = central
        self.uuid = uuid
        self.surveySource = None  # raw survey record; loaded in setSurveySource()
        self.header = Header(self)
        # Left side: read-only title and contents.
        self.textTitle = QLineEdit()
        self.textTitle.setObjectName("SurveyText-title")
        self.textTitle.setVisible(False)
        self.textContents = QTextEdit()
        self.textContents.setObjectName("SurveyText-contents")
        self.textContents.setReadOnly(True)
        # Render tab characters four spaces wide.
        font = self.textContents.font()
        fontMetrics = QFontMetricsF(font)
        spaceWidth = fontMetrics.width(' ')
        self.textContents.setTabStopDistance(spaceWidth * 4)
        layoutLeft = QVBoxLayout()
        layoutLeft.addWidget(self.textTitle, 0)
        layoutLeft.addWidget(self.textContents, 10)
        # Right side: scrollable list of existing answers.
        self.groupAnswers = QGroupBox()
        self.groupAnswers.setObjectName("SurveyGroup-answers")
        self.areaAnswers = QScrollArea(self)
        self.areaAnswers.setObjectName("SurveyArea-answers")
        self.areaAnswers.setWidget(self.groupAnswers)
        self.areaAnswers.setWidgetResizable(True)
        self.areaAnswers.setVisible(False)
        self.layoutAnswers = QFormLayout(self.groupAnswers)
        # Placeholder label shown when there are no answers yet.
        self.labelAnswers = QLabel()
        self.labelAnswers.setObjectName("SurveyLabel")
        self.labelAnswers.setText("※ 답글이 존재하지 않습니다.")
        self.labelAnswers.setAlignment(Qt.AlignCenter)
        self.labelAnswers.setVisible(False)
        # Input box for composing a new answer.
        self.textAnswer = QTextEdit()
        self.textAnswer.setObjectName("SurveyText-answer")
        self.textAnswer.setPlaceholderText("답글을 입력하세요.")
        self.textAnswer.textChanged.connect(self.handleTextAnswerChange)
        font = self.textAnswer.font()
        fontMetrics = QFontMetricsF(font)
        spaceWidth = fontMetrics.width(' ')
        self.textAnswer.setTabStopDistance(spaceWidth * 4)
        # Upload button; enabled only while the answer box has text.
        self.buttonUpload = QPushButton()
        self.buttonUpload.setObjectName("SurveyButton-upload")
        self.buttonUpload.setIcon(QIcon(QPixmap(UPLOAD_ICON_PATH)))
        self.buttonUpload.setIconSize(QSize(40, 30))
        self.buttonUpload.setEnabled(False)
        self.buttonUpload.setCursor(Qt.PointingHandCursor)
        self.buttonUpload.clicked.connect(self.handleButtonUploadClick)
        layoutAnswer = QHBoxLayout()
        layoutAnswer.addWidget(self.textAnswer, 10)
        layoutAnswer.addWidget(self.buttonUpload, 0)
        layoutAnswer.setContentsMargins(0, 0, 0, 0)
        layoutRight = QVBoxLayout()
        layoutRight.addWidget(self.areaAnswers, 10)
        layoutRight.addWidget(self.labelAnswers, 10)
        layoutRight.addLayout(layoutAnswer, 0)
        layoutRight.setContentsMargins(0, 0, 0, 0)
        layoutBottom = QHBoxLayout()
        layoutBottom.addLayout(layoutLeft, 5)
        layoutBottom.addLayout(layoutRight, 5)
        layoutBottom.setContentsMargins(0, 0, 0, 0)
        layout = QVBoxLayout()
        layout.addWidget(self.header, 0)
        layout.addLayout(layoutBottom, 10)
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        # Populate the widget from the database.
        self.setSurveySource()
        self.setAnswerArea()

    def setSurveySource(self):
        """Reload the survey record and refresh the title/contents fields."""
        self.surveySource = self.central.realtimeDB.getSurveySource(self.uuid)
        # Answer input is only shown while the survey status is "open" (1).
        status = self.surveySource["status"]
        self.textAnswer.setVisible(status)
        self.buttonUpload.setVisible(status)
        self.header.setHeader()
        # Expand tabs so QLineEdit/QTextEdit render them consistently.
        self.textTitle.setText(self.surveySource["title"].replace("\t", " "*4))
        self.textContents.setPlainText(self.surveySource["contents"].replace("\t", " "*4))

    def setAnswerArea(self):
        """Rebuild the answer list, or show the 'no answers' placeholder."""
        if "answers" in self.surveySource.keys():
            # Clear out any previously added answer widgets.
            for idx in range(self.layoutAnswers.count()):
                self.layoutAnswers.itemAt(idx).widget().deleteLater()
            for row, key in enumerate(self.surveySource["answers"].keys()):
                answerSource = self.surveySource["answers"][key]
                answerWidget = Answer(self, key, answerSource)
                self.layoutAnswers.addRow("", answerWidget)
            self.labelAnswers.setVisible(False)
            self.areaAnswers.setVisible(True)
        else:
            self.areaAnswers.setVisible(False)
            self.labelAnswers.setVisible(True)

    def handleButtonUploadClick(self):
        """Persist the composed answer and refresh the view."""
        answer = self.textAnswer.toPlainText()
        self.central.realtimeDB.setAnswer(self.uuid, answer)
        self.textAnswer.clear()
        self.setSurveySource()
        self.setAnswerArea()

    def handleTextAnswerChange(self):
        """Enable the upload button only when the answer box has text."""
        text = self.textAnswer.toPlainText()
        if text:
            self.buttonUpload.setEnabled(True)
        else:
            self.buttonUpload.setEnabled(False)
|
from __future__ import absolute_import
from .base import BaseContent
from ..utils import resize_image
from ZODB.blob import Blob
class Image(BaseContent):
    """ Image model storing resized variants as lazily-created ZODB blobs. """

    def _store_resized_image(self, key, data):
        """ store a blob image as attribute """
        blob = Blob()
        f = blob.open('w')
        f.write(data['data'])
        f.close()
        setattr(self, key, blob)
        # Flag the persistent object as changed so ZODB writes it back.
        self._p_changed = 1

    def _get_resized_image(self, key):
        """ retrieve a blob image """
        blob = getattr(self, key)
        # NOTE(review): the name always comes from the original image data,
        # not the resized variant — confirm this is intended.
        return {'name': self._data_['data']['name'],
                'data': blob}

    @property
    def thumbnail(self):
        """ return the thumbnail, or lazy create it when not present
        yet """
        key = '__cached_blob_thumbnail'
        if not hasattr(self, key):
            self._store_resized_image(key, resize_image(self._data_['data']))
        return self._get_resized_image(key)

    def get_size(self, size=(800, 600)):
        """ return the resized image, or lazy create it when not present
        yet """
        # One cache attribute per requested (width, height).
        key = '_cached_blob_%s_%s' % size
        if not hasattr(self, key):
            self._store_resized_image(key, resize_image(
                self._data_['data'], size))
        return self._get_resized_image(key)

    @property
    def base_id(self):
        # The stored file name doubles as the content id.
        return self._data_['data']['name']

    @property
    def title(self):
        return self._data_['name']
|
# Support and confidence are values to specify the sensitivity of the algorithm that you're running.
# Use it together with this URL to do calls to the public API.
# There is a pyspotlight wrapper, or you can do pure API calls (there is atleast on tutorial online)
# Endpoint for DBpedia Spotlight entity annotation (set Accept: application/json).
AnnotationURL = "https://api.dbpedia-spotlight.org/en/annotate"
# Properties that you want to return from the dbpedia entries can be defined as dbp%3AbirthPlace | %3 is a seperator. All the properties can be found if you look at a page like
# https://dbpedia.org/page/Sonic_the_Hedgehog_(1991_video_game)
# I.e dbo%3gameArtist would return Rieko_Kodama if you try to call the Entity for the sonic game with that parameter ^^
# You need to set the accept headers to JSON to use the API.
# BUG FIX: this was an f-string referencing undefined names (person,
# parameter1, parameter2) and therefore raised NameError at import time.
# It is now a plain template; fill it with
# DBpedia_URL.format(person=..., parameter1=..., parameter2=...).
DBpedia_URL = "http://vmdbpedia.informatik.uni-leipzig.de:8080/api/1.0.0/values?entities={person}&property={parameter1}property={parameter2}&pretty=NONE&limit=100&offset=0&key=1234&oldVersion=true"
# Playground to find the right parameters http://vmdbpedia.informatik.uni-leipzig.de:8080/swagger-ui.html#!/Request32Types/valueRequestUsingGET
|
# -*- coding: utf-8 -*-
from setuptools import setup
# Package metadata for the ``dtf`` dotfile manager.
setup(
    name="dtf",
    version="0.1.4",
    description="dtf is my personal dotfile manager.",
    url="https://github.com/ericnchen/dtf",
    author="Eric Chen",
    author_email="eric@ericnchen.com",
    license="MIT",
    packages=["dtf", "dtf.dotfiles"],
    python_requires=">=3.7, <4",
    install_requires=["click"],
    include_package_data=True,
    # Installs the ``dtf`` console command, dispatching to the click CLI.
    entry_points={"console_scripts": ["dtf = dtf.app:cli"]},
)
|
def main():
    """Convert each Fahrenheit reading in temps_input.txt to Celsius,
    writing one value per line to temps_output.txt.

    Uses ``with`` so both files are closed even if a line fails to parse.
    """
    with open("temps_input.txt", 'r') as temperatures, \
            open("temps_output.txt", 'w') as output:
        for line in temperatures:
            print(fahrenheit_to_celsius(float(line)), file=output)


def fahrenheit_to_celsius(fahrenheit):
    """Return the Celsius equivalent of ``fahrenheit`` (float degrees)."""
    celsius = 5 / 9 * (fahrenheit - 32)
    return celsius


# Guarded so importing this module no longer triggers the conversion.
if __name__ == "__main__":
    main()
|
from users.models import InvalidPassword
from users.models import User
from users.models import UserDoesNotExist
class ModelBackend(object):
    """Authentication backend delegating credential checks to ``User``."""

    def authenticate(self, username=None, password=None):
        # NOTE(review): may raise UserDoesNotExist / InvalidPassword
        # (imported above) — confirm callers handle them.
        return User.authenticate(username=username, password=password)
# Generated by Django 3.0.3 on 2020-02-10 16:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the ``boast`` and ``roast`` flags default to ``False``."""

    dependencies = [
        ('ghostPost', '0002_auto_20200210_1639'),
    ]

    operations = [
        migrations.AlterField(
            model_name='boastroast',
            name='boast',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='boastroast',
            name='roast',
            field=models.BooleanField(default=False),
        ),
    ]
|
def enqueue(l1):
    """Prompt the user for an integer and append it when the queue has room."""
    if isFull(l1):
        print("QUEUE IS FULL DEQUEUE FIRST...")
    else:
        data = int(input("Enter the data :"))
        l1.append(data)
def dequeue(l1):
    """Remove the element at the front of the queue, if any."""
    if isEmpty(l1):
        print("QUEUE IS EMPTY ENQUEUE FIRST...")
    else:
        l1.pop(0)
def isFull(l1, capacity=10):
    """Return True when the queue holds exactly ``capacity`` items.

    ``capacity`` defaults to the original hard-coded limit of 10, so
    existing callers keep their behaviour.
    """
    return len(l1) == capacity
def isEmpty(l1):
    """Return True for an empty list or the sentinel string "NULL"."""
    return l1 in ([], "NULL")
if __name__=="__main__":
l1=[]
while True:
print("++++++ QUEUE OPERATIONS ++++++")
print("1.ENQUEUE.")
print("2.DEQUEUE.")
print("3.Diplay QUEUE.")
print("4.Exit")
ch=int(input("Enter your choice :"))
if ch==1:
enqueue(l1)
if ch==2:
dequeue(l1)
if ch==3:
print("Displaying the QUEUE...")
print(l1)
if ch==4:
print("Thank you for using Stack...")
break;
if ch<1 or ch>4:
print("Please read the Menu Properly...")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/14 0014 上午 9:11
# @Author : Aries
# @Site :
# @File : 函数的应用.py
# @Software: PyCharm Community Edition
def func(fn):
    """Call the given zero-argument callable (demo of functions as values)."""
    fn()


def gn():
    """Print a fixed greeting."""
    print('hello world')


# Pass the function object itself (no parentheses) as the argument.
func(gn)
|
import numpy
class layer():
    """Abstract base class for neural-network layers.

    Concrete layers must override ``forward`` and ``backward``.
    """

    def __init__(self, input_shape, trainable, name):
        # Shape of one input sample (excluding the batch dimension).
        self.input_shape = input_shape
        self.trainable = trainable
        self.name = name

    def forward(self, X):
        """Compute the layer output for a batch ``X``."""
        raise NotImplementedError("Please Implement the Forward method")

    def backward(self, dL_Z):
        """Propagate the upstream gradient ``dL_Z`` through the layer."""
        raise NotImplementedError("Please Implement the Backward method")
class dense(layer):
def __init__(self, W, b, input_shape, trainable=True, name="Dense"):
"""
Computes Z = dot(X, W) + b
Parameters
----------
W : Numpy array with shape (sample_features, hidden_units)
b : Numpy array with shape (hidden_units)
input_shape : Shape of the inputs. Must be equal to sample_features.
"""
assert len(input_shape) == 1, "Error! In dense layers the input shape only can have one dimension: 'features'"
assert W.shape[0] == input_shape[0], "Error! Input shape and W matrix do not match."
# Initialization
super().__init__(input_shape, trainable, name)
self.W = W.astype(numpy.float32)
self.b = b
if not isinstance(self.b, type(None)):
assert self.b.shape[0] == self.W.shape[1], "Error! Bias and weights shape do not match."
self.b = self.b.astype(numpy.float32)
# Store variables to make the computation easier
self.output_shape = (self.W.shape[1],)
# Store computation
self.X = None
self.Z = None
# Local gradient
self.dZ_W = None
self.dZ_X = None
self.dZ_b = None
def forward(self, X):
"""
Performs the computation : dot(X, W)
Parameters
----------
X : Numpy array with shape (batch_size, sample_features)
"""
assert (X.shape[1] == self.input_shape[0]), "Shapes of W and inputs do not match."
self.X = X.astype(numpy.float32)
self.batch_size = self.X.shape[0]
self.Z = self.X @ self.W
if not isinstance(self.b, type(None)):
self.Z = self.Z + self.b
return self.Z
def backward(self, dL_Z):
""" Given dL/dZ returs dL/dX and stores dL/dW"""
assert self.Z.shape == dL_Z.shape, "The gradient wrt Z do not have the same shape than Z"
self.dL_Z = dL_Z.astype(numpy.float32)
# Local gradient of dZ/dX
# Remember that all the input samples have the same derivatives of Z wrt X, because all of them are
# multiplied by the W, and the W are the same ones for all the samples in one iteration
self.dZ_X = self.W
# Local gradient of dZ/dW
# This is a bit tricky... so first remember some easy concepts. The matrix W is (IUxOU), and
# Z is (NXOU), being N : num samples, IU : Input units and OU : Output units (neurons of this layer)
# The partial derivative of Z wrt to W is a three-dimensional array because by the definition
# we need to see the rate of change of every Z respect every weight. BUT, one weight only contributes
# to ONE NEURON, and its derivative wrt the other neurons is zero!!
# So, we can store the non-trivial information of the three-dimensiona array into a two dimensional array.
# In summary, the way you should read the array dZ_W is as follows:
# | dZ1/dW11 dZ2/dW12 | <- This row is all the W which come from X1, so the derivative is always X1
# | dZ1/dW21 dZ2/dW22 | <- This row is all the W which come from X2,so the derivative is always X2...
# Futhermore, we can take advantage of matrix multiplication and compress even more this matrix, because
# as you can see above, the rows are always the same value. You can check that the computation is still the
# same when the dL/dW is computed a few lines below (;
self.dZ_W = self.X
# Local gradient of dZ/db
# Z is linearly dependent of the biases. If we increase one bias by one; z increases by one.
# Remember that each bias only contributes to one neuron, so again, we can reduce the non-trivial
# part of the Jacobian matrix to a vector.
self.dZ_b = None if isinstance(self.b, type(None)) else numpy.ones(self.b.shape)
# Output gradient dL/dW
# Remember the matrix generated in dZ/dW, now we need to apply the chain rule, or, in other words
# we need to multiply the rate of change each dZ/dW by the rate of change of dL/dZ
# dL_Wi,j = sum_{num samples} dL/dZj * dZj/dWi,j
# Finally, divide by the number of samples to preserve the magnitude of the gradient independently of
# the number of samples
self.dL_W = (self.X.T @ self.dL_Z) / self.batch_size
# Output gradient dL/dX
# This could be ignored if there are constraints of memory/time, because we can not modify how our
# data looks like. Anyways, we compute the gradients to perform future studies about the behaviour of the model
self.dL_X = (self.dL_Z @ self.dZ_X.T) / self.batch_size
# Output gradient dL/db
# Because the local gradient dZ/db is a vector of ones, we do not need to perform the vector multiplication
# We only need to reduce dL/dZ among the samples axis, so each sample contributes, and divide by the batch size
self.dL_b = self.dL_Z.sum(axis=0) / self.batch_size
return self.dL_X
def get_trainable_vars_and_grads(self):
""" Returns a list with (var, grad) for all the trainable variables of this layers """
vars_and_grads = [(self.W, self.dL_W)]
if not isinstance(self.b, type(None)):
vars_and_grads.append((self.b, self.dL_b))
return vars_and_grads
    def set_trainable_vars(self, new_vars):
        """
        Updates the variables. The list is expected to have form:
        [weights, biases]
        """
        # Weights are always replaced; the bias is only replaced when a second
        # entry is supplied (bias-less layers pass a one-element list).
        self.W = new_vars[0]
        if len(new_vars) == 2:
            self.b = new_vars[1]
# Importable module only: warn anyone who executes it directly.
if __name__ == '__main__':
    # Fixed typo in the message ("mean" -> "meant").
    print("This script is not meant to be run.")
|
import os
from selenium import webdriver
import json
from time import sleep
import chromedriver_autoinstaller
# Keep only the major version of the installed Chrome;
# chromedriver_autoinstaller stores the driver in a folder named after it.
drVer = str(chromedriver_autoinstaller.get_chrome_version())
drVer = drVer[0:drVer.index(".")]
# Three rows of [date, title, pdf-name] scraped from the Diyanet site.
hutbeler = [[],[],[]]
def printHutbeler(hutbeler):
    """Print a small fixed-width table (NO | date | title) of the three sermons."""
    print(f"""
    NO    |   TARIH    |  HUTBE ADI
    -----------------------------------------------------------
    1     | {hutbeler[0][0]} | {hutbeler[0][1]}
    2     | {hutbeler[1][0]} | {hutbeler[1][1]}
    3     | {hutbeler[2][0]} | {hutbeler[2][1]}
    """)
def createDr(headless):
    """Build a Chrome WebDriver configured for silent, dialog-free printing.

    Args:
        headless: When True the browser runs without a visible window.

    Returns:
        A selenium ``webdriver.Chrome`` using the locally downloaded
        chromedriver matching the installed Chrome major version.
    """
    options = webdriver.ChromeOptions()
    # Pre-seed Chrome's print-preview state so window.print() goes straight
    # to the default local destination without showing a dialog.
    settings = {
        "recentDestinations": [{
            "id": "Default",
            "origin": "local",
            "account": "",
        }],
        "selectedDestinationId": "Default",
        "version": 2
    }
    prefs = {'printing.print_preview_sticky_settings.appState': json.dumps(settings)}
    options.add_experimental_option('prefs', prefs)
    options.add_argument('--kiosk-printing')  # auto-confirm printing
    options.add_argument('--log-level=3')     # silence chromedriver chatter
    # Options.set_headless() is deprecated since Selenium 3.8 and removed in
    # Selenium 4; the `headless` property works on both 3.8+ and 4.x.
    options.headless = headless
    # chromedriver_autoinstaller downloads the driver under ./<major-version>/.
    if os.name == "nt":
        return webdriver.Chrome(f'./{drVer}/chromedriver.exe', options=options)
    else:
        return webdriver.Chrome(f'./{drVer}/chromedriver', options=options)
def drUpdater():
    """Download the chromedriver build matching the installed Chrome into ./ ."""
    try:
        print("'chromedriver' indiriliyor...")
        chromedriver_autoinstaller.install("./")
    except:
        # Download failed (most likely no network): tell the user and quit.
        input("\nInternet bağlantınızı kontrol edin !\nKapatmak için 'enter'a basınız...")
        exit()
def main():
    """Scrape the three latest sermons, let the user pick one, and print its PDF."""
    try:
        dr = createDr(True)
    except:
        # Driver missing or outdated: download a matching one and retry once.
        drUpdater()
        dr = createDr(True)
    try:
        dr.minimize_window()
        dr.get("https://dinhizmetleri.diyanet.gov.tr/kategoriler/yayinlarimiz/hutbeler/t%C3%BCrk%C3%A7e")
        for i in [1,2,3]:
            hutbeler[i-1].append(dr.find_element_by_xpath(f'/html/body/form/div[5]/div/div[2]/div/div/span/div[3]/div/div/div/div/div/div/div[2]/table/tbody/tr/td/table/tbody/tr[{i}]/td[2]/span').text) # grabs the dates
            hutbeler[i-1].append(dr.find_element_by_xpath(f'/html/body/form/div[5]/div/div[2]/div/div/span/div[3]/div/div/div/div/div/div/div[2]/table/tbody/tr/td/table/tbody/tr[{i}]/td[3]').text) # grabs the sermon title
            hutbeler[i-1].append(dr.find_element_by_xpath(f'/html/body/form/div[5]/div/div[2]/div/div/span/div[3]/div/div/div/div/div/div/div[2]/table/tbody/tr/td/table/tbody/tr[{i}]/td[4]/a').text) # grabs the sermon's PDF link name
        dr.quit()
        os.system('cls' if os.name == 'nt' else 'clear')
        printHutbeler(hutbeler)
        # Loop until the user picks one of the three listed sermons.
        hutbeNo = 0
        while (hutbeNo not in ("1","2","3")):
            hutbeNo = input("Hutbe NO : ")
        # Open a visible (non-headless) browser: kiosk-printing sends the PDF
        # to the default printer via window.print().
        dr = createDr(False)
        dr.minimize_window()
        dr.get(f'https://dinhizmetleri.diyanet.gov.tr/Documents/{hutbeler[int(hutbeNo)-1][2]}.pdf')
        dr.execute_script("window.print();")
        sleep(5)
    except:
        input("\nInternet bağlantınızı kontrol edin !\nKapatmak için 'enter'a basınız...")
        exit()
# Script entry point.
if __name__ == '__main__':
    main()
# Read numbers until the user types 'done', then report sum, count and average.
count = 0
total = 0.0  # renamed from `sum`, which shadowed the builtin
while True:
    value = input("Enter a number: ")
    if value == 'done':
        break
    try:
        number = float(value)
    except ValueError:
        # Only bad numeric input is expected here; don't swallow everything.
        print("Invalid input")
        continue
    count = count + 1
    total = total + number
if count:
    print("Sum: " + str(total), "Count: " + str(count), "Average: " + str(total/count))
else:
    # Guard against ZeroDivisionError when 'done' is entered immediately.
    print("No numbers entered")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Vantiv eCommerce
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, print_function, unicode_literals
import json
import os
import pyxb
import xmltodict
from cnpsdk import fields_chargeback
from . import version
class Configuration(object):
    """Setup Configuration variables.

    Attributes:
        username (Str): authentication.user
        password (Str): authentication.password
        merchant_id (Str): The unique string to identify the merchant within the system.
        url (Str): Url for server.
        proxy (Str): Https proxy server address. Must start with "https://"
        print_xml (bool): Whether print request and response xml
        neuter_xml (bool): Whether to neuter sensitive data in printed xml
    """
    VERSION = version.VERSION
    RELEASE = version.RELEASE
    # Config lives under $CNP_CHARGEBACK_SDK_CONFIG when set, else in $HOME.
    _CONFIG_FILE_PATH = os.path.join(os.environ['CNP_CHARGEBACK_SDK_CONFIG'], ".cnp_chargeback_sdk.conf") \
        if 'CNP_CHARGEBACK_SDK_CONFIG' in os.environ else os.path.join(os.path.expanduser("~"), ".cnp_chargeback_sdk.conf")

    def __init__(self, conf_dict=None):
        """Initialise from defaults, then the saved config file, then conf_dict.

        Args:
            conf_dict: Optional mapping of attribute overrides.
                (Previously defaulted to a mutable ``dict()``; ``None`` is the
                safe equivalent and behaves identically for callers.)

        Raises:
            ChargebackError: If conf_dict contains an unknown attribute name.
        """
        attr_dict = {
            'username': '',
            'password': '',
            'merchant_id': '',
            'url': 'http://www.testvantivcnp.com/sandbox/new',
            'proxy': '',
            'print_xml': False,
            'neuter_xml': False,
        }
        # set default values
        for k in attr_dict:
            setattr(self, k, attr_dict[k])
        # override values by loading saved conf
        try:
            with open(self._CONFIG_FILE_PATH, 'r') as config_file:
                config_json = json.load(config_file)
            for k in attr_dict:
                if k in config_json and config_json[k]:
                    setattr(self, k, config_json[k])
        except (IOError, OSError, ValueError):
            # Missing, unreadable or corrupt config file: keep the defaults.
            # (Narrowed from a bare ``except:`` that hid every error.)
            pass
        # override values by args
        if conf_dict:
            for k in conf_dict:
                if k in attr_dict:
                    setattr(self, k, conf_dict[k])
                else:
                    raise ChargebackError('"%s" is NOT an attribute of conf' % k)

    def save(self):
        """Save Class Attributes to .cnp_chargeback_sdk.conf

        Returns:
            full path for configuration file.

        Raises:
            IOError: An error occurred
        """
        with open(self._CONFIG_FILE_PATH, 'w') as config_file:
            json.dump(vars(self), config_file)
        return self._CONFIG_FILE_PATH
def obj_to_xml(obj):
    """Convert object to xml string without namespaces

    Args:
        obj: Object

    Returns:
        Xml string

    Raises:
        ChargebackError: if the object fails pyxb schema validation.
    """
    # TODO convert object to xml without default namespace gracefully.
    try:
        xml = obj.toxml('utf-8')
    except pyxb.ValidationError as e:
        raise ChargebackError(e.details())
    # Strip the ns1 namespace markers from the serialised bytes.
    for token in (b'ns1:', b':ns1'):
        xml = xml.replace(token, b'')
    return xml
def generate_retrieval_response(http_response, return_format='dict'):
    """Convert a chargeback-retrieval HTTP response body to the requested format."""
    return convert_to_format(http_response.text, "chargebackRetrievalResponse", return_format)
def generate_update_response(http_response, return_format='dict'):
    """Convert a chargeback-update HTTP response body to the requested format."""
    return convert_to_format(http_response.text, "chargebackUpdateResponse", return_format)
def generate_document_response(http_response, return_format='dict'):
    """Convert a document-upload HTTP response body to the requested format."""
    return convert_to_format(http_response.text, "chargebackDocumentUploadResponse", return_format)
def generate_error_response(http_response, return_format='dict'):
    """Convert an error HTTP response body to the requested format."""
    return convert_to_format(http_response.text, "errorResponse", return_format)
def convert_to_format(http_response, response_type, return_format='dict'):
    """Convert a raw XML response string to xml / object / dict form.

    Args:
        http_response: The raw XML payload as a string (every caller in this
            module passes ``response.text``).
        response_type: Root element name expected in the XML (dict mode only).
        return_format: 'xml', 'object', or anything else for a dict.

    Returns:
        The XML string, a pyxb binding object, or a plain dict.
    """
    return_format = return_format.lower()
    if return_format == 'xml':
        # Callers already hand us the body text; the previous code accessed
        # `.text` on this string again and raised AttributeError.
        return http_response
    elif return_format == 'object':
        return convert_to_obj(http_response)
    else:
        return convert_to_dict(http_response, response_type)
def convert_to_obj(xml_response):
    """Parse an XML string into a pyxb binding object via the generated schema."""
    return fields_chargeback.CreateFromDocument(xml_response)
def convert_to_dict(xml_response, response_type):
    """Parse an XML response string into a dict rooted at ``response_type``.

    Raises:
        ChargebackError: when the root element carries no xmlns declaration.
    """
    parsed = xmltodict.parse(xml_response)[response_type]
    # An empty namespace means the payload is not a valid API response.
    if parsed['@xmlns'] == "":
        raise ChargebackError("Invalid Format")
    _create_lists(parsed)
    return parsed
def _create_lists(response_dict):
    """Normalise singleton children of a parsed response into one-element lists.

    xmltodict returns a bare dict when an element appears once; downstream
    code iterates these keys, so wrap chargebackCase, its activity entries,
    and errors.error in lists when needed.
    """
    if "chargebackCase" in response_dict:
        _create_list("chargebackCase", response_dict)
        for case in response_dict["chargebackCase"]:
            if "activity" in case:
                _create_list("activity", case)
    if "errors" in response_dict:
        _create_list("error", response_dict["errors"])
# if there is only one element for the given key in container, create a list for it
def _create_list(element_key, container):
element_value = container[element_key]
if element_value != "" and not isinstance(element_value, list):
container[element_key] = [element_value]
class ChargebackError(Exception):
    """SDK-level error carrying a human-readable message."""

    def __init__(self, message):
        # Forward to Exception so str(e) and e.args carry the message
        # (previously str(e) was always empty).
        super(ChargebackError, self).__init__(message)
        self.message = message
class ChargebackWebError(Exception):
    """Error carrying an HTTP status code and an optional list of API errors."""

    def __init__(self, message, code, error_list=None):
        # Forward to Exception so str(e) and e.args carry the message
        # (previously str(e) was always empty).
        super(ChargebackWebError, self).__init__(message)
        self.message = message
        self.code = code
        self.error_list = error_list
class ChargebackDocumentError(Exception):
    """Error for document operations, carrying a message and a response code."""

    def __init__(self, message, code):
        # Forward to Exception so str(e) and e.args carry the message
        # (previously str(e) was always empty).
        super(ChargebackDocumentError, self).__init__(message)
        self.message = message
        self.code = code
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-12-08 13:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename UserMessage.id to object_id and set verbose names."""

    dependencies = [
        ('message', '0002_auto_20181208_2107'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='usermessage',
            options={'verbose_name': '用户留言信息', 'verbose_name_plural': '用户留言信息'},
        ),
        migrations.RenameField(
            model_name='usermessage',
            old_name='id',
            new_name='object_id',
        ),
    ]
|
from itertools import groupby
from operator import itemgetter
import sys, os
import xlrd
from db import drugDB, reportElm
class ExcelParser:
    """Load one worksheet into a list of dicts and offer small ORM-style,
    chainable queries (select / order_by / distinct / update / group_by).

    The first worksheet row is the header; every following row becomes a dict
    keyed by those headers and augmented with ``**extra_fields``.
    """

    def __init__(self, xl_path=None, file_content=None, sheet_index=0, **extra_fields):
        wb = xlrd.open_workbook(xl_path) if xl_path else xlrd.open_workbook(file_contents=file_content)
        ws = wb.sheet_by_index(sheet_index)
        fields = ws.row_values(0)
        self._records = [dict(zip(fields, ws.row_values(i))) for i in range(1, ws.nrows)]
        for row in self._records:
            row.update(**extra_fields)

    def __getitem__(self, index):
        return self._records[index]

    def __len__(self):
        return len(self._records)

    def __call__(self):
        return self._records

    def select(self, *fields, where=lambda row: row, as_table=False):
        """Keep only ``fields`` of rows matching ``where``.

        Returns self for chaining, or — when ``as_table`` — a list of lists
        with a header row.
        """
        if not fields:
            fields = self._records[0].keys()
        self._records = [{k: v for k, v in row.items() if k in fields}
                         for row in self._records if where(row)]
        if as_table:
            # BUG FIX: this previously read the module-level global `exl`
            # instead of self._records, breaking any use outside __main__.
            return [list(fields)] + [[row[col] for col in fields] for row in self._records]
        return self

    def order_by(self, *rules):
        """Stable multi-key sort; prefix a rule with '-' for descending order."""
        for rule in reversed(rules):
            descending = rule.startswith('-')
            # lstrip (not strip) so only the leading '-' marker is removed.
            key = rule.lstrip('-')
            self._records.sort(key=lambda x: x[key], reverse=descending)
        return self

    def distinct(self, *cols):
        """Drop duplicate rows, keeping the first row per distinct ``cols`` value."""
        ordered = sorted(self._records, key=itemgetter(*cols))
        self._records = [next(rows) for _, rows in groupby(ordered, key=itemgetter(*cols))]
        return self

    def update(self, where=lambda row: row, **set):
        """Assign row[k] = func(row) for every kwarg, on rows matching ``where``."""
        for row in self._records:
            if not where(row):
                continue
            for k, func in set.items():
                row[k] = func(row)
        return self

    def group_by(self, column, **annotates):  # annotates: field_name=func
        """Group by ``column``; each group yields its first row annotated with
        '{field}__{funcname}' aggregates (values coerced to float when possible).

        NOTE: the first row of each group is mutated in place, as before.
        """
        self._records.sort(key=itemgetter(column))
        grouped = []
        for _, members in groupby(self._records, key=itemgetter(column)):
            members = list(members)
            head = members[0]
            for k, func in annotates.items():
                try:
                    values = list(map(float, [e[k] for e in members]))
                except (TypeError, ValueError):
                    # Non-numeric column: aggregate the raw values instead.
                    values = [e[k] for e in members]
                head.update({'{}__{}'.format(k, func.__name__): func(values)})
            grouped.append(head)
        return grouped
# -------------------------------------------------------------------------------------------------------------------------------------------------
# Script part: read one or more dispensing spreadsheets, compute narcotics
# disposal amounts per row, and write a styled xlsx summary plus a report sheet.
import xlsxwriter
if __name__ == '__main__':
    path_list = []
    exl_table = []
    grp = []
    # Work relative to the first dropped file's directory.
    if len(sys.argv) > 1 :
        os.chdir(os.path.dirname(sys.argv[1]))
    # Collect every .xls/.xlsx path passed on the command line.
    for arg in sys.argv:
        fn, ext = os.path.splitext(arg)
        if ext in ['.xls', '.xlsx']:
            path_list.append(arg)
    if not path_list:
        sys.exit(0)
    for n, path in enumerate(path_list):
        # Seed each row with defaults: remainder (잔량), disposal amount
        # (폐기량), disposal unit (폐기단위) and disposal drug name (폐기약품명).
        exl= ExcelParser(path, 잔량=0, 폐기량=0, 폐기단위='ml', 폐기약품명="")
        # Keep rows with a dispense date, a known drug code, and that are not
        # discontinued ('D/C') or returned ('반납').
        exl = exl.select(where=lambda row: row['불출일자']!="" and row['약품코드'] in drugDB and row['반납구분'] not in ['D/C', '반납'])
        exl.order_by('불출일자','병동')
        # remainder = dispensed total - prescribed amount (in standard units)
        exl = exl.update(잔량 = lambda row: float(row['집계량']) - float(row['처방량(규격단위)']))
        # Convert the remainder into the drug's disposal unit via drugDB.
        exl= exl.update(
            폐기량 = lambda row: round(float(row['잔량']) * drugDB[row['약품코드']]['amount'], 2),
            폐기단위 = lambda row: drugDB[row['약품코드']]['amount_unit'],
            폐기약품명 = lambda row: drugDB[row['약품코드']]['name'],
            ).order_by('약품명','불출일자','병동')
        # Only rows with something to dispose of.
        exl = exl.select(where = lambda row: row['폐기량'] >0)
        # Columns: dispense date, ward, patient no, patient name, drug name,
        # prescribed amount, remainder, standard unit, disposal amount, unit.
        select_columns = ['불출일자', '병동', '환자번호', '환자명', '폐기약품명', '처방량(규격단위)', '잔량', '규격단위', '폐기량', '폐기단위' ]
        # Per-drug aggregates: total disposal amount and row count.
        grp += exl.group_by('폐기약품명', 폐기량=sum, 폐기약품명=len)
        exl = exl.select(*select_columns, as_table=True)
        # Only keep one header row across all input files.
        exl_table += exl if n == 0 else exl[1:]
    # -------------------------------------------------------------------------------------------------------------------------------------------------
    # Sheet 1: row-by-row disposal listing for the covered date range.
    date_index = set(row[0] for r, row in enumerate(exl_table) if r >0)
    first_date, last_date = min(date_index), max(date_index)
    title = '{}~{} 마약류 폐기 현황'.format(first_date, last_date)
    fname = '{}.xlsx'.format(title)
    wb = xlsxwriter.Workbook(fname)
    ws = wb.add_worksheet()
    title_format = wb.add_format({'align': 'center', 'bold': True, 'font_size':20})
    float_format = wb.add_format({'num_format': '0.00'})
    ml_format = wb.add_format({'num_format': '0.00 "ml"'})
    mg_format = wb.add_format({'num_format': '0.00 "mg"'})
    g_format = wb.add_format({'num_format': '0.00 "g"'})
    formats = \
        {
        'title': title_format,
        'float': float_format,
        'ml': ml_format,
        'mg': mg_format,
        'g': g_format,
        }
    ws.merge_range(0,0,0, len(exl[0])-2, title, formats['title'])
    ws.set_column('A:A',9) # dispense date (불출일자)
    ws.set_column('B:B',3) # ward (병동)
    ws.set_column('C:C',10) # patient number (환자번호)
    ws.set_column('D:D',6) # patient name (환자명)
    ws.set_column('E:E',20) # disposal drug name (폐기약품명)
    ws.set_column('F:F',5) # prescribed amount (처방량(규격단위))
    ws.set_column('G:G',5) # remainder (잔량)
    ws.set_column('H:H',5) # standard unit (규격단위)
    ws.set_column('I:I',9) # disposal amount (폐기량)
    for r, row in enumerate(exl_table):
        for c, data in enumerate(row):
            # The unit column is folded into the disposal-amount number format.
            if select_columns[c] == '폐기단위':
                continue
            if select_columns[c] == '폐기량' and r >0:
                # row[-1] holds the unit ('ml'/'mg'/'g') keying the format.
                ws.write(r+1, c, data, formats[row[-1]])
            elif select_columns[c] == '처방량(규격단위)' and r > 0:
                ws.write(r+1, c, float(data), formats['float'])
            else:
                ws.write(r+1, c, data)
    # Append the per-drug totals block under the listing.
    appen_r = r + 3
    ws.write(appen_r, 3, '종합')
    ws.write(appen_r, 4, '폐기약품명')
    ws.write(appen_r, 5, '')
    ws.write(appen_r, 6, '수량')
    ws.write(appen_r, 7, '규격')
    ws.write(appen_r, 8, '폐기량')
    appen_r +=1
    for r, row in enumerate(grp):
        # NOTE(review): relies on grp rows retaining the source columns
        # '규격단위' and '약품코드' — verify against the input sheet schema.
        ws.write(appen_r+r, 4, row['폐기약품명'])
        ws.write(appen_r+r, 6, row['폐기약품명__len'])
        ws.write(appen_r+r, 7, row['규격단위'])
        ws.write(appen_r+r, 8, row['폐기량__sum'], formats[drugDB[row['약품코드']]['amount_unit']])
    # Sheet 2: official monthly disposal report (보고서).
    ws2 = wb.add_worksheet('보고서')
    ws2.set_column('A:A',12) # manufacturer (제조자)
    ws2.set_column('B:B',25) # drug name (약품명)
    ws2.set_column('C:C',5) # class (구분)
    ws2.set_column('D:D',20) # ingredient (성분명)
    ws2.set_column('E:E',5) # dosage form (제형)
    ws2.set_column('F:F',15) # lot number (제조번호)
    ws2.set_column('G:G',12) # expiry date (유효기한)
    ws2.set_column('H:H',9) # disposal amount (폐기량)
    ws2.set_column('I:I',5) # count (개수)
    ws2.set_column('J:J',5) # unit (규격)
    y, m, d = last_date.split('-')
    title2 = '{}년 {}월 잔여마약류 폐기 결과보고'.format(y, m)
    cr = 0
    ws2.merge_range(cr, 0, cr, 9, title2, formats['title'])
    cr+=1
    # Reporter (official) header block.
    fm1 = wb.add_format({'align': 'left', 'bold': True, 'font_size':15, 'border':True})
    ws2.merge_range(cr, 0, cr, 9, '보고인(공무원)', fm1)
    cr+=1
    fm2 = wb.add_format({'align': 'center', 'font_size': 12, 'bold': True, 'border':True})
    ws2.merge_range(cr,0,cr,1, '성명', fm2)
    ws2.merge_range(cr,2,cr,3, '생년월일', fm2)
    ws2.merge_range(cr,4,cr,5, '전화번호', fm2)
    ws2.merge_range(cr,6,cr,7, '등록번호', fm2)
    ws2.merge_range(cr,8,cr,9, '허가종별', fm2)
    cr+=1
    fm3 = wb.add_format({'align':'center', 'font_size': 12, 'border':True})
    ws2.merge_range(cr, 0, cr, 1, reportElm['repoter']['name'], fm3)
    ws2.merge_range(cr, 2, cr, 3, reportElm['repoter']['birth'], fm3)
    ws2.merge_range(cr, 4, cr, 5, reportElm['repoter']['tel'], fm3)
    ws2.merge_range(cr, 6, cr, 7, reportElm['repoter']['assign_num'], fm3)
    ws2.merge_range(cr, 8, cr, 9, reportElm['repoter']['perm_class'], fm3)
    cr +=1
    fm4 = wb.add_format({'align':'center', 'font_size': 15, 'valign':'vcenter', 'border':True})
    ws2.merge_range(cr, 0, cr+1, 1, '업소명칭', fm4)
    ws2.merge_range(cr, 2, cr+1, 3, '대표자', fm4)
    ws2.merge_range(cr, 4, cr, 9, '업소 소재지', fm3)
    cr+=1
    ws2.merge_range(cr, 4, cr, 5, '지역', fm3)
    ws2.merge_range(cr, 6, cr, 9, '세부주소', fm3)
    cr+=1
    fm5 = wb.add_format({'align':'center', 'font_size': 10, 'border':True})
    ws2.merge_range(cr, 0, cr, 1, reportElm['repoter']['market'], fm5)
    ws2.merge_range(cr, 2, cr, 3, reportElm['repoter']['name'], fm5)
    ws2.merge_range(cr, 4, cr, 5, reportElm['repoter']['region'], fm5)
    ws2.merge_range(cr, 6, cr, 9, reportElm['repoter']['address'], fm5)
    cr+=1
    ws2.merge_range(cr, 0, cr, 9, "")
    cr+=1
    # Disposal-event information block (date, witnesses, place, method, reason).
    ws2.merge_range(cr, 0, cr, 9, '폐기정보', fm1)
    cr+=1
    ws2.merge_range(cr, 0, cr, 1, '폐기일시', fm2)
    ws2.merge_range(cr, 2, cr, 9, reportElm['remainInfo']['date'], fm2)
    cr+=1
    ws2.merge_range(cr, 0, cr, 1, '입회자(부서 및 성명)', fm5)
    ws2.merge_range(cr, 2, cr, 3, '폐기자 (부서 및 성명)', fm5)
    ws2.merge_range(cr, 4, cr, 7, '폐기장소', fm5)
    ws2.merge_range(cr, 8, cr, 9, '폐기방법', fm5)
    cr+=1
    ws2.merge_range(cr, 0, cr, 1, reportElm['remainInfo']['observer'], fm5)
    ws2.merge_range(cr, 2, cr, 3, reportElm['remainInfo']['supervisor'], fm5)
    ws2.merge_range(cr, 4, cr, 7, reportElm['remainInfo']['place'], fm5)
    ws2.merge_range(cr, 8, cr, 9, reportElm['remainInfo']['method'], fm5)
    cr+=1
    ws2.merge_range(cr, 0, cr, 6, '사유', fm5)
    ws2.merge_range(cr, 7, cr, 9, '세부사유', fm5)
    cr+=1
    ws2.merge_range(cr, 0, cr, 6, reportElm['remainInfo']['reason'], fm5)
    ws2.merge_range(cr, 7, cr, 9, reportElm['remainInfo']['reasonDetail'], fm5)
    cr+=1
    ws2.merge_range(cr, 0, cr, 9, "")
    cr+=1
    # Per-drug disposal table for the reported date range.
    ws2.merge_range(cr, 0, cr, 9, '폐기마약류 {}~{}'.format(first_date, last_date), fm1)
    cr+=1
    fm7 = wb.add_format({'align':'center', 'border':True})
    ws2.write(cr, 0, '제조자(수입자)명', fm7)
    ws2.write(cr, 1, '약품명', fm7)
    ws2.write(cr, 2, '구분', fm7)
    ws2.write(cr, 3, '성분명', fm7)
    ws2.write(cr, 4, '제형', fm7)
    ws2.write(cr, 5, '제조번호', fm7)
    ws2.write(cr, 6, '유효기한', fm7)
    ws2.write(cr, 7, '폐기량', fm7)
    ws2.write(cr, 8, '개수', fm7)
    ws2.write(cr, 9, '규격', fm7)
    cr+=1
    fm6 = wb.add_format({'border':True})
    # Bordered variants of the unit number formats for the report table.
    ml_format = wb.add_format({'num_format': '0.00 "ml"', 'border':True })
    mg_format = wb.add_format({'num_format': '0.00 "mg"', 'border':True })
    g_format = wb.add_format({'num_format': '0.00 "g"', 'border':True })
    formats = \
        {
        'title': title_format,
        'float': float_format,
        'ml': ml_format,
        'mg': mg_format,
        'g': g_format,
        }
    for r, row in enumerate(grp, cr):
        key = row['약품코드']
        firm = drugDB[key]['firm']
        name = drugDB[key]['name']
        cl = drugDB[key]['class']
        component = drugDB[key]['component']
        shape = drugDB[key]['shape']
        # Lot number and expiry are filled in by hand on the printed report.
        lot_num = " "
        expire = " "
        amount = row['폐기량__sum']
        fm_amount = formats[drugDB[key]['amount_unit']]
        count = row['폐기약품명__len']
        std_unit = drugDB[key]['std_unit']
        ws2.write(r, 0, firm, fm6)
        ws2.write(r, 1, name, fm6)
        ws2.write(r, 2, cl, fm6)
        ws2.write(r, 3, component, fm6)
        ws2.write(r, 4, shape, fm6)
        ws2.write(r, 5, lot_num, fm6)
        ws2.write(r, 6, expire, fm6)
        ws2.write(r, 7, amount, fm_amount)
        ws2.write(r, 8, count, fm6)
        ws2.write(r, 9, std_unit, fm6)
    print(grp[0]['약품코드'])
    wb.close()
    # Open the finished workbook (Windows only).
    os.startfile(fname)
|
# #TaxCalculator
#
# gross_salary = 40000
# loan = 3000
# sacco = 2000
#
# age = (input("enter age: "))
# if age <=18:
# result = gross_salary * 3/100
# elif age > 18 and age <= 24:
# result = gross_salary * 6/100
# elif age > 24 and age <= 35:
# result = gross_salary * 8/100
# elif age > 35 and age <= 40:
# result = gross_salary * 16/100
# elif age > 40 and age <= 55:
# result = gross_salary * 12/100
# elif age > 55:
# result = gross_salary * 3/100
# else:
# print("Did not enter age")
#
#
# taxed_return = gross_salary-result
# sum_total = taxed_return-loan-sacco
# print(sum_total)
#
#
# #GRADING SYSTEM#
#
#
# while True:
# marks_input = int(input("input marks: "))
# print("User entered", marks_input)
# isinteger = isinstance(marks_input, int)
# print(isinteger)
# if marks_input >= 91 and marks_input <=100:
# print("A")
# elif marks_input >= 81 and marks_input <= 90:
# print("B")
# elif marks_input >= 71 and marks_input<= 80:
# print("C")
# elif marks_input >= 61 and marks_input<= 70:
# print("D")
# elif marks_input >= 51 and marks_input <= 60:
# print("E")
# elif marks_input >= 41 and marks_input <= 50:
# print("F")
# else:
# print("FAIL")
#
import time
import os
import sys
import math
##from colorama import Fore, Back, Style
from datetime import datetime
from datetime import timedelta
# Countdown for a self-imposed coding session: shows elapsed/remaining time
# every second, then kills VS Code and powers off when time is up.
challengetime = int(input("ENTER EXPECTED CODING TIME IN HOURS : "))
if challengetime <= 0:  # simplified from `< 0 or == 0`
    print("BE REALISTIC MAN ...")
else:
    counter = 0  # elapsed whole seconds
    timeoffinish = datetime.now() + timedelta(hours=challengetime)
    countertime = time.ctime()  # start timestamp, reprinted every tick
    while True:
        # Integer division — the original used true division plus math.ceil
        # (and a spurious `% 60` on hours), which drifted and misreported.
        seconds = counter % 60
        minutes = counter // 60 % 60
        hours = counter // 3600
        # Compare against the wall clock directly instead of reconstructing
        # the deadline from ceil'd components.
        if datetime.now() >= timeoffinish:
            print("\n")
            print("Time Is Up Dev,Take A rest.You Have 3 Seconds TO SAVE YOUR WORK SOLDIER")
            time.sleep(5)
            os.system('killall code')
            print("Sleeping In 5 Seconds ...")
            time.sleep(5)
            os.system('poweroff')
        else:
            remaining = int((timeoffinish - datetime.now()).total_seconds())
            print("START TIME : " + countertime + "\n")
            print("FINISH LINE : " + str(timeoffinish.strftime("%c")) + "\n")
            print("CURRENT TIME : " + time.ctime() + "\n")
            print("TIME REMAINING : " + str(remaining // 3600) + " Hours "
                  + str(remaining // 60 % 60) + " Minutes " + str(remaining % 60) + " Seconds" + "\n")
            print("You Have Been CODING For => " + str(hours) + " HOURS : " + str(minutes) + " MINUTES : "
                  + str(seconds) + " SECONDS ")
            counter = counter + 1
            time.sleep(1)
            os.system('clear')
|
from setuptools import setup
# Minimal setuptools metadata for the clouds_are_fun package.
setup(
    name='clouds_are_fun',
    version='0.0.1',
    description="Clouds are (as stated) fun!",
    author='Rachel Storer',
    author_email='rls347@gmail.com',
    license='',
    packages=['clouds_are_fun'],
    zip_safe=False)
|
# $example on$
from pyspark.ml.classification import LogisticRegression, LinearSVC
# $example off$
from pyspark.sql import SparkSession
from configs import d_path
"""
An example demonstrating Logistic Regression Summary.
Run with:
bin/spark-submit examples/src/main/python/ml/logistic_regression_summary_example.py
"""
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("LogisticRegressionSummary") \
.getOrCreate()
# Load training data
training = spark.read.format("libsvm").load(f"{d_path}ml/mllib/sample_libsvm_data.txt")
# training.printSchema()
# root
# | -- label: double(nullable=true)
# | -- features: vector(nullable=true), 训练预测feature字段是必须的
train, test = training.randomSplit([0.9, 0.1], seed=12345)
svm = LinearSVC(maxIter=10, regParam=0.3, threshold=0.0)
# Fit the model
lsvcModel = svm.fit(train)
print("Coefficients: " + str(lsvcModel.coefficients))
print("Intercept: " + str(lsvcModel.intercept))
# |features | rawPrediction| prediction|
# |特 征 | 概率 | 预测值 |
lsvcModel.transform(test).show()
# $example off$
spark.stop()
|
from db import create_app
from db.api.views import api_bp as api_module
from db.custom_api.views import custom_api_bp as custom_api_module
from flasgger import Swagger
if __name__ == '__main__':
    # Build the app with the development config, mount both API blueprints,
    # attach Swagger docs, and serve on localhost:8080 with the debugger on.
    app = create_app('config.DevelopmentConfig')
    app.register_blueprint(api_module)
    app.register_blueprint(custom_api_module)
    Swagger(app)
    #app.run(host="127.0.0.1", port=int(app.config['PORT']))
    app.run(host='127.0.0.1', port=8080, debug=True)
|
def reference_demo(x):
    """Demonstrate that rebinding a parameter does not affect the caller's variable.

    Prints the value and id() of ``x`` before and after the local rebind.
    """
    print("Before assigning: x=", x, " id=", id(x))
    x = 8
    print("After assigning: x=", x, " id=", id(x))
# Driver: x keeps its value (and id) across the call — the rebind inside
# reference_demo only changed the function's local name.
x = 1
print("Before the call: x=", x, " id=", id(x))
reference_demo(x)
print("After the call: x=", x, " id=", id(x))
|
import sqlite3
import hashlib
#execfile('databasecommands.py')
# SECURITY NOTE: exec of a local file brings DataBase into scope; fine for a
# trusted local script, but never do this with untrusted content.
exec(open('databasecommands.py').read())
DB = DataBase('stuff.db')
#
#SET UP INITIAL TABLES
#
#check if user table is inputted
if not DB.tableExists('users'):
    print('no table "users"... making one now...')
    DB.createTable('users (username VARCHAR(25) UNIQUE, password VARCHAR(100))')
    #add default admin
    print('adding default admin user...')
    DB.addUser('admin','admin')
else:
    print('already "users" table')
#QUOTE TABLE
if not DB.tableExists('quotes'):
    print('no table "quotes"... making one now...')
    #id: auto generated just add null
    #quote: actual text from the quote
    #book_id: id of the book used, see Books table below
    #page: page number of quote
    #user: name of user who submitted quote
    #date: YYYY-MM-DD HH:MM:SS
    DB.createTable('quotes (id INTEGER PRIMARY KEY AUTOINCREMENT, quote TEXT, book_id INTEGER, page INTEGER, user VARCHAR(25), date TEXT)')
else:
    print('already "quotes" table')
#TAG TABLE
if not DB.tableExists('tags'):
    print('no table "tags"... making one now...')
    #id: unique identifier for tag
    #tag: actual tag, no more than 120 characters
    DB.createTable('tags (id INTEGER PRIMARY KEY AUTOINCREMENT, tag VARCHAR(120) UNIQUE)')
else:
    print('already "tags" table')
#QUOTE TAG MAP TABLE
if not DB.tableExists('quote_tag'):
    print('no table "quote_tag"... making one now...')
    #quote_id: id corresponding to quote
    #tag_id: id corresponding to tag
    DB.createTable('quote_tag (quote_id INTEGER, tag_id INTEGER)')
else:
    print('already "quote_tag" table')
#BOOK TABLE
if not DB.tableExists('books'):
    print('no table "books"... making one now...')
    #Title: Book title #as spelled on book
    #Author: as appears on book
    #isbn: as txt no hyphans
    DB.createTable('books (id INTEGER PRIMARY KEY AUTOINCREMENT, title VARCHAR(255), author VARCHAR(255), isbn VARCHAR(13) UNIQUE)')
else:
    print('already "quote_tag" table')
print('adding test data...')
if DB.getQuote(1):
    print('already quotes')
else:
    #def addBook(self,title,author,isbn10,isbn13):
    # NOTE(review): the commented signature lists isbn10 AND isbn13, but these
    # calls pass a single isbn — confirm against databasecommands.py.
    DB.addBook("Harry Potter and the Philosopher's Stone",'J. K. Rowling','9780590353427')
    DB.addBook('Brave New World','Aldous Huxley','0060809833')
    DB.addBook('Catch 22','Paul Bacon','0684833395')
    #def getBookId(self,title = None,author = None,isbn10 = None,isbn13 = None):
    bookid = DB.getBookId(title='Catch 22',author='Paul Bacon',isbn='0684833395')[0]
    #addQuote(self,quote,book_id,page,user,date,tags = []):
    # NOTE(review): the documented signature includes a `date` argument that
    # this call omits — verify the real addQuote signature.
    DB.addQuote('This is a quote',bookid,397,'admin',['catch22','tag2'])
def _resetDB():
    """Drop every table created above (dev helper; destroys all data)."""
    DB.dropTable('users')
    DB.dropTable('quotes')
    DB.dropTable('tags')
    DB.dropTable('quote_tag')
    DB.dropTable('books')
# Release the sqlite connection once setup is complete.
DB.close()
#!/usr/bin/env python2.7
# coding: utf-8
import os
import re
import datetime
import traceback
from slog import SysLog
class Writer(object):
    """Size-rotated append-only log file writer.

    Appends to ``target/file_name``; once the file reaches ``max_size`` bytes
    it is closed, renamed with a timestamp suffix, and a fresh file is opened.
    At most ``max_count`` rotated files are kept (a negative max_count keeps
    all of them).  Every public method logs failures through SysLog instead of
    raising, so a logging problem never takes the caller down.
    """

    def __init__(self, lock, target, file_name, max_size, max_count):
        self.target = target
        self.file_path = os.path.join(target, file_name)
        self.file_name = file_name
        self.max_size = max_size
        self.max_count = max_count
        self.current_size = 0
        self.handler = None
        # Optional lock serialising write() across threads/processes; may be None.
        self.lock = lock

    def open(self):
        """Open the log file for appending; returns True on success."""
        try:
            self.handler = open(self.file_path, 'ab')
            self.current_size = self.handler.tell()
            return True
        except Exception:
            SysLog.error(traceback.format_exc())

    def write(self, content):
        """Write content, serialised through self.lock when one was given."""
        try:
            if self.lock is None:
                return self.__write(content)
            with self.lock:
                return self.__write(content)
        except Exception:
            SysLog.error(traceback.format_exc())

    def __write(self, content):
        try:
            try:
                self.handler.write(content)
                self.current_size = self.handler.tell()
            except Exception as e:
                # Write failed (e.g. file removed underneath us): reopen once.
                # (`except Exception, e` modernised to the `as e` form, valid
                # on Python 2.6+ and 3; e.message replaced by str(e).)
                self.close()
                self.open()
                SysLog.error(str(e))
            if self.current_size >= self.max_size:
                # Rotate: close, rename with a timestamp, prune old, reopen.
                self.close()
                self.__rename()
                self.__clean()
                self.open()
            return True
        except Exception:
            SysLog.error(traceback.format_exc())

    def close(self):
        """Flush and close the current file, if any.

        NOTE(review): when current_size <= 0 this returns without closing the
        handle (original behaviour, preserved) — confirm that is intentional.
        """
        try:
            if self.handler is None:
                return
            if self.current_size <= 0:
                return
            self.handler.flush()
            self.handler.close()
            self.handler = None
        except Exception:
            SysLog.error(traceback.format_exc())

    def __rename(self):
        """Rename the full log file with a microsecond-resolution timestamp suffix."""
        try:
            # %Y%m%d%H%M%S%f is exactly 20 digits, matched by __clean below.
            ending = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
            new_file_path = self.file_path + '.' + ending
            os.rename(self.file_path, new_file_path)
        except Exception:
            SysLog.error(traceback.format_exc())

    def __clean(self):
        """Delete the oldest rotated files so at most max_count remain."""
        try:
            if self.max_count < 0:
                return
            rotated = dict()
            """:type: dict[int, string]"""
            for name in os.listdir(self.target):
                # re.escape so dots in the base file name match literally.
                if not re.match(r'^' + re.escape(self.file_name) + r'\.\d{20}$', name):
                    continue
                when = name.split('.')[-1]
                rotated[int(when)] = name
            deleted_count = len(rotated) - self.max_count
            # BUG FIX: iterate timestamps in ascending order so the OLDEST
            # files are deleted first.  The original walked dict order, which
            # on Python 2 is arbitrary and could delete recent files instead.
            deleted_list = list()
            for when in sorted(rotated):
                if deleted_count <= 0:
                    break
                deleted_list.append(rotated[when])
                deleted_count -= 1
            for name in deleted_list:
                full_path = os.path.join(self.target, name)
                os.remove(full_path)
        except Exception:
            SysLog.error(traceback.format_exc())
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-30 22:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add nullable plan_version (default 1) to the plan model."""

    dependencies = [
        ('horas', '0012_carrera_carrera_abreviada'),
    ]

    operations = [
        migrations.AddField(
            model_name='plan',
            name='plan_version',
            field=models.IntegerField(blank=True, default=1, null=True),
        ),
    ]
|
## Generate a contour plot
# Import some other libraries that we'll need
# matplotlib and numpy packages must also be installed
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# define objective function
# define objective function
def f(x):
    """Quadratic objective f(x1, x2) = x1^2 - 2*x1*x2 + 4*x2^2."""
    x1, x2 = x[0], x[1]
    return x1 ** 2 - 2.0 * x1 * x2 + 4 * x2 ** 2
# define objective gradient
def dfdx(x):
    """Analytic gradient of f at x, returned as [df/dx1, df/dx2]."""
    x1, x2 = x[0], x[1]
    return [2.0 * x1 - 2.0 * x2,
            -2.0 * x1 + 8.0 * x2]
# Exact 2nd derivatives (hessian)
H = [[2.0, -2.0],[-2.0, 8.0]]

# Start location
x_start = [-3.0, 2.0]

# Design variables at mesh points
i1 = np.arange(-4.0, 4.0, 0.1)
i2 = np.arange(-4.0, 4.0, 0.1)
x1_mesh, x2_mesh = np.meshgrid(i1, i2)
f_mesh = x1_mesh**2 - 2.0 * x1_mesh * x2_mesh + 4 * x2_mesh**2

# Create a contour plot
plt.figure()
# Specify contour lines
lines = range(2,52,2)
# Plot contours
CS = plt.contour(x1_mesh, x2_mesh, f_mesh,lines)
# Label contours
plt.clabel(CS, inline=1, fontsize=10)
# Add some text to the plot
plt.title('f(x) = x1^2 - 2*x1*x2 + 4*x2^2')
plt.xlabel('x1')
plt.ylabel('x2')
# Show the plot
#plt.show()

##################################################
# Newton's method
##################################################
# For a quadratic objective a single Newton step lands on the minimum.
xn = np.zeros((2,2))
xn[0] = x_start
# Get gradient at start location (df/dx or grad(f))
gn = dfdx(xn[0])
# Compute search direction and magnitude (dx)
# with dx = -inv(H) * grad
delta_xn = np.empty((1,2))
delta_xn = -np.linalg.solve(H,gn)
xn[1] = xn[0]+delta_xn
plt.plot(xn[:,0],xn[:,1],'k-o')

##################################################
# Steepest descent method
##################################################
# Number of iterations
n = 8
# Use this alpha for every line search
alpha = 0.15
# Initialize xs
xs = np.zeros((n+1,2))
xs[0] = x_start
# Get gradient at start location (df/dx or grad(f))
for i in range(n):
    gs = dfdx(xs[i])
    # Compute search direction and magnitude (dx)
    # with dx = - grad but no line searching
    xs[i+1] = xs[i] - np.dot(alpha,dfdx(xs[i]))
plt.plot(xs[:,0],xs[:,1],'g-o')

##################################################
# Conjugate gradient method
##################################################
# Number of iterations
n = 8
# Use this alpha for the first line search
alpha = 0.15
neg = [[-1.0,0.0],[0.0,-1.0]]
# Initialize xc
xc = np.zeros((n+1,2))
xc[0] = x_start
# Initialize delta_gc
delta_cg = np.zeros((n+1,2))
# Initialize gc
gc = np.zeros((n+1,2))
# Get gradient at start location (df/dx or grad(f))
for i in range(n):
    gc[i] = dfdx(xc[i])
    # Compute search direction and magnitude (dx)
    # with dx = - grad but no line searching
    if i==0:
        beta = 0
        delta_cg[i] = - np.dot(alpha,dfdx(xc[i]))
    else:
        # Fletcher-Reeves style beta: ||g_i||^2 / ||g_{i-1}||^2
        beta = np.dot(gc[i],gc[i]) / np.dot(gc[i-1],gc[i-1])
        delta_cg[i] = alpha * np.dot(neg,dfdx(xc[i])) + beta * delta_cg[i-1]
    xc[i+1] = xc[i] + delta_cg[i]
plt.plot(xc[:,0],xc[:,1],'y-o')

##################################################
# Quasi-Newton method
##################################################
# Number of iterations
n = 8
# Use this alpha for every line search
alpha = np.linspace(0.1,1.0,n)
# Initialize delta_xq and gamma
# NOTE: the part1..part9 zero arrays below are placeholders that are
# overwritten each iteration; they only document the intermediate shapes.
delta_xq = np.zeros((2,1))
gamma = np.zeros((2,1))
part1 = np.zeros((2,2))
part2 = np.zeros((2,2))
part3 = np.zeros((2,2))
part4 = np.zeros((2,2))
part5 = np.zeros((2,2))
part6 = np.zeros((2,1))
part7 = np.zeros((1,1))
part8 = np.zeros((2,2))
part9 = np.zeros((2,2))
# Initialize xq
xq = np.zeros((n+1,2))
xq[0] = x_start
# Initialize gradient storage
g = np.zeros((n+1,2))
g[0] = dfdx(xq[0])
# Initialize hessian storage
h = np.zeros((n+1,2,2))
h[0] = [[1, 0.0],[0.0, 1]]
for i in range(n):
    # Compute search direction and magnitude (dx)
    # with dx = -alpha * inv(h) * grad
    delta_xq = -np.dot(alpha[i],np.linalg.solve(h[i],g[i]))
    xq[i+1] = xq[i] + delta_xq
    # Get gradient update for next step
    g[i+1] = dfdx(xq[i+1])
    # Get hessian update for next step (DFP/BFGS-style rank-two update,
    # assembled term by term)
    gamma = g[i+1]-g[i]
    part1 = np.outer(gamma,gamma)
    part2 = np.outer(gamma,delta_xq)
    part3 = np.dot(np.linalg.pinv(part2),part1)
    part4 = np.outer(delta_xq,delta_xq)
    part5 = np.dot(h[i],part4)
    part6 = np.dot(part5,h[i])
    part7 = np.dot(delta_xq,h[i])
    part8 = np.dot(part7,delta_xq)
    part9 = np.dot(part6,1/part8)
    h[i+1] = h[i] + part3 - part9
plt.plot(xq[:,0],xq[:,1],'r-o')

# Save the figure as a PNG
plt.savefig('contour.png')

plt.show()
|
import re
import sys
from contextlib import contextmanager
from pprint import pprint as p
from app import create_app
from app.jiraapi import get_marketplace_jira
from app.models import Application
app = create_app('development')
@contextmanager
def jira_with_app_context():
    """Yield a marketplace JIRA client while holding a Flask app context."""
    with app.app_context():
        yield get_marketplace_jira(False)
def fix_custom_fields():
    """One-off data repair: move misfiled supplier/application ids between the
    two JIRA custom fields for issues created in the given date window."""
    with jira_with_app_context() as j:
        # Supplier Assessments: a non-zero supplier field actually holds the
        # application id — move it and zero the supplier field.
        bad_issues = j.generic_jira.jira.search_issues('project = MARADMIN AND issuetype = "Supplier Assessment" '
                                                       'AND created >= 2012-05-31 AND created <= 2017-05-23')
        for bad_issue in bad_issues:
            if bad_issue.raw['fields'][j.supplier_field_code] != 0:
                bad_issue.update({j.application_field_code: str(bad_issue.raw['fields'][j.supplier_field_code]),
                                  j.supplier_field_code: str(0)})
        # Domain Assessments: recover the supplier id from the "(#<id>)" suffix
        # of the issue summary, then zero the application field.
        bad_issues = j.generic_jira.jira.search_issues('project = MARADMIN AND issuetype = "Domain Assessment" '
                                                       'AND created >= 2012-05-31 AND created <= 2017-05-23')
        for bad_issue in bad_issues:
            if bad_issue.raw['fields'][j.application_field_code] != 0:
                bad_issue.update({j.supplier_field_code:
                                  str(re.search(r"\(#(.*)\)$", bad_issue.fields.summary).group(1)),
                                  j.application_field_code: str(0)})
def create_approval_task(application_id):
    """Mark the application as submitted and raise its JIRA approval task."""
    with jira_with_app_context():
        application = Application.query.filter_by(id=application_id).first()
        application.status = 'submitted'
        application.create_approval_task()
def list_tasks():
    """Pretty-print every assessment task known to JIRA."""
    with jira_with_app_context() as jira_client:
        for task in jira_client.get_assessment_tasks():
            p(task)
def tasks_by_id():
    """Pretty-print the assessment tasks keyed by application id."""
    with jira_with_app_context() as jira_client:
        p(jira_client.assessment_tasks_by_application_id())
def create_subtask_issuetype():
    """Register the 'Supplier Assessment Step' sub-task issue type in JIRA."""
    with jira_with_app_context() as jira_client:
        jira_client.create_issuetype(
            'Supplier Assessment Step',
            'A necessary step for carrying out a supplier assessment',
            subtask=True,
        )
def connect():
    """Smoke-test the JIRA connection by printing the server info."""
    with jira_with_app_context() as jira_client:
        p(jira_client.generic_jira.jira.server_info())
if __name__ == '__main__':
    # CLI dispatch: the first argument names a task function defined in this
    # module; remaining arguments are forwarded to it.
    try:
        task_method = getattr(sys.modules[__name__], sys.argv[1])
    except (IndexError, AttributeError):
        # IndexError: no task name supplied; AttributeError: unknown task.
        # The original code only caught AttributeError, so running the script
        # with no arguments crashed with an unhandled IndexError.
        print('no such task')
        sys.exit(1)
    task_method(*sys.argv[2:])
|
import os
from unittest import TestCase
from xml.etree import ElementTree as ET
from xam import Addon
try:
from collections import OrderedDict
except ImportError:
from collective.ordereddict import OrderedDict
class TestAddon(TestCase):
    """Tests for xam.Addon parsing of data/addon.xml.

    NOTE(review): uses the Python 2-only builtin `unicode`; this suite will
    not run unchanged on Python 3.
    """
    def assert_attrs(self, obj, attrs):
        # Each named attribute must match its expected value and be a unicode string.
        for attr_name, expected_value in attrs.items():
            attr_value = getattr(obj, attr_name)
            self.assertEqual(expected_value, attr_value)
            self.assertTrue(isinstance(attr_value, unicode))
    def assert_dict(self, expected, actual):
        # All keys/values must be unicode, and the dicts must be equal.
        for key, val in actual.items():
            self.assertTrue(isinstance(key, unicode))
            self.assertTrue(isinstance(val, unicode))
        self.assertEqual(expected, actual)
    def test_parse(self):
        """Parse the fixture addon.xml and check every exposed property."""
        addon = Addon.from_filename(os.path.join(os.path.dirname(__file__), 'data', 'addon.xml'))
        expected = {
            # attr_name: expected_value
            'version': u'1.2.1',
            'id': u'plugin.video.academicearth',
            'name': u'Academic Earth',
            'provider': u'Jonathan Beluch (jbel)',
        }
        self.assert_attrs(addon, expected)
        self.assert_dict({
            u'xbmc.python': u'2.0',
            u'script.module.beautifulsoup': u'3.0.8',
            u'script.module.xbmcswift': u'0.2.0',
            u'plugin.video.youtube': u'2.9.1',
        }, addon.dependencies)
        self.assertEqual(addon.languages, ['en', 'fr'])
        self.assertNotEqual(None, addon.metadata)
        self.assertEqual('all', addon.platform)
        # Summaries/descriptions are keyed by language; None means "no lang attr".
        self.assertEqual(OrderedDict(
            [(None, 'Watch lectures from Academic Earth (http://academicearth.org)')]
        ), addon.summaries)
        self.assertEqual('Watch lectures from Academic Earth (http://academicearth.org)',
                         addon.summary())
        #self.assertEqual('Watch lectures from Academic Earth (http://academicearth.org)',
                         #addon.summary('en'))
        self.assertEqual(OrderedDict(
            [(None,'Browse online courses and lectures from the world\'s top scholars.')]
        ), addon.descriptions)
        self.assertEqual('Browse online courses and lectures from the world\'s top scholars.',
                         addon.description())
    def test_setters(self):
        """The version property must be writable on a parsed addon."""
        xml = ET.parse(os.path.join(os.path.dirname(__file__), 'data', 'addon.xml')).getroot()
        addon = Addon(xml)
        self.assertEqual('1.2.1', addon.version)
        addon.version = '1.2.2'
        self.assertEqual('1.2.2', addon.version)
    def test_to_dict(self):
        """to_dict() must round-trip all public fields (private keys skipped)."""
        addon = Addon.from_filename(os.path.join(os.path.dirname(__file__), 'data', 'addon.xml'))
        actual = addon.to_dict()
        with open(os.path.join(os.path.dirname(__file__), 'data', 'addon.xml')) as inp:
            xml = inp.read()
        expected = {
            'id': u'plugin.video.academicearth',
            'name': u'Academic Earth',
            'version': u'1.2.1',
            'provider': u'Jonathan Beluch (jbel)',
            'dependencies': {
                'xbmc.python': '2.0',
                'script.module.beautifulsoup': '3.0.8',
                'script.module.xbmcswift': '0.2.0',
                'plugin.video.youtube': '2.9.1',
            },
            'summaries': {None: u"Watch lectures from Academic Earth (http://academicearth.org)"},
            'descriptions': {None: u"Browse online courses and lectures from the world's top scholars."},
            'platform': 'all',
            '_xml': xml,
        }
        for key, val in expected.items():
            if not key.startswith('_'):
                self.assertEqual(val, actual[key])
LANG_XML_TMP = '''
<addon id="plugin.video.academicearth" name="Academic Earth" provider-name="Jonathan Beluch (jbel)" version="1.2.1">
<extension point="xbmc.addon.metadata">
%s
</extension>
</addon>
'''
class TestLangTags(TestCase):
    """Checks how Addon.languages parses the <language> metadata tag."""

    def _languages(self, inner_xml):
        # Build an addon from the template with *inner_xml* spliced in and
        # return its parsed language list.
        return Addon(ET.fromstring(LANG_XML_TMP % inner_xml)).languages

    def test_no_lang_tag(self):
        self.assertEqual(self._languages(''), [])

    def test_self_close_lang_tag(self):
        self.assertEqual(self._languages('<language/>'), [])

    def test_empty_lang_tag(self):
        self.assertEqual(self._languages('<language></language>'), [])

    def test_data_lang_tag(self):
        self.assertEqual(self._languages('<language>en</language>'), ['en'])
        self.assertEqual(self._languages('<language>en fr</language>'), ['en', 'fr'])
if __name__ == '__main__':
    # Bug fix: the module only does `from unittest import TestCase`, so the
    # bare `unittest.main()` raised NameError when the file was run directly.
    import unittest
    unittest.main()
|
#RUBEN CUADRA
#DAVID GAONA
#code to run a server that manages game boards of the Onitama game
class options():
    """Game-mode and difficulty bytes received from clients over the wire."""
    PVE = b'0'  # player versus AI
    PVP = b'1'  # player versus player
    DIFFICULTY_NORMAL = b'1'
    DIFFICULTY_HARD = b'2'
class responses():
    """Status codes sent back to clients.

    NOTE(review): these are str while the `options` codes above are bytes —
    confirm the intended wire encoding is consistent.
    """
    OK = '0'
    WRONG_DIFFICULTY="26"
# Side identifiers used by the board logic.
BLUE = 0
RED = 1
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import tools
from osv import osv
from osv import fields
class mail_compose_message(osv.osv_memory):
    """Mail composition wizard extended with email-template support:
    template selection per target model, on-the-fly rendering, and
    save-as-template. (Legacy OpenERP osv API.)"""
    _inherit = 'mail.compose.message'
    def _get_templates(self, cr, uid, context=None):
        """Return selection choices [(id, name)] of email templates whose
        model matches the current composition target, plus an empty choice."""
        if context is None:
            context = {}
        model = False
        email_template_obj = self.pool.get('email.template')
        # When replying, derive the model from the message being replied to;
        # otherwise take it from the context.
        message_id = context.get('default_parent_id', context.get('message_id', context.get('active_id')))
        if context.get('default_composition_mode') == 'reply' and message_id:
            message_data = self.pool.get('mail.message').browse(cr, uid, message_id, context=context)
            if message_data:
                model = message_data.model
        else:
            model = context.get('default_model', context.get('active_model'))
        if model:
            record_ids = email_template_obj.search(cr, uid, [('model', '=', model)], context=context)
            return email_template_obj.name_get(cr, uid, record_ids, context) + [(False, '')]
        return []
    def default_get(self, cr, uid, fields, context=None):
        """Standard default_get, plus: pick the template id from the context
        and pre-render it when template use is already enabled."""
        if context is None:
            context = {}
        result = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
        result['template_id'] = context.get('default_template_id', context.get('mail.compose.template_id', False))
        # pre-render the template if any
        if result.get('use_template'):
            onchange_res = self.onchange_use_template(cr, uid, [], result.get('use_template'), result.get('template_id'),
                result.get('composition_mode'), result.get('model'), result.get('res_id'), context=context)
            result.update(onchange_res['value'])
        return result
    _columns = {
        'use_template': fields.boolean('Use Template'),
        # incredible hack of the day: size=-1 means we want an int db column instead of an str one
        'template_id': fields.selection(_get_templates, 'Template', size=-1),
    }
    def onchange_template_id(self, cr, uid, ids, use_template, template_id, composition_mode, model, res_id, context=None):
        """ - use_template not set: return default_get
            - use_template set in mass_mailing: we cannot render, so return the template values
            - use_template set: return rendered values """
        if use_template and template_id and composition_mode == 'mass_mail':
            # Mass mailing: rendering happens later per recipient; return raw template.
            values = self.pool.get('email.template').read(cr, uid, template_id, ['subject', 'body_html'], context)
            values.pop('id')
        elif use_template and template_id:
            values = self.generate_email_for_composer(cr, uid, template_id, res_id, context=context)
            # transform attachments into attachment_ids
            values['attachment_ids'] = []
            ir_attach_obj = self.pool.get('ir.attachment')
            for attach_fname, attach_datas in values.pop('attachments', []):
                data_attach = {
                    'name': attach_fname,
                    'datas': attach_datas,
                    'datas_fname': attach_fname,
                    'res_model': model,
                    'res_id': res_id,
                }
                values['attachment_ids'].append(ir_attach_obj.create(cr, uid, data_attach, context=context))
        else:
            # Template cleared: fall back to plain composer defaults.
            values = self.default_get(cr, uid, ['body', 'body_html', 'subject', 'partner_ids', 'attachment_ids'], context=context)
            if values.get('body_html'):
                values['body'] = values.pop('body_html')
        values.update(use_template=use_template, template_id=template_id)
        return {'value': values}
    def toggle_template(self, cr, uid, ids, context=None):
        """ hit toggle template mode button: calls onchange_use_template to
            emulate an on_change, then writes the values to update the form. """
        for record in self.browse(cr, uid, ids, context=context):
            onchange_res = self.onchange_use_template(cr, uid, ids, not record.use_template,
                record.template_id, record.composition_mode, record.model, record.res_id, context=context).get('value', {})
            # update partner_ids and attachment_ids
            # (4, id) = "link existing record" commands for the one2many/many2many write
            onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
            onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
            record.write(onchange_res)
        return True
    def onchange_use_template(self, cr, uid, ids, use_template, template_id, composition_mode, model, res_id, context=None):
        """ onchange_use_template (values: True or False). If use_template is
            False, we do as an onchange with template_id False for values """
        values = self.onchange_template_id(cr, uid, ids, use_template,
            template_id, composition_mode, model, res_id, context=context)
        # force html when using templates
        if use_template:
            values['value']['content_subtype'] = 'html'
        return values
    def save_as_template(self, cr, uid, ids, context=None):
        """ hit save as template button: current form value will be a new
            template attached to the current document. """
        email_template = self.pool.get('email.template')
        ir_model_pool = self.pool.get('ir.model')
        for record in self.browse(cr, uid, ids, context=context):
            model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model)], context=context)
            model_id = model_ids and model_ids[0] or False
            model_name = ''
            if model_id:
                model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name
            template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
            values = {
                'name': template_name,
                'subject': record.subject or False,
                'body_html': record.body or False,
                'model_id': model_id or False,
                'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])]
            }
            template_id = email_template.create(cr, uid, values, context=context)
            # Switch the wizard to use the freshly saved template.
            record.write({'template_id': template_id, 'use_template': True})
        return True
    #------------------------------------------------------
    # Wizard validation and send
    #------------------------------------------------------
    def generate_email_for_composer(self, cr, uid, template_id, res_id, context=None):
        """ Call email_template.generate_email(), get fields relevant for
            mail.compose.message, transform email_cc and email_to into partner_ids """
        template_values = self.pool.get('email.template').generate_email(cr, uid, template_id, res_id, context=context)
        # filter template values
        fields = ['body', 'body_html', 'subject', 'email_to', 'email_cc', 'attachments']
        values = dict((field, template_values[field]) for field in fields if template_values.get(field))
        values['body'] = values.pop('body_html', '')
        # transform email_to, email_cc into partner_ids
        values['partner_ids'] = []
        mails = tools.email_split(values.pop('email_to', '') + ' ' + values.pop('email_cc', ''))
        for mail in mails:
            # find_or_create ensures every address has a res.partner record
            partner_id = self.pool.get('res.partner').find_or_create(cr, uid, mail, context=context)
            values['partner_ids'].append(partner_id)
        return values
    def render_message(self, cr, uid, wizard, res_id, context=None):
        """ Generate an email from the template for given (model, res_id) pair.
            This method is meant to be inherited by email_template that will
            produce a more complete dictionary, with email_to, ...
        """
        # generate the composer email
        values = self.generate_email_for_composer(cr, uid, wizard.template_id, res_id, context=context)
        # get values to return
        email_dict = super(mail_compose_message, self).render_message(cr, uid, wizard, res_id, context)
        email_dict.update(values)
        return email_dict
    def render_template(self, cr, uid, template, model, res_id, context=None):
        """Delegate placeholder rendering to the email.template engine."""
        return self.pool.get('email.template').render_template(cr, uid, template, model, res_id, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
import tensorflow as tf
def PairwiseEuDist(
    user_vec,
    subgraph,
    item_vec=None,
    item_bias=None,
    p_item_vec=None,
    p_item_bias=None,
    n_item_vec=None,
    n_item_bias=None,
    train=True,
    weights=1.0,
    margin=1.0,
    scope="PointwiseMSE",
):
    """Pairwise hinge-loss recommender module based on Euclidean distance.

    Scores an item as (bias - squared L2 distance to the user vector).
    In training mode it registers a margin-based hinge loss over
    (positive, negative) item pairs on `subgraph`; otherwise it registers
    per-item score predictions.

    NOTE(review): `scope` defaults to "PointwiseMSE" (mismatching the
    function name) and is never used in the body — confirm whether a
    tf.variable_scope was intended. `reduction_indices` is the deprecated
    TF1 alias of `axis`.
    """
    if train:
        # Squared L2 distance user <-> positive item, kept as a column vector.
        l2_user_pos = tf.reduce_sum(
            tf.square(tf.subtract(user_vec, p_item_vec)),
            reduction_indices=1,
            keepdims=True,
            name="l2_user_pos",
        )
        # Squared L2 distance user <-> negative item.
        l2_user_neg = tf.reduce_sum(
            tf.square(tf.subtract(user_vec, n_item_vec)),
            reduction_indices=1,
            keepdims=True,
            name="l2_user_neg",
        )
        # Closer items score higher: negate the distance, add the item bias.
        pos_score = (-l2_user_pos) + p_item_bias
        neg_score = (-l2_user_neg) + n_item_bias
        diff = pos_score - neg_score
        # Weighted hinge loss: penalize pairs whose score gap is below margin.
        loss = tf.reduce_sum(weights * tf.maximum(margin - diff, 0))
        subgraph.register_global_loss(loss)
    else:
        # Inference: score every candidate item the same way.
        predictions = (
            -tf.reduce_sum(
                tf.square(tf.subtract(user_vec, item_vec)),
                reduction_indices=1,
                keepdims=True,
                name="l2_user_pos",
            )
            + item_bias
        )
        subgraph.register_global_output(predictions)
|
import os
import linecache
import re
from glob import glob
from collections import defaultdict
from typing import List, Tuple
from pipeline.sanitizers.regex import HAS_TAG, HAS_BOTH, HAS_CLOSING_TAG, HAS_OPENING_TAG
def annotate(raw_tokens) -> List[Tuple]:
    """Convert raw whitespace-split tokens into (token, label) pairs.

    Tags are matched with the module regexes: a token may carry both an
    opening and closing tag (HAS_BOTH), open a labelled span (subsequent
    untagged tokens inherit the label), or close one (back to 'other').

    Bug fix: `token`/`label` are now reset each iteration. Previously they
    were unbound on the first unmatched tag (NameError) or carried stale
    values from an earlier iteration, appending a wrong pair.
    """
    tokens = []
    current_label = 'other'
    for raw_token in raw_tokens:
        raw_token = raw_token.strip()
        if not raw_token:
            continue
        if not HAS_TAG.search(raw_token):
            # Plain token: inherit the label of the currently open span.
            tokens.append((raw_token, current_label))
            continue
        has_both = HAS_BOTH.findall(raw_token)
        if has_both:
            # Self-contained tagged token(s): emit directly, span state unchanged.
            for (label, token) in has_both:
                tokens.append((token, label))
            continue
        token = label = None  # reset so an unmatched tag emits nothing
        has_opening = HAS_OPENING_TAG.search(raw_token)
        has_closing = HAS_CLOSING_TAG.search(raw_token)
        if has_opening:
            token = has_opening.group('token')
            label = has_opening.group('label')
            current_label = label
        elif has_closing:
            token = has_closing.group('token')
            label = has_closing.group('label')
            current_label = 'other'
        else:
            print(f"Unable to find opening/closing tags for {raw_token}")
        if token:
            tokens.append((token, label))
    return tokens
# Materialise the style/idx to training data
def tokenise_idx(path_to_idx_files: str, path_to_sanitised_data: str):
    """
    path_to_idx_files (str): path to indices of each fold (globbable)
    path_to_data (str): path to files containing the annotated strings

    Returns a dict mapping fold name -> list of annotated sentences, where
    each sentence is the (token, label) list produced by annotate().
    Index lines look like "<csl_type>/<style>/<lineno>".
    """
    idx_files = glob(os.path.abspath(path_to_idx_files))
    print(f"Reading indices from {path_to_idx_files}")
    print(f"Reading data from {path_to_sanitised_data}")
    folds = defaultdict(list)
    for file in idx_files:
        with open(file, 'r') as fh:
            style_idx = fh.read().strip('\n').split('\n')
        for idx in style_idx:
            # Fold name is the path component right after ".../folds/".
            fold_idx = file.split('/')[file.split('/').index('folds') + 1]
            csl_type, style, lineno = idx.split('/')
            filepath = os.path.join(path_to_sanitised_data, csl_type, style, 'output.sanitised.txt')
            # linecache caches whole files; line numbers are 1-based.
            line = linecache.getline(filepath, int(lineno))
            data = annotate(line.split(" "))
            folds[fold_idx].append(data)
    return folds
def main():
    """Materialise the per-fold train/val indices into CoNLL-style
    token\tlabel files (blank line between sentences)."""
    current_directory = os.getcwd()
    annotated_path = os.path.join(current_directory, 'data/annotated')
    training_data = tokenise_idx(os.path.join(current_directory, 'data/training/folds/*/train_style_idx.txt'),
                                 annotated_path)
    val_data = tokenise_idx(os.path.join(current_directory, 'data/training/folds/*/val_style_idx.txt'),
                            annotated_path)
    def write_tokens_as_lines(data: list, file: str):
        # One "token<TAB>label" pair per line; sentences separated by a blank line.
        with open(file, 'w') as fh:
            for t in data:
                for token, label in t:
                    fh.write(f"{token}\t{label}\n")
                fh.write('\n')
    for fold_idx, data in training_data.items():
        write_tokens_as_lines(data, os.path.join(current_directory, f"data/training/folds/{fold_idx}/train.txt"))
    for fold_idx, data in val_data.items():
        write_tokens_as_lines(data, os.path.join(current_directory, f"data/training/folds/{fold_idx}/val.txt"))
# Script entry point.
if __name__ == '__main__':
    main()
|
from django.contrib import admin
from RNG.models import UserProfile, Category, Game, Rating, Comment
# Register your models here.
# Expose the RNG app's models in the Django admin (same registration order
# as before).
for _model in (UserProfile, Category, Game, Rating, Comment):
    admin.site.register(_model)
|
from sys import exit
# One-letter stdin helpers in competitive-programming style.
def i(): return input()
def ii(): return int(input())
def iis(): return map(int, input().split())
def liis(): return list(map(int, input().split()))
def print_array(a): print("".join(map(str, a)))
# For each of t test cases read grid dimensions m x n and print an all-'B'
# grid with a single 'W' in the top-left corner, one row per line.
t = ii()
for _ in range(t):
    m, n = iis()
    ans = [['B' for j in range(n)] for i in range(m)]
    ans[0][0] = 'W'
    for i in range(len(ans)):
        print_array(ans[i])
|
from math import sqrt
from numpy import dot, array
def euclid_dist(p1, p2):
    """Sum of per-coordinate distances between points p1 and p2.

    Bug fix: the module imports `sqrt` directly (`from math import sqrt`),
    so the original `math.sqrt` call raised NameError at runtime.

    NOTE(review): sqrt((b - a) ** 2) == abs(b - a), so despite its name this
    computes the L1/Manhattan distance, not the Euclidean norm; the formula
    is kept as-is for backward compatibility (see np_euclid_dist for L2).
    """
    euclid_sum = 0
    for a, b in zip(p1, p2):
        euclid_sum += sqrt((b - a) ** 2)
    return euclid_sum
def np_euclid_dist(p1, p2):
    """Euclidean (L2) distance between two points, computed with NumPy."""
    diff = array(p2) - array(p1)
    return sqrt(dot(diff, diff))
def manhattan_dist(p1, p2):
    """Manhattan (L1) distance: sum of absolute coordinate differences."""
    return sum(abs(a - b) for a, b in zip(p1, p2))
def np_manhattan_dist(p1, p2):
    """Manhattan (L1) distance computed with NumPy array arithmetic."""
    return abs(array(p2) - array(p1)).sum()
|
def count_substring(string, sub_string):
    """Count (possibly overlapping) occurrences of sub_string in string."""
    width = len(sub_string)
    return sum(
        1
        for start in range(len(string) - width + 1)
        if string[start:start + width] == sub_string
    )
# Read the haystack and the needle from stdin and report the match count.
string = input().strip()
sub_string = input().strip()
count = count_substring(string, sub_string)
print(count)
|
# -*- coding: utf-8 -*-
"""
Display current sound volume using amixer.
Expands on the standard i3status volume module by adding color
and percentage threshold settings.
Volume up/down and toggle mute via mouse clicks can easily be added; see the
example below.
Configuration parameters:
button_down: Button to click to decrease volume. Setting to 0 disables.
(default 0)
button_mute: Button to click to toggle mute. Setting to 0 disables.
(default 0)
button_up: Button to click to increase volume. Setting to 0 disables.
(default 0)
cache_timeout: how often we refresh this module in seconds.
(default 10)
channel: Alsamixer channel to track.
(default 'Master')
device: Alsamixer device to use.
(default 'default')
format: Format of the output.
(default '♪: {percentage}%')
format_muted: Format of the output when the volume is muted.
(default '♪: muted')
icon_headphones: icon when headphones are plugged
(default '')
icon_speakers: icon when headphones are not plugged
(default '')
threshold_bad: Volume below which color is set to bad.
(default 20)
threshold_degraded: Volume below which color is set to degraded.
(default 50)
volume_delta: Percentage amount that the volume is increased or
decreased by when volume buttons pressed.
(default 5)
Format placeholders:
{percentage} Percentage volume
Color options:
color_bad: Volume below threshold_bad or muted
color_degraded: Volume below threshold_degraded
color_good: Volume above or equal to threshold_degraded
Example:
```
# Add mouse clicks to change volume
volume_status {
button_up = 4
button_down = 5
button_mute = 2
}
```
Requires:
alsa-utils: (tested with alsa-utils 1.0.29-1)
NOTE:
If you are changing volume state by external scripts etc and
want to refresh the module quicker than the i3status interval,
send a USR1 signal to py3status in the keybinding.
Example: killall -s USR1 py3status
@author <Jan T> <jans.tuomi@gmail.com>
@license BSD
"""
import re
import shlex
from subprocess import check_output, call
class Py3status:
    """Volume indicator driven by amixer; see the module docstring for the
    configuration parameters."""
    # available configuration parameters
    button_down = 0
    button_mute = 0
    button_up = 0
    cache_timeout = 10
    channel = 'Master'
    device = 'default'
    format = u'♪: {percentage}%'
    format_muted = u'♪: muted'
    icon_headphones = u''
    icon_speakers = u''
    threshold_bad = 20
    threshold_degraded = 50
    volume_delta = 5
    # compares current volume to the thresholds, returns a color code
    def _perc_to_color(self, string):
        try:
            value = int(string)
        except ValueError:
            # Non-numeric input (e.g. the parse-error message) renders as bad.
            return self.py3.COLOR_BAD
        if value < self.threshold_bad:
            return self.py3.COLOR_BAD
        elif value < self.threshold_degraded:
            return self.py3.COLOR_DEGRADED
        else:
            return self.py3.COLOR_GOOD
    # return the format string formatted with available variables
    def _format_output(self, format, percentage, icon):
        # NOTE(review): {icon} is supplied here but the default `format`
        # strings never reference it — confirm user configs are expected to.
        text = format.format(percentage=percentage, icon=icon)
        return text
    # return the current channel volume value as a string
    def _get_percentage(self, output):
        # attempt to find a percentage value in square brackets
        p = re.compile(r'(?<=\[)\d{1,3}(?=%\])')
        text = p.search(output).group()
        # check if the parsed value is sane by checking if it's an integer
        try:
            int(text)
            return text
        # if not, show an error message in output
        except ValueError:
            return "error: can't parse amixer output."
    # returns True if the channel is muted
    def _get_muted(self, output):
        # amixer reports mute state as [on]/[off] — capture the bracketed word
        p = re.compile(r'(?<=\[)\w{2,3}(?=\])')
        text = p.search(output).group()
        # check if the parsed string is either "off" or "on"
        if text in ['on', 'off']:
            return text == 'off'
        # if not, return False
        else:
            return False
    def _get_icon(self):
        # amixer -c 0 contents | grep Headphone -A 3 | head -n 3 | tail -n 1
        # NOTE(review): on Python 3 check_output returns bytes and, unlike
        # current_volume() below, the result is never .decode()d, so the
        # str-pattern search will raise TypeError; p.search may also return
        # None if no Headphone control exists — needs hardening.
        output = check_output('amixer -c 0 contents | grep Headphone -A 3 | head -n 3 | tail -n 1', shell = True)
        # find percentage and status
        p = re.compile(r'(.*)(on|off)')
        _, text = p.search(output).groups()
        if text == u'on':
            return self.icon_headphones
        else:
            return self.icon_speakers
    # this method is run by py3status
    # returns a response dict
    def current_volume(self):
        # call amixer
        output = check_output(shlex.split('amixer -D {} sget {}'.format(
            self.device, self.channel))).decode('utf-8')
        # get the current percentage value
        perc = self._get_percentage(output)
        # get info about channel mute status
        muted = self._get_muted(output)
        # get icon
        icon = self._get_icon()
        # determine the color based on the current volume level
        color = self._perc_to_color(perc if not muted else '0')
        # format the output
        text = self._format_output(self.format_muted
                                   if muted else self.format, perc, icon)
        # create response dict
        response = {
            'cached_until': self.py3.time_in(self.cache_timeout),
            'color': color,
            'full_text': text,
        }
        return response
    def on_click(self, event):
        '''
        Volume up/down and toggle mute.
        '''
        button = event['button']
        cmd = 'amixer -q -D {} sset {} '.format(self.device, self.channel)
        # volume up
        if self.button_up and button == self.button_up:
            call(shlex.split('{} {}%+'.format(cmd, self.volume_delta)))
        # volume down
        elif self.button_down and button == self.button_down:
            call(shlex.split('{} {}%-'.format(cmd, self.volume_delta)))
        # toggle mute
        elif self.button_mute and button == self.button_mute:
            call(shlex.split('{} toggle'.format(cmd)))
# test if run directly
# test if run directly
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
|
class Patient:
    """
    The Patient object. The initialization will not happen here. For this take a look at the
    CSVDataReader class.

    Attribute names are German clinical terms taken directly from the CSV
    columns (e.g. kuerzel = abbreviation, alter = age, geschlecht = sex,
    hauptdiagnose = primary diagnosis, laborwerte = lab values). Values are
    stored exactly as provided by the reader, with no validation or coercion.
    """
    def __init__(self,
                 id,  # note: shadows the builtin id() inside __init__
                 kuerzel,
                 alter,
                 geschlecht,
                 hauptdiagnose,
                 nebendiagnose,
                 vorherige_erkrankungen,
                 sonstige_anamnese,
                 leistungsschwaeche,
                 atembeschwerden,
                 produktiver_husten,
                 unproduktiver_husten,
                 sonstige_untersuchungsbefunde,
                 blutdruck,
                 puls,
                 temperatur,
                 sp_o2_mit_o2,
                 sp_o2_ohne_o2,
                 bmi,
                 pulmo,
                 erhoehte_af,
                 abdomen,
                 diagnostik,
                 laborwerte,
                 lungenfunktion,
                 roentgen,
                 medikation,
                 *args  # extra CSV columns are accepted and ignored
                 #...
                 ):
        # Identification / demographics
        self.id = id
        self.kuerzel = kuerzel
        self.alter = alter
        self.geschlecht = geschlecht
        # Diagnoses and history
        self.hauptdiagnose = hauptdiagnose
        self.nebendiagnose = nebendiagnose
        self.vorherige_erkrankungen = vorherige_erkrankungen
        self.sonstige_anamnese = sonstige_anamnese
        # Reported symptoms
        self.leistungsschwaeche = leistungsschwaeche
        self.atembeschwerden = atembeschwerden
        self.produktiver_husten = produktiver_husten
        self.unproduktiver_husten = unproduktiver_husten
        self.sonstige_untersuchungsbefunde = sonstige_untersuchungsbefunde
        # Vital signs and examination findings
        self.blutdruck = blutdruck
        self.puls = puls
        self.temperatur = temperatur
        self.sp_o2_mit_o2 = sp_o2_mit_o2
        self.sp_o2_ohne_o2 = sp_o2_ohne_o2
        self.bmi = bmi
        self.pulmo = pulmo
        self.erhoehte_af = erhoehte_af
        self.abdomen = abdomen
        # Diagnostics and treatment
        self.diagnostik = diagnostik
        self.laborwerte = laborwerte
        self.lungenfunktion = lungenfunktion
        self.roentgen = roentgen
        self.medikation = medikation
# Exercise: simple dict lookups and sorted values.
favorite_numbers={"Max":3,
                  "Den":2,
                  "Dima":1
                  }
print("Max's favorite number is {}".format(favorite_numbers["Max"]))
print("Den's favorite number is {}".format(favorite_numbers["Den"]))
print("Dima's favorite number is {}".format(favorite_numbers["Dima"]))
print(sorted(favorite_numbers.values()))
#=======================================================================
# Exercise: iterate over key/value pairs of a glossary.
glossary={"OOP":"object oriented programming",
          "Class":"pack of methods",
          "car":"machine to delever people"
          }
for key,value in glossary.items():
    print(key,"\n",key,"-","{}".format(value))
#=======================================================================
# Exercise: iterate items, keys and values separately.
rivers={"nile":"Egypt",
        "Lena":"Russia",
        "Amazonka":"Brazil"
        }
for k,v in rivers.items():
    print("The {} runs through {}".format(k,v))
for key in rivers.keys():
    print(key)
for value in rivers.values():
    print(value)
#=======================================================================
# Exercise: membership test against dict keys.
fav_lang={"Max":"C",
          "Dima":"C",
          "Den":"Python",
          "Sergey":"Python"
          }
name_list=["Max","Ira","Dima"]
for name in name_list:
    if name in fav_lang.keys():
        print("Thanks {} for your answer".format(name))
    else:
        print("{} pidoras!".format(name))
#=======================================================================
# Exercise: a list of dicts.
Max={"Age":21,
     "Country":"NN",
     "Kaf":"Sm5"}
Dima={"Age":21,
      "Country":"Elets",
      "Kaf":"Sm5"}
Den={"Age":21,
     "Country":"Msk",
     "Kaf":"Sm7"}
people=[Max,Dima,Den]
for man in people:
    for k,v in man.items():
        print("{} is {}".format(k,v))
#======================================================================
# Exercise: a dict with list values.
fav_numb={"Max":[1,2,3],
          "Den":[3,4,5],
          "Dima":[6,7,8]
          }
for k in fav_numb.keys():
    print("{} fav numbers are {}".format(k,fav_numb[k]))
#======================================================================
# Exercise: a dict of dicts.
Cities={"Msk":{"population":2000000,"area":1000},
        "pekin":{"population":5000000,"area":3000},
        "London":{"population":8000000,"area":4000},
        }
for k in Cities.keys():
    print("{}".format(k))
    for key,value in Cities[k].items():
        print(key,"-{}".format(value))
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist/", one_hot=True)
# Define the input: flattened 28x28 images, and the softmax-regression
# parameters (weights W and bias b), all starting at zero.
x=tf.placeholder("float",[None,784])
W=tf.Variable(tf.zeros([784,10]))
b=tf.Variable(tf.zeros([10]))
y=tf.nn.softmax(tf.matmul(x,W)+b)
# Define the target output placeholder (one-hot class labels).
y_=tf.placeholder("float",[None,10])
# Define the loss function: cross-entropy between labels and predictions.
cross_entropy=-tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# tf.initialize_all_variables() is the deprecated pre-1.0 spelling.
init=tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Train with mini-batches of 100 examples; report test accuracy every 100 steps.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    if(i % 100 == 0):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
"""
How well does the model perform?
tf.argmax gives the index of the largest entry of a tensor along a dimension.
Because the labels are one-hot vectors, tf.argmax(y, 1) is the predicted class
for each input x, while tf.argmax(y_, 1) is the true class; tf.equal compares
the two index vectors (equal indices mean a correct prediction).
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
That yields a vector of booleans. To get the fraction of correct predictions,
cast to floats and take the mean: e.g. [True, False, True, True] becomes
[1,0,1,1], whose mean is 0.75.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
"""
import requests
from lxml import html
# NOTE(review): live credentials are hard-coded in source — move them to
# environment variables or a secrets store and rotate them.
USERNAME = 'tnetwork'
PASSWORD = '1dehudaa'
LOGIN_URL = 'https://www.centraldispatch.com/'
URL = 'https://www.centraldispatch.com/protected/cargo/dispatched-to-me?folder=Dispatched'
def main():
    """Log in to Central Dispatch using a session and the page's CSRF token."""
    session_requests = requests.session()
    # Get login csrf token (hidden <input name="CSRFToken"> on the login page)
    result = session_requests.get(LOGIN_URL)
    tree = html.fromstring(result.text)
    authenticity_token = list(set(tree.xpath("//input[@name='CSRFToken']/@value")))[0]
    #Create Payload
    payload = {
        "Username": USERNAME,
        "Password": PASSWORD,
        "CSRFToken": authenticity_token
    }
    #Perform login
    result = session_requests.post(LOGIN_URL, data = payload, headers = dict(referer =LOGIN_URL))
    #Scrape url
    # NOTE(review): URL is defined but never fetched — the scrape step is
    # missing; 'success' is printed regardless of the login outcome.
    print('success')
# Script entry point.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
from django import http
from django.conf import settings
from django.shortcuts import render
from django.template import Context, loader
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# root
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def contact(request, template_name='scaffold/contact.html'):
    """
    Main page
    """
    context = {
        'ROOT_DIR': settings.ROOT_DIR,
        'BASE_DIR': settings.BASE_DIR,
    }
    return render(request, template_name, context)
|
def collatx(n):
    """Return the length of the Collatz sequence starting at n, counting
    both n itself and the final 1.

    Rewritten iteratively: the original recursion could exhaust Python's
    recursion limit on long chains, and its `int(n / 2)` goes through a
    float, losing precision for n >= 2**53; floor division is exact.
    """
    steps = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
    return steps
# Read n, then for every decimal digit position try replacing the digit with
# 1..9 and report the maximum Collatz chain length over all variants
# (including n itself).
# NOTE(review): digit value 0 is never tried (range(1,10)) — confirm whether
# that is intended (it does avoid leading zeros in the first position).
n = int(input())
mx = collatx(n)
nums = list(map(int, list(str(n))))
print(nums)
for i in range(len(nums)):
    for j in range(1,10):
        tmp = list(nums)
        tmp[i] = j
        val = int("".join(list(map(str, tmp))))
        print(val)
        val = collatx(val)
        mx = max(mx, val)
print(mx)
import json
class Achievements_Categories:
    """One achievement-category record built from a parsed JSON mapping."""

    def __init__(self, json):
        # Parameter name kept for caller compatibility even though it shadows
        # the json module (which this class never uses).
        for key in ('id', 'name', 'description', 'order', 'icon', 'achievements'):
            setattr(self, key, json[key])

    def __str__(self):
        return (
            f"ID: {self.id}\nName: {self.name}\nDescription: {self.description}\n"
            f"Order: {self.order}\nIcon: {self.icon}\nAchievements: {self.achievements}"
        )
# generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated catkin package context; values are baked in at
# configure time for this workspace.
CATKIN_PACKAGE_PREFIX = ""
# Include directories for this package (devel space + source tree).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/Users/adityabhat/Downloads/devel/include;/Users/adityabhat/Downloads/src/ros_comm/topic_tools/include".split(';') if "/Users/adityabhat/Downloads/devel/include;/Users/adityabhat/Downloads/src/ros_comm/topic_tools/include" != "" else []
# Run/build dependencies declared by the package, space-separated.
PROJECT_CATKIN_DEPENDS = "message_runtime;rosconsole;roscpp;std_msgs;xmlrpcpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ltopic_tools".split(';') if "-ltopic_tools" != "" else []
PROJECT_NAME = "topic_tools"
PROJECT_SPACE_DIR = "/Users/adityabhat/Downloads/devel"
PROJECT_VERSION = "1.11.16"
|
# Copyright (C) 2005 Paul Harrison
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""\
This is a package for calculation of Levy stable distributions
(probability density function and cumulative density function) and for
fitting these distributions to data.
It operates by interpolating values from a table, as direct computation
of these distributions requires a lengthy numerical integration. This
interpolation scheme allows fast fitting of Levy stable distributions
to data using the Maximum Likelihood technique.
Does not support alpha values less than 0.5.
"""
__version__ = "0.4"
import sys
import numpy as N
import numpy.random as RandomArray
import levy_data as levy_data
def _make_range(min, max, n):
""" Create a sequence of values. """
return (N.arange(n)) * ((max-min)/(n-1.0)) + min
def _calculate_levy(x, alpha, beta, cdf=False):
""" Calculation of Levy stable distribution via numerical integration.
This is used in the creation of the lookup table. """
# "0" parameterization as per http://academic2.american.edu/~jpnolan/stable/stable.html
# Note: fails for alpha=1.0
# (so make sure alpha=1.0 isn't exactly on the interpolation grid)
from scipy import integrate
C = beta * N.tan(N.pi*0.5*alpha)
def func_cos(u):
ua = u ** alpha
if ua > 700.0: return 0.0
return N.exp(-ua)*N.cos(C*ua-C*u)
def func_sin(u):
ua = u ** alpha
if ua > 700.0: return 0.0
return N.exp(-ua)*N.sin(C*ua-C*u)
if cdf:
# Cumulative density function
return (integrate.quad(lambda u: u and func_cos(u)/u or 0.0, 0.0, integrate.Inf, weight="sin", wvar=x, limlst=1000)[0]
+ integrate.quad(lambda u: u and func_sin(u)/u or 0.0, 0.0, integrate.Inf, weight="cos", wvar=x, limlst=1000)[0]
) / N.pi + 0.5
else:
# Probability density function
return ( integrate.quad(func_cos, 0.0, integrate.Inf, weight="cos", wvar=x, limlst=1000)[0]
- integrate.quad(func_sin, 0.0, integrate.Inf, weight="sin", wvar=x, limlst=1000)[0]
) / N.pi
def _levy_tan(x, alpha, beta, cdf=False):
    """ Calculate the values stored in the lookup table.
    The tan mapping allows the table to cover the range from -INF to INF. """
    return _calculate_levy(N.tan(x), alpha, beta, cdf)
def _interpolate(points, grid, lower, upper):
    """ Perform multi-dimensional Catmull-Rom cubic interpolation. """
    # Flatten all leading axes of `points` so each row is one query point.
    point_shape = N.shape(points)[:-1]
    points = N.reshape(points, (N.multiply.reduce(point_shape), N.shape(points)[-1]))
    grid_shape = N.array(N.shape(grid))
    dims = len(grid_shape)
    # Map coordinates from [lower, upper] into grid index space.
    points = (points-lower) * ((grid_shape-1) / (upper-lower))
    floors = N.floor(points).astype('int')
    offsets = points - floors
    offsets2 = offsets * offsets
    offsets3 = offsets2 * offsets
    # Catmull-Rom basis weights for the 4 neighbouring samples along each axis.
    weighters = [
        -0.5*offsets3 +offsets2-0.5*offsets,
        1.5*offsets3-2.5*offsets2 +1.0,
        -1.5*offsets3 +2*offsets2+0.5*offsets,
        0.5*offsets3-0.5*offsets2,
    ]
    ravel_grid = N.ravel(grid)
    result = N.zeros(N.shape(points)[:-1], 'float64')
    # Accumulate the tensor product over all 4**dims neighbour combinations;
    # i encodes one neighbour index (0..3) per dimension, 2 bits each.
    for i in range(1 << (dims*2)):
        weights = N.ones(N.shape(points)[:-1], 'float64')
        ravel_offset = 0
        for j in range(dims):
            n = (i >> (j*2)) % 4
            # Clamp neighbour indices to the grid edge (replicate boundary).
            ravel_offset = ravel_offset * grid_shape[j] + \
                N.maximum(0,N.minimum(grid_shape[j]-1,floors[:,j] + (n-1)))
            weights *= weighters[n][:,j]
        result += weights * N.take(ravel_grid, ravel_offset)
    return N.reshape(result, point_shape)
# Dimensions: 0 - x, 1 - alpha, 2 - beta
# Interpolation-table bounds: x is stored through an arctan mapping, so its
# axis spans (just inside) +/- pi/2; alpha spans [0.5, 2.0]; beta [-1, 1].
_lower = N.array([-N.pi/2 * 0.999, 0.5, -1.0])
_upper = N.array([N.pi/2 * 0.999, 2.0, 1.0])
def _make_data_file():
    """ Generate the lookup table and write it to levy_data.py.

    The table holds pdf and cdf samples of the stable distribution on an
    (x, alpha, beta) grid; the arrays are pickled and base64-embedded in
    the generated module.

    Fixed for Python 3: ``xrange`` -> ``range``; ``base64.encodestring`` ->
    ``encodebytes``; the generated file now uses ``pickle.loads`` +
    ``base64.decodebytes`` (``numpy.loads`` and ``decodestring`` were
    removed); the output file is closed via a context manager.
    """
    import base64
    size = (200, 50, 51)
    pdf = N.zeros(size, 'float64')
    cdf = N.zeros(size, 'float64')
    ranges = [ _make_range(_lower[i], _upper[i], size[i]) for i in range(len(size)) ]
    print ("Generating levy_data.py ...")
    for i in range(size[1]):
        for j in range(size[2]):
            print ("Calculating alpha =", ranges[1][i], "beta = ", ranges[2][j])
            for k in range(size[0]):
                pdf[k,i,j] = _levy_tan(ranges[0][k], ranges[1][i], ranges[2][j])
                cdf[k,i,j] = _levy_tan(ranges[0][k], ranges[1][i], ranges[2][j], True)
    # ndarray.dumps() returns a pickle byte string; embed it base64-encoded.
    pdf_b64 = base64.encodebytes(pdf.dumps()).decode('ascii')
    cdf_b64 = base64.encodebytes(cdf.dumps()).decode('ascii')
    with open("levy_data.py", "wt") as out:
        out.write("""
# This is a generated file, do not edit.
import pickle, base64
pdf = pickle.loads(base64.decodebytes(
b\"\"\"%s\"\"\"))\n
cdf = pickle.loads(base64.decodebytes(
b\"\"\"%s\"\"\"))\n""" % (pdf_b64, cdf_b64))
def levy(x, alpha, beta, cdf=False):
    """ Interpolate densities of the Levy stable distribution specified by alpha and beta.
    Specify cdf=True to obtain the *cumulative* density function.
    Note: may sometimes return slightly negative values, due to numerical inaccuracies.
    """
    # Build (x', alpha, beta) query points; x is arctan-mapped to match the
    # table coordinates produced by _levy_tan.
    queries = N.empty(N.shape(x) + (3,), 'float64')
    queries[..., 0] = N.arctan(x)
    queries[..., 1] = alpha
    queries[..., 2] = beta
    table = levy_data.cdf if cdf else levy_data.pdf
    return _interpolate(queries, table, _lower, _upper)
def neglog_levy(x, alpha, beta):
    """ Interpolate negative log densities of the Levy stable distribution specified by alpha and beta.
    Small/negative densities are capped at 1e-100 to preserve sanity.
    """
    density = N.maximum(1e-100, levy(x, alpha, beta))
    return -N.log(density)
def _reflect(x, lower, upper):
while 1:
if x < lower:
x = lower - (x-lower)
elif x > upper:
x = upper - (x-upper)
else:
return x
def fit_levy(x, alpha=None, beta=None, location=None, scale=None):
    """ Estimate parameters of Levy stable distribution given data x,
    using the Maximum Likelihood method.
    By default, searches all possible Levy stable distributions. However
    you may restrict the search by specifying the values of one or more
    parameters.
    Examples:
        levy(x) -- Fit a stable distribution to x
        levy(x, beta=0.0) -- Fit a symmetric stable distribution to x
        levy(x, beta=0.0, location=0.0) -- Fit a symmetric distribution centered on zero to x
        levy(x, alpha=1.0, beta=0.0) -- Fit a Cauchy distribution to x
    Returns a tuple of (alpha, beta, location, scale, negative log density)
    """
    from scipy import optimize
    # `is None` / `is not None` instead of `== None`: identity is the correct
    # idiom and avoids ambiguous elementwise comparison warnings with numpy.
    if location is None or scale is None:
        # Robust initial guesses: median for location, half the IQR for scale.
        x = N.sort(x)
        last = len(x)-1
        guess_location = x[int(last/2)]
        guess_scale = (x[int(last-last/4)] - x[int(last/4)])/2.0
        # Maybe there are lots of zeros or something...
        if guess_scale == 0:
            guess_scale = (x[last] - x[0]) / 2.0
    # Each free parameter gets a slot in `parameters` and an accessor lambda;
    # fixed parameters get a constant accessor and no slot.
    parameters = [ ]
    if alpha is not None:
        get_alpha = lambda parameters: alpha
    else:
        get_alpha = lambda parameters, nth=len(parameters): \
            _reflect(parameters[nth],_lower[1],_upper[1])
        parameters.append(1.0)
    if beta is not None:
        get_beta = lambda parameters: beta
    else:
        get_beta = lambda parameters, nth=len(parameters): \
            _reflect(parameters[nth],_lower[2],_upper[2])
        parameters.append(0.0)
    if location is not None:
        get_location = lambda parameters: location
    else:
        get_location = lambda parameters, nth=len(parameters): parameters[nth]
        parameters.append(guess_location)
    if scale is not None:
        get_scale = lambda parameters: scale
    else:
        # Scale is optimized in log space to keep it positive.
        get_scale = lambda parameters, nth=len(parameters): N.exp(parameters[nth])
        parameters.append(N.log(guess_scale))
    def neglog_density(parameters):
        # Negative log likelihood of the standardized data plus the
        # log-scale Jacobian term.
        location = get_location(parameters)
        scale = get_scale(parameters)
        return N.sum(neglog_levy((x-location)/scale, get_alpha(parameters), get_beta(parameters))) \
            + (len(x) * N.log(scale))
    # Nelder-Mead simplex search over the free parameters.
    parameters = optimize.fmin(neglog_density, parameters, disp=0)
    return (get_alpha(parameters), get_beta(parameters),
            get_location(parameters), get_scale(parameters),
            neglog_density(parameters))
def random(alpha, beta, shape=()):
    """ Generate random values sampled from an alpha-stable distribution.
    """
    if alpha == 2:
        # alpha=2 is exactly Gaussian with variance 2.
        return RandomArray.standard_normal(shape) * N.sqrt(2.0)
    # Fails for alpha exactly equal to 1.0
    # but works fine for alpha infinitesimally greater or less than 1.0
    radius = 1e-15 # <<< this number is *very* small
    if N.absolute(alpha-1.0) < radius:
        # So doing this will make almost exactly no difference at all
        alpha = 1.0 + radius
    r1 = RandomArray.random(shape)
    r2 = RandomArray.random(shape)
    # NOTE(review): the expression below looks like a machine-generated form
    # of the Chambers-Mallows-Stuck transform - presumably equivalent to the
    # commented reference implementation that follows; confirm before editing.
    a=(1.0-alpha); b=(r1-0.5); c=((a*b)*3.1415926535897931); d=N.absolute((1.0-N.absolute(a))); e=N.tan((((((alpha*d)*-2.0)*N.arctan((beta*N.tan(((3.1415926535897931*alpha)/2.0)))))*3.1415926535897931)/((6.2831853071795862*d)*alpha))); f=(((-((N.cos(c)+(e*N.sin(c))))/(N.log(r2)*N.cos((b*3.1415926535897931))))**(a/alpha))-1.0); g=N.tan(((3.1415926535897931*b)/2.0)); h=N.tan((((3.1415926535897931*a)*b)/2.0)); i=(1.0-(g*g)); j=((((((a*f)/a)+1.0)*(((2.0*(g-h))*((g*h)+1.0))-(((((((((h*i)-(2.0*g))*b)*3.1415926535897931)*a)*e)*2.0)*h)/c)))/(i*((h*h)+1.0)))+(((a*e)*f)/a))
    return j
    # Reference implementation (unreachable, kept for documentation):
    #k_alpha = N.abs(1-N.abs(1-alpha))
    #beta_A = 2.0 * N.arctan(-beta * N.tan(N.pi*0.5*alpha)) / (N.pi*k_alpha)
    #gamma_B = N.cos(N.pi*beta_A*k_alpha*0.5)
    #Phi_0 = -0.5*N.pi*beta_A*k_alpha/alpha
    #beta_prime = -N.tan(0.5*N.pi*(1.0-alpha)) * N.tan(alpha*Phi_0)
    #epsilon = 1-alpha
    #Phi = (r1-0.5) * N.pi
    #W = -log(r2)
    #tau = -epsilon*N.tan(alpha*Phi_0)
    #a = N.tan(0.5*Phi)
    #b = N.tan(0.5*epsilon*Phi)
    #B = N.tan(0.5*epsilon*Phi)/(0.5*epsilon*Phi)
    #z = (N.cos(epsilon*Phi)-N.tan(alpha*Phi_0)*N.sin(epsilon*Phi)) / (W * N.cos(Phi))
    #d = (z ** (epsilon/alpha) - 1.0) / epsilon
    #Y = ( 2.0*(a-b)*(1+a*b) - Phi*tau*B*(b*(1-a*a)-2*a) ) \
    #    / ((1-a*a)*(1+b*b)) \
    #    * (1+epsilon*d) \
    #    + tau*d
    #return Y
# Self-test / demo harness: regenerate the table on "build", then sanity
# check the fitter and plot histograms of random() against levy().
if __name__ == "__main__":
    if "build" in sys.argv[1:]:
        _make_data_file()
    print ("Testing fit_levy.")
    print ("Should give 1.5, 0.5, 0.0, 1.0 ...")
    print (fit_levy(random(1.5, 0.5, 1000)))
    print()
    print ("Does it die when given silly data? ...")
    print (fit_levy(N.array([0,0,0,0,0,0,0,0,0,0,1])))
    print()
    print ("Testing random.")
    import pylab
    the_range = _make_range(-10.0, 10.0, 1000)
    for alpha, beta, nth in [ (0.5,0.0,421), (0.5,1.0,422), (1.0,0.0,423), (1.0,1.0,424), (1.5,0.0,425), (1.5,1.0,426), (2.0, 0.0, 427), (2.0, 1.0, 428) ]:
        pylab.subplot(nth)
        pylab.title("alpha=%.2f beta=%.2f" % (alpha, beta))
        # NOTE(review): hist(normed=1) was removed in newer matplotlib
        # releases - use density=True there.
        pylab.hist(random(alpha, beta, 10000), bins=_make_range(-10.0, 10.0, 100), normed=1)
        pylab.plot(the_range, levy(the_range, alpha, beta), "r", linewidth=2.0)
        pylab.xlim(-10.0, 10.0)
        pylab.ylim(0.0, 0.55)
    pylab.show()
    print ("Just being pretty.")
    # Overlay pdf (top) and cdf (bottom) across the whole parameter grid.
    pylab.subplot(211)
    for alpha in _make_range(0.5, 2.0, 10):
        for beta in _make_range(-1.0, 1.0, 10):
            pylab.plot(levy(_make_range(-5.0,5.0,1000), alpha, beta), 'g')
    pylab.subplot(212)
    for alpha in _make_range(0.5, 2.0, 10):
        for beta in _make_range(-1.0, 1.0, 10):
            pylab.plot(levy(_make_range(-5.0,5.0,1000), alpha, beta, True), 'g')
    pylab.show()
|
import numpy as np
import torch
from datasets import load_dataset
from torchtext.data.metrics import bleu_score
from transformers import AutoTokenizer, T5ForConditionalGeneration
from mmengine.evaluator import BaseMetric
from mmengine.model import BaseModel
from mmengine.runner import Runner
tokenizer = AutoTokenizer.from_pretrained('t5-small')
class MMT5ForTranslation(BaseModel):
    """MMEngine wrapper around a HuggingFace T5 model for translation."""

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, label, input_ids, attention_mask, mode):
        # 'loss' mode: teacher-forced forward pass returning the LM loss.
        if mode == 'loss':
            output = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=label)
            return {'loss': output.loss}
        # 'predict' mode: autoregressive generation; returns (preds, refs).
        elif mode == 'predict':
            output = self.model.generate(input_ids)
            return output, label
        # NOTE(review): any other mode value falls through and returns None.
def post_process(preds, labels):
    """Decode model outputs/labels into tokenized strings for BLEU scoring."""
    decoded = tokenizer.batch_decode(preds, skip_special_tokens=True)
    # Labels use -100 as the ignore index; swap it back to the pad id so the
    # tokenizer can decode them.
    labels = torch.where(labels != -100, labels, tokenizer.pad_token_id)
    references = tokenizer.batch_decode(labels, skip_special_tokens=True)
    return ([pred.split() for pred in decoded],
            [[ref.split()] for ref in references])
class Accuracy(BaseMetric):
    """BLEU + mean generation-length metric for the translation task.

    NOTE(review): despite the name, this computes BLEU, not accuracy.
    """

    def process(self, data_batch, data_samples):
        # data_samples is the (generated_ids, labels) pair from 'predict' mode.
        outputs, labels = data_samples
        decoded_preds, decoded_labels = post_process(outputs, labels)
        score = bleu_score(decoded_preds, decoded_labels)
        # Average count of non-pad tokens per generated sequence.
        prediction_lens = torch.tensor([
            torch.count_nonzero(pred != tokenizer.pad_token_id)
            for pred in outputs
        ],
                                       dtype=torch.float64)
        gen_len = torch.mean(prediction_lens).item()
        self.results.append({
            'gen_len': gen_len,
            'bleu': score,
        })

    def compute_metrics(self, results):
        # Aggregate per-batch results into dataset-level means.
        return dict(
            gen_len=np.mean([item['gen_len'] for item in results]),
            bleu_score=np.mean([item['bleu'] for item in results]),
        )
def collate_fn(data):
    """Tokenize a batch of en->fr examples into model-ready tensors.

    Returns a dict with `label`, `input_ids` and `attention_mask`; pad
    positions in `label` are set to -100 so they are ignored by the loss.
    """
    # T5 is a text-to-text model: the task is selected via a text prefix.
    prefix = 'translate English to French: '
    input_sequences = [prefix + item['translation']['en'] for item in data]
    target_sequences = [item['translation']['fr'] for item in data]
    input_dict = tokenizer(
        input_sequences,
        padding='longest',
        return_tensors='pt',
    )
    label = tokenizer(
        target_sequences,
        padding='longest',
        return_tensors='pt',
    ).input_ids
    label[label ==
          tokenizer.pad_token_id] = -100  # ignore contribution to loss
    return dict(
        label=label,
        input_ids=input_dict.input_ids,
        attention_mask=input_dict.attention_mask)
def main():
    """Fine-tune t5-small on opus_books en-fr with an MMEngine Runner."""
    model = T5ForConditionalGeneration.from_pretrained('t5-small')
    books = load_dataset('opus_books', 'en-fr')
    # opus_books ships only a 'train' split; carve out 20% for validation.
    books = books['train'].train_test_split(test_size=0.2)
    train_set, test_set = books['train'], books['test']
    train_loader = dict(
        batch_size=16,
        dataset=train_set,
        sampler=dict(type='DefaultSampler', shuffle=True),
        collate_fn=collate_fn)
    test_loader = dict(
        batch_size=32,
        dataset=test_set,
        sampler=dict(type='DefaultSampler', shuffle=False),
        collate_fn=collate_fn)
    runner = Runner(
        model=MMT5ForTranslation(model),
        train_dataloader=train_loader,
        val_dataloader=test_loader,
        optim_wrapper=dict(optimizer=dict(type=torch.optim.Adam, lr=2e-5)),
        train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
        val_cfg=dict(),
        work_dir='t5_work_dir',
        val_evaluator=dict(type=Accuracy))
    runner.train()


if __name__ == '__main__':
    main()
|
from Db.DataClass import DataClass
from Db.CustomerType import CustomerType
class CustomerTypeSearch(DataClass):
    """Searches customer types via a stored procedure and serializes them."""

    def __init__(self, status):
        DataClass.__init__(self)
        # Status filter passed to the stored procedure (e.g. active/inactive).
        self.status = status

    def search(self):
        # NOTE(review): status is interpolated into a quoted sproc parameter
        # string - presumably safe for internal values, but not against
        # untrusted input; confirm how `status` is sourced.
        params = "'" + self.status + "'"
        self.rows = DataClass.selectSproc(self,"Customer_Type_Search", "All", params)

    def toJson(self):
        """Build a JSON string of the loaded customer types (call search() first)."""
        self.Json = '{ "CustomerTypes" : [ '
        for row in self.rows:
            # row[0] is assumed to be the customer-type id; load() fills details.
            customerType = CustomerType(row[0], '', '')
            customerType.load()
            self.Json += customerType.toJson('None', False, False, True) + ','
        #Trim trailing comma (trims the opening bracket's space when empty)
        self.Json = self.Json[:-1]
        #Close JSON
        self.Json += ']}'
        return self.Json
|
class Solution:
    def balancedStringSplit(self, s: str) -> int:
        """Return the maximum number of balanced ('L'/'R' counts equal)
        substrings s can be split into.

        Greedy: cut whenever the running balance returns to zero.

        Fixed: the original while-loop only advanced its index on 'L' or
        'R', so any other character caused an infinite loop.
        """
        balance = 0
        splits = 0
        for ch in s:
            if ch == 'L':
                balance += 1
            elif ch == 'R':
                balance -= 1
            if balance == 0:
                splits += 1
        return splits
|
# split a list into two parts
def split(n, l):
    """Return [first n elements of l, remaining elements] (slice semantics)."""
    head, tail = l[0:n], l[n:]
    return [head, tail]
def test_split():
    """Exercise split() on typical, empty and out-of-range inputs."""
    letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
    assert split(3, letters) == [['a', 'b', 'c'],
                                 ['d', 'e', 'f', 'g', 'h', 'i', 'j', 'k']]
    # Degenerate inputs: empty list, zero split point.
    assert split(3, []) == [[], []]
    assert split(0, []) == [[], []]
    assert split(1, ['a']) == [['a'], []]
    assert split(0, ['a']) == [[], ['a']]
    assert split(0, ['a', 'b', 'c']) == [[], ['a', 'b', 'c']]
    # Split point past the end: everything lands in the first part.
    assert split(10, ['a', 'b', 'c']) == [['a', 'b', 'c'], []]
|
import sys
# Redirect stdin to the local sample-input file so input() reads test data.
sys.stdin = open('특이한자석.txt')
def rotation(num, dir, chain):
    """Rotate gear `num` in direction `dir` (1 = clockwise, -1 = counter-
    clockwise) and propagate the rotation to linked neighbours.

    `chain[i]` is 1 when gears i and i+1 mesh; linked neighbours rotate in
    alternating directions. Mutates the module-level `mags` list in place.
    """
    # Propagate to the right, starting with the selected gear itself.
    dr = dir
    for i in range(num, 5):
        target = mags[i]
        if dr == 1:  # clockwise: last tooth wraps to the front
            tmp = target.pop(7)
            target.insert(0, tmp)
        else:  # counter-clockwise: first tooth wraps to the back
            tmp = target.pop(0)
            target.insert(7, tmp)
        if chain[i] == 0:
            break
        dr *= -1  # adjacent meshed gears turn in opposite directions
    # Propagate to the left of the selected gear.
    dl = -dir
    for j in range(num - 1, 0, -1):
        if chain[j] == 0:
            break
        target = mags[j]
        if dl == 1:  # clockwise
            tmp = target.pop(7)
            target.insert(0, tmp)
        else:  # counter-clockwise
            tmp = target.pop(0)
            target.insert(7, tmp)
        dl *= -1
# Driver: for each test case read 4 gears of 8 teeth each, apply K
# rotations, then score the 12-o'clock teeth (weights 1,2,4,8).
T = int(input())
for tc in range(1, T + 1):
    K = int(input())
    mags = [0]  # index 0 unused so gears are addressed 1..4
    for _ in range(4):
        mags.append(list(map(int, input().split())))
    # print(mags)
    # Link info: chain[n] == 1 when gear n's 3-o'clock tooth differs from
    # gear n+1's 9-o'clock tooth (opposite polarities mesh and drive).
    for i in range(K):
        chain = [0] * 5  # reset; indices 0 and 4 are unused
        for n in range(1, 4):
            if mags[n][2] != mags[n+1][6]:
                chain[n] = 1
        # print(chain)
        num, dir = map(int, input().split())
        # print(num, dir)
        rotation(num, dir, chain)
    result = (mags[1][0] * 1) + (mags[2][0] * 2) + (mags[3][0] * 4) + (mags[4][0] * 8)
    print("#{} {}".format(tc, result)) |
import pytest
import vcs, cdms2
from cdat.MarkEdit import MarkerEditor
@pytest.fixture
def editor():
    """Build a MarkerEditorWidget wired to a fresh vcs marker object."""
    editor = MarkerEditor.MarkerEditorWidget()
    marker = vcs.createmarker()
    editor.setMarkerObject(marker)
    return editor
def test_type(qtbot, editor):
    # updateType should write the new marker type into the wrapped object.
    editor.updateType('triangle_up')
    assert editor.object.type == ['triangle_up']
def test_color(qtbot, editor):
    # updateColor should write the new color index into the wrapped object.
    editor.updateColor(55)
    assert editor.object.color == [55]
def test_size(qtbot, editor):
    # updateSize should write the new marker size into the wrapped object.
    editor.updateSize(250)
    assert editor.object.size == [250]
|
from flask import Flask, url_for,render_template,flash,redirect
import flask_login
from form import LoginForm
from flask_sqlalchemy import SQLAlchemy
from fileconfig import *
app = Flask(__name__)
# flask config
app.config.from_object('fileconfig')
# database setting
db = SQLAlchemy(app)
# NOTE(review): create_all() runs before any model modules are imported, so
# it can only create tables for models already defined at this point; confirm.
db.create_all()
# login manager
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# Imported last to break the circular import: views needs `app` defined above.
from app import views
|
import os
import uuid
import sys
sys.path.append("./src/main/python")
import main
try:
    work_path = os.getcwd()
    # Publish a unique per-run RTMP stream key for other components to read.
    random_number = str(uuid.uuid1())
    with open(os.path.join(work_path, '.hilens/rtmp.txt'), 'w') as f:
        f.write(random_number)
    os.environ['RTMP_PATH'] = "rtmp://127.0.0.1/live/" + random_number
    #os.chdir('src/main/python/')
    main.run(work_path)
finally:
    # Clear the stream key on exit; truncate() at position 0 empties the file.
    with open(os.path.join(work_path, '.hilens/rtmp.txt'), 'r+') as f:
        f.truncate()
|
import numpy as np
import zhinst.utils
import time
import matplotlib.pyplot as plt
import pygame, sys
from pygame.locals import *
import math
from my_poll_v2 import R_measure as R_measure
import stlab
import os
#############################################################
''' Definitions'''
# Measurement identity and output location.
device_id = 'dev352'
prefix = 'C26_UL_FrequencySweep_Rs100'
path = 'D:\\measurement_data_4KDIY\\Hadi\\C26 2020-04-10 measurements'
# HF2LI settings
measure_amplitude = 500e-3 #measurement amplitude [V]
measure_output_channnel = 1
measure_input_channnel = 1
measure_frequency = np.linspace(1,1e3,100) #[Hz]
demodulation_time_constant = 0.1
deamodulation_duration = 0.18
calibration_factor = 1.45 # to compensate the shift in resistance measurement
shift = 0
bias_resistor = 20e6
in_range = 10e-3
out_range = 1
diff = True
add = False
offset = 0
ac = False
save_data = False
if save_data:
    colnames = ['frequency (Hz)','resistance (ohm)','impedence (ohm)','phase ()', 'demodulation duration (s)', 'Vx (V)', 'Vy (V)']
    my_file = stlab.newfile(prefix,'_',autoindex=True,colnames=colnames, mypath= path)
# A small pygame window is used only to capture the early-exit keypress.
pygame.init()
pygame.display.set_mode((100,100))
##########################################################
''' Initializing the devices '''
# initial configuration of the Lock-in
apilevel_example = 6  # The API level supported by this example.
(daq, device, props) = zhinst.utils.create_api_session(device_id, apilevel_example, required_devtype='.*LI|.*IA|.*IS')
zhinst.utils.api_server_version_check(daq)
zhinst.utils.disable_everything(daq, device)
out_mixer_channel = zhinst.utils.default_output_mixer_channel(props)
# NOTE(review): this first R_measure call appears to pre-configure the
# lock-in at the first sweep frequency; its return value is discarded.
R_measure(device_id = 'dev352',
            amplitude = measure_amplitude,
            out_channel = measure_output_channnel,
            in_channel = measure_input_channnel,
            time_constant = demodulation_time_constant,
            frequency = measure_frequency[0],
            poll_length = deamodulation_duration,
            device = device,
            daq = daq,
            out_mixer_channel = out_mixer_channel,
            bias_resistor = bias_resistor,
            in_range = in_range,
            out_range = out_range,
            diff = diff,
            add = add,
            offset = offset,
            ac = ac)
#############################################################
''' MEASUREMENT'''
# Frequency sweep: measure resistance/phase at each point and live-plot.
INI_time = time.time()
Freq=np.array([])
plt_resistance=np.array([])
plt_phase=np.array([])
END = False
for freq in measure_frequency:
    measured = R_measure(device_id = 'dev352',
                amplitude = measure_amplitude,
                out_channel = measure_output_channnel,
                in_channel = measure_input_channnel,
                time_constant = demodulation_time_constant,
                frequency = freq,
                poll_length = deamodulation_duration,
                device = device,
                daq = daq,
                out_mixer_channel = out_mixer_channel,
                bias_resistor = bias_resistor,
                in_range = in_range,
                out_range = out_range,
                diff = diff,
                add = add,
                offset = offset,
                ac = ac)
    # Apply the empirical calibration to the measured resistance.
    measured[0] = calibration_factor * np.abs(measured[0]) + shift
    if save_data:
        stlab.writeline(my_file,[freq] + measured)
    plt_resistance = np.append(plt_resistance,measured[0])
    plt_phase = np.append(plt_phase,measured[2])
    Freq = np.append(Freq,freq)
    # Live plot: resistance on top, phase below.
    plt.rcParams["figure.figsize"] = [12,6]
    plt.subplot(2, 1, 1)
    plt.plot(Freq*1e-6,plt_resistance*1e-3, '--r',marker='o')
    plt.ylabel('Resistance ($k\Omega$)')
    # plt.yscale('log')
    # plt.xscale('log')
    plt.title("Resistance = %4.2f k$\Omega$" %(measured[0]*1e-3))
    plt.subplot(2, 1, 2)
    plt.plot(Freq*1e-6,plt_phase, '--r', marker='o')
    # plt.xscale('log')
    plt.ylabel('phase ()')
    plt.xlabel('frequency (MHz)')
    plt.title("phase = %4.2f (), x = %4.2f (nV), y = %4.2f (nV)" %(measured[2], measured[4]*1e9, measured[5]*1e9))
    plt.pause(0.1)
    # Pressing 'e' (keycode 101) in the pygame window ends the sweep early.
    for event in pygame.event.get():
        if event.type == QUIT:sys.exit()
        elif event.type == KEYDOWN and event.dict['key'] == 101:
            END = True
    if END:
        break
# zhinst.utils.disable_everything(daq, device)
if save_data:
    plt.savefig(os.path.dirname(my_file.name)+'\\'+prefix)
    my_file.close()
#######################################################################
''' saving the data '''
|
def luasSegitiga2(a, t):
    """Print the area of a triangle with base ``a`` and height ``t``."""
    luas = (a * t) / 2
    print('Luas segitiga dg alas ', a,
          ' dan tinggi ', t,
          ' adalah ', luas)
luasSegitiga2(10, 20)
# triangle with base=15 and height=45
luasSegitiga2(15, 45)
|
def main(power=1000):
    """Return the sum of the decimal digits of 2**power.

    Project Euler #16. `power` is parameterized (default 1000, preserving
    the original behavior) so other exponents can be queried too.
    """
    number = 2 ** power
    return sum(int(digit) for digit in str(number))


if __name__ == "__main__":
    answer = main()
    print(answer)
#!/usr/bin/env python
import sys
# Emits a large synthetic C++ translation unit on stdout (10 nested
# namespaces x 10000 chained functions) for compile/link benchmarking.
# sys.argv[1] is used as the name prefix and entry-point function name.
print("#include <array>")
namespaces = 10
for i in range(0, namespaces):
    print("namespace someothername" + str(i) + " {")
func_name = sys.argv[1] + "veryLongFunctionNameToMakeTheBenchmarkBigger"
func_list = []
for i in range(0, 10000):
    func_list.append(func_name + str(i))
    # Each function has a distinct array size and calls its predecessor.
    print("std::array<int, " + str(i) + ">*" + func_name + str(i) + "() {")
    if i != 0:
        print(" " + func_name + str(i - 1) + "();")
    print(" return nullptr;")
    print("}")
for i in range(0, namespaces):
    print("}")
# Entry point calls every generated function through the full namespace path.
print ("int " + sys.argv[1] + "() {")
namespace_spec = ""
for i in range(0, namespaces):
    namespace_spec += "someothername" + str(i) + "::"
for func in func_list:
    print("{")
    print(" auto f = " + namespace_spec + func + "();")
    print(" if (f->size() == 0) return 4;")
    print("}")
print(" return 1;\n}")
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
# sqlalchemy-migrate metadata: pre_meta describes the schema before this
# migration, post_meta the schema after it.
pre_meta = MetaData()
post_meta = MetaData()
client = Table('client', post_meta,
    Column('id', INTEGER, primary_key=True, nullable=False),
    Column('first_name', VARCHAR(length=255)),
    Column('last_name', VARCHAR(length=255)),
    Column('birthdate', DATETIME),
    Column('uci_id', INTEGER),
    Column('address', VARCHAR(length=255)),
    Column('city', VARCHAR(length=55)),
    Column('state', VARCHAR(length=10)),
    Column('zipcode', VARCHAR(length=15)),
    Column('phone', VARCHAR(length=15)),
    Column('gender', VARCHAR(length=10)),
    Column('needs_appt_scheduled', SMALLINT, default=ColumnDefault(1)),
    Column('additional_info', Text),
    Column('regional_center_id', INTEGER),
    Column('therapist_id', INTEGER),
    Column('status', VARCHAR(length=15), default=ColumnDefault('active')),
    # New in this migration (created in upgrade / dropped in downgrade).
    Column('weeks_premature', Numeric(precision=10, scale=2), default=ColumnDefault(0)),
)
client_eval = Table('client_eval', post_meta,
    Column('id', INTEGER, primary_key=True, nullable=False),
    Column('client_id', INTEGER),
    Column('therapist_id', INTEGER),
    # New in this migration (created in upgrade / dropped in downgrade).
    Column('client_appt_id', INTEGER),
    Column('created_date', DATETIME),
)
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Add the two new columns introduced by this revision.
    post_meta.tables['client'].columns['weeks_premature'].create()
    post_meta.tables['client_eval'].columns['client_appt_id'].create()
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Drop the columns added in upgrade(); the data in them is lost.
    post_meta.tables['client'].columns['weeks_premature'].drop()
    post_meta.tables['client_eval'].columns['client_appt_id'].drop()
|
from log import Log
class ResultFormatterBase:
    """Abstract base for result formatters.

    Subclasses must override format_result() and set file_extension.
    """

    # File extension written by the concrete formatter (e.g. '.xml').
    file_extension = None

    def format_result(self, test_cases):
        # Base class has no formatting behaviour - subclasses must override.
        Log.err("BaseFormatter doesn't know what to do :(")
|
def selection_Sort():
n=raw_input("\nEnter the numbersize")
l=[]
for i in range(int(n)):
x=raw_input("Enter %d element" %i)
l.append(x)
for i in range(len(l)):
mini=l[i]
for j in range(i,len(l)):
if(l[j]<mini):
mini=l[j]
z=l.index(mini)
l[i],l[z]=mini,l[i]
for j in l:
print(j),
|
#! /usr/bin/python
import sys
import os
lib_path = os.path.abspath('/home/dept/ta/yuhan/mobility-detector/src/Mobility-Detector/activity-detection/src/lib')
sys.path.append(lib_path)
from sensors import *
from location import *
from math import *
from numpy.fft import *
import numpy
from normal import *
from distributions import *
import operator
import pickle
from scipy import stats
from scipy.stats import norm
#import matplotlib
# classify based on traces
class KernelSimpleEnergyClassify(object) :
    ''' Windowing primitives '''
    # NOTE(review): everything below is class-level (shared) state; instances
    # that mutate these containers without reassignment share them. Also
    # Python 2-only constructs (sys.maxint, xrange) are used here.
    WINDOW_IN_MILLI_SECONDS = 5000
    WIFI_WINDOW_IN_MILLI_SECONDS= (3 * 60 * 1000)
    GSM_WINDOW_IN_MILLI_SECONDS = (3 * 60 * 1000)
    GPS_WINDOW_IN_MILLI_SECONDS = (3 * 60 * 1000)
    prev_bin = -1
    # Rolling feature vectors / predictions for the accelerometer stream.
    current_accel_fv = []
    current_accel_prediction = []
    current_window=[]
    wifi_current_window = []
    gps_current_window = []
    gsm_current_window = []
    last_print_out = -1
    last_accel_energy_update = 0
    last_speed_energy_update = 0
    activity_templates=[]
    recs = dict()
    ''' power stats for each phone '''
    power_accel=dict()
    power_wifi=dict()
    power_gps=dict()
    power_gsm=dict()
    power_nwk_loc=dict()
    # Sampling intervals start at "never" until configured in __init__.
    current_accel_sampling_interval = sys.maxint
    current_wifi_sampling_interval = sys.maxint
    current_gps_sampling_interval = sys.maxint
    current_gsm_sampling_interval = sys.maxint
    energy_consumed = 0
    # 5x5 confusion matrix over the five activity classes.
    feature_based_conf_matrix = [[ 0 for i in xrange(5)] for j in xrange(5)]
    feature_list = []
    # Per-class KDE models, keyed by activity label 0..4.
    accel_kernel_function = {}
    gsm_kernel_function = {}
    wifi_kernel_function = {}
    callback_list = []
    #Viterbi
    #stateScore = [0]*5
    prev_prediction = -1
    #EWMA
    ewma_window = []
    ''' dhmm '''
    # Discrete-HMM transition probabilities between the five activity states.
    transition_prob = [[1,0.2,0.2,0.2,0.2],[0.2,1,0.1,0.2,0.1],[0.2,0.2,1,0.00000001,0.00000001],[0.2,0.00000001,0.00000001,1,0.00000001],[0.2,0.00000001,0.00000001,0.00000001,1]]
    state_score = [0.2] * 5
    def __init__(self,sim_phone, accel_classifier_model, gsm_classifier_model, wifi_classifier_model, gps_classifier_model , power_model, callback_list) :
        """Configure the phone simulator's sampling intervals and load the
        per-sensor KDE classifier models.

        NOTE(review): gps_classifier_model is accepted but never loaded here.
        """
        self.sim_phone=sim_phone
        self.classifier_output_combined=[]
        self.classifier_output_combined_ewma=[]
        self.classifier_output_combined_hmm=[]
        self.classifier_output_avefv=[]
        self.classifier_output_speed = []
        self.callback_list = callback_list
        self.ewma_window = [0.2]*5
        # Python 2 execfile: runs the power-model script; presumably it
        # populates the self.power_* tables used below - confirm.
        execfile(power_model)
        ''' set initial sampling intervals in milliseconds '''
        self.sim_phone.change_gsm_interval(60 * 1000)
        self.sim_phone.change_wifi_interval(max(self.power_wifi.keys()))
        self.sim_phone.change_gps_interval(max(self.power_gps.keys()))
        self.current_gsm_sampling_interval = 60 * 1000
        self.current_wifi_sampling_interval = max(self.power_wifi.keys())
        self.current_gps_sampling_interval = max(self.power_gps.keys())
        sim_phone.change_accel_interval(max(self.power_accel.keys()))
        self.current_accel_sampling_interval=max(self.power_accel.keys())
        ''' load model '''
        self.accel_kernel_function = self.load_accel_model(accel_classifier_model)
        self.gsm_kernel_function = self.load_cell_model(gsm_classifier_model)
        self.wifi_kernel_function = self.load_cell_model(wifi_classifier_model)
    def load_accel_model(self, accel_classifier_model):
        """Load pickled per-class accelerometer features and build one
        gaussian KDE per (class, feature) pair.

        Returns a dict: label 0..4 -> list of fitted gaussian_kde objects.
        """
        classifier_model_handle=open(accel_classifier_model,"r")
        training_feature_list = pickle.load(classifier_model_handle)
        kernel_function = {}
        for i in xrange(5):
            kernel_function[i] = []
            for j in xrange(len(training_feature_list[i])):
                kernel_pdf = stats.gaussian_kde(training_feature_list[i][j])
                # Fixed narrow bandwidth for all accelerometer features.
                kernel_pdf.set_bandwidth(bw_method = 0.05)
                #setattr(kernel_pdf, 'self.covariance_factor', self.covariance_factor.__get__(kernel_pdf, type(kernel_pdf)))
                #kernel_pdf._compute_covariance()
                kernel_function[i] += [kernel_pdf]
        training_feature_list = []
        classifier_model_handle.close()
        return kernel_function
    def load_cell_model(self, cell_classifier_model):
        """Parse a CSV of (common_cell_ratio, rssi_dist_ratio, ..., label)
        rows and fit one gaussian KDE per (class, feature).

        Returns a dict: label 0..4 -> [kde(common_cell_ratio), kde(rssi_dist_ratio)].
        """
        common_cell_ratio_stats = {}
        rssi_dist_ratio_stats = {}
        for i in xrange(5):
            common_cell_ratio_stats[i] = []
            rssi_dist_ratio_stats[i] = []
        classifier_model_handle = open(cell_classifier_model, 'r')
        for line in classifier_model_handle.readlines():
            # Columns: common-cell ratio, rssi-distance ratio, ..., ground truth.
            common_cell_ratio = float(line.split(',')[0])
            rssi_dist_ratio = float(line.split(',')[1])
            gnd_truth = int(line.split(',')[-1])
            common_cell_ratio_stats[gnd_truth] += [common_cell_ratio]
            rssi_dist_ratio_stats[gnd_truth] += [rssi_dist_ratio]
        output_fv_dict= {}
        for i in xrange(5):
            output_fv_dict[i] = []
            output_fv_dict[i] += [common_cell_ratio_stats[i]] # a list
            output_fv_dict[i] += [rssi_dist_ratio_stats[i]]
        cell_kernel_function = {}
        for i in xrange(5):
            cell_kernel_function[i] = []
            for j in xrange(len(output_fv_dict[i])):
                kernel_pdf = gaussian_kde(output_fv_dict[i][j])
                # Feature-specific bandwidths: ratio feature is narrow,
                # rssi-distance feature is wide.
                if j == 0:
                    kernel_pdf.set_bandwidth(bw_method = 0.1)
                if j == 1:
                    kernel_pdf.set_bandwidth(bw_method = 3)
                #setattr(kernel_pdf, 'self.covariance_factor', self.covariance_factor.__get__(kernel_pdf, type(kernel_pdf)))
                #kernel_pdf._compute_covariance()
                cell_kernel_function[i] += [kernel_pdf]
        classifier_model_handle.close()
        return cell_kernel_function
def covariance_factor(self, obj):
return 0.05
def mean_and_var(self,value_list) :
if (value_list==[]) :
return (None,None)
meanSq=reduce(lambda acc,update : acc + update**2,value_list,0.0)/len(value_list)
mean=reduce(lambda acc,update : acc + update,value_list,0.0)/len(value_list)
return (mean,meanSq-mean*mean)
    def predict_label(self,mean_fv,sigma_fv,peak_freq_fv,strength_var_fv) :
        ''' Predict the label given the mean, sigma, peak frequency and strength variation components of the feature vector '''
        # Naive-Bayes style product of the four per-feature KDE likelihoods,
        # seeded with float-min to avoid exact zeros.
        likelihood=[sys.float_info.min]*5
        for label in xrange(5):
            likelihood[label] += (self.accel_kernel_function[label][0].evaluate(mean_fv)[0]) * (self.accel_kernel_function[label][1].evaluate(sigma_fv)[0]) *(self.accel_kernel_function[label][2].evaluate(peak_freq_fv)[0]) * (self.accel_kernel_function[label][3].evaluate(strength_var_fv)[0])
        # Normalize to a posterior pmf; fall back to uniform when degenerate.
        posterior_pmf = [0]*5
        for label in xrange(5):
            if sum(likelihood) > 0:
                posterior_pmf[label]=likelihood[label]/(sum(likelihood)*1.0)
            else:
                posterior_pmf[label] = 0.2
        return Distribution(5,posterior_pmf)
    def get_ave_intersect_station_ratio(self,window):
        """Average Jaccard overlap of station-id sets between consecutive
        scans in `window`; returns -1 when fewer than 2 scans.

        Each window entry is assumed to be (timestamp, station_id_list, ...).
        """
        if len(window) < 2:
            return -1
        intersect_ratio = 0.0
        for i in xrange(len(window)-1):
            intersect_num = len(list(set(window[i][1]) & set(window[i+1][1])))
            union_num = len(window[i][1]) + len(window[i+1][1]) - intersect_num
            if union_num > 0:
                intersect_ratio += intersect_num/ (union_num*1.0)
        return intersect_ratio/((len(window)-1) * 1.0)
def rssi_dist_bt_two_scans(self,a,b):
dist = 0.0
intersect_num = 0
bit_map_a = {}
bit_map_b = {}
for i in xrange(len(a[1])):
bit_map_a[i] = 0
for i in xrange(len(b[1])):
bit_map_b[i] = 0
for index_list_a in xrange(len(a[1])):
try:
index_list_b = b[1].index(a[1][index_list_a])
intersect_num += 1
bit_map_a[index_list_a] = 1
bit_map_b[index_list_b] = 1
dist += (a[2][index_list_a] - b[2][index_list_b]) * (a[2][index_list_a] - b[2][index_list_b])
except ValueError:
continue
for index_list_a in xrange(len(a[1])):
if bit_map_a[index_list_a] == 0:
dist += a[2][index_list_a] * a[2][index_list_a]
for index_list_b in xrange(len(b[1])):
if bit_map_b[index_list_b] == 0:
dist += b[2][index_list_b] * b[2][index_list_b]
union_num = (len(a[1]) + len(b[1]) - intersect_num)
if union_num > 0:
return sqrt(dist)/((len(a[1]) + len(b[1]) - intersect_num) * 1.0)
else:
return 99
def get_ave_rssi_diff(self,window):
if len(window) < 2:
return -1
total_rssi_diff = 0.0
for i in xrange(len(window)-1): # for each pair
total_rssi_diff += self.rssi_dist_bt_two_scans(window[i],window[i+1])
return total_rssi_diff/((len(window) -1) *1.0)
def get_ave_instant_speed(self,window):
if len(window) < 2:
return -1
total_speed = 0.0
for i in xrange(len(window)-1):
time_diff = (window[i+1][0] - window[i][0])/1000.0
total_speed += (window[i][1].compute_geo_distance(window[i+1][1]))/time_diff
return total_speed/((len(window)-1) * 1.0)
def get_ave_speed(self,window):
if len(window) < 2:
return -1
total_dist = 0.0
for i in xrange(len(window)-1):
total_dist += window[i][1].compute_geo_distance(window[i+1][1])
time_diff = (window[-1][0] - window[0][0])/1000.0
return float(total_dist)/time_diff
def m_estimation_smoothing(self,pmf):
weight = 3
smoothed_pmf = [sys.float_info.min] * 5
for label in xrange(5):
smoothed_pmf[label] = float((weight * pmf[label] + 0.2))/ (weight + 1)
return smoothed_pmf
    def callback(self,sensor_reading,current_time) :
        ''' Interface to simulator : Leave final result as (timestamp,output_distribution) pairs in classifier_output

        Maintains sliding windows per modality (WiFi scans, GSM scans, accel
        magnitudes). On each accel window boundary it computes a feature vector
        and an accel posterior; once per one-minute bin it fuses the speed- and
        accel-based posteriors and records plain, EWMA- and HMM-smoothed labels.
        NOTE(review): written for Python 2 — ``filter``/``map`` must return
        lists here (windows are later concatenated with ``+=`` and indexed),
        and ``current_time/bin_size`` relies on integer division.
        '''
        # Age out entries that fell outside each modality's sliding window.
        self.wifi_current_window = filter(lambda x: x[0] >= current_time - self.WIFI_WINDOW_IN_MILLI_SECONDS, self.wifi_current_window)
        self.gsm_current_window = filter(lambda x: x[0] >= current_time - self.GSM_WINDOW_IN_MILLI_SECONDS, self.gsm_current_window)
        self.current_window=filter(lambda x : x[0] >= current_time - self.WINDOW_IN_MILLI_SECONDS,self.current_window)
        # Append the new reading to the window matching its modality.
        if(isinstance(sensor_reading, GSM)):
            self.gsm_current_window += [(current_time, sensor_reading.cell_tower_list, sensor_reading.rssi_list)]
        if(isinstance(sensor_reading, WiFi)):
            self.wifi_current_window += [(current_time, sensor_reading.ap_list, sensor_reading.rssi_list)]
        if(isinstance(sensor_reading,Accel)):
            ''' compute accel magnitude and keep track of windows '''
            accel_mag=sqrt(sensor_reading.accel_x**2+sensor_reading.accel_y**2+sensor_reading.accel_z**2)
            self.current_window+=[(current_time,accel_mag)]
            start_time = self.current_window[0][0]  # NOTE(review): unused
            self.last_print_out=current_time if self.last_print_out == -1 else self.last_print_out
            if (current_time - self.last_print_out) >= self.WINDOW_IN_MILLI_SECONDS : #compute a feature vector
                self.last_print_out = current_time
                ''' variance and mean feature vector components '''
                (mean,variance)=self.mean_and_var(map(lambda x : x[1],self.current_window));
                sigma=sqrt(variance)
                #print "Mean, sigma ",mean,sigma
                ''' Peak frequency, compute DFT first on accel magnitudes '''
                current_dft=rfft(map(lambda x : x[1] , self.current_window))
                peak_freq=0 # TODO Find a better way of doing this.
                if (len(current_dft) > 1) :
                    ''' ignore DC component '''
                    peak_freq_index=numpy.abs(current_dft[1:]).argmax() + 1;
                    ''' sampling_frequency '''
                    N=float(len(self.current_window))
                    # Sampling rate estimated from the window's actual time span.
                    sampling_freq=N/(self.current_window[-1][0]-self.current_window[0][0])
                    peak_freq=((peak_freq_index)/(N* 1.0)) * sampling_freq
                    nyquist_freq=sampling_freq/2.0;
                    assert ( peak_freq <= nyquist_freq );
                #print "Peak_freq ",peak_freq," Hz"
                ''' Strength variation '''
                # Collect local maxima (summits) and minima (valleys); the sum
                # of their standard deviations is the strength-variation feature.
                summits=[]
                valleys=[]
                sigma_summit=0
                sigma_valley=0
                for i in range(1,len(self.current_window)-1) :
                    if ( (self.current_window[i][1] >= self.current_window[i+1][1]) and (self.current_window[i][1] >= self.current_window[i-1][1]) ) :
                        summits+=[self.current_window[i]]
                    if ( (self.current_window[i][1] <= self.current_window[i+1][1]) and (self.current_window[i][1] <= self.current_window[i-1][1]) ) :
                        valleys+=[self.current_window[i]]
                if ( len(summits) != 0 ) :
                    if self.mean_and_var(map(lambda x: x[1], summits))[1] > 0:
                        sigma_summit=sqrt(self.mean_and_var(map(lambda x : x[1],summits))[1]);
                if ( len(valleys) != 0 ) :
                    if self.mean_and_var(map(lambda x: x[1], valleys))[1] > 0:
                        sigma_valley=sqrt(self.mean_and_var(map(lambda x : x[1],valleys))[1]);
                #print "Strength variation ", sigma_valley+sigma_summit
                # Accel-only posterior for this window; record prediction,
                # confusion-matrix cell and the raw feature vector.
                posterior_dist=self.predict_label(mean,sigma,peak_freq,sigma_valley+sigma_summit)
                self.current_accel_prediction+= [(current_time,posterior_dist.mode())]
                self.feature_based_conf_matrix[sensor_reading.gnd_truth][posterior_dist.mode()] += 1
                self.current_accel_fv+=[(current_time,mean,sigma,peak_freq,sigma_valley+sigma_summit)]
                # Arbitrary values for unit test :
                self.accel_energy_adapt(current_time, self.power_accel, self.callback_list, posterior_dist.pmf)
                self.last_accel_energy_update=current_time
        ''' start making prediction '''
        # One fused prediction per one-minute time bin.
        bin_size = 60 * 1000
        cur_bin = current_time/bin_size
        self.prev_bin = cur_bin if self.prev_bin == -1 else self.prev_bin
        if cur_bin > self.prev_bin: # time to make a prediction
            self.prev_bin = cur_bin
            ''' get prediction from speed-based sensor'''
            speed_likelihood = [sys.float_info.min] * 5
            speed_posterior = [0.2] * 5
            ## wifi
            # Use WiFi evidence only while its sampling interval is faster than
            # the slowest configured one (i.e. the radio is actively sampled).
            if self.current_wifi_sampling_interval < max(self.power_wifi.keys()):
                cell_ratio_fv = self.get_ave_intersect_station_ratio(self.wifi_current_window)
                rssi_diff_fv = self.get_ave_rssi_diff(self.wifi_current_window)
                for i in xrange(5):
                    speed_likelihood[i] += (self.wifi_kernel_function[i][0].evaluate(cell_ratio_fv)[0]) * (self.wifi_kernel_function[i][1].evaluate(rssi_diff_fv)[0])
                for label in xrange(5):
                    if sum(speed_likelihood) > 0:
                        speed_posterior[label] = float(speed_likelihood[label])/sum(speed_likelihood)
                    else:
                        speed_posterior[label] = 0.2
            ## gsm
            # Same treatment for GSM; its likelihood accumulates on top of WiFi's.
            if self.current_gsm_sampling_interval < max(self.power_gsm.keys()):
                cell_ratio_fv = self.get_ave_intersect_station_ratio(self.gsm_current_window)
                rssi_diff_fv = self.get_ave_rssi_diff(self.gsm_current_window)
                for i in xrange(5):
                    speed_likelihood[i] += (self.gsm_kernel_function[i][0].evaluate(cell_ratio_fv)[0]) * (self.gsm_kernel_function[i][1].evaluate(rssi_diff_fv)[0])
                for label in xrange(5):
                    if sum(speed_likelihood) > 0:
                        speed_posterior[label] = float(speed_likelihood[label])/sum(speed_likelihood)
                    else:
                        speed_posterior[label] = 0.2
            smoothed_speed_posterior = self.m_estimation_smoothing(speed_posterior)
            ''' Energy adpative for speed-based sensor'''
            self.speed_energy_adapt(current_time, self.power_gsm, self.power_wifi,self.power_gps, self.callback_list, speed_posterior)
            self.last_speed_energy_update=current_time
            self.classifier_output_speed.append((current_time, smoothed_speed_posterior.index(max(smoothed_speed_posterior))))
            ''' get prediction from accel '''
            ## accel
            # Keep only accel feature vectors / predictions from this bin.
            self.current_accel_fv = filter(lambda x: x[0] >= current_time - bin_size, self.current_accel_fv)
            self.current_accel_prediction = filter(lambda x: x[0] >= current_time - bin_size, self.current_accel_prediction)
            ## ave_feature_vector
            # Average each feature component over the bin, then classify the
            # averaged feature vector.
            accel_mean_list = []
            accel_std_list = []
            accel_pf_list = []
            accel_sv_list = []
            for accel_tuple in self.current_accel_fv:
                for i in xrange(1,len(accel_tuple)):
                    if i == 1:
                        accel_mean_list.append(accel_tuple[i])
                    elif i == 2:
                        accel_std_list.append(accel_tuple[i])
                    elif i == 3:
                        accel_pf_list.append(accel_tuple[i])
                    elif i == 4:
                        accel_sv_list.append(accel_tuple[i])
            (ave_fv_mean,dummy) = self.mean_and_var(accel_mean_list)
            (ave_fv_std,dummy) = self.mean_and_var(accel_std_list)
            (ave_fv_pf,dummy) = self.mean_and_var(accel_pf_list)
            (ave_fv_sv,dummy) = self.mean_and_var(accel_sv_list)
            accel_posterior_avefv=self.predict_label(ave_fv_mean,ave_fv_std,ave_fv_pf,ave_fv_sv).pmf
            smoothed_accel_posterior_avefv = self.m_estimation_smoothing(accel_posterior_avefv)
            self.classifier_output_avefv.append((current_time, smoothed_accel_posterior_avefv.index(max(smoothed_accel_posterior_avefv))))
            ''' combinr accel and speed based sensors '''
            # Naive-Bayes style fusion: multiply the two smoothed posteriors.
            combined_likelihood = [0]*5
            for label in xrange(5):
                combined_likelihood[label] = smoothed_accel_posterior_avefv[label] * smoothed_speed_posterior[label]
            normalized_combined_posterior = [0]*5
            if sum(combined_likelihood) > 0:
                for i in xrange(5):
                    normalized_combined_posterior[i] = float(combined_likelihood[i])/sum(combined_likelihood)
            else:
                normalized_combined_posterior = [0.2] * 5
            self.classifier_output_combined.append((current_time,normalized_combined_posterior.index(max(normalized_combined_posterior))))
            current_prediction = normalized_combined_posterior.index(max(normalized_combined_posterior))
            ##EWMA
            # Exponentially-weighted moving average over one-hot predictions.
            ALPHA = 0.4
            Yt = [0] * 5
            Yt[current_prediction] = 1
            for i in xrange(5):
                self.ewma_window[i] = ALPHA * Yt[i] + (1-ALPHA) * self.ewma_window[i]
            self.classifier_output_combined_ewma.append((current_time,self.ewma_window.index(max(self.ewma_window))))
            #HMM
            # Viterbi-style update of the per-state log score using the
            # transition matrix and the fused class probabilities (floored
            # at 0.001 to keep log() finite).
            class_prob = normalized_combined_posterior
            #class_prob[current_prediction] = 1.0
            new_state_score = [0]* 5
            for i in xrange(5):
                max_score = -1.0 * sys.float_info.max
                for j in xrange(5):
                    transition_score = self.transition_prob[j][i]
                    cand_score = self.state_score[j] + log(transition_score) + log(max(class_prob[i], 0.001))
                    if cand_score > max_score:
                        max_score = cand_score
                new_state_score[i] = max_score
            for i in xrange(5):
                self.state_score[i] = new_state_score[i]
            self.classifier_output_combined_hmm.append((current_time,self.state_score.index(max(self.state_score))))
def speed_energy_adapt(self, current_time, power_gsm, power_wifi, power_gps, callback_list, posterior_pmf):
self.energy_consumed += (current_time-self.last_speed_energy_update) * power_wifi[self.current_wifi_sampling_interval]
self.energy_consumed += (current_time-self.last_speed_energy_update) * power_gps[self.current_gps_sampling_interval]
self.energy_consumed += (current_time-self.last_speed_energy_update) * power_gsm[self.current_gsm_sampling_interval]
do_i_ramp_up=reduce(lambda acc, update : acc or ((posterior_pmf[update] >= 0.2) and (posterior_pmf[update]<=0.8)), callback_list ,False);
if (do_i_ramp_up):
if self.current_gsm_sampling_interval != max(power_gsm.keys()):
self.current_gsm_sampling_interval = max(power_gsm.keys())
self.sim_phone.change_gsm_interval(max(power_gsm.keys()))
self.current_wifi_sampling_interval = 60 * 1000
self.sim_phone.change_wifi_interval(60 * 1000)
return
else:
if self.current_wifi_sampling_interval != max(power_wifi.keys()):
self.current_wifi_sampling_interval = max(power_wifi.keys())
self.sim_phone.change_wifi_interval(max(power_wifi.keys()))
self.current_gsm_sampling_interval = 60 * 1000
self.sim_phone.change_gsm_interval(60 * 1000)
return
def accel_energy_adapt(self, current_time, power_accel, callback_list, posterior_pmf):
''' Vary sampling rate if confidence > 0.2'''
self.energy_consumed += (current_time-self.last_accel_energy_update) * power_accel[self.current_accel_sampling_interval]
#print "Current sampling interval is ",self.current_sampling_interval
#ramp up if required
do_i_ramp_up=reduce(lambda acc, update : acc or ((posterior_pmf[update] >= 0.2) and (posterior_pmf[update]<=0.8)), callback_list ,False);
#do_i_ramp_up=reduce(lambda acc, update : acc or ((posterior_pmf[update] >= 0.2)), callback_list ,False);
if (do_i_ramp_up):
candidate_interval = filter(lambda x : x < self.current_accel_sampling_interval,power_accel)
if len(candidate_interval) > 0:
self.current_sampling_interval = max(candidate_interval)
self.sim_phone.change_accel_interval(self.current_accel_sampling_interval)
return
else:
candidate_interval = filter(lambda x : x > self.current_accel_sampling_interval,power_accel)
if len(candidate_interval) > 0:
self.current_sampling_interval = min(candidate_interval)
self.sim_phone.change_accel_interval(self.current_accel_sampling_interval)
return
|
# Comparison operators: < , > , >= , <= , == , != , is , is not
# `is` / `is not` compare object identity (same object), not value equality.
a = "kamu"
b = "kamu"
# Equivalent to `a is not b`; CPython interns identical literals, so False.
hasil = not (a is b)
print(hasil)
import urllib.parse
import settings
# Tortoise ORM configuration.  The DB password is URL-encoded so that special
# characters survive embedding in the connection URL.
_DB_URL = "mysql://{user}:{password}@{host}:{port}/{name}".format(
    user=settings.DB_USER,
    password=urllib.parse.quote_plus(settings.DB_PASS),
    host=settings.DB_HOST,
    port=settings.DB_PORT,
    name=settings.DB_NAME,
)

TORTOISE_ORM = {
    "connections": {"default": _DB_URL},
    "apps": {
        "models": {
            "models": ["models", "aerich.models"],
            "default_connection": "default",
        },
    },
}
|
from django.contrib import admin
from .models import Application
# Register your models here.
@admin.register(Application)
class ApplicationAdmin(admin.ModelAdmin):
    """Admin for Application: locks sensitive fields once a record exists."""

    # Fields that must not be edited after the application was submitted.
    _LOCKED_FIELDS = ('legal_agreement', 'signature', 'income_photo')

    def get_readonly_fields(self, request, obj=None):
        # Change form (obj set): locked fields become read-only.
        # Add form (obj is None): everything stays editable.
        if obj:
            return self.readonly_fields + self._LOCKED_FIELDS
        return self.readonly_fields
# -*- coding: utf-8 -*-
#python 3.5
import pandas as pd
def print_frequency(data, col, format="count"):
    """Print the frequency table of data[col] as raw counts or proportions."""
    norm = (format == "percent")
    print(format, '-')
    print(data[col].value_counts(sort=False, normalize=norm))
if __name__=="__main__":
    dataset="ADDHEALTH"
    # low_memory=False parses the whole file at once for consistent dtypes.
    data=pd.read_csv("addhealth_pds.csv", low_memory=False)
    # bug fix for display formats to avoid run time errors
    # (was `pandas.set_option` — a NameError, since the module is imported as `pd`)
    pd.set_option('display.float_format', lambda x:'%f'%x)
    # Subset of respondents who gave a response (6/7/8 are non-response codes).
    subset=data[~(data["H1RE4"].isin([6,7,8]))]
    cols=["H1RE4", "H1PF15", "H1PF33"]
    verbose=["\nVariable %d (%s): Importance of Religion",
             "\nVariable %d (%s): Respondents getting upset by difficult problems",
             "\nVariable %d (%s): Like the way you are"]
    print("\nFrequency distribution of for %s dataset:" %dataset)
    for index,col in enumerate(cols):
        print(verbose[index] %(index+1, col))
        print_frequency(subset, col, "count")
        print_frequency(subset, col, "percent")
|
import time
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
from stem import Signal
from stem.control import Controller
def new_tor_identity():
    """Ask the local Tor control port for a fresh circuit (new exit IP)."""
    with Controller.from_port(port=9051) as ctrl:
        ctrl.authenticate(password='articles')
        ctrl.signal(Signal.NEWNYM)
class ProxyMiddleware(HttpProxyMiddleware):
    """Routes requests through a local Tor proxy and rotates Tor identity."""

    def process_response(self, request, response, spider):
        # For the GSM spider, any non-200 answer triggers an identity rotation
        # and a retry of the same request.
        if spider.name == "GSM" and response.status != 200:
            new_tor_identity()
            return request
        if response.status == 429:
            # Back off until the rate-limit window resets.
            time.sleep(60)  # If the rate limit is renewed in a minute, put 60 seconds, and so on.
            return response
        return response

    def process_request(self, request, spider):
        # A new identity for each request; comment this out to rotate only on
        # errors via process_response.
        if spider.name == "GSM":
            new_tor_identity()
        request.meta['proxy'] = 'http://127.0.0.1:8118'
        spider.log('Proxy : %s' % request.meta['proxy'])
|
#!/usr/bin/python
import os
import re
import sys
import glob
from scipy import stats
from Bio import SeqIO
from collections import Counter, defaultdict
def isInt(astring):
    """ Is the given string an integer?  Returns 1/0 (truthy flags). """
    try:
        int(astring)
    except ValueError:
        return 0
    return 1
def ExtractEggPSGNumber(PSG):
    """Extract the numeric egg-passage count from a passage annotation.

    Returns the digits after an 'E' or 'AM' prefix (e.g. 'E3' -> '3',
    'AM2' -> '2'); returns None for compound annotations ('/', '+', ',')
    or anything that does not match either prefix.
    """
    # Compound annotations describe multiple passage steps; skip them.
    if '/' in PSG or '+' in PSG or ',' in PSG:
        return None
    number = PSG.replace('_', '')
    # Keep only the part after a prefix like 'X:'.
    if ':' in number:
        number = number.rsplit(':')[1]
    if 'E' in number and isInt(number[1:]):
        return number.replace('E', '')
    if 'AM' in number and isInt(number[2:]):
        return number.replace('AM', '')
def ParseSeqs(records,seq_outfile,pos):
    """Partition sequence records by passage history (egg vs unpassaged).

    records: parsed FASTA records whose id holds 5 '|'-separated fields
        (ID|passage|...|...|date).
    seq_outfile: path written with the retained sequences in FASTA form.
    pos: 0-based alignment column inspected for the 'P' (proline) state.

    Returns (Egg_dict, Ori_dict, Egg_PSGNumber_dict): year -> sequence lists
    for egg-passaged and original/unpassaged isolates, plus egg passage
    numbers overall ('All') and for proline carriers ('P').
    """
    outfile = open(seq_outfile,'w')
    Egg_dict = defaultdict(list)
    Ori_dict = defaultdict(list)
    # Passage annotations that disqualify a record outright (mixed or
    # monkey-cell histories).  NOTE(review): hand-curated vocabulary —
    # verify against the source database's annotation conventions.
    excludepattern = re.compile ("UNKNOWN_1_RHMK|TMK1_MDCK|AMNIOTIC_1_PRHMK_2|M1_RII1,C5|R1_C|R1_S|RII1_C|RII1_S|RIIX_C|RX_C|MDCK_1_RHMK|NC|_MK1")
    unpassagedpattern = re.compile("LUNG|P0|OR_|ORIGINAL|CLINICAL|DIRECT")
    eggpattern = re.compile("AM[1-9]|E[1-7]|AMNIOTIC|EGG|EX|AM_[1-9]")
    # The four patterns below are compiled but unused in this function —
    # presumably shared vocabulary kept for reference.
    cellpattern = re.compile("S[1-9]|SX|SIAT|MDCK|C[1-9]|CX|C_[1-9]|M[1-9]|MX|X[1-9]|^X_$")
    siatpattern = re.compile("^S[1-9]_$|SIAT2_SIAT1|SIAT3_SIAT1")
    monkeypattern=re.compile("TMK|RMK|RHMK|RII|PMK|R[1-9]|RX")
    siatexcludepattern=re.compile("SIAT|SX|S[1-9]")
    CountSeq = 0
    Egg_PSGNumber_dict = defaultdict(list)
    for record in records:
        CountSeq += 1
        header = str(record.id)
        # Skip malformed headers (need exactly 5 '|'-separated fields).
        if header.count('|')!=4: continue
        ID = header.rsplit('|')[0]
        PSG = header.rsplit('|')[1]
        # Year: 4-digit slice of the date field after its leading character.
        year = header.rsplit('|')[-1][1:5]
        seq = str(record.seq)
        assert(isInt(year))
        # Drop sequences with unresolved residues.
        if 'X' in seq: continue
        if excludepattern.search(PSG): continue
        elif eggpattern.search(PSG):
            Egg_dict[year].append(seq)
            PSG_Number = ExtractEggPSGNumber(PSG)
            if PSG_Number: Egg_PSGNumber_dict['All'].append(PSG_Number)
            # Proline at the focal site gets a distinct FASTA name tag.
            if seq[pos]=='P':
                if PSG_Number: Egg_PSGNumber_dict['P'].append(PSG_Number)
                outfile.write('>Egg_P194_'+str(CountSeq)+"\n"+seq.replace('-','')+"\n")
            else:
                outfile.write('>Egg'+str(CountSeq)+"\n"+seq.replace('-','')+"\n")
        elif unpassagedpattern.search(PSG):
            Ori_dict[year].append(seq)
            outfile.write('>Ori'+str(CountSeq)+"\n"+seq.replace('-','')+"\n")
    outfile.close()
    return Egg_dict, Ori_dict, Egg_PSGNumber_dict
def ExtractPosOfInterest(seq_dict,pos,PSG,outfile):
    """Write per-year amino-acid counts at column *pos* as TSV rows.

    Rows are "year<TAB>PSG<TAB>aa<TAB>count", years in ascending numeric order.
    """
    for year in sorted(seq_dict, key=int):
        counts = defaultdict(int)
        for seq in seq_dict[year]:
            counts[seq[pos]] += 1
        for aa, count in counts.items():
            outfile.write("\t".join(map(str, [year, PSG, aa, count])) + "\n")
def ComplilePSGNumber(Egg_PSGNumber_dict, PSG_outfile):
    """Tabulate, per egg-passage number, the fraction of sequences with Pro.

    Egg_PSGNumber_dict: {'P': [...], 'All': [...]} passage-number lists.
    Writes a TSV with columns Passage Number / Pro / All / Proportion of Pro / SE,
    where SE is the binomial standard error sqrt(p*(1-p)/n).
    Fix: `dict.keys() + dict.keys()` is Python-2-only (keys views do not
    support `+` on Python 3); a set union is portable and equivalent.
    """
    outfile = open(PSG_outfile, 'w')
    P_dict = Counter(Egg_PSGNumber_dict['P'])
    All_dict = Counter(Egg_PSGNumber_dict['All'])
    PSGnumbers = sorted(set(P_dict.keys()) | set(All_dict.keys()))
    outfile.write("\t".join(['Passage Number', 'Pro', 'All', 'Proportion of Pro','SE'])+"\n")
    for PSGnumber in PSGnumbers:
        P_count = P_dict[PSGnumber]
        All_count = All_dict[PSGnumber]
        P_frac = float(P_count)/float(All_count)
        P_SE = (P_frac*(1-P_frac)/All_count)**0.5
        outfile.write("\t".join(map(str,[PSGnumber, P_count, All_count, P_frac, P_SE]))+"\n")
    outfile.close()
def wrapper(alnfilename, P194Lpos, outfilename, seqoutfilename, PSGoutfilename):
    """Run the full extraction pipeline for one alignment file."""
    out = open(outfilename, 'w')
    records = [record for record in SeqIO.parse(alnfilename, "fasta")]
    Egg_dict, Ori_dict, Egg_PSGNumber_dict = ParseSeqs(records, seqoutfilename, P194Lpos)
    out.write("\t".join(['Year', 'Passage', 'AA', 'Count']) + "\n")
    ExtractPosOfInterest(Egg_dict, P194Lpos, 'Egg', out)
    ExtractPosOfInterest(Ori_dict, P194Lpos, 'Ori', out)
    out.close()
    # The passage-number tabulation is only produced for the H3N2 alignment.
    if 'H3N2' in alnfilename:
        ComplilePSGNumber(Egg_PSGNumber_dict, PSGoutfilename)
def main():
    """Run the pipeline for both alignments (focal site 194, H3 numbering)."""
    jobs = [
        ('Fasta/HumanH3N2_All.aln', 225, 'result/HumanH3N2_Pos194YearVsPSG.tsv',
         'result/HumanH3N2_EggOri.fa', 'result/HumanH3N2_PSG.tsv'),
        ('Fasta/pdmH1N1_All.aln', 216, 'result/pdmH1N1_Pos194YearVsPSG.tsv',
         'result/pdmH1N1_EggOri.fa', 'result/pdmH1N1_PSG.tsv'),
    ]
    for job in jobs:
        wrapper(*job)

if __name__ == "__main__":
    main()
|
import asyncio
async def func():
    """Demo coroutine: sleeps 2 s (simulated I/O) and returns a result string."""
    print("开始执行协程内部工作")
    # await yields control to the event loop for the duration of the sleep;
    # asyncio.sleep() resolves to None, so `response` is None.
    response = await asyncio.sleep(2)
    print("执行完毕", response)
    return "执行结果"
if __name__ == '__main__':
    print("main开始")

    async def _main():
        # Tasks must be created inside a running event loop.  Passing bare
        # coroutines to asyncio.wait() was deprecated in Python 3.8 and
        # removed in 3.11, so wrap them in futures/tasks first.
        task_list = [
            asyncio.ensure_future(func()),
            asyncio.ensure_future(func()),
        ]
        # While one coroutine awaits its sleep, the loop runs the other;
        # wait() returns once both are finished.
        return await asyncio.wait(task_list)

    print("main结束")
    done, pending = ret1 = asyncio.run(_main())
    print(done)
|
# Converted from Python 2 `print` statements (a SyntaxError on Python 3)
# to the print() function; the printed output is unchanged.
print("I will now count my chickens:")
print("Hens", 2.5 + 3.0 / 6.0)  # make the operation
print("Roosters", 10.0 - 2.5 * 3.0 % 4.0)  # make the operation
print("Now I will count the eggs:")
print(3.0 + 2.0 + 1.0 - 5.0 + 4.0 % 2.0 - 1.0 / 4.0 + 6.0)  # make the operation
print("Is it true that 3.0 + 2.0 < 5.0 - 7.0?")
print(3.0 + 2.0 < 5.0 - 7.0)  # prints False
print("What is 3.0 + 2.0?", 3.0 + 2.0)
print("What is 5.0 - 7.0?", 5.0 - 7.0)
print("Oh, that's why it's False.")
print("How about some more.")
print("Is it greater?", 5.0 > -2.0)  # prints True or False
print("Is it greater or equal?", 5.0 >= -2.0)
print("Is it less or equal?", 5.0 <= -2.0)
'''
Created on 8. 5. 2014
@author: casey
'''
import inspect
import sys
import pkgutil
def loadClasses(mod_path, base_class, class_name_filter, skip_private_mod = True):
    """Import every module under *mod_path* and collect subclasses of base_class.

    Modules whose names start with '_' are skipped when skip_private_mod is
    True; classes listed in class_name_filter are excluded from the result.
    """
    result = []
    if mod_path not in sys.path:
        sys.path.append(mod_path)
    for loader, mod_name, ispkg in pkgutil.iter_modules(path=[mod_path]):
        if skip_private_mod and mod_name.startswith("_"):
            continue
        # Importing makes the module visible in sys.modules for filterClasses.
        __import__(mod_name)
        matches = filterClasses(mod_name, base_class, class_name_filter)
        if matches:
            result.extend(matches)
    return result
def filterClasses(mod_name, base, class_name_filter):
    """Return classes in sys.modules[mod_name] inheriting from *base*.

    class_name_filter may be a single name or a list of names to exclude.
    """
    if not isinstance(class_name_filter, list):
        class_name_filter = [class_name_filter]
    matches = []
    for name, obj in inspect.getmembers(sys.modules[mod_name], inspect.isclass):
        # Compare ancestor names (excluding the class itself) against base.
        ancestor_names = [cls.__name__ for cls in inspect.getmro(obj)[1:]]
        if base.__name__ in ancestor_names and name not in class_name_filter:
            matches.append(obj)
    return matches
|
'''
Daniel Loyd
'''
def run(train, labels, test):
    '''
    Pad *train* sequences to a common length and fit a one-unit Keras model.

    train -> iterable of 1-D numpy arrays (possibly of different lengths)
    labels -> numpy.array of binary targets
    test -> unused; kept for interface compatibility
    '''
    import numpy as np

    # Right-pad every sequence with zeros up to the longest sequence length.
    length = max([len(seq) for seq in train])
    padded = []
    for seq in train:
        diff = length - len(seq)
        if diff > 0:
            seq = np.append(seq, np.zeros(diff, np.int8))
        padded.append(seq)
    data = np.array(padded)

    from keras.layers import Dense
    from keras.models import Sequential
    from keras import optimizers

    print('running keras neural net')
    model = Sequential()
    # Input dimension must match the padded length (was hard-coded to 26,
    # which crashes whenever the longest training sequence differs).
    model.add(Dense(1, input_shape=(data.shape[1],)))
    model.compile(optimizer=optimizers.Adam(), loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(data, labels, epochs=1)
|
"""Script for choosing and entering board games."""
from json import load, dump
import sys
from pathlib import Path
from boardgamegeek import BGGClient, BGGItemNotFoundError
from terminalprompts import list_prompt, confirmation_prompt, input_prompt
FILE = "games.json"
FOLDER = "game_data"
PATH = Path().parent / FOLDER / FILE
MAIN_MESSAGE = "What would you like to do?"
MAIN_CHOICES = ["Enter new game", "Delete a game", "Quit"]
GAME_MESSAGE = "Please enter the name of the game to search or (r) to return:"
GAME_CHOICE_MESSAGE = "Which board game is it?"
GAME_CANCEL_OPTION = "None - try again!"
DELETE_MESSAGE = "Which board game would you like to delete?"
DELETE_CANCEL_OPTION = "None - don't delete anything!"
def ensure_file(file):
    """Create *file* containing an empty JSON list if it does not exist yet.

    Bug fixed: the file was opened with the default mode "r", which raises
    FileNotFoundError for the very file we are trying to create; it must be
    opened for writing.
    """
    if not file.is_file():
        with open(file, "w") as json_file:
            dump([], json_file, indent=4)
def add_game(game):
    """Append *game* (a BGG search result) to the JSON collection file."""
    with open(PATH) as json_file:
        collection = load(json_file)
    collection.append({"Game": game.name, "ID": game.id, "BGG name": game.name})
    with open(PATH, "w") as json_file:
        dump(collection, json_file, indent=4)
    print("Game added!")
def new_game():
    """Interactively search BoardGameGeek and add the chosen game."""
    client = BGGClient()
    while True:
        game_search = input_prompt(
            message=GAME_MESSAGE,
            validation_function=lambda x: True if x != "" else "Enter something!",
        )
        if game_search.lower() == "r":
            return
        games = client.search(game_search)
        game_names = [game.name for game in games]
        if not game_names:
            print("No games found")
            continue
        answer = list_prompt(
            message=GAME_CHOICE_MESSAGE, items=game_names + [GAME_CANCEL_OPTION]
        )
        if answer == GAME_CANCEL_OPTION:
            continue
        if confirmation_prompt(message=f"Are you sure you would like to add {answer}?"):
            add_game(games[game_names.index(answer)])
            return
def delete_game():
    """Interactively pick a stored game and remove it from the JSON file."""
    with open(PATH) as json_file:
        collection = load(json_file)
    names = [entry["BGG name"] for entry in collection]
    choice = list_prompt(
        message=DELETE_MESSAGE, items=sorted(names) + [DELETE_CANCEL_OPTION]
    )
    if choice == DELETE_CANCEL_OPTION:
        return
    if not confirmation_prompt(
        message=f"Are you sure you would like to delete {choice}?"
    ):
        return
    del collection[names.index(choice)]
    with open(PATH, "w") as json_file:
        dump(collection, json_file, indent=4)
def main():
    """Menu loop: dispatch the chosen action until the user quits."""
    ensure_file(PATH)
    actions = {"Enter new game": new_game, "Delete a game": delete_game}
    while True:
        answer = list_prompt(message=MAIN_MESSAGE, items=MAIN_CHOICES)
        if answer == "Quit":
            break
        action = actions.get(answer)
        if action is not None:
            action()

if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.