index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
987,600 | c820215de8b2c21c27f4ad742ac1c19d68b5a0c6 | import re
import regexs
def infoget(str1,re_dict):
    """Dispatch one raw log line to the matching parser.

    Only lines whose characters 24..30 read 'LOG_EMS' are considered.  The
    character immediately after the second ']' selects a parser through the
    module-level mes_type table and the corresponding compiled regex in
    re_dict.  Returns the parser's list (first element is a tag character)
    or the bare string 'q' when the line is not recognized.
    """
    substr=str1[24:31]
    if substr != 'LOG_EMS':
        return 'q'
    # position of the second ']' (see findn below); negative when absent
    idx=findn(']',str1,2)
    if idx>=0:
        nextidx=idx+1
        if nextidx<len(str1) and str1[nextidx] in mes_type:
            if str1[nextidx] == '=':
                # '=' lines are further disambiguated by the char two slots later
                if (nextidx+2)<len(str1) and str1[nextidx+2] == 'E':
                    return get_ems(re_dict['E'],str1)
                elif (nextidx+2)<len(str1) and str1[nextidx+2] == 'O':
                    # NOTE(review): re_dict is built in process_strs with a
                    # duplicate 'O' key, so this actually receives the
                    # cancel-response regex, not the order regex -- confirm.
                    return get_order(re_dict['O'],str1)
                else:
                    return 'q'
            else:
                return mes_type[str1[nextidx]](re_dict[str1[nextidx]],str1)
        else:
            return 'q'
    else:
        return 'q'
count_re=0  # module-level counter; never read or written in this chunk
def findn(substr,str1,n):
    """Return the index of the n-th occurrence of substr in str1.

    Returns -1 when str1 holds fewer than n occurrences (or when n <= 0).
    """
    pos = -1
    for _ in range(n):
        pos = str1.find(substr, pos + 1)
        if pos < 0:
            return pos
    return pos
def get_ems(reg,str):
    """Parse an EMS line with the compiled regex `reg`.

    Returns ['e', group1..group4, side, offset] on success, where side and
    offset collapse groups 5 and 6 into '0'/'1' flags, or ['q'] when the
    line does not match.  The None-guard was missing (the sibling parsers
    get_r_info/get_t_info/get_order all have one), so a non-matching line
    previously raised AttributeError.
    Note: parameter name `str` shadows the builtin; kept for interface
    stability with existing callers.
    """
    m = reg.match(str)
    if m is None:
        return ['q']
    side = '1' if m.group(5) >= '1' else '0'
    offset = '1' if m.group(6) >= '1' else '0'
    return ['e', m.group(1), m.group(2), m.group(3), m.group(4), side, offset]
def get_r_info(reg,str):
    """Extract an 'r' record (regex groups 3-5) from str; ['q'] on no match."""
    m = reg.match(str)
    if m is None:
        return ['q']
    g = m.group
    return ['r', g(3), g(4), g(5)]
def get_t_info(reg,str):
    """Extract a 't' record (regex groups 3-4) from str; ['q'] on no match."""
    m = reg.match(str)
    return ['q'] if m is None else ['t', m.group(3), m.group(4)]
def get_order(reg,str):
    """Extract an 'o' record (regex groups 3-8) from str; ['q'] on no match."""
    m = reg.match(str)
    if m is None:
        return ['q']
    return ['o'] + [m.group(i) for i in range(3, 9)]
def get_cancel_info(reg,str):
    """Extract a 'c' record (regex groups 2-5) from str.

    Returns the bare string 'q' (not ['q']) on no match, matching the
    original contract relied on by process_strs.
    """
    m = reg.match(str)
    if m is None:
        return 'q'
    g = m.group
    return ['c', g(2), g(3), g(4), g(5)]
def get_cancel_rsp(reg,str):
    """Extract a 'p' record (regex groups 2-6) from str; 'q' on no match."""
    m = reg.match(str)
    if m is None:
        return 'q'
    return ['p'] + [m.group(i) for i in range(2, 7)]
# Tag character -> parser function; '=' lines get extra disambiguation in infoget.
mes_type={'=':get_ems,'R':get_r_info,'T':get_t_info,'C':get_cancel_info,'O':get_cancel_rsp}
def process_strs(infos,notmatch,cancelOrders):
    """Parse raw log lines and correlate related events into records.

    infos: iterable of raw log lines.
    notmatch: output list collecting lines whose partner record was not found.
    cancelOrders: output list of cancel records, patched in place as
        responses arrive.
    Returns savedata: the accumulated EMS records, augmented as matches
    ('r' responses, 't' trades, 'o' order details) are found.
    """
    index=-1
    savedata=[]
    re_ems = re.compile(regexs.ems_info_regex)
    re_r = re.compile(regexs.r_info_regex)
    re_t = re.compile(regexs.t_info_regex)
    re_order = re.compile(regexs.order_info_regex)
    re_cancel=re.compile(regexs.cancel_regex)
    re_cancel_rsp=re.compile(regexs.cancel_Rsp_regex)
    # NOTE(review): the key 'O' appears twice, so the second entry wins and
    # re_order is unreachable through this dict -- infoget's get_order call
    # therefore runs with the cancel-response regex.  Looks like a bug; confirm.
    re_dict={'E':re_ems,'R':re_r,'T':re_t,'O':re_order,'C':re_cancel,'O':re_cancel_rsp}
    for str in infos:  # NOTE: loop variable shadows the builtin `str`
        datas=infoget(str,re_dict)
        if datas[0] == 'e':
            # new EMS record: append status '1', empty match id, 0 accumulator
            datas.append('1')
            datas.append('')
            datas.append(0)
            savedata.append(datas[1:10])
            index=index+1
        elif datas[0] == 'r':
            # response: find the newest still-open (status '1') record with this id
            reid = datas[1]+'_'+datas[2]
            beforeindex = index
            while beforeindex>=0:
                if reid == savedata[beforeindex][2] and savedata[beforeindex][6] == '1':
                    savedata[beforeindex][7]=datas[3]
                    savedata[beforeindex][6]='3'
                    break
                beforeindex=beforeindex-1
            if beforeindex < 0:
                notmatch.append(str)
        elif datas[0] == 't':
            # trade: accumulate the traded quantity on the matching record
            beforeindex = index
            while beforeindex>=0:
                if len(savedata[beforeindex])>=8 and savedata[beforeindex][7] == datas[1]:
                    if len(savedata[beforeindex]) >= 9:
                        savedata[beforeindex][8] = savedata[beforeindex][8]+int(datas[2])
                        # NOTE(review): status set to the int 4 here but to the
                        # strings '1'/'3' elsewhere -- confirm downstream compares.
                        savedata[beforeindex][6]=4
                    else:
                        print 'error'
                    break
                beforeindex=beforeindex-1
            if beforeindex < 0:
                notmatch.append(str)
        elif datas[0] == 'o':
            # order details: extend the matching record with the extra fields
            beforeindex = index
            while beforeindex>=0:
                if savedata[beforeindex][2] == datas[1] and savedata[beforeindex][3] == datas[2]:
                    savedata[beforeindex] = savedata[beforeindex]+datas[3:len(datas)]
                    break
                beforeindex=beforeindex-1
            if beforeindex < 0:
                notmatch.append(str)
        elif datas[0]=='c':
            # cancel request: record with initial status '1'
            datas.append('1')
            cancelOrders.append(datas[1:6])
        elif datas[0]=='p':
            # cancel response: patch the matching cancel record's status field
            beforeindex=len(cancelOrders)-1
            while beforeindex>=0:
                if cancelOrders[beforeindex][0] == datas[1] and cancelOrders[beforeindex][1] == datas[2] and cancelOrders[beforeindex][2] == datas[3] and cancelOrders[beforeindex][3] == datas[4] and len(datas)>=6:
                    cancelOrders[beforeindex][4]=datas[5]
                    break
                beforeindex=beforeindex-1
            if beforeindex<0:
                notmatch.append(str)
        else:
            # unrecognized line ('q'): deliberately ignored
            zhanwei=[]
            #print 'nouse'
    return savedata
|
987,601 | c67b98c5ce4e6550b1d41a147020ac1cecbba7b4 | from django.db import models
from django.conf import settings
from django.utils import timezone
from django.db.models import Q
User = settings.AUTH_USER_MODEL
class BlogPostQuerySet(models.QuerySet):
    """Chainable queryset helpers for BlogPost."""
    def published(self):
        """Rows whose publish_date is now or in the past."""
        now = timezone.now()
        return self.filter(publish_date__lte=now)
    def search(self, query):
        """Case-insensitive search across title, content, slug and author fields.

        Fix: the composed Q `lookup` was built but never used -- the method
        filtered on title only, leaving the other clauses as dead code.
        """
        lookup = (
            Q(title__icontains=query) |
            Q(content__icontains=query) |
            Q(slug__icontains=query) |
            Q(user__first_name__icontains=query) |
            Q(user__last_name__icontains=query) |
            Q(user__username__icontains=query)
        )
        return self.filter(lookup)
# Model manager to handle queary set
class BlogPostManager(models.Manager):
    """Default manager exposing the published()/search() helpers of BlogPostQuerySet."""
    def get_queryset(self):
        # Route every manager access through the custom queryset class.
        return BlogPostQuerySet(self.model, using=self._db)
    def published(self):
        return self.get_queryset().published()
    def search(self, query=None):
        qs = self.get_queryset()
        # No query at all -> empty queryset rather than everything.
        if query is None:
            return qs.none()
        return qs.published().search(query)
class BlogPost(models.Model):
    """A blog entry; the custom manager adds published()/search() helpers."""
    # SET_NULL keeps the post alive when its author account is deleted
    user = models.ForeignKey(User, default=1, null= True, on_delete=models.SET_NULL)
    image = models.ImageField(upload_to= 'media/', blank=True, null=True)
    title = models.TextField()
    content = models.TextField(blank=True, null=True)
    slug = models.SlugField(unique=True) # e.g. "hello rohit" -> "hello-rohit"
    publish_date = models.DateTimeField(auto_now=False, auto_now_add=False, null = True, blank = True)
    timestamp = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now=False means `updated` is never set automatically;
    # an "updated" field usually wants auto_now=True -- confirm intent.
    updated = models.DateTimeField(auto_now=False)
    objects = BlogPostManager()
    class Meta:
        # newest first: publish date, then update time, then creation time
        ordering = ['-publish_date', '-updated', '-timestamp']
    def get_absolute_url(self):
        """Canonical detail URL for this post."""
        return f"/blog/{ self.slug }"
    def get_edit_url(self):
        pattern = self.get_absolute_url()
        return f"{ pattern }/edit"
    # for more detail https://github.com/comargo/milkshop/blob/master/helpers/models.py
    def get_delete_url(self):
        pattern = self.get_absolute_url()
        return f"{ pattern }/delete"
    def __str__(self):
        return self.title
|
987,602 | d8308bd495f2ef21bdab9c435bcfcda428c6e4a3 | import pickle
import h5py
import numpy as np
from time import time
T = int(1e7)  # number of timesteps for the benchmark array
S = 10        # samples dimension
D = 25        # feature dimension
#array = np.random.rand(T, S, D)  # generation disabled: T*S*D floats is huge
class Solutions:
    """Thin wrapper holding an open h5py file handle in self.h5."""
    @classmethod
    def from_h5(cls, f_name):
        """Alternate constructor: attach to an existing HDF5 file (append mode)."""
        soln = cls()
        soln.h5 = h5py.File(f_name, 'a')
        return soln
    def __init__(self, a=None, f_name='./test_file.h5'):
        """Create a new HDF5 file holding `a` under the 'array' dataset.

        Generalized: the target path was hard-coded; the default value
        preserves the old behavior.  When a is None no file is opened
        (use from_h5 instead).
        """
        if a is not None:
            self.h5 = h5py.File(f_name, 'a')
            self.h5.create_dataset('array', data=a)
t0 = time()
soln = Solutions.from_h5('./test_file.h5')
print(f'soln object loaded in {time() - t0:.2f}s')
print(soln.h5['array'][0])
# Debug guard: everything below is intentionally unreachable while profiling
# the load path.  NOTE(review): remove when done -- also, pickling `soln`
# below would likely fail since it holds an open h5py.File handle; confirm.
assert False
print('Rand N generated')
t0 = time()
#soln = Solutions(array)
print(f'soln object made in {time() - t0:.2f}s')
t0 = time()
with open('./test_file.pkl', 'wb') as f:
    pickle.dump(soln, f)
print(f'picked in {time() - t0:.2f} s')
|
987,603 | 357ac4153fb2e0638e85a3c851c9855d34ef3813 | # importing the required module
import matplotlib.pyplot as plt
import numpy as np
import math
def f(x, n):
    """n-term Fourier partial sum: pi - 2*sum_{k=1..n} sin(k*x)/k (sawtooth)."""
    return math.pi + sum(math.sin(k * x) * (-2 / k) for k in range(1, n + 1))
# Values of N (number of Fourier terms) to plot, one subplot each.
N = [1, 2, 5, 10, 100]
# x axis: 1000 sample points over [0, 4*pi]; raise num for finer resolution.
# Hoisted out of the loop -- it is identical for every subplot.
xs = np.linspace(0.0, 4 * math.pi, num=1000)
for i, n in enumerate(N, start=1):
    # corresponding y axis values for the n-term partial sum
    y = np.array([f(x, n) for x in xs])
    plt.subplot(len(N), 1, i)
    # plotting the points
    plt.plot(xs, y)
    # axis labels and per-subplot title
    plt.xlabel('x - axis')
    plt.ylabel('y - axis')
    plt.title('N = ' + str(n))
# render all subplots
plt.show()
|
987,604 | 1ffc3dda86be6e9de28906b8aefa79277df69b92 | from app.app import *
from validate_email import validate_email
def validate_user_registration(json):
    """Ensure the required registration fields are non-blank.

    Returns True when valid, otherwise a (jsonify, 401) pair for the first
    missing field, checked in the same order as before.
    """
    required = (("first_name", "First name"), ("last_name", "Last name"),
                ("username", "Username"), ("password", "Password"))
    for key, label in required:
        if not json[key].strip():
            return jsonify({'Message': label + ' is required'}), 401
    return True
def validate_user_exist(json,users):
    """Reject registration when the email or username is already taken.

    All email collisions are checked before any username collision,
    mirroring the original two-pass order.  Returns True when unique.
    """
    for user in users:
        if json["email"] == user["email"]:
            return jsonify({"Message": 'user with this email address exist'}), 409
    for user in users:
        if json["username"] == user["username"]:
            return jsonify({"Message": "user with this username already exist"}), 409
    return True
def validate_user_email(json):
    """Check the email field is syntactically valid; True or (error, 401)."""
    if validate_email(json["email"]):
        return True
    return jsonify({'Message': 'Enter a valid email'}), 401
def validate_question(json):
    """Require non-blank title and content; True or (error, 401)."""
    for key, label in (("title", "Title"), ("content", "Content")):
        if not json[key].strip():
            return jsonify({'Message': label + ' is required'}), 401
    return True
def validate_answer(json):
    """Require a non-blank answer_body; True or (error, 401)."""
    if json["answer_body"].strip():
        return True
    return jsonify({'Message': 'Answer body is required'}), 401
|
987,605 | a9b516c459e3e5be445e68b1c43f39d7911017cb | """
首先,创建TensorFlow图,
然后,选择需要进行汇总(summary)操作的节点。
然后,在运行之前,合并所有的summary操作。
最后,运行汇总的节点,并将运行结果写入到事件文件中。
"""
import os
import numpy as np
import tensorflow as tf
from mlp import Net, Data, Tools
class Summary(object):
    """Wraps Net training with TensorBoard summary logging (TF1 API)."""
    def __init__(self, data, model_path="model", summary_path="summary"):
        self.data = data
        self.batch_size = self.data.batch_size
        self.class_number = self.data.class_number
        self.model_path = model_path
        # 1. summary output directory
        self.summary_path = summary_path
        self.net = Net(batch_size=self.batch_size, data_size=self.data.data_size, class_number=self.class_number)
        # 2.1. add a scalar summary
        tf.summary.scalar("loss", self.net.loss)
        # 2.2. add an image summary
        # NOTE(review): assumes the flat input reshapes to a square
        # (data_size x data_size) image -- confirm against Data.
        self.x_2 = tf.reshape(self.net.x, shape=[self.batch_size, self.data.data_size, self.data.data_size, 1])
        tf.summary.image("x", self.x_2)
        # 2.3. add a histogram summary
        tf.summary.histogram("w", self.net.softmax)
        # 2.4. group summaries hierarchically with name scopes
        with tf.name_scope("summary_loss_1"):
            tf.summary.scalar("loss_1_1", self.net.loss)
            tf.summary.scalar("loss_1_2", self.net.loss)
            tf.summary.scalar("loss_1_3", self.net.loss)
        with tf.name_scope("summary_loss_2"):
            tf.summary.scalar("loss_2_1", self.net.loss)
            tf.summary.scalar("loss_2_2", self.net.loss)
            tf.summary.scalar("loss_2_3", self.net.loss)
        # 3. merge every summary op added above:
        # "deposit everywhere, withdraw once" principle:
        # summaries can be added while building any layer of the network;
        # once all are added, collect them with tf.summary.merge_all()
        # before initializing the session.
        self.merged_summary_op = tf.summary.merge_all()
        self.saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=10)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
        pass
    def train(self, epochs=10, save_freq=2):
        """Train for `epochs`, writing merged summaries every step and
        checkpointing every `save_freq` epochs."""
        self.sess.run(tf.global_variables_initializer())
        # 4. create the writer and the log file: Creates a `FileWriter` and an event file.
        summary_writer = tf.summary.FileWriter(self.summary_path, self.sess.graph)
        # 5. export the graph structure
        # (equivalent to passing `graph` when constructing tf.summary.FileWriter)
        # summary_writer.add_graph(self.sess.graph)
        for epoch in range(epochs):
            for step in range(self.data.number_train):
                x, label = self.data.next_train_batch()
                # 6. run the merged summary node
                _, summary_now = self.sess.run([self.net.train_op, self.merged_summary_op],
                                               feed_dict={self.net.x: x, self.net.label: label})
                # 7. write the result to the event file
                summary_writer.add_summary(summary_now, global_step=epoch * self.data.number_train + step)
            self.test("{}".format(epoch))
            if epoch % save_freq == 0:
                self.saver.save(self.sess, os.path.join(self.model_path, "model_epoch_{}".format(epoch)))
        pass
    def test(self, info):
        """Evaluate accuracy over the whole test set, log it, and return it."""
        test_acc = 0
        for i in range(self.data.number_test):
            x, label = self.data.next_test_batch(i)
            prediction = self.sess.run(self.net.prediction, {self.net.x: x})
            test_acc += np.sum(np.equal(label, prediction))
        test_acc = test_acc / (self.batch_size * self.data.number_test)
        Tools.print_info("{} {}".format(info, test_acc))
        return test_acc
    pass
if __name__ == '__main__':
    # Entry point: train the MLP on MNIST with summary logging enabled.
    runner = Summary(Data(batch_size=64, class_number=10, data_path="../data/mnist"))
    runner.train()
|
987,606 | 7a2aba899e6094ea142230d7cc282cbac7285dd9 | import logging
import os
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Literal, Optional, List, Dict, Any, DefaultDict
import torch
from mila.factories import AbstractConfiguration
from .helpers import SuperFactory
from .observers import AbstractEventHandler, EventManager, DifferentialPrivacy
@dataclass
class Config(AbstractConfiguration):
    """Full run configuration; component sections are SuperFactory specs."""
    # --- component definitions (instantiated elsewhere via SuperFactory) ---
    model: Dict[str, Any]
    loader: Dict[str, Any]
    splitter: Dict[str, Any]
    featurizers: List[Dict[str, Any]]
    transformers: List[Dict[str, Any]]
    criterion: Dict[str, Any]
    optimizer: Dict[str, Any]
    scheduler: Dict[str, Any]
    is_stepwise_scheduler: Optional[bool] = True
    is_finetuning: Optional[bool] = False
    checkpoint_path: Optional[str] = None
    threshold: Optional[float] = None
    # --- training / evaluation setup ---
    cross_validation_folds: int = 5
    train_split: str = "train"
    train_metrics: List[str] = field(default_factory=lambda: [])
    test_split: str = "test"
    test_metrics: List[str] = field(default_factory=lambda: [])
    epochs: int = 100
    batch_size: int = 32
    # --- hardware selection ---
    use_cuda: bool = True
    enabled_gpus: List[int] = field(default_factory=lambda: [0])
    # --- caching / logging / observers ---
    cache_location: str = "/tmp/federated/"
    clear_cache: bool = False
    log_level: Literal["debug", "info", "warn", "error", "critical"] = "info"
    log_format: str = ""
    log_frequency: int = 20
    observers: DefaultDict[str, List[Dict]] = field(default_factory=lambda: defaultdict(list))
    differential_privacy: Dict[str, Any] = field(default_factory=lambda: {"enabled": False})
    target_metric: str = "roc_auc"
    optuna_trials: int = 1000
    subset: Optional[Dict[str, Any]] = None
    visualizer: Optional[Dict[str, Any]] = None
    def should_parallelize(self) -> bool:
        """True when CUDA is requested, available, and more than one GPU is enabled."""
        return torch.cuda.is_available() and self.use_cuda and len(self.enabled_gpus) > 1
    def get_device(self) -> torch.device:
        """Primary torch device.  With several GPUs enabled, device 0 is
        chosen (the parallel master); with exactly one, that GPU is used."""
        device_id = self.enabled_gpus[0] if len(self.enabled_gpus) == 1 else 0
        device_name = "cuda:" + str(device_id) if torch.cuda.is_available() and self.use_cuda else "cpu"
        return torch.device(device_name)
    def __post_init__(self):
        # self.output_path is presumably provided by AbstractConfiguration -- confirm.
        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path)
        logging.basicConfig(format=self.log_format, level=self.log_level.upper())
        # re-register event observers from scratch for this configuration
        EventManager.flush()
        for event_name, event_handlers in self.observers.items():
            for event_handler_definition in event_handlers:
                event_handler = SuperFactory.create(AbstractEventHandler, event_handler_definition)
                EventManager.add_event_listener(event_name=event_name, handler=event_handler)
        if self.differential_privacy["enabled"]:
            DifferentialPrivacy.setup(**self.differential_privacy["options"])
    def cloned_update(self, **kwargs) -> "Config":
        """Deep-copied clone of this config with the given fields overridden."""
        options = deepcopy(vars(self))
        options.update(**kwargs)
        return Config(**options)
|
987,607 | 1f6db4fedb05db30a0a4435a0137d4dd43f6a058 | Temp_levels = ['VC', 'C', 'W', 'H', 'VH'] # very cold,cold,warm,hot,very hot
Humidity_levels = ['VD', 'D', 'N', 'W', 'VW']# very dry,dry,normal,wet,very wet
Power_Levels = ['VL', 'L', 'N', 'H', 'VH']#very low,low,normal,high,very high
def fuzzify_temperature(value):
if value < 8.0:
return 'VC'
elif value >= 9.0 and value < 20.0:
return 'C'
elif value >= 20.0 and value < 25.0:
return 'W'
elif value >= 26.0 and value < 34.0:
return 'H'
else:
return 'VH'
def fuzzify_humidity(value):
    """Map a humidity reading (1-10 scale, validated by the caller) to one
    of Humidity_levels.

    Fix: the original tests had gaps (2 <= v < 3, 5 <= v < 6, 7 <= v < 8
    all fell through to 'VW' -- even the integer inputs 2, 5 and 7); bands
    are now contiguous, with gap values joining the adjacent band.
    """
    if value < 3.0:
        return 'VD'
    elif value < 6.0:
        return 'D'
    elif value < 8.0:
        return 'N'
    elif value < 10.0:
        return 'W'
    else:
        return 'VW'
def defuzzify(value):
    """Convert a fuzzy power level into watts; unrecognized labels map to 250.0."""
    watts = {'VL': 50.0, 'L': 100.0, 'N': 150.0, 'H': 200.0}
    return watts.get(value, 250.0)
def compute_fuzzy_power_amount(temperature_degree_fuzzy, humidity_level_fuzzy):
    """Fuzzy inference: (temperature level, humidity level) -> power level.

    Returns one of Power_Levels, or the string "No matching rule" when the
    pair is not covered (typo fixed: the message used to read
    "No matchinf rule").  The rule base covers all 25 combinations of the
    defined levels, so the fallback only triggers on invalid labels.
    """
    rule_map = {
        ('VC', 'VD'): 'H',
        ('VC', 'D'): 'H',
        ('VC', 'N'): 'VH',
        ('VC', 'W'): 'VH',
        ('VC', 'VW'): 'VH',
        ('C', 'VD'): 'N',
        ('C', 'D'): 'H',
        ('C', 'N'): 'H',
        ('C', 'W'): 'VH',
        ('C', 'VW'): 'VH',
        ('W', 'VD'): 'VL',
        ('W', 'D'): 'VL',
        ('W', 'N'): 'VL',
        ('W', 'W'): 'L',
        ('W', 'VW'): 'N',
        ('H', 'VD'): 'N',
        ('H', 'D'): 'N',
        ('H', 'N'): 'H',
        ('H', 'W'): 'H',
        ('H', 'VW'): 'VH',
        ('VH', 'VD'): 'L',
        ('VH', 'D'): 'N',
        ('VH', 'N'): 'H',
        ('VH', 'W'): 'VH',
        ('VH', 'VW'): 'VH',
    }
    fuzzy_output = rule_map.get(
        (temperature_degree_fuzzy, humidity_level_fuzzy))
    if fuzzy_output is None:
        print("Case not covered for given input")
        return "No matching rule"
    else:
        return fuzzy_output
def power_in_watts(temperature_degree, humidity_level):
    """Fuzzy controller front-end: temperature + humidity -> power (watts).

    Humidity outside [1, 10] yields the string "Invalid input".
    """
    if not (1.0 <= humidity_level <= 10.0):
        print("Invalid value for humidity level : ", humidity_level)
        return "Invalid input"
    fuzzy_temp = fuzzify_temperature(temperature_degree)
    fuzzy_hum = fuzzify_humidity(humidity_level)
    return defuzzify(compute_fuzzy_power_amount(fuzzy_temp, fuzzy_hum))
|
987,608 | 7230c394c9f8aba903e78a9436af6d229b1aa092 | import cv2 as cv
import numpy as np
class ASF:
    """
    Alternate sequential filtering: a morphological opening followed by a
    closing, applied to successive vertical bands of the image.
    """
    def __init__(self, img):
        self.img = img
        self.kernel = np.ones((5, 5), np.uint8)
        self.scroll = 10  # band width in pixels

    def opening(self, img):
        """
        Morphological opening (erosion then dilation) with the 5x5 kernel.
        """
        return cv.morphologyEx(img, cv.MORPH_OPEN, self.kernel)

    def closing(self, img):
        """
        Morphological closing (dilation then erosion) with the 5x5 kernel.
        """
        return cv.morphologyEx(img, cv.MORPH_CLOSE, self.kernel)

    def get_ASF(self):
        """
        Apply opening + closing to each vertical band of the image in place.

        Fix: the loop bound used self.img.shape[0] (rows) while the slices
        select columns; for non-square images this either skipped columns
        or produced empty slices.  It now iterates over shape[1] (columns).
        """
        for i in range(0, self.img.shape[1], self.scroll):
            band = slice(i, i + self.scroll)
            self.img[:, band] = self.opening(self.img[:, band])
            self.img[:, band] = self.closing(self.img[:, band])
        return self.img
|
987,609 | 23060450b5d92be0b23dd1a3977fbf04663bab0f | # -*- coding:utf-8 -*-
import os
import imghdr
import cv2
from sys import argv
#This script splits the images into a training set and a validation set,
#writes each image's class id into txt files in the format Caffe expects,
#and also builds a class-id lookup table (dictionary.txt).
#The script takes two arguments, path and direction:
#path: the source directory to scan
#direction: the directory the images are copied into
path = argv[1]
direction = argv[2]
if os.path.isdir(path):
    list = os.listdir(path)  # NOTE: shadows the builtin `list`
    # running class id for the leaf species
    num = 0
    for li in list:
        # li is one leaf class (a sub-directory name)
        if li == '.DS_Store':
            continue
        new_path = os.getcwd()+'/'+path+'/'+li
        if os.path.isdir(new_path):
            li_1 = os.listdir(new_path)
            # NOTE(review): the -1 presumably discounts a .DS_Store entry;
            # wrong on directories without one -- confirm.
            length = len(li_1)-1
            # create the output directory if missing
            if os.path.exists(os.getcwd()+'/'+direction)==0:
                os.makedirs(os.getcwd()+'/'+direction)
            id = 0  # NOTE: shadows the builtin `id`
            # iterate the images of this class
            for img in li_1:
                src = os.getcwd()+'/'+path+'/'+li+'/'+img
                img_path = direction+'/'+img
                dir_path = os.getcwd()+'/'+direction+'/'+img
                type = imghdr.what(src)  # NOTE: shadows the builtin `type`
                if type <> 'jpeg' and type <> 'png':
                    print type
                    continue
                # first 3/4 of each class goes to the training split
                if id < 3*length/4:
                    # print os.getcwd()+'/'+'class_resize/'+li+'/'+img+' '+li
                    with open(os.getcwd()+'/'+'train.txt','a') as f:
                        f.write(img_path+' '+str(num)+'\n')
                    cv_img = cv2.imread(src)
                    cv2.imwrite(dir_path,cv_img)
                else:
                    # remainder goes to the validation split
                    with open(os.getcwd()+'/'+'validata.txt','a') as f:
                        f.write(img_path+' '+str(num)+'\n')
                    cv_img = cv2.imread(src)
                    cv2.imwrite(dir_path,cv_img)
                id = id+1
            # record class id -> class name mapping
            with open(os.getcwd()+'/'+'dictionary.txt','a') as f:
                f.write(str(num)+' '+li+'\n')
            num += 1
print 'all done'
987,610 | c9b3894445d1053b722be96feb9a1be55f0fc306 | '''
Created on Jun 7, 2011
@author: rp2007
'''
#from Mother import *
import pylab as P #@UnusedImport
import numpy as N #@UnusedImport
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.colors as cols
import matplotlib.gridspec as gridspec
import random as R
from mpl_toolkits.mplot3d import Axes3D
from itertools import *
from matplotlib.collections import PolyCollection
from matplotlib.colors import colorConverter
import matplotlib.pyplot as plt
class Plotter:
    def cmap_double_discretize(self, cmap_bottom, cmap_top, num_bins, split=.5):
        """Build a discretized two-part colormap: cmap_bottom below `split`,
        cmap_top above, each sampled into num_bins discrete steps.

        Returns a new LinearSegmentedColormap with 1024 quantization levels.
        """
        # sanity check
        assert split < 1 and split > 0
        # set up the data structure
        cdict = {lab: [] for lab in ('red','green','blue')}
        # do this in a fancy loop to a) save typing, b) make it easy to
        # retrofit to do arbitrary splits
        for cmap, ends in zip((cmap_bottom, cmap_top), ((0, split), (split, 1))):
            # run over the _whole_ range for each color map
            colors_i = N.concatenate((N.linspace(0, 1., num_bins), (0.,0.,0.,0.)))
            # map the color
            colors_rgba = cmap(colors_i)
            # get the values
            indices = N.linspace(ends[0], ends[1], num_bins+1, endpoint=True)
            # cdict rows are (index, color-below, color-above); xrange is Python 2
            for ki,key in enumerate(('red','green','blue')):
                cdict[key].extend((indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(num_bins+1))
        # print cdict
        # NOTE(review): `cmap` below is the loop variable, so the generated
        # name always derives from cmap_top -- confirm that is intended.
        # Return colormap object.
        return cols.LinearSegmentedColormap(cmap.name + "_%d"%num_bins, cdict, 1024)
def getColorMap(self, custom_cmap=''):
# # example 2: use the "fromList() method
# startcolor1 = '#0000ff' # black
# midcolor1 = '#000080' # mid blue
# endcolor1 = '#000000' # bright blue
# cmap1 = cols.LinearSegmentedColormap.from_list('own2',[startcolor1,midcolor1,endcolor1])
#
# # example 2: use the "fromList() method
# startcolor2 = '#000000' # black
# midcolor2 = '#008000' # mid green
# endcolor2 = '#00ff00' # light green
# cmap2 = cols.LinearSegmentedColormap.from_list('own2',[startcolor2,midcolor2,endcolor2])
#
# bicmap = self.cmap_double_discretize(cmap1, cmap2, 10, 0.5)
# example 2: use the "fromList() method
# startcolor2 = '#ffffff' # black
# midcolor2 = '#3b7054' # mid green
# endcolor2 = '#123624' # light green
# cmap2 = cols.LinearSegmentedColormap.from_list('own2',[startcolor2,midcolor2,endcolor2])
cmap2=plt.cm.PuOr_r
return cmap2
    def plotGFP(self,cell, fileName=''):
        """Plot one cell's GFP trace over time, marking division frames.

        Saves to fileName when given, otherwise shows interactively.
        """
        P.figure(num=None, figsize=(8, 4), dpi=300, facecolor='w', edgecolor='k')
        P.xlabel("Time (frames)")
        P.ylabel("GFP")
        P.title('Pos: %s Cell: %s ($ %s ^\circ C$)' %(cell.pos,cell.id, cell.temperature))
        # P.title('Cell: '+cell.label)
        #Plot divisions (frames whose division flag equals 1)
        idivision = [item for item in range(len(cell.divisions)) if cell.divisions[item] == 1]
        frame_division = [cell.frames[i] for i in idivision]
        GFP_division = [cell.GFP[i] for i in idivision]
        P.plot(frame_division, GFP_division, 'go', markersize=4)
        P.plot(cell.frames, cell.GFP, 'g-')
        # fixed axes so figures are comparable across cells
        P.ylim([0,100])
        P.xlim([0, 237])
        if len(fileName)>0:
            P.savefig(fileName)
        else:
            P.show()
    def plotNormGFP(self,cell, meanGFP=1, fileName=''):
        """Plot one cell's GFP deviation from meanGFP over time, with divisions.

        Saves to fileName when given, otherwise shows interactively.
        """
        P.figure(num=None, figsize=(8, 4), dpi=300, facecolor='w', edgecolor='k')
        P.xlabel("Time (frames)")
        P.ylabel("Deviation from mean GFP")
        P.title('Pos: %s Cell: %s ($ %s ^\circ C$)' %(cell.pos,cell.id, cell.temperature))
        normGFP=[gfp-meanGFP for gfp in cell.GFP]
        #Plot divisions
        idivision = [item for item in range(len(cell.divisions)) if cell.divisions[item] == 1]
        frame_division = [cell.frames[i] for i in idivision]
        GFP_division = [normGFP[i] for i in idivision]
        P.plot(frame_division, GFP_division, 'ko', markersize=4)
        P.fill_between(cell.frames, normGFP, 0, color='g')
        # fixed axes for cross-cell comparability
        P.ylim([-50,50])
        P.xlim([0, 237])
        if len(fileName)>0:
            P.savefig(fileName)
        else:
            P.show()
def plotAllGFP(self,cells, dirName=''):
for cell in cells:
fileName=dirName+''+str(cell.temperature)+'C_pos'+str(cell.pos)+'_cell'+str(cell.id)+'.png'
self.plotGFP(cell, fileName)
def plotAllNormGFP(self,cells, meanGFP, dirName=''):
for cell in cells:
fileName=dirName+'norm_'+str(cell.temperature)+'C_pos'+str(cell.pos)+'_cell'+str(cell.id)+'.png'
self.plotNormGFP(cell, meanGFP, fileName)
    def plotHistogram(self,cells, meanGFP=1, minGFP=0, maxGFP=100, numBins=100,dirName=''):
        """Histogram of per-frame GFP deviations from meanGFP, pooled over cells."""
        GFPs=[]
        for cell in cells:
            GFPs.extend(cell.GFP[:])
        GFPs=[gfp-meanGFP for gfp in GFPs]
        P.figure(num=None, figsize=(8, 4),dpi=300, facecolor='w', edgecolor='k')
        P.xlabel("Deviation from mean GFP ")
        P.ylabel("Frequency")
        # NOTE(review): `cell` is the leftover loop variable, so the last
        # cell's temperature labels the whole population -- confirm the cells
        # share one temperature.
        P.title(' $'+str(cell.temperature)+'^\circ C$')
        bins=N.arange(minGFP-meanGFP, maxGFP-meanGFP, (maxGFP-minGFP)/numBins)
        # normed=1 is the deprecated density flag of older matplotlib
        n, bins, patches = P.hist(GFPs, bins, normed=1, histtype='stepfilled') #@UnusedVariable
        P.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
        # P.xlim([N.min(bins),N.max(bins)])
        P.xlim([minGFP-meanGFP,maxGFP-meanGFP])
        # P.ylim([N.min(n),1.1*N.max(n)])
        P.ylim([0., .12])
        if len(dirName)>0:
            fileName=dirName+'hist_'+str(cells[0].temperature)+'C.png'
            P.savefig(fileName)
        else:
            P.show()
    def plotHistogram3D(self,temp_cells, meanGFP=1, minGFP=0, maxGFP=100, numBins=100,dirName=''):
        """3D bar histograms of log10 GFP intensity, one slice per temperature."""
        fig = P.figure(num=None, figsize=(8, 8),dpi=300, facecolor='w', edgecolor='k')
        ax = fig.add_subplot(111,projection='3d')
        font = {'family' : 'Arial',
        'weight' : 'normal',
        'size' : 12}
        P.rc('font', **font)
        temperatures=[29, 31, 33, 35, 37]
        colors = cycle(["#feedde", "#fdbe85","#fd8d3c", "#e6550d", "#a63603"])
        cc = lambda arg: colorConverter.to_rgba(arg, alpha=0.6)
        verts = []
        for z, cells in zip(temperatures, temp_cells):
            GFPs=[]
            for cell in cells:
                GFPs.extend(cell.GFP[:])
            GFPs=[gfp-meanGFP for gfp in GFPs] #Subtract background
            GFPs=N.log10(GFPs)
            # NOTE(review): maxGFP/minGFP parameters are overwritten here,
            # so the arguments only affect the first part of bin placement.
            maxGFP=2. ####TEMP
            minGFP=N.min(GFPs)
            bins=N.arange(minGFP-meanGFP, maxGFP-meanGFP, (maxGFP-minGFP)/numBins)
            #n, bins, patches = P.hist(GFPs, bins, normed=1, histtype='stepfilled') #@UnusedVariable
            histo, bin_edges = N.histogram(GFPs,bins,density=True)
            xs=bins[:-1]
            ys=histo
            #
            #ax.plot(xs, N.ones(xs.shape)*z, ys, color="red", alpha=.2)
            c=next(colors)
            ax.bar(xs, ys, zs=z, zdir='y', color=c, edgecolor='black', alpha=0.9, width=(maxGFP-minGFP)/numBins)
            ax.view_init(elev=16., azim=-56.)
            verts.append(list(zip(xs, ys)))
        poly = PolyCollection(verts, facecolors = [cc("Yellow"),cc("Orange"), cc("OrangeRed"), cc("Red"),cc("DarkRed")], closed=False)
        poly.set_alpha(.9)
        #ax.add_collection3d(poly, zs=temperatures, zdir='y')
        ax.set_yticks(temperatures)
        ax.set_xlabel('GFP intensity (log10 scale)', fontsize=14)
        ax.set_ylabel('Temperature ($^\circ$C)', fontsize=14)
        ax.set_zlabel('Probability density', fontsize=14)
        if len(dirName)>0:
            fileName=dirName+'histogram_temperature.pdf'
            P.savefig(fileName)
        else:
            P.show()
    def plotHistogram3Dsim(self,simGFPs, meanGFP=1, minGFP=0, maxGFP=100, numBins=100,dirName=''):
        """3D ridge plot of simulated GFP histograms, one slice per noise level."""
        fig = P.figure(num=None, figsize=(8, 8),dpi=300, facecolor='w', edgecolor='k')
        ax = fig.add_subplot(111,projection='3d')
        font = {'family' : 'Arial',
        'weight' : 'normal',
        'size' : 12}
        P.rc('font', **font)
        # the y axis here carries noise intensities, not temperatures
        temperatures=[.12, .14, .16, .18, .2]
        colors = cycle(["#feedde", "#fdbe85","#fd8d3c", "#e6550d", "#a63603"])
        verts = []
        cc = lambda arg: colorConverter.to_rgba(arg, alpha=0.6)
        for z, GFPs in zip(temperatures, simGFPs):
            #GFPs=[]
            #for cell in cells:
            #    GFPs.extend(cell.GFP[:])
            #GFPs=[gfp-meanGFP for gfp in GFPs] #Subtract background
            #
            GFPs=N.log10(GFPs)
            bins=N.arange(minGFP-meanGFP, maxGFP-meanGFP, (maxGFP-minGFP)/numBins)
            #n, bins, patches = P.hist(GFPs, bins, normed=1, histtype='stepfilled') #@UnusedVariable
            histo, bin_edges = N.histogram(GFPs,bins,density=True)
            xs=bins[:-1]
            ys=histo
            #xs = N.arange(20)
            #ys = N.random.rand(20)
            # You can provide either a single color or an array. To demonstrate this,
            # the first bar of each set will be colored cyan.
            #cs = [c] * len(xs)
            #cs[0] = 'c'
            #ax.bar(xs, ys, zs=z, zdir='y', color=next(colors), alpha=0.7, width=(maxGFP-minGFP)/numBins)
            ax.plot(xs, N.ones(xs.shape)*z, ys, color="black", alpha=.2)
            #ax.fill_between(xs, N.ones(xs.shape)*z, ys, alpha=.5, antialiased=True, color=next(colors))
            ax.view_init(elev=16., azim=-56.)
            verts.append(list(zip(xs, ys)))
        poly = PolyCollection(verts, facecolors = [cc("#feedde"),cc("#fdbe85"), cc("#fd8d3c"), cc("#e6550d"),cc("#a63603")])
        poly.set_alpha(.9)
        ax.add_collection3d(poly, zs=temperatures, zdir='y')
        ax.set_yticks(temperatures)
        ax.set_xlabel('FliAZ (log10 scale)', fontsize=14)
        ax.set_ylabel('Noise intensity ($\Omega$)', fontsize=14)
        ax.set_zlabel('Probability density', fontsize=14)
        # ax.set_zlim((0.02, 0.32))
        if len(dirName)>0:
            fileName=dirName+'histogram_noise.pdf'
            P.savefig(fileName)
        else:
            P.show()
    def plotHeatMap(self,all_cells, num=200, meanGFP=1, dirName=''):
        """Per-temperature heat map of GFP deviation over time, cells sorted
        by their mean deviation; missing frames are masked out (gray)."""
        fig, ax1 = P.subplots(len(all_cells), figsize=(8,16), sharex=True, facecolor='w', edgecolor='k')
        gs = gridspec.GridSpec(5, 1, height_ratios=[0.76,0.82,0.84,1.33, 1.24])
        gs.update(hspace=0.07)
        # P.subplots_adjust(hspace = .05)
        font = {'family' : 'Arial',
        'weight' : 'normal',
        'size' : 14}
        P.rc('font', **font)
        for i, celltemp in enumerate(all_cells):
            GFPs=[]
            mGFP=[]
            for cell in celltemp:
                timeGFP=[(gfp-meanGFP) for gfp in cell.GFP[0:num]]
                mGFP.append(N.mean(timeGFP))
                # pad short traces with NaN so every row spans `num` frames (py2 xrange)
                timeGFP.extend(list(N.NaN for i in xrange(0,num-len(cell.GFP))))
                GFPs.append(timeGFP)
            # sort the rows (cells) by mean deviation
            idx = N.array(mGFP).argsort()
            nGFPs=N.array(GFPs)
            D = N.ma.masked_invalid(N.transpose(nGFPs[idx]))
            ybin = N.linspace(0, len(GFPs), len(GFPs)+1)
            xbin = N.linspace(0, num, num+1)
            cmap = self.getColorMap()
            cmap.set_bad(color = [.25, .25, .25], alpha = 1.)
            # heatmap = ax.pcolor(N.array(GFPs[:]), cmap=self.getColorMap())
            ax = P.subplot(gs[i])
            cax=ax.pcolormesh(xbin, ybin, D.T, cmap = cmap, edgecolors = 'None', vmin = -meanGFP, vmax = meanGFP)
            ax.set_xlim([0, num])
            ax.set_ylim([0, len(GFPs)])
            ax.set_ylabel('Cells ($'+str(cell.temperature)+'^\circ C$)', fontsize=16)
            ax.set_xticks([])
            ax.set_yticks(range(0,len(GFPs),25))
        # only the bottom-most axes gets x labels/ticks
        ax.set_xlabel("Time (frames)", fontsize=16)
        ax.set_xticks(range(0,num,50))
        cbaxes = fig.add_axes([0.125,0.908, 0.775, 0.015])
        cbar = fig.colorbar(cax, ticks=[-meanGFP, -meanGFP/2, 0, meanGFP/2, meanGFP], orientation='horizontal', cax = cbaxes)
        cbar.ax.set_xticklabels(['-100%','-50%', 'Mean', '50%', '100%'])# horizontal colorbar
        cbar.ax.xaxis.set_ticks_position('top')
        cbar.ax.text(0.5, 2.2,"Deviation from population-level mean GFP (%)",fontsize=16, ha='center')
        if len(dirName)>0:
            fileName=dirName+'heatmap.png'
            P.savefig(fileName)
        # else:
        #     P.show()
    def plotAverageGFP(self,all_cells, num=200, meanGFP=1, dirName=''):
        """Per-temperature horizontal bar chart of each cell's average GFP
        deviation within the first `num` frames, sorted ascending."""
        P.subplots(len(all_cells), figsize=(8,16), sharex=True, facecolor='w', edgecolor='k')
        gs = gridspec.GridSpec(5, 1, height_ratios=[0.76,0.82,0.84,1.33, 1.24])
        gs.update(hspace=0.07)
        # P.subplots_adjust(hspace = .05)
        font = {'family' : 'Arial',
        'weight' : 'normal',
        'size' : 14}
        P.rc('font', **font)
        for i, celltemp in enumerate(all_cells):
            this_temperature=[]
            this_framesAboveMean=[]
            GFPs=[]
            for cell in celltemp:
                timeGFP=[(gfp-meanGFP) for gfp in cell.GFP[0:num]]
                # pad short traces with NaN to a fixed length (py2 xrange)
                timeGFP.extend(list(N.NaN for i in xrange(0,num-len(cell.GFP))))
                this_framesAboveMean.append(N.array(sum(i > 0 for i in timeGFP)))
                this_temperature.append(cell.temperature)
                GFPs.append(timeGFP)
            nGFPs = N.ma.masked_invalid(N.transpose(N.array(GFPs)))
            idx = N.mean(nGFPs,axis=0).argsort()
            mGFPs=N.mean(nGFPs,axis=0)
            ax = P.subplot(gs[i])
            # cax=ax.barh(range(0,len(idx)),mGFPs[idx], height=1, color="black")
            cmap = self.getColorMap()
            for j, m in enumerate(mGFPs[idx]):
                # map deviation in [-meanGFP, meanGFP] onto the colormap's [0, 1]
                r=(1+m/meanGFP)/2
                ax.barh(j,m, height=1,color=cmap(r), edgecolor=cmap(r))
            ax.set_xlim([-meanGFP, meanGFP])
            ax.set_ylim([0, len(GFPs)])
            ax.set_ylabel('Cells ($'+str(cell.temperature)+'^\circ C$)', fontsize=16)
            ax.set_xticks([])
            ax.set_yticks(range(0,len(GFPs),25))
            if i==0:
                ax.set_title("Average GFP in a %s-frame observation"%num)
        # x labels only on the bottom-most axes
        ax.set_xlabel("Deviation from population-level meanGFP (%)", fontsize=16)
        ax.set_xticks([-meanGFP, -meanGFP/2, 0, meanGFP/2, meanGFP])
        ax.set_xticklabels(['-100%','-50%', 'Mean', '50%', '100%'])
        if len(dirName)>0:
            fileName=dirName+'devmean.png'
            P.savefig(fileName)
        # else:
        #     P.show()
    def plotMeanCoefficientVariation(self, all_cells, dirName=''):
        """Mean +/- std of the per-cell GFP coefficient of variation vs
        temperature, excluding cells whose GFP never exceeds OFFmax."""
        CVs=[]
        temperature=[]
        OFFmax=30.  # cells never rising above this are treated as OFF
        for i, celltemp in enumerate(all_cells):
            this_CVs=[]
            this_temperature=[]
            for cell in celltemp:
                nGFP=N.array(cell.GFP)
                if N.amax(cell.GFP)>OFFmax:
                    CV=N.std(nGFP)/N.mean(nGFP)
                    this_CVs.append(CV)
                    this_temperature.append(cell.temperature)
            CVs.append(this_CVs)
            temperature.append(this_temperature)
        # NOTE(review): minCV/maxCV are computed but never used here
        minCV= N.min(N.min(CVs))
        maxCV= N.max(N.max(CVs))
        # print temperature
        # idx = N.array(temperature).argsort()
        # print temperature
        temperatures=N.array([29., 31., 33., 35., 37.])
        P.figure(facecolor='w', edgecolor='k')
        P.ylabel("Coefficient of Variation (excluding OFF cells)", fontsize=16)
        P.xlabel("Temperature", fontsize=16)
        for i, temp in enumerate(temperature):
            P.errorbar(temp[0], N.mean(CVs[i]), yerr=N.std(CVs[i]), fmt='o',color='black')
        P.ylim([0, 1])
        #ax.text(0.8,0.8,' $'+str(temp[0])+'^\circ C$', fontsize=16)
        P.xlim([28, 38])
        P.xticks(temperatures)
        #ax.set_ylabel("Normalised frequency", fontsize=16)
        if len(dirName)>0:
            fileName=dirName+'CV_ON.pdf'
            P.savefig(fileName)
    def plotCoefficientVariation(self, all_cells, numBins, fileName=''):
        """Plot one histogram of per-cell CV per temperature (stacked subplots).

        all_cells: list with one entry per temperature; each entry is a list
            of cell objects exposing .GFP (sequence) and .temperature.
        numBins: number of histogram bins spanning [minCV, maxCV].
        fileName: unused here; kept for interface compatibility.
        """
        CVs=[]
        temperature=[]
        for i, celltemp in enumerate(all_cells):
            this_CVs=[]
            this_temperature=[]
            for cell in celltemp:
                nGFP=N.array(cell.GFP)
                # CV = std/mean of the cell's GFP trace (no OFF-cell filtering here)
                CV=N.std(nGFP)/N.mean(nGFP)
                this_CVs.append(CV)
                this_temperature.append(cell.temperature)
            CVs.append(this_CVs)
            temperature.append(this_temperature)
        # global CV range across all temperatures, used for a common binning
        minCV= N.min(N.min(CVs))
        maxCV= N.max(N.max(CVs))
        # print temperature
        # idx = N.array(temperature).argsort()
        # print temperature
        fig, ax = P.subplots(len(all_cells),1, figsize=(8,16), sharex=True, facecolor='w', edgecolor='k')
        P.subplots_adjust(hspace = .08)
        P.xlabel("Coefficient of Variation", fontsize=16)
        for i, temp in enumerate(temperature):
            bins=N.arange(minCV, maxCV, (maxCV-minCV)/numBins)
            hist, bin_edges=N.histogram(CVs[i],bins)
            wbin=(bin_edges[1]-bin_edges[0])
            # bar centres = midpoints of the bin edges
            xbins=wbin/2+bin_edges[:-1]
            # normalise so the tallest bar is 1 (peak-normalised, not a pdf)
            hist=hist/float(N.max(hist))
            ax[i].bar(xbins, hist, wbin*.8, color='black')
            ax[i].set_ylim([0, 1])
            ax[i].set_xlim([0, 1])
            ax[i].text(0.8,0.8,' $'+str(temp[0])+'^\circ C$', fontsize=16)
            ax[i].set_ylabel("Normalised frequency", fontsize=16)
def plotTotalVariation(self, all_cells, numBins, fileName=''):
TVs=[]
temperature=[]
for i, celltemp in enumerate(all_cells):
this_TVs=[]
this_temperature=[]
for cell in celltemp:
nGFP=N.array(cell.GFP)
p=2
TV=N.sum(N.abs(N.diff(nGFP,n=1, axis=0))**p)**(1/p)
this_TVs.append(TV)
this_temperature.append(cell.temperature)
TVs.append(this_TVs)
temperature.append(this_temperature)
minTV= N.min(N.min(TVs))
maxTV= N.max(N.max(TVs))
# print temperature
# idx = N.array(temperature).argsort()
# print temperature
print TVs
fig, ax = P.subplots(len(all_cells),1, figsize=(8,16), sharex=True, facecolor='w', edgecolor='k')
P.subplots_adjust(hspace = .08)
P.xlabel("Total Variation", fontsize=16)
for i, temp in enumerate(temperature):
bins=N.arange(minTV, maxTV, (maxTV-minTV)/numBins)
hist, bin_edges=N.histogram(TVs[i],bins)
wbin=(bin_edges[1]-bin_edges[0])
xbins=wbin/2+bin_edges[:-1]
hist=hist/float(N.sum(hist))
ax[i].bar(xbins, hist, wbin*.8, color='black')
ax[i].set_ylim([0,1])
ax[i].set_ylim([0,1])
ax[i].set_xlim([minTV, maxTV])
ax[i].set_xlim([minTV, maxTV])
ax[i].text(1.1*minTV,0.8,' $'+str(temp[0])+'^\circ C$', fontsize=16)
ax[i].set_ylabel("Normalised frequency", fontsize=16)
    def plotFractionON(self, all_cells, dirName=''):
        """Bar plot of the percentage of fliC-ON cells at each temperature.

        A cell counts as OFF when its GFP never exceeds ``diffmax``; the bar
        height is 100 * ON / (ON + OFF) for that temperature.
        dirName: if non-empty, the figure is saved as <dirName>fracOFF.pdf.
        """
        width = 1. # the width of the bars
        temperatures=N.array([29., 31., 33., 35., 37.])
        fig, ax = P.subplots(facecolor='w', edgecolor='k')
        # full-height background bars (near-white) behind the data bars
        ax.bar(temperatures-width/2., N.array([1., 1., 1., 1.,1.]), width, color='#fcfcfc', edgecolor='#fcfcfc')
        frac_OFF=[]
        # GFP threshold separating OFF cells from ON cells
        diffmax=30.
        for cell_temp in all_cells:
            cells_off=0.
            cells_on=0.
            for cell in cell_temp:
                #diff=N.amax(N.abs(cell.GFP- N.mean(cell.GFP[:])))
                #if diff<diffmax:
                if N.amax(cell.GFP)<diffmax:
                    #print '%s-%s'%(cell.pos,cell.id)
                    cells_off=cells_off+1.
                else:
                    cells_on=cells_on+1.
            print 'Fraction of cells OFF=%s'%(cells_off/(cells_on+cells_off))
            # NOTE(review): despite its name, frac_OFF holds the percentage of ON cells
            frac_OFF.append(100*cells_on/(cells_on+cells_off))
        barlist=ax.bar(temperatures-width/2., frac_OFF, width)
        # one fixed colour per temperature (light to dark orange)
        barlist[0].set_color("#feedde")
        barlist[1].set_color("#fdbe85")
        barlist[2].set_color("#fd8d3c")
        barlist[3].set_color("#e6550d")
        barlist[4].set_color("#a63603")
        ax.set_ylim([0,100])
        ax.set_xlim([28., 38.])
        ax.set_ylabel("Cells with $\it{fliC-ON}$ ($\%$)", fontsize=12)
        ax.set_xlabel("Temperature ($^\circ C$)", fontsize=12)
        ax.set_xticks(temperatures)
        if len(dirName)>0:
            fileName=dirName+'fracOFF.pdf'
            P.savefig(fileName)
|
987,611 | 31761a239697c2b5bc743ef6b6006d40e69f2f08 | """
server
"""
import socket
import select
class TcpServer(object):
    """Minimal non-blocking TCP server built on epoll (Linux only).

    Subclasses must override :meth:`to_response` to handle request bytes.
    """
    def __init__(self, port=8000):
        self.port = port
        # backlog passed to socket.listen()
        self.queue_size = 1000
    def start(self, port=8000):
        """Run the server on *port* until interrupted with Ctrl-C."""
        self.port = port
        try:
            self.server_run()
        except KeyboardInterrupt:
            print("\r\nGood Bye!")
    @property
    def server_socket(self):
        """A freshly created, bound, listening, non-blocking socket.

        NOTE: being a property, every access builds a *new* socket; callers
        must read it once and keep the reference (as server_run does).
        """
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.setblocking(0)
        server_socket.bind(("", self.port))
        server_socket.listen(self.queue_size)
        return server_socket
    @property
    def epoll(self):
        """A new epoll object (new instance on every access)."""
        return select.epoll()
    @property
    def max_bytes(self):
        """Maximum number of bytes read per recv() call.

        Bug fix: a property getter cannot accept extra arguments, so the old
        ``num=1000`` parameter could never be supplied; the constant is
        inlined instead.
        """
        return 1024 * 1000
    def server_run(self):
        """Accept loop: register the listening socket with epoll and feed
        readable client sockets to :meth:`to_response`."""
        server_socket = self.server_socket
        server_f = server_socket.fileno()
        epoll = self.epoll
        # edge-triggered readiness on the listening socket
        epoll.register(server_f, select.EPOLLIN | select.EPOLLET)
        client_socket_fs = {}
        while True:
            epoll_list = epoll.poll()
            for _fd, event in epoll_list:
                if _fd == server_f:
                    socket_c, _ = server_socket.accept()
                    socket_c.setblocking(0)
                    client_socket_fs[socket_c.fileno()] = socket_c
                    epoll.register(socket_c.fileno(), select.EPOLLIN | select.EPOLLET)
                elif event == select.EPOLLIN:
                    bytes_request = client_socket_fs[_fd].recv(self.max_bytes)
                    if bytes_request:
                        self.to_response(bytes_request, client_socket_fs[_fd])
                    else:
                        # empty recv() => peer closed the connection
                        epoll.unregister(_fd)
                        client_socket_fs[_fd].close()
    def to_response(self, string, socket_args):
        """Handle *string* (raw request bytes) for a client socket.

        Bug fix: raises NotImplementedError (the conventional signal for an
        abstract method) instead of the placeholder ValueError("not a
        function").
        """
        raise NotImplementedError("to_response must be implemented by a subclass")
|
987,612 | 19acf12aab96f4868e71d37dd70f0d9c9b15e43d | adj = ["crispy", "delicious" , "beautiful", "fresh" , "tender"]
food = ["salmon", "tuna" , "cod","macrkrel", "flounder" ]
for x in adj:
for y in food:
print (x,y)
|
987,613 | 0c11d1af8d5546ed1cf3e9844c9b8641d08bc44e | Import libraries for simulation
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Render the Mandelbrot set by iterating z <- z^2 + c on a TensorFlow graph.
# NOTE(review): this uses TensorFlow 1.x-only APIs (InteractiveSession,
# initialize_all_variables, complex_abs) that were removed in TF 2 -- confirm
# the pinned TF version before running.
#MANDELBROT SET
Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
#JULIA SET
#Y, X = np.mgrid[-2:2:0.005, -2:2:0.005]
# Define the current point of the complex plane for every pixel
Z = X+1j*Y
c = tf.constant(Z.astype("complex64"))
zs = tf.Variable(c)
# ns counts, per pixel, how many iterations stayed below the divergence bound
ns = tf.Variable(tf.zeros_like(c, "float32"))
#c = complex(0.0,0.75)
#c = complex(-1.5,-1.5)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
# Compute the new values of z: z^2 + x
zs_ = zs*zs + c
#zs_ = zs*zs - c
# Have we diverged with this new value?
not_diverged = tf.complex_abs(zs_) < 4
step = tf.group(
  zs.assign(zs_),
  ns.assign_add(tf.cast(not_diverged, "float32"))
  )
for i in range(200): step.run()
plt.imshow(ns.eval())
plt.show()
|
987,614 | a1ea85c2cfd1b2f10c56247db23ce971cac99667 | # 1) Loading Bar
# 2) Loading Screen
import os
import time
def loading_bar(seconds):
    """Render a textual progress bar that advances once per second.

    seconds: total number of one-second ticks; the bar reaches 100% on the
        final tick. The screen is cleared between ticks.
    """
    for loading in range(0, seconds + 1):
        # guard against ZeroDivisionError when seconds == 0
        percent = (loading * 100) // seconds if seconds else 100
        print("\n")
        print("Loading...")
        # Bug fix: pad the remaining width with spaces so the bar keeps a
        # constant width; the old code multiplied the *empty* string
        # ("" * (seconds + loading)), which produced no padding at all.
        print("<" + ("-" * loading) + (" " * (seconds - loading)) +
              ">" + str(percent) + "%")
        print("\n")
        time.sleep(1)
        os.system('cls' if os.name == "nt" else "clear")
def loading_screen(seconds):
    """Print 'imperial.txt' line by line with *seconds* delay per line,
    then run a 10-tick loading bar and clear the screen.

    seconds: pause (in seconds) between printed lines of the splash file.
    Raises FileNotFoundError if 'imperial.txt' is missing from the CWD.
    """
    print("Loading Screen...")
    with open('imperial.txt', 'r') as screen:
        for lines in screen:
            # end='' because each line already carries its newline
            print(lines, end='')
            time.sleep(seconds)
    loading_bar(10)
    os.system('cls' if os.name == "nt" else "clear")
loading_screen(.5)
|
987,615 | e57739be00d1809eb5dff1f34c78f0f689361fb8 | import numpy as np
import scipy
from scipy import stats
import math
import copy
import random
import matplotlib.pyplot as plt
def norm_ang(angle):
    """Wrap *angle* (radians) into the half-open interval [-pi, pi)."""
    wrapped = (angle + np.pi) % (2.0 * np.pi)
    return wrapped - np.pi
class Robot:
    """Simple unicycle-model robot simulator with an attached MCL filter."""
    def __init__(self, pos=None):
        # Start at the origin unless an explicit {'x','y','phi'} pose is given.
        if pos is None:
            self.x = 0
            self.y = 0
            self.phi = 0
        else:
            self.x = pos['x']
            self.y = pos['y']
            self.phi = pos['phi']
        self.distances = None       # latest range-sensor readings
        self.naigbours = []         # (sic) neighbouring robots
        self.MCL = MCL(20, pos)     # particle filter with 20 particles
        self.cmd_vel = {"u": 0, "w": 0}
    def update_sensor_readings(self, readings):
        """Store the latest range-sensor readings."""
        self.distances = readings
    def update_naigbours(self, naigbours):
        """Store the current list of neighbouring robots."""
        self.naigbours = naigbours
    def get_robot_position(self):
        """Return the true pose as a {'x','y','phi'} dict."""
        return {"x": self.x, "y": self.y, "phi": self.phi}
    def move(self):
        """Advance one time step using the straight-line policy.

        Returns the {'u','w'} command that was applied.
        """
        cmd_vel = self.straight()
        dPosition = self.kinematics(cmd_vel["u"], cmd_vel["w"])
        self.x = self.x + dPosition["dx"]
        self.y = self.y + dPosition['dy']
        self.phi = self.phi + dPosition["dPhi"]
        # keep the heading inside [-pi, pi]
        if self.phi > np.pi:
            self.phi = -2 * np.pi + self.phi
        if self.phi < -np.pi:
            self.phi = 2 * np.pi + self.phi
        self.cmd_vel = cmd_vel
        return cmd_vel
    def kinematics(self, u, w, phi=None):
        """Unicycle model: map forward speed u and turn rate w to
        world-frame deltas at heading *phi* (defaults to the robot's own)."""
        if phi is None:
            phi = self.phi
        dx = math.cos(phi) * u
        dy = math.sin(phi) * u
        dPhi = w
        return {"dx": dx, "dy": dy, "dPhi": dPhi}
    def wander(self):
        """Random-walk velocity command (Gaussian u and w)."""
        u = np.random.normal(loc=0.5, scale=0.5)
        w = np.random.normal(loc=0, scale=np.pi / 2)
        return {"u": u, "w": w}
    def straight(self):
        """Near-straight, constant-speed command.

        Bug fix: the class previously defined ``straight`` twice; only the
        second definition (w=0.01) was ever bound, so the first copy (w=0)
        was dead code and has been removed.
        """
        return {"u": 0.1, "w": 0.01}
#def distance_and_bearing(self,dmsg):
class Particle:
    """One MCL hypothesis: a pose (x, y, phi) plus importance weight w."""
    def __init__(self, particle_id, x=0, y=0, phi=0):
        self.x = x
        self.y = y
        self.phi = phi
        self.w = 1          # importance weight
        self.id = particle_id
    def kinematics(self, u, w):
        """Unicycle model: world-frame deltas for speed u and turn rate w."""
        dx = math.cos(self.phi) * u
        dy = math.sin(self.phi) * u
        dPhi = w
        return {"dx": dx, "dy": dy, "dPhi": dPhi}
    def update_position(self, u, w):
        """Apply the motion model plus Gaussian noise and record the deltas."""
        deltas = self.kinematics(u, w)
        dx = deltas["dx"] + np.random.normal(loc=0, scale=0.2)
        dy = deltas["dy"] + np.random.normal(loc=0, scale=0.2)
        dPhi = deltas["dPhi"] + np.random.normal(loc=0, scale=0.2)
        self.x += dx
        self.y += dy
        self.phi += dPhi
        self.last_update = {"self_dx": dx, "self_dy": dy, "self_dPhi": dPhi}
    def odom_update(self, rdx, rdy, rdPhi):
        # NOTE(review): this evaluates the pdf at its own mean, so `prob` is a
        # constant and rdx/rdy/rdPhi are ignored; the odometry arguments are
        # presumably meant to be the mean, and xs probably should read
        # self_dx/self_dy/self_dPhi. Kept as-is pending confirmation.
        xs = np.array([self.last_update["self_dx"], self.last_update["self_dx"], self.last_update["self_dx"]])
        prob = stats.multivariate_normal.pdf(xs, mean=xs, cov=1)
        self.w *= prob
    ### Here the ray trace callback traditinaly works with the robot generated map
    def update_weight(self, readings, ray_trace_callback, sigma=0.1):
        """Set w to the likelihood of *readings* given the expected ray
        traces from this particle's pose; returns the new weight.

        Bug fix: the callback was previously invoked via the misspelled name
        ``ray_tracing_callback``, raising NameError on every call.
        """
        w = 1
        expected_readings = ray_trace_callback(self.x, self.y, self.phi)
        for i, r in enumerate(readings):
            prob = scipy.stats.norm.pdf(r, loc=expected_readings[i], scale=sigma)
            w *= prob
        self.w = w
        return w
    def update_detection_weight(self, dmsgs):
        """Combine (multiply) the weights from all detection messages."""
        if len(dmsgs) == 0:
            print("no neigbours")
            return 1
        host_r = dmsgs[0]["to"]
        w = 1
        for i, dmsg in enumerate(dmsgs):
            print("PARTICLE ", self.id, "of R ", host_r, "working on particles from ", dmsg["from"])
            w *= self.weight_of_message(dmsg)
        return w
    def weight_of_message(self, dmsg, slf=None):
        """Score how well pose *slf* (default: self) explains one
        range/bearing detection message; sums a Gaussian kernel over the
        sender's particles."""
        w = 0
        if slf is None:
            slf = self
        for i, p in enumerate(dmsg["particles"]):
            p.phi = norm_ang(p.phi)
            slf.phi = norm_ang(slf.phi)
            dx = (p.x - slf.x)
            # Bug fix: dy previously read (p.x - slf.y); the y offset must
            # use p.y, mirroring dx above.
            dy = (p.y - slf.y)
            # residuals of measured range / bearing vs. the geometric ones
            dr = np.sqrt(dx**2 + dy**2) - dmsg["r"]
            print("DR ", dr)
            print("PAR POS (", p.x, p.y, ")")
            dTheta = norm_ang(np.arctan2(dy, dx) - (p.phi + dmsg["theta"]))
            # dPhi is computed but deliberately left out of `sample` below
            dPhi = norm_ang(np.pi - p.phi - slf.phi + dmsg["theta"] - dmsg["theta2"])
            print("DTHETA ", dTheta)
            sample = np.array([dr, dTheta])
            s = 0.1
            prob = stats.multivariate_normal.pdf(sample, mean=np.array([0, 0]), cov=np.array([[s, 0], [0, s]]))
            w += prob
        return w
class MCL:
    """Monte-Carlo localisation filter over a population of Particle poses."""
    def __init__(self,particleN=10,pos=None):
        """Spawn particleN particles: uniformly in [-5,5]^2 x [-pi,pi] when
        pos is None, otherwise tightly around the given {'x','y','phi'} pose."""
        self.particle_count = particleN
        self.particles = []
        for n in range(particleN):
            if pos == None:
                x = np.random.uniform(low=-5,high=5)
                y = np.random.uniform(low=-5,high=5)
                phi = np.random.uniform(low=-np.pi,high=np.pi)
                self.particles.append(Particle(n,x=x,y=y,phi=phi))
            else:
                x = pos['x'] + np.random.normal(loc=0,scale=0.1)
                y = pos['y'] + np.random.normal(loc=0,scale=0.1)
                phi = pos['phi'] + np.random.normal(loc=0,scale=0.1)
                self.particles.append(Particle(n,x=x,y=y,phi=phi))
    def apply_motion_model(self,u,w):
        """Move every particle by command (u, w) plus its own noise."""
        for i,p in enumerate(self.particles):
            p.update_position(u,w)
    def get_particles(self):
        """Return a deep copy of the particle list (safe to mutate)."""
        return copy.deepcopy( self.particles)
    def apply_sensor_model(self,readings,ray_trace_callback):
        """Reweight particles by sensor likelihood, then normalise weights."""
        sum_w = 0
        for i,p in enumerate(self.particles):
            w = p.update_weight(readings,ray_trace_callback)
            sum_w +=w
        for p in self.particles:
            p.w = p.w/sum_w
    def apply_detection_model(self,dmsgs):
        """Multiply each particle's weight by its detection-message score.

        NOTE(review): unlike apply_sensor_model, weights are NOT normalised
        here (sum_w is accumulated but unused) -- confirm this is intended.
        """
        sum_w = 0
        if len(dmsgs) ==0:
            print("apply_detection_model NO MESSAGES")
            return 1
        for i,p in enumerate(self.particles):
            w = p.update_detection_weight(dmsgs)
            print("New particle w: ",w)
            sum_w +=w
            p.w *= w
    def simple_sampling(self):
        """Importance resampling: draw len(particles) indices proportional to
        weight, deep-copy the winners and reset their weights to 1."""
        sum_w = 0
        for i,p in enumerate(self.particles):
            sum_w += p.w
        weights =[p.w for p in self.particles]
        ids = range(len(weights))
        print("len w",len(weights))
        print("len ids",len(ids))
        #print(weights)
        new_ids = random.choices(ids, weights=weights, k=len(weights))
        print("weights ",weights)
        print("IDS ",new_ids)
        new_particles = []
        for i,p in enumerate(new_ids):
            new_p = copy.deepcopy(self.particles[p])
            new_p.id =i
            new_p.w=1
            #if i <= (len(weights)/10):
            # new_p = Particle(i,x=np.random.uniform(low=-15,high=15),y=np.random.uniform(low=-15,high=15),phi=np.random.uniform(low=-np.pi,high=np.pi))
            new_particles.append(new_p)
        self.particles = new_particles
        return self.particles
    ### FIX increases number of paricles
    def reciprocal_sample(self,dmsgs):
        """Resample this robot's particles by back-projecting the sender's
        particles through the measured range/bearing of each detection.

        NOTE(review): cluster_mean is recomputed (and unused) on every inner
        iteration -- dead work left over from the commented-out mean-based
        variant above.
        """
        new_particles = []
        for i,dmsg in enumerate(dmsgs):
            for j,p in enumerate(dmsg['particles']):
                mean = self.cluster_mean(particles=dmsg["particles"])
                #r2_x = mean["mean_x"]
                #r2_y = mean["mean_y"]
                #r2_phi = mean["mean_phi"]
                r2_x = p.x
                r2_y = p.y
                r2_phi = p.phi
                print("pHI2 ",r2_phi)
                thetaR = dmsg["theta"]
                thetaL = dmsg["theta2"]
                print("TH R ",thetaR)
                print("TH L ",thetaL)
                # absolute bearing from the sender particle to this robot
                thetaA = norm_ang(r2_phi+thetaR)
                print("TH A",thetaA)
                new_x = r2_x-np.cos(thetaA)*dmsg["r"] #+np.random.normal(0,0.1)
                new_y = r2_y-np.sin(thetaA)*dmsg["r"] #+np.random.normal(0,0.1)
                new_phi = norm_ang(np.pi- thetaR)
                new_particles.append(Particle(j,x=new_x,y=new_y,phi=new_phi))
        new_ps = random.choices(new_particles, k=len(self.particles))
        self.particles = copy.deepcopy(new_ps)
        #self.get_prob_map(dmsgs)
    def get_prob_map(self,dmsgs):
        """Debug visualisation: scatter the detection-message weight over an
        integer grid, plus the sender cluster's mean (blue)."""
        fig,ax = plt.subplots()
        mean = self.cluster_mean(particles=dmsgs[0]["particles"])
        for x in range(-10,10):
            for y in range(-20,20):
                print("PARTICLE (",x,y,")")
                sim_p = Particle(0,x=x,y=y)
                w = sim_p.weight_of_message(dmsgs[0])
                print("WEIGHT",w)
                ax.scatter([x],[y],s=10*w,c='r')
        ax.scatter([mean["mean_x"]],[mean["mean_y"]],s=2,c='b')
        plt.show()
    def cluster_mean(self,particles=None):
        """Arithmetic mean pose of *particles* (default: own particles).

        NOTE(review): phi is averaged arithmetically, which is wrong for
        angles that wrap around +/-pi -- confirm acceptable for this use.
        """
        if particles == None:
            particles = self.particles
        mean_x =0
        mean_y =0
        mean_phi=0
        for i,p in enumerate(particles):
            mean_x+=p.x
            mean_y+=p.y
            mean_phi+=p.phi
        n =len(particles)
        return {"mean_x":mean_x/n,"mean_y":mean_y/n,"mean_phi":mean_phi/n}
    def correct_position(self,pos):
        """Snap every particle to the given {'x','y','phi'} pose; returns a
        deep copy of the particle list."""
        for i,p in enumerate(self.particles):
            p.x = pos["x"]
            p.y = pos["y"]
            p.phi = pos["phi"]
        return copy.deepcopy(self.particles)
#r = Robot()
#r.drawDDA(2,5,10,200)
|
987,616 | a44063e4ad50c184acf46aa63469fc3b5630f970 | from django.apps import AppConfig
class SwzcConfig(AppConfig):
    """Django application configuration for the ``swzc`` app."""
    name = 'swzc'
|
987,617 | 7d8bd3fa78c0b998fcb9cf7abf4f797f1702d957 | class NMEAMessageBadFormatError(Exception):
"""Exception raised for NMEA message pattern ignored
parameters:
nmea_message: nmea message which caused the error
message: explanation of the error
"""
def __init__(self, nmea_message, message=''):
self.message = message if message else f'"{nmea_message}" is not well formatted NMEA message'
super().__init__(self.message)
class ChecksumError(Exception):
    """Raised when a NMEA message has an incorrect checksum.

    parameters:
        sentence: sentence from which the check sum is calculated
        received_checksum: checksum as received in nmea message
        message: explanation of the error (a default is built when empty)
    """
    def __init__(self, sentence, received_checksum, message=''):
        default = f'"{sentence}" has been received with invalid checksum {received_checksum}'
        self.message = message or default
        super().__init__(self.message)
|
987,618 | 00ba6de97a4681bf9a4d4a95a286d3f80954f1d5 | #https://www.hackerrank.com/challenges/luck-balance/problem
def luckBalance(k, contests):
    """Maximal luck balance when at most *k* important contests may be lost.

    contests: list of [luck, importance] pairs; importance 1 marks an
        important contest. Losing a contest adds its luck, winning subtracts
        it; unimportant contests can always be lost.
    Returns the maximum achievable luck balance.

    Fixes vs. the original: the leftover debug prints (which corrupt judged
    stdout) are removed, and the O(n^2) repeated min()+remove() loop is
    replaced by a single sort.
    """
    important = sorted(c[0] for c in contests if c[1] == 1)
    unimportant_luck = sum(c[0] for c in contests if c[1] != 1)
    # we must WIN the (len - k) smallest important contests; the rest we lose
    must_win = max(len(important) - k, 0)
    won_luck = sum(important[:must_win])
    lost_luck = sum(important[must_win:])
    return unimportant_luck + lost_luck - won_luck
# --- HackerRank driver: read n (contest count) and k, then n
# "luck importance" lines, and print the maximal luck balance. ---
n, k = input().split()
n = int(n)
k = int(k)
contests = []
for _ in range(n):
    contests.append(list(map(int, input().rstrip().split())))
result = luckBalance(k, contests)
print(result)
|
987,619 | e431ed81f60715fe4c36265f60fdc8aab4e03b5c | import os
import os.path as osp
import logging
import cv2
import numpy as np
from ..base import App
from ..base import Keyboard as kb
from ..gui.container import check_ready
from ..gui.media import MediaType
from ..utils.transform import convert_bbox_coordinate
from ..utils.visualize import draw_bbox
logger = logging.getLogger(__name__)
__all__ = [ "DetApp" ]
class DetApp(App):
    """Detection application: streams frames to remote DetWorkers, draws the
    returned bounding boxes, and records them per panel/frame."""
    MATCHED_WORKER = "DetWorker"
    def __init__(self, **kwargs):
        # Bug fix: the error message named MOTApp (copy-paste) instead of DetApp.
        raise RuntimeError("Cannot directly instantiate object from DetApp")
    def boot(self):
        """Prepare runtime environment for worker"""
        # panel -> {fid -> [ (0, minx, miny, maxx, maxy), ... ]}
        self.video_results = {}
        self.event_handler = { 'detect': self._detect_handler }
    def export(self, output_dir):
        """Export tracking result to output directory"""
        # Check output directory exists
        output_dir = osp.join(output_dir, self.__class__.__name__)
        if not osp.exists(output_dir):
            os.makedirs(output_dir)
        # Export video result panel-by-panel
        for panel, result in self.video_results.items():
            fname = "{}.txt".format(osp.basename(panel.src))
            fname = osp.join(output_dir, fname)
            with open(fname, "w") as f:
                fids = sorted(result.keys())
                for fid in fids:
                    tracks = result[fid]
                    for t in tracks:
                        # fid, tid, minx, miny, maxx, maxy
                        f.write(f"{fid},0,{t[1]},{t[2]},{t[3]},{t[4]}\n")
        logger.info(f"Export result to '{output_dir}'")
    @check_ready
    def run(self):
        """App loop for running app"""
        while not self.is_stop():
            content = self.render()
            fid, frame = content['fid'], content['container_frame']
            if not self.is_pause():
                # Send request
                request = { 'action': 'detect' }
                self.send(request)
                # Send raw frames to workers
                video_frames = []
                for panel in self.panel_to_channel.keys():
                    media_frame = panel.media_cache
                    media_frame = cv2.resize(media_frame, self.trans_resolution)
                    frame_bytes = cv2.imencode('.jpg', media_frame)[1]
                    video_frames.append({ 'panel': panel, 'frame_bytes': frame_bytes })
                self.parallel_send_videos(video_frames)
                # Catch response from remote worker
                response = self.recv()
                if response is None:
                    break
                # Handle server response
                handler = self.event_handler[response['action']]
                new_content = handler(response)
                fid, frame = new_content['fid'], new_content['container_frame']
                # (unused local `last_frame` removed)
            # Show applications
            cv2.imshow(self.winname, frame)
            cv2.setTrackbarPos(self.barname, self.winname, fid)
            # Handling keyboard events
            key = cv2.waitKey(1) & 0xff
            self.keyboaord_handler(key)
        cv2.destroyAllWindows()
    # NOTE(review): "keyboaord" is misspelled, but the base class uses the
    # same spelling, so renaming it here would break the override.
    def keyboaord_handler(self, key):
        # When certain panel is in focused
        # ====================================
        if self.mode == App.OPERATION_MODE:
            if key == kb.ESC:
                self.focus_panel.focus = False
                self.focus_panel = None
                self.mode = App.SELECT_MODE
                return
        # Common key handler
        # =====================================
        super().keyboaord_handler(key)
    def mouse_callback(self, event, x, y, flags, param):
        # Wait for selecting panel to focus on
        # ==================================================
        if self.mode == App.SELECT_MODE:
            super().mouse_callback(event, x, y, flags, param)
        elif self.mode == App.OPERATION_MODE:
            pass
    def trackbar_callback(self, value):
        super().trackbar_callback(value)
    def _detect_handler(self, response):
        """Draw the worker's bboxes on each panel, record them, and rerender
        the container; returns the rerendered content dict."""
        # Rerender panels (add bboxes)
        panel_contents = []
        for panel in response['content']:
            pid = panel['pid']
            confs = [ bbox['conf'] for bbox in panel['bboxes']]
            bboxes = [ bbox['bbox'] for bbox in panel['bboxes']]
            # Select target panel to manipulate
            target_panel = [ panel
                            for panel in self.panels
                            if panel.pid == pid ][0]
            # Convert coordinate system (worker space -> panel media space)
            target_media_frame = target_panel.media_cache
            new_resolution = target_media_frame.shape[:2][::-1]
            old_resolution = self.trans_resolution
            bboxes = convert_bbox_coordinate(bboxes, old_resolution, new_resolution)
            # Save result in mot tracking format
            for bbox in bboxes:
                # Check data structure format
                if target_panel not in self.video_results:
                    self.video_results[target_panel] = {}
                if target_panel.fid not in self.video_results[target_panel]:
                    self.video_results[target_panel][target_panel.fid] = []
                record = (0, bbox[0], bbox[1], bbox[2], bbox[3])
                self.video_results[target_panel][target_panel.fid].append(record)
            # Draw bboxes on target panel
            for bbox in bboxes:
                draw_bbox(target_media_frame, bbox, thickness=self.line_thickness)
            # Rerender
            target_panel_content = target_panel.rerender(target_media_frame)
            panel_contents.append(target_panel_content)
        # Align/Sort rerendered panel_contents to match self.panels order
        panel_contents = [ [ panel_content
                             for panel_content in panel_contents
                             if panel_content['pid'] == panel.pid ][0]
                           for panel in self.panels ]
        # Rerender container
        content = self.rerender(panel_contents)
        return content
|
987,620 | fcb6d941dea398e8e09ada883c8114dfb2e40eee | from aut.app import ExtractPopularImages, SaveBytes, WriteGEXF, WriteGraphML
from aut.common import WebArchive
from aut.udfs import (
compute_image_size,
compute_md5,
compute_sha1,
detect_language,
detect_mime_type_tika,
extract_boilerplate,
extract_date,
extract_domain,
extract_image_links,
extract_links,
get_extension_mime,
remove_html,
remove_http_header,
remove_prefix_www,
)
__all__ = [
"ExtractPopularImages",
"SaveBytes",
"WebArchive",
"WriteGEXF",
"WriteGraphML",
"compute_image_size",
"compute_md5",
"compute_sha1",
"detect_language",
"detect_mime_type_tika",
"extract_boilerplate",
"extract_date",
"extract_domain",
"extract_image_links",
"extract_links",
"get_extension_mime",
"remove_http_header",
"remove_html",
"remove_prefix_www",
]
|
987,621 | 99ce36e89c7d1493602de45a7c8b532474a79c6e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
def download_enron_dataset():
    """
    Downloads Enron dataset.
    """
    # Delegate the actual download to the bundled shell script.
    script_path = os.path.join("spampy", "dataset_downloader.sh")
    os.system(script_path)
|
987,622 | 2429b5ef1b4061dab51ca119b0755757665db73b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 4 17:38:18 2017
@author: Muriel
"""
# driver state: current count under test and run of consecutive successes
target=0
comput=0
# available nugget pack sizes
small=6
medium=9
large=20
def nuggetnumber(target):
    """Return True if exactly *target* nuggets can be bought with packs of
    ``small``, ``medium`` and ``large`` (module-level pack sizes).

    Improvements: the loop bounds are tightened from range(target) to
    ``target // size + 1`` per pack (the old cubic search over target^3
    combinations), which also fixes target == 0 returning False -- zero
    nuggets are trivially purchasable with zero packs.
    """
    for x in range(target // small + 1):
        for y in range(target // medium + 1):
            for z in range(target // large + 1):
                if target == (small * x + medium * y + large * z):
                    return True
    return False
# Scan upward until seven consecutive purchasable counts are found: once a
# full run of small-pack-size consecutive values is purchasable, every larger
# value is too (just add more 6-packs), so `largest` then holds the biggest
# non-purchasable count (the Frobenius number, 43 for packs of 6/9/20).
while comput<7:
    if nuggetnumber(target)==True:
        comput+=1
    if nuggetnumber(target)==False:
        largest=target
        comput=0
    target+=1
print("the highest number of Chicken nugget you can not purchase is",largest)
|
987,623 | 3c64b2506ac3d1fc3d00927aab8096a946550321 | import argparse
from selecting.selecting import select_config
# NOTE(review): Python 2 script (print statements, `commands` module).
if __name__ == "__main__":
    # CLI: pick a cloud configuration under time/money constraints, then run
    # the application inside a docker container shaped to that configuration.
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--time', action='store', dest='max_time',
                        help='The maximum time expected to run the application, united by second')
    parser.add_argument('-m', '--money', action='store', dest="max_money",
                        help='The maximum money expected to run the application')
    parser.add_argument('--size', action='store', dest="size", required=True,
                        help='The size of the application')
    parser.add_argument('-p', '--parameters', action='append', dest='parameters', default=[],
                        help='the parameters of running the application, '
                             'if there are more than 2 parameters. please type in the format'
                             '-p <para1> -p <para2> ...')
    parser.add_argument('application', help='the application name')
    args = parser.parse_args()
    # absent limits default to +inf (unconstrained)
    configs = select_config(
        max_money=float(args.max_money) if args.max_money else float('inf'),
        max_time=float(args.max_time) if args.max_time else float('inf'),
        application=args.application,
        size=int(args.size),
        learning_dir='learning/',
        selecting_dir='selecting/')
    if configs is None:
        print "no such configuration fits the requirements"
        exit()
    print configs
    # configs[0] = best configuration; columns 2/3 hold cpu count and memory
    cpu, memory = configs[0][2], configs[0][3]
    import commands
    image = 'ubuntu_python'
    # NOTE(review): the script/command strings are built by concatenation and
    # passed to a shell -- argument values are not escaped (injection hazard).
    script = './' + args.application + ' ' + ' '.join(args.parameters) + ' '+cpu
    commands.getoutput("echo '" + script + "' > application/main_script")
    # pin the container to cpus 0..cpu-1 and cap its memory
    bash_script = 'sudo docker run --rm -v `pwd`/application/:/Final -m ' + memory + ' --cpuset=0-' + str(
        int(cpu) - 1) + ' -w /Final ' + image + ' bash main_script'
    print bash_script
    commands.getoutput(bash_script)
    commands.getoutput('rm application/main_script')
|
987,624 | 1304024af8cef938feba05e9b398115ea4d95e1e | import numpy as np
import matplotlib.pyplot as plt
def wave_package_position(x, x0, p0, d, hbar_prim):
    """Normalised Gaussian wave packet in position space.

    Centred at x0 with width d and mean momentum p0; hbar_prim is hbar in
    the primed unit system. Returns a complex amplitude (array or scalar).
    """
    norm = (np.sqrt(np.pi) * d) ** -0.5
    gauss = np.exp(-((x - x0) ** 2) / (2.0 * d ** 2))
    phase = np.exp(1j * p0 * (x - x0) / hbar_prim)
    return norm * gauss * phase
def propagate(phi_x, p_prop, v_prop):
    """One split-operator step: apply the potential factor in position space,
    the kinetic factor in momentum space, and transform back."""
    in_k_space = np.fft.fft(phi_x * v_prop)
    return np.fft.ifft(in_k_space * p_prop)
#%% define constants
hbar_si = 1.054571817e-34 # hbar in SI
hbar_prim = hbar_si*1e34/1.602 # hbar in ASU_prim. eV*fs
m_prim_u = 0.009647 # mass/u in ASU_prim
# position space
dx = 0.001
x_start = -150.0
x_stop = 150.0
x = np.arange(x_start, x_stop, dx)
Nx = len(x)
# momentum space (grid matched to the FFT of the position grid)
dp = 2*np.pi*hbar_prim/(Nx*dx) # Delta momentum
p_nyquist = 2*np.pi*hbar_prim/(2*dx) # Nyquivst momentum
p = np.arange(-p_nyquist, p_nyquist, dp) # Generating momentum "freq"
# reorder to FFT frequency layout so p aligns with np.fft.fft output
p = np.fft.fftshift(p)
# initial values for wavefunction
d = 0.5 # Width of our hydrogen atom
m_h = 1/m_prim_u # Mass of our hydrogen atom
initial_energy = 0.08
p0 = np.sqrt(2*initial_energy*m_h) # Initial momentum of our hydrogen atom
x0 = -15 # Initial position of our hydrogen atom
## Propagation
T = 1e6
dt = 0.5
Nt = int(T/dt)
V0 = 0.1
alpha = 0.5
# adiabatic potentials
# NOTE(review): lim, a, b, c appear unused in the visible portion of this script
lim = 1e-60
a = 0.3
b = 0.4
c = 0.05
# sech^2 potential barrier of height V0 and width alpha
V_x = V0/(np.cosh(x/alpha)**2)
# split-operator phase factors: potential (x-space) and kinetic (k-space)
v_prop = np.exp(-1j*V_x*dt/hbar_prim)
p_prop = np.exp(-1j*p**2*dt/(hbar_prim*2*m_h))
# set initial wavefunction
phi_x = wave_package_position(x, x0, p0, d, hbar_prim)
phi_x_initial = phi_x
n_x_initial = np.abs(phi_x_initial)**2
fig, ax = plt.subplots()
ax.plot(x, V_x , color='orange', linewidth=2, label=r'$V_x(x)$')
ax.plot(x, n_x_initial, color='blue', linewidth=2, label=r'$|\psi(x)|^2$')
ax.set_title(r'Simulation set up, $V_0=0.1$ eV, $\alpha=2.0$ Å', fontsize='16')
ax.set_xlabel(r'$x$ [$Å$]', fontsize='16')
ax.set_ylabel(r'$|\psi(x)|^2$ [$Å^{-1}$], $V(x)$ [$eV$]', fontsize='16')
ax.set_xlim([-20,20])
ax.grid()
ax.legend(fontsize='16', loc='upper right')
plt.savefig('../T3/Sim_set_up_alpha_05.pdf', format='pdf', bbox_inches='tight')
plt.show()
# indices where the potential becomes non-negligible / where to stop tracking
V_less_then = 1e-8
V_stop_index = np.argmax(V_x > V_less_then)
x_stop_index_R = np.argmax(x > -100)
x_stop = x[V_stop_index]
x_stop_2 = x[-1-V_stop_index]
y_stop = np.array([0,0.3])
##%%
#stop_prop = 1e-6
#start_checking = False
## propagate wave!
#for t in range(Nt):
#    if (t%100 == 0):
#        print(f'Saving figure for timestep: {t} / {Nt}')
#        print(np.abs(phi_x[x_stop_index_R])**2)
#    if (np.abs(phi_x[x_stop_index_R])**2 > stop_prop):
#        break
#    phi_x = propagate(phi_x, p_prop, v_prop)
#
#n_x = np.abs(phi_x)**2
#
#fig, ax = plt.subplots()
#ax.plot(x, V_x , color='orange', linewidth=2, label=r'$V_x(x)$')
#ax.plot(x, n_x, color='blue', linewidth=2, label=r'$|\psi(x)|^2$')
#
#print(n_x[x_stop_index_R])
#print(n_x[V_stop_index])
#print(n_x[-1-V_stop_index])
#np.savetxt('data/V_x_e0_{:.2f}_V0_{:.2f}.csv'.format(initial_energy,V0), V_x, delimiter=",")
#np.savetxt('data/n_x_e0_{:.2f}_V0_{:.2f}.csv'.format(initial_energy,V0), n_x, delimiter=",")
#np.savetxt('data/x_e0_{:.2f}_V0_{:.2f}.csv'.format(initial_energy,V0), x, delimiter=",")
|
987,625 | 1a93e77f564b0ff5fd33f93570de940c0eb3440f | import logging
logging.disable(logging.CRITICAL)
from tabulate import tabulate
from mjrl.utils.make_train_plots import make_train_plots
from mjrl.utils.gym_env import GymEnv
from mjrl.samplers.core import sample_paths
import numpy as np
import pickle
import time as timer
import os
import copy
def train_agent(job_name, agent,
                seed = 0,
                niter = 101,
                gamma = 0.995,
                gae_lambda = None,
                num_cpu = 1,
                sample_mode = 'trajectories',
                num_traj = 50,
                num_samples = 50000, # has precedence, used with sample_mode = 'samples'
                save_freq = 10,
                evaluation_rollouts = None,
                plot_keys = ['stoc_pol_mean'],
                ):
    """Run *niter* training iterations of *agent*, logging and checkpointing
    into the directory *job_name* (created if missing).

    Side effects: chdir's into job_name for the whole run (restored at the
    end), writes results.txt, pickled policies/baselines under iterations/,
    and optionally logs/plots under logs/.
    """
    np.random.seed(seed)
    if os.path.isdir(job_name) == False:
        os.mkdir(job_name)
    previous_dir = os.getcwd()
    os.chdir(job_name) # important! we are now in the directory to save data
    if os.path.isdir('iterations') == False: os.mkdir('iterations')
    if os.path.isdir('logs') == False and agent.save_logs == True: os.mkdir('logs')
    best_policy = copy.deepcopy(agent.policy)
    best_perf = -1e8
    # initialise the whole curve at the sentinel so untouched entries never win
    train_curve = best_perf*np.ones(niter)
    mean_pol_perf = 0.0
    e = GymEnv(agent.env.env_id)
    for i in range(niter):
        print("......................................................................................")
        print("ITERATION : %i " % i)
        # NOTE(review): at i == 0 this reads train_curve[-1] (the last,
        # sentinel-valued entry), so the comparison is a no-op on iteration 0.
        if train_curve[i-1] > best_perf:
            best_policy = copy.deepcopy(agent.policy)
            best_perf = train_curve[i-1]
        N = num_traj if sample_mode == 'trajectories' else num_samples
        args = dict(N=N, sample_mode=sample_mode, gamma=gamma, gae_lambda=gae_lambda, num_cpu=num_cpu)
        stats = agent.train_step(**args)
        train_curve[i] = stats[0]
        if evaluation_rollouts is not None and evaluation_rollouts > 0:
            print("Performing evaluation rollouts ........")
            # evaluation uses the deterministic/eval mode of the policy
            eval_paths = sample_paths(num_traj=evaluation_rollouts, policy=agent.policy, num_cpu=num_cpu,
                                      env=e.env_id, eval_mode=True, base_seed=seed)
            mean_pol_perf = np.mean([np.sum(path['rewards']) for path in eval_paths])
            if agent.save_logs:
                agent.logger.log_kv('eval_score', mean_pol_perf)
        # periodic checkpoint (skipped on iteration 0)
        if i % save_freq == 0 and i > 0:
            if agent.save_logs:
                agent.logger.save_log('logs/')
                make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
            policy_file = 'policy_%i.pickle' % i
            baseline_file = 'baseline_%i.pickle' % i
            pickle.dump(agent.policy, open('iterations/' + policy_file, 'wb'))
            pickle.dump(agent.baseline, open('iterations/' + baseline_file, 'wb'))
            pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
        # print results to console
        if i == 0:
            result_file = open('results.txt', 'w')
            print("Iter | Stoc Pol | Mean Pol | Best (Stoc) \n")
            result_file.write("Iter | Sampling Pol | Evaluation Pol | Best (Sampled) \n")
            result_file.close()
        print("[ %s ] %4i %5.2f %5.2f %5.2f " % (timer.asctime(timer.localtime(timer.time())),
                                                 i, train_curve[i], mean_pol_perf, best_perf))
        result_file = open('results.txt', 'a')
        result_file.write("%4i %5.2f %5.2f %5.2f \n" % (i, train_curve[i], mean_pol_perf, best_perf))
        result_file.close()
        if agent.save_logs:
            # only scalar log entries are tabulated
            print_data = sorted(filter(lambda v: np.asarray(v[1]).size == 1,
                                       agent.logger.get_current_log().items()))
            print(tabulate(print_data))
    # final save
    pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
    if agent.save_logs:
        agent.logger.save_log('logs/')
        make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
    os.chdir(previous_dir)
|
987,626 | 0dc5ae3d322d33f5c36ac8c919edbdb45503ecd1 | """
此脚本用来造数据,由于前端可视化部分需要大量数据,手动输入费时又费力,固然写此脚本。
"""
import random
import time
from sign import mongo
# Accumulator lists shared by the generation sections below; each holds one
# value per fake record (10000 records total).
name = []             # full fake names, e.g. "赵钱孙"
area_key = []         # province of each record
area_value = []       # city of each record
type_list_key = []    # top-level case category
type_list_value = []  # concrete case type
date = []             # incident date string "YYYY/MM/DD"
season = []           # season derived from the date
make = []             # solved / unsolved flag
no = []               # running record number
############################## random person names #############################################################
name_list="赵 钱 孙 李 周 吴 郑 王 冯 陈 楮 卫 蒋 沈 韩 朱 秦 尤 许 何 吕 施 孔 曹 严 华 金 魏 陶 戚 谢 邹 喻 柏 水 窦 云 苏 潘 葛 奚 范 彭 鲁 韦 昌 马 苗 凤 花 俞 任 袁 柳 酆 鲍 史 费 廉 岑 薛 雷 贺 倪 滕 殷 罗 毕 郝 邬 安 乐 于 时 傅 皮 卞 齐 伍 余 元 卜 顾 孟 平 和 穆 萧 尹 姚 邵 湛 祁 毛 禹 狄 米 贝 明 计 伏 成 戴 谈 宋 茅 熊 纪 舒 屈 项 祝 董 杜 阮 蓝 闽 席 季 麻 贾 路 娄 危 江 童 颜 梅 盛 林 刁 锺 徐 丘 高 夏 蔡 田 樊 胡 凌 虞 万 支 柯 昝 管 卢 经 房 裘 缪 干 解 应 丁 宣 贲 邓 郁 单 杭 "
# str.split() with no argument drops the empty string produced by the trailing
# space; str.split(' ') kept it, so '' could be sampled into a name, silently
# producing 2-character (or shorter) names.
surnames = name_list.split()
for _ in range(10000):
    # Three independent uniform draws; characters may repeat within a name,
    # matching the original sampling behaviour.
    name.append(random.choice(surnames) + random.choice(surnames) + random.choice(surnames))
############################## random locations ################################################################
# Province -> cities lookup used to draw a consistent (province, city) pair.
area_dict= {
    '北京': ['北京'],
    '广东': ['广州', '深圳', '珠海', '汕头', '韶关', '佛山', '江门', '湛江', '茂名', '肇庆', '惠州', '梅州', '汕尾', '河源', '阳江', '清远', '东莞', '中山', '潮州', '揭阳', '云浮'],
    '上海': ['上海'],
    '天津': ['天津'],
    '重庆': ['重庆'],
    '辽宁': ['沈阳', '大连', '鞍山', '抚顺', '本溪', '丹东', '锦州', '营口', '阜新', '辽阳', '盘锦', '铁岭', '朝阳', '葫芦岛'],
    '江苏': ['南京', '苏州', '无锡', '常州', '镇江', '南通', '泰州', '扬州', '盐城', '连云港', '徐州', '淮安', '宿迁'],
    '湖北': ['武汉', '黄石', '十堰', '荆州', '宜昌', '襄樊', '鄂州', '荆门', '孝感', '黄冈', '咸宁', '随州', '恩施土家族苗族自治州', '仙桃', '天门', '潜江', '神农架林区'],
    '四川': ['成都', '自贡', '攀枝花', '泸州', '德阳', '绵阳', '广元', '遂宁', '内江', '乐山', '南充', '眉山', '宜宾', '广安', '达州', '雅安', '巴中', '资阳', '阿坝藏族羌族自治州', '甘孜藏族自治州', '凉山彝族自治州'],
    '陕西': ['西安', '铜川', '宝鸡', '咸阳', '渭南', '延安', '汉中', '榆林', '安康', '商洛'],
    '河北': ['石家庄', '唐山', '秦皇岛', '邯郸', '邢台', '保定', '张家口', '承德', '沧州', '廊坊', '衡水'],
    '山西': ['太原', '大同', '阳泉', '长治', '晋城', '朔州', '晋中', '运城', '忻州', '临汾', '吕梁'],
    '河南': ['郑州', '开封', '洛阳', '平顶山', '安阳', '鹤壁', '新乡', '焦作', '濮阳', '许昌', '漯河', '三门峡', '南阳', '商丘', '信阳', '周口', '驻马店'],
    '吉林': ['长春', '吉林', '四平', '辽源', '通化', '白山', '松原', '白城', '延边朝鲜族自治州'],
    '黑龙江': ['哈尔滨', '齐齐哈尔', '鹤岗', '双鸭山', '鸡西', '大庆', '伊春', '牡丹江', '佳木斯', '七台河', '黑河', '绥化', '大兴安岭地区'],
    '内蒙古': ['呼和浩特', '包头', '乌海', '赤峰', '通辽', '鄂尔多斯', '呼伦贝尔', '巴彦淖尔', '乌兰察布', '锡林郭勒盟', '兴安盟', '阿拉善盟'],
    '山东': ['济南', '青岛', '淄博', '枣庄', '东营', '烟台', '潍坊', '济宁', '泰安', '威海', '日照', '莱芜', '临沂', '德州', '聊城', '滨州', '菏泽'],
    '安徽': ['合肥', '芜湖', '蚌埠', '淮南', '马鞍山', '淮北', '铜陵', '安庆', '黄山', '滁州', '阜阳', '宿州', '巢湖', '六安', '亳州', '池州', '宣城'],
    '浙江': ['杭州', '宁波', '温州', '嘉兴', '湖州', '绍兴', '金华', '衢州', '舟山', '台州', '丽水'],
    '福建': ['福州', '厦门', '莆田', '三明', '泉州', '漳州', '南平', '龙岩', '宁德'],
    '湖南': ['长沙', '株洲', '湘潭', '衡阳', '邵阳', '岳阳', '常德', '张家界', '益阳', '郴州', '永州', '怀化', '娄底', '湘西土家族苗族自治州'],
    '广西': ['南宁', '柳州', '桂林', '梧州', '北海', '防城港', '钦州', '贵港', '玉林', '百色', '贺州', '河池', '来宾', '崇左'],
    '江西': ['南昌', '景德镇', '萍乡', '九江', '新余', '鹰潭', '赣州', '吉安', '宜春', '抚州', '上饶'],
    '贵州': ['贵阳', '六盘水', '遵义', '安顺', '铜仁地区', '毕节地区', '黔西南布依族苗族自治州', '黔东南苗族侗族自治州', '黔南布依族苗族自治州'],
    '云南': ['昆明', '曲靖', '玉溪', '保山', '昭通', '丽江', '普洱', '临沧', '德宏傣族景颇族自治州', '怒江傈僳族自治州', '迪庆藏族自治州', '大理白族自治州', '楚雄彝族自治州', '红河哈尼族彝族自治州', '文山壮族苗族自治州', '西双版纳傣族自治州'],
    '西藏': ['拉萨', '那曲地区', '昌都地区', '林芝地区', '山南地区', '日喀则地区', '阿里地区'],
    '海南': ['海口', '三亚', '五指山', '琼海', '儋州', '文昌', '万宁', '东方', '澄迈县', '定安县', '屯昌县', '临高县', '白沙黎族自治县', '昌江黎族自治县', '乐东黎族自治县', '陵水黎族自治县', '保亭黎族苗族自治县', '琼中黎族苗族自治县'],
    '甘肃': ['兰州', '嘉峪关', '金昌', '白银', '天水', '武威', '酒泉', '张掖', '庆阳', '平凉', '定西', '陇南', '临夏回族自治州', '甘南藏族自治州'],
    '宁夏': ['银川', '石嘴山', '吴忠', '固原', '中卫'],
    '青海': ['西宁', '海东地区', '海北藏族自治州', '海南藏族自治州', '黄南藏族自治州', '果洛藏族自治州', '玉树藏族自治州', '海西蒙古族藏族自治州'],
    '新疆': ['乌鲁木齐', '克拉玛依', '吐鲁番地区', '哈密地区', '和田地区', '阿克苏地区', '喀什地区', '克孜勒苏柯尔克孜自治州', '巴音郭楞蒙古自治州', '昌吉回族自治州', '博尔塔拉蒙古自治州', '石河子', '阿拉尔', '图木舒克', '五家渠', '伊犁哈萨克自治州'],
    '香港': ['香港'],
    '澳门': ['澳门'],
    '台湾': ['台北市', '高雄市', '台北县', '桃园县', '新竹县', '苗栗县', '台中县', '彰化县', '南投县', '云林县', '嘉义县', '台南县', '高雄县', '屏东县', '宜兰县', '花莲县', '台东县', '澎湖县', '基隆市', '新竹市', '台中市', '嘉义市', '台南市']
}
area_list=['北京', '广东', '上海', '天津', '重庆', '辽宁', '江苏', '湖北', '四川', '陕西', '河北', '山西', '河南', '吉林', '黑龙江', '内蒙古', '山东', '安徽', '浙江', '福建', '湖南', '广西', '江西', '贵州', '云南', '西藏', '海南', '甘肃', '宁夏', '青海', '新疆', '香港', '澳门', '台湾']
# Re-initialised here so each generation section is self-contained and safe to re-run.
area_key = []
area_value = []
for _ in range(10000):
    province = random.sample(area_list, 1)[0]
    city = random.sample(area_dict[province], 1)[0]
    # NOTE(review): '省' is appended even for municipalities such as 北京/上海 —
    # confirm "北京省" is acceptable for this fake data set.
    area_key.append(province + '省')
    area_value.append(city + '市')
################################## random dates ###############################################################
a1 = (2018, 1, 1, 0, 0, 0, 0, 0, 0)        # range start: 2018-01-01 00:00:00 (local time)
a2 = (2018, 12, 31, 23, 59, 59, 0, 0, 0)   # range end:   2018-12-31 23:59:59 (local time)
start = time.mktime(a1)                    # start timestamp
end = time.mktime(a2)                      # end timestamp
date = []
for _ in range(10000):
    # time.mktime returns a float, but random.randint requires integer bounds
    # (deprecated in 3.10, a TypeError on newer Pythons) — truncate first.
    t = random.randint(int(start), int(end))
    date.append(time.strftime("%Y/%m/%d", time.localtime(t)))
################################ random case types ############################################################
type_dict={
    "行政案件":["作为案件","不作为案件","行政赔偿案件"],
    "刑事案件":["危害国家安全罪","危害公共安全罪","生产、销售伪劣商品罪","走私罪","妨害对公司、企业的管理秩序罪",
             "破坏金融管理秩序罪","破坏金融管理秩序罪","金融诈骗罪","危害税收征管罪","侵犯知识产权罪","扰乱市场秩序罪",
             "侵犯公民人身权利","民主权利罪","扰乱公共秩序罪","妨害司法罪","妨害国 ( 边 ) 境管理罪","妨害文物管理罪",
             "危害公共卫生罪","破坏环境资源保护罪","走私、贩卖、运输、制造毒品罪","组织、强迫、引诱、容留、介绍卖淫罪",
             "制作、贩卖、传播淫秽物品罪危害国防利益罪"],
    "民事案件":["所有权确认纠纷","用益物权确认纠纷","担保物权确认纠纷","返还原物纠纷","排除妨害纠纷","消除危险纠纷","修理、重作、更换纠纷","恢复原状纠纷","财产损害赔偿纠纷"],
    "特殊案件":["特殊案件"]
}
# NOTE(review): "破坏金融管理秩序罪" appears twice in the 刑事案件 list, doubling
# its sampling weight — confirm this duplication is intended.
type_key=["行政案件","刑事案件","民事案件","特殊案件"]
type_list_key = []
type_list_value = []
for _ in range(10000):
    category = random.sample(type_key, 1)[0]
    type_list_key.append(category)
    type_list_value.append(random.sample(type_dict[category], 1)[0])
################################ solved / unsolved flag #######################################################
make_list=["已侦破","未侦破"]
make = [random.sample(make_list, 1)[0] for _ in range(10000)]
################################ season derived from the date #################################################
# Month buckets (original author's mapping, kept as-is):
# 10-12 -> 冬季, 7-9 -> 秋季, 4-6 -> 夏季, 1-3 -> 春季
season = []
for d in date:
    month = int(d[5:7])   # "YYYY/MM/DD" -> characters 5..6 are the month
    if month >= 10:
        season.append("冬季")
    elif month >= 7:
        season.append("秋季")
    elif month >= 4:
        season.append("夏季")
    else:
        season.append("春季")
############################### record numbers ################################################################
no = list(range(1, 10001))
print(no)
def Insert_max_data():
    """Wipe the Mongo collection, then insert all 10000 generated fake records.

    Reads the module-level parallel lists (no, name, type_list_key, ...) built
    above; each record is one document keyed by the shared index j.
    """
    mongo.REMOVE_DB()
    for j in range(10000):
        mongo.INSERT_DB_MAX({"no":str(no[j]),"name":name[j],"type_key":type_list_key[j],"type_value":type_list_value[j],"area_key":area_key[j],"area_value":area_value[j],"season":season[j],"date":date[j],"make":make[j]})
|
class Product:
    """A stock item with a name, price, workload and expiration date.

    The expiration date doubles as the search key (see get_searchkey).
    """

    def __init__(self, name, price, workload, expiration_date):
        self.name = name
        self.price = price
        self.workload = workload
        self.expiration_date = expiration_date

    def __repr__(self):
        # Debug-friendly representation; additive, not part of the accessor API.
        return (f"{type(self).__name__}(name={self.name!r}, price={self.price!r}, "
                f"workload={self.workload!r}, expiration_date={self.expiration_date!r})")

    def get_price(self):
        """
        Returns the price of the product.
        PRE: None.
        POST: Return value is the price of the product.
        :return: price
        """
        return self.price

    def get_expiration_date(self):
        """
        Returns the expiration date of the product.
        PRE: None.
        POST: Return value is the expiration date of the product.
        :return: expiration_date
        """
        return self.expiration_date

    def get_name(self):
        """
        Returns the name of the product.
        PRE: None.
        POST: Return value is the name of the product.
        :return: name
        """
        return self.name

    def get_workload(self):
        """
        Returns the workload of the product.
        PRE: None.
        POST: Return value is the workload of the product.
        :return: workload
        """
        return self.workload

    def get_searchkey(self):
        """
        Returns the search key of the product, which is the expiration date.
        PRE: None.
        POST: Return value is the search key of the product.
        :return: expiration_date
        """
        return self.expiration_date
class Chilipepper(Product):
    """A chili pepper: fixed name "chili", price 25, workload 1."""
    def __init__(self, expiration_date):
        super().__init__("chili", 25, 1, expiration_date)
class Honey(Product):
    """Honey: fixed name "honing", price 50, workload 1."""
    def __init__(self, expiration_date):
        super().__init__("honing", 50, 1, expiration_date)
class Marshmallow(Product):
    """A marshmallow: fixed name "marshmallow", price 75, workload 1."""
    def __init__(self, expiration_date):
        super().__init__("marshmallow", 75, 1, expiration_date)
class Chocolateshot(Product):
    """A chocolate shot; only the four known tastes are accepted."""
    def __init__(self, expiration_date, taste):
        # Guard clause: reject unknown tastes before delegating to Product.
        if taste not in ("melk", "zwart", "wit", "bruin"):
            raise ValueError('Invalid taste')
        super().__init__('shot ' + taste, 100, 1, expiration_date)
|
#!/usr/bin/env python3
"""
Version history:
================
-------------------------------------------------
30.7.2014 | 0.1 | initial version
02.12.2014 | 0.2 | converted for python 3.x
16.04.2019 | 0.3 | OpenAPI v2 target urls
15.10.2020 | 0.4 | SingleAPI
-------------------------------------------------
About:
==================
This is a simple script that can be used to place multiple wagers from input file in to Veikkaus gaming system.
Purpose of this script is to demonstrate how the JSON API provided by Veikkaus website works. Following functionality
is included:
- login
- moniveto (multiscore) wagers
- vakio (sport) wagers
Notes:
==================
This script requires 'requests' package from http://docs.python-requests.org/en/latest/ for session management, ensuring that the
site cookies are always properly included in the requests.
Using a http-client framework that supports authenticated sessions (i.e. cookies) out of the box is highly recommended.
Veikkaus' website may change the cookies (names and content) unnoticed, and the clients (browsers or custom scripts), shall always use the
cookies accordingly. It is not only the authentication, but also other requests may set and/or update the cookies.
Misbehavior may force us to close your gaming account.
Requests that do not require authentication can be made "without session". However, making the requests as authenticated provides
us valuable information on how you use the provided services, and thus further helps us to develop it to suite your needs better.
Usage:
==================
How to display usage:
robot.py -h
How to request winshares for set of Vakio (SPORT) boards:
robot.py -a WINSHARE -g SPORT -d 12345 -f sport_input.txt
How to list open draws for moniveto (MULTISORE):
robot.py -a LIST_DRAWS -g MULTISCORE
How to place moniveto (MULTISCORE) wagers for list index 2:
robot.py -a PLAY -g MULTISCORE -l 2 -u myaccount -p p4ssw0rd -f multiscore_input.txt -s 20
How to place Vakio (SPORT) wagers for list index 1 with miniVakio:
robot.py -a PLAY -g SPORT -l 1 -u myaccount -p p4ssw0rd -f sport_input.txt -s 25 -m
How to get account balance
robot.py -a BALANCE -u myaccount -p p4ssw0rd
It is possible to run multiple instances of this robot. But it is recommended to run maximum of 5 robots at a same time for a single account.
This is because debiting the single account becomes the bottleneck for robot-wagering, and multiple instances do no longer provide the benefit in speed.
"""
import sys
import requests
import json
import copy
import time
import datetime
import getopt
"""
properties
"""
# the veikkaus site address
host="https://www.veikkaus.fi"
# required headers
headers = {
'Content-type':'application/json',
'Accept':'application/json',
'X-ESA-API-Key':'ROBOT'
}
# wager teamplate with common fields
wager_template = {
"listIndex":0,
"gameName":"",
"price": 0,
"boards":[]
}
# winshare teamplate with common fields
winshare_template = {
"additionalPrizeTier": False,
"page":0,
"pageSize":100,
"selections":[]
}
"""
get account balance
"""
def get_balance ( session ):
r = session.get(host + "/api/v1/players/self/account", verify=True, headers=headers)
j = r.json()
return j["balances"]["CASH"]["usableBalance"]
"""
Create winshare request for vakio.
For most of the games, the odds are provided as flat files. It is highly recommended to use those as long as they are available.
Vakio winshare request takes only the "selections" as input, so we can reuse the create_sport_wager code.
"""
def get_sport_winshare ( draw, matches ):
    """Request and print win shares for one set of Vakio (SPORT) selections.

    :param draw: draw id as a string, spliced into the request URL
    :param matches: list of selection strings, one per match (e.g. "1X2")
    """
    # the winshare request takes a nearly identical body, so reuse
    # create_sport_wager (stake 0 -> winshare template) for the selections
    winshare_req = create_sport_wager("", 0, matches, False)
    print (winshare_req)
    # NOTE(review): posts via the module-level `requests`, not an authenticated
    # session — the module docstring above recommends session requests; confirm.
    r = requests.post(host + "/api/sport-winshare/v1/games/SPORT/draws/"+draw+"/winshare", verify=True, data=json.dumps(winshare_req), headers=headers)
    j = r.json()
    print(j)
    for winshare in j["winShares"]:
        # each winshare has only one selection that contains the board (outcomes)
        board = []
        for selection in winshare["selections"]:
            for outcome in selection["outcomes"]:
                board.append(outcome)
        print("value=%d,numberOfBets=%d,board=%s" % (winshare["value"], winshare["numberOfBets"],",".join(board)))
"""
Creates a vakio (sport) wager, for which the selections of row in input file:
2;1X2;X;1;X;2;X;1;X;2;1;12;X
{
"listIndex": 1,
"gameName": "SPORT",
"price": 150,
"boards": [
{
"betType": "FREE 6",
"stake": 25,
"selections": [
{ "outcomes":["2"] },
{ "outcomes":["1","X","2"] },
{ "outcomes":["X"] },
{ "outcomes":["1"] },
{ "outcomes":["X"] },
{ "outcomes":["2"] },
{ "outcomes":["X"] },
{ "outcomes":["1"] },
{ "outcomes":["X"] },
{ "outcomes":["2"] },
{ "outcomes":["1"] },
{ "outcomes":["1","2"] },
{ "outcomes":["X"] }
]
}
]
}
Notes:
- supports multiple regular wagers from input file, w/ or wo/ minivakio
- supports system wagers from input file, no minivakio
- no reduced system support
"""
def create_sport_wager ( listIndex, stake, matches, miniVakio ):
    """Build a SPORT request body from one input-file row.

    stake > 0  -> a wager request (one board, system size derived from the
                  selections); stake == 0 -> a winshare request.
    """
    def build_outcome(m):
        # A single character is one outcome; otherwise one outcome per
        # character, skipping the trailing newline from the input file.
        if len(m) == 1:
            return {"outcomes": [m]}
        return {"outcomes": [c for c in m if c != "\n"]}

    if stake > 0:
        req = copy.deepcopy(wager_template)
        req["gameName"] = "SPORT"
        req["listIndex"] = listIndex
        req["price"] = stake
        ## this implementation supports only one row (board) per wager
        if miniVakio:
            # miniVakio adds the extra prize tier and doubles the price
            req["additionalPrizeTier"] = True
            req["price"] = 2*stake
        board = {
            "stake": stake,
            "selections":[]
        }
        system_size = 1
        for m in matches:
            outcome = build_outcome(m)
            system_size *= len(outcome["outcomes"])
            board["selections"].append(outcome)
        ## betType follows the system size
        board["betType"] = "Regular" if system_size == 1 else "FREE " + str(system_size)
        req["boards"].append(board)
        return req

    # stake == 0: winshare request needs only the selections
    req = copy.deepcopy(winshare_template)
    for m in matches:
        req["selections"].append(build_outcome(m))
    return req
"""
Create a moniveto (multiscore) wager, for which the selections in input file:
0-0,1;2-3,4;4-2,5
{
"listIndex": 1,
"gameName": "MULTISCORE",
"price": 160,
"boards": [
{
"betType": "FULL 8",
"stake": 20,
"selections": [
{
"homeScores": [ 0 ],
"awayScores": [ 0,1 ]
},
{
"homeScores": [ 2 ],
"awayScores": [ 3,4 ]
},
{
"homeScores": [ 4 ],
"awayScores": [ 2,5 ]
}
]
}
]
}
Notes:
- supports multiple regular or system wagers from input file
- no reduced system support
"""
def create_multiscore_wager ( listIndex, stake, matches ):
    """Build a MULTISCORE wager request from "home-away" score selections.

    Each entry of `matches` looks like "2-3,4": home scores before the dash,
    away scores after, comma-separated alternatives on either side.
    """
    req = copy.deepcopy(wager_template)
    req["gameName"] = "MULTISCORE"
    req["listIndex"] = listIndex
    req["price"] = stake
    board = {
        "stake": stake,
        "betType":"Regular",
        "selections":[]
    }
    system_size = 1
    for match in matches:
        home, away = match.split("-")
        # int() tolerates the trailing newline on the last token
        scores = {
            "homeScores": [int(h) for h in home.split(",")],
            "awayScores": [int(a) for a in away.split(",")]
        }
        system_size *= len(scores["homeScores"]) * len(scores["awayScores"])
        board["selections"].append(scores)
    if system_size > 1:
        # a system wager: total price scales with the number of combinations
        req["price"] = stake * system_size
        board["betType"] = "FULL " + str(system_size)
    req["boards"].append(board)
    return req
"""
Places wagers on the system. Prints out the serial numbers of all accepted wagers and error codes for rejected wagers.
"""
def place_wagers ( wager_req, session ):
    """POST one wager; print the serial number on success, the error body otherwise."""
    started = time.time()
    response = session.post(host + "/api/sport-interactive-wager/v1/tickets", verify=True, data=json.dumps(wager_req), headers=headers)
    elapsed = time.time() - started
    if response.status_code == 200:
        ticket = response.json()
        print("%s - placed wager in %.3f seconds, serial %s\n" % (datetime.datetime.now(), elapsed, ticket["serialNumber"][:17]))
    else:
        print("Request failed:\n" + response.text)
"""
Logins to veikkaus website, and returns the session object.
It is important to use same session for all requests, so that the session cookies are handled properly.
If you want to manage the cookies manually, you have to take all the cookies from each response (even wagering) and update them accordingly.
"""
def login (username, password):
    """Authenticate against the Veikkaus site and return the requests session.

    The same session must be reused for all subsequent calls so its cookies
    are carried along; raises Exception on any non-200 response.
    """
    session = requests.Session()
    payload = {"type":"STANDARD_LOGIN","login":username,"password":password}
    response = session.post(host + "/api/bff/v1/sessions", verify=True, data=json.dumps(payload), headers=headers)
    if response.status_code != 200:
        raise Exception("Authentication failed", response.status_code)
    return session
"""
Parse arguments.
"""
def parse_arguments ( arguments ):
    """Parse the command-line flags into a params dict.

    All keys are pre-populated with defaults so callers can read them
    unconditionally; -h prints usage and exits. Unknown flags raise
    getopt.GetoptError.
    """
    optlist, args = getopt.getopt(arguments, 'ha:u:p:g:d:l:mf:s:')
    params = {
        "action":"",      # was missing entirely: robot() reads params["action"] unconditionally
        "username":"",
        "password":"",    # key was misspelled "passowrd", so the default never applied
        "game":"",
        "draw":"",
        "listIndex":0,
        "miniVakio": False,
        "input":"",
        "stake":0
    }
    for o, a in optlist:
        if o == '-h':
            print("-h prints this help")
            print("-a <action> (PLAY, WINSHARE, LIST_DRAWS, BALANCE)")
            print("-u <username>")
            print("-p <password>")
            print("-g <game> (MULTISCORE, SCORE, SPORT)")
            print("-d <draw number>")
            print("-l <list index>")
            print("-m (play with miniVakio)")
            print("-f <input file> containing the wagers")
            print("-s <stake> (in cents, same stake used for all wagers)")
            sys.exit(0)
        elif o == '-a':
            params["action"] = a
        elif o == '-u':
            params["username"] = a
        elif o == '-p':
            params["password"] = a
        elif o == '-g':
            params["game"] = a
        elif o == '-d':
            params["draw"] = a
        elif o == '-l':
            params["listIndex"] = int(a)
        elif o == '-m':
            params["miniVakio"] = True
        elif o == '-f':
            params["input"] = a
        elif o == '-s':
            params["stake"] = int(a)
    return params
"""
Lists open draws
The request only takes game name as an input, e.g.
This implementation only prints common fields such as game name, index, draw and status.
More (game specific) details are available in the returned JSON document.
"""
def list_draws ( params ):
    """Fetch and print the open draws for params["game"].

    Prints one summary line (game, listIndex, draw id, status, event count)
    per draw; on any failure prints the raw response text.
    """
    r = requests.get(host + "/api/sport-open-games/v1/games/"+params["game"]+"/draws", verify=True, headers=headers)
    if r.status_code == 200:
        try:
            j = r.json()
            for draw in j:
                print("game: %s, listIndex: %2s, draw: %6s, status: %s, number events: %d" % (draw["gameName"],draw["listIndex"],draw["id"],draw["status"],len(draw["rows"])))
        except Exception:
            # narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; covers bad JSON and missing fields
            print("request failed: " + r.text)
    else:
        print("request failed: " + r.text)
"""
Places the wagers based on input file.
Prints out balance in the end.
"""
def play ( params ):
    """Place one wager per non-comment line of the input file, then print the balance.

    Lines starting with '#' are skipped; the game name selects the request
    builder (MULTISCORE or SPORT).
    """
    session = login(params["username"], params["password"])
    # `with` guarantees the input file is closed (it previously leaked)
    with open(params["input"], "r") as f:
        for line in f:
            if line.startswith("#"): continue
            wager_req = copy.deepcopy(wager_template)
            if params["game"] == "MULTISCORE":
                wager_req = create_multiscore_wager(params["listIndex"], params["stake"], line.split(";"))
            elif params["game"] == "SPORT":
                wager_req = create_sport_wager(params["listIndex"], params["stake"], line.split(";"),params["miniVakio"])
            place_wagers(wager_req, session)
    balance = get_balance( session )
    print("\n\taccount balance: %.2f\n" % (balance/100.0))
"""
Login and get balance.
"""
def balance ( params ):
    """Log in and print the account balance in euros."""
    session = login(params["username"], params["password"])
    cents = get_balance( session )
    print("\n\taccount balance: %.2f\n" % (cents/100.0))
"""
Performs winshare request for each set of wagers from input file
"""
def winshare ( params ):
    """Run a winshare request for every non-comment line of the input file.

    Only the SPORT game is supported; '#' lines are skipped.
    """
    # `with` guarantees the input file is closed (it previously leaked)
    with open(params["input"], "r") as f:
        for line in f:
            if line.startswith("#"): continue
            if params["game"] == "SPORT":
                get_sport_winshare(params["draw"], line.split(";"))
"""
Robot main function
- login
- read the wagers from input file
- places wagers
"""
def robot( arguments ):
    """Entry point: parse the CLI arguments and dispatch to the chosen action.

    Unknown actions are silently ignored, matching the original if/elif chain.
    """
    params = parse_arguments( arguments )
    handlers = {
        "LIST_DRAWS": list_draws,
        "PLAY": play,
        "WINSHARE": winshare,
        "BALANCE": balance,
    }
    handler = handlers.get(params["action"])
    if handler is not None:
        handler(params)
"""
MAIN
"""
if __name__ == "__main__":
    robot(sys.argv[1:])
|
'''
Welcome to the ALAF program. An awesome program for awesome friends.
'''
import json
friends_file = "alaf.json"
def welcome():
    """Print the greeting banner."""
    print("Welcome to ALAF!")
def load_friends():
    """Read the friends list from disk; an absent file yields an empty list."""
    try:
        with open(friends_file) as fp:
            return json.load(fp)
    except FileNotFoundError:
        # first run: no saved list yet
        return []
def save_friends(friends):
    """Persist the friends list to disk as JSON, overwriting any previous file."""
    with open(friends_file, mode="w") as fp:
        json.dump(friends, fp)
def instructions():
    """Print the usage instructions (content still a placeholder)."""
    print("<INSTRUCTIONS GO HERE> (Remember to make them awesome)")
def menu():
    """Show the user a menu of things they can do, and return their choice.

    Loops until one of 'l', 'a', 'c' or 'q' is entered (case-insensitive,
    surrounding whitespace ignored).
    """
    print("Choose an option")
    print("(L)ist Friends")
    print("(A)dd Friend")
    print("(C)lear List")
    print("(Q)uit")
    while True:
        choice = input("Now choose: ").lower().strip()
        # membership must be tested against a tuple, not the string 'lacq':
        # '' in 'lacq' is True, so pressing Enter was accepted as a choice
        if choice in ('l', 'a', 'c', 'q'):
            return choice
        print("Invalid choice.")
def list_friends(friends):
    """Dump the raw friends list to stdout (tabulate formatting planned later)."""
    print(friends)
def add_friend():
    """Prompt for a name and return it wrapped in a new friend record."""
    entered = input("What is your friend's name? ")
    return {'Name': entered}
def exit_list():
    # Placeholder for the (Q)uit action; intentionally does nothing yet.
    # NOTE(review): currently never called from main() — confirm it is planned.
    pass
def main():
    '''Run one interaction cycle: greet, show the menu, apply the chosen
    action to the loaded friends list, then save it back to disk.
    Note: a single pass only — the program exits after one menu choice,
    and (Q)uit simply falls through to the save.'''
    welcome()
    instructions()
    friends = load_friends()
    action = menu()
    # List Friends
    if action == 'l':
        list_friends(friends)
    # Add Friend
    elif action == 'a':
        new_friend = add_friend()
        friends.append(new_friend)
        print(f"{new_friend['Name']} added.")
    # Clear List
    elif action == 'c':
        friends = []
        print("List cleared.")
    # always persist, whatever the action was
    save_friends(friends)
''' The better way, but optional
if __name__ == "__main__":
    main()
'''
# actually runs the program using the main function
# NOTE(review): running main() unconditionally means merely importing this
# module starts the interactive program; the guarded form above avoids that.
main()
|
# Count how many characters of the entered string are digits and how many
# are ASCII letters; everything else is ignored.
str1 = input("enter string")
digit_count = 0
letter_count = 0
digits = "0123456789"
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
for ch in str1:
    if ch in digits:
        digit_count += 1
    if ch in letters:
        letter_count += 1
print("number of alphabets: {} \n number of numbers: {}".format(letter_count, digit_count))
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/main.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(956, 660)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.tab = QtWidgets.QTabWidget(self.centralwidget)
self.tab.setObjectName("tab")
self.tabQuery = QtWidgets.QWidget()
self.tabQuery.setObjectName("tabQuery")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.tabQuery)
self.horizontalLayout_3.setContentsMargins(16, 16, 16, 16)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.trQuery = QtWidgets.QTreeWidget(self.tabQuery)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.trQuery.sizePolicy().hasHeightForWidth())
self.trQuery.setSizePolicy(sizePolicy)
self.trQuery.setMinimumSize(QtCore.QSize(50, 0))
self.trQuery.setObjectName("trQuery")
self.trQuery.headerItem().setText(0, "1")
self.horizontalLayout_3.addWidget(self.trQuery)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_10 = QtWidgets.QLabel(self.tabQuery)
self.label_10.setMinimumSize(QtCore.QSize(0, 25))
self.label_10.setObjectName("label_10")
self.horizontalLayout_2.addWidget(self.label_10)
self.edtFilter = QtWidgets.QLineEdit(self.tabQuery)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.edtFilter.sizePolicy().hasHeightForWidth())
self.edtFilter.setSizePolicy(sizePolicy)
self.edtFilter.setMinimumSize(QtCore.QSize(200, 25))
self.edtFilter.setObjectName("edtFilter")
self.horizontalLayout_2.addWidget(self.edtFilter)
self.btnRefresh = QtWidgets.QPushButton(self.tabQuery)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnRefresh.sizePolicy().hasHeightForWidth())
self.btnRefresh.setSizePolicy(sizePolicy)
self.btnRefresh.setMinimumSize(QtCore.QSize(0, 25))
self.btnRefresh.setObjectName("btnRefresh")
self.horizontalLayout_2.addWidget(self.btnRefresh)
self.btnAdd = QtWidgets.QPushButton(self.tabQuery)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnAdd.sizePolicy().hasHeightForWidth())
self.btnAdd.setSizePolicy(sizePolicy)
self.btnAdd.setMinimumSize(QtCore.QSize(0, 25))
self.btnAdd.setObjectName("btnAdd")
self.horizontalLayout_2.addWidget(self.btnAdd)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.cbFilter = QtWidgets.QCheckBox(self.tabQuery)
self.cbFilter.setMinimumSize(QtCore.QSize(0, 25))
self.cbFilter.setObjectName("cbFilter")
self.horizontalLayout_2.addWidget(self.cbFilter)
self.btnAdvFilter = QtWidgets.QPushButton(self.tabQuery)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnAdvFilter.sizePolicy().hasHeightForWidth())
self.btnAdvFilter.setSizePolicy(sizePolicy)
self.btnAdvFilter.setMinimumSize(QtCore.QSize(0, 25))
self.btnAdvFilter.setObjectName("btnAdvFilter")
self.horizontalLayout_2.addWidget(self.btnAdvFilter)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.twQuery = QtWidgets.QTableWidget(self.tabQuery)
self.twQuery.setObjectName("twQuery")
self.twQuery.setColumnCount(0)
self.twQuery.setRowCount(0)
self.verticalLayout_2.addWidget(self.twQuery)
self.horizontalLayout_3.addLayout(self.verticalLayout_2)
self.tab.addTab(self.tabQuery, "")
self.tabStats = QtWidgets.QWidget()
self.tabStats.setObjectName("tabStats")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.tabStats)
self.horizontalLayout_4.setContentsMargins(16, 16, 16, 16)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.trStats = QtWidgets.QTreeWidget(self.tabStats)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.trStats.sizePolicy().hasHeightForWidth())
self.trStats.setSizePolicy(sizePolicy)
self.trStats.setMinimumSize(QtCore.QSize(50, 0))
self.trStats.setObjectName("trStats")
self.trStats.headerItem().setText(0, "1")
self.horizontalLayout_4.addWidget(self.trStats)
self.twStats = QtWidgets.QTableWidget(self.tabStats)
self.twStats.setObjectName("twStats")
self.twStats.setColumnCount(0)
self.twStats.setRowCount(0)
self.horizontalLayout_4.addWidget(self.twStats)
self.tab.addTab(self.tabStats, "")
self.tabLoad = QtWidgets.QWidget()
self.tabLoad.setObjectName("tabLoad")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.tabLoad)
self.verticalLayout_6.setContentsMargins(16, 16, 16, 16)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.btnBrowse = QtWidgets.QPushButton(self.tabLoad)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnBrowse.sizePolicy().hasHeightForWidth())
self.btnBrowse.setSizePolicy(sizePolicy)
self.btnBrowse.setMinimumSize(QtCore.QSize(0, 25))
self.btnBrowse.setObjectName("btnBrowse")
self.horizontalLayout_5.addWidget(self.btnBrowse)
self.edtFile = QtWidgets.QLineEdit(self.tabLoad)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.edtFile.sizePolicy().hasHeightForWidth())
self.edtFile.setSizePolicy(sizePolicy)
self.edtFile.setMinimumSize(QtCore.QSize(500, 25))
self.edtFile.setObjectName("edtFile")
self.horizontalLayout_5.addWidget(self.edtFile)
self.btnImport = QtWidgets.QPushButton(self.tabLoad)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnImport.sizePolicy().hasHeightForWidth())
self.btnImport.setSizePolicy(sizePolicy)
self.btnImport.setMinimumSize(QtCore.QSize(0, 25))
self.btnImport.setObjectName("btnImport")
self.horizontalLayout_5.addWidget(self.btnImport)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem1)
self.verticalLayout_6.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.cb1 = QtWidgets.QCheckBox(self.tabLoad)
self.cb1.setMinimumSize(QtCore.QSize(0, 25))
self.cb1.setObjectName("cb1")
self.verticalLayout_4.addWidget(self.cb1)
self.cb2 = QtWidgets.QCheckBox(self.tabLoad)
self.cb2.setMinimumSize(QtCore.QSize(0, 25))
self.cb2.setObjectName("cb2")
self.verticalLayout_4.addWidget(self.cb2)
self.cb3 = QtWidgets.QCheckBox(self.tabLoad)
self.cb3.setMinimumSize(QtCore.QSize(0, 25))
self.cb3.setObjectName("cb3")
self.verticalLayout_4.addWidget(self.cb3)
self.cb4 = QtWidgets.QCheckBox(self.tabLoad)
self.cb4.setMinimumSize(QtCore.QSize(0, 25))
self.cb4.setObjectName("cb4")
self.verticalLayout_4.addWidget(self.cb4)
self.cb5 = QtWidgets.QCheckBox(self.tabLoad)
self.cb5.setMinimumSize(QtCore.QSize(0, 25))
self.cb5.setObjectName("cb5")
self.verticalLayout_4.addWidget(self.cb5)
self.cb6 = QtWidgets.QCheckBox(self.tabLoad)
self.cb6.setMinimumSize(QtCore.QSize(0, 25))
self.cb6.setObjectName("cb6")
self.verticalLayout_4.addWidget(self.cb6)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem2)
self.horizontalLayout_6.addLayout(self.verticalLayout_4)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.txtLoadMsg = QtWidgets.QTextEdit(self.tabLoad)
self.txtLoadMsg.setObjectName("txtLoadMsg")
self.verticalLayout_5.addWidget(self.txtLoadMsg)
self.btnClearMsg = QtWidgets.QPushButton(self.tabLoad)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnClearMsg.sizePolicy().hasHeightForWidth())
self.btnClearMsg.setSizePolicy(sizePolicy)
self.btnClearMsg.setMinimumSize(QtCore.QSize(0, 25))
self.btnClearMsg.setObjectName("btnClearMsg")
self.verticalLayout_5.addWidget(self.btnClearMsg)
self.horizontalLayout_6.addLayout(self.verticalLayout_5)
self.verticalLayout_6.addLayout(self.horizontalLayout_6)
self.tab.addTab(self.tabLoad, "")
self.verticalLayout.addWidget(self.tab)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 956, 26))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actLoad = QtWidgets.QAction(MainWindow)
self.actLoad.setObjectName("actLoad")
self.action2016 = QtWidgets.QAction(MainWindow)
self.action2016.setObjectName("action2016")
self.thisYear = QtWidgets.QAction(MainWindow)
self.thisYear.setCheckable(False)
self.thisYear.setObjectName("thisYear")
self.action2019 = QtWidgets.QAction(MainWindow)
self.action2019.setObjectName("action2019")
self.menu.addAction(self.thisYear)
self.menubar.addAction(self.menu.menuAction())
self.retranslateUi(MainWindow)
self.tab.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install all user-visible (translatable) strings on the widgets.

    NOTE(review): this looks like pyuic-generated code -- prefer regenerating
    from the .ui file over editing by hand (confirm).
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "万泊"))
    self.label_10.setText(_translate("MainWindow", "筛选"))
    self.btnRefresh.setText(_translate("MainWindow", "刷新"))
    self.btnAdd.setText(_translate("MainWindow", "新增"))
    self.cbFilter.setText(_translate("MainWindow", "启用"))
    self.btnAdvFilter.setText(_translate("MainWindow", "高级筛选"))
    self.tab.setTabText(self.tab.indexOf(self.tabQuery), _translate("MainWindow", "数据管理"))
    self.tab.setTabText(self.tab.indexOf(self.tabStats), _translate("MainWindow", "统计汇总"))
    self.btnBrowse.setText(_translate("MainWindow", "浏览"))
    self.btnImport.setText(_translate("MainWindow", "确认导入"))
    self.cb1.setText(_translate("MainWindow", "现金账户1明细账"))
    self.cb2.setText(_translate("MainWindow", "现金账户2明细账"))
    self.cb3.setText(_translate("MainWindow", "银行明细账"))
    self.cb4.setText(_translate("MainWindow", "应收账款汇总表"))
    self.cb5.setText(_translate("MainWindow", "合同明细"))
    self.cb6.setText(_translate("MainWindow", "开票明细表"))
    self.btnClearMsg.setText(_translate("MainWindow", "清空"))
    self.tab.setTabText(self.tab.indexOf(self.tabLoad), _translate("MainWindow", "导入"))
    self.menu.setTitle(_translate("MainWindow", "年份"))
    self.actLoad.setText(_translate("MainWindow", "导入"))
    self.action2016.setText(_translate("MainWindow", "2016"))
    self.thisYear.setText(_translate("MainWindow", "今年"))
    self.action2019.setText(_translate("MainWindow", "2019"))
|
987,632 | 6313175f119104f7ef7c3994ff802e9a590cca5e | from __future__ import annotations
import importlib
from typing import Union
import math
from config import PRIORITY_VALUES, DEFAULT_PATHS
from data import DataRequirement
from duration import Duration
from failure import Failure
from resource import ResourceRequirement
DATA_MODULE = importlib.import_module(DEFAULT_PATHS['data_function'])
class Activity:
    """An activity contained in a process model.

    Wraps duration, data requirements, resources, failure behaviour, timeout
    and priority into one simulation unit.
    """

    # Initialization and instance variables
    def __init__(self, id: str, name: str, distribution: Union[dict, int] = 0, data_input: list = None, data_output: list = None, resources: list = None, failure_rate: float = 0, retries: int = 0, timeout: int = None, priority: str = 'normal') -> None:
        """Build an Activity; every optional argument tolerates None.

        Raises:
            TypeError: if ``priority`` is neither None nor a key of PRIORITY_VALUES.
        """
        self.id = id
        self.name = name
        self.duration = Duration(distribution)
        self.data_input = DataRequirement.from_list(data_input)
        self.data_output = DataRequirement.from_list(data_output)
        # Looks up a function named after this activity's id in the configured
        # data module, but only when the activity produces output data.
        self.process_data = getattr(DATA_MODULE, self.id) if data_output is not None else None
        self.resources = ResourceRequirement.from_list(resources)
        self.failure = Failure(failure_rate if failure_rate is not None else 0)
        self.retries = retries if retries is not None else 0
        # math.inf means "never times out".
        self.timeout = timeout if timeout is not None else math.inf
        if priority is None:
            self.priority = PRIORITY_VALUES['normal']
        elif priority.lower() in PRIORITY_VALUES:
            self.priority = PRIORITY_VALUES[priority.lower()]
        else:
            raise TypeError('Value %s is not supported for priority.' % priority)

    # Public methods
    def generate_duration(self) -> int:
        # Returns an instance of the randomly generated duration time
        return self.duration.generate()

    def generate_failure(self) -> bool:
        """Return True when this invocation should be treated as failed."""
        return self.failure.check_failure()

    def update(self, fields: dict) -> None:
        """Update attributes from a dict, re-wrapping values that need rich types.

        Keys 'data_input'/'data_output', 'duration', 'failure', 'priority' and
        'resources' are converted through their wrapper classes; any other key
        is assigned verbatim.
        """
        for key, value in fields.items():
            if key == 'data_input' or key == 'data_output':
                setattr(self, key, DataRequirement.from_list(value))
            elif key == 'duration':
                setattr(self, key, Duration(value))
            elif key == 'failure':
                setattr(self, key, Failure(value))
            elif key == 'priority':
                setattr(self, key, PRIORITY_VALUES[value.lower()])
            elif key == 'resources':
                setattr(self, key, ResourceRequirement.from_list(value))
            else:
                setattr(self, key, value)

    @staticmethod
    def end() -> Activity:
        """Return the sentinel END activity used to terminate a process."""
        return Activity("END", "END")

    # Private methods
    def __repr__(self):
        # Dump every instance attribute, e.g. "id: A1, name: Check, ...".
        return ', '.join("%s: %s" % item for item in vars(self).items())
|
987,633 | 37dfc77abec7de1942e6f3eb0144c4a6e44e5bd2 | import sys
# Write the team names to a text file, one name per line.
MyTeams = ['76ers', 'Cleveland', 'Celtics', 'Spurs', 'Lakers']
# 'with' guarantees the file is closed even if a write fails.
with open("list of teams.txt", "w") as file:
    for team in MyTeams:
        file.write(team + "\n")
|
987,634 | 1bb273c819ce17191e7abf9dfabc2e0e6fac3a70 | import time
from os import system,name
class Validacion:
    """Console input-validation helpers (all static methods)."""

    @staticmethod
    def limpiar_pantalla():
        """Clear the terminal: 'cls' on Windows ('nt'), 'clear' elsewhere."""
        if name == "nt":
            system("cls")
        else:
            system("clear")

    @staticmethod
    def entero(numero):
        """Return numero as int when it is 1, 2 or 3; otherwise warn and return False."""
        try:
            numero = int(numero)
        except (TypeError, ValueError):
            # Was a bare 'except:'; only conversion failures should be swallowed.
            print("ATENCIÓN: Debe ingresar un número entero (1, 2 o 3).")
            return False
        if numero in range(1, 4):
            return numero
        print("ATENCIÓN: Debe ingresar un número entero (1, 2 o 3).")
        return False

    @staticmethod
    def cantidad(numero):
        """Return numero as int, or warn and return False when not convertible.

        NOTE(review): cantidad('0') returns 0, which is falsy just like the
        failure value False -- callers must compare with 'is False', not truthiness.
        """
        try:
            return int(numero)
        except (TypeError, ValueError):
            print("ATENCIÓN: Debe ingresar un número entero.")
            return False

    @staticmethod
    def precio(numero):
        """Return numero as float, or warn and return False when not convertible."""
        try:
            return float(numero)
        except (TypeError, ValueError):
            print("ATENCIÓN: Debe ingresar un número.")
            return False

    @staticmethod
    def bloqueo(segundos):
        """Count down *segundos* seconds, redrawing an MM:SS timer on one line."""
        while segundos:
            mins, secs = divmod(segundos, 60)
            timer = '{:02d}:{:02d}'.format(mins, secs)
            print("Demasiados intentos, penalizacion de: ", timer, end="\r")
            time.sleep(1)
            segundos -= 1
|
987,635 | 39da9aedbf275c6ff9a7806a51d62167660c0e2d | from .base import *
# Production overrides layered on top of the shared base settings.
DEBUG = False

# Recipients of error e-mails when DEBUG is False.
ADMINS = (
    ('Aman Srivastava', 'amanprodigy@gmail.com'),
)

# Serve the site from any subdomain of edureka.com.
ALLOWED_HOSTS = ['.edureka.com']

# NOTE(review): database credentials are hard-coded in source -- move them to
# environment variables or a secrets store before deploying.
DATABASES['default'] = {
    'ENGINE': 'django.db.backends.postgresql_psycopg2',
    'NAME': 'edureka',
    'USER': 'django',
    'PASSWORD': 'django',
    'HOST': 'localhost',
    'PORT': '5432'
}

"""
Run the uwsgi server with the following line
sudo uwsgi --module=edureka.wsgi:application \
--env=DJANGO_SETTINGS_MODULE=edureka.settings.prod \
--master --pidfile=/tmp/project-master.pid \
--http=127.0.0.1:8005 \
--uid=1000 \
--wsgi-file /Users/aman/Coding/django2byexample/edureka/edureka/wsgi.py \
--virtualenv=/Users/aman/miniconda2/envs/edureka3.5
"""

# Force HTTPS and mark the CSRF cookie secure in production.
SECURE_SSL_REDIRECT = True
CSRF_COOKIE_SECURE = True
|
987,636 | c2f740fa2c96b4d336e612a675a40deb698d9132 | # 38. Write a Python program to check three given integers and return true if one of them is 20 or more less than one of the others.
|
987,637 | ec74ec9c6ef932da8cea1680538125a158cfce6e | # Complete the checkMagazine function below.
def checkMagazine(magazine, note):
    """Print "Yes" if the note can be composed from the magazine's words, else "No".

    Each magazine word may be used at most once; comparison is by exact word.
    Prints the verdict (HackerRank contract) and returns None.
    """
    # Bug fix: Counter was never imported in this file; import it locally so the
    # function is self-contained.
    from collections import Counter
    magazine_counts = Counter(magazine)
    note_counts = Counter(note)
    for word, needed in note_counts.items():
        if magazine_counts.get(word, 0) < needed:
            print("No")
            return
    print("Yes")
987,638 | 1589e2d4f004e1b257b52fd263649e6526fba97f | # def
# 함수내에서 i, mylist 값 변경
def f(i, mylist):
    """Rebind i locally (caller's int is unaffected) and append 0 to mylist in place."""
    i += 1
    mylist.append(0)
k = 10
m = [1,2,3]
f(k, m)
print(k, m) # output: 10 [1, 2, 3, 0] -- the int is unchanged, the list was mutated in place
# default Parameter
def calc(i, j, factor = 1):
    """Return i * j scaled by factor (factor defaults to 1)."""
    product = i * j
    return product * factor
result = calc(10, 20)  # factor falls back to its default of 1 -> 200
print(result)
# Named Parameter
def report(name, age, score):
    """Print the name and score; age is accepted but not displayed."""
    print(name, score)
report(age=10, name="Kim", score=80)  # keyword arguments may be passed in any order
#report(age=10, 'kim', score=80) # Error! -- a positional argument cannot follow a keyword argument
# Variable length parameter
def total(*numbers):
    """Return the sum of all positional arguments (0 when none are given)."""
    return sum(numbers)
t = total(1,5,2,6)  # 1 + 5 + 2 + 6 == 14
print(t)
# return
def calc2(*numbers):
    """Return a (count, total) tuple for the positional arguments."""
    return len(numbers), sum(numbers)
count, sum = calc2(1,5,2,6) # unpacks the returned (count, tot) tuple; NOTE: 'sum' shadows the builtin here
print('return',count, sum)
987,639 | 3123b3c16e794124335bd0746c70df2bae291a38 | #-*-coding:utf-8-*-
import random
import requests
import ssl
from requests.auth import HTTPBasicAuth
import configparser
import time
import traceback
import re
import json
import codecs
cp=configparser.ConfigParser()
# Read the bot configuration from a UTF-8 ini file.
with codecs.open('F:/test/btex.ini', 'r', encoding='utf-8') as f:
    cp.readfp(f)
# trading pair
trade_pair = 'ETH_USDT'
# NOTE(review): account credentials are hard-coded in source -- move them into
# the config file or environment variables.
account = '13818922256'
password = '198862ss'
trade_password = '198862ss'
trade_num = cp.get('TRADE_CONF','TRADE_NUM')  # NOTE: ConfigParser.get returns a str
# global HTTP session shared by every request below (keeps login cookies)
session = requests.session()
proxies = { "http": "http://10.10.1.10:3128", "https": "http://127.0.0.1:1080", }  # defined but not passed to any request below
def post_trade():
    """Fetch recent trades for the BT_ETH pair and print the raw response body."""
    ssl._create_default_https_context = ssl._create_unverified_context
    endpoint = "https://btex.com/api1/trades?pair=BT_ETH"
    resp = session.post(endpoint, [], verify=True)
    print(resp.text)
def post_price():
    """Fetch 5-minute candlestick data for ETD_DOGE and print the raw response."""
    ssl._create_default_https_context = ssl._create_unverified_context
    endpoint = "https://btex.com/api1/k_data/?pair=ETD_DOGE&k_type=5m&rand_key=32320339"
    resp = session.post(endpoint, [], verify=True)
    print(resp.text)
def order_book():
    """Return (best_bid, best_ask) floats from the 15-level order book."""
    ssl._create_default_https_context = ssl._create_unverified_context
    endpoint = "https://btex.com/api1/orderbook?pair="+trade_pair+"&depth=15"
    resp = session.post(endpoint, [], verify=True)
    book = json.loads(resp.text)
    best_buy = book['data']['buy'][0]['price']
    best_sell = book['data']['sell'][0]['price']
    return float(best_buy), float(best_sell)
def trade_history():
    """Fetch the trade history for the configured pair and print the raw response."""
    ssl._create_default_https_context = ssl._create_unverified_context
    endpoint = "https://btex.com/api1/trades?pair="+trade_pair+""
    resp = session.post(endpoint, [], verify=True)
    print(resp.text)
def login():
    """Log into btex.com with the module-level credentials; session keeps the cookies."""
    ssl._create_default_https_context = ssl._create_unverified_context
    credentials = {
        'email_mobile': account,
        'psw': password,
        'auth_num': '',
        'check_pic': '',
    }
    resp = session.post("https://btex.com/pubapi1/user_login", credentials, verify=True)
    print(resp.text)
def trade():
    """Scrape a CSRF token from the trade page, then submit a hard-coded TCO buy order."""
    page = session.post('https://btex.com/trade/'+trade_pair, [], verify=True)
    token_re = re.compile(r'<input type=\"hidden\" id=\"csrf\" value=\"(.+?)\" />')
    match = token_re.search(page.text)
    print(match.group())
    # Token is sliced out of the raw match at fixed offsets.
    csrf = match.group()[38:70]
    payload = {
        'price': 0.00000900,
        'num': 2000,
        'type': 'TCO',
        'danwei': 'ETH',
        'csrf': csrf,
        'trade_psw': trade_password,
    }
    # verify=True kept as the original's temporary workaround for HTTPS issues
    resp = session.post("https://btex.com/priapi1/buy_coin", payload, verify=True)
    print(resp.text)
def trade_eth_tco(csrf,price,num):
    """Place a buy order for ETH priced in USDT via the private API; prints the response."""
    payload = {
        'price': price,
        'num': num,
        'type': 'ETH',
        'danwei': 'USDT',
        'csrf': csrf,
        'trade_psw': trade_password,
    }
    # verify=True kept as the original's temporary workaround for HTTPS issues
    resp = session.post("https://btex.com/priapi1/buy_coin", payload, verify=True)
    print(resp.text)
def trade_tco_eth(csrf,price,num):
    """Place a sell order for ETH priced in USDT via the private API; prints the response."""
    payload = {
        'price': price,
        'num': num,
        'type': 'ETH',
        'danwei': 'USDT',
        'csrf': csrf,
        'trade_psw': trade_password,
    }
    # verify=True kept as the original's temporary workaround for HTTPS issues
    resp = session.post("https://btex.com/priapi1/sell_coin", payload, verify=True)
    print(resp.text)
def get_csrf():
    """Scrape and return the CSRF token from the trade page's hidden input."""
    page = session.post('https://btex.com/trade/'+trade_pair, [], verify=True)
    token_re = re.compile(r'<input type=\"hidden\" id=\"csrf\" value=\"(.+?)\" />')
    match = token_re.search(page.text)
    print(match.group())
    # The 32-char token sits at fixed offsets inside the matched tag.
    return match.group()[38:70]
def get_rand_price(buy_price,sell_price):
    """Return a quote one tick (0.001) above the best bid.

    sell_price is currently unused (randomised quoting was disabled upstream).
    """
    scaled = buy_price * 1000 + 1
    return scaled / 1000
def cancel_order(csrf,order_id):
    """Cancel the given order id; the API response is deliberately ignored."""
    payload = {'csrf': csrf, 'order_id': order_id}
    session.post('https://btex.com/priapi1/cancel_order/', payload, verify=True)
def get_orders():
    """Scrape the open-orders page and return the first cancellable order id, or None."""
    resp = session.get('https://btex.com/home/orders',verify=True)
    pattern = re.compile(r'<a style=\"cursor:pointer\" id=\'cancel_(.+?)\' class=\'btn btn-danger\'')
    match = pattern.search(resp.text)
    if match is None:
        return None
    digits = re.findall('\d+', match.group())
    if digits is not None:
        return digits[0]
    return None
if __name__ == '__main__':
    # Self-trading loop: while the spread is open, place a matched sell and buy
    # one tick above the best bid, then re-read the book.
    login()
    csrf = get_csrf()
    buy_price,sell_price = order_book()
    trade_price = get_rand_price(buy_price,sell_price)
    buy_info = 'Operation Successfully'
    sell_info = 'Operation Successfully'
    # NOTE(review): trade_tco_eth()/trade_eth_tco() contain no return statement,
    # so sell_info/buy_info become None after the first pass and the success-string
    # comparisons below go False -- the loop can only ever run once. Confirm whether
    # those functions were meant to return the API result.
    while buy_price < sell_price and buy_info == 'Operation Successfully' and sell_info == 'Operation Successfully' and (trade_price > buy_price and trade_price < sell_price):
        sell_info = trade_tco_eth(csrf,trade_price,trade_num)
        buy_info = trade_eth_tco(csrf,trade_price,trade_num)
        # (commented-out cancel/retry logic preserved from the original)
        # buy_price,sell_price = order_book()
        # if sell_price < trade_price:
        #     order_id = get_orders()
        #     cancel_order(csrf,order_id)
        # else:
        #     buy_info = trade_eth_tco(csrf,trade_price,trade_num)
        time.sleep(0.2)
        buy_price,sell_price = order_book()
        trade_price = get_rand_price(buy_price,sell_price)
        # count = count + 1
        print(buy_price,sell_price,trade_price)
|
987,640 | 3b251acd8414500013e6cf53f77dce18de140321 | import os
import json
import flaskr
import unittest
import tempfile
class FlaskrTestCase(unittest.TestCase):
    """Integration tests for the flaskr application using Flask's test client."""

    def setUp(self):
        # Point the app at a fresh temporary database file for every test.
        self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()
        flaskr.app.config['TESTING'] = True
        self.app = flaskr.app.test_client()

    def tearDown(self):
        # Restored cleanup: close the descriptor left open by mkstemp and remove
        # the temp database file (previously leaked on every test).
        os.close(self.db_fd)
        os.unlink(flaskr.app.config['DATABASE'])

    def test_empty_db(self):
        # NOTE(review): asserts the endpoint returns the JSON string "1" --
        # confirm that is the intended payload for an empty database.
        rv = self.app.get("/timeseries")
        json_response = json.loads(rv.data)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual("1", json_response)


if __name__ == '__main__':
    unittest.main()
|
987,641 | e1db117c6fa892f3be746ca30006e2ae9628ec33 | import smtplib
from email.mime.text import MIMEText
class EmailManager:
def __init__(self):
return
def send_email_to_single_address_localhost(self, to_addr, from_addr, subject, textfile):
fp = open(textfile, 'rb')
msg = MIMEText(fp.read())
fp.close()
msg['Subject'] = subject
msg['From'] = to_addr
msg['To'] = from_addr
s = smtplib.SMTP('localhost')
s.sendmail(from_addr, [to_addr], msg.as_string())
s.quit()
def send_email_to_single_address_gmail(self, to_addr, gmail_user, gmail_pwd, subject, body):
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
smtpserver.login(gmail_user, gmail_pwd)
header = 'To:' + to_addr + '\n' + 'From: ' + gmail_user + '\n' + subject + ' \n'
msg = """\From: %s\nTo: %s\nSubject: %s\n\n%s""" % (gmail_user, to_addr, subject, body)
# print msg
smtpserver.sendmail(gmail_user, to_addr, msg)
print 'done!'
smtpserver.close()
#em = EmailManager()
#em.send_email_to_single_address_gmail('6509317719@tmomail.net', 'huahanzh@gmail.com', 'testemail123', 'test', 'isss body')
|
987,642 | 2bfdf5f05d657a916d3b89394a6bdd2003099665 | # programa que jogue jokenpô
import random
# Rock-paper-scissors (jokenpô) against the computer.
opcao = int(input('''Escolha:
--- 1. Pedra
--- 2. Papel
--- 3. Tesoura
--->'''))
# Bug fix: randrange(1, 3) excludes the upper bound, so the computer could
# never play Tesoura (3). randint(1, 3) is inclusive on both ends.
sorteio = random.randint(1, 3)
if opcao == 1:
    escolha = 'Pedra'
elif opcao == 2:
    escolha = 'Papel'
elif opcao == 3:
    escolha = 'Tesoura'
if sorteio == 1:
    escolha2 = 'Pedra'
elif sorteio == 2:
    escolha2 = 'Papel'
elif sorteio == 3:
    escolha2 = 'Tesoura'
print('_____________________')
print('     JOKENPÔ!!!!')
print('')
print('JOGADOR: ', escolha)
print('PC: ', escolha2)
print('_____________________')
print('')
if sorteio == 1 and opcao == 1 or sorteio == 2 and opcao == 2 or sorteio == 3 and opcao == 3:
    print('EMPATE!, pois {} é igual a {}'.format(escolha, escolha2))
# Player wins: pedra(1) beats tesoura(3); papel(2) beats pedra(1); tesoura(3) beats papel(2).
elif opcao == 1 and sorteio == 3:
    print('Jogador VENCEU, pois {} ganha de {}'.format(escolha, escolha2))
elif opcao == 2 and sorteio == 1:
    print('Jogador VENCEU, pois {} ganha de {}'.format(escolha, escolha2))
elif opcao == 3 and sorteio == 2:
    print('Jogador VENCEU, pois {} ganha de {}'.format(escolha, escolha2))
# PC wins: the remaining (player, pc) pairs are (1,2), (2,3) and (3,1).
elif opcao == 1 and sorteio == 2:
    print('PC VENCEU, pois {} ganha de {}'.format(escolha2, escolha))
# Bug fix: this branch previously repeated (opcao == 2 and sorteio == 1) -- an
# unreachable duplicate of a player-win case -- leaving papel-vs-tesoura unhandled.
elif opcao == 2 and sorteio == 3:
    print('PC VENCEU, pois {} ganha de {}'.format(escolha2, escolha))
elif opcao == 3 and sorteio == 1:
    print('PC VENCEU, pois {} ganha de {}'.format(escolha2, escolha))
987,643 | e570bdc040b203fe014ae13eeb4dbaecc1880370 | import sys
import re
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# 0->pythonFile 1->folderPath 2->title 3->yLabel 4->labels 5->data 6->NameRegex
# CLI contract (positional): 1=output folder, 2=chart title, 3=y-axis label,
# 4='|'-separated labels, 5='|'-separated data values, 6=regex identifying a
# per-run token in the label. Saves a bar chart as <folder><title>.png.
numArgs = len(sys.argv)
if numArgs != 7:
    print("numArgs Error!")
    sys.exit(10) # numArgs
folderPath = sys.argv[1]
titl = sys.argv[2]
yLab = sys.argv[3]
givenLabels = sys.argv[4].split("|")
givenData = sys.argv[5].split("|")
regex = sys.argv[6]
tpData = {}  # label key -> numpy array of samples
fig = plt.figure(figsize=(16, 9), dpi=120)
ax = fig.add_axes([0,0,1,1])
label0 = givenLabels[0]
brokenL0 = label0.split(",")
# If the first comma-field of the first label matches the regex, labels are
# treated as carrying a per-run id that should be stripped and grouped over.
multipleRuns = bool(re.search(regex, brokenL0[0]))
for labelI, dataI in zip(givenLabels, givenData):
    # NOTE(review): float() on the bracket-stripped string assumes each data
    # item contains exactly one number -- confirm the upstream format.
    temp = np.array(float(dataI.replace("[","").replace("]","")))
    isFound = False
    keyI = ""
    brokenLI = labelI.split(",")
    if multipleRuns == True:
        # Find an existing group whose key fields are all present in this label.
        for key in tpData.keys():
            brokenKey = key.split(",")
            test = set(brokenKey).intersection(brokenLI)
            if(len(test) == len(brokenKey)):
                isFound = True
                keyI = key
                break
        if isFound == False:
            brokenLI.pop(0)  # drop the run-id field before building the group key
            keyI = ','.join(str(b) for b in brokenLI)
            tpData[keyI] = temp
        else:
            # Append this run's sample to the existing group.
            existingData = tpData.pop(keyI)
            existingData = np.append(existingData, temp)
            tpData[keyI] = existingData
    else:
        tpData[labelI] = temp
# One bar per group: mean with a standard-deviation error bar.
tickNames = []
ft = []
error = []
for d in tpData.items():
    lI = d[0]
    dI = d[1]
    tickNames.append(lI)
    ft.append(np.mean(dI))
    error.append(np.std(dI))
x_pos = np.arange(len(tickNames))
ax.bar(x_pos, ft, yerr=error)
ax.set_xticks(x_pos)
ax.set_xticklabels(tickNames, rotation=90)
ax.set_title(titl)
ax.set_ylabel(yLab)
plt.savefig(folderPath+titl+'.png', bbox_inches='tight')
|
987,644 | a726d56f538a77940314106401517aa6d1397e0d | import string
class Convert:
    """Convert between base-36 strings ('0'-'9' then 'A'-'Z') and decimal integers."""

    def __init__(self):
        # Digit alphabet; its length (36) is the radix.
        self.chars = string.digits + string.ascii_uppercase
        self.chars_len = len(self.chars)

    def print(self):
        # NOTE: method name shadows the builtin 'print'; kept for API compatibility.
        print(self.chars)
        print(self.chars_len)

    def num2dec(self, num):
        """Return the decimal value of base-36 string *num* (Horner's scheme)."""
        num = str(num)
        dec = 0
        for ch in num:
            dec = dec * self.chars_len + self.get_nth_of_digit(ch)
        return dec

    def dec2num(self, dec):
        """Return the base-36 representation of a non-negative integer *dec*.

        Bug fix: dec2num(0) now returns '0' (previously the empty string).
        """
        dec = int(dec)
        if dec == 0:
            return '0'
        num = ''
        while dec > 0:
            dec, rem = divmod(dec, self.chars_len)
            num = self.chars[rem] + num
        return num

    def get_nth_of_digit(self, digit):
        """Return the alphabet index of single-character *digit*, or -1 if absent."""
        for i in range(self.chars_len):
            if self.chars[i] == digit:
                return i
        return -1

    def dec2num_n_digit(self, dec, n=4):
        """Return dec in base 36, left-padded with '0' to at least *n* characters."""
        return self.dec2num(dec).rjust(int(n), "0")

    def max_dec_from_n_digit(self, n=4):
        """Return the number of distinct values an n-digit base-36 string can hold."""
        return pow(self.chars_len, int(n))
|
987,645 | 5a3921ada0a0db38178dbadfb4bc55aa09e84e53 | # -*- coding: UTF-8 -*-
# Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Archive extraction utilities
"""
import os
import shutil
import subprocess
import tarfile
from pathlib import Path, PurePosixPath
from _common import (USE_REGISTRY, PlatformEnum, ExtractorEnum, get_logger, get_running_platform)
# Default extractor commands: 'tar' on PATH for Unix; the Windows tools are
# located via the registry (USE_REGISTRY sentinel) unless the caller overrides.
DEFAULT_EXTRACTORS = {
    ExtractorEnum.SEVENZIP: USE_REGISTRY,
    ExtractorEnum.TAR: 'tar',
    ExtractorEnum.WINRAR: USE_REGISTRY,
}
class ExtractionError(BaseException):
    """Exceptions thrown in this module's methods.

    NOTE(review): derives from BaseException rather than Exception, so generic
    ``except Exception`` handlers will NOT catch it -- confirm this is intended.
    """
def _find_7z_by_registry():
    """
    Return a pathlib.Path to 7-zip's 7z.exe, located via the Windows Registry.

    Raises ExtractionError if the registry lookup fails.
    """
    import winreg #pylint: disable=import-error
    sub_key_7zfm = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\7zFM.exe'
    try:
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key_7zfm) as key_handle:
            sevenzipfm_dir = winreg.QueryValueEx(key_handle, 'Path')[0]
    except OSError:
        get_logger().exception('Unable to locate 7-zip from the Windows Registry')
        raise ExtractionError()
    sevenzip_path = Path(sevenzipfm_dir, '7z.exe')
    if not sevenzip_path.is_file():
        # NOTE(review): logs but does not raise when 7z.exe is missing, so the
        # caller receives a non-existent path -- confirm this fall-through is intended.
        get_logger().error('7z.exe not found at path from registry: %s', sevenzip_path)
    return sevenzip_path
def _find_winrar_by_registry():
    """
    Return a pathlib.Path to WinRAR's WinRAR.exe, located via the Windows Registry.

    Raises ExtractionError if the registry lookup fails.
    """
    import winreg #pylint: disable=import-error
    sub_key_winrar = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\WinRAR.exe'
    try:
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key_winrar) as key_handle:
            winrar_dir = winreg.QueryValueEx(key_handle, 'Path')[0]
    except OSError:
        # Fixed log-message typo: "locale" -> "locate".
        get_logger().exception('Unable to locate WinRAR from the Windows Registry')
        raise ExtractionError()
    winrar_path = Path(winrar_dir, 'WinRAR.exe')
    if not winrar_path.is_file():
        # Mirrors _find_7z_by_registry: log (but do not raise) when the binary is gone.
        get_logger().error('WinRAR.exe not found at path from registry: %s', winrar_path)
    return winrar_path
def _find_extractor_by_cmd(extractor_cmd):
"""Returns a string path to the binary; None if it couldn't be found"""
if not extractor_cmd:
return None
if Path(extractor_cmd).is_file():
return extractor_cmd
return shutil.which(extractor_cmd)
def _process_relative_to(unpack_root, relative_to):
"""
For an extractor that doesn't support an automatic transform, move the extracted
contents from the relative_to/ directory to the unpack_root
If relative_to is None, nothing is done.
"""
if relative_to is None:
return
relative_root = unpack_root / relative_to
if not relative_root.is_dir():
get_logger().error('Could not find relative_to directory in extracted files: %s',
relative_to)
raise ExtractionError()
for src_path in relative_root.iterdir():
dest_path = unpack_root / src_path.name
src_path.rename(dest_path)
relative_root.rmdir()
def _extract_tar_with_7z(binary, archive_path, output_dir, relative_to):
    """
    Extract a compressed tar archive with 7-zip by piping two 7z processes:
    the first decompresses to stdout, the second untars from stdin.

    Raises ExtractionError on failure.
    """
    get_logger().debug('Using 7-zip extractor')
    if not relative_to is None and (output_dir / relative_to).exists():
        get_logger().error('Temporary unpacking directory already exists: %s',
                           output_dir / relative_to)
        raise ExtractionError()
    # Stage 1: decompress archive to stdout; stage 2: untar from stdin into output_dir.
    cmd1 = (binary, 'x', str(archive_path), '-so')
    cmd2 = (binary, 'x', '-si', '-aoa', '-ttar', '-o{}'.format(str(output_dir)))
    get_logger().debug('7z command line: %s | %s', ' '.join(cmd1), ' '.join(cmd2))
    proc1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
    proc2 = subprocess.Popen(cmd2, stdin=proc1.stdout, stdout=subprocess.PIPE)
    # Close our handle so proc2 sees EOF (and proc1 gets SIGPIPE) if proc2 exits early.
    proc1.stdout.close()
    (stdout_data, stderr_data) = proc2.communicate()
    # Only proc2's exit status is checked; proc1 failures surface as truncated input.
    if proc2.returncode != 0:
        get_logger().error('7z commands returned non-zero status: %s', proc2.returncode)
        get_logger().debug('stdout: %s', stdout_data)
        get_logger().debug('stderr: %s', stderr_data)
        raise ExtractionError()
    _process_relative_to(output_dir, relative_to)
def _extract_tar_with_tar(binary, archive_path, output_dir, relative_to):
    """Extract a tar archive with a BSD or GNU ``tar`` binary; raises ExtractionError on failure."""
    get_logger().debug('Using BSD or GNU tar extractor')
    output_dir.mkdir(exist_ok=True)
    cmd = (binary, '-xf', str(archive_path), '-C', str(output_dir))
    get_logger().debug('tar command line: %s', ' '.join(cmd))
    result = subprocess.run(cmd)
    if result.returncode:
        get_logger().error('tar command returned %s', result.returncode)
        raise ExtractionError()
    # GNU tar offers --transform for path stripping, but bsdtar (macOS) does not,
    # so the relative_to handling is done manually for portability.
    _process_relative_to(output_dir, relative_to)
def _extract_tar_with_winrar(binary, archive_path, output_dir, relative_to):
    """Extract a tar archive with WinRAR; raises ExtractionError on failure."""
    get_logger().debug('Using WinRAR extractor')
    output_dir.mkdir(exist_ok=True)
    # '-o+' tells WinRAR to overwrite existing files without prompting.
    cmd = (binary, 'x', '-o+', str(archive_path), str(output_dir))
    get_logger().debug('WinRAR command line: %s', ' '.join(cmd))
    result = subprocess.run(cmd)
    if result.returncode:
        get_logger().error('WinRAR command returned %s', result.returncode)
        raise ExtractionError()
    _process_relative_to(output_dir, relative_to)
def _extract_tar_with_python(archive_path, output_dir, relative_to):
    """
    Extract a tar archive using only Python's tarfile module (fallback path).

    Handles the relative_to stripping inline and degrades gracefully on
    filesystems without symlink support. Raises ExtractionError on failure.
    """
    get_logger().debug('Using pure Python tar extractor')

    class NoAppendList(list):
        """Hack to workaround memory issues with large tar files"""

        def append(self, obj):
            # Intentionally discard members so TarFile.members does not grow.
            pass

    # Simple hack to check if symlinks are supported
    try:
        os.symlink('', '')
    except FileNotFoundError:
        # Symlinks probably supported
        symlink_supported = True
    except OSError:
        # Symlinks probably not supported
        get_logger().info('System does not support symlinks. Ignoring them.')
        symlink_supported = False
    except BaseException:
        # Unexpected exception
        get_logger().exception('Unexpected exception during symlink support check.')
        raise ExtractionError()

    # 'r|...' opens the archive in streaming mode; compression is inferred from
    # the file suffix (e.g. '.xz' -> 'r|xz').
    with tarfile.open(str(archive_path), 'r|%s' % archive_path.suffix[1:]) as tar_file_obj:
        tar_file_obj.members = NoAppendList()
        for tarinfo in tar_file_obj:
            try:
                if relative_to is None:
                    destination = output_dir / PurePosixPath(tarinfo.name)
                else:
                    destination = output_dir / PurePosixPath(tarinfo.name).relative_to(relative_to)
                if tarinfo.issym() and not symlink_supported:
                    # In this situation, TarFile.makelink() will try to create a copy of the
                    # target. But this fails because TarFile.members is empty
                    # But if symlinks are not supported, it's safe to assume that symlinks
                    # aren't needed. The only situation where this happens is on Windows.
                    continue
                if tarinfo.islnk():
                    # Derived from TarFile.extract()
                    # Rewrite the hard-link target to account for relative_to stripping.
                    new_target = output_dir / PurePosixPath(
                        tarinfo.linkname).relative_to(relative_to)
                    tarinfo._link_target = new_target.as_posix() # pylint: disable=protected-access
                if destination.is_symlink():
                    destination.unlink()
                # Private API used deliberately to bypass the members bookkeeping.
                tar_file_obj._extract_member(tarinfo, str(destination)) # pylint: disable=protected-access
            except BaseException:
                get_logger().exception('Exception thrown for tar member: %s', tarinfo.name)
                raise ExtractionError()
def extract_tar_file(archive_path, output_dir, relative_to, extractors=None):
    """
    Extract regular or compressed tar archive into the output directory.

    archive_path is the pathlib.Path to the archive to unpack
    output_dir is a pathlib.Path to the directory to unpack. It must already exist.

    relative_to is a pathlib.Path for directories that should be stripped relative to the
    root of the archive, or None if no path components should be stripped.

    extractors is a dictionary of PlatformEnum to a command or path to the
    extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip and WinRAR.

    Raises ExtractionError if unexpected issues arise during unpacking.
    """
    if extractors is None:
        extractors = DEFAULT_EXTRACTORS
    current_platform = get_running_platform()
    if current_platform == PlatformEnum.WINDOWS:
        # Preference order on Windows: 7-zip, then WinRAR, then the pure-Python
        # fallback after the final 'if' chain below.
        sevenzip_cmd = extractors.get(ExtractorEnum.SEVENZIP)
        if sevenzip_cmd == USE_REGISTRY:
            sevenzip_cmd = str(_find_7z_by_registry())
        sevenzip_bin = _find_extractor_by_cmd(sevenzip_cmd)
        if sevenzip_bin is not None:
            _extract_tar_with_7z(sevenzip_bin, archive_path, output_dir, relative_to)
            return
        # Use WinRAR if 7-zip is not found
        winrar_cmd = extractors.get(ExtractorEnum.WINRAR)
        if winrar_cmd == USE_REGISTRY:
            winrar_cmd = str(_find_winrar_by_registry())
        winrar_bin = _find_extractor_by_cmd(winrar_cmd)
        if winrar_bin is not None:
            _extract_tar_with_winrar(winrar_bin, archive_path, output_dir, relative_to)
            return
        get_logger().warning(
            'Neither 7-zip nor WinRAR were found. Falling back to Python extractor...')
    elif current_platform == PlatformEnum.UNIX:
        # NOTE: 7-zip isn't an option because it doesn't preserve file permissions
        tar_bin = _find_extractor_by_cmd(extractors.get(ExtractorEnum.TAR))
        if not tar_bin is None:
            _extract_tar_with_tar(tar_bin, archive_path, output_dir, relative_to)
            return
    else:
        # This is not a normal code path, so make it clear.
        raise NotImplementedError(current_platform)
    # Fallback to Python-based extractor on all platforms
    _extract_tar_with_python(archive_path, output_dir, relative_to)
def extract_with_7z(
        archive_path,
        output_dir,
        relative_to, #pylint: disable=too-many-arguments
        extractors=None):
    """
    Extract archives with 7-zip into the output directory.
    Only supports archives with one layer of unpacking, so compressed tar archives don't work.

    archive_path is the pathlib.Path to the archive to unpack
    output_dir is a pathlib.Path to the directory to unpack. It must already exist.

    relative_to is a pathlib.Path for directories that should be stripped relative to the
    root of the archive.

    extractors is a dictionary of PlatformEnum to a command or path to the
    extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip.

    Raises ExtractionError if unexpected issues arise during unpacking.
    """
    # TODO: It would be nice to extend this to support arbitrary standard IO chaining of 7z
    # instances, so _extract_tar_with_7z and other future formats could use this.
    if extractors is None:
        extractors = DEFAULT_EXTRACTORS
    sevenzip_cmd = extractors.get(ExtractorEnum.SEVENZIP)
    if sevenzip_cmd == USE_REGISTRY:
        # The registry sentinel only makes sense on Windows.
        if not get_running_platform() == PlatformEnum.WINDOWS:
            get_logger().error('"%s" for 7-zip is only available on Windows', sevenzip_cmd)
            raise ExtractionError()
        sevenzip_cmd = str(_find_7z_by_registry())
    sevenzip_bin = _find_extractor_by_cmd(sevenzip_cmd)
    # Refuse to clobber a pre-existing staging directory used for path stripping.
    if not relative_to is None and (output_dir / relative_to).exists():
        get_logger().error('Temporary unpacking directory already exists: %s',
                           output_dir / relative_to)
        raise ExtractionError()
    # '-aoa' overwrites existing files without prompting.
    cmd = (sevenzip_bin, 'x', str(archive_path), '-aoa', '-o{}'.format(str(output_dir)))
    get_logger().debug('7z command line: %s', ' '.join(cmd))
    result = subprocess.run(cmd)
    if result.returncode != 0:
        get_logger().error('7z command returned %s', result.returncode)
        raise ExtractionError()
    _process_relative_to(output_dir, relative_to)
def extract_with_winrar(
        archive_path,
        output_dir,
        relative_to, #pylint: disable=too-many-arguments
        extractors=None):
    """
    Extract archives with WinRAR into the output directory.
    Only supports archives with one layer of unpacking, so compressed tar archives don't work.

    archive_path is the pathlib.Path to the archive to unpack
    output_dir is a pathlib.Path to the directory to unpack. It must already exist.

    relative_to is a pathlib.Path for directories that should be stripped relative to the
    root of the archive.

    extractors is a dictionary of PlatformEnum to a command or path to the
    extractor binary. Defaults to 'tar' for tar, and '_use_registry' for WinRAR.

    Raises ExtractionError if unexpected issues arise during unpacking.
    """
    if extractors is None:
        extractors = DEFAULT_EXTRACTORS
    winrar_cmd = extractors.get(ExtractorEnum.WINRAR)
    if winrar_cmd == USE_REGISTRY:
        # The registry sentinel only makes sense on Windows.
        if not get_running_platform() == PlatformEnum.WINDOWS:
            get_logger().error('"%s" for WinRAR is only available on Windows', winrar_cmd)
            raise ExtractionError()
        winrar_cmd = str(_find_winrar_by_registry())
    winrar_bin = _find_extractor_by_cmd(winrar_cmd)
    # Refuse to clobber a pre-existing staging directory used for path stripping.
    if not relative_to is None and (output_dir / relative_to).exists():
        get_logger().error('Temporary unpacking directory already exists: %s',
                           output_dir / relative_to)
        raise ExtractionError()
    # '-o+' overwrites existing files without prompting.
    cmd = (winrar_bin, 'x', '-o+', str(archive_path), str(output_dir))
    get_logger().debug('WinRAR command line: %s', ' '.join(cmd))
    result = subprocess.run(cmd)
    if result.returncode != 0:
        get_logger().error('WinRAR command returned %s', result.returncode)
        raise ExtractionError()
    _process_relative_to(output_dir, relative_to)
|
987,646 | c96039743074505be8315faaf5bc15c79b60c345 | import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import os
import random
from watermark import watermarking, gaussian_noise
from evaluate import *
SEED=1011  # global default seed for reproducible experiments

def set_seeds(seed=SEED):
    """Seed Python hash randomization, random, TensorFlow and NumPy RNGs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    tf.random.set_seed(seed)
    np.random.seed(seed)
def training(args, train_x, train_x_wm, train_y, test_x, test_x_wm, test_y):
    """Train (or load) the watermarked classifier plus its outlier-exposure
    ("_OE") fine-tuned variant, then run the evaluation suite.

    args -- namespace with .dataset ('cifar10'/'cifar100'), .model
            ('resnet'/'densenet') and .alpha (watermark strength).
    train_x / train_x_wm / train_y -- clean images, watermarked images, labels.
    test_x / test_x_wm / test_y    -- test-set counterparts.

    Side effects: saves/loads Keras SavedModels under ./load_model/ and reads
    the forgery image set from ./data/forgery/<dataset>.
    """
    set_seeds()
    BATCH_SIZE=2048
    # Checkpoint paths are keyed by dataset, backbone and watermark alpha.
    saved_model_path="./load_model/classifier/"+str(args.dataset)+"/"+str(args.model)+"_SP_"+str(args.alpha)
    forgery_directory="./data/forgery/"+str(args.dataset)
    if args.dataset=='cifar10':
        n_labels=10
    elif args.dataset=='cifar100':
        # NOTE(review): 20 matches CIFAR-100 coarse (superclass) labels -- confirm intended.
        n_labels=20
    # --- Stage 1: base classifier with an auxiliary reconstruction head ---
    if not os.path.exists(saved_model_path+'/saved_model.pb'):
        input_tensor = tf.keras.Input(shape=(32, 32, 3))
        # Upscale 32x32 inputs to the 224x224 resolution the ImageNet backbones expect.
        resized_images = layers.Lambda(lambda image: tf.image.resize(image, (224, 224)))(input_tensor)
        if args.model=='resnet':
            base_model = tf.keras.applications.ResNet152V2(
                include_top=False,
                weights='imagenet',
                input_tensor=resized_images,
                input_shape=(224, 224, 3),
            )
        elif args.model=='densenet':
            base_model = tf.keras.applications.DenseNet201(
                include_top=False,
                weights='imagenet',
                input_tensor=resized_images,
                input_shape=(224, 224, 3),
            )
        # Freeze the pretrained backbone; only the new heads are trained here.
        for layer in base_model.layers:
            layer.trainable = False
        outputs = base_model.layers[-1].output
        # Classification head ('logits_out').
        output = layers.GlobalAveragePooling2D()(outputs)
        output = layers.Dense(1024, activation='gelu', kernel_initializer="he_normal")(output)
        output = layers.Dropout(0.2)(output)
        output = layers.Dense(256, activation='gelu', kernel_initializer="he_normal")(output)
        output = layers.Dropout(0.2)(output)
        output = layers.Dense(64, activation='gelu', kernel_initializer="he_normal")(output)
        output = layers.Dropout(0.2)(output)
        output = layers.Dense(n_labels, activation='softmax', name='logits_out')(output)
        # Reconstruction head ('rec_output'): transposed convs back towards an image.
        output2 = layers.Conv2DTranspose(512, 3, strides=2, padding='same',
                                         kernel_initializer="he_normal")(outputs)
        output2 = layers.BatchNormalization()(output2)
        output2 = layers.Activation('gelu')(output2)
        output2 = layers.Conv2DTranspose(128, 3, strides=2, padding='same',
                                         kernel_initializer="he_normal")(output2)
        output2 = layers.BatchNormalization()(output2)
        output2 = layers.Activation('gelu')(output2)
        output2 = layers.Conv2DTranspose(32, 3, strides=1,
                                         kernel_initializer="he_normal")(output2)
        output2 = layers.BatchNormalization()(output2)
        output2 = layers.Activation('gelu')(output2)
        output2 = layers.Conv2DTranspose(3, 3, strides=1,
                                         activation='sigmoid', name='rec_output')(output2)
        base_model = tf.keras.models.Model(inputs=input_tensor, outputs=[output, output2])
        # Joint objective: classification dominates, reconstruction regularizes.
        base_model.compile(optimizer='adam',
                           loss={'logits_out' : 'sparse_categorical_crossentropy',
                                 'rec_output' : 'mean_squared_error'},
                           loss_weights={'logits_out': 0.9,
                                         'rec_output': 0.1})
        callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,
                                                    restore_best_weights=True)
        # Inputs are watermarked; targets are the labels plus the clean images.
        base_model.fit(train_x_wm,
                       [train_y, train_x],
                       validation_split=0.2,
                       epochs=50,
                       batch_size=BATCH_SIZE,
                       callbacks=[callback])
        base_model.save(saved_model_path)
        base_model = tf.keras.models.load_model(saved_model_path)
    else:
        base_model = tf.keras.models.load_model(saved_model_path)
    # --- Stage 2: outlier-exposure fine-tuning ("_OE" checkpoint) ---
    if not os.path.exists(saved_model_path+"_OE"+'/saved_model.pb'):
        aux_model = tf.keras.models.load_model(saved_model_path)
        # Freeze up to the backbone boundary; the layer index differs per net.
        if args.model == 'resnet':
            for layer in aux_model.layers[0:565]:
                layer.trainable = False
        elif args.model == 'densenet':
            for layer in aux_model.layers[0:708]:
                layer.trainable = False
        prob_in, pseudo_image = aux_model.predict(train_x_wm)
        # Pseudo-outliers: re-watermark the reconstructed images.
        pseudo_out=watermarking(pseudo_image, args.alpha)
        ## Gaussian noise
        # pseudo_out=random_noise(pseudo_image, mode="gaussian", var=args.alpha)
        ## Salt-and-pepper noise
        # pseudo_out=random_noise(pseudo_image, mode="s&p", amount=args.alpha)
        # Outliers are trained towards the uniform label distribution.
        uniform=np.expand_dims(np.repeat(1/n_labels, n_labels), axis=1)
        pseudo_prob_out=np.repeat(uniform, len(pseudo_out), axis=1).T
        train_x_aux=np.concatenate([train_x, pseudo_out], axis=0)
        train_y_aux=np.concatenate([prob_in, pseudo_prob_out], axis=0)
        # KL divergence against the soft targets built above.
        aux_model.compile(optimizer='adam',
                          loss={'logits_out' : 'kl_divergence',
                                'rec_output' : 'mean_squared_error'},
                          loss_weights={'logits_out': 0.9,
                                        'rec_output': 0.1})
        callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,
                                                    restore_best_weights=True)
        aux_model.fit(train_x_aux,
                      [train_y_aux, train_x_aux],
                      validation_split=0.2,
                      epochs=100,
                      batch_size=BATCH_SIZE,
                      callbacks=[callback])
        aux_model.save(saved_model_path+"_OE")
        aux_model = tf.keras.models.load_model(saved_model_path+"_OE")
    else:
        aux_model = tf.keras.models.load_model(saved_model_path+"_OE")
    # --- Evaluation ---
    print("=== Evaluate ===")
    forgery = tf.keras.preprocessing.image_dataset_from_directory(forgery_directory,
                                                                  batch_size=10000,
                                                                  image_size=(32, 32))
    forgery=list(forgery.as_numpy_iterator())[0]
    forgery_x=forgery[0].astype("uint8")
    forgery_x=forgery_x / 255.
    forgery_y=forgery[1]
    test_y=test_y.reshape(-1)
    # NOTE(review): the base_model results below are immediately overwritten
    # by the aux_model call -- confirm whether both were meant to be kept.
    s_prob_right, s_prob_wrong, kl_right, kl_wrong =\
        right_wrong_distinction(base_model, test_x, test_y)
    s_prob_right, s_prob_wrong, kl_right, kl_wrong =\
        right_wrong_distinction(aux_model, test_x, test_y)
    s_prob_in_f, s_prob_out_f, pseudo_prob_in_f, pseudo_prob_out_f =\
        in_out_distinction(base_model, aux_model, test_x, forgery_x)
    print("\n=== The End ===")
987,647 | 55d1e962356403a1361a2fd8fcea05cb7bfa38e7 | #data generator using mask rcnn
#it goes frame by frame and creates single picture for frame containing combined pisctures of the same category objects
#works best with 15 fps mp4 videos
import tensorflow as tf
import numpy as np
from cv2 import cv2
import os
#paths
#paths
MODEL_NAME = 'mask_rcnn_inception_v2_coco'
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
videos_folder_path = 'VID/15FPS/to_do'
processed = [ '55', '79', '53', '76', '48', '57' ] #already processed videos in folder
#loading frozen graph
# Deserialize the frozen Mask R-CNN GraphDef once at import time and import
# it into detection_graph; every frame is later run against this graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
#running on single frame
def run_inference_for_single_image(image, graph):
    """Run the frozen detection graph on one image batch.

    image -- numpy array already expanded to a batch of one, i.e. (1, H, W, 3)
    graph -- tf.Graph holding the frozen Mask R-CNN inference graph
    Returns a dict with 'num_detections', 'detection_boxes', 'detection_scores',
    'detection_classes' and 'detection_masks' for the single image (batch
    dimension stripped, classes cast to int64).
    NOTE(review): a fresh tf.Session is created on every call, which is very
    slow when invoked per frame -- consider hoisting the session out.
    """
    with graph.as_default():
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            # Only request tensors that actually exist in this graph.
            for key in [ 'num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks' ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            #running
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image})
            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.int64)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            output_dict['detection_masks'] = output_dict['detection_masks'][0]
            return output_dict
#scanning videos directory
# For each unprocessed video: detect cars frame by frame, cut out each car,
# mask it with its coarse segmentation mask, and save all cars of a frame
# side by side as one combined PNG named after the frame counter.
for vid in os.listdir(videos_folder_path):
    # Skip videos whose id appears in the 'processed' list.
    cont = False
    for p in processed:
        if p in vid:
            cont = True
            break
    if cont:
        continue
    obiekty = [] #cut images
    licznikF = 1  # frame counter
    cap = cv2.VideoCapture(videos_folder_path + "/" + vid) #reading videos
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) #number of frames
    path = videos_folder_path + "/" + vid[:len(vid)-4] #setting path to save
    os.mkdir(path) #creating folder to save pictures
    cap.set(1, licznikF)
    #loop for current video
    while True:
        hasFrame, frame = cap.read()
        if not hasFrame:
            print("Done processing!")
            break
        chunksize = 14 #224/15 = 14,99
        size = 224
        sizem = 1000
        frame = cv2.resize(frame, (sizem, sizem))
        frame_np_expanded = np.expand_dims(frame, axis=0)
        output = run_inference_for_single_image(frame_np_expanded, detection_graph)
        for idx, wynik in enumerate(output['detection_scores']):
            if wynik > 0.7: #if score is higher than 70%
                if output['detection_classes'][idx] == 3: #3 means car, see coco labels
                    # Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels.
                    box = output['detection_boxes'][idx]
                    ymin = int(box[0] * sizem)
                    ymax = int(box[2] * sizem)
                    xmin = int(box[1] * sizem)
                    xmax = int(box[3] * sizem)
                    boximg = frame[ymin:ymax, xmin:xmax] #0:2 1:3
                    boximg = cv2.resize(boximg, (211, 211)) #14*15 = 210
                    boxmask = np.zeros(shape=[size, size, 3], dtype=np.uint8)
                    # Overlay 14x14-pixel squares of the crop wherever the coarse
                    # mask cell exceeds 0.8 (assumes a ~15x15 mask grid -- TODO confirm).
                    fromy = 1
                    for line in output["detection_masks"][idx]:
                        fromx = 1
                        for val in line:
                            if val > 0.8: #overlaying with proper square
                                boxmask[fromy:fromy+chunksize, fromx:fromx+chunksize] = boximg[fromy:fromy+chunksize, fromx:fromx+chunksize]
                            fromx += chunksize
                        fromy += chunksize
                    obiekty.append(boxmask) #adding objects to list
            else:
                # Scores are sorted descending, so stop at the first low score.
                break
        #saving pictures form obiekty to one combined picture with name as frame count
        if len(obiekty) > 0:
            vis = np.zeros((224, 224*len(obiekty), 3), np.uint8) #blank img
            pixels = 0
            for ob in obiekty:
                vis[:224, pixels:pixels+224, :3] = ob #saving img
                pixels += 224
            cv2.imwrite(path + "/" + str(licznikF) + ".png", vis) #saving
        print("vid: {} -> {}/{} DONE".format(vid, licznikF, length))
        licznikF += 1
        obiekty.clear()
    cap.release()
987,648 | e3d51a74bcc4000d9267476ca6ebf2975a9ff657 | ''' Select only the columns we want for the rest of the process.
Incoming dataframe has Country, Program, Customer and Driver:
- Drop the Country, Program, Customer and Driver columns
- Reorder to: UUID, Month, Amount'''
__author__ = "The Hackett Group"
import pandas as pd
# Columns retained (and their output order) after preprocessing.
cols = ["UUID", "Month", "Amount"]


def process(df):
    """Drop every column except UUID, Month and Amount, reordered to that order."""
    return df.loc[:, cols]
987,649 | 4618ea7fa3d9f60452093ec30eb9951d85a0b8fd | boodschappen = {"brood":2, "water":1, "sap":3}
prijs = 0
while prijs < 100:
product = input("welk product wil je kopen?")
if (product == 'm'):
print("Totaal:", prijs, "euro")
main()
#main()
if product in boodschappen:
prijs += boodschappen[product]
|
987,650 | 422594bd87fd33beddba78b7f98437325974a553 | from urllib.request import urlopen
from bs4 import BeautifulSoup
url="https://movie.naver.com/movie/running/current.nhn"
page=urlopen(url)
soup=BeautifulSoup(page, 'lxml')
print(soup.title)
#%%
ul_one = soup.find('ul', class_='lst_detail_t1')
print(ul_one.text)
#%%
a_all=ul_one.find_all('a')
print(a_all)
#%%
# dt, class:tit
# a -> text
li_all=ul_one.find_all('dt',class_='tit')
# print(li_all[0].text) << 데이터 확인용 출력
# print(li_all[1].text) << 데이터 확인용 출력
one_title=li_all[0].find('a')
print(one_title.text)
#%%
li_all=ul_one.find_all('dt',class_='tit')
for i in li_all:
one_title=i.find('a').text
print(one_title)
#%% 평점 정보 가져오기
# 1. 전체 가져오기
# 2. 하나씩 가져오는것 확인
# 3. for문으로 돌려보기
ul_one=soup.find('ul', class_='lst_detail_t1')
score_all=ul_one.find_all('span', class_='num')
print(score_all)
cnt=1;
for i in score_all:
if cnt%2==0:
print(i.text)
cnt=cnt+1
#%% 실습 4-1 : 예매율 가져오기
for i in score_all:
if cnt%2==0:
print(i.text)
cnt=cnt+1 |
987,651 | 6890ca88fe7ee7c33b3fd3064db245472f84c13d | import gpytorch
import torch
class ConstantKernel(gpytorch.kernels.Kernel):
    """
    Gives a covariance of one for all inputs. Should be combined with a scale kernel.
    """

    def __init__(self, **kwargs):
        # A constant kernel has no lengthscale hyperparameter.
        super(ConstantKernel, self).__init__(has_lengthscale=False, **kwargs)

    def forward(self, x1, x2, diag=False, device='cuda:0', **params):
        """Return an all-ones covariance between x1 (n x d) and x2 (m x d).

        diag=True returns only the diagonal as a length-n vector, as the
        gpytorch Kernel contract requires.
        """
        if diag:
            # Bug fix: the original ignored `diag` and always returned the full
            # n x m matrix, breaking gpytorch's diagonal evaluation path.
            return torch.ones(x1.shape[0]).to(device)
        return torch.ones(x1.shape[0], x2.shape[0]).to(device)
|
987,652 | 3112c1b770d57e3a2e442521318289a08ffddb25 | from app import create_app,db
from flask_script import Manager,Server
from app.models import User,Posts,Comments
from flask_migrate import Migrate,MigrateCommand
# Build the Flask app with the 'development' configuration and wire up the
# Flask-Script manager with server, shell and database-migration commands.
app=create_app('development')
manager=Manager(app)
manager.add_command('server',Server)
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)


@manager.shell
def make_shell_context():
    """Objects pre-imported into the `manage.py shell` session."""
    return dict(app=app,db=db,User=User,Posts=Posts,Comments=Comments)


if __name__ == '__main__':
    manager.run()
|
987,653 | 158347663ce3df0de6e873b255c19ec7c2d38ef3 | import nltk
from nltk.corpus import sentiwordnet as swn
import gensim # word2vec
import numpy
import re
from os import path, remove, chdir
import subprocess
import parameters
import prepare_data
import svm
stopwords = nltk.corpus.stopwords.words('english')
stopwords.extend(['#', ',', '+'])
'Use this class for features that are used by more than one feature extractor (e.g. a feature extractor for aspects and in the feature for aspect sentiments) to avoid dublicated code'
class Features:
#stemmer = nltk.PorterStemmer()
lemmatizer = nltk.stem.WordNetLemmatizer()
stopwords = nltk.corpus.stopwords.words('english')
'Constructor: compute some basic statistics (such as words in training prepare_data), to be able to calculate the features individually per post later on'
    def __init__(self, training):
        """Precompute everything the per-post feature extractors need:
        unigram/bigram vocabularies, POS-tag indices, a word2vec model,
        per-category centroids and emoticon regexes.

        training -- dict with 'sentences' (each having 'tokens', 'categories')
                    and a top-level 'categories' list.
        """
        self.unigramFeatures = {} # dictionary: word -> relative index in feature vector
        self.numUnigramFeatures = 0
        self.bigramFeatures = {} # dictionary: bigram -> relative index in feature vector
        self.numBigramFeatures = 0
        # compute unigram and bigram features
        wordSet = []
        bigramSet = []
        # set of sentences for word2vec
        sentenceSet = []
        for sentence in training['sentences']:
            #lemmatizedPost = [self.lemmatizer.lemmatize(w) for w in post['text']]
            #stemmedPost = [self.stemmer.stem(w) for w in post['text']]
            #bigramSet = bigramSet + list(nltk.bigrams(post['text']))
            bigramSet = bigramSet + list(nltk.bigrams(sentence['tokens']))
            sentenceSet.append(sentence['tokens'])
            filteredPost = sentence['tokens']
            #filteredPost = [w for w in sentence['tokens'] if w.lower() not in self.stopwords]
            for word in filteredPost:
                wordSet.append(word)
        # compute frequency distribution ( we can use it to select only meaningful word features)
        wordSet = nltk.FreqDist(wordSet)
        # NOTE(review): in NLTK 3 FreqDist is a Counter, so .keys() is NOT
        # frequency-sorted; this slice takes insertion order, not the most
        # common words -- confirm (same applies to the bigram slice below).
        wordSet = list(wordSet.keys())[:parameters.numUnigrams] # only use the first 100000 most common words as features
        #wordSet = set(wordSet)
        # build feature dictionary with relative indices in sparse vector
        i = 1
        for w in wordSet:
            self.unigramFeatures[w] = i
            i += 1
        self.numUnigramFeatures = i - 1
        # compute frequency distribution ( we can use it to select only meaningful bigram features)
        bigramSet = nltk.FreqDist(bigramSet)
        bigramSet = list(bigramSet.keys())[:parameters.numBigrams] # only use the first 1000 most common words as features
        #bigramSet = set(bigramSet)
        # build feature dictionary with relative indices in sparse vector
        index = 1
        for w in bigramSet:
            self.bigramFeatures[w] = index
            index += 1
        self.numBigramFeatures = index - 1
        ### POS Feature
        # extract all POS tags that might appear
        # Store tags as a (tag -> index) mapping to fasten computation later on
        self.posDict = {}
        taglist = nltk.data.load('help/tagsets/upenn_tagset.pickle').keys()
        i = 1
        for tag in taglist:
            self.posDict[tag] = i
            i+=1
        self.posDict['#'] = i # dirty hack, since it is the only pos tag, that appears in the tagged sentences, that does not appear in the above tagset
        ### Word2Vec Feature
        # train word2vec model with given sentences
        w2vModel = gensim.models.Word2Vec(sentenceSet, size=parameters.w2vVecSize)
        # build own word2vec vocab, to improve runtime later on)
        self.w2vVocab = {}
        i = 1
        for w in w2vModel.wv.vocab:
            if w.lower() not in self.stopwords: # filter out stop word word vectors
                self.w2vVocab[w] = (w2vModel[w], i)
                i += 1
        self.w2vVocabSize = i-1
        # compute the w2v centroid of all sentences belonging to the same category
        self.categories = training['categories']
        self.numCat = len(training['categories'])
        if self.numCat > 0:
            numCatw2v = [0]*self.numCat
            # NOTE(review): each centroid starts as a length-1 list wrapping a
            # zero vector; += with a numpy array later relies on broadcasting
            # over that wrapper -- verify the intended shape.
            self.centroids = [[numpy.zeros(parameters.w2vVecSize)] for i in range(self.numCat)]
            # compute centroids
            for sentence in training['sentences']:
                filteredPost = sentence['tokens']
                #filteredPosts = [w.lower() for w in sentence['tokens'] if w.lower() not in self.stopwords]
                i = 0
                for currCat in training['categories']:
                    if currCat in sentence['categories']:
                        for w in filteredPost:
                            if w in self.w2vVocab:
                                self.centroids[i] += self.w2vVocab[w][0]
                                numCatw2v[i] += 1
                    i += 1
            i = 0
            for currCat in training['categories']:
                self.centroids[i] /= numCatw2v[i]
                # normalize centroids
                self.centroids[i] /= numpy.linalg.norm(self.centroids[i])
                i += 1
        ### emoticon feature
        # Pre-build alternation regexes matching any positive/negative smiley.
        positiveSmileys = """:-) :) =) :] :> :c) x) :o) :-D ;D :D =D xD XD :oD""".split()
        self.patternPosEmoticons = "|".join(map(re.escape, positiveSmileys))
        negativeSmileys = """:-( :( =( :[ :< :/ x( :o( :C :\'( :\'C ;(""".split()
        self.patternNegEmoticons = "|".join(map(re.escape, negativeSmileys))
'return gensim w2v model trained on trainings prepare_data'
def getw2vModel(self):
return [self.w2vVocab, self.w2vVocabSize]
'get word features of a single word'
'''
Word feature means, that it is a BOW model, where we have a set
of all words appearing in the training set. The set is
represented as a binary vector, if the vector appears in this set
the corresponding entry is 1.
Offset is the relative offset that indices in the sparse feature vector should have.
The given offset plus the size of the Word Feature vector is returned as new offset.
'''
def getWordFeatures(self, word, offset):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
if word in self.unigramFeatures:
features.append((offset+self.unigramFeatures[word], 1))
offset += self.numUnigramFeatures
return [features, offset]
'''
Get unigram features within the context of the word with index "index" in the given
sentence. The boolean "prev" decides if we consider the previous words (True) or the
consecutive words. "window" determines how many words before resp. after the current word are
considered as context.
'''
def getContextFeatures(self, sentence, offset, prev, window, index):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = {} # we use a dictionary at first, to avoid double entriesS
if prev:
start = index-window
end = index -1
else:
start = index +1
end = index + window
i = start-1
while (i < end):
i += 1
if i < 0:
continue
if i >= len(sentence['tokens']):
break
word = sentence['tokens'][i]
if word in self.unigramFeatures:
features[offset+self.unigramFeatures[word]] = 1
offset += self.numUnigramFeatures
return [list(features.items()), offset]
'''
Search for a negation within a context window
'''
def getNegationFeatures(self, sentence, offset, window, index):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
start = index-window
end = index + window
i = start-1
while (i < end):
i += 1
if i < 0:
continue
if i >= len(sentence['tokens']):
break
if i == index:
continue
word = sentence['tokens'][i]
if word == 'not' or word.endswith('n\'t'):
features.append((offset+1, 1))
break
offset += 1
return [features, offset]
'get unigram features of a single sentence'
def getUnigramFeatures(self, sentence, offset):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
#lemmatizedPost = [self.lemmatizer.lemmatize(w) for w in tokenizedPost]
#stemmedPost = [self.stemmer.stem(w) for w in tokenizedPost]
#filteredPost = [w.lower() for w in tokenizedPost if w.lower() not in self.stopwords]
tokenSet = set(sentence['tokens'])
filteredSet = set(sentence['tokens'])
# unigram features
#filteredPost = tokenizedPost
#filteredPost = [w.lower() for w in set(tokenizedPost)]
for w in filteredSet:
if w in self.unigramFeatures:
features.append((offset+self.unigramFeatures[w], 1))
offset += self.numUnigramFeatures
return [features, offset]
'get unigram features of a single sentence'
def getBigramFeatures(self, sentence, offset):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
#lemmatizedPost = [self.lemmatizer.lemmatize(w) for w in tokenizedPost]
#stemmedPost = [self.stemmer.stem(w) for w in tokenizedPost]
#filteredPost = [w.lower() for w in tokenizedPost if w.lower() not in self.stopwords]
# bigram features
bigrams = set(list(nltk.bigrams(sentence['tokens'])))
for b in bigrams:
if b in self.bigramFeatures:
features.append((offset+self.bigramFeatures[b], 1))
offset += self.numBigramFeatures
return [features, offset]
''' return the POS tag of a specific word in a sentence as feature '''
def getPoSFeature(self, sentence, offset, index):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
features.append((offset+self.posDict[sentence['pos'][index][1]], 1))
offset += len(self.posDict)
return [features, offset]
## TODO
''' return the Glove Word Vector Sum for a sentence'''
def getWordVectorFeatures(self, sentence, offset, index):
features = []
for idx in range(300):
features.append( (offset + (idx + 1), [sentence['wordVec'][index]][0][idx]))
offset += 300
return [features, offset]
    def getSentenceVectorFeatures(self, sentence, offset):
        """Per-dimension sum of the sentence's (non-stopword) word vectors,
        emitted as 300 consecutive sparse entries.

        NOTE(review): filteredList comes from a *set* of tokens (deduplicated,
        arbitrary order) but is used to index sentence['wordVec'] positionally;
        if wordVec is aligned with the original token order this misaligns
        words and vectors -- confirm the intended alignment.
        """
        features = []
        filteredSet = set(sentence['tokens'])
        filteredList = list(filteredSet)
        for idx in range(300):
            toAppend = 0
            for i in range(len(filteredList)):
                if filteredList[i] not in stopwords :
                    toAppend = toAppend + [sentence['wordVec'][i]][0][idx]
            features.append((offset + (idx + 1), toAppend))
        offset += 300
        return [features, offset]
    ## TODO Semantic frame SEMAFOR parser
    ''' return the SEMAFOR features for this sentence'''
    ''' http://www.cs.cmu.edu/~ark/SEMAFOR/'''
    def getSemantcFrameFeatures(self, sentence):
        # Unimplemented stub: currently only echoes the sentence's elements.
        for word in sentence:
            print(word);
    ## TODO Semantic Role label using SENNA
    ''' http://ml.nec-labs.com/senna/'''
    def getSRL(self, sentence):
        # Unimplemented stub: currently only echoes the sentence's elements.
        for word in sentence:
            print(word);
    ## TODO Syntactic Parse Tree using SENNA
    ''' http://ml.nec-labs.com/senna/'''
    def getSynParse(self, sentence):
        # Unimplemented stub: currently only echoes the sentence's elements.
        for word in sentence:
            print(word);
'get a set of features which measures the similarity from a sentence to the centroids of all categories'
    def getW2VCategoryFeatures(self, sentence, offset):
        """Cosine-similarity features of every known sentence word against
        each category centroid; one feature slot per (category, vocab index).

        Returns [sparse entries, new offset]; the offset advances by
        numCat * w2vVocabSize.
        """
        # feature array, which we will return
        # is a array of tuples, each tuple represent an entry in a sparse vector
        features = []
        filteredSet = set(sentence['tokens'])
        for w in filteredSet:
            if w in self.w2vVocab:
                val = self.w2vVocab[w]
                wordVec = val[0]
                index = val[1]
                # NOTE(review): /= mutates the cached vocabulary vector in
                # place (wordVec aliases the array stored in w2vVocab), so a
                # word seen twice across calls gets normalized twice -- confirm.
                wordVec /= numpy.linalg.norm(wordVec)
                i = 0
                for currCat in self.categories:
                    # cosine similarity to positive centroid
                    # NOTE(review): the trailing [0] relies on each centroid
                    # being a length-1 list wrapping a vector (see __init__).
                    features.append((offset+i*self.w2vVocabSize+index, numpy.dot(self.centroids[i], wordVec)[0]))
                    i += 1
        offset += self.numCat * self.w2vVocabSize
        return [features, offset]
'number of words in the sentence that consists only of capital letters'
def getCapitalizationFeature(self, sentence, offset):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
# number of words in sentence that consist only of capital letters
num = 0
for w in sentence['tokens']:
if w.isupper():
num +=1
if num > 0:
features.append((offset+1, num/len(sentence['tokens'])))
offset += 1
return [features, offset]
'number of of elongated words in a sentence'
def getElongatedWordFeature(self, sentence, offset):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
# number of elongated words
num = len(re.findall(r"([a-zA-Z])\1{2,}",sentence['sentence']))
if num > 0:
features.append((offset+1, num))
offset += 1
return [features, offset]
'Features for positive resp. negative emoticons'
def getEmoticonFeatures(self, sentence, offset):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
# number of positive emoticons
num = len(re.findall(self.patternPosEmoticons,sentence['sentence']))
if num > 0:
features.append((offset+1, 1))
num = len(re.findall(self.patternNegEmoticons,sentence['sentence']))
if num > 0:
features.append((offset+2, 1))
offset += 2
return [features, offset]
'Detect sequences of question and/or exclamation marks'
def getPunctuationFeature(self, sentence, offset):
# feature array, which we will return
# is a array of tuples, each tuple represent an entry in a sparse vector
features = []
num = len(re.findall(r"(!!|!\?|\?!|\?\?)[!\?]*", sentence['sentence']))
if num > 0:
features.append((offset+1, 1))
offset += 1
return [features, offset]
'Sentiment Feature: look for words in the sentence that appear in a list of positive resp. negative word (consider negation of words).'
    def getSentimentFeatures(self, sentence, offset):
        """Count positive and negative lexicon words in the sentence, flipping
        a word's polarity when 'not'/*n't appears in the two preceding tokens.

        Emits (offset+1, #positive) and (offset+2, #negative) entries when
        nonzero; the offset advances by two.
        """
        # feature array, which we will return
        # is a array of tuples, each tuple represent an entry in a sparse vector
        features = []
        lemmatizedSentence = [self.lemmatizer.lemmatize(w) for w in sentence['tokens']]
        numPos = 0
        numNeg = 0
        i = 0
        for w in lemmatizedSentence:
            # First pass: lexicon polarity of the word itself.
            isPos = False
            isNeg = False
            if w in prepare_data.positiveWords:
                isPos = True
            if w in prepare_data.negativeWords:
                isNeg = True
            if isPos or isNeg:
                # NOTE: isNeg is deliberately reused from here on to mean
                # "a negation precedes this word", not "the word is negative".
                # A word in BOTH lexicons is therefore counted by isPos alone.
                isNeg = False
                if i > 0:
                    prev = sentence['tokens'][i-1]
                    if prev == 'not' or prev.endswith('n\'t'):
                        isNeg = True
                if not isNeg and i > 1:
                    prev = sentence['tokens'][i-2]
                    if prev == 'not' or prev.endswith('n\'t'):
                        isNeg = True
                if isNeg:
                    # Negation flips the polarity of the count.
                    if isPos:
                        numNeg +=1
                    else:
                        numPos +=1
                else:
                    if isPos:
                        numPos +=1
                    else:
                        numNeg +=1
            i += 1
        if numPos > 0:
            features.append((offset+1, numPos))
        if numNeg > 0:
            features.append((offset+2, numNeg))
        offset += 2
        return [features, offset]
'Adding sentiment scores of synonyms of each word and add this as a feature'
    def getSentiwordFeatures(self, sentence, offset):
        """Sum SentiWordNet positive/negative scores (averaged over each
        word's synsets) across the sentence, with negation flipping a word's
        scores. Emits (offset+1, sumPos) / (offset+2, sumNeg) when nonzero.
        """
        # feature array, which we will return
        # is a array of tuples, each tuple represent an entry in a sparse vector
        features = []
        lemmatizedSentence = [self.lemmatizer.lemmatize(w) for w in sentence['tokens']]
        sumPos = 0
        sumNeg = 0
        i = 0
        for w in lemmatizedSentence:
            ssl = list(swn.senti_synsets(w))
            # Average positive and negative scores over the word's synsets,
            # counting only synsets with a nonzero score of that polarity.
            tmpSumPos = 0
            tmpSumNeg = 0
            tmpCountPos = 0
            tmpCountNeg = 0
            for s in ssl:
                ps = s.pos_score()
                ns = s.neg_score()
                if ps > 0:
                    tmpSumPos += ps
                    tmpCountPos += 1
                if ns > 0:
                    tmpSumNeg += ns
                    tmpCountNeg += 1
            if tmpCountPos > 0:
                tmpSumPos /= tmpCountPos
            if tmpCountNeg > 0:
                tmpSumNeg /= tmpCountNeg
            # is the word sentiment negated? ('not'/*n't in the two prior tokens)
            isNeg = False
            if i > 0:
                prev = sentence['tokens'][i-1]
                if prev == 'not' or prev.endswith('n\'t'):
                    isNeg = True
            if not isNeg and i > 1:
                prev = sentence['tokens'][i-2]
                if prev == 'not' or prev.endswith('n\'t'):
                    isNeg = True
            if isNeg:
                # Negation swaps the word's positive and negative scores.
                tmp = tmpSumPos
                tmpSumPos = tmpSumNeg
                tmpSumNeg = tmp
            sumPos += tmpSumPos
            sumNeg += tmpSumNeg
            i += 1
        if sumPos > 0:
            features.append((offset+1, sumPos))
        if sumNeg > 0:
            features.append((offset+2, sumNeg))
        offset += 2
        return [features, offset]
'Adding the SRL tags of a sentence (thus if it''s a predicate or what argument type it is)'
    def getSennaSRLFeatures(self, sentence):
        """Run the external SENNA binary in SRL mode on the sentence and map
        each token to a set of 8 binary role features (V, A0..A5, other).

        Returns [tokenFeatures, 8] where tokenFeatures has one list of sparse
        entries per token.
        NOTE(review): SENNA_PATH is not defined anywhere in this file -- it
        must come from elsewhere or this raises NameError; confirm.
        """
        # SENNA must be invoked from its own directory.
        chdir(SENNA_PATH)
        '''
        r = ''
        for s in dataset['sentences']:
            r += s['sentence']
            r += '\n'
        print(r)
        '''
        proc = subprocess.Popen(["./senna", '-srl'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output,err = proc.communicate(input=sentence['sentence'].encode('utf-8'))
        # SENNA emits one tab-separated line per token; strip each cell.
        output = output.decode().split('\n')
        output = [o.split('\t') for o in output]
        output = [[t.strip() for t in o] for o in output]
        # change back to program directory
        chdir(path.dirname(path.realpath(__file__)))
        # feature array, which we will return
        tokenFeatures = []
        offset = 0
        ind = 0
        for tok in sentence['tokens']:
            features = set() # features for a single token
            if ind < len(output):
                currTok = output[ind]
                # Re-align when SENNA's tokenization lags ours by one token.
                if tok not in currTok[0] and ind > 0:
                    currTok = output[ind-1]
                    ind -= 1
                # is current token from SENNA is part of the current token in sentence
                if tok in currTok[0]:
                    arr = currTok
                    # Columns from index 2 onward hold the SRL role labels.
                    for i in range(2,len(arr)):
                        if arr[i].endswith('-V'):
                            features.add((offset+1, 1))
                        elif arr[i].endswith('-A0'):
                            features.add((offset+2, 1))
                        elif arr[i].endswith('-A1'):
                            features.add((offset+3, 1))
                        elif arr[i].endswith('-A2'):
                            features.add((offset+4, 1))
                        elif arr[i].endswith('-A3'):
                            features.add((offset+5, 1))
                        elif arr[i].endswith('-A4'):
                            features.add((offset+6, 1))
                        elif arr[i].endswith('-A5'):
                            features.add((offset+7, 1))
                        else:
                            features.add((offset+8, 1))
            tokenFeatures.append(list(features))
            ind += 1
        return [tokenFeatures, 8]
'Get the nearest adjective and its sentiment as feature'
    def getNearestAdjSentFeatures(self, sentence, offset, ind):
        """Find the adjective/adverb closest to position *ind* and emit its
        averaged SentiWordNet scores as (offset+1, pos) / (offset+2, neg).

        The offset always advances by two; nothing is emitted when the
        sentence contains no JJ*/RB* token.
        """
        # feature array, which we will return
        # is a array of tuples, each tuple represent an entry in a sparse vector
        features = []
        lemmatizedSentence = [self.lemmatizer.lemmatize(w) for w in sentence['tokens']]
        closest = -1
        pos = 0
        neg = 0
        i = 0
        for w in lemmatizedSentence:
            ssl = list(swn.senti_synsets(w))
            currPOS = sentence['pos'][i][1]
            # Only replace the stored scores when this word is strictly closer.
            if closest == -1 or abs(ind - i) < abs(ind - closest):
                # if current POS tag is adjective or adverb
                if currPOS.startswith('JJ') or currPOS.startswith('RB'):
                    # Average the synset scores of each polarity, counting
                    # only synsets with a nonzero score of that polarity.
                    tmpSumPos = 0
                    tmpSumNeg = 0
                    tmpCountPos = 0
                    tmpCountNeg = 0
                    for s in ssl:
                        ps = s.pos_score()
                        ns = s.neg_score()
                        if ps > 0:
                            tmpSumPos += ps
                            tmpCountPos += 1
                        if ns > 0:
                            tmpSumNeg += ns
                            tmpCountNeg += 1
                    if tmpCountPos > 0:
                        tmpSumPos /= tmpCountPos
                    if tmpCountNeg > 0:
                        tmpSumNeg /= tmpCountNeg
                    pos = tmpSumPos
                    neg = tmpSumNeg
                    closest = i
            # is the word sentiment negated?
            isNeg = False
            if i > 0:
                prev = sentence['tokens'][i-1]
                if prev == 'not' or prev.endswith('n\'t'):
                    isNeg = True
            if not isNeg and i > 1:
                prev = sentence['tokens'][i-2]
                if prev == 'not' or prev.endswith('n\'t'):
                    isNeg = True
            # NOTE(review): this swap runs for EVERY token with a preceding
            # negation, flipping whatever adjective scores are currently
            # stored -- even when the negation is unrelated to that adjective.
            # Confirm whether the swap should only apply near `closest`.
            if isNeg:
                tmp = pos
                pos = neg
                neg = tmp
            i += 1
        if closest != -1:
            if pos > 0:
                features.append((offset+1, pos))
            if neg > 0:
                features.append((offset+2, neg))
        offset += 2
        return [features, offset]
|
987,654 | 64a3ccd0be2ab778959117e8aa300371db31303a | '''
假如如下数据对应数据库里面的目录结构记录
1. 假如添加的顺序是无序的,只根据id来进行索引
{
"data": [
{
"id": 1,
"parent_id": null
},
{
"id": 2,
"parent_id": 1
},
{
"id": 3,
"parent_id": null
},
{
"id": 4,
"parent_id": 3
},
{
"id": 5,
"parent_id": 4
},
{
"id": 6,
"parent_id": 4
},
{
"id": 7,
"parent_id": 3
},
{
"id": 8,
"parent_id": 7
},
{
"id": 9,
"parent_id": 7
},
{
"id": 10,
"parent_id": 7
},
{
"id": 11,
"parent_id": 3
},
{
"id": 12,
"parent_id": 11
},
{
"id": 13,
"parent_id": 11
},
{
"id": 14,
"parent_id": 11
},
{
"id": 15,
"parent_id": 11
},
{
"id": 16,
"parent_id": 11
},
{
"id": 17,
"parent_id": 3
},
{
"id": 18,
"parent_id": 17
},
{
"id": 19,
"parent_id": 17
},
{
"id": 20,
"parent_id": 17
}
]
}
'''
import json
dict_data = {
"data": [
{
"id": 1,
"parent_id": None
},
{
"id": 2,
"parent_id": 1
},
{
"id": 3,
"parent_id": None
},
{
"id": 4,
"parent_id": 3
},
{
"id": 5,
"parent_id": 4
},
{
"id": 6,
"parent_id": 4
},
{
"id": 7,
"parent_id": 3
},
{
"id": 8,
"parent_id": 7
},
{
"id": 9,
"parent_id": 7
},
{
"id": 10,
"parent_id": 7
},
{
"id": 11,
"parent_id": 3
},
{
"id": 12,
"parent_id": 11
},
{
"id": 13,
"parent_id": 11
},
{
"id": 14,
"parent_id": 11
},
{
"id": 15,
"parent_id": 11
},
{
"id": 16,
"parent_id": 11
},
{
"id": 17,
"parent_id": 3
},
{
"id": 18,
"parent_id": 17
},
{
"id": 19,
"parent_id": 17
},
{
"id": 20,
"parent_id": 17
}
]
}
# Flatten the payload to the list of directory records.
dir_data = dict_data["data"]
print(dir_data)
# Records whose parent_id is null are treated as roots of the forest.
root = []
def find_child(array, item):
    """Recursively attach the children of *item* under item["data"].

    A child is any record whose parent_id equals item["id"]; the linear scan
    over *array* plays the role of a SQL WHERE clause. Nodes without children
    end up with no "data" key at all.
    """
    children = [rec for rec in array if rec["parent_id"] == item["id"]]
    if not children:
        return
    item["data"] = children
    for child in children:
        find_child(array, child)
# Build the forest: every parentless record becomes a root, and its whole
# subtree is attached recursively before it is collected.
for i in dir_data:
    if i["parent_id"] is None:
        # Also look up and attach this root's child nodes.
        find_child(dir_data, i)
        root.append(i)
print(json.dumps(root))
|
987,655 | e8958a00cf693318a6024686e5e31519862e8687 | import os
import sys
# Make the project package (one directory above the cwd) importable.
path = os.getcwd().rsplit('/', 1)[0]
sys.path.append(path)
# Point Django at the project settings before constructing the WSGI handler.
os.environ['DJANGO_SETTINGS_MODULE'] = 'sismocaracas.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
987,656 | 3b0ea7382f19cf1978254d1b2eb5bb30c2f918d6 | from .module import RL
__all__ = ['RL'] |
987,657 | fd960d2cb51da0a1466459400455fc73e97e3022 | #!/usr/bin/env python
# https://leetcode.com/problems/median-of-two-sorted-arrays/#/description
# There are two sorted arrays nums1 and nums2 of size m and n respectively.
# Find the median of the two sorted arrays. The overall run time
# complexity should be O(log (m+n)).
from __future__ import division
import unittest
import random
from binary_search import binary_search
def simple_median(a):
    """Median of an already-sorted sequence: the middle element for odd
    lengths, the mean of the two middle elements for even lengths."""
    mid, odd = divmod(len(a), 2)
    if odd:
        return a[mid]
    return (a[mid - 1] + a[mid]) / 2.0
def true_median(a, b):
    """Brute-force reference median: merge both arrays, re-sort, take the median."""
    merged = sorted(a + b)
    return simple_median(merged)
def array_as_str(a, **kwargs):
    """Render *a* as space-separated values followed by its median; when a
    'partition' keyword is supplied, draw a bar after that index."""
    part = kwargs.get('partition')
    median = simple_median(a)
    if part is None:
        return '%s [%s]' % (' '.join(map(str, a)), median)
    cut = part + 1
    left = ' '.join(map(str, a[:cut]))
    right = ' '.join(map(str, a[cut:]))
    return "%s | %s [%s]" % (left, right, median)
def partition(a):
    """Index of the last element of the left half: (len(a) - 1) // 2.

    Equivalent to the original len//2-then-adjust form for every length.
    """
    return (len(a) - 1) // 2
def adjust(a, partition_a, median_a, b, partition_b, median_b):
    """Move both partitions one step toward a valid split.

    partition_b is shifted toward the overall median via a binary search
    inside b (the shorter array) and partition_a moves the opposite way
    by the same delta so the combined left half keeps its size.

    Returns (new_part_a, new_part_b); new_part_b may be None in the
    degenerate case where b's left part holds no value <= the median.
    """
    # b is the shorter array
    if median_b > median_a:
        # binary search in the left part of b for the leftmost occurrence of the largest value
        # that is <= the median
        new_part_b = binary_search.search_largest_less_than(b[:(partition_b + 1)], median_b)
        if new_part_b is None:
            # oh, boy. if we can't locate this value then the partition is already 0. instead
            # we have to move partition_a to the right by half the length of b.
            print "######################################"
            new_part_a = partition_a + len(b) // 2
            return new_part_a, new_part_b
        new_part_b -= 1 # since the search gave us the left endpoint of the right part
    else:
        # binary search in the right part of b for the rightmost occurrence of the smallest value that is >= the median
        new_part_b = binary_search.search_smallest_greater_than(b[(partition_b + 1):], median_b)
        new_part_b += (partition_b + 1) # add back the true index of the median
    delta = new_part_b - partition_b
    new_part_a = partition_a - delta
    return new_part_a, new_part_b
def find_median(arr1, arr2):
    """Median of two individually-sorted arrays via partition adjustment.

    Both arrays are split so everything left of the partitions is <= to
    everything right of them; the median is then read off the boundary
    elements.  Debug prints trace every adjustment step.
    NOTE(review): the partition_x + 1 indexing assumes the partitions
    never reach the array ends -- not guarded here, confirm with inputs.
    """
    # take a to be the longer one
    a = arr1
    b = arr2
    if len(arr2) > len(arr1):
        a, b = b, a
    median_a = simple_median(a)
    median_b = simple_median(b)
    parity_len_a = len(a) % 2
    parity_len_b = len(b) % 2
    # partition_x is the rightmost index of the left partition
    partition_a = partition(a)
    partition_b = partition(b)
    left_size = partition_a + partition_b + 2
    right_size = len(a) + len(b) - left_size
    print "left_size = %s, right_size = %s" % (left_size, right_size)
    # if the array with the larger median is odd length, subtract 1 from the partition
    if median_b > median_a:
        if parity_len_b == 1 and right_size - left_size > 1:
            partition_b -= 1
    if median_a > median_b:
        if parity_len_a == 1 and right_size - left_size > 1:
            partition_a -= 1
    print array_as_str(sorted(a + b))
    print array_as_str(a, partition=partition_a)
    print array_as_str(b, partition=partition_b)
    # Invariant sought: max of the left halves <= min of the right halves.
    left_max = max(a[partition_a], b[partition_b])
    right_min = min(a[partition_a + 1], b[partition_b + 1])
    while left_max > right_min:
        partition_a, partition_b = adjust(a, partition_a, median_a, b, partition_b, median_b)
        print array_as_str(a, partition=partition_a)
        print array_as_str(b, partition=partition_b)
        left_max = max(a[partition_a], b[partition_b])
        right_min = min(a[partition_a + 1], b[partition_b + 1])
    # Odd total length -> single middle element; even -> mean of the two.
    if parity_len_a != parity_len_b:
        result = left_max
    else:
        result = (left_max + right_min) / 2.0
    return result
def generate_array(min_len=2, max_len=15, lo=10, hi=40):
    """Return a sorted random test array.

    Backward compatible with the original zero-argument call: the length
    is drawn from [min_len, max_len] and values from [lo, hi], all
    inclusive.  Uses range() instead of the Python-2-only xrange() so the
    helper also runs under Python 3, and exposes the previously
    hard-coded bounds as parameters.
    """
    length = random.randint(min_len, max_len)
    return sorted(random.randint(lo, hi) for _ in range(length))
def print_test_case():
a = map(str, generate_array())
b = map(str, generate_array())
print ' '.join(a)
print ' '.join(b)
print ' '.join(sorted(a + b))
if __name__ == '__main__':
    # Demo run: generate two random sorted arrays and compare the
    # partition-based median against the brute-force reference.
    a = generate_array()
    b = generate_array()
    print "a = %s" % a
    print "b = %s" % b
    print "a = %s" % array_as_str(a)
    print "b = %s" % array_as_str(b)
    print "combo = %s" % array_as_str(sorted(a + b))
    result = find_median(a, b)
    compare = true_median(a, b)
    print "found median = %s" % result
    print "true median = %s" % compare
class TestMedian(unittest.TestCase):
def test_partition(self):
a = [1, 2, 3, 4, 5]
p = partition(a)
self.assertEqual(p, 2)
a = [1, 2, 3, 4, 5, 6]
p = partition(a)
self.assertEqual(p, 2)
def test_median_1(self):
a = [14, 16, 20, 21, 27, 29, 37, 38]
b = [13, 17, 21, 22, 23, 29, 31, 31, 32, 35, 35]
self.assertEqual(true_median(a, b), find_median(a, b))
def test_median_2(self):
a = [13, 15, 17, 18, 21, 31, 31, 32, 32, 32, 33, 33, 34, 39, 40]
b = [35, 35]
self.assertEqual(true_median(a, b), find_median(a, b))
def test_median_3(self):
a = [10, 14, 15, 19, 20, 26, 26, 26, 28, 28, 29, 29, 32, 35, 38]
b = [12, 18, 19, 34]
self.assertEqual(true_median(a, b), find_median(a, b))
def test_median_4(self):
a = [10, 18, 18, 19, 21, 22, 23, 23, 23, 28, 30, 32, 33]
b = [15, 23, 26, 27, 28, 30, 32, 38, 40]
self.assertEqual(true_median(a, b), find_median(a, b))
def test_median_5(self):
a = [11, 14, 17, 17, 17, 18, 18, 19, 21, 23, 29, 33, 34, 39, 40]
b = [10, 11, 21, 22, 23, 23, 26, 28, 30, 31, 34, 35, 37, 39, 39]
self.assertEqual(true_median(a, b), find_median(a, b))
def test_median_5(self):
a = [11, 11, 12, 12, 18, 22, 27, 30, 32, 33, 34, 36]
b = [11, 15, 28, 39, 40]
self.assertEqual(true_median(a, b), find_median(a, b))
def test_median_6(self):
a = [10, 24, 30, 33]
b = [15, 19, 19, 24, 33, 33, 36, 39, 40]
self.assertEqual(true_median(a, b), find_median(a, b))
|
987,658 | 48e647cf8b4dca94a8e676c80e1697c34d37b453 | n = int(input())
while n > 0:
    # One deck of the given size per test case.
    n -= 1
    size = int(input())
    deck = []
    # Build the deck back to front: prepend card i, then rotate the list
    # by moving its last element to the front i times (presumably the
    # inverse of a deal-and-rotate card trick -- confirm vs. the problem).
    for i in range(size, 0, -1):
        deck.insert(0, i)
        for j in range(0, i):
            deck.insert(0, deck.pop())
    # Emit the final order as space-separated values (trailing space kept).
    out = ''
    for i in deck:
        out += '{} '.format(i)
    print(out)
987,659 | c8868aebf66d708319e618c5543ecbd3161a705d | import tensorflow as tf
import numpy as np
print(f'\nTensorFlow version: {tf.VERSION}')
print(f'Keras version: {tf.keras.__version__}\n')
# 28x28 images of hand written digits 0-9
digits = tf.keras.datasets.mnist
# Unpack the data
(x_train, y_train), (x_test, y_test) = digits.load_data()
# Scale data: Normalization(values are converted to be between 0 & 1)
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
# Sequential type model
model = tf.keras.models.Sequential()
# Input Layer
model.add(tf.keras.layers.Flatten(input_shape = (28, 28)))
# Hidden Layers(2):128 neuron each with Rectified Linear("Default")
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
# Output Layer: 0-9 as our choices(10) with Probability Distribution
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
# Paramaters for the training of the model
model.compile(optimizer='adam', # "Default"
loss='sparse_categorical_crossentropy',# Cost(degree of erroe)
metrics=['accuracy'] # Metrics to track
)
# Fit model
print('Training the neural network...\n')
model.fit(x_train, y_train, epochs=3)
print('\nTraining Complete!\n')
# Evaluate the model with validation tests
print('Evaluation testing...\n')
validation_loss, validation_accuracy = model.evaluate(x_test, y_test)
print(f'Degree of error: {validation_loss}\n')
print(f'Accuracy of prediction: {validation_accuracy}\n')
# Save the learning model
print('Example of a saved then loaded model...\n')
model.save('number_rocognizer.model')
# Load a saved model
new_model = tf.keras.models.load_model('number_rocognizer.model')
# Example with new_model
import matplotlib.pyplot as plt
import numpy as np
# Predicted values of new_model
predictions = new_model.predict([x_test])
# Show a prediction with argmax(return the highest digit index; 0-9)
print('This is what the predictions look like:\n')
print(predictions[0])
print(f'\nNot very readable, but you can see the seventh index value is .999 in decimal notation.\nYou can easily retrieve the index of the highest number in a array with argmax.\n')
print(f'Predicted out come of the first value: {np.argmax(predictions[0])}\n')
# Show digit in matplotlib
plt.imshow(x_test[0])
print('Refer to image to see if the model predicted correctly.')
plt.show()
|
987,660 | 592c84f59c9d1c06d9343e33ddc1a118be8dd0b4 | import numpy as np
import matplotlib.pyplot as plt
import biotite.sequence as seq
import biotite.sequence.io.fasta as fasta
import biotite.sequence.io.genbank as gb
import biotite.sequence.graphics as graphics
import biotite.application.clustalo as clustalo
import biotite.database.entrez as entrez
# Search for protein products of LexA gene in UniProtKB/Swiss-Prot database
query = entrez.SimpleQuery("lexA", "Gene Name") \
& entrez.SimpleQuery("srcdb_swiss-prot", "Properties")
# Search for the first 200 hits
# More than 200 UIDs are not recommended for the EFetch service
# for a single fetch
uids = entrez.search(query, db_name="protein", number=200)
file = entrez.fetch_single_file(
uids, None, db_name="protein", ret_type="gp"
)
# The file contains multiple concatenated GenPept files
# -> Usage of MultiFile
multi_file = gb.MultiFile.read(file)
# Separate MultiFile into single GenBankFile instances
files = [f for f in multi_file]
print("Definitions:")
for file in files[:20]:
print(gb.get_definition(file))
print()
print("Sources:")
for file in files[:20]:
print(gb.get_source(file))
def abbreviate(species):
    """Shorten a binomial species name, e.g. 'Escherichia coli' -> 'E. coli'.

    Square brackets (as in '[Bacillus] subtilis') are stripped before
    splitting the name into genus and epithet.
    """
    cleaned = species.replace("[", "").replace("]", "")
    parts = cleaned.split()
    return "{:}. {:}".format(parts[0][0], parts[1])
print("Sources:")
all_sources = [abbreviate(gb.get_source(file)) for file in files]
for source in all_sources[:20]:
print(source)
# List of sequences
binding_sites = []
# List of source species
sources = []
# Set for ignoring already listed sources
listed_sources = set()
for file, source in zip(files, all_sources):
if source in listed_sources:
# Ignore already listed species
continue
bind_feature = None
annot_seq = gb.get_annotated_sequence(
file, include_only=["Site"], format="gp"
)
# Find the feature for DNA-binding site
for feature in annot_seq.annotation:
# DNA binding site is a helix-turn-helix motif
if "site_type" in feature.qual \
and feature.qual["site_type"] == "DNA binding" \
and "H-T-H motif" in feature.qual["note"]:
bind_feature = feature
if bind_feature is not None:
# If the feature is found,
# get the sequence slice that is defined by the feature...
binding_sites.append(annot_seq[bind_feature])
# ...and save the respective source species
sources.append(source)
listed_sources.add(source)
print("Binding sites:")
for site in binding_sites[:20]:
print(site)
alignment = clustalo.ClustalOmegaApp.align(binding_sites, bin_path=r'C:\Users\Rickman\Documents\GitHub\Labrotation\clustal-omega-1.2.2-win64\clustalo.exe', matrix=None)
fig = plt.figure(figsize=(4.5, 4.0))
ax = fig.add_subplot(111)
graphics.plot_alignment_similarity_based(
ax, alignment[:,:20], labels=sources[:20], symbols_per_line=len(alignment)
)
# Source names in italic
ax.set_yticklabels(ax.get_yticklabels(), fontdict={"fontstyle":"italic"})
fig.tight_layout()
fig = plt.figure(figsize=(8.0, 3.0))
ax = fig.add_subplot(111)
graphics.plot_sequence_logo(ax, alignment)
ax.set_xticks([5,10,15,20])
ax.set_xlabel("Residue position")
ax.set_ylabel("Bits")
# Only show left and bottom spine
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
fig.tight_layout()
# sphinx_gallery_thumbnail_number = 2
plt.show() |
987,661 | 1c1431ddce848fa84a502598e189ef01490249a2 | a= 10
# Demonstrate isinstance() checks on int and float values.
out = isinstance(a, int)
print("is a an integer", out)
# Bug fix: a stray dead statement `out = isinstance` rebound `out` to the
# builtin function itself; it had no effect on output and is removed.
price = 1.5
print(isinstance(price, int))
print(isinstance(price, float))
print(isinstance(price, str))
987,662 | 7a474543f00f90262b23b932128e7fb7e6c0231e | '''this module contains the base functions that will be used by the GUI and CLI'''
import csv
from datetime import datetime
csv_layout = ['card_num', 'name','under_13', 'under_18', 'under_60',
'over_59', 'zip', 'last_used_date', 'all_used_dates']
date_today = datetime.now()
def check_date(date_str):
    '''Return 0 if date_str matches the MM/DD/YYYY format, 1 otherwise.'''
    try:
        datetime.strptime(date_str, '%m/%d/%Y')
        return 0
    except ValueError:
        return 1
def card_info(csv_data, card_num):
    '''Look up a card by number in the CSV file.

    Returns a dict of the card's fields (every csv_layout column except
    'all_used_dates') plus its 0-based 'row_num', or the int 1 when the
    card is not found (legacy sentinel kept for existing callers).
    '''
    # 'with' guarantees the handle is closed even on the early return;
    # the original leaked an open file object.
    with open(csv_data) as csv_file:
        for index, row in enumerate(csv.reader(csv_file, delimiter=',')):
            if row[csv_layout.index('card_num')] == card_num:
                card_dict = {
                    item: row[csv_layout.index(item)]
                    for item in csv_layout
                    if item != 'all_used_dates'
                }
                card_dict['row_num'] = index
                return card_dict
    return 1
def check_card(csv_data, card_num, update_card=True):
    '''Check that a card exists in the CSV file and has not been used today.

    Returns 0 when the card is not found, 1 when it exists and was not
    used today (its usage dates are stamped when update_card is True),
    and 2 when it was already used today.  The file is rewritten in
    place, as in the original implementation.
    '''
    with open(csv_data) as csv_file:
        csv_list = list(csv.reader(csv_file, delimiter=','))
    card_dict = card_info(csv_data, card_num)
    today_str = date_today.strftime('%m/%d/%Y')
    # '!=' replaces the original 'is not 1': identity comparison against
    # an int literal is implementation-dependent and bug-prone.
    if card_dict != 1:
        if card_dict['last_used_date'] == today_str:
            return_val = 2  # card already used today
        else:
            if update_card:
                csv_list[card_dict['row_num']]\
                    [csv_layout.index('last_used_date')] = today_str
                try:
                    csv_list[card_dict['row_num']]\
                        [csv_layout.index('all_used_dates')] += '@' + today_str
                except IndexError:
                    # Row had no history column yet; create it.
                    csv_list[card_dict['row_num']].append('@' + today_str)
            return_val = 1  # card found and not used today
    else:
        return_val = 0  # card not found
    # Rewrite the file; 'with' closes the handle on every path.
    with open(csv_data, 'w') as csv_file:
        csv.writer(csv_file, delimiter=',').writerows(csv_list)
    return return_val
def add_card(csv_data, card_dict):
    '''Append a new card row to the CSV file.

    Returns 0 on success, 1 when the card number already exists, and 2
    when last_used_date is neither 'N/A' nor a valid MM/DD/YYYY date.
    '''
    # card_info returns a dict when the number is already present.
    if card_info(csv_data, card_dict['card_num']) != 1:
        return 1  # card exists
    # Build the row before validating the date, preserving the original
    # KeyError behavior for incomplete card_dict inputs.
    row_list = [card_dict[item] for item in csv_layout
                if item != 'all_used_dates']
    # '==' replaces the fragile 'is 0' identity check; 'with' closes the
    # handle that the original could leak on an exception.
    if (card_dict['last_used_date'] == 'N/A' or
            check_date(card_dict['last_used_date']) == 0):
        with open(csv_data, 'a') as csv_file:
            csv.writer(csv_file, delimiter=',').writerow(row_list)
        return 0
    return 2  # invalid date format
def remove_card(csv_data, card_num):
    '''Delete every row containing card_num from the CSV file.

    Returns 0 when at least one row was removed, 1 when none matched.
    NOTE(review): membership is tested against the whole row, not just
    the card-number column, so a matching value in ANY field removes the
    row -- preserved for compatibility, but worth confirming.
    '''
    # 'with' fixes the leaked read/write handles of the original.
    with open(csv_data) as csv_file:
        rows = list(csv.reader(csv_file, delimiter=','))
    kept = [row for row in rows if card_num not in row]
    return_val = 0 if len(kept) != len(rows) else 1
    with open(csv_data, 'w') as csv_file:
        csv.writer(csv_file, delimiter=',').writerows(kept)
    return return_val
def change_card(csv_data, card_num, card_dict):
    '''Replace the row for card_num with the values in card_dict.

    Returns 0 on success, 1 when card_num is not in the file, 2 when the
    new card number already belongs to another card, and 3 when
    last_used_date is neither 'N/A' nor a valid MM/DD/YYYY date.
    '''
    with open(csv_data) as csv_file:
        csv_list = list(csv.reader(csv_file, delimiter=','))
    card_info_dict = card_info(csv_data, card_dict['card_num'])
    if card_info_dict != 1 and card_num != card_dict['card_num']:
        return_val = 2  # the new number already belongs to another card
    elif (card_dict['last_used_date'] != 'N/A' and
            check_date(card_dict['last_used_date']) == 1):  # '==' not 'is'
        return_val = 3  # invalid date
    else:
        for row in csv_list:
            if card_num == row[csv_layout.index('card_num')]:
                for item in csv_layout:
                    if item != 'all_used_dates':
                        row[csv_layout.index(item)] = card_dict[item]
                return_val = 0
                break
        else:
            # Loop finished without a break: no row matched card_num.
            return_val = 1
    # Rewrite the file; 'with' closes the handle the original leaked.
    with open(csv_data, 'w') as csv_file:
        csv.writer(csv_file, delimiter=',').writerows(csv_list)
    return return_val
|
987,663 | 4cd2da38a6217cc747a98b26476b23bf1e09c242 | from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class Keywords(Client):
    """Sponsored Brands keyword operations of the Amazon Advertising API.

    Wraps the /sb/keywords endpoints: list, create, update, fetch and
    archive keywords attached to Sponsored Brands campaigns.
    """
    @sp_endpoint('/sb/keywords', method='GET')
    def list_keywords(self, **kwargs) -> ApiResponse:
        r"""
        Gets an array of keywords, filtered by optional criteria.

        Keyword Args
            | query **startIndex**:*integer* | Optional. 0-indexed record offset. Default: 0.
            | query **count**:*integer* | Optional. Records per page; defaults to max page size.
            | query **matchTypeFilter**:*string* | Optional. Comma-separated match types: broad, phrase, exact.
            | query **keywordText**:*string* | Optional. Restrict to keywords matching this text exactly.
            | query **stateFilter**:*string* | Optional. Comma-delimited states: enabled, paused, archived. Default: all three.
            | query **campaignIdFilter**:*string* | Optional. Comma-delimited campaign identifiers.
            | query **adGroupIdFilter**:*string* | Optional. Comma-delimited ad group identifiers.
            | query **keywordIdFilter**:*string* | Optional. Comma-delimited keyword identifiers.
            | query **creativeType**:*string* | Optional. 'productCollection' (non-video) or 'video'; all if unspecified.
            | query **locale**:*string* | Optional. Restrict to keywords associated with this locale.

        Returns:
            | ApiResponse
        """
        return self._request(kwargs.pop('path'), params=kwargs)
    @sp_endpoint('/sb/keywords', method='PUT')
    def edit_keywords(self, **kwargs) -> ApiResponse:
        """
        Updates one or more keywords (maximum list size: 100).

        Updated keywords may enter a 'pending' state for moderation review
        (up to 72 hours).  Keywords can only be updated on campaigns whose
        serving status is not archived, terminated, rejected, or ended.

        Request body
            | **keywordId** (integer($int64)): [required] Identifier of the keyword.
            | **adGroupId** (integer($int64)): [required] Identifier of the associated ad group.
            | **campaignId** (integer($int64)): [required] Identifier of the associated campaign.
            | **state** (string): [optional] One of: enabled, paused, pending, archived, draft.
            | **bid** (number): [optional] Bid for the keyword; must be below the account budget.

        Returns:
            | ApiResponse
        """
        return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)
    @sp_endpoint('/sb/keywords', method='POST')
    def create_keywords(self, **kwargs) -> ApiResponse:
        """
        Creates one or more keywords (maximum list size: 100).

        State cannot be set at creation; new keywords are 'pending' while
        under moderation review (up to 72 hours).  Keywords can only be
        created on campaigns whose serving status is not archived,
        terminated, rejected, or ended.

        Request body
            | **adGroupId** (integer($int64)): Identifier of the associated ad group.
            | **campaignId** (integer($int64)): Identifier of the associated campaign.
            | **keywordText** (string): Keyword text (at most 10 words).
            | **nativeLanguageKeyword** (string): Unlocalized keyword text in the advertiser's preferred locale.
            | **nativeLanguageLocale** (string): Advertiser's locale preference, e.g. zh_CN (US/UK/CA) or en_GB (DE/FR/IT/ES).
            | **matchType** (string): One of: broad, exact, phrase.
            | **bid** (number): [optional] Bid for the keyword; must be below the account budget.

        Returns:
            | ApiResponse
        """
        return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)
    @sp_endpoint('/sb/keywords/{}', method='GET')
    def get_keyword(self, keywordId, **kwargs) -> ApiResponse:
        """
        Gets a keyword specified by identifier.

        Keyword Args
            | path **keywordId** (integer): [required] Identifier of an existing keyword.
            | query **locale** (string): [optional] Restrict to keywords associated with this locale.

        Returns:
            | ApiResponse
        """
        return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)
    @sp_endpoint('/sb/keywords/{}', method='DELETE')
    def delete_keyword(self, keywordId, **kwargs) -> ApiResponse:
        """
        Archives a keyword specified by identifier.

        Equivalent to an update that sets status to 'archived'.  Archiving
        is permanent and cannot be undone.

        Keyword Args
            | path **keywordId** (integer): [required] Identifier of an existing keyword.

        Returns:
            | ApiResponse
        """
        return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)
987,664 | d35555ba857c4023a955c9c60f409e7f2b42b185 |
from app import db
from .BaseModels import BaseModel
# Category table.
class Category(BaseModel, db.Model):
    """A food category; Food rows reference it via cat_id."""
    __tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True, unique=True)
    name = db.Column(db.String(50), nullable=False, default='')
    weight = db.Column(db.Integer, nullable=False, default=0) # ordering weight
    status = db.Column(db.Integer, nullable=False, default=1) # status: 1 = active, 0 = inactive
    food = db.relationship('Food', backref='category')
    @property
    def status_desc(self):
        # NOTE(review): returns the raw status code, not a textual
        # description despite the name -- confirm callers expect this.
        return self.status
    def __repr__(self):
        return self.name
# Food (menu item) table.
class Food(BaseModel, db.Model):
    """A sellable food item belonging to one Category (via cat_id)."""
    __tablename__ = 'food'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False, default='') # display name
    price = db.Column(db.Numeric(10, 2), nullable=False, default=0) # selling price
    main_image = db.Column(db.String(100), nullable=False, default='') # main image
    summary = db.Column(db.String(2000), nullable=False, default='') # description
    # Bug fix: stock is an Integer column but previously defaulted to the
    # empty string '', which is not a valid integer default.
    stock = db.Column(db.Integer, nullable=False, default=0) # stock quantity
    tags = db.Column(db.String(200), nullable=False, default='') # tag keywords, comma-separated
    status = db.Column(db.Integer, nullable=False, default=1) # status: 1 = active, 0 = inactive
    month_count = db.Column(db.Integer, nullable=False, default=0) # monthly sales count
    total_count = db.Column(db.Integer, nullable=False, default=0) # total sales count
    view_count = db.Column(db.Integer, nullable=False, default=0) # view count
    comment_count = db.Column(db.Integer, nullable=False, default=0) # total comment count
    cat_id = db.Column(db.Integer, db.ForeignKey('category.id'), nullable=False)
987,665 | 2a437e388b004536059506387de4e707d4fdca49 | t = int(input())
while(t):
    # k = number of terms, d0/d1 = the first two digits of the sequence.
    k, d0, d1 = list(map(int, input().split()))
    s = d0 + d1
    # Last digits of successive doublings cycle with period 4
    # (x2, x4, x8, x6), so one full cycle contributes c to the total.
    c = (2 * s) + (4 * s) + (8 * s) + (6 * s)
    if k == 2:
        tot = s
    else:
        no_cycles = (k - 3) // 4
        left_over = (k - 3) - (no_cycles * 4)
        tot = (2 * s) + (no_cycles * c)
        p = 2
        for i in range(left_over):
            tot += (p * s)
            p *= 2
    # Bug fix: the verdict was previously printed only inside the k != 2
    # branch, so k == 2 test cases produced no output line at all.
    if tot % 3 == 0:
        print('YES')
    else:
        print("NO")
    t -= 1
987,666 | f3ad37ef19f613b933a294271c25163994f1f143 | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
import numpy as np
import calc
from .forms import InputForm, OptionalForm
def index(request):
    """Render the landing page of the health app."""
    #return HttpResponse(reverse('health: index'))
    return render(request, 'health/index.html')
#port, year, em1, em2, em3
def input(request):
    """Render the emissions input form, pre-filled with test defaults.

    Fields: port, year, em1/em2/em3 groups (fpm, SOx, NOx per mode).
    """
    # Temporary hard-coded initial values for testing; replace with a
    # blank form for production.
    temp_initial = {'port': 'Shenzhen', 'year': 2010, 'total_pm10': 350, 'em_fpm_port': 100, 'em_fpm_on': 100, 'em_fpm_off': 100, 'em_sox_port': 100, 'em_sox_on': 100, 'em_sox_off': 100, \
    'em_nox_port': 100, 'em_nox_on': 100, 'em_nox_off': 100}
    return render(request, 'health/input.html', {'form': InputForm(temp_initial)})
# pass pop, conc, y1, y2, y3? to optionalinput page
# process input with function in calc
def optionalinput(request):
    """Validate the emissions form and render the optional-input page.

    On valid input: bundles the per-mode emissions into numpy vectors,
    computes the ship-attributable PM share, runs calc.process_input and
    pre-fills the OptionalForm with the resulting population and
    concentration.  On invalid input: falls back to a blank InputForm
    (TODO: surface a validation warning to the user).
    """
    form = InputForm(request.POST)
    if form.is_valid():
        em = {}
        em['fpm'] = np.array([form.cleaned_data['em_fpm_port'], form.cleaned_data['em_fpm_on'], form.cleaned_data['em_fpm_off']])
        em['SOx'] = np.array([form.cleaned_data['em_sox_port'], form.cleaned_data['em_sox_on'], form.cleaned_data['em_sox_off']])
        em['NOx'] = np.array([form.cleaned_data['em_nox_port'], form.cleaned_data['em_nox_on'], form.cleaned_data['em_nox_off']])
        # Fine-PM fraction of the reported total PM10.
        ratio = sum(em['fpm'])*1.0/form.cleaned_data['total_pm10']
        result = calc.process_input(form.cleaned_data['port'], form.cleaned_data['year'], em, form.cleaned_data['direction'], ratio)
        # Underlying data (population, concentration) pre-fills the form.
        context = {'pop': result['pop'], 'conc': result['conc']}
        return render(request,'health/optionalinput.html', {'form': OptionalForm(context)})
    # Form not valid: render a blank input form again.
    return render(request, 'health/input.html', {'form': InputForm()})
def result(request):
    """Run the health-impact calculation and render the result page.

    If the optional form carries a complete set of baseline rates (every
    y_LC_*/y_CP_* age-group field plus y_ARI), they are forwarded to
    calc.process_optional_input as port_y; otherwise calc falls back to
    its built-in defaults.

    Bug fixes: the original LC age vector mistakenly pulled
    y_CP_55_59 and the CP vector pulled y_LC_75_79 (copy-paste errors).
    """
    age_groups = ['30_34', '35_39', '40_44', '45_49', '50_54', '55_59',
                  '60_64', '65_69', '70_74', '75_79', '80']
    lc_keys = ['y_LC_%s' % g for g in age_groups]
    cp_keys = ['y_CP_%s' % g for g in age_groups]
    indicator = 0
    form = OptionalForm(request.POST)
    if form.is_valid():
        data = form.cleaned_data
        # Only use the caller's baseline rates when every field was given.
        if all(data[k] is not None for k in lc_keys + cp_keys + ['y_ARI']):
            indicator = 1
            # Leading 0 pads the under-30 age group, as in the original.
            y = {
                'LC': np.array([0] + [data[k] for k in lc_keys]),
                'CP': np.array([0] + [data[k] for k in cp_keys]),
                'ARI': np.array([data['y_ARI']]),
            }
        if indicator == 0:
            results = calc.process_optional_input(pop=data['pop'], conc=data['conc'])
        else:
            results = calc.process_optional_input(pop=data['pop'], conc=data['conc'], port_y=y)
    # NOTE(review): like the original, an invalid form reaches this point
    # with `results` unbound and raises NameError -- TODO handle properly.
    context = {'indicator': results['indicator'], 'age': results['age'],
               'time': results['time'], 'zone': results['zone'], 'yll': results['yll'],
               'zone_num': range(1, results['zone']['ARI'].size + 1)}
    return render(request, 'health/result.html', context)
987,667 | 1b314d80e14c70a8f606db2caeac92bdf843ed71 | #!/usr/bin/env python3
from termcolor import colored
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', help="Verbose mode", action = 'store_true')
args = parser.parse_args()
init_data = [3,225,1,225,6,6,1100,1,238,225,104,0,1101,90,60,224,1001,224,-150,224,4,224,1002,223,8,223,1001,224,7,224,1,224,223,223,1,57,83,224,1001,224,-99,224,4,224,1002,223,8,223,1001,224,5,224,1,223,224,223,1102,92,88,225,101,41,187,224,1001,224,-82,224,4,224,1002,223,8,223,101,7,224,224,1,224,223,223,1101,7,20,225,1101,82,64,225,1002,183,42,224,101,-1554,224,224,4,224,102,8,223,223,1001,224,1,224,1,224,223,223,1102,70,30,224,101,-2100,224,224,4,224,102,8,223,223,101,1,224,224,1,224,223,223,2,87,214,224,1001,224,-2460,224,4,224,1002,223,8,223,101,7,224,224,1,223,224,223,102,36,180,224,1001,224,-1368,224,4,224,1002,223,8,223,1001,224,5,224,1,223,224,223,1102,50,38,225,1102,37,14,225,1101,41,20,225,1001,217,7,224,101,-25,224,224,4,224,1002,223,8,223,101,2,224,224,1,224,223,223,1101,7,30,225,1102,18,16,225,4,223,99,0,0,0,677,0,0,0,0,0,0,0,0,0,0,0,1105,0,99999,1105,227,247,1105,1,99999,1005,227,99999,1005,0,256,1105,1,99999,1106,227,99999,1106,0,265,1105,1,99999,1006,0,99999,1006,227,274,1105,1,99999,1105,1,280,1105,1,99999,1,225,225,225,1101,294,0,0,105,1,0,1105,1,99999,1106,0,300,1105,1,99999,1,225,225,225,1101,314,0,0,106,0,0,1105,1,99999,7,226,226,224,102,2,223,223,1006,224,329,101,1,223,223,1107,677,226,224,102,2,223,223,1006,224,344,1001,223,1,223,8,677,226,224,1002,223,2,223,1005,224,359,101,1,223,223,107,677,677,224,1002,223,2,223,1006,224,374,101,1,223,223,7,677,226,224,1002,223,2,223,1006,224,389,101,1,223,223,108,677,226,224,1002,223,2,223,1005,224,404,101,1,223,223,1108,677,226,224,102,2,223,223,1005,224,419,101,1,223,223,8,226,677,224,102,2,223,223,1006,224,434,1001,223,1,223,1008,677,677,224,1002,223,2,223,1005,224,449,1001,223,1,223,1107,226,677,224,102,2,223,223,1006,224,464,101,1,223,223,107,226,677,224,1002,223,2,223,1006,224,479,1001,223,1,223,7,226,677,224,102,2,223,223,1005,224,494,1001,223,1,223,8,677,677,224,102,2,223,223,1006,224,509,1001,223,1,223,1108,677,677,224,102,2,223,223,1005,224,524,1001,223,1,223,1108,226,677,224,1002,223,2,223,100
5,224,539,101,1,223,223,107,226,226,224,102,2,223,223,1006,224,554,1001,223,1,223,1007,226,226,224,102,2,223,223,1005,224,569,1001,223,1,223,1008,226,226,224,102,2,223,223,1005,224,584,101,1,223,223,1007,677,677,224,1002,223,2,223,1005,224,599,1001,223,1,223,108,677,677,224,1002,223,2,223,1006,224,614,1001,223,1,223,1007,226,677,224,1002,223,2,223,1006,224,629,101,1,223,223,1008,677,226,224,102,2,223,223,1005,224,644,101,1,223,223,1107,226,226,224,1002,223,2,223,1005,224,659,1001,223,1,223,108,226,226,224,1002,223,2,223,1005,224,674,101,1,223,223,4,223,99,226]
data = None
pc = None
class Instruction:
    """One decoded Intcode instruction: opcode plus per-parameter mode flags.

    `immediates[i]` is True when parameter i is in immediate mode (digit != 0
    in the mode field), False for position mode.
    """
    def __init__(self, insn):
        self.raw = insn
        self.immediates = [False, False, False]
        if insn >= 100:
            self.opcode = insn % 100
            modes = insn // 100
            slot = 0
            while modes > 0:
                self.immediates[slot] = modes % 10 != 0
                modes //= 10
                slot += 1
        else:
            self.opcode = insn
    def get_arg(self, arg, index):
        # Immediate mode reads the cell itself; position mode dereferences it.
        cell = data[arg]
        return cell if self.immediates[index] else data[cell]
    def store(self, index, value):
        # Write into global memory; return the touched index for highlighting.
        data[index] = value
        return index
    def print(self):
        fields = (pc, self.raw, self.opcode, str(self.immediates))
        print('pc: %d, raw: %d, opcode: %d, immediates: %s' % fields)
def init():
    """Reset the machine: fresh copy of the program, program counter at 0."""
    global data, pc
    data = list(init_data)
    pc = 0
def print_data(index):
    """Dump memory as comma-separated values, coloring the cell at `index` red."""
    cells = []
    for i, v in enumerate(data):
        cells.append(colored(v, 'red') if i == index else str(v))
    print(','.join(cells))
def process():
    """Decode and execute the single instruction at `pc`.

    Advances (or overwrites, for jumps) the global program counter.
    Returns the memory index written by the instruction so run() can
    highlight it via print_data(); returns -1 for jumps, None for output.
    Terminates the whole process via exit(0) on opcode 99.
    """
    global pc
    insn = Instruction(data[pc])
    if args.verbose:
        insn.print()
    if insn.opcode == 1:
        # addition
        r = insn.store(data[pc + 3], insn.get_arg(pc + 1, 0) + insn.get_arg(pc + 2, 1))
        pc += 4
        return r
    elif insn.opcode == 2:
        # multiplication
        r = insn.store(data[pc + 3], insn.get_arg(pc + 1, 0) * insn.get_arg(pc + 2, 1))
        pc += 4
        return r
    elif insn.opcode == 3:
        # input: force immediate mode so get_arg yields the raw target address
        insn.immediates[0] = True
        input_value = 5
        print('Getting hardcoded input %d' % input_value)
        # input_value = int(input('Get input:'))
        r = insn.store(insn.get_arg(pc + 1, 0), input_value)
        pc += 2
        return r
    elif insn.opcode == 5 or insn.opcode == 6:
        # jump-if-true (5) / jump-if-false (6)
        cmp = insn.get_arg(pc + 1, 0)
        if insn.opcode == 6:
            cmp = not cmp
        if cmp:
            pc = insn.get_arg(pc + 2, 1)
        else:
            pc += 3
        return -1
    elif insn.opcode == 7:
        # less-than: store 1 if arg0 < arg1 else 0
        r = int(insn.get_arg(pc + 1, 0) < insn.get_arg(pc + 2, 1))
        r = insn.store(data[pc + 3], r)
        pc += 4
        return r
    elif insn.opcode == 8:
        # equals: store 1 if arg0 == arg1 else 0
        r = int(insn.get_arg(pc + 1, 0) == insn.get_arg(pc + 2, 1))
        r = insn.store(data[pc + 3], r)
        pc += 4
        return r
    elif insn.opcode == 4:
        # output (implicitly returns None)
        print(colored('Output: %d' % insn.get_arg(pc + 1, 0), 'red'))
        pc += 2
    elif insn.opcode == 99:
        print('Exit')
        exit(0)
    else:
        print('Unknown opcode: %d' % insn.opcode)
        assert False
def run():
    """Run the Intcode program from a fresh state until opcode 99 exits."""
    init()
    step = 0
    print('Initial data')
    print_data(-1)
    # Loop forever; process() calls exit(0) on the halt opcode.
    while True:
        step += 1
        if args.verbose:
            print('Step %d:' % step)
        r = process()
        if args.verbose:
            print_data(r)
# run() never returns normally (process() exits the interpreter on opcode 99).
print(run())
|
987,668 | 2b76f0e500bdba765c121ab8b26aef640ee51753 | ##
##
# File auto-generated by PythonFileGenerator
# Public API of this auto-generated notification package.
__all__ = [
    'ClusterActivationNotification',
    'SiteActivationNotification'
]
from .ClusterActivationNotification import ClusterActivationNotification
from .SiteActivationNotification import SiteActivationNotification
|
987,669 | 576ce3a8caf9b7acf67e6830fbfc5d607304517c | # unix_send.py
from socket import *
# Both ends must agree on the same socket file path.
sock_file = "./sock"
# Create the local (UNIX-domain) stream socket and connect to the server.
sockfd = socket(AF_UNIX,SOCK_STREAM)
sockfd.connect(sock_file)
while 1:
    msg = input(">>")
    if not msg:  # an empty line ends the session
        break
    # Send the message; `data` receives the number of bytes actually sent.
    data = sockfd.send(msg.encode())
sockfd.close()
|
987,670 | 32a7a9cc24474ce9ffaa4509850f6e807a814959 | from django.db import models
from django.forms import ModelForm
class TestPlan(models.Model):
    """A test plan: which product/version it covers and who authored it."""
    name = models.CharField(max_length=30)
    product = models.CharField(max_length=30)
    product_version = models.CharField(max_length=10)
    created = models.DateTimeField(null=True, blank=True)
    author = models.CharField(max_length=100, blank=True)
    version = models.CharField(max_length=10, blank=True)
    plan_type = models.CharField(max_length=30, blank=True)
    def __str__(self):
        return self.name + " version: " + self.product_version
class TestRun(models.Model):
    """One execution of a TestPlan against a release/target/hardware combo.

    The aggregate helpers (get_total/get_run/get_passed/get_failed and the
    percentage formatters) operate over *all* runs sharing this run's
    release, plan, target and hardware (see get_for_plan_env).
    """
    TYPE_CHOICES = (
        ('Weekly', 'Weekly'),
        ('Full Pass', 'Full Pass')
    )
    testplan = models.ForeignKey(TestPlan, verbose_name="the related Test Plan")
    version = models.CharField(max_length=10, blank=True)
    release = models.CharField(max_length=30, blank=True)
    test_type = models.CharField(max_length=15, choices=TYPE_CHOICES)
    poky_commit = models.CharField(max_length=100)
    poky_branch = models.CharField(max_length=15)
    start_date = models.DateTimeField()
    stop_date = models.DateTimeField(null=True, blank=True)
    target = models.CharField(max_length=30, blank=True)
    image_type = models.CharField(max_length=30, blank=True)
    hw_arch = models.CharField(max_length=15, blank=True)
    hw = models.CharField(max_length=30, blank=True)
    host_os = models.CharField(max_length=30, blank=True)
    other_layers_commits = models.CharField(max_length=500, blank=True)
    ab_image_repo = models.CharField(max_length=100, blank=True)
    services_running = models.CharField(max_length=10000, blank=True)
    package_versions_installed = models.CharField(max_length=20000, blank=True)
    def get_for_plan_env(self):
        """All runs sharing this run's release, plan, target and hardware."""
        return TestRun.objects.filter(release=self.release).filter(testplan=self.testplan, target=self.target, hw=self.hw)
    def _sum_results(self, queryset_filter=None):
        """Sum test-case-result counts over the plan/environment's runs.

        `queryset_filter` maps a testcaseresult queryset to a filtered one;
        None counts everything.
        """
        total = 0
        for testrun in self.get_for_plan_env():
            qs = testrun.testcaseresult_set
            total += (queryset_filter(qs) if queryset_filter else qs).count()
        return total
    def get_total(self):
        """Total number of test case results across the environment's runs."""
        return self._sum_results()
    def get_run(self):
        """Number of results that were actually executed (not 'idle')."""
        return self._sum_results(lambda qs: qs.filter(~models.Q(result='idle')))
    def get_passed(self):
        """Number of passed results across the environment's runs."""
        return self._sum_results(lambda qs: qs.filter(result='passed'))
    def get_failed(self):
        """Number of failed results across the environment's runs."""
        return self._sum_results(lambda qs: qs.filter(result='failed'))
    @staticmethod
    def _percentage(part, whole):
        """Format part/whole*100 with trailing zeros trimmed; '0' if whole is 0.

        FIX: the original divided unconditionally and raised
        ZeroDivisionError whenever an environment had no (run) results yet.
        """
        if not whole:
            return '0'
        return ("%.2f" % ((part / float(whole)) * 100)).rstrip('0').rstrip('.')
    def get_abs_passed_percentage(self):
        """Passed as a percentage of all results (including idle ones)."""
        return self._percentage(self.get_passed(), self.get_total())
    def get_relative_passed_percentage(self):
        """Passed as a percentage of executed (non-idle) results."""
        return self._percentage(self.get_passed(), self.get_run())
    def __str__(self):
        return self.id.__str__() + " " + self.test_type + " " + self.release
class TestCaseResult(models.Model):
    """The outcome of one test case within a TestRun."""
    RESULT_CHOICES = (
        ('passed', 'passed'),
        ('failed', 'failed'),
        ('blocked', 'blocked'),
        ('idle', 'idle')
    )
    testcase_id = models.CharField(max_length=40)
    testrun = models.ForeignKey(TestRun)
    result = models.CharField(max_length=7, choices=RESULT_CHOICES)
    message = models.CharField(max_length=30000, blank=True)
    started_on = models.DateTimeField(null=True, blank=True)
    finished_on = models.DateTimeField(null=True, blank=True)
    attachments = models.CharField(max_length=1000, blank=True)
    comments = models.CharField(max_length=1000, blank=True)
    def __str__(self):
        return self.testcase_id + " is " + self.result
class TestReport(models.Model):
    """A saved report definition: an id plus the filter string that built it."""
    testreport_id = models.CharField(max_length=10, primary_key=True)
    filters = models.CharField(max_length=10000)
    def __str__(self):
        return self.testreport_id
class TestPlanForm(ModelForm):
    """ModelForm exposing the editable TestPlan fields."""
    class Meta:
        model = TestPlan
        fields = ['name', 'product', 'product_version', 'created', 'author', 'version', 'plan_type']
class TestRunForm(ModelForm):
    """ModelForm exposing the editable TestRun fields (testplan FK excluded)."""
    class Meta:
        model = TestRun
        fields = ['version', 'release', 'test_type', 'poky_commit', 'poky_branch', 'start_date', 'stop_date', 'target', 'image_type', 'hw_arch',
                'hw', 'host_os', 'other_layers_commits', 'ab_image_repo', 'services_running', 'package_versions_installed']
class TestCaseResultForm(ModelForm):
    """ModelForm exposing the editable TestCaseResult fields (testrun FK excluded)."""
    class Meta:
        model = TestCaseResult
        fields = ['testcase_id', 'result', 'message', 'started_on', 'finished_on', 'attachments', 'comments']
class TestReportForm(ModelForm):
    """ModelForm for creating/editing TestReport filter definitions."""
    class Meta:
        model = TestReport
        fields = ['filters']
|
987,671 | 2250d2828c3551fb126464521341788bdd010ce3 | version https://git-lfs.github.com/spec/v1
oid sha256:4d6b1283403d967ea5eddd71a913da66cd2cfd1d0e6fa41ed765dd1ab56b8e44
size 10905
|
987,672 | fdcd908c39fad645dc2a2b65b46e23cb23437c54 | #POR FIN NOJODASSSSS!!!!
import sys, re
from PyQt5.QtWidgets import QApplication, QDialog, QMessageBox
from PyQt5 import uic
class Dialogo(QDialog):
    """Registration-form dialog with live per-field validation.

    Each field's border is recolored as the user types:
    yellow = empty, red = invalid, green = valid.
    """
    # Names/surnames: letters (incl. Spanish accented chars) and whitespace.
    _NOMBRE_RE = '^[a-z\sáéíóúàèìòùäëïöüñ]+$'
    # Simple e-mail shape: local@domain.tld (tld 2-4 letters).
    _EMAIL_RE = '^[a-zA-Z0-9\._-]+@[a-zA-Z0-9-]{2,}[.][a-zA-Z]{2,4}$'
    def __init__(self):
        QDialog.__init__(self)
        uic.loadUi("Practica4.ui", self)
        self.NombreA.textChanged.connect(self.Validar_nombre)
        self.ApellidoA.textChanged.connect(self.Validar_apellido)
        self.EmailA.textChanged.connect(self.Validar_email)
        self.BotonA.clicked.connect(self.Validar_formulario)
    def _validar_campo(self, widget, patron):
        """Validate one line-edit against `patron`; recolor it, return bool.

        FIX: the three Validar_* methods were copy-pasted triplicates of
        this logic; they now share a single helper.
        """
        texto = widget.text()
        if texto == "":
            widget.setStyleSheet("border: 1px solid yellow;")
            return False
        if not re.match(patron, texto, re.I):
            widget.setStyleSheet("border: 1px solid red;")
            return False
        widget.setStyleSheet("border: 1px solid green;")
        return True
    def Validar_nombre(self):
        return self._validar_campo(self.NombreA, self._NOMBRE_RE)
    def Validar_apellido(self):
        return self._validar_campo(self.ApellidoA, self._NOMBRE_RE)
    def Validar_email(self):
        return self._validar_campo(self.EmailA, self._EMAIL_RE)
    def Validar_formulario(self):
        """Validate every field and pop a summary message box."""
        if self.Validar_nombre() and self.Validar_apellido() and self.Validar_email():
            QMessageBox.information(self, "Formulario correcto", "Validación correcta", QMessageBox.Discard)
        else:
            QMessageBox.warning(self, "Formulario incorrecto", "Validación incorrecta", QMessageBox.Discard)
# Qt application entry point: create the dialog and block until it closes.
app = QApplication(sys.argv)
dialogo = Dialogo()
dialogo.show()
app.exec_()
|
987,673 | 0cb711ea7e1028116b9cf8cb764c286b16c15828 | import os
from sklearn.datasets.base import Bunch
from yellowbrick.download import download_all
## The path to the test data sets (relative to the current working directory)
FIXTURES = os.path.join(os.getcwd(), "data")
## Dataset loading mechanisms: corpus name -> on-disk location
datasets = {
    "hobbies": os.path.join(FIXTURES, "hobbies")
}
def load_data(name, download=True):
"""
Loads and wrangles the passed in text corpus by name.
If download is specified, this method will download any missing files.
"""
# Get the path from the datasets
path = datasets[name]
# Check if the data exists, otherwise download or raise
if not os.path.exists(path):
if download:
download_all()
else:
raise ValueError((
"'{}' dataset has not been downloaded, "
"use the download.py module to fetch datasets"
).format(name))
# Read the directories in the directory as the categories.
categories = [
cat for cat in os.listdir(path)
if os.path.isdir(os.path.join(path, cat))
]
files = [] # holds the file names relative to the root
data = [] # holds the text read from the file
target = [] # holds the string of the category
# Load the data from the files in the corpus
for cat in categories:
for name in os.listdir(os.path.join(path, cat)):
files.append(os.path.join(path, cat, name))
target.append(cat)
with open(os.path.join(path, cat, name), 'r') as f:
data.append(f.read())
# Return the data bunch for use similar to the newsgroups example
return Bunch(
categories=categories,
files=files,
data=data,
target=target,
)
|
987,674 | 33be30404b9cdc5f7c16e3c4d46f74d270f26401 | #coding=utf-8
import json
import requests
from bs4 import BeautifulSoup
# Request target
targetUrl = "https://www.baidu.com/s?wd=13127965029&tn=json"
# Proxy server endpoint
proxyHost = '121.46.234.76'
proxyPort = "717"
proxyMeta = "http://%(host)s:%(port)s" % {
    "host" : proxyHost,
    "port" : proxyPort,
}
proxies = {
    "http" : proxyMeta,
}
# FIX: `headers` was referenced below but never defined (NameError at
# runtime); provide a minimal User-Agent header.
headers = {
    "User-Agent": "Mozilla/5.0",
}
resp = requests.get(targetUrl, headers=headers, proxies=proxies)
print(resp.text)
url = r'http://ip.chinaz.com/'  # NOTE(review): unused; requests below still hit targetUrl
proxy = {'http': 'http://58.218.214.136:16749',
         'https': 'https://58.218.214.136:16749'}  # NOTE(review): unused
res = requests.get(targetUrl, headers=headers)
json.loads(res.text)
html = BeautifulSoup(res.text, 'lxml')
nr = html.select('.fz24')[0].text
print(nr)
987,675 | 24ce2f79936d71f75b239d1d3657fe2bd310a707 | class Person:
def __init__(self,name,age): #constructor
self.name=name
self.age=age
def talk(self): #self is very imp.
print(f"Hello! My name is {name} and I am {age} years old :)")
name=input("Enter name: ")
age=input(f"Enter {name}'s age: ")
obj_person=Person(name,age)
obj_person.talk()
name=input("Enter name: ")
age=input(f"Enter {name}'s age: ")
obj2_person=Person(name,age)
obj2_person.talk()
|
987,676 | b35f64a1af9745dba3ca2cc7597430db936e9cbe | """
Usage:
class MixinClass(register_on_init.Logged):
@register.before('__init__')
def method1(self) -> None: ...
@register.before('__init__')
def method2(self, init_arg) -> None: ...
@register.after('__init__')
def method3(self) -> None: ...
class Example(MixinClass):
def __init__(self, init_arg): ...
class ExampleTerminal(Example):
pass
instance = ExampleTerminal('blah')
> method1 executed
> method2 executed with 'blah' passed
> __init__ executed with 'blah' passed
> method3 executed
register functions to execute before or after terminal class's method
functions are registered only on subclasses of Mixin
registered functions may accept either the args passed to terminal method or no args
note: if decorators and descriptors are mixed, stuff will fail silently and unavoidably
"""
from collections import defaultdict
from functools import wraps
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Set
from typing import TypeVar
__all__ = [
    'before',
    'after',
    'Mixin',
]
# Attribute names used to stash registration metadata on functions/classes.
_function_registry_key = '_function_registry_dict_'
_class_registry_key = '_class_registry_dict_'
_unwrapped_method_key = '_original_method_key_'
# Ordering keys inside a function's registry dict.
_before_key = '__before__'
_after_key = '__after__'
_T = TypeVar('_T', bound=Callable)  # any callable, preserved through decoration
_DECO_T = str  # decorator argument: name of the method to wrap
_METHOD_RUN_T = Iterable[str]  # names of registered methods to execute
def _register(f: _T, order_key: str, wrapped_method: _DECO_T) -> _T:
    """Record on *f* that it should run at `order_key` relative to `wrapped_method`."""
    if not isinstance(wrapped_method, str):
        raise TypeError('must specify method for which to register decorated method')
    if f.__name__.startswith('__'):
        raise TypeError('cannot register a class private method (starts with __)')
    registry = getattr(f, _function_registry_key, None)
    if registry is None:
        registry = defaultdict(set)
        setattr(f, _function_registry_key, registry)
    registry[order_key].add(wrapped_method)
    return f
def before(method: _DECO_T) -> Callable[[_T], _T]:
    """see module documentation"""
    def decorator(func: _T) -> _T:
        return _register(func, _before_key, method)
    return decorator
def after(method: _DECO_T) -> Callable[[_T], _T]:
    """see module documentation"""
    def decorator(func: _T) -> _T:
        return _register(func, _after_key, method)
    return decorator
def _get_from_registry(cls: type):
    """Aggregate {wrapped_method: {order_key: {registered fn names}}} over cls's MRO."""
    cls_method_registry = defaultdict(lambda: defaultdict(set))
    for cla in reversed(cls.__mro__[:-1]):  # skip `object`, walk base-first
        existing = getattr(cla, _class_registry_key, None)
        if isinstance(existing, defaultdict):
            # An ancestor already aggregated a registry: merge it wholesale.
            for wrapped_method, method_d in existing.items():
                for order_key, registered_fs in method_d.items():
                    for registered_f in registered_fs:
                        cls_method_registry[wrapped_method][order_key].add(registered_f)
        else:
            # Scan this class body for functions tagged by before()/after().
            for name, f in cla.__dict__.items():
                method_d = getattr(f, _function_registry_key, None)
                if isinstance(method_d, dict):
                    for order_key, wrapped_methods in method_d.items():
                        for wrapped_method in wrapped_methods:
                            cls_method_registry[wrapped_method][order_key].add(name)
    # NOTE(review): this `getattr` is a no-op (it only returns a value);
    # it looks like `setattr(cls, _class_registry_key, cls_method_registry)`
    # was intended, to cache the aggregate on the class -- confirm.
    getattr(cls, _class_registry_key, cls_method_registry)
    return cls_method_registry
def _perform(method_names: _METHOD_RUN_T, instance, *args, **kwargs) -> None:
    """Call each named method on *instance*, passing the wrapped method's
    arguments when accepted, otherwise calling with no arguments."""
    for method in method_names:
        # NOTE(review): getattr already yields a bound method; the extra
        # __get__ re-bind appears redundant -- confirm before changing.
        _bound = getattr(instance, method).__get__(instance, type(instance))
        try:
            _bound(*args, **kwargs)
        except TypeError:
            # Signature mismatch: retry without arguments.  NOTE(review):
            # this also swallows TypeErrors raised *inside* the method body.
            _bound()
def _wrapper_factory(method_to_be_wrapped: Callable, wrapped_method_d: Dict[str, Set[str]]):
    """Build a wrapper that fires registered before/after hooks around the method."""
    @wraps(method_to_be_wrapped)
    def _wrapper(self, *args, **kwargs) -> None:
        _perform(wrapped_method_d[_before_key], self, *args, **kwargs)
        # SUPPRESS-LINTER <will throw TypeError if args are passed to placeholder>
        # noinspection PyArgumentList
        method_to_be_wrapped(self, *args, **kwargs)
        _perform(wrapped_method_d[_after_key], self, *args, **kwargs)
    # Stash the unwrapped original so re-wrapping in subclasses does not nest.
    setattr(_wrapper, _unwrapped_method_key, method_to_be_wrapped)
    return _wrapper
def _wrap_methods_with_registered_functions(cls: type) -> None:
    """Install hook-running wrappers on *cls* for every registered method name."""
    for wrapped_method_name, registered_d in _get_from_registry(cls).items():
        to_be_wrapped = getattr(cls, wrapped_method_name, None)
        if to_be_wrapped is None:
            # Target method does not exist: wrap a no-arg placeholder instead.
            def _to_be_wrapped(self) -> None:
                f"""placeholder method for {cls.__name__}"""
            to_be_wrapped = _to_be_wrapped
        else:
            # Unwrap a previously installed wrapper to avoid double-wrapping.
            to_be_wrapped = getattr(to_be_wrapped, _unwrapped_method_key, to_be_wrapped)
        setattr(cls, wrapped_method_name, _wrapper_factory(to_be_wrapped, registered_d))
class Mixin:
    """see module documentation"""
    def __init_subclass__(cls) -> None:
        # Every subclass gets its registered hooks wired in at class creation.
        _wrap_methods_with_registered_functions(cls)
        super().__init_subclass__()
|
987,677 | 1286995f9bf2cc5c9748a0289fdf379bf8d377ca | import math
import random
import numpy as np
def generator():
    """Draw one sample from the distribution 21**(1-U) - 1 with U~Uniform(0,1).

    The result always lies in [0, 20].
    """
    u = random.uniform(0, 1)
    return math.pow(21, 1 - u) - 1
def generateMultiple(trials):
    """Average of `trials` draws from generator() (the mean of one sample)."""
    return sum(generator() for _ in range(trials)) / trials
##def multipleSampleOld(trials):
## sampleArr=[]
## total=0
## num=0
## differenceTotal=0
## for x in range(trials):
## num=generateMultiple(100)#change to 10000 for les than 10^-3, might run slow
## total+=num
## sampleArr.append(num)
## avg=total/trials
## print "Avg of all samples: "+str(avg)
## for n in range(len(sampleArr)):
## sampleArr[n]=sampleArr[n]-avg#subtracting mean from each value
## sampleArr[n]=sampleArr[n]**2#squaring each difference
## differenceTotal+=sampleArr[n]#adding upp squared differences to be averaged
## differenceAvg=differenceTotal/trials
## return math.sqrt(differenceAvg)
def multipleSample(trials):
    # Draw `trials` samples and return their population standard deviation.
    # (Python 2 script: uses print statements.)
    sampleArr=[]
    num=0
    differenceTotal=0
    for x in range(trials):
        num=generator()
        sampleArr.append(num)
    avg=sum(sampleArr)/trials
    print "Avg of all samples: "+str(avg)
    for n in range(len(sampleArr)):
        sampleArr[n]=sampleArr[n]-avg#subtracting mean from each value
        sampleArr[n]=sampleArr[n]**2#squaring each difference
        differenceTotal+=sampleArr[n]#adding up squared differences to be averaged
    differenceAvg=differenceTotal/trials
    return math.sqrt(differenceAvg)
print "Standard dev: "+str(multipleSample(3500000))
|
987,678 | 1247f8707ad40c63a8a4f9d5e0f02aa0f241313d | import base64
import hmac
import hashlib
import logging
import bcrypt
from django.conf import settings
from django.contrib.auth.hashers import (BCryptPasswordHasher,
BasePasswordHasher, mask_hash)
from django.utils.crypto import constant_time_compare
from django.utils.encoding import smart_str
from django.utils.datastructures import SortedDict
log = logging.getLogger('common.hashers')

# FIX: PEP 8 (E731) -- a named function instead of a lambda assignment.
def algo_name(hmac_id):
    """Return the hasher algorithm name for an HMAC key id (dashes -> underscores)."""
    return 'bcrypt{0}'.format(hmac_id.replace('-', '_'))
def get_hasher(hmac_id):
    """
    Dynamically create password hashers based on hmac_id.
    This class takes the hmac_id corresponding to an HMAC_KEY and creates a
    password hasher class based off of it. This allows us to use djangos
    built-in updating mechanisms to automatically update the HMAC KEYS.
    """
    dash_hmac_id = hmac_id.replace('_', '-')
    class BcryptHMACPasswordHasher(BCryptPasswordHasher):
        algorithm = algo_name(hmac_id)
        rounds = getattr(settings, 'BCRYPT_ROUNDS', 12)
        def encode(self, password, salt):
            # HMAC the password with the shared key first, then bcrypt the HMAC.
            shared_key = settings.HMAC_KEYS[dash_hmac_id]
            hmac_value = self._hmac_create(password, shared_key)
            bcrypt_value = bcrypt.hashpw(hmac_value, salt)
            # Encoded form: <algorithm><bcrypt-hash>$<hmac-key-version>
            return '{0}{1}${2}'.format(
                self.algorithm,
                bcrypt_value,
                dash_hmac_id)
        def verify(self, password, encoded):
            algo_and_hash, key_ver = encoded.rsplit('$', 1)
            try:
                shared_key = settings.HMAC_KEYS[key_ver]
            except KeyError:
                log.info('Invalid shared key version "{0}"'.format(key_ver))
                return False
            bc_value = '${0}'.format(algo_and_hash.split('$', 1)[1]) # Yes, bcrypt <3s the leading $.
            hmac_value = self._hmac_create(password, shared_key)
            # NOTE(review): plain == comparison; constant_time_compare would
            # avoid a timing side channel -- confirm before changing.
            return bcrypt.hashpw(hmac_value, bc_value) == bc_value
        def _hmac_create(self, password, shared_key):
            """Create HMAC value based on pwd"""
            hmac_value = base64.b64encode(hmac.new(
                smart_str(shared_key),
                smart_str(password),
                hashlib.sha512).digest())
            return hmac_value
    return BcryptHMACPasswordHasher
# We must have HMAC_KEYS. If not, let's raise an import error.
if not settings.HMAC_KEYS:
    raise ImportError('settings.HMAC_KEYS must not be empty.')
# For each HMAC_KEY, dynamically create a hasher to be imported.
for hmac_key in settings.HMAC_KEYS.keys():
    hmac_id = hmac_key.replace('-', '_')
    # Expose each hasher under its algorithm name so Django can import it.
    globals()[algo_name(hmac_id)] = get_hasher(hmac_id)
class BcryptHMACCombinedPasswordVerifier(BCryptPasswordHasher):
    """
    This reads anything with 'bcrypt' as the algo. This should be used
    to read bcypt values (with or without HMAC) in order to re-encode them
    as something else.
    """
    algorithm = 'bcrypt'
    rounds = getattr(settings, 'BCRYPT_ROUNDS', 12)
    def encode(self, password, salt):
        """This hasher is not meant to be used for encoding"""
        raise NotImplementedError()
    def verify(self, password, encoded):
        algo_and_hash, key_ver = encoded.rsplit('$', 1)
        try:
            shared_key = settings.HMAC_KEYS[key_ver]
        except KeyError:
            log.info('Invalid shared key version "{0}"'.format(key_ver))
            # Fall back to normal bcrypt
            algorithm, data = encoded.split('$', 1)
            return constant_time_compare(data, bcrypt.hashpw(password, data))
        bc_value = '${0}'.format(algo_and_hash.split('$', 1)[1]) # Yes, bcrypt <3s the leading $.
        hmac_value = self._hmac_create(password, shared_key)
        # NOTE(review): plain == comparison; constant_time_compare would
        # avoid a timing side channel -- confirm before changing.
        return bcrypt.hashpw(hmac_value, bc_value) == bc_value
    def _hmac_create(self, password, shared_key):
        """Create HMAC value based on pwd"""
        hmac_value = base64.b64encode(hmac.new(
            smart_str(shared_key),
            smart_str(password),
            hashlib.sha512).digest())
        return hmac_value
class SHA256PasswordHasher(BasePasswordHasher):
    """The SHA256 password hashing algorithm."""
    algorithm = 'sha256'
    def encode(self, password, salt):
        # NOTE(review): `salt + password` implies byte strings (Python 2 era);
        # this simple salted digest exists for legacy verification only.
        assert password
        assert salt and '$' not in salt
        hash = getattr(hashlib, self.algorithm)(salt + password).hexdigest()
        return '%s$%s$%s' % (self.algorithm, salt, hash)
    def verify(self, password, encoded):
        # Re-encode with the stored salt and compare in constant time.
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, salt)
        return constant_time_compare(encoded, encoded_2)
    def safe_summary(self, encoded):
        # Masked view for admin display: never expose the full salt/hash.
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return SortedDict([
            ('algorithm', algorithm),
            ('salt', mask_hash(salt, show=2)),
            ('hash', mask_hash(hash)),
        ])
class SHA1PasswordHasher(SHA256PasswordHasher):
    """The SHA1 password hashing algorithm."""
    # Same encode/verify scheme as the parent, different digest.
    algorithm = 'sha1'
class SHA512PasswordHasher(SHA256PasswordHasher):
    """The SHA512 password hashing algorithm."""
    # Same encode/verify scheme as the parent, different digest.
    algorithm = 'sha512'
class SHA512b64PasswordHasher(SHA512PasswordHasher):
    """The SHA512 password hashing algorithm with base64 encoding."""
    algorithm = 'sha512b64'
    def encode(self, password, salt):
        assert password
        assert salt and '$' not in salt
        # NOTE(review): base64.encodestring is deprecated (removed in 3.9);
        # kept because this module targets legacy Python/Django.
        hash = base64.encodestring(hashlib.sha512(salt + password).digest())
        return '%s$%s$%s' % (self.algorithm, salt, hash)
|
987,679 | cd34f004267c78abbe668f9a9a9aac8c61f34902 | """
Needs Revision
==============
"""
from y10.fr import cython013_py25
# Blunt "export everything": re-export every name visible at this point.
__all__ = dir()
|
987,680 | cce41b99723ae9504bc3738f5042725664890ef3 | from flask import Flask, render_template, jsonify, request, flash
from forms import ContactForm
from flaskext.mail import Mail, Message
import os
app = Flask(__name__)
app.secret_key = 'development key'  # NOTE(review): hardcoded; dev use only
mail = Mail()
app.config["MAIL_SERVER"] = "smtp.gmail.com" #gmail smtp settings
app.config["MAIL_PORT"] = 465
app.config["MAIL_USE_SSL"] = True
app.config["MAIL_USERNAME"] = os.environ.get('DB_USER')  # credentials come from env vars
app.config["MAIL_PASSWORD"] = os.environ.get('DB_PASS')
mail.init_app(app)
@app.route('/contact', methods=['GET', 'POST'])
def contact():
    """Render the contact form; on a valid POST, e-mail the message."""
    form = ContactForm()
    if request.method == 'POST':
        if not form.validate():
            flash("all Field are required.")
            return render_template('form.html', form=form)
        msg = Message(form.subject.data, sender='contact@exemple.com', recipients=[os.environ.get('DB_USER')])
        msg.body = """
            From: {} <{}>
            {}
            """.format(form.name.data, form.email.data, form.message.data)
        mail.send(msg)
        # FIX: the original fell off the end here (returned None), making
        # Flask raise a 500 on every successful submission.
        return render_template('form.html', form=form)
    # GET: just show the empty form.
    return render_template('form.html', form=form)
if __name__=="__main__":
app.run(debug=True) |
987,681 | 44138976fef88b5dfc09868a60b602484e4db386 | #from Gcode3D_executer import
from threading import Thread, Semaphore
class Tramas_V1:
    """G-code line dispatcher for a RasPi-driven 3D printer / pen plotter.

    NOTE(review): this class depends on many names defined elsewhere
    (Control_V1, Motor_control_new, MExt/MX/MY/MZ, dx/dy/dext, GPIO,
    endstop pins, heater helpers, speed globals, numpy math functions...);
    none are imported in this module -- confirm where they come from.
    """
    def LineaProcesa(lines):
        """Parse one G-code line and trigger the matching motion/heater action.

        NOTE(review): declared without `self`; the G-code string binds to the
        first positional argument, so this only works when invoked as
        Tramas_V1.LineaProcesa(line) -- confirm intended call style.
        NOTE(review): `dx/=25.4` below would raise UnboundLocalError (no
        `global` declaration), and the final G0/G1 `else:` branch reads
        `x_pos`/`y_pos` that may be unbound (the assignment is commented out).
        """
        if lines==[]:
            1; #blank lines
        elif lines[0:3]=='G90':
            Control_V1.motorOn();
            print ('start');
        elif lines[0:3]=='G92':
            print ('Reset Extruder to 0');
            MExt.position = 0;
        elif lines[0:3]=='G20':# working in inch;
            dx/=25.4;
            dy/=25.4;
            print ('Working in inch');
        elif lines[0:3]=='G21':# working in mm;
            print ('Working in mm');
        elif lines[0:3]=='G28': # homing all axis
            print ('Homing all axis...');
            #move till endstops trigger
            print ('Homing X axis...');
            homeAxis(MX,EndStopX)
            print ('Homing Y axis...');
            homeAxis(MY,EndStopY)
            print ('Homing Z axis...');
            homeAxis(MZ,EndStopZ)
        elif lines[0:3]=='M05': # these will not be used (M05) for the 3D Printer, I used this code for a pen plotter orginally but I could be used to attach a milling tool
            PenOff(MZ)
            #GPIO.output(Laser_switch,False);
            print ('Pen turned off');
        elif lines[0:3]=='M03':
            PenON(MZ)
            #GPIO.output(Laser_switch,True);
            print ('Pen turned on');
        elif lines[0:3]=='M02':
            GPIO.output(Laser_switch,False);
            print ('finished. shuting down');
##            break;
        elif lines[0:3]=='M84':
            Control_V1.motorOff();
            print ('motor off');
##            break;
        elif lines[0:4]=='M104': #Set Extruder Temperature
            #note that we should just be setting the tempurature here, but because this always fires before M109 call
            #I'm just turning the extruder on as well because then it can start heating up
            extTemp = float(SinglePosition(lines,'S'));
            print ('Extruder Heater On and setting temperature to '+ str(extTemp) +'C');
            GPIO.output(ExtHeater,True);
            sampleHeaters(0,1);
        elif lines[0:4]=='M106': #Fan on
            #for now we will just print the following text
            print ('Fan On');
        elif lines[0:4]=='M107': #Fan off
            #for now we will just print the following text
            print ('Fan Off');
        elif lines[0:4]=='M109': #Set Extruder Temperature and Wait
            #need to set temperature here and wait for correct temp as well
            #for now we will just turn on extruderheater
            #I would like to this all with the raspberry pi but...
            #I may use a simple Arduino(Uno) sketch to handle tempurature regulation
            #Doing with the RaspPi only would require polling the tempurature(maybe at each Z axis move?)
            print ('Extruder Heater On');
            GPIO.output(ExtHeater,True);
            extTemp = float(SinglePosition(lines,'S'));
            print ('Extruder Heater On and setting temperature to '+ str(extTemp) +'C');
            print ('Waiting to reach target temp...');
            sampleHeaters(extChannel,heatBedChannel);
            temp = getTempAtADCChannel(extChannel)
            # Busy-wait (with small sleeps) until the extruder reaches target.
            while temp < extTemp:
                time.sleep(0.02);
                temp = getAverageTempFromQue(getTempAtADCChannel(extChannel), "Extruder");
                print (str(temp));
        elif lines[0:4]=='M140': #Set Heat Bed Temperature
            #need to set temperature here as well
            #for now we will just turn on extruderheater
            heatBedTemp = float(SinglePosition(lines,'S'));
            print ('Setting Heat Bed temperature to '+ str(heatBedTemp) +'C');
        elif lines[0:4]=='M190': #Set HeatBed Temperature and Wait
            #need to set temperature here and wait for correct temp as well
            #for now we will just turn on HeatBedheater
            #I would like to this all with the raspberry pi but...
            #I may use a simple Arduino(Uno) sketch to handle tempurature regulation
            #Doing with the RaspPi only would require polling the tempurature(maybe at each Z axis move?)
            heatBedTemp = float(SinglePosition(lines,'S'));
            print ('HeatBed Heater On');
            print ('Setting HeatBed temperature to '+ str(heatBedTemp) +'C and waiting');
            GPIO.output(HeatBed,True);
            sampleHeaters(extChannel,heatBedChannel);
            temp = getTempAtADCChannel(heatBedChannel)
            # Busy-wait until the heat bed reaches target temperature.
            while temp < heatBedTemp:
                time.sleep(0.02);
                temp = getAverageTempFromQue(getTempAtADCChannel(heatBedChannel), "HeatBed");
                print (str(temp));
        elif (lines[0:3]=='G1F')|(lines[0:4]=='G1 F'):
            1;#do nothing
        elif (lines[0:3]=='G0 ')|(lines[0:3]=='G1 ')|(lines[0:3]=='G01'):#|(lines[0:3]=='G02')|(lines[0:3]=='G03'):
            #linear engraving movement
            if (lines[0:3]=='G0 '):
                engraving=False;
            else:
                engraving=True;
            #Update F Value(speed) if available
            if(lines.find('F') >= 0):
                speed = (SinglePosition(lines,'F')/60)/min(dx,dy); #getting F value as mm/min so we need to convert to mm/sec then calc and update speed
            if(lines.find('E') < 0 and lines.find('Z') < 0):
                # X/Y-only move
                [x_pos,y_pos]=XYposition(lines);
                #moveto(MX,x_pos,dx,MY,y_pos,dy,speed,engraving);
                Control_V1.MoveToCoordinate_XY(x_pos,dx,y_pos,dy,speed)
            elif(lines.find('X') < 0 and lines.find('Z') < 0): #Extruder only
                ext_pos = SinglePosition(lines,'E');
                stepsExt = int(round(ext_pos/dext)) - MExt.position;
                #TODO fix this extMotor Delay
                Motor_control_new.Single_Motor_Step(MExt,stepsExt,speed); #changed from static 40
                #still need to move Extruder using stepExt(signed int)
            elif(lines.find('X') < 0 and lines.find('E') < 0): #Z Axis only
                print ('Moving Z axis only');
                z_pos = SinglePosition(lines,'Z');
                #stepsZ = int(round(z_pos/dz)) - MZ.position;
                #Motor_control_new.Single_Motor_Step(MZ,stepsZ); #changed from static 60
                Control_V1.MoveToCoordinate_Z(z_pos,speedWork);
                #check Extruder and Heat Bed temp after Z axiz move
                checkTemps();
            else:
                # [x_pos,y_pos,ext_pos]=XYExt_position(lines);
                #movetothree(MX,x_pos,dx,MY,y_pos,dy,MExt,ext_pos,dext,speed,engraving);
                Control_V1.MoveToCoordinate_XY(x_pos,dx,y_pos,dy,speed)
                heaterCheck += 1;
                Control_V1.MoveToCoordinate_XY(x_pos,dx,y_pos,dy,speed)
                print("?");
            #create new moveto function to include Extruder postition
        elif (lines[0:3]=='G02')|(lines[0:3]=='G03'): #circular interpolation
            old_x_pos=x_pos;
            old_y_pos=y_pos;
            ext_pos = 0;
            #still need to add code here to handle extrusion info from the line if it is available
            if(lines.find('E') >= 0):
                #get E value as well as the rest
                [x_pos,y_pos]=XYposition(lines);
                [i_pos,j_pos,ext_pos]=IJEposition(lines);
            else:
                [x_pos,y_pos]=XYposition(lines);
                [i_pos,j_pos]=IJposition(lines);
            xcenter=old_x_pos+i_pos; #center of the circle for interpolation
            ycenter=old_y_pos+j_pos;
            Dx=x_pos-xcenter;
            Dy=y_pos-ycenter; #vector [Dx,Dy] points from the circle center to the new position
            r=sqrt(i_pos**2+j_pos**2); # radius of the circle
            e1=[-i_pos,-j_pos]; #pointing from center to current position
            if (lines[0:3]=='G02'): #clockwise
                e2=[e1[1],-e1[0]]; #perpendicular to e1. e2 and e1 forms x-y system (clockwise)
            else: #counterclockwise
                e2=[-e1[1],e1[0]]; #perpendicular to e1. e1 and e2 forms x-y system (counterclockwise)
            #[Dx,Dy]=e1*cos(theta)+e2*sin(theta), theta is the open angle
            costheta=(Dx*e1[0]+Dy*e1[1])/r**2;
            sintheta=(Dx*e2[0]+Dy*e2[1])/r**2; #theta is the angule spanned by the circular interpolation curve
            if costheta>1: # there will always be some numerical errors! Make sure abs(costheta)<=1
                costheta=1;
            elif costheta<-1:
                costheta=-1;
            theta=arccos(costheta);
            if sintheta<0:
                theta=2.0*pi-theta;
            no_step=int(round(r*theta/dx/5.0)); # number of point for the circular interpolation
            extruderMovePerStep = 0;
            if ext_pos != 0:
                extruderMovePerStep = (ext_pos - MExt.position)/no_step;
            # Approximate the arc with no_step straight segments.
            for i in range(1,no_step+1):
                tmp_theta=i*theta/no_step;
                tmp_x_pos=xcenter+e1[0]*cos(tmp_theta)+e2[0]*sin(tmp_theta);
                tmp_y_pos=ycenter+e1[1]*cos(tmp_theta)+e2[1]*sin(tmp_theta);
                if extruderMovePerStep == 0:
                    #moveto(MX,tmp_x_pos,dx,MY, tmp_y_pos,dy,speed,True);
                    Control_V1.MoveToCoordinate_XY(x_pos,dx,y_pos,dy,speed)
                else:
                    #movetothree(MX,tmp_x_pos,dx,MY, tmp_y_pos,dy,MExt,MExt.position+extruderMovePerStep,dext,speed,True);
                    Control_V1.MoveToCoordinate_XY(x_pos,dx,y_pos,dy,speed)
                if heaterCheck >= 2: #checking every fifth extruder motor move
                    print ('Checking Temps');
                    checkTemps();
                    heaterCheck = 0;
|
987,682 | 9671b45579723369cda720b66a0ef8e1af166447 | import time
import vtk
#import datetime
import math
#import random
#import os
import csv
#import sys
import randompolygon as rpg
def timeRPG(N):
    """Time one rpg.rpg(N) call; return the elapsed seconds in a 1-element list."""
    start = time.time()
    rpg.rpg(N)
    elapsed = time.time() - start
    return [elapsed]
if __name__ == "__main__":
    # NOTE: Python 2 script (print statements, csv opened in 'wb' mode).
    print "RPG version: ",rpg.version()
    # Problem sizes are 2**e for half-integer exponents e in
    # [Nmax_exp_start/2, Nmax_exp_end/2].
    Nmax_exp_start = 5
    Nmax_exp_end = 20
    # 10 -> 32 linesites
    # 14 -> 128
    # 18 -> 512
    # 20 -> 1024
    # 24 -> 4096
    # 28 -> 16384
    # 32 -> 65536
    # 33 -> 92681
    # 34 -> 131072
    # 35 -> 185363
    # 36 -> 262144
    exp_list = [0.5*x for x in range(Nmax_exp_start,Nmax_exp_end+1)]
    Nmax_list=[]
    n=5
    # generate the list of problem sizes to run
    # Each case is [running index n, polygon size Nmax = floor(2**e)].
    for e in exp_list:
        Nmax_list.append( [ n, int( math.floor( (math.pow(2,e) ) ) ) ] )
        n=n+1
    #print Nmax_list
    #exit()
    # One CSV row per case: Nmax followed by the timing(s) for that run.
    csvWriter = csv.writer(open('bench_results.csv', 'wb'), delimiter=',' )
    for case in Nmax_list:
        n=case[0]
        Nmax=case[1]
        times = timeRPG(Nmax)
        print n," RPG for ",Nmax," took {0:.3f} seconds".format(sum(times))
        row = []
        row.append(Nmax)
        for t in times:
            row.append(t)
        csvWriter.writerow( row )
    print "PYTHON All DONE."
|
987,683 | 7c60f8ab9dbca447ae3050a76621f1a53e98c40b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module downloads all the ROAs from ftp.ripe.net/rpki or all the ROAs
that exist for a specific date.
Processing the HTML from the server has been tested to be faster than
using Python's built-in FTP library.
Guidelines:
1. Maintain a table of files that have been parsed.
2. Extract the URLs. This step is not multiprocessed.
3. Use the URLs to multiprocess the downloading, reformatting, and insertion
of all roas.csv files.
"""
__author__ = "Tony Zheng"
__credits__ = ["Tony Zheng"]
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "jfuruness@gmail.com"
__status__ = "Development"
import os
from pathos.multiprocessing import ProcessPool
import requests
from bs4 import BeautifulSoup as Soup
from datetime import datetime
from ...utils.base_classes import Parser
from ...utils import utils
from .tables import Historical_ROAs_Table, Historical_ROAs_Parsed_Table
class Historical_ROAs_Parser(Parser):
    """Downloads roas.csv files from ftp.ripe.net/rpki, reformats them with
    shell tools, and bulk-inserts them into the historical ROAs table,
    skipping any file a previous run already parsed."""

    # Shared HTTP session so directory crawling reuses connections.
    session = requests.Session()
    root = 'https://ftp.ripe.net/rpki'

    def _run(self, date: datetime = None):
        """Pass a datetime object to get ROAs for specified date
        or default to getting all ROAs."""
        if date is None:
            urls = self._get_csvs()
        else:
            urls = self._get_csvs_date(date)
        # Drop any URL that is already recorded in the parsed-files table.
        parsed = self._get_parsed_files()
        urls = [url for url in urls if url not in parsed]
        download_paths = self._create_local_download_paths(urls)
        # Multiprocessingly download/format/insert all csvs
        # using four times # of CPUs
        with utils.Pool(0, 4, self.name) as pool:
            pool.map(utils.download_file, urls, download_paths)
            pool.map(self._reformat_csv, download_paths)
            pool.map(self._db_insert, download_paths)
        # Parallel insertion can produce duplicate rows; clean them up once.
        with Historical_ROAs_Table() as t:
            t.delete_duplicates()
        self._add_parsed_files(urls)
        utils.delete_files(self.path)

    def _get_parsed_files(self):
        """Return the csvs that have already been parsed and inserted into db"""
        parsed = []
        with Historical_ROAs_Parsed_Table() as t:
            for row in t.execute(f'SELECT * FROM {t.name}'):
                parsed.append(row['file'])
        return parsed

    def _add_parsed_files(self, files):
        """Adds newly parsed csvs to the parsed table"""
        # Write one URL per line, then bulk-load that file into the table.
        path = os.path.join(self.path, 'roas_parsed.csv')
        with open(path, 'w+') as f:
            for line in files:
                f.write(line + '\n')
        utils.csv_to_db(Historical_ROAs_Parsed_Table, path)

    def _create_local_download_paths(self, urls):
        """Create the local directories where csvs will be downloaded to."""
        # URL: https://ftp.ripe.net/rpki/afrinic.tal/2019/08/01/roas.csv
        # Path: /tmp/bgp_Historical_ROAs_Parser/rpki/2019/08/01/
        download_paths = []
        for url in urls:
            download_path = os.path.join(self.path, url[url.index('rpki'):])
            # p flag creates necessary parent directories
            # slicing off the 'roas.csv'
            utils.run_cmds(f'mkdir -p {download_path[:-8]}')
            download_paths.append(download_path)
        return download_paths

    def _reformat_csv(self, csv):
        """Delete URI (1st) column using cut,
        delete the first row (column names),
        delete 'AS', add the date, replace commas with tabs using sed"""
        # avoid using extra backslashes because sed uses them as delimiter
        # Paths end in .../YYYY/MM/DD/roas.csv, so [-19:-9] is 'YYYY/MM/DD'.
        date = csv[-19:-9].replace('/', '-')
        cmds = [f'cut -d , -f 1 --complement <{csv} >{csv}.new',
                f'mv {csv}.new {csv}',
                f'sed -i "1d" {csv}',
                f'sed -i "s/AS//g" {csv}',
                f'sed -i "s/,/\t/g" {csv}',
                f'sed -i "s/$/\t{date}/" {csv}']
        utils.run_cmds(cmds)

    def _db_insert(self, csv):
        # Bulk-load one reformatted csv into the historical ROAs table.
        utils.csv_to_db(Historical_ROAs_Table, csv)

    def _get_csvs(self):
        """
        Returns the paths to all the csvs that exist under root.
        """
        # Iterative depth-first search over the server's directory listings.
        stack = [self.root]
        urls = []
        while stack:
            curr_dir = stack.pop()
            # skip first link which is always the parent dir
            for link in self._soup(curr_dir)('a')[1:]:
                href = link['href']
                next_link = os.path.join(curr_dir, href)
                # Case 1: found the csv, add it to the list of URLs we return
                # Case 2: Empty dir or repo.tar.gz. Ignore.
                # Case 3: DFS further down
                if 'csv' in href:
                    urls.append(next_link)
                elif href != '/' and href != 'repo.tar.gz':
                    stack.append(next_link)
        return urls

    def _get_csvs_date(self, date):
        """Get all the paths to roas.csv for a specific date."""
        # from the root page, get the links to each internet registry
        paths = []
        for registry in self._soup(self.root)('a')[1:]:
            paths.append(os.path.join(self.root, registry['href']))
        # complete the url by adding the date and 'roas.csv'
        date_as_url = date.strftime('%Y/%m/%d/')
        for i in range(len(paths)):
            paths[i] = os.path.join(paths[i], date_as_url, 'roas.csv')
        # return the paths that exist (probe each with a GET)
        return [p for p in paths if self.session.get(p).status_code == 200]

    def _soup(self, url):
        """Returns BeautifulSoup object of url."""
        r = self.session.get(url)
        r.raise_for_status()
        html = Soup(r.text, 'lxml')  # lxml is faster than html.parser
        r.close()
        return html
|
987,684 | 32d737148a6c11376ebde0e039c5ec05e3e13024 | import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../..')
sys.path.append('../../../../QuantumCircuitOptimizer')
from vqe import *
from ucc import UnitaryCoupledCluster
#from spsa import SPSA
from quantum_circuit import QuantumCircuit,SecondQuantizedHamiltonian
# Brute-force scan of the 2-parameter UCC-singles energy landscape for the
# pairing Hamiltonian, evaluated on a grid_points x grid_points grid.
l = 3  # number of levels/qubits (passed as system size to the Hamiltonian)
n = 2  # number of particles
delta = 1  # Level spacing
g = 1  # Interaction strength
#g /= 4
# Matrix elements
# One-body part: h_pq diagonal, degenerate pairs (p, p+1) share an energy.
h_pq = np.identity(l)
for p in range(l):
    h_pq[p,p] *= delta*(p - (p%2))/2
# Two-body part: pair-hopping interaction between even-indexed pairs.
h_pqrs = np.zeros((l,l,l,l))
for p in range(0,l-1,2):
    for r in range(0,l-1,2):
        h_pqrs[p,p+1,r,r+1] = -0.5*g
# Prepare circuit list
pairing = SecondQuantizedHamiltonian(n,l)
pairing.set_integrals(h_pq,h_pqrs)
pairing.get_circuit()
circuit_list = pairing.to_circuit_list(ptype='vqe')
# Singles-only UCC ansatz; assumed to have exactly 2 parameters here
# (the grid below is 2-D) -- the print confirms it at runtime.
ansatz = UnitaryCoupledCluster(n,l,'S',1)
og_params = ansatz.new_parameters(pairing.h,
                                  pairing.v)
print(len(og_params))
# Cartesian grid of parameter pairs over [0, 2*pi) x [0, 2*pi).
grid_points = 50
x = np.linspace(0,2*np.pi,grid_points)
y = np.linspace(0,2*np.pi,grid_points)
params = []
for i,xi in enumerate(x):
    for j,yi in enumerate(y):
        params.append([xi,yi])
Es = np.zeros(grid_points*grid_points)
theta = og_params
vqe = VQE(n_qubits = l,
        ansatz = ansatz,
        circuit_list = circuit_list,
        shots = 1000,
        ancilla=0,
        max_energy=False,
        prnt=False)
assert len(params) == grid_points*grid_points
# Evaluate the energy expectation value at every grid point (flattened
# row-major: index i = row*grid_points + col).
for i,theta in enumerate(params):
    print('{} / {}'.format(i+1,grid_points*grid_points))
    Es[i] = vqe.expval(theta)
np.save('data/brute/2D/grid.npy',x)
np.save('data/brute/2D/parameters.npy',np.asarray(params))
np.save('data/brute/2D/values1000.npy',Es)
|
987,685 | 575096c41d005ae6e506af5c4f51b5d7ea2da9b0 | #!/usr/bin/env python
import numpy as np
from LegKinematics import LegIK
from LieAlgebra import RpToTrans, TransToRp, TransInv, RPY, TransformVector
from collections import OrderedDict
class SpotModel:
    """Whole-body inverse kinematics for the Spot Micro quadruped.

    Holds the fixed body geometry (hip spacing, default foot stance,
    standing height), one LegIK solver per leg, and the world-frame
    transforms of each hip and each default foot position.
    """

    def __init__(self,
                 hip_length=0.04,
                 shoulder_length=0.1,
                 leg_length=0.1,
                 hip_lim=None,
                 shoulder_lim=None,
                 leg_lim=None):
        """ Spot Micro Kinematics

        :param hip_length: hip link length (m)
        :param shoulder_length: shoulder link length (m)
        :param leg_length: leg link length (m)
        :param hip_lim: [min, max] hip joint limits (rad)
        :param shoulder_lim: [min, max] shoulder joint limits (rad)
        :param leg_lim: [min, max] leg joint limits (rad)
        """
        # None-sentinels instead of mutable list defaults so instances never
        # share (and can never accidentally mutate) the same default lists.
        if hip_lim is None:
            hip_lim = [-0.548, 0.548]
        if shoulder_lim is None:
            shoulder_lim = [-2.17, 0.97]
        if leg_lim is None:
            leg_lim = [-0.1, 2.59]
        # Leg Parameters
        self.hip_length = hip_length
        self.shoulder_length = shoulder_length
        self.leg_length = leg_length
        # Leg Vector desired_positions
        # Distance Between Hips
        # Length
        self.hip_x = 0.192
        # Width
        self.hip_y = 0.075
        # Distance Between Feet
        # Length
        self.foot_x = 0.21
        # Width
        self.foot_y = 0.18
        # Body Height
        self.height = 0.15
        # Joint Parameters
        self.hip_lim = hip_lim
        self.shoulder_lim = shoulder_lim
        self.leg_lim = leg_lim
        # Per-leg IK solvers; left/right legs mirror each other.
        self.Legs = OrderedDict()
        for leg, side in (("FL", "LEFT"), ("FR", "RIGHT"),
                          ("BL", "LEFT"), ("BR", "RIGHT")):
            self.Legs[leg] = LegIK(side, self.hip_length,
                                   self.shoulder_length, self.leg_length,
                                   self.hip_lim, self.shoulder_lim,
                                   self.leg_lim)
        # Transform of Hip relative to world frame
        # With Body Centroid also in world frame
        Rwb = np.eye(3)
        self.WorldToHip = OrderedDict()
        self.ph_FL = np.array([self.hip_x / 2.0, self.hip_y / 2.0, 0])
        self.WorldToHip["FL"] = RpToTrans(Rwb, self.ph_FL)
        self.ph_FR = np.array([self.hip_x / 2.0, -self.hip_y / 2.0, 0])
        self.WorldToHip["FR"] = RpToTrans(Rwb, self.ph_FR)
        self.ph_BL = np.array([-self.hip_x / 2.0, self.hip_y / 2.0, 0])
        self.WorldToHip["BL"] = RpToTrans(Rwb, self.ph_BL)
        self.ph_BR = np.array([-self.hip_x / 2.0, -self.hip_y / 2.0, 0])
        self.WorldToHip["BR"] = RpToTrans(Rwb, self.ph_BR)
        # Transform of Foot relative to world frame
        # With Body Centroid also in world frame
        self.WorldToFoot = {}
        self.pf_FL = np.array(
            [self.foot_x / 2.0, self.foot_y / 2.0, -self.height])
        self.WorldToFoot["FL"] = RpToTrans(Rwb, self.pf_FL)
        self.pf_FR = np.array(
            [self.foot_x / 2.0, -self.foot_y / 2.0, -self.height])
        self.WorldToFoot["FR"] = RpToTrans(Rwb, self.pf_FR)
        self.pf_BL = np.array(
            [-self.foot_x / 2.0, self.foot_y / 2.0, -self.height])
        self.WorldToFoot["BL"] = RpToTrans(Rwb, self.pf_BL)
        self.pf_BR = np.array(
            [-self.foot_x / 2.0, -self.foot_y / 2.0, -self.height])
        self.WorldToFoot["BR"] = RpToTrans(Rwb, self.pf_BR)

    def IK(self, orn, pos, T_bf):
        """ Converts a desired position and orientation wrt Spot's
            home position, with a desired body-to-foot Transform
            into a body-to-hip Transform, of which the translational
            component can be fed into the LegIK solver.
            Finally, the resultant joint angles are returned
            from the LegIK solver for each leg.

            :param orn: A 3x1 np.array([]) with Spot's Roll, Pitch, Yaw angles
            :param pos: A 3x1 np.array([]) with Spot's X, Y, Z coordinates
            :param T_bf: Dictionary of desired body-to-foot Transforms.
            :return: Joint angles for each of Spot's joints (4x3 array).
        """
        # Following steps in attached document: SpotBodyIK.
        # TODO: LINK DOC
        # 4 legs, 3 joints per leg
        joint_angles = np.zeros((4, 3))
        # Only get Rot component
        Rb, _ = TransToRp(RPY(orn[0], orn[1], orn[2]))
        pb = pos
        T_wb = RpToTrans(Rb, pb)
        for i, (key, T_wh) in enumerate(self.WorldToHip.items()):
            # ORDER: FL, FR, BL, BR
            # Extract vector component
            _, p_bf = TransToRp(T_bf[key])
            # Step 1, get T_bh for each leg
            T_bh = np.dot(TransInv(T_wb), T_wh)
            # Step 2, get T_hf for each leg; computed two equivalent ways
            # as a consistency check. Vector addition:
            _, p_bh = TransToRp(T_bh)
            p_hf0 = p_bf - p_bh
            # Transform composition:
            T_hf = np.dot(TransInv(T_bh), T_bf[key])
            _, p_hf1 = TransToRp(T_hf)
            # BUG FIX: the original compared p_hf1.all() != p_hf0.all(),
            # i.e. two "all entries nonzero?" booleans, which never detects
            # an actual element-wise mismatch. Compare the vectors instead.
            if not np.allclose(p_hf0, p_hf1):
                print("NOT EQUAL")
            p_hf = p_hf1
            # Step 3, compute joint angles from T_hf for each leg
            joint_angles[i, :] = self.Legs[key].solve(p_hf)
        return joint_angles
|
class Solution:
    def findPeakElement(self, nums):
        """
        Return an index of a peak element via a left-to-right linear scan:
        the first index whose value exceeds its right neighbour, or the
        last index if the sequence is non-decreasing throughout.

        :type nums: List[int]
        :rtype: int
        """
        for idx, (cur, nxt) in enumerate(zip(nums, nums[1:])):
            if cur > nxt:
                return idx
        return len(nums) - 1
987,687 | 094931a8ded87106a5e2f68dae90d523dde607e3 | # -*- coding: utf-8 -*-
"""
Éditeur de Spyder
Ceci est un script temporaire.
"""
import turtle as tu
# Rectangles to overlay on the floor-plan background image: each entry gives
# the fill colour, the pen's starting position, and the rectangle size in
# pixels. They are drawn in list order by draw() below.
Room = [
    {
        "color": "Red",
        "pos" : (0,0),
        "height" : 75 ,
        "width" : 250
    },
    {
        "color": "Blue",
        "pos" : (0,-75),
        "height" : 250 ,
        "width" : 50
    },
    {
        "color": "Grey",
        "pos" : (50,-75),
        "height" : 150 ,
        "width" : 50
    },
    {
        "color": "Orange",
        "pos" : (50,-225),
        "height" : 100 ,
        "width" : 50
    },
    {
        "color": "Green",
        "pos" : (100,-75),
        "height" : 250 ,
        "width" : 150
    }
]
def draw():
    """Paint every entry of Room as a filled rectangle over the background
    image, then shut the turtle window down."""
    tu.Screen().bgpic("source.gif")
    tu.hideturtle()
    tu.speed(0)
    for room in Room:
        # Jump (pen up) to the rectangle's corner, then fill it.
        tu.penup()
        tu.goto(room["pos"])
        tu.pendown()
        tu.color(room["color"])
        tu.begin_fill()
        # A rectangle is two identical width/height half-laps.
        for _ in range(2):
            tu.forward(room["width"])
            tu.right(90)
            tu.forward(room["height"])
            tu.right(90)
        tu.end_fill()
    tu.done()
    tu.bye()
draw()
987,688 | 74ef99fb0cf752e0818ce576c2f5e3b21d2c2aa3 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class rss_field(models.Model):
    """An RSS feed URL registered by a user."""
    feed_link=models.CharField(max_length=200)
    created_by=models.CharField(max_length=100)  # username of the registrant
class news(models.Model):
    """A single news item fetched from an RSS feed."""
    title=models.CharField(max_length=400)
    summary=models.CharField(max_length=400)
    link=models.CharField(max_length=400)  # URL of the full article
    feed_link=models.CharField(max_length=200)  # feed this item came from
    created_at=models.DateTimeField(auto_now_add=True)
class user_filter(models.Model):
    """Per-user keyword filter applied to incoming news items."""
    owned_by=models.CharField(max_length=100)  # username owning this filter
    keywds=models.CharField(max_length=1000)
|
987,689 | 2e0c59c1f34c8b65823152c7f0605ebad6a14d49 | from app import mythic, links, use_ssl, db_objects
from app.routes.routes import env
from sanic import response
from sanic_jwt.decorators import scoped, inject_user
import app.database_models.model as db_model
import base64
from app.routes.routes import respect_pivot
async def get_scripts(user):
    """Collect the browser scripts visible to *user* in their current
    operation.

    Returns a pair of strings formatted for direct embedding in the page
    templates:
      - browser_scripts: "command_id:script," entries (scripts tied to a
        specific command)
      - final_support_scripts: "ptype_name:script," entries (support
        scripts tied only to a payload type)
    Operation-level scripts override the user's own for the same command.
    On any failure this degrades to two empty strings rather than raising.
    """
    try:
        scripts_to_add = {}
        browser_scripts = ""
        support_scripts_to_add = {}
        final_support_scripts = ""
        query = await db_model.operator_query()
        operator = await db_objects.get(query, username=user['username'])
        query = await db_model.operation_query()
        operation = await db_objects.get(query, name=user['current_operation'])
        query = await db_model.browserscript_query()
        # get your own scripts
        operator_scripts = await db_objects.execute(
            query.where((db_model.BrowserScript.operator == operator) & (db_model.BrowserScript.active == True)))
        for s in operator_scripts:
            if s.command is not None:
                scripts_to_add[s.command.id] = s.script
            else:
                # Support script: key by "<ptype>_<name>" so operation-level
                # entries below can override the same script.
                support_scripts_to_add[s.payload_type.ptype.lower() + "_" + s.name] = s.payload_type.ptype.lower() + "_" + s.name + ":" + base64.b64decode(s.script).decode('utf-8') + ","
                # final_support_scripts += s.name + ":" + base64.b64decode(s.script).decode('utf-8') + ","
        # get scripts assigned to the operation
        operation_query = await db_model.browserscriptoperation_query()
        operation_scripts = await db_objects.execute(
            operation_query.where(db_model.BrowserScriptOperation.operation == operation))
        for s in operation_scripts:
            if s.browserscript.command is not None:
                scripts_to_add[
                    s.browserscript.command.id] = s.browserscript.script  # will overwrite a user script if it existed, which is what we want
            else:
                support_scripts_to_add[s.browserscript.payload_type.ptype.lower() + "_" + s.browserscript.name] = s.browserscript.payload_type.ptype.lower() + "_" + s.browserscript.name + ":" + base64.b64decode(
                    s.browserscript.script).decode('utf-8') + ","
                # final_support_scripts += s.name + ":" + base64.b64decode(s.script).decode('utf-8') + ","
        # Flatten both dicts into the comma-separated strings the templates expect.
        for s, v in scripts_to_add.items():
            browser_scripts += str(s) + ":" + base64.b64decode(v).decode('utf-8') + ","
        for s, v in support_scripts_to_add.items():
            final_support_scripts += v
        return browser_scripts, final_support_scripts
    except Exception as e:
        # Deliberate best-effort: pages still render without scripts.
        return "", ""
@mythic.route("/callbacks")
@inject_user()
@scoped('auth:user')
async def callbacks(request, user):
    """Serve the callbacks page, including the user's active browser scripts."""
    template = env.get_template('callbacks.html')
    browser_scripts, final_support_scripts = await get_scripts(user)
    # Pick both URL schemes once instead of duplicating the render call.
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              config=user['ui_config'],
                              browser_scripts=browser_scripts,
                              support_scripts=final_support_scripts,
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/payload_management",methods=['GET'])
@inject_user()
@scoped('auth:user')
async def payload_management(request, user):
    """Serve the payload management page."""
    template = env.get_template('payload_management.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'],
                              view_mode=user['view_mode'])
    return response.html(content)
@mythic.route("/payloadtype_management",methods=['GET'])
@inject_user()
@scoped('auth:user')
async def payloadtype_management(request, user):
    """Serve the payload type management page."""
    template = env.get_template('payloadtype_management.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'],
                              view_mode=user['view_mode'])
    return response.html(content)
@mythic.route("/analytics", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def analytics(request, user):
    """Serve the analytics page."""
    template = env.get_template('analytics.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/c2profile_management", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def c2profile_management(request, user):
    """Serve the C2 profile management page."""
    template = env.get_template('c2profile_management.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'],
                              view_mode=user['view_mode'])
    return response.html(content)
@mythic.route("/operations_management", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def operations_management(request, user):
    """Serve the operations management page."""
    template = env.get_template('operations_management.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/screenshots", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def screenshots(request, user):
    """Serve the screenshots page."""
    template = env.get_template('screenshots.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/keylogs", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def keylogs(request, user):
    """Serve the keylogs page."""
    template = env.get_template('keylogs.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/files", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def files(request, user):
    """Serve the files page."""
    template = env.get_template('files.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/credentials", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def credentials(request, user):
    """Serve the credentials page."""
    template = env.get_template('credentials.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/view_tasks", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def view_tasks(request, user):
    """Serve the task viewer page with the user's active browser scripts."""
    template = env.get_template('view_tasks.html')
    browser_scripts, final_support_scripts = await get_scripts(user)
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              browser_scripts=browser_scripts,
                              support_scripts=final_support_scripts,
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/tasks/<tid:int>", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def view_shared_task(request, user, tid):
    """Serve the shareable single-task page for task id *tid*."""
    template = env.get_template('share_task.html')
    browser_scripts, final_support_scripts = await get_scripts(user)
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              tid=tid, config=user['ui_config'],
                              browser_scripts=browser_scripts,
                              support_scripts=final_support_scripts,
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/split_callbacks/<cid:int>", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def view_split_callbacks(request, user, cid):
    """Serve the split-out view for callback id *cid*."""
    template = env.get_template('split_callback.html')
    browser_scripts, final_support_scripts = await get_scripts(user)
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              cid=cid, config=user['ui_config'],
                              browser_scripts=browser_scripts,
                              support_scripts=final_support_scripts,
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/search", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def search(request, user):
    """Serve the search page with the user's active browser scripts."""
    template = env.get_template('search.html')
    browser_scripts, final_support_scripts = await get_scripts(user)
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'],
                              browser_scripts=browser_scripts,
                              support_scripts=final_support_scripts)
    return response.html(content)
@mythic.route("/web_log", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def web_log(request, user):
    """Serve the web log page."""
    template = env.get_template('web_log.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/artifacts_management", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def artifacts_management(request, user):
    """Serve the artifacts management page."""
    template = env.get_template('artifacts_management.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/reporting_artifacts", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def reporting_artifacts(request, user):
    """Serve the artifact reporting page."""
    template = env.get_template('reporting_artifacts.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/manage_browser_scripts", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def manage_browser_scripts(request, user):
    """Serve the browser script management page."""
    template = env.get_template('browser_scripts.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/live_task_feed", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def live_task_feed(request, user):
    """Serve the live task feed page."""
    template = env.get_template('live_feed.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
@mythic.route("/live_event_feed", methods=['GET'])
@inject_user()
@scoped('auth:user')
async def live_event_feed(request, user):
    """Serve the live event feed page."""
    template = env.get_template('live_event_feed.html')
    http, ws = ("https", "wss") if use_ssl else ("http", "ws")
    content = template.render(links=await respect_pivot(links, request),
                              name=user['username'], http=http, ws=ws,
                              admin=user['admin'],
                              current_operation=user['current_operation'],
                              config=user['ui_config'],
                              view_utc_time=user['view_utc_time'])
    return response.html(content)
# add links to these routes at the bottom
# Registered after all handlers are defined so mythic.url_for can resolve
# them; the page templates read this dict to build their navigation menus.
links['callbacks'] = mythic.url_for('callbacks')
links['payload_management'] = mythic.url_for('payload_management')
links['payloadtype_management'] = mythic.url_for('payloadtype_management')
links['analytics'] = mythic.url_for('analytics')
links['c2profile_management'] = mythic.url_for('c2profile_management')
links['operations_management'] = mythic.url_for('operations_management')
links['screenshots'] = mythic.url_for('screenshots')
links['keylogs'] = mythic.url_for('keylogs')
links['files'] = mythic.url_for('files')
links['credentials'] = mythic.url_for('credentials')
links['view_tasks'] = mythic.url_for('view_tasks')
links['artifacts_management'] = mythic.url_for('artifacts_management')
links['reporting_artifacts'] = mythic.url_for('reporting_artifacts')
links['manage_browser_scripts'] = mythic.url_for('manage_browser_scripts')
links['web_log'] = mythic.url_for('web_log')
links['live_feed'] = mythic.url_for('live_task_feed')
links['live_event_feed'] = mythic.url_for('live_event_feed')
links['search'] = mythic.url_for('search')
|
987,690 | 9cad6edc4489447eebb8d65133623c91126e9d60 | import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
class class_login(unittest.TestCase):
    """Selenium smoke test: log into the CMS site and verify the Log Out link.

    Fixes over the previous version:
    * setUp/tearDown are per-test hooks and must be instance methods; the
      @classmethod decorator made unittest invoke them with the class object.
    * driver.quit() was placed after the asserts in the test body, so it never
      ran when the test failed; teardown now owns browser shutdown.
    * quit() already ends the whole session, so the extra close() is gone.
    """

    def setUp(self):
        # Fresh browser per test.
        self.driver = webdriver.Chrome()

    def test_cms(self):
        user = "smoorthi"
        pwd = "swethacse"
        driver = self.driver
        driver.maximize_window()
        driver.get("http://smoorthi.pythonanywhere.com/login/")
        # Fill in the username and password fields (absolute XPaths match the
        # login form layout on the target page).
        elem = driver.find_element_by_xpath("/html/body/div/div/div/div/div[2]/form/div[1]/input")
        elem.send_keys(user)
        elem = driver.find_element_by_xpath("/html/body/div/div/div/div/div[2]/form/div[2]/input")
        elem.send_keys(pwd)
        time.sleep(5)
        # Submit the form.
        elem = driver.find_element_by_xpath("/html/body/div/div/div/div/div[2]/form/div[3]/input")
        elem.click()
        time.sleep(3)
        # Open the user menu so the logout link becomes visible.
        elem = driver.find_element_by_id("userMenu")
        elem.click()
        time.sleep(3)
        try:
            # Attempt to find the 'logout' link - if found, the user is logged in.
            logout_link = driver.find_element_by_xpath("/html/body/nav/div/div/ul/li/div/a[2]").text
            self.assertIn("Log Out", logout_link, "Logout link text did not match")
            print("User Successfully logged in")
        except NoSuchElementException:
            self.fail("Login Failed - user may not exist")

    def tearDown(self):
        # quit() closes every window AND terminates the chromedriver session;
        # close() alone would leave the driver process running.
        self.driver.quit()
# Run the suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
987,691 | 7439da6b1039b6fe5e56a4fcb0dc96f823e6fcaa | import os
import yaml
import string
import codecs
def main():
    """Render template.html once per entry in config.yml into ./html/<key>.html.

    Each top-level key in the YAML file supplies a mapping of placeholder
    values that is substituted into the template.
    """
    # Load the page template; 'with' guarantees the handle is closed
    # (the old code leaked tmpf on any exception).
    with open("template.html", "r") as tmpf:
        template = string.Template(tmpf.read())
    # Load the per-page settings from YAML. safe_load avoids arbitrary
    # object construction (yaml.load without an explicit Loader is
    # deprecated and unsafe on untrusted input).
    with open("config.yml", "r") as f:
        data = yaml.safe_load(f)
    # One output file per top-level YAML key.
    for name in data:
        conf = data[name]
        # safe_substitute leaves unknown placeholders untouched instead
        # of raising KeyError.
        value = template.safe_substitute(conf)
        out_path = "./html/" + name + ".html"
        with codecs.open(out_path, "w", 'utf-8') as scriptf:
            scriptf.write(value)


if __name__ == '__main__':
    main()
|
987,692 | b70f3cc08abf559ad3f69ff2a5c405f8e146d4a0 | import numpy as np
import cv2
import os, sys
import Image
import ImageDraw
import ImageFont
import matplotlib.image as mpimg
import skimage.io
sys.path.insert(0, '../../python/')
import caffe
#caffe.set_device(0)
#caffe.set_mode_gpu()
caffe.set_mode_cpu()
mean = np.require([104, 117, 123], dtype=np.float32)[:, np.newaxis, np.newaxis]
def get_names_from_file(filename):
    """Read one class name per line from *filename* and return them as a list.

    Trailing newlines are stripped; other whitespace is preserved.
    """
    # open() as a context manager replaces the Python-2-only file()
    # builtin and guarantees the handle is closed.
    with open(filename, 'r') as fd:
        return [line.rstrip('\n') for line in fd]
def get_color_from_file(filename):
    """Read one (r, g, b) tuple per line from *filename*.

    Each line holds at least three comma-separated integers; extra fields
    are ignored. Returns a list of 3-tuples of ints.
    """
    colors = []
    # open() replaces the Python-2-only file() builtin and closes the
    # handle deterministically. The old separator r',' was just ','.
    with open(filename, 'r') as fd:
        for line in fd:
            words = line.split(',')
            colors.append((int(words[0]), int(words[1]), int(words[2])))
    return colors
def draw_image(pic_name, boxes, namelist_file):
    """Draw labeled detection boxes on the image at *pic_name* and display it.

    Each row of *boxes* carries (…, class_id, …, cx, cy, w, h) with the box
    center/size normalized to the image dimensions. Class names come from
    *namelist_file*; colors cycle through 'ink.color'.
    """
    name_list = get_names_from_file(namelist_file)
    color_list = get_color_from_file('ink.color')
    im = Image.open(pic_name)
    draw = ImageDraw.Draw(im)
    height, width = mpimg.imread(pic_name).shape[:2]
    # Font is constant for every box, so load it once.
    label_font = ImageFont.truetype(
        "/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-M.ttf", size=20)
    for box in boxes:
        cx, cy, bw, bh = box[3], box[4], box[5], box[6]
        # Convert normalized center/size to pixel corners, clamped to the
        # image bounds.
        left = max((cx - bw / 2) * width, 0)
        right = min((cx + bw / 2) * width, width - 1)
        top = max((cy - bh / 2) * height, 0)
        bot = min((cy + bh / 2) * height, height - 1)
        category_id = int(box[1])
        category = name_list[category_id]
        color = color_list[category_id % len(color_list)]
        # Outline: top, right, left and bottom edges.
        for edge in ((left, top, right, top),
                     (right, top, right, bot),
                     (left, top, left, bot),
                     (left, bot, right, bot)):
            draw.line(edge, fill=color, width=5)
        draw.text([left + 5, top], category, font=label_font, fill=color)
    im.show()
def det(model, im_path, show=0):
    '''Run a forward pass of the YOLO caffe *model* on the image at *im_path*.

    Loads the image, resizes/transposes it to the network's 1x3x416x416
    input layout, and returns the raw region output blob ("Output") for
    the single image. *show* is currently unused.
    '''
    image = caffe.io.load_image(im_path)
    #image = skimage.img_as_ubyte(skimage.io.imread(im_path, as_grey=False)).astype(np.float32)
    #transformer.set_mean('data', mean)
    transformer = caffe.io.Transformer({'data': (1, 3, 416, 416)})
    transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost dimension
    transformed_image = transformer.preprocess('data', image)
    # Single-image batch with the network's fixed input geometry.
    model.blobs['data'].reshape(1, 3, 416, 416)
    model.blobs['data'].data[...] = transformed_image
    out_blobs = model.forward()
    '''
    net_params = {}
    for layer_name, params in model.params.iteritems():
    print layer_name + '\t' + str(params[0].data.shape)
    blobs = []
    for p in params:
    blobs.append(p)
    net_params[layer_name] = blobs
    print len(params)
    '''
    # Detection output for the first (only) image in the batch.
    reg_out = model.blobs["Output"].data[0]
    print reg_out
    return reg_out
if __name__=="__main__":
    # Defaults; override from the command line as:
    #   script.py <deploy_prototxt> <caffemodel> <image> [names_file]
    net_proto = "./yolo-voc-deploy.prototxt"
    model_path = "./model_voc/yolo_voc_iter_120000.caffemodel"
    im_path = "data/000017.jpg"  # e.g. "data/000999.jpg" or "data/2011_003275.jpg"
    namelist = "voc.names"
    argc = len(sys.argv)
    if argc >= 4:
        net_proto, model_path, im_path = sys.argv[1], sys.argv[2], sys.argv[3]
    if argc >= 5:
        namelist = sys.argv[4]
    model = caffe.Net(net_proto, caffe.TEST, weights=model_path)
    raw_out = det(model, im_path)
    # Flatten to one detection per row before drawing.
    boxes = raw_out.reshape([-1, raw_out.shape[-1]])
    draw_image(im_path, boxes, namelist_file=namelist)
|
987,693 | 56d28e9fd06cc4aefdbbf204a365b105b3c88979 | from PIL import Image
import os
# im = Image.open("lenna.jpg")
# im.show()
#
# # 指定逆时针旋转的角度
# im_rotate = im.rotate(45)
# im_rotate.show()
# data_dir = 'D:/sharedhome/models/data/GCtest/Cancer/'
# im = Image.open(data_dir+images_list[0])
# imrot1 = im.rotate(90)
# imrot2 = im.rotate(180)
# imrot3 = im.rotate(270)
# imrot1.save(data_dir+'r90_'+images_list[0])
# imrot2.save(data_dir+'r180_'+images_list[0])
# imrot3.save(data_dir+'r270_'+images_list[0])
def rot_dataset(data_dir):
    """Augment a dataset folder in place with rotated copies of each image.

    For every file in *data_dir*, saves three counter-clockwise rotations
    (90/180/270 degrees) alongside it, prefixed r90_/r180_/r270_.

    NOTE(review): rerunning this on the same directory rotates the
    previously generated copies too - run only once per directory.
    """
    # os.path.join makes this safe whether or not data_dir ends in a
    # separator (the old string concatenation required a trailing '/').
    for file_name in os.listdir(data_dir):
        im = Image.open(os.path.join(data_dir, file_name))
        # Image.rotate() is counter-clockwise and returns a new image.
        for angle in (90, 180, 270):
            out_name = 'r%d_%s' % (angle, file_name)
            im.rotate(angle).save(os.path.join(data_dir, out_name))
# One-off augmentation of the two GC test-set classes (Windows share paths).
rot_dataset('D:/sharedhome/models/data/GCtest/Normal/')
rot_dataset('D:/sharedhome/models/data/GCtest/Cancer/')
|
987,694 | 509bbc7bf7c3d6f3961c5ce3fe176fb4f0557069 | #! /usr/bin/python
import sys
import os
from numpy import *
import Scientific.IO.NetCDF
from Numeric import *
from Scientific.IO import NetCDF as Net
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
# ---------------------------------------------------------------------------
# Histogram of water-oxygen positions along one coordinate, read from an
# Amber NetCDF trajectory (Python 2 / Scientific.IO.NetCDF script).
# ---------------------------------------------------------------------------
##name of NetCDF trajectory file
filename='../ptraj/mergtraj_equiv_translated.nc'
#filename='/home/pjanowsk/Case/pepsim/RunCase/WrapWater/mergtraj_waterwrapped_tr.nc'
firstwaterO=9793 #number of first water oxygen
watermodelatoms=3 #number of atoms in water model
totalwaters=144 #total number of water molecules
frames=3242 #frames in trajectory
coordinate=3 # x-coord=0, ycoord=1, z coord=2, a-coord=3, b-coord=4, c-coord=5
##here enter the invU matrix (deorthogonalization matrix) for taking cartesian
#coordinates into fractional coordinates, ie boxspace
invU=array( [ [ 0.092575448990928, 0.005110941781298, 0.012649310992308],\
[0., 0.061214034829125, 0.031095562758146],\
[0., 0., 0.063119390656148] ])
##get the coordinates variable of the binary file
# NOTE(review): 'file' shadows the Python-2 builtin of the same name.
file = Net.NetCDFFile(filename, 'r')
coords=file.variables['coordinates']
###these were some checks of how to work with NetCDF
#~ print coords.shape
#~ print coords [0,0,:]
#~ print coords [0,0,1]
#~ x=coords[:,:,0]
#~ x.shape()
#~ print x
#~ print x.min()
#~ xmax=x.max()
#~ print '%20.20f' %xmax
#~ xl=x.tolist()
#~ print xl
#~ print xl.index(60.8875274658203125)
#~ i=where[x=='60.8875']
###get all of the water coordinates into one array
# Collect one coordinate value per water oxygen per frame.
xcoords=[]
for frame in range(frames):
    for atom in range(totalwaters):
        #if you want fractional coordinate histogram (along a,b or c)
        if coordinate in range(3,6):
            # Deorthogonalize: fractional coords = invU . cartesian coords.
            r=array ( coords[frame,(firstwaterO-1+(watermodelatoms*atom))] )
            rfrac=dot(invU,r)
            #print rfrac
            xcoords.append(rfrac[coordinate-3])
        #if you want cartesian coordiante histogram (along x, y, z)
        elif coordinate in range(0,3):
            xcoords.append(coords[frame,(firstwaterO-1+(watermodelatoms*atom)),coordinate])
#print xcoords
print len(xcoords)
###plot histogram of the x-coordinates
fig=plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
#ax.hist(xcoords,500, range=[-1,6], facecolor='blue', alpha=1.00,histtype='stepfilled')
n=ax.hist(xcoords,500, facecolor='blue', alpha=1.00,histtype='stepfilled')
# n is the (counts, bin_edges, patches) triple returned by hist().
print n[1]
print n[0]
print n[2]
#~ fig.suptitle('Waterchannel cross-section profile (histogram of water positions, 500bins)', fontsize=16)
fig.suptitle('a-coord profile (histogram of water positions, 500bins)', fontsize=16)
#~ ax.set_xlabel('x-coordinate (supercell runs from 0-43.2 4 unitcells per supercell)')
ax.set_xlabel('a-coordinate')
ax.set_ylabel('count (# of times a water was found at that coordinate)')
minorLocator = MultipleLocator(.1)
majorLocator = MultipleLocator(1)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# Thicken major tick marks for readability.
for line in ax.get_xticklines() + ax.get_yticklines():
    line.set_markeredgewidth(3)
    line.set_markersize(10)
for line in ax.xaxis.get_minorticklines():
    line.set_markeredgewidth(2)
    line.set_markersize(5)
##the next part uses the histogram class (need histogram.py) to color the histogram. Not necessary
#~ from histogram import *
#~ from numpy import random
#~ h1 = histogram(xcoords, bins=500, range=[-3,6])
#~ colors = ['red', 'blue', 'red']
#~ ranges = [[-20,0.0], [0.0,4], [4,20]]
#~ for c, r in zip(colors, ranges):
#~ plt = ax.overlay(h1, range=r, facecolor=c)
#~ pyplot.savefig("channelProfileY.png")
#plt.savefig("a.png")
plt.show()
|
# Current location markers ("--" means not yet placed).
dungeon = "--"
room = "--"
# Player starting stats: hit points, attack power, defense power, gold.
player_name = 'PLAYER'
player_hp = 8
player_ap = 1
player_dp = 1
player_gp = 0
# Tracks whether the room-2 gold has already been looted.
player_found_gp_in_room_2 = False
# Monster starting stats (drops monster_gp gold when defeated - presumably;
# verify against the game loop).
monster_name = "MONSTER"
monster_hp = 16
monster_ap = 2
monster_dp = 2
monster_gp = 4
|
987,696 | 2089073ebead8dabbd2e0f382d03063b57a3df71 | #!/usr/bin/python
import itertools
import argparse
import os
import database
# CLI: which isolated-EuO database to read and where to write the .dat files.
parser = argparse.ArgumentParser(description='Print database for energy shifts in EuO and substrate')
parser.add_argument('-d', '--database', default='/users/stollenw/projects/euo/database/isolated.db', help='Database file name')
parser.add_argument('-p', '--plotfolder', default='/users/stollenw/projects/euo/database/analysis/isolated/isodelta/', help='Database file name')
parser.add_argument('-s', '--short', action='store_true', help='Less output')
args = parser.parse_args()
if not os.path.exists(args.plotfolder):
    os.makedirs(args.plotfolder)
# NOTE(review): rebinding 'database' hides the imported module afterwards.
database=database.isolated_database()
# Use the local file when a non-default path was given; otherwise fetch it.
if args.database!='/users/stollenw/projects/euo/database/isolated.db':
    database.read(args.database)
else:
    database.download()
# get columns of data and remove duplicates by converting to
# a set (no duplicates) and back to a list
material_list=list(set([row[0] for row in database.data ]))
N_list=list(set([int(row[1]) for row in database.data ]))
nc_list=list(set([float(row[2]) for row in database.data ]))
# sort data
material_list.sort()
N_list.sort()
nc_list.sort()
# all combinations
parameter_list=[material_list,N_list,nc_list]
parameter=list(itertools.product(*parameter_list))
for p in parameter:
    #print "# Material: %s, N=%03i, nc=%06.4f" %(p[0], p[1], p[2])
    material = p[0]
    N=p[1]
    nc=p[2]
    # Only emit output for (material, N, nc) combinations present in the data.
    if len(filter(lambda element : element[0] == material and element[1] == N and element[2] == nc, database.data))!=0:
        print p
        if not args.short:
            print "Temperature\tDelta\t\tSource"
        else:
            print "Temperature\tDelta"
        # NOTE(review): f is never closed explicitly; relies on interpreter GC.
        f = open("%s/isodeltas_%s_N%03i_nc%06.4f.dat" % (args.plotfolder, material, N, nc), 'w')
        # Matching rows, sorted by temperature (element[3]).
        for e in sorted(filter(lambda element : element[0] == material and element[1] == N and element[2] == nc, database.data), key= lambda element: element[3]):
            if not args.short:
                print "%e\t%e\t%s" % (e[3], e[4], e[5])
                f.write("%e\t%e\t%s\n" % (e[3], e[4], e[5]))
            else:
                print "%e\t%e" % (e[3], e[4])
                f.write("%e\t%e\n" % (e[3], e[4]))
|
987,697 | 6c81f0ba9ca96563c8bcf5fb54cdb3b410dfe787 | import codecs
# Input: raw character table, one character per line.
fileData = './src/raw/charData.txt'
# Output: generated LiveScript module.
fileDataLs = './src/raw/chars.ls'
with codecs.open(fileData,'r',encoding='utf8') as f:
    data = f.read()
# LiveScript file header: column documentation plus the opening of the
# exported 'char' object literal.
front = '''# ============================================================================
# Column settings
# ============================================================================
# name: name of this character
# class: class of this character
# rarity: rarity of this character
# plain: plain type of this character
# hpF: HP increase factor
# atF: ATK increase factor
# dfF: DEF increase factor
# totF: total increase factor
# ============================================================================
char = {
slotChar: <[name weapon rarity plain hpF atF dfF totF]>
data:[\n'''
# Footer: closes the data array and the object, then exports it.
back = ''' ]
}
module.exports = char'''
# Emit header, one LiveScript word-list ("<[ ... ]>") row per input line,
# then the footer.
with codecs.open(fileDataLs,'w',encoding='utf8') as f:
    f.write(front)
    for line in data.splitlines():
        line = line.strip()
        f.write(" <[" + line + "]>\n")
    f.write(back)
print("chars.ls generated complete!")
|
987,698 | bbb0860be0b232bd8dec420473fdf9d2681f81aa | from lab5.constant import prime_number, size
def rabin_karp(text: str, pattern: str):
    """Find all occurrences of *pattern* in *text* via Rabin-Karp rolling hash.

    Returns a list of {'start': i, 'end': j} dicts (j inclusive). When the
    text is shorter than the pattern the result is an empty list, so callers
    can always iterate or take len() of it (the old -1 sentinel made
    check_result() crash on len()).
    """
    found_patterns_idx = []
    text_len = len(text)
    pattern_len = len(pattern)
    if text_len < pattern_len:
        return found_patterns_idx
    pattern_hash = hash(pattern, pattern_len)
    text_hash = hash(text, pattern_len)
    # Window at position 0 is checked separately; the loop rolls from 1.
    # Hash equality can be a collision, so always verify by literal compare.
    if pattern_hash == text_hash:
        if check_match(text, pattern, 0):
            found_patterns_idx.append({'start': 0, 'end': pattern_len - 1})
    for i in range(1, text_len - pattern_len + 1):
        # Roll the hash: drop text[i-1], append text[i+pattern_len-1].
        text_hash = substring_new_hash(pattern, text_hash, text[i - 1], text[i + pattern_len - 1])
        if pattern_hash == text_hash:
            if check_match(text, pattern, i):
                found_patterns_idx.append({'start': i, 'end': i + pattern_len - 1})
    return found_patterns_idx
def hash(pattern, pattern_len):
    """Polynomial rolling hash of the first *pattern_len* chars of *pattern*.

    Uses the module-level base `size` and modulus `prime_number`.
    NOTE: this function shadows the built-in hash() within this module.
    """
    value = 0
    for ch in pattern[:pattern_len]:
        value = (value * size + ord(ch)) % prime_number
    return value
# hash for substring with next letter
def substring_new_hash(pattern_length, prev_hash, old_letter, new_letter):
    """Roll the window hash one position: remove *old_letter*, append *new_letter*.

    NOTE(review): despite its name, *pattern_length* receives the pattern
    string itself (callers pass the pattern; only len() of it is used here).
    Renaming it would break keyword callers, so it is documented instead.
    """
    # size**(m-1) mod p is the positional weight of the character leaving
    # the window.
    new_hash = size ** (len(pattern_length) - 1) % prime_number
    return ((prev_hash - ord(old_letter) * new_hash) * size + ord(new_letter)) % prime_number
def check_result(expected_res, text: str, pattern: str):
    """Return True iff rabin_karp(text, pattern) finds exactly *expected_res*.

    *expected_res* is a sequence of (start, end) pairs. The old version
    kept only the LAST comparison's outcome (earlier mismatches were
    overwritten) and crashed with IndexError when the counts differed;
    this version compares every span and the lengths.
    """
    found_patterns = rabin_karp(text, pattern)
    if len(found_patterns) != len(expected_res):
        return False
    for expected, found in zip(expected_res, found_patterns):
        if expected[0] != found['start'] or expected[1] != found['end']:
            return False
    return True
def check_match(text, pattern, start_index):
    """Return True iff *pattern* occurs literally in *text* at *start_index*.

    Character-by-character verification used after a hash match, since
    equal hashes can be collisions.
    """
    for offset, ch in enumerate(pattern):
        if text[start_index + offset] != ch:
            return False
    return True
|
987,699 | 0226fa8b09149f7754e4b81f3e26c047fcbfe395 | from django.conf import settings
# Hostname of the DB-logger endpoint; falls back to the example default
# when DB_LOGGER_URL is not defined in the Django settings module.
DB_LOGGER_URL = getattr(settings, 'DB_LOGGER_URL', 'pg.example.com')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.