text stringlengths 38 1.54M |
|---|
from abc import abstractmethod
from abc import ABCMeta
import numpy as np
import cv2
from utils import crop_image, expand_bbox
class ImageSample:
    """Abstract interface for an image sample: BGR pixel data, id, label."""
    # NOTE: Python-2 style metaclass declaration. In Python 3 this attribute
    # is inert, so @abstractmethod is NOT enforced here. Kept as-is because
    # existing subclasses (e.g. SampleWithCache) do not implement every
    # abstract property and would fail to instantiate under a real ABCMeta.
    __metaclass__ = ABCMeta

    @property
    @abstractmethod
    def bgr_data(self):
        """BGR image data of the sample."""
        pass

    @property
    @abstractmethod
    def id(self):
        """Unique, hashable identifier of the sample."""
        pass

    @property
    @abstractmethod
    def label(self):
        """Ground-truth label of the sample."""
        pass

    def __hash__(self):
        # Samples hash by their id so they can be deduplicated in sets/dicts.
        return hash(self.id)
class SampleWithCache(ImageSample):
    """Image sample that caches decoded & preprocessed images in memory.

    The cache is shared across all instances (class attributes) and stops
    growing once _CACHE_MAX_SIZE bytes of pixel data are stored.
    """
    _CACHE_MAX_SIZE = 16 * 1024 * 1024 * 1024  # 16 GiB of cached pixel data
    _CACHE = {}       # image_path -> preprocessed uint8 image
    _CACHE_SIZE = 0   # running total of cached bytes

    def __init__(self, image_path, label, cfg):
        self._image_path = image_path
        self._label = label
        # cfg.PREPARE_SIZE is expected to be (width, height)
        self._cfg = cfg

    @property
    def data(self):
        """Return a copy of the center-cropped, resized BGR image."""
        if self._image_path in SampleWithCache._CACHE:
            im = SampleWithCache._CACHE[self._image_path]
        else:
            im = cv2.imread(self._image_path)
            if im is None:
                # cv2.imread silently returns None on unreadable files;
                # fail loudly here instead of crashing later on im.shape.
                raise IOError("cannot read image: %s" % self._image_path)
            # prepare size
            prepare_sz = tuple(self._cfg.PREPARE_SIZE)
            if im.shape[0] != prepare_sz[1] or im.shape[1] != prepare_sz[0]:
                # Center-crop to the target aspect ratio before resizing.
                prepare_r = float(prepare_sz[0]) / prepare_sz[1]
                orig_r = float(im.shape[1]) / im.shape[0]
                if orig_r < prepare_r:
                    # fit width
                    crop_w = im.shape[1]
                    crop_h = crop_w / prepare_r
                else:
                    # fit height
                    crop_h = im.shape[0]
                    crop_w = crop_h * prepare_r
                crop_x = int((im.shape[1] - crop_w) / 2.)
                crop_y = int((im.shape[0] - crop_h) / 2.)
                crop_w = int(crop_w)
                crop_h = int(crop_h)
                im = im[crop_y:crop_y + crop_h, crop_x:crop_x + crop_w, :]
                # shrinking -> INTER_AREA, enlarging -> INTER_LINEAR
                interp = cv2.INTER_AREA if im.shape[1] > prepare_sz[0] else cv2.INTER_LINEAR
                im = cv2.resize(im, prepare_sz, interpolation=interp)
            if SampleWithCache._CACHE_SIZE < SampleWithCache._CACHE_MAX_SIZE:
                SampleWithCache._CACHE[self._image_path] = im
                # nbytes is robust to channel count (the original hard-coded
                # shape[2] * 1, assuming a 3-channel uint8 image)
                SampleWithCache._CACHE_SIZE += im.nbytes
        return im.copy()

    @property
    def label(self):
        return self._label

    @property
    def id(self):
        # The image path doubles as a unique identifier.
        return self._image_path

    @staticmethod
    def reset_cache():
        """Drop all cached images and reset the byte counter."""
        SampleWithCache._CACHE = {}
        SampleWithCache._CACHE_SIZE = 0
#! /home/fyh/anaconda3/envs/NERpy3/bin/python
import random
import nltk
from util import HPOTree,HPO_class,getNames, containNum
# NOTE(review): appears unused here; HPOTree presumably loads its own data path.
obo_file_path="../data/hpo.json"
# wiki_file_path="../models/wikipedia.txt"
# none_list=[]
# # meaningless phrases sampled from wikipedia (unused negative source)
# with open(wiki_file_path, "r", encoding="utf-8") as wiki_file:
#     max_length=10000000
#     data=wiki_file.read()[:max_length]
#     tokens=processStr(data)
#     indexes=random.sample(range(len(tokens)), 10000)
#     for sub_index in indexes:
#         none_list.append(" ".join(tokens[sub_index:sub_index+random.randint(1,10)]))
hpo_tree=HPOTree()
# p_phrase 2 HPO
p_phrase2HPO=hpo_tree.p_phrase2HPO
# HPO:[name+synonym]  (HPO term id -> all of its surface forms)
HPO2phrase={}
data=hpo_tree.data
for hpo_name in data:
    struct=HPO_class(data[hpo_name])
    names=getNames(struct)
    HPO2phrase[hpo_name]=names
# Build BERT training pairs: label 2 = synonym pair, label 1 = "partial
# positive" (ancestor/parent relation), label 0 = negative.
total_neg_lst = []
part_pos_lst = []
for true_hpo_name in HPO2phrase.keys():
    struct = HPO_class(data[true_hpo_name])
    # descendants (children), capped at 10 terms
    Candidate_hpo_set_son = list(struct.child)
    Candidate_hpo_set_son_set = set(Candidate_hpo_set_son)
    if len(Candidate_hpo_set_son) > 10:
        Candidate_hpo_set_son = random.sample(Candidate_hpo_set_son, 10)
    # ancestors, capped at 10 terms
    Candidate_hpo_set_f = list(struct.father)
    Candidate_hpo_set_f_set = set(Candidate_hpo_set_f)
    if len(Candidate_hpo_set_f) > 10:
        Candidate_hpo_set_f = random.sample(Candidate_hpo_set_f, 10)
    # direct parents
    Candidate_hpo_set_f_d = list(struct.is_a)
    sub_neg_phrases = []
    sub_pos_phrases = []
    sub_pos_phrases_d = []
    # ancestor relations -> partial positives
    tmp = []
    for hpo_name in Candidate_hpo_set_f:
        tmp.extend(HPO2phrase[hpo_name])
    sub_pos_phrases.extend(tmp)
    # immediate parents
    tmp = []
    for hpo_name in Candidate_hpo_set_f_d:
        tmp.extend(HPO2phrase[hpo_name])
    sub_pos_phrases_d.extend(tmp)
    sub_pos_phrases.extend(sub_pos_phrases_d)
    sub_fa_phrases_set = set(sub_pos_phrases)
    # ancestors included: emit label-1 (partially positive) rows
    part_pos_lst.extend(
        [random.choice(HPO2phrase[true_hpo_name]) + "::" + item + "\t1\n" for item in sub_fa_phrases_set if
         not containNum(item)])
    # child relations -> negatives, capped at 10 phrases
    tmp = []
    for hpo_name in Candidate_hpo_set_son:
        tmp.extend(HPO2phrase[hpo_name])
    if len(tmp) > 10:
        tmp = random.sample(tmp, 10)
    sub_neg_phrases.extend(tmp)
    # unrelated disease terms -> negatives, capped at 5 phrases
    tmp = []
    # NOTE(review): random.sample on dict_keys raises TypeError on
    # Python 3.11+ (population must be a sequence); wrap in list() there.
    Candidate_hpo_set2 = random.sample(HPO2phrase.keys(), 5)
    for item in Candidate_hpo_set2:
        if item in Candidate_hpo_set_son_set or item in Candidate_hpo_set_f_set or item == true_hpo_name:
            continue
        tmp.extend(HPO2phrase[item])
    if len(tmp) > 5:
        tmp = random.sample(tmp, 5)
    sub_neg_phrases.extend(tmp)
    total_neg_lst.extend(
        [random.choice(HPO2phrase[true_hpo_name]) + "::" + item + "\t0\n" for item in sub_neg_phrases if
         not containNum(item)])
    sub_neg_phrases = []
    # single words contained in the term's own phrases -> negatives
    phrases = set(HPO2phrase[true_hpo_name])
    part_pos_set = set(sub_pos_phrases)
    word_set = set()
    for phrase in phrases:
        word_set.update(nltk.word_tokenize(phrase))
    if len(word_set) > 10:
        # NOTE(review): random.sample on a set is deprecated and removed
        # in Python 3.11+; convert to a sorted list for reproducibility.
        word_set = random.sample(word_set, 10)
    for word in word_set:
        if word not in phrases and word not in part_pos_set:
            sub_neg_phrases.append(word)
    total_neg_lst.extend(
        [item + "::" + random.choice(HPO2phrase[true_hpo_name]) + "\t0\n" for item in sub_neg_phrases if
         not containNum(item)])
# this part produce train file for bert
write_file=open("../models/all4bert_new_triple_2.txt","w")
# get part neg list
pos_lst=[]
for hpo_name in HPO2phrase:
    names=HPO2phrase[hpo_name]
    if len(names)<=1:
        continue
    tmp_lst=[]
    for index1 in range(len(names)):
        for index2 in range(len(names)):
            if index1==index2:
                continue
            # label 2 = fully semantically equivalent (synonym pair)
            if not containNum(names[index1]) and not containNum(names[index2]):
                tmp_lst.append(names[index1]+"::"+names[index2]+"\t2\n")
    pos_lst.extend(tmp_lst)
# 78968
print("we have %d positive entries." % len(pos_lst))
# 140066
print("we have %d part positive entries." % len(part_pos_lst))
# 174602
print("we have %d part/total negative entries." % len(total_neg_lst))
# merge all three label sets, shuffle, and write one example per line
total_neg_lst.extend(pos_lst)
total_neg_lst.extend(part_pos_lst)
random.shuffle(total_neg_lst)
write_file.write("".join(total_neg_lst))
write_file.close()
|
import os
import numpy as np
import pickle
class NeuralNetwork:
    """Single-hidden-layer MLP for 784-dim (MNIST-style) inputs, 10 classes.

    Trained with per-sample SGD and softmax/cross-entropy loss; the input
    data is split into train/validation partitions by train_validation_ratio.
    """

    def __init__(self, train_x, train_y, hidden_layer_size, train_validation_ratio):
        self.train_size = train_x.shape[0]
        # shuffle samples (same permutation applied to x and y)
        s = np.arange(self.train_size)
        np.random.shuffle(s)
        train_x = train_x[s]
        train_y = train_y[s]
        # Divides data into train / validation partitions
        self.train_validation_ratio = train_validation_ratio
        self.train_x = train_x[:int(self.train_size * self.train_validation_ratio), :]
        self.validation_x = train_x[int(self.train_size * self.train_validation_ratio):, :]
        self.train_y = train_y[:int(self.train_size * self.train_validation_ratio)]
        self.validation_y = train_y[int(self.train_size * self.train_validation_ratio):]
        # small uniform init; shapes assume 784 input features, 10 classes
        self.w1 = np.random.uniform(-0.08, 0.08, (hidden_layer_size, 784))
        self.b1 = np.random.uniform(-0.08, 0.08, (hidden_layer_size, 1))
        self.w2 = np.random.uniform(-0.08, 0.08, (10, hidden_layer_size))
        self.b2 = np.random.uniform(-0.08, 0.08, (10, 1))
        self.params = {'w1': self.w1, 'b1': self.b1, 'w2': self.w2, 'b2': self.b2}

    def train(self, num_of_epochs, learning_rate, should_print=True):
        """Run SGD for num_of_epochs epochs, validating after each epoch."""
        for epoch in range(num_of_epochs):
            # Shuffles arrays each epoch (same permutation for x and y)
            s = np.arange(self.train_x.shape[0])
            np.random.shuffle(s)
            self.train_x = self.train_x[s]
            self.train_y = self.train_y[s]
            loss_sum = 0
            validation_loss = 0
            validation_success = 0
            # Trains model one sample at a time (pure SGD, batch size 1)
            for x, y in zip(self.train_x, self.train_y):
                y = int(y)
                # Normalizes vector to [0, 1] and makes it a column vector
                x = np.ndarray.astype(x, dtype=float)
                x /= 255.0
                x = np.expand_dims(x, axis=1)
                # Forward propagation
                back_params = {}
                y_hat = self.forward_propagation(x, back_params)
                h1, z1 = back_params['h1'], back_params['z1']
                # Computes loss_sum (cross-entropy of the true class) BEFORE
                # backprop, because backward_propagation mutates y_hat in place
                loss_sum -= np.log(y_hat[y])
                # Backward propagation
                backward_params = {'y_hat': y_hat, 'h1': h1, 'w2': self.params['w2'], 'z1': z1}
                dw1, db1, dw2, db2 = self.backward_propagation(x, y, backward_params)
                # update
                derivatives = {'dw1': dw1, 'db1': db1, 'dw2': dw2, 'db2': db2}
                self._update(derivatives, learning_rate)
            # Validates on the held-out partition
            for x, y in zip(self.validation_x, self.validation_y):
                y = int(y)
                # Normalizes vector
                x = np.ndarray.astype(x, dtype=float)
                x /= 255.0
                x = np.expand_dims(x, axis=1)
                # Applies forward propagation
                y_hat = self.forward_propagation(x)
                # Computes success (argmax prediction vs true label)
                if y == np.argmax(y_hat):
                    validation_success += 1
                # Computes loss_sum
                validation_loss -= np.log(y_hat[y])
            avg_loss = loss_sum / (self.train_size * self.train_validation_ratio)
            avg_validation_loss = validation_loss / (self.train_size * (1 - self.train_validation_ratio))
            validation_accuracy = validation_success / (self.train_size * (1 - self.train_validation_ratio))
            if should_print:
                print('Epoch #' + str(epoch) + ', Validation accuracy: ' + str(
                    validation_accuracy) + ', Loss sum: ' + str(avg_loss[0]) + ', Validation loss sum: ' + str(
                    avg_validation_loss[0]))

    def _update(self, derivatives, learning_rate):
        """Apply one gradient-descent step to all parameters."""
        w1, b1, w2, b2 = [self.params[key] for key in ('w1', 'b1', 'w2', 'b2')]
        dw1, db1, dw2, db2 = [derivatives[key] for key in ('dw1', 'db1', 'dw2', 'db2')]
        w2 -= learning_rate * dw2
        b2 -= learning_rate * db2
        w1 -= learning_rate * dw1
        b1 -= learning_rate * db1
        self.params = {'w1': w1, 'b1': b1, 'w2': w2, 'b2': b2}

    def predict(self, sample):
        """Return the predicted class index for one raw sample.

        NOTE(review): unlike train/validation, the sample is NOT scaled by
        1/255 here — predictions on raw 0-255 inputs are inconsistent with
        how the network was trained; confirm callers pre-scale the input.
        """
        sample = np.asmatrix(sample).T
        return np.argmax(self.forward_propagation(sample))

    def forward_propagation(self, sample, back_params=None):
        # Follows procedure given in notes: z1 = w1 x + b1, h1 = act(z1),
        # z2 = w2 h1 + b2, y_hat = softmax(z2)
        if back_params is None:
            back_params = {}
        w1, b1, w2, b2 = [self.params[key] for key in ('w1', 'b1', 'w2', 'b2')]
        activation_func = active['func']
        z1 = np.dot(w1, sample) + b1
        # NOTE(review): leaky_relu mutates its argument in place, so after
        # this call z1 holds the *activated* values; the derivative below in
        # backward_propagation only inspects the sign, which is unchanged,
        # so the result is the same — but confirm this is intended.
        h1 = activation_func(z1)
        z2 = np.dot(w2, h1) + b2
        y_hat = softmax(z2)
        back_params['z1'], back_params['h1'], back_params['z2'], back_params['h2'] = z1, h1, z2, y_hat
        return y_hat

    @staticmethod
    def backward_propagation(x, y, params):
        """Return gradients (dw1, db1, dw2, db2) for one sample.

        Softmax + cross-entropy gives dz2 = y_hat - one_hot(y).
        """
        y_hat, h1, w2, z1 = [params[key] for key in ('y_hat', 'h1', 'w2', 'z1')]
        active_derivative = active['derivative']
        dz2 = y_hat
        # NOTE: dz2 aliases y_hat, so this subtraction also mutates y_hat;
        # the np.dot(y_hat.T, w2).T below therefore uses the *gradient*
        # (y_hat - one_hot(y)), i.e. it equals w2.T @ dz2 — correct backprop.
        dz2[y] -= 1
        dw2 = np.dot(dz2, h1.T)
        db2 = dz2
        dz1 = np.dot(y_hat.T, w2).T * active_derivative(z1)
        dw1 = np.dot(dz1, x.T)
        db1 = dz1
        return dw1, db1, dw2, db2
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum()
def leaky_relu(x):
    """In-place leaky ReLU: keep positives, scale negatives by 0.01."""
    for i, v in enumerate(x):
        x[i] = v if v > 0 else 0.01 * v
    return x
def d_leaky_relu(x):
    """In-place derivative of leaky ReLU: 1 for x > 0, else 0.01.

    Bug fix: the original computed max(0.01*sign(x), sign(x)), which yields
    -0.01 for negative inputs instead of the leaky slope 0.01 — flipping the
    gradient's sign on the negative branch. (At exactly 0 the original
    returned 0; we use the leaky slope 0.01 as the subgradient.)
    """
    for i in range(len(x)):
        x[i] = 1.0 if x[i] > 0 else 0.01
    return x
# activation functions
# Registry mapping activation name -> {'func', 'derivative'}; `active`
# selects the pair used by forward/backward propagation.
activation_funcs = {'leakyReLU': {'func': leaky_relu, 'derivative': d_leaky_relu}}
active = activation_funcs['leakyReLU']
def read_resources(train_x, train_y, test):
    """Load the three whitespace-delimited data files, with .npy caching.

    Each path is loaded from '<path>.npy' when that cache exists; otherwise
    the text file is parsed with np.loadtxt and cached via np.save for
    faster subsequent runs.  Returns (train_x, train_y, test) arrays.
    """
    def _load_cached(path):
        # one file: prefer the binary cache, else parse text and cache it
        if os.path.exists(path + ".npy"):
            return np.load(path + ".npy")
        data = np.loadtxt(path)
        np.save(path, data)
        return data

    return _load_cached(train_x), _load_cached(train_y), _load_cached(test)
if __name__ == '__main__':
    file_path = "my_neural_network.pkl"
    # Reads data (cached as .npy after the first run)
    print("Reading resources...")
    x, y, test = read_resources("train_x", "train_y", "test_x")
    print("Reading done!")
    nn = None
    if os.path.exists(file_path):
        # reuse a previously trained network
        print("Loading pre-trained neural network from \"" + file_path + "\"...")
        with open(file_path, 'rb') as f:
            # NOTE(review): pickle.load on an untrusted file can execute
            # arbitrary code — only load model files you created yourself.
            nn = pickle.load(f)
        print("Loading done!")
    else:
        print("Pre-trained neural network was not found. Creating a new network.")
        # 100 hidden units, 80/20 train/validation split
        nn = NeuralNetwork(x, y, 100, 0.8)
        print("Training neural network...")
        nn.train(10, 0.001)
        print("Training done!")
        print("Saving neural network to file \"" + file_path + "\" for later use...")
        with open(file_path, 'wb') as f:
            pickle.dump(nn, f)
        print("Saving done!")
    # Writes predictions of given tests, one class index per line
    print("Predicting samples from tests file and writes predictions to output file...")
    with open("test.pred", "w") as f:
        for t in test:
            f.write(str(nn.predict(t)) + '\n')
    print("Testing done!")
|
import pytest
@pytest.fixture
def headers():
    """Request headers fixture: identifies the test client via User-Agent."""
    return {'user-agent': 'my-app/0.0.1'}
@pytest.fixture
def main_url():
    """Base rabota.by vacancy-search URL (query: python, area 16); a page
    number is appended by the tests."""
    return "https://rabota.by/search/vacancy?L_is_autosearch=false&area=16&clusters=true&enable_snippets=true&text=python&page="
|
from bezmouse import *
import pyautogui
import time
import random
import importlib.machinery
visitor_lib = importlib.machinery.SourceFileLoader('visitor_lib', 'visitor_lib/visitor_lib.py').load_module()
def bitearn():
    """Open the bitearn dashboard, handle the captcha (up to 3 retries),
    trigger the claim action and return the success-check result.

    Returns whatever visitor_lib.move_to_area_relative returns for the
    'success' template (truthy when the marker was found on screen).
    """
    captcha_reps = 0
    while True:
        visitor_lib.browser_open_url('https://bitearn.io/page/dashboard/')
        # human-like random delay, 6-10 s (randint is in milliseconds)
        time.sleep(random.randint(6000, 10000)/1000)
        # retry from the top if the captcha solver failed, at most 3 times
        if not visitor_lib.solve_captcha_buster() and captcha_reps < 3:
            captcha_reps += 1
            time.sleep(random.randint(10, 20))
            continue
        time.sleep(random.randint(4000, 6000)/1000)
        # presumably locates claim.png and clicks within the given offsets
        # — TODO confirm visitor_lib.move_to_area_relative semantics
        visitor_lib.move_to_area_relative("bitearn/claim.png", -443, -1, 1103, 45, True)
        time.sleep(random.randint(7000, 8000)/1000)
        success = visitor_lib.move_to_area_relative("bitearn/success.png", 82, 25, 149, 25, False)
        # final cool-down: 0.2-1.0 minutes
        time.sleep((random.randint(200, 1000)/1000)*60)
        return success
# Behavior knobs for the bitearn claim loop: amounts in USD, intervals in
# minutes/hours; the 'skip' settings randomly drop some collection rounds
# (presumably to look less bot-like — confirm against the consumer code).
bitearn_settings = {
    'collection_amount_usd': 0.001,
    'collection_interval_minutes': 30,
    'collection_interval_additional_minutes_max': 2,
    'rest_at_hour_min': 20,
    'rest_at_hour_max': 21,
    'hours_rest_min': 0,
    'hours_rest_max': 1,
    'skip_every_min_collections': 15,
    'skip_every_max_collections': 20,
    'skip_by_min_minutes': 10,
    'skip_by_max_minutes': 20
}
|
from django.db import models
from datetime import datetime
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
    """Custom user model extending Django's AbstractUser with profile fields."""
    birthday = models.DateField(verbose_name=u'生日', null=True, blank=True)
    # bug fix: the stored choice key was misspelled "fenale"
    gender = models.CharField(verbose_name=u'性别', max_length=10,
                              choices=(("male", u"男"), ("female", "女")), default=u"")
    # bug fix: the default must be a stored choice *key* ("s_group_leader"),
    # not the display label "小组长" that was stored before
    user_type = models.CharField(verbose_name="用户类型", max_length=50,
                                 choices=(("s_group_leader", "小组长"), ("b_group_leader", "大组长"),
                                          ("administrator", "管理员")), default="s_group_leader")
    mobile = models.CharField(verbose_name="联系电话", max_length=11, null=True, blank=True)
    image = models.ImageField(verbose_name='头像', upload_to="image/%Y/%m",
                              default=u"image/avatar3.png", max_length=100)

    class Meta:
        verbose_name = "用户信息"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.username
|
#!/usr/bin/env python
'''This program should take a BAM file produced by Bismark and a list of adjacent CpG sites (one line per read and _one_ eligible pairs) for which the methylation statuses will be reported in a pairwise manner. '''
import sys
import argparse
import getopt
import pysam
import os
import doctest
import gzip
import warnings
import itertools
def get_args():
    """Parse command-line arguments for the CpG-pair methylation counter."""
    parser=argparse.ArgumentParser(description='Count numbers of methylated and unmethylated Cs per read per adjacent CpG pair.')
    parser.add_argument('--BAMfile', '-bam', type = str, required=True, help="BAM file")
    parser.add_argument('--CpGpairs', '-pairs', type = str, required=True, help="Reads with pair information, must be _sorted_ by read name and contain the following columns: read name, chr, start, end (of overlapped pair). bedtools intersect -abam test03.bam -b pairs_03.bed -bed -u | awk '{OFS=t; print $1,$7,$8,$4}' | bedtools intersect -b stdin -a pairs_03.bed -f 1 -wb | awk '{OFS=t; print $7, $1, $2,$3}' | sort -k1,1g > intersect_test03_b.txt.")
    parser.add_argument('--minMapQual', '-mmq', type = int, default=0, help="Min. mapping quality accepted for the reads that will be used to count the methylation state ocurrences. See http://bit.ly/25glGcI for information about the different aligners MapQ calculations.")
    parser.add_argument('--minCoverage', '-mc', type = int, default = 0, help = 'Indicate the minimum number of reads that must overlap with each adjacent pair.')
    parser.add_argument('--trimStart', '-strim', type=int, required=False, default = 0, help = 'Number indicating how many bp should be ignored at the 5 prime end of the read.')
    parser.add_argument('--trimEnd', '-etrim', type=int, required=False, default = 0, help = 'Number indicating how many bp should be ignored at the 3 prime end of the read.')
    parser.add_argument('--outfile', '-out', type = str, required=True, help = 'Prefix for the output file')
    args=parser.parse_args()
    return args
def get_CIGARbased_sequence(readinfo):
    """Project the read's XM methylation string onto reference coordinates.

    Walks the CIGAR tuples, copying matched bases, padding deletions/skips
    with '_' and dropping inserted bases, so that positions in the returned
    string line up with the reference.
    """
    ori_seq = [item for item in readinfo.tags if item[0] == 'XM'][0][1]
    ct = readinfo.cigartuples
    new_seq = ''
    start_pos = 0
    '''see http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment.cigartuples for information about how the integers are mapped to the CIGAR operations'''
    for c in ct:
        # match/mismatch (M, =, X): copy through
        if c[0] == 0 or c[0] == 7 or c[0] == 8:
            end_pos = start_pos + c[1]
            new_seq = new_seq + ori_seq[start_pos:end_pos]
            start_pos = end_pos
        # deletion in read / ref skip (D, N, H) --> pad so the result fits
        # the reference.  NOTE(review): hard-clipped (H) bases are absent
        # from SEQ/XM; padding them like deletions may be wrong — confirm.
        elif c[0] == 2 or c[0] == 3 or c[0] == 5:
            new_seq = new_seq + '_' * c[1]
        # insertion in read compared to reference (I) --> skip those bases.
        # bug fix: was 'start_pos =+ c[1]' (assignment of +c[1]), which
        # reset the cursor to c[1] instead of advancing it by c[1]
        elif c[0] == 1:
            start_pos += c[1]
    return new_seq
def trim_seq(Read_seq, strim, etrim):
    '''returns the trimmed sequence'''
    # Drop `strim` characters from the 5' end and `etrim` from the 3' end.
    # An end-trim of 0 must map to a slice end of None (a -0 end would
    # produce an empty string).
    end = -etrim if etrim > 0 else None
    return Read_seq[strim:end]
def main():
    """Count pairwise methylation states (ZZ/Zz/zZ/zz) for adjacent CpG
    pairs overlapped by BAM reads and write one count row per pair."""
    args = get_args()
    # read in read - CpG pair information: read name -> list of
    # [chr, start, end] pairs; pairs_dict accumulates [ZZ, Zz, zZ, zz]
    rpd = {}
    pl = []
    prev_read = 'none'
    pairs_dict = {}
    intfile = open(args.CpGpairs, "r")
    for Line in intfile:
        fields = Line.strip("\n").split("\t")
        r, p = fields[0], fields[1:4]
        pairs_dict[tuple(p)] = [0, 0, 0, 0]
        if r != prev_read:
            # input is sorted by read name, so a new name closes the
            # previous read's pair list
            rpd[prev_read] = pl
            pl = [p]
        else:
            pl.append(p)
        prev_read = r
    rpd[r] = pl
    intfile.close()  # bug fix: handle was previously leaked
    del (rpd['none'])
    # get the reads
    bamfile = pysam.Samfile(args.BAMfile, "rb")
    # for each read, check which adjacent pairs it overlaps and extract
    # the methylation state information for those pairs
    states = ['ZZ', 'Zz', 'zZ', 'zz']
    for Read in bamfile:
        Rname = Read.query_name
        # is read overlapping with any adjacent CpG pairs?
        if not Rname in rpd:
            continue
        chrom = Read.reference_name
        r_start = int(Read.reference_start)
        r_end = int(Read.reference_end)
        # get pairs within the read's range
        elig_pairs = rpd[Rname]
        if Read.mapping_quality < args.minMapQual:
            continue
        # if the CIGAR contains coordinate-altering operations, project the
        # XM string onto reference space; otherwise use the XM tag directly
        cs = Read.cigarstring
        if 'D' in cs or 'I' in cs or 'N' in cs or 'H' in cs or 'P' in cs:
            bs_seq = get_CIGARbased_sequence(Read)
        else:
            bs_seq = [item for item in Read.tags if item[0] == 'XM'][0][1]
        # length of the sequence after taking the trimming into account
        trim_seq_len = len(trim_seq(bs_seq, args.trimStart, args.trimEnd))
        beg = -1 if args.trimStart == 0 else args.trimStart
        # now, determine the me states per pair
        for P in elig_pairs:
            P = tuple(P)
            if not Read.is_reverse:
                b1 = int(P[1]) - r_start
                b2 = int(P[2]) - 1 - r_start
            else:
                b1 = int(P[1]) + 1 - r_start
                b2 = int(P[2]) - r_start
            # both positions must fall inside the untrimmed window
            if b1 > beg and b2 <= trim_seq_len:
                state = bs_seq[b1] + bs_seq[b2 - 1]
            else:
                continue
            if not state in states:
                warnings.warn("Did not find a z or Z at the expected position (%s, %d, %d) within read %s" % (chrom, int(P[1]), int(P[2]), Rname))
                continue
            # bug fix: the original built a one-hot temp dict with
            # itertools.izip, which does not exist in Python 3; increment
            # the pair's counter for this state directly instead
            pairs_dict[P][states.index(state)] += 1
    # save output; bug fix: rows are str, so the file must be opened in
    # text mode ('wb' raises TypeError under Python 3)
    out = open(args.outfile + 'CpG_pair_states.txt', 'w')
    header = ['chr', 'cpg1', 'cpg2', 'ZZ', 'Zz', 'zZ', 'zz']
    out.write('\t'.join(header) + '\n')
    for i in pairs_dict:
        # apply the per-pair minimum coverage threshold
        if sum(pairs_dict[i]) >= args.minCoverage:
            chrom, cpg1, cpg2 = i[0], int(i[1]), int(i[2])
            ZZ, Zz, zZ, zz = pairs_dict[i][0], pairs_dict[i][1], pairs_dict[i][2], pairs_dict[i][3]
            out.write("%s\t%d\t%d\t%d\t%d\t%d\t%d\n" % (chrom, cpg1, cpg2, ZZ, Zz, zZ, zz))
    out.close()
if __name__ == '__main__':
    # Entry point when run as a script.
    main()
|
# -*- coding:utf-8 -*-
#Get method: headers, param
import requests
import json
import glob,os
# Demo: GET request against httpbin with custom headers and query params.
host = "http://httpbin.org/"
endpoint = "get"
url = ''.join([host, endpoint])
headers = {"User-Agent": "test request headers"}
params = {"show_env": "1"}
r = requests.get(url=url, headers=headers, params=params)
print(r.text)
# Bug fix: parse the JSON body with json.loads instead of eval() —
# eval on a server response executes arbitrary expressions and is unsafe.
print(json.loads(r.text)['headers']['User-Agent'])
|
import numpy as np
import sklearn.cluster as sk
import sklearn.manifold as mf
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors as mplc
from mpl_toolkits.mplot3d import axes3d
from scipy.cluster.hierarchy import ward, fcluster
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster import hierarchy
from sklearn.cluster import SpectralClustering
from collections import Counter
###########################################################
###### ESS DIST ##################################
#
# read and analyze hic distance matrix
#
###########################################################
class dist:
# constructor -------------------------------------
    def __init__(self,filename,metafile=None):
        "This class stores a distance matrix and provides some analysis tools"
        self.metafile = metafile
        torm = []          # row indices flagged for removal (meta lines with 4 fields)
        self.pseudo = []   # 1 for pseudo-replicate entries, 0 for originals
        if self.metafile:
            # matrix size k and per-row annotations come from the meta file
            meta = open(self.metafile,'r')
            k = 0
            colors = []
            refs = []
            nrefs = []
            for line in meta:
                if line[0] != '#':
                    fld = line.split()
                    if fld[1] == 'pseudo':
                        self.pseudo += [1]
                    else:
                        refs += [ fld[1] ]
                        nrefs+= [ fld[0] ]
                        self.pseudo += [0]
                    if len(fld) == 4:
                        torm += [k]
                    colors += [fld[2]]
                    k += 1
            # map color strings to integer labels (both directions kept)
            colorset = set(colors)
            colors = np.array(colors)
            counter = 0
            self.col2lab = {}
            self.lab2col = {}
            for c in colorset:
                self.lab2col[c] = counter
                self.col2lab[counter] = c
                colors[colors==c] = counter
                counter += 1
            self.colors = colors.astype(int)
            self.refs = np.array(refs)
            self.nrefs = np.array(nrefs)
        else:
            # no meta file: infer the matrix size from the largest row index
            ipt = open(filename,'r')
            k = 0
            for line in ipt:
                if line[0] != '#':
                    flds = line.split()
                    k = max([k,int(flds[0])])
            ipt.close()
            # NOTE(review): in this branch self.colors / self.refs /
            # self.nrefs are never assigned, yet they are used
            # unconditionally below (self.colors[~rowmask]) — this path
            # likely crashes; confirm a metafile is always supplied.
        self.pseudo = np.array(self.pseudo, dtype=bool)
        # distance matrix; -1 marks missing entries (masked below)
        x = np.empty((k,k))
        x[:] = -1
        ipt = open(filename,'r')
        for line in ipt:
            if line[0] != '#':
                flds = line.split()
                # input is 1-based (i, j, value); '-' means missing
                i = int(flds[0])-1
                j = int(flds[1])-1
                if flds[2] != '-' and i not in torm and j not in torm:
                    x[i,j] = float(flds[2])
                    x[j,i] = x[i,j]
        x[np.isnan(x)] = -1
        self.mask = (x<0)
        self.dist = np.ma.masked_array(x,self.mask)
        # drop rows whose off-diagonal entries are all missing
        tomsk = lambda M,i: np.all(M[i,np.arange(len(M))!=i]<0)
        rowmask = np.array([ tomsk(x,i) for i in range(k) ])
        notorm = np.arange(k)[~rowmask]
        self.mdist = x[notorm,:][:,notorm]   # distance matrix restricted to kept rows
        self.mcol = self.colors[~rowmask]    # color labels for kept rows
        self.mpsd = self.pseudo[~rowmask]    # pseudo flags for kept rows
        self.mrefs = self.refs[~rowmask]
        self.mnrefs= self.nrefs[~rowmask]
        self.mask2original = notorm          # kept-row index -> original index
# methods -------------------------------------
def print_dist(self,where='prova.dat'):
"prints the distance map"
opt = open(where,'w')
dummy_matrix = np.zeros(self.dist.shape)-1.
for i in range(self.dist.shape[0]):
for j in range(self.dist.shape[0]):
dummy_matrix[i,j] = self.dist[i,j]
for i in range(self.dist.shape[0]):
for j in range(self.dist.shape[1]):
if dummy_matrix[i,j] < 0.:
opt.write('%d\t%d\t-\n' % ( i+1, j+1 ) )
else:
opt.write('%d\t%d\t%f\n' % (i+1, j+1, dummy_matrix[i,j] ) )
opt.close()
    def order(self,ind=None):
        """Reorder samples; by default sorts them by color label.

        `ind` is a permutation over the full (unmasked) index set; the
        masked arrays are reordered with a permutation derived from it.
        """
        if ind is None:
            ind = np.argsort(self.colors)
            ind_= np.argsort(self.mcol)
        else:
            ind_= np.argsort(ind[self.mask2original])
        # apply the permutation symmetrically to rows and columns
        self.dist = self.dist[:,ind][ind,:]
        self.mask = self.mask[:,ind][ind,:]
        self.mdist = self.mdist[:,ind_][ind_,:]
        self.colors = self.colors[ind]
        self.mcol = self.mcol[ind_]
        self.pseudo = self.pseudo[ind]
        self.mpsd = self.mpsd[ind_]
        # track where the kept rows land under the new ordering
        dni = np.arange(len(ind))[np.argsort(ind)]
        self.mask2original = np.sort(dni[self.mask2original])
        try:
            self.clusters = self.clusters[ind_]
        except AttributeError:
            # no clustering computed yet — nothing to reorder
            pass
def get_cmap(self,index=-1):
"get similarity map from colors"
cmat = 0
if index==-1:
ncol = self.mcol.max()+1
for i in range(ncol):
K = np.zeros(self.mcol.shape)
K[self.mcol==i] = 1
kx,ky = np.meshgrid(K,K)
cmat += kx*ky
else:
K = np.zeros(self.mcol.shape)
K[self.mcol==index] = 1
kx,ky = np.meshgrid(K,K)
cmat += kx*ky
K = np.zeros(self.mcol.shape)
K[self.mcol!=index] = 1
cmat += kx*ky
return np.triu(cmat,1), np.triu(1-cmat,1)
def get_roc_area(self):
"returns area under roc curve"
A = 0
for i in range(1,len(self.roc[0])):
A += (self.roc[1][i] + self.roc[1][i-1])*(self.roc[0][i] - self.roc[0][i-1])*0.5
return A
def get_roc(self,index=-1,sample=10):
"get roc curve"
C,N = self.get_cmap(index)
iu = np.triu_indices(len(C),1)
c = C[iu]
n = N[iu]
TPR_max = np.sum(c)
FPR_max = np.sum(n)
TPR = [0.]
FPR = [0.]
DST = [0.]
d = self.mdist[iu]
ind = np.argsort(d)
c = c[ind]
n = n[ind]
d = d[ind]
for i in range(1,len(d),sample):
DST += [ d[i] ]
TPR += [ np.sum(c[:i])/TPR_max ]
FPR += [ np.sum(n[:i])/FPR_max ]
TPR += [1.]
FPR += [1.]
DST += [d[-1]]
self.dlist = np.array(DST)
self.roc = np.array([ FPR, TPR ])
def get_gauss_sim(self,gamma=None):
"create an affinity map with a gaussian kernel"
if not gamma:
gamma = 1./(2*np.std(self.mdist))
self.sim_map = np.exp(-gamma*self.mdist*self.mdist)
    def MDS(self,ndim=3):
        "return low dimensional representation of the points, using MDS"
        # NOTE: sklearn's MDS uses a random initialization, so results vary
        # between calls unless a random_state is fixed upstream.
        embedding = mf.MDS(n_components=ndim, dissimilarity='precomputed')
        # transposed -> shape (ndim, n_samples)
        self.MDSrep = embedding.fit_transform(self.mdist).T
        return self.MDSrep
    def spec_clustering(self,nclust=3):
        "compute spectral clustering"
        # Requires self.sim_map (call get_gauss_sim first).  Labels are
        # shifted to start at 1 to match the hierarchical-clustering output.
        spectral = sk.SpectralClustering(n_clusters=nclust, affinity='precomputed' )
        self.clusters = spectral.fit_predict(self.sim_map)+1
        return self.clusters
def hier_clustering(self,nclust=3):
"compute hierarchical clustering"
flat_dist = self.mdist[ np.triu_indices(len(self.mdist),1) ]
linked = ward(flat_dist)
self.clusters = fcluster(linked,nclust,criterion='maxclust')
return self.clusters
def get_dunn_score(self):
"compute clustering dunn score"
cluster_matrix = np.zeros(self.mdist.shape)
try:
ncls = self.clusters.max()+1
except AttributeError:
self.clusters = self.mcol+1
ncls = self.clusters.max()
for i in range(1,ncls+1):
K = np.zeros(self.mcol.shape)
K[self.clusters==i] = 1
kx,ky = np.meshgrid(K,K)
cluster_matrix += kx*ky
cluster_matrix = np.array(cluster_matrix, dtype=bool)
diameter = np.max( self.mdist[cluster_matrix] )
separation = np.min( self.mdist[~cluster_matrix] )
return separation/diameter
    def get_quality_score(self):
        "compute clustering quality score"
        # For every non-medoid node, compare its distance to its own
        # cluster's medoid against the nearest other-cluster medoid, and
        # report the median ratio (higher = better separated clusters).
        try:
            ncls = self.clusters.max()
        except AttributeError:
            # no clustering computed: fall back to the reference colors
            self.clusters = self.mcol+1
            ncls = self.clusters.max()
        medoids = {}
        for c in range(1,ncls+1):
            nodes_in_cluster = np.arange(len(self.mcol))[self.clusters==c]
            # medoid = member minimizing the summed distance to the others
            medind = np.argmin(np.sum(self.mdist[nodes_in_cluster,:][:,nodes_in_cluster],axis=1))
            medoids[c] = nodes_in_cluster[medind]
        rho = []
        for c in range(1,ncls+1):
            nodes_in_cluster = np.arange(len(self.mcol))[self.clusters==c]
            nodes_in_cluster = nodes_in_cluster[nodes_in_cluster!=medoids[c]]
            if len(nodes_in_cluster) < 1:
                # singleton cluster: no non-medoid members to score
                continue
            for node in nodes_in_cluster:
                dist_to_intramedoid = self.mdist[node,medoids[c]]
                dist_to_intermedoid = []
                for c_ in range(1,ncls+1):
                    if c_ != c:
                        dist_to_intermedoid += [ self.mdist[node,medoids[c_]] ]
                dist_to_intermedoid = np.min( dist_to_intermedoid )
                rho += [ dist_to_intermedoid/dist_to_intramedoid ]
        rho = np.median(rho)
        return rho
def get_purity_score(self):
try:
ncls = self.clusters.max()
except AttributeError:
self.clusters = self.mcol+1
ncls = self.clusters.max()
self.purity = 0.
for nc in range(1,ncls+1):
cluster_colors = self.mcol[self.clusters==nc]
counter_colors = Counter(cluster_colors)
color_max = max(counter_colors, key = lambda x: counter_colors.get(x) )
self.purity += counter_colors[color_max]
self.purity /= len(self.clusters)
return self.purity
# plotters -------------------------------------
def plot(self,cmap='inferno',save=None):
"plot the matrix"
fig, ax = plt.subplots(1,1,figsize=(8,8))
cax = ax.imshow(self.dist, cmap=cmap)
cbar = plt.colorbar(cax)
cbar.set_label(r"$d$",fontsize=30,rotation='horizontal')
if save:
plt.savefig(save, transparent=True)
def plot_masked(self,cmap='inferno',save=None):
"plot the masked matrix"
fig, ax = plt.subplots(1,1,figsize=(8,8))
cax = ax.imshow(self.mdist, cmap=cmap)
#cbar = plt.colorbar(cax)
#cbar.set_label("DISTANCE",fontsize=20,rotation='vertical')
if save:
plt.savefig(save, transparent=True)
def plot_squares(self,cmap='Spectral'):
"plots squares boundaries for different colors"
def plot_square(a,b,c='k'):
x = [ a, b, b, a, a]
y = [ a, a, b, b, a]
plt.plot(x,y,c=c,lw=3.)
regions = []
col_reg = [self.mcol[0]]
blo = 0.
for i in range(len(self.mcol)):
if self.mcol[i] != col_reg[-1]:
regions += [ (blo-0.5,i-0.5) ]
blo = i
col_reg += [ self.mcol[i] ]
regions += [ (blo-0.5, len(self.mcol)-0.5)]
colormap = cm.get_cmap('Spectral')
ncol = np.max(self.mcol)+1
col = [ mplc.rgb2hex(colormap(i*1./(ncol-1.))) for i in range(ncol) ]
for i in range(len(regions)):
plot_square(regions[i][0],regions[i][1],c=col[col_reg[i]])
def plot_similarity(self,cmap='Greys'):
"plot knn map"
fig, ax = plt.subplots(1,1,figsize=(8,8))
ax.imshow(self.sim_map, cmap=cmap)
def plot_roc(self,col='r',sim='-',lw=3.,rnd=True,save=None):
"plot roc curve"
if rnd:
plt.plot([0,1],[0,1],'k--')
plt.plot(self.roc[0],self.roc[1],sim, c=col, lw=lw)
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.xlabel(r'False Positive Rate', fontsize=25)
plt.ylabel(r'True Positive Rate', fontsize=25)
plt.tight_layout()
if save:
plt.savefig(save, transparent=True)
    def show_hist(self):
        "plot histogram of the distances"
        # cmap: 1 where both samples share a color label
        cmap = np.zeros( self.mdist.shape )
        for c in range(self.mcol.max()+1):
            k = np.zeros(self.mcol.shape)
            k[self.mcol==c] = 1
            kx,ky = np.meshgrid(k,k)
            cmap += kx*ky
        # omap: 1 where both samples are originals (not pseudo-replicates)
        px,py = np.meshgrid( 1-self.mpsd.astype(int), 1-self.mpsd.astype(int) )
        omap = px*py
        # pmap: 1 for (pseudo, original) pairings
        px,py = np.meshgrid( self.mpsd.astype(int), 1-self.mpsd.astype(int))
        pmap = px*py
        ind = np.triu_indices(self.mdist.shape[0],k=1)
        # distances between same-color original pairs
        samecell_dist = np.copy(self.mdist)
        samecell_dist[ cmap*omap==0 ] = -1
        samecell_dist = samecell_dist[ind]
        samecell_dist = samecell_dist[samecell_dist>-1]
        # distances between different-color original pairs
        diffcell_dist = np.copy(self.mdist)
        diffcell_dist[ (1-cmap)*omap==0 ] = -1
        diffcell_dist = diffcell_dist[ind]
        diffcell_dist = diffcell_dist[diffcell_dist>-1]
        # distances between same-color (pseudo, original) pairs
        pseudo_dist = np.copy(self.mdist)
        pseudo_dist[ cmap*pmap==0 ] = -1
        pseudo_dist = pseudo_dist[ind]
        pseudo_dist = pseudo_dist[pseudo_dist>-1]
        # overlaid normalized histograms over distance range [0, 1]
        plt.hist(samecell_dist,range=(0.,1.),bins=100,alpha=0.6, density=True)
        plt.hist(diffcell_dist,range=(0.,1.),bins=100,alpha=0.6, density=True)
        plt.hist(pseudo_dist,range=(0.,1.), bins=100,alpha=0.6, density=True)
        plt.show()
    def plot_MDS(self,cmap='Spectral', save=None):
        "plot scatter plot using the MDS representation"
        colormap = cm.get_cmap(cmap)
        col = [ mplc.rgb2hex(colormap(i*1./self.colors.max())) for i in range(self.colors.max()+1) ]
        # MDSrep has shape (ndim, n_samples); see MDS()
        ndim = len(self.MDSrep)
        fig = plt.figure(figsize=plt.figaspect(0.8)*1.5)
        # split columns into originals and pseudo-replicates
        Xoriginal = self.MDSrep[:,~self.mpsd]
        Xpseudo = self.MDSrep[:, self.mpsd]
        if ndim > 3:
            # pick a random extra dimension to modulate the marker size
            n = np.random.choice(range(3,ndim))
            ax = fig.add_subplot(111,projection='3d')
            ax.scatter(Xoriginal[0],Xoriginal[1],Xoriginal[2],marker='o', c=self.mcol[~self.mpsd], s=1000*(Xoriginal[n]-self.MDSrep[n].min()), edgecolors='k',cmap=cmap)
            ax.scatter(Xpseudo[0],Xpseudo[1],Xpseudo[2],marker='*',c=self.mcol[self.mpsd],s=1000*(Xpseudo[n]-self.MDSrep[n].min()),edgecolors='k',cmap=cmap)
        if ndim == 3:
            ax = fig.add_subplot(111,projection='3d')
            ax.scatter(Xoriginal[0],Xoriginal[1],Xoriginal[2],marker='o',c=self.mcol[~self.mpsd],s=100,edgecolors='k',cmap=cmap)
            ax.scatter(Xpseudo[0],Xpseudo[1],Xpseudo[2],marker='*',c=self.mcol[self.mpsd],s=100,edgecolors='k',cmap=cmap)
        if ndim == 2:
            ax = fig.add_subplot(111)
            ax.scatter(Xoriginal[0],Xoriginal[1],marker='o',c=self.mcol[~self.mpsd],s=100,edgecolors='k',cmap=cmap)
            ax.scatter(Xpseudo[0],Xpseudo[1],marker='*',c=self.mcol[self.mpsd],s=100,edgecolors='k',cmap=cmap)
        if ndim == 1:
            ax = fig.add_subplot(111)
            # NOTE(review): with ndim == 1, Xoriginal has a single row, so
            # Xoriginal[1] below raises IndexError, and the Xpseudo scatter
            # is missing its y argument — this branch looks broken; confirm.
            ax.scatter(Xoriginal[0],Xoriginal[1],marker='o',c=self.mcol[~self.mpsd],s=100,edgecolors='k',cmap=cmap)
            ax.scatter(Xpseudo[0],marker='*',c=self.mcol[self.mpsd],s=100,edgecolors='k',cmap=cmap)
        # invisible handles used only to populate a legend, if enabled
        for c in range(self.colors.max()+1):
            ax.plot([],[],marker='o',ms=10,mew=0,c=col[c],lw=0, label=self.col2lab[c])
        # NOTE(review): mew expects a float; the string '1' may be rejected
        # by newer matplotlib versions — confirm.
        ax.plot([],[],marker='*',ms=10,mew='1',c='w',mec='k',label='pseudoreplicates')
        #plt.legend(loc=2)
        ax.set_axis_off()
        if save:
            plt.savefig(save, transparent=True)
def show_clusters(self,cmap='Spectral', save=None):
    """Plot every cluster as a small hexagonal patch of points, the patches
    themselves laid out on a larger hexagonal lattice (one site per cluster).

    Original samples are drawn as circles, pseudoreplicates as stars, both
    coloured by self.mcol.  `cmap` names a matplotlib colormap; `save` is an
    optional path passed to plt.savefig.
    """
    def clock(hour):
        """Advance the hex-spiral state `hour` = [step, edge, ring] by one site.

        After `ring` steps the walk turns onto the next edge; after 6 edges it
        moves out to the next (larger) ring.
        """
        hour[0] += 1
        if hour[0] == hour[2]:
            hour[0] = 0
            hour[1] += 1
            if hour[1] == 6:
                hour[1] = 0
                hour[2]+= 1
        return hour
    def read(hour):
        """Convert the spiral state to axial lattice coordinates (n, m)."""
        base = [ ( 1, 0),
                 ( 0, 1),
                 (-1, 1),
                 (-1, 0),
                 ( 0,-1),
                 ( 1,-1),
                 ( 1, 0) ]
        # Interpolate between the two corner directions of the current edge.
        n = ( hour[2]-hour[0] )*base[hour[1]][0] + hour[0]*base[hour[1]+1][0]
        m = ( hour[2]-hour[0] )*base[hour[1]][1] + hour[0]*base[hour[1]+1][1]
        return n,m
    def hexagonal(N,L,offset=[0.,0.]):
        """Return a 2xN array of hexagonal-lattice points with lattice constant L.

        NOTE(review): mutable default `offset` -- harmless (read-only) but fragile.
        """
        lattice = [ ]
        a = np.array([0.,L])
        b = np.array([np.sqrt(0.75)*L, 0.5*L])
        offset = np.array(offset)
        lattice += [ 0.*a+0.*b + offset ]
        hour = [0,0,1]
        while len(lattice)<N :
            n,m = read(hour)
            lattice += [ n*a + m*b + offset ]
            hour = clock(hour)
        lattice = np.transpose(lattice)
        return lattice
    colormap = cm.get_cmap(cmap)
    col = [ mplc.rgb2hex(colormap(i*1./(self.mcol.max()))) for i in range(self.mcol.max()+1) ]
    # NOTE(review): under Python 3 dict.keys() is a view and lab[cc] below
    # raises TypeError -- wrap in list() if this ever runs on Python 3; confirm.
    lab = self.lab2col.keys()
    cnum = self.clusters.max()+1
    cpos = hexagonal(cnum,10)   # one lattice site per cluster
    cluster_colors = [ self.mcol[self.clusters==c] for c in range(cnum) ]
    pseudo_markers = [ self.mpsd[self.clusters==c] for c in range(cnum) ]
    fig = plt.figure(figsize=plt.figaspect(1.)*1.5)
    ax = fig.add_subplot(111)
    fig.patch.set_visible(False)
    ax.set_aspect('equal','datalim')
    ax.axis('off')
    # NOTE(review): the loop starts at 1, so cluster 0 is never drawn --
    # confirm this is intentional (e.g. cluster 0 == unassigned/noise).
    for c in range(1,cnum):
        ccol = cluster_colors[c]
        cpsd = pseudo_markers[c]
        ppos = hexagonal(len(ccol),1,offset=[cpos[0][c],cpos[1][c]])
        for cc in range(self.mcol.max()+1):
            ind = (ccol==cc)*(~cpsd)   # originals of colour cc
            jnd = (ccol==cc)*(cpsd)    # pseudoreplicates of colour cc
            ax.plot(ppos[0][ind],ppos[1][ind], marker='o', lw=0, ms=10, mec='k', c=col[cc])
            ax.plot(ppos[0][jnd] ,ppos[1][jnd] , marker='*', lw=0, ms=10, mec='k', c=col[cc])
    # Legend proxies: empty plots that only contribute labels.
    for cc in range(self.mcol.max()+1):
        ax.plot([],[],marker='o',ms=10,mew=0,c=col[cc],lw=0,label=lab[cc])
    ax.plot([],[],marker='*',ms=10,mew='1',c='w',mec='k',label='pseudoreplicates')
    plt.legend(loc=2)
    if save:
        plt.savefig(save, transparent=True)
    plt.show()
def plot_dendrogram(self,cutoff=0.,method='ward', leafname='new', save=None):
    """Compute a hierarchical clustering of self.mdist and plot the dendrogram.

    cutoff   : colour threshold handed to scipy's dendrogram.
    method   : linkage method ('ward', 'average', ...).
    leafname : 'new' labels leaves with self.mnrefs, 'old' with self.mrefs.
    save     : optional output path for plt.savefig.
    Stores the dendrogram dict on self.dendro and returns it.
    """
    ax = plt.gca()
    # Hide the axes frame.
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(0)
        ax.spines[axis].set_zorder(0)
    # Condensed (upper-triangle) distance vector, normalised to [0, 1].
    flat_dist = self.mdist[ np.triu_indices(len(self.mdist),1) ]/self.mdist.max()
    linked = linkage(flat_dist, method)
    label_list = np.arange(len(self.mdist))
    # NOTE(review): `labs` stays unbound when leafname is neither 'new' nor
    # 'old' -- a later NameError; confirm only these two values are passed.
    if leafname == 'new':
        labs = np.copy(self.mnrefs)
    elif leafname == 'old':
        labs = np.copy(self.mrefs)
    my_palette = cm.Set2(np.linspace(0,1,len(self.col2lab)))
    hierarchy.set_link_color_palette([mplc.rgb2hex(rgb[:3]) for rgb in my_palette])
    self.dendro = dendrogram(linked,
                orientation='top',
                labels=label_list,
                distance_sort='descending',
                color_threshold=cutoff,
                show_leaf_counts=True,
                above_threshold_color='black'
                )
    # 'ivl' holds the leaf labels in display order; because the labels are the
    # integer indices above, they can index mcol/labs directly.
    scramble = self.mcol[self.dendro['ivl']]
    labrable = labs[self.dendro['ivl']]
    cmap = cm.get_cmap('Spectral')
    clist = list(set(self.mcol))
    col = [ cmap(i*1./(max(clist))) for i in range(max(clist)+1) ]
    leaves = np.arange(len(self.mdist))*10 + 5   # x position of each leaf tick
    # Colour dot under every leaf, per class colour.
    for i in clist:
        i_leaves = leaves[scramble==i]
        plt.plot(i_leaves, [0]*len(i_leaves), 'o', mec='none', c=col[i], ms=10.)
    # Legend proxies.
    for c in range(max(clist)+1):
        plt.plot([],[],marker='o',ms=10,mew=0,c=col[c],lw=0, label=self.col2lab[c])
    plt.xticks(leaves,labrable,fontsize=15)
    plt.legend(loc=2)
    ax.set_ylim(bottom=-0.2)
    plt.tight_layout()
    if save:
        plt.savefig(save, transparent=True)
    return self.dendro
|
import os
# Boot Django standalone so the ORM can be used from this script.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bookmanager.settings")
import django
django.setup()
from app01 import models
################# Object-based (related-object) queries ############
# Forward query (from the model holding the FK):
# book_obj = models.Book.objects.get(pk=1)
# print(book_obj.publisher) # the related Publisher instance
# print(book_obj.publisher_id) # the related publisher's id
# Reverse query (from the related model):
# pub_obj = models.Publisher.objects.get(pk=1)
# Without related_name, the reverse manager is <model name lowercase>_set:
# print(pub_obj)
# print(pub_obj.book_set,type(pub_obj.book_set)) # <model>_set relation manager
# print(pub_obj.book_set.all())
# With related_name = 'books' there is no <model>_set attribute.
################# Field-based (double-underscore) queries ###########
# ret = models.Book.objects.filter(publisher__name='新华出版社')
# Without related_name, filter by the lowercase model name:
# ret = models.Publisher.objects.filter(book__name='床上学python')
# With related_name='books' and no related_query_name:
# ret = models.Publisher.objects.filter(books__name='床上学python')
# With related_query_name='book':
# ret = models.Publisher.objects.filter(book__name='床上学python')
|
import sys
import time
import RPi.GPIO as GPIO
# Use BCM GPIO references
# instead of physical pin numbers
#GPIO.setmode(GPIO.BCM)
# Python 2 script: report whichever pin-numbering mode is currently active
# (None when setmode has not been called yet), then reset the GPIO state.
mode=GPIO.getmode()
print " mode ="+str(mode)
GPIO.cleanup()
# Define GPIO signals to use
# Physical pins 11,15,16,18
# GPIO17,GPIO22,GPIO23,GPIO24
StepPinForward=11    # BOARD pin driving the forward direction
StepPinBackward=15   # BOARD pin driving the backward direction
sleeptime=2          # NOTE(review): unused -- forward/reverse take their own delay
GPIO.setmode(GPIO.BOARD)
GPIO.setup(StepPinForward, GPIO.OUT)
GPIO.setup(StepPinBackward, GPIO.OUT)
def forward(x):
    """Drive the motor forward for x seconds by holding the forward pin high."""
    GPIO.output(StepPinForward, GPIO.HIGH)
    print "forwarding running motor "
    time.sleep(x)
    GPIO.output(StepPinForward, GPIO.LOW)
def reverse(x):
    """Drive the motor backward for x seconds by holding the backward pin high."""
    GPIO.output(StepPinBackward, GPIO.HIGH)
    print "backwarding running motor"
    time.sleep(x)
    GPIO.output(StepPinBackward, GPIO.LOW)
#time.sleep(2)
# Run the motor forward for 20 seconds, then release the GPIO pins.
print "forward motor "
#reverse(5)
forward(20)
#time.sleep(2)
#print "reverse motor"
#forward(7)
#print "Stopping motor"
GPIO.cleanup()
|
# v2dump.py 20100914
# Author: Peter Sovietov
import sys
Prefix = ''  # global output-filename prefix; set from the input filename at startup
def dw(buf, d):
    """Read a little-endian 32-bit unsigned integer from character buffer `buf` at offset `d`."""
    return (ord(buf[d + 3]) << 24) | (ord(buf[d + 2]) << 16)| \
        (ord(buf[d + 1]) << 8) | ord(buf[d])

def delta(buf, d, num):
    """Read one 24-bit delta value whose three bytes are stored `num` apart (column-major layout)."""
    return ord(buf[d]) | (ord(buf[d + num]) << 8) | \
        (ord(buf[d + 2 * num]) << 16)

def nt(c, buf, d, num):
    """Decode `num` note events for channel `c` into (absolute_time, midi_bytes) tuples.

    Times, pitches and velocities are all delta-encoded, column-major.
    """
    r = []
    t = p = v = 0
    for i in range(d, d + num):
        t += delta(buf, i, num)
        p = (p + ord(buf[i + 3 * num])) & 0xff
        v = (v + ord(buf[i + 4 * num])) & 0xff
        r += [(t, chr(0x90 | c) + chr(p) + chr(v))]   # note-on
    return r

def pc(c, buf, d, num):
    """Decode `num` program-change events for channel `c`."""
    r = []
    t = p = 0
    for i in range(d, d + num):
        t += delta(buf, i, num)
        p = (p + ord(buf[i + 3 * num])) & 0xff
        r += [(t, chr(0xc0 | c) + chr(p))]
    return r

def pb(c, buf, d, num):
    """Decode `num` pitch-bend events for channel `c` (two delta-encoded data bytes)."""
    r = []
    t = p0 = p1 = 0
    for i in range(d, d + num):
        t += delta(buf, i, num)
        p0 = (p0 + ord(buf[i + 3 * num])) & 0xff
        p1 = (p1 + ord(buf[i + 4 * num])) & 0xff
        r += [(t, chr(0xe0 | c) + chr(p0) + chr(p1))]
    return r

def cc(c, n, buf, d, num):
    """Decode `num` control-change events for controller `n + 1` on channel `c`."""
    r = []
    t = p = 0
    for i in range(d, d + num):
        t += delta(buf, i, num)
        p = (p + ord(buf[i + 3 * num])) & 0xff
        r += [(t, chr(0xb0 | c) + chr(n + 1) + chr(p))]
    return r

def v2dump(buf):
    """Parse a V2M module from the string `buf`.

    Returns a dict with 'timediv', 'maxtime', 'gptr', 'globals', 'patchmap'
    and, per channel 0-15, the decoded note/program-change/pitch-bend streams
    plus seven controller streams (keys 0-6).

    BUGFIX: the pitch-bend stream was decoded with `pcnum` (the program-change
    count) instead of `pbnum`, mis-reading every file containing pitch bends.
    """
    d = 0
    v2 = {}
    v2['timediv'] = dw(buf, d)
    v2['maxtime'] = dw(buf, d + 4)
    gdnum = dw(buf, d + 8)
    d += 12
    v2['gptr'] = buf[d:d + 10 * gdnum]
    d += 10 * gdnum
    for i in range(16):
        v2[i] = {}
        notenum = dw(buf, d)
        d += 4
        if notenum:
            v2[i]['noteptr'] = nt(i, buf, d, notenum)
            d += 5 * notenum
        pcnum = dw(buf, d)
        d += 4
        v2[i]['pcptr'] = pc(i, buf, d, pcnum)
        d += 4 * pcnum
        pbnum = dw(buf, d)
        d += 4
        # BUGFIX: was pb(i, buf, d, pcnum)
        v2[i]['pbptr'] = pb(i, buf, d, pbnum)
        d += 5 * pbnum
        for j in range(7):
            ccnum = dw(buf, d)
            d += 4
            v2[i][j] = cc(i, j, buf, d, ccnum)
            d += 4 * ccnum
    size = dw(buf, d)
    d += 4
    v2['globals'] = buf[d:d + size]
    d += size
    size = dw(buf, d)
    d += 4
    v2['patchmap'] = buf[d:d + size]
    return v2
def v2load(name):
    """Read the file `name` and parse it with v2dump."""
    with open(name, 'rb') as handle:
        raw = handle.read()
    return v2dump(raw)
def save(name, buf):
    """Write `buf` to a file named Prefix + name."""
    with open(Prefix + name, 'wb') as out:
        out.write(buf)
def mididelta(t):
    """Encode `t` as a fixed-width four-byte MIDI variable-length quantity."""
    packed = ''
    for shift in (21, 14, 7):
        packed += chr(((t >> shift) & 0x7f) | 0x80)
    return packed + chr(t & 0x7f)
def miditrack(c, mt):
    """Serialise one channel dict `c` into an 'MTrk' chunk, dropping events after time `mt`."""
    events = sorted(c['pcptr'] + c[0] + c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + \
        c['pbptr'] + c['noteptr'])
    body = ''
    last = 0
    for when, msg in events:
        if when > mt:
            break
        body += mididelta(when - last) + msg
        last = when
    body += '\x00\xff\x2f\x00'   # end-of-track meta event
    n = len(body)
    header = 'MTrk' + chr((n >> 24) & 0xff) + chr((n >> 16) & 0xff) + \
        chr((n >> 8) & 0xff) + chr(n & 0xff)
    return header + body
def save_midifile(v2):
    """Write all non-empty channels of `v2` as a format-1 MIDI file (Prefix + '.mid')."""
    tracks = ''
    ntracks = 0
    for ch in range(16):
        if v2[ch]:
            tracks += miditrack(v2[ch], v2['maxtime'])
            ntracks += 1
    save('.mid', 'MThd\0\0\0\6\0\1\0' + chr(ntracks) + '\0\xac' + tracks)
def save_patch(i, buf):
    """Write sound patch `i` as a .v2p file: 36-byte name field, version tag, padded to 895 bytes."""
    title = 'v2p1' + Prefix + str(i)
    payload = title + '\0' * (36 - len(title)) + '\6\0\0\0' + buf
    payload += '\0' * (895 - len(payload))
    save('_' + str(i) + '.v2p', payload)
def save_patchmap(buf):
    """Split the patchmap blob into individual patches using its leading offset table.

    The first offset doubles as the end of the table itself.
    """
    pos = 0
    patch = begin = dw(buf, pos)
    pos += 4
    while pos < patch:
        end = dw(buf, pos)
        save_patch(pos / 4, buf[begin:end])
        begin = end
        pos += 4
    save_patch(pos / 4, buf[begin:])
# Command-line entry point (Python 2: print statement below).
if len(sys.argv) != 2:
    print 'v2dump by Peter Sovietov\nUsage: v2dump file.v2m'
    sys.exit()
name = sys.argv[1]
# Output files share the input name without its .v2m extension.
Prefix = name.lower().replace('.v2m', '')
v2 = v2load(name)
save_midifile(v2)
save_patchmap(v2['patchmap'])
|
import requests, sys, re, json, mne, pickle, os, ctypes
import DogSchemaView_UI043 as view
from mne.viz import circular_layout, plot_connectivity_circle
import numpy as np
import matplotlib.pyplot as plt
import PySimpleGUI as sg
import mne.viz.utils as utils
from functools import partial
class analyseInput():
    """Helpers for querying the Ensembl REST API for dog (canis lupus familiaris) gene features."""
    def __init__(self):
        import requests, sys, re, json
        # REST endpoint pieces for region-overlap queries.
        self.server = "http://rest.ensembl.org"
        self.overlap = "/overlap/region/canis_lupus_familiaris/"
        self.headers = { "Content-Type" : "application/json", "Accept" : "application/json"}
    def getFeatureOverlap(self, loc):
        """Return [names, descriptions] for genes overlapping region `loc` ('chr:start-end').

        Names are joined with '%0d', descriptions with newlines.
        """
        text = requests.get(self.server+self.overlap+loc+'?feature=gene;', headers=self.headers)
        text = text.json()
        geneNames = ['', '']
        for i in text:
            #print(i['feature_type'])
            try:
                geneNames[0] = geneNames[0] + i['external_name'] + '%0d'
                geneNames[1] = geneNames[1] + i['description'] + '\n'
            except:
                # NOTE(review): bare except silently drops genes missing a
                # name/description -- best-effort by design, left as is.
                geneNames = geneNames
        return geneNames
def sliceChromosome(inputChromosome, refrenceChromosome, slices):
    """Split the input and reference chromosome line lists into `slices` chunks.

    Returns (inputSet, refrenceSet, inputSlices, refrenceSlices): the *Set
    lists hold the raw lines per chunk, the *Slices lists hold (start, end)
    coordinate bounds per chunk.
    NOTE(review): invoked as analyseInput.sliceChromosome(...) with three
    arguments, i.e. as a plain function (no self).  The magic offset 3002782
    looks like a coordinate-system shift between the two files -- confirm.
    """
    print('dividing chromosome into smaller slices for analysis')
    Alist = []
    Blist = []
    Ase = []   # (start, end) bounds of each input chunk
    Bse = []   # (start, end) bounds of each reference chunk
    cor = []   # NOTE(review): unused
    sliver = slices
    endA = 0
    endB = 0
    i = 0
    for a in range(sliver):
        Alist.append([])
        for i in range(endA, int(endA+len(inputChromosome)/sliver)):
            Alist[a].append(inputChromosome[i])
        # Column 3 of a tab-separated input line holds the position.
        start = inputChromosome[endA].split('\t')
        start = start[3]
        end = inputChromosome[i].split('\t')
        end = end[3]
        Ase.append((int(start)+3002782, int(end)+3002782))
        endA = i
        Blist.append([])
        for i in range(endB, int(endB+len(refrenceChromosome)/sliver)):
            Blist[a].append(refrenceChromosome[i])
        # Column 2 of a reference line holds the position.
        start = refrenceChromosome[endB].split('\t')
        start = start[2]
        end = refrenceChromosome[i].split('\t')
        end = end[2]
        Bse.append((start, end))
        endB = i
    inputSet, refrenceSet = Alist, Blist
    inputSlices, refrenceSlices = Ase, Bse
    return inputSet, refrenceSet, inputSlices, refrenceSlices
def alignSlices(inputSlices, refrenceSlices, inputSliceBounds, refrenceSliceBounds, slices):
    """Pair every input slice with the reference slices whose bounds overlap it.

    Returns (alignedSlices, location): alignedSlices[i] == [i, [ref indices]],
    and location holds 'start-end' strings for input slices that matched.
    Reference slice 0 is never considered (matches the original behaviour).
    """
    print('aligning input slices and refrence slices to optimize speed')
    location = []
    alignedSlices = []
    for idx in range(slices):
        lo = int(inputSliceBounds[idx][0])
        hi = int(inputSliceBounds[idx][1])
        overlapping = []
        for ref in range(1, len(refrenceSliceBounds)):
            ref_lo = int(refrenceSliceBounds[ref][0])
            ref_hi = int(refrenceSliceBounds[ref][1])
            starts_inside = lo <= ref_lo <= hi
            ends_inside = lo <= ref_hi <= hi
            if starts_inside or ends_inside:
                overlapping.append(ref)
        alignedSlices.append([idx, overlapping])
        if overlapping:
            location.append(str(inputSliceBounds[idx][0]) + '-' + str(inputSliceBounds[idx][1]))
    #print(location)
    return alignedSlices, location
def getBreedDominanceBySlice(inputSlices, refrenceSlices, sliceAlignments):
    """For each aligned slice, count per-breed SNP matches and pick dominant breeds.

    Returns (labels, length, geneList): per-slice [matches, highest,
    highestRelative] records, the number of analysed slices, and the Ensembl
    gene overlaps fetched per slice.
    """
    geneList = []
    labels = []
    thisLabel = []   # NOTE(review): unused
    slices = {}
    for Slice, c in enumerate(sliceAlignments):
        #print(Slice)
        # NOTE(review): `not c[0]` also skips slice index 0 (c[0] == 0 is
        # falsy), not only slices without alignments -- confirm intended.
        if not c[0] or not c[1]:
            continue
        else:
            # Region string 'chrom:start-end' from the first/last input lines.
            inputLine = inputSlices[c[0]][0].split('\t')
            subStart = str(inputLine[0] + ':' + inputLine[3])
            inputLine = inputSlices[c[0]][len(inputSlices[c[0]])-1].split('\t')
            loc = str(inputLine[3])
            loc = subStart + '-' + loc
            print('getting feature overlap for %s from ensemble database' % loc)
            geneList.append(analyseInput().getFeatureOverlap(loc))
            for i, inputLine in enumerate(inputSlices[c[0]]):
                inputLine = inputLine.split('\t')
                for s in c[1]:
                    # breed[name] = [times_seen, times_matching_input_snp]
                    breed = {}
                    refrenceLine = refrenceSlices[s][0].split('\t')
                    for refrenceLine in refrenceSlices[s]:
                        refrenceLine = refrenceLine.split('\t')
                        if refrenceLine[5] in breed:
                            breed[refrenceLine[5]][0] += 1
                            if str(inputLine[1]) == str(refrenceLine[0]):
                                breed[refrenceLine[5]][1] += 1
                        else:
                            breed[refrenceLine[5]] = [1, 0]
                            if str(inputLine[1]) == str(refrenceLine[0]):
                                breed[refrenceLine[5]][1] += 1
                    subEnd = int(refrenceLine[2])   # NOTE(review): unused
                    try:
                        slices[Slice].append(breed)
                    except:
                        slices[Slice] = []
                        slices[Slice].append(breed)
            inputLine = inputSlices[c[0]][len(inputSlices[c[0]])-1].split('\t')
            # Aggregate: total matches per breed, plus up to 10 most-seen breeds.
            highest = []
            matches = {}
            for dictionary in slices[Slice]:
                for i in dictionary:
                    if dictionary[i][1] > 0:
                        if i in matches:
                            matches[i] += dictionary[i][1]
                        else:
                            matches[i] = dictionary[i][1]
                        if len(highest) < 10:
                            highest.append((i, dictionary[i][0]))
                        else:
                            for x, cont in enumerate(highest):
                                if cont[1] < dictionary[i][0]:
                                    # NOTE(review): chained comparison -- this
                                    # evaluates as `(i in highest) and (highest != True)`,
                                    # i.e. `i in highest`; a membership test
                                    # against (name, count) tuples is always
                                    # False for a plain name, so no replacement
                                    # ever happens.  Probably meant
                                    # `i not in [h[0] for h in highest]` -- confirm.
                                    if i in highest != True:
                                        highest[x] = (i, dictionary[i][0])
            highest = tuple(highest)
            highest = dict((y, x) for y, x in highest)
            slices[Slice] = [matches, highest]
            # Breed with the highest matches / seen ratio.
            highestRelative = (0, 0)
            for lst in slices[Slice][0]:
                for subLst in slices[Slice][1]:
                    if lst == subLst:
                        if float(slices[Slice][0][lst]/slices[Slice][1][subLst]) > highestRelative[1]:
                            highestRelative = (lst, float(slices[Slice][0][lst]/slices[Slice][1][subLst]))
            slices[Slice].append(highestRelative)
    for i in slices:
        labels.append(slices[i])
    length = len(slices)
    return labels, length, geneList
def export(item, subPath, name):
    """Pickle `item` to <mainPath><subPath>/<name>.pkl, creating directories as needed.

    NOTE(review): mainPath is a hard-coded, user-specific Windows path.
    """
    mainPath = r'C:/Users/webaccus/OneDrive - Agilent Technologies/Desktop/DGAE'
    print('saving to' + mainPath + subPath)
    if not os.path.exists(mainPath + subPath):
        os.makedirs(mainPath + subPath)
    file = open(mainPath + subPath + '/%s.pkl' % name, 'wb')
    print('saving to' + str(file))
    pickle.dump(item, file)
    file.close()
def get(subPath, name):
    """Unpickle and return <mainPath><subPath>/<name>.pkl (hard-coded project root)."""
    mainPath = r'C:/Users/webaccus/OneDrive - Agilent Technologies/Desktop/DGAE'
    with open(mainPath + subPath + '/%s.pkl' % name, 'rb') as handle:
        return pickle.load(handle)
def processSpecimen(fname, dogName):
    """Analyse a .bim dog-genome file chromosome by chromosome and pickle the results.

    For each of the 38 chromosomes: slice, align against the canfam3 reference
    SNP files, compute breed dominance, and export the results under
    DogNames/<dogName>/<chromosome>/.
    NOTE(review): relies on hard-coded Windows paths and the GUI progress meter.
    """
    print(fname)
    file = open(fname, 'r')
    with file as f:
        data = f.readlines()
    data = data[755:]   # skip the header -- assumes a fixed 755-line preamble; confirm
    #print chromosome cutoff lines
    chromosome = 1
    cutoff = []
    for i, line in enumerate(data):
        line = line.split('\t')
        # NOTE(review): str.replace returns a new string; these calls never
        # modify `data` in place, so this "cleanup" currently has no effect.
        try:
            replaceSpace = line[5].replace(' ', '')
            data[i].replace('%s' % line[5], '%s' % replaceSpace)
        except Exception as e: print(repr(e))
        try:
            replaceComma = replaceSpace.replace(',', '')
            data[i].replace('%s' % replaceSpace, '%s' % replaceComma)
        except Exception as e: print(repr(e))
        # Record the line index where a new chromosome starts (column 0 changes).
        if chromosome != line[0]:
            cutoff.append(i+1)
            chromosome = line[0]
    print(cutoff)
    chromosome = []
    masha = []
    for i in range(38):
        chromosome.append([])
        with open('C:/Users/webaccus/OneDrive - Agilent Technologies/Desktop/DGAE/canfam3/chr%d_allsnps.txt' % (i+1), 'r') as f:
            chromosome[i] = f.readlines()
        masha.append([])
        masha[i] = data[cutoff[i]:cutoff[i+1]]
    firstGenes, lastGenes = False, False
    dogName = dogName
    for x in range(38):
        slices = 200
        sg.OneLineProgressMeter('Analysis progress', x + 1, 39, 'key')
        #contents
        inputSlices, refrenceSlices, inputSliceBounds, refrenceSliceBounds = analyseInput.sliceChromosome(masha[x], chromosome[x], slices)
        sliceAlignments, location = analyseInput.alignSlices(inputSlices, refrenceSlices, inputSliceBounds, refrenceSliceBounds, slices)
        slices, length, geneList = analyseInput.getBreedDominanceBySlice(inputSlices, refrenceSlices, sliceAlignments)
        #processStrings
        #subPath = '/DogNames'
        #export(geneList, subPath, 'geneList%d' % (x+1))
        firstGenes, lastGenes = False, False
        #print('\n', x+1)
        newpath = r'C:/Users/webaccus/OneDrive - Agilent Technologies/Desktop/DGAE/DogNames/%s' % dogName
        print(os.path.exists(newpath))
        if not os.path.exists(newpath):
            print(newpath)
            os.makedirs(newpath)
        subPath = r'/DogNames/%s/%d' % (dogName, x+1)
        export(slices, subPath, r'alignedSlices')
        export(length, subPath, r'length')
        export(geneList, subPath, r'geneList')
        export(location, subPath, r'location')
mainPath = r'C:/Users/webaccus/OneDrive - Agilent Technologies/Desktop/DGAE'  # project root (hard-coded)
keys = []  # radio-button keys (project names); filled by getProjects()
def getProjects(path):
    """Build one radio button per previously processed project found in `path`.

    Creates `path` when missing and records each project name in the
    module-level `keys` list so the event loop can recognise selections.
    Fix: the directory listing now uses the `path` parameter; the original
    tested `path` for existence but then listed the module-level
    `projectPath`, so the parameter was effectively ignored.
    """
    radioButtons = []
    if not os.path.exists(path):
        os.makedirs(path)
    projects = os.listdir(path)
    for project in projects:
        radioButtons.append(sg.Radio('%s' % project, 'file', key='%s' % project, enable_events=True))
        keys.append('%s' % project)
    #radioButtons.append(sg.Radio('Masha (sample)', 'file', key='Masha', enable_events=True))
    return radioButtons
def collapse(layout, key):
    """Wrap `layout` in a pinned Column so it can be hidden/shown via `key`."""
    column = sg.Column(layout, key=key)
    return sg.pin(column)
#simpleGUI user interface
projectPath = mainPath + '/DogNames'
SYMBOL_UP = '▲'
SYMBOL_DOWN = '▼'
# Main window: file picker, dog name, existing projects, a collapsible
# chromosome-selection section (-SEC2-) and a collapsible output console.
layout = [
    [sg.Text('select a .bim file containing the sequenced dna of your dog')],
    [sg.In(), sg.FileBrowse()],
    [sg.Text('Type the name of your dog', size=(15, 1)), sg.InputText()],
    [sg.Text('or select a pre-processed file from below', size=(30, 1))],
    getProjects(projectPath),
    [sg.T(SYMBOL_DOWN, enable_events=True, k='-OPEN SEC2-', text_color='purple'),
     sg.T('click drop down icon to select a chromosome for visualization', enable_events=True, text_color='purple', k='-OPEN SEC2-TEXT')],
    [collapse(view.showLayout(), '-SEC2-')],
    [collapse([[sg.Output(size=(50,10), key='-OUTPUT-')]], 'print')],
    [sg.Submit(key='Submit'), sg.Cancel(key='Cancel')]
]
window = sg.Window('Select a file for processing', layout, finalize=True)
opened2 = False
window['-SEC2-'].update(visible=opened2)   # start with the section collapsed
# Event loop: dispatch GUI events until Cancel/close.
# NOTE(review): closing the window yields event=None, which would crash the
# startswith() call below -- confirm whether that path matters here.
while True:
    event, values = window.read()
    #print(event, values)
    if event.startswith('-OPEN SEC2-'):
        # Toggle the chromosome-selection section.
        opened2 = not opened2
        window['-OPEN SEC2-'].update(SYMBOL_DOWN if opened2 else SYMBOL_UP)
        window['-SEC2-'].update(visible=opened2)
    if str(event) == 'Submit':
        fname = values[0]
        dogName = values[1]
        print(fname, values)
        window['-OUTPUT-'].update()
        processSpecimen(fname, dogName)
    elif str(event) == 'Cancel':
        window.close()
        break
    elif str(event).isnumeric() == True:
        # A chromosome number was clicked -- open the visualization.
        try:
            # NOTE(review): `dogName in locals() == False` is a chained
            # comparison and is always False, so dogName is never (re)bound
            # here; probably meant `('dogName' in locals()) == False`.
            if dogName in locals() == False:
                dogName = values[1]
            view.viewLoop(window, dogName)
        except:
            window['-OUTPUT-'].update('You did not select or name a valid project file')
            window['print'].update(visible=True)
    else:
        if str(event) in keys:
            # An existing project was selected; reveal the section.
            print(event)
            dogName = str(event)
            opened2 = True
            window['-SEC2-'].update(visible=opened2)
    #print(event, values)
|
# Ask for the user's sex initial and echo the selection (Portuguese prompts).
sexo = str(input("Digite a inicial do seu sexo: (M para masculino ou F para feminino) "))
if sexo in ('M', 'm'):
    print("O sexo selecionado foi Masculino")
elif sexo in ('F', 'f'):
    print("O sexo selecionado foi Feminino")
else:
    print("Sexo Indefinido")
import bpy
from .utils.global_settings import SequenceTypes
# TODO: Use a handler to auto move the fades with extend
# and the strips' handles
class FadeStrips(bpy.types.Operator):
    """
    Fade the opacity (or volume, for sound strips) of the selected strips
    to zero.  By default the fade lasts 0.5 seconds.
    """
    bl_idname = "power_sequencer.fade_strips"
    bl_label = "Fade Strips"
    bl_description = "Fade left, right or both sides of all selected strips in the VSE"
    bl_options = {'REGISTER', 'UNDO'}

    # Fade length in seconds; converted to frames in execute().
    fade_duration = bpy.props.FloatProperty(
        name="Fade Duration",
        description="The Duration of the Fade",
        default=0.5,
        min=0)
    fade_type = bpy.props.EnumProperty(
        items=[('both', 'Fade in and out', 'Fade selected strips in and out'),
               ('left', 'Fade in', 'Fade in selected strips'),
               ('right', 'Fade out', 'Fade out selected strips')],
        name="Fade type",
        description="Fade in, out, or both in and out. Default is both.",
        default='both')
    function = bpy.props.StringProperty("")

    @classmethod
    def poll(cls, context):
        # Always available.
        return True

    def execute(self, context):
        """Insert fade keyframes on every selected strip; report how many were faded."""
        scene = bpy.context.scene
        fps = scene.render.fps / scene.render.fps_base
        self.fade_length = int(self.fade_duration * fps)
        selection = bpy.context.selected_sequences
        if not selection:
            return {"CANCELLED"}
        fade_sequence_count = 0
        for s in selection:
            # Sound strips animate volume; everything else animates blend_alpha.
            max_value = s.volume if s.type in SequenceTypes.SOUND else s.blend_alpha
            if not max_value:
                max_value = 1.0
            # Create animation data and an action if there is none in the scene
            if scene.animation_data is None:
                scene.animation_data_create()
            if scene.animation_data.action is None:
                action = bpy.data.actions.new(scene.name + "Action")
                scene.animation_data.action = action
            # Create fade
            fcurves = bpy.context.scene.animation_data.action.fcurves
            self.fade_clear(s)
            frame_start, frame_end = s.frame_final_start, s.frame_final_end
            # fade_in_frames = (frame_start, frame_start + self.fade_length)
            # fade_out_frames = (frame_end - self.fade_length, frame_end)
            fade_fcurve, fade_curve_type = self.fade_find_fcurve(s)
            if fade_fcurve is None:
                fade_fcurve = fcurves.new(
                    data_path=s.path_from_id(fade_curve_type))
            # Skip strips too short to hold the requested fade(s).
            min_length = self.fade_length * 2 if self.fade_type == 'both' else self.fade_length
            if not s.frame_final_duration > min_length:
                continue
            keys = fade_fcurve.keyframe_points
            if self.fade_type in ['left', 'both']:
                keys.insert(frame=frame_start, value=0)
                keys.insert(
                    frame=frame_start + self.fade_length, value=max_value)
            if self.fade_type in ['right', 'both']:
                keys.insert(
                    frame=frame_end - self.fade_length, value=max_value)
                keys.insert(frame=frame_end, value=0)
            fade_sequence_count += 1
        self.report({"INFO"}, "Added fade animation to {!s} sequences.".format(
            fade_sequence_count))
        return {"FINISHED"}

    def fade_find_fcurve(self, sequence=None):
        """
        Checks if there's a fade animation on a single sequence.
        Looks for the fcurve matching the sequence: volume for audio
        sequences and blend_alpha for other sequences.
        Returns a tuple of (fade_fcurve, fade_type); fade_fcurve is None
        when no matching fcurve exists.
        """
        fcurves = bpy.context.scene.animation_data.action.fcurves
        if not sequence:
            raise AttributeError('Missing sequence parameter')
        fade_fcurve = None
        fade_type = 'volume' if sequence.type in SequenceTypes.SOUND else 'blend_alpha'
        for fc in fcurves:
            if (fc.data_path == 'sequence_editor.sequences_all["' +
                    sequence.name + '"].' + fade_type):
                fade_fcurve = fc
                break
        return fade_fcurve, fade_type

    def fade_clear(self, sequence=None):
        """
        Deletes all keyframes in the blend_alpha
        or volume fcurve of the provided sequence.
        """
        if not sequence:
            raise AttributeError('Missing sequence parameter')
        fcurves = bpy.context.scene.animation_data.action.fcurves
        fade_fcurve = self.fade_find_fcurve(sequence)[0]
        if fade_fcurve:
            fcurves.remove(fade_fcurve)
|
def golf(m):
    """Shortest-path cost from node 0 to the last node of adjacency matrix `m`.

    m[i][j] is the edge cost from i to j, 0 meaning "no edge".  Returns 0
    when the last node is unreachable.
    """
    distances = [1e9] * len(m)
    distances[0] = 0
    r(m, distances, 0)
    return 0 if distances[-1] == 1e9 else distances[-1]
def r(m, d, i):
    """Relax all edges leaving node i in place on distance list `d`, recursing on every improvement."""
    for j, cost in enumerate(m[i]):
        if not cost:
            continue   # 0 means "no edge"
        candidate = d[i] + cost
        if candidate < d[j]:
            d[j] = candidate
            r(m, d, j)
|
class Sirket():
    """Minimal company model: head count, name, managers, assistant managers
    and an active/bankrupt flag.

    NOTE(review): the attribute spellings `calısansayisi` (dotless i) and
    `faaaliyet` (triple 'a') are kept as-is for backward compatibility.
    """
    def __init__(self,calisansayisi,sirketismi,mudurler,muduryrd,faaliyet):
        self.calısansayisi = calisansayisi
        self.sirketismi = sirketismi
        self.mudurler = mudurler
        self.muduryrd = muduryrd
        self.faaaliyet = faaliyet
    def calısanAl(self,sayi):
        """Hire `sayi` employees."""
        self.calısansayisi += sayi
    def calısanKov(self,sayi):
        """Fire `sayi` employees."""
        self.calısansayisi -= sayi
    def sirketIsmiDegistir(self,isim):
        """Rename the company."""
        self.sirketismi = isim
    def sirketiBatir(self):
        """Mark the company as no longer active."""
        self.faaaliyet = False
    def mudurEkle(self,isim):
        """Add a manager."""
        self.mudurler.append(isim)
    def mudurYrdEkle(self,isim):
        """Add an assistant manager.

        Fix: this method used to be named `muduryrd`, which was shadowed by
        the instance attribute `self.muduryrd` set in __init__ -- calling it
        on an instance raised TypeError ('list' object is not callable), so
        it could never be used.  Attribute access via the old name behaves
        exactly as before.
        """
        self.muduryrd.append(isim)
    def bilgileriGoster(self):
        """Print a summary of the company's state."""
        print("""
Sirket İsmi : {}
Calisan sayisi : {}
Mudurler : {}
Mudur Yardimcilari : {}
Sirket Aktif mi ? : {}
""".format(self.sirketismi,self.calısansayisi,self.mudurler,self.muduryrd,self.faaaliyet))
# Demo: create a company, mutate it, and print its state.
PortiCode = Sirket(78,"PortiCode",["Furkan Portakal","Mert Sis"],["Mehmet Ekici","Ozan Tufan"],True)
PortiCode.calısanAl(10)
PortiCode.mudurEkle("Ali Koc")
PortiCode.sirketiBatir()
PortiCode.bilgileriGoster()
|
#from typing import List, Dict
from . import settings
#NodeEdgesDict = Dict[int, Dict[str, int]]
def check_dep_lemma(sentence, dep_dict, dep, lemma) -> bool:
    """Return True when any target of dependency `dep` in `dep_dict` has the given lemma."""
    targets = dep_dict.get(dep)
    if targets is None:
        return False
    return any(sentence.token[target_id].lemma == lemma for target_id in targets)
def check_negation(sentence, dep, targetID):
    """Return True when the dependency edge (dep -> targetID) marks a negation.

    'neg' edges always negate; 'det'/'advmod' negate via a negating lemma
    ("no", "not", "n't", "never"); 'cc:preconj' negates via "neither".
    """
    if dep == "neg":
        return True
    if dep in ("det", "advmod"):
        if sentence.token[targetID].lemma in ("no", "not", "n't", "never"):
            return True
    elif dep == "cc:preconj":
        if sentence.token[targetID].lemma == "neither":
            return True
    return False
def check_word_negation(sentence, tokenID, source_edges_dict):
    """Return True when any outgoing dependency edge of tokenID is a negation."""
    return any(
        check_negation(sentence, edge_dep, target_id)
        for edge_dep, targets in source_edges_dict[tokenID].items()
        for target_id in targets
    )
class PhrasesResolver:
    """Creates phrases from dependencies.

    Caches a sentence's dependency edges (indexed by source and by target
    token id) plus resolved phrases; coreferences resolve via doc_coref_dict.
    """
    def __init__(self, coref_dict):
        # tokenID -> {dep: [target tokenIDs]} for outgoing edges
        self.source_edges_dict = dict()
        # tokenID -> {dep: [source tokenIDs]} for incoming edges
        self.target_edges_dict = dict()
        # tokenID -> resolved phrases (memoised by get_resolved_phrases)
        self.phrases_dict = dict()
        self.doc_coref_dict = coref_dict
    def clear(self):
        """Drop all per-sentence state (the document-level coref dict is kept)."""
        self.source_edges_dict.clear()
        self.target_edges_dict.clear()
        self.phrases_dict.clear()
    def fill_dicts(self, sentence):
        """Index the sentence's dependency edges; edge endpoints are 1-based, tokens 0-based."""
        self.clear()
        for edge in getattr(sentence, settings.get_dep_type()).edge:
            self.source_edges_dict.setdefault(edge.source - 1, dict()).setdefault(edge.dep, list()).append(edge.target - 1)
            self.target_edges_dict.setdefault(edge.target - 1, dict()).setdefault(edge.dep, list()).append(edge.source - 1)
    def get_resolved_phrases(self, sentence, tokenID):
        """Return (and cache) the phrases for tokenID, preferring coref-cluster phrases."""
        if(tokenID in self.phrases_dict): return self.phrases_dict[tokenID]
        token = sentence.token[tokenID]
        clusterID = token.corefClusterID
        # Cluster id 0 means "no coreference cluster" -- resolve locally then.
        resolved = self.doc_coref_dict[clusterID] if clusterID != 0 else resolve_phrases(sentence, tokenID, self)
        self.phrases_dict[tokenID] = resolved
        return resolved
def __get_normalized_phrases_dicts(sentence, tokenID, resolver:PhrasesResolver, normalize = True, ignored_tokenID = -1):
    """Recursively collect phrase dicts ({token position: word}) rooted at tokenID.

    Follows amod/advmod/compound/nummod/nmod/conj dependencies, expands
    'fixed' multi-word expressions (enhanced mode), resolves possessive
    pronouns through the coref dict, and returns [] as soon as a negation
    is found.  `normalize` lemmatises a plural-noun root; `ignored_tokenID`
    excludes one dependency target from expansion.
    """
    token = sentence.token[tokenID]
    # Lemmatise plural nouns (NN...S) when normalising; otherwise lower-case
    # unless the token is a named entity (ner != "O").
    normalized_word = token.lemma if normalize and token.pos.startswith("NN") and token.pos.endswith("S") else token.word.lower() if token.ner == "O" else token.word
    normalized_phrases = [{tokenID: normalized_word}]
    if(settings.get_is_enhanced()): #for "is a type of something"
        target_dict = resolver.target_edges_dict.get(tokenID, None)
        if(target_dict is not None and "fixed" in target_dict):
            for fixed_tokenID in target_dict["fixed"]:
                # Pull in all parts of the fixed expression...
                for fixed_targets_list in resolver.source_edges_dict[fixed_tokenID].values():
                    for fixed_target_tokenID in fixed_targets_list:
                        normalized_phrases[0][fixed_target_tokenID] = sentence.token[fixed_target_tokenID].lemma
                # ...and expand its governors recursively.
                if(fixed_tokenID in resolver.target_edges_dict):
                    for fixed_sources_list in resolver.target_edges_dict[fixed_tokenID].values():
                        for fixed_source_tokenID in fixed_sources_list:
                            if(fixed_source_tokenID in normalized_phrases[0]): continue #cycle checking
                            new_dicts = __get_normalized_phrases_dicts(sentence, fixed_source_tokenID, resolver, False)
                            # Cartesian merge of current phrases with the new expansions.
                            normalized_phrases = [{**current_dict, **new_dict}
                                                  for current_dict in normalized_phrases
                                                  for new_dict in new_dicts]
    if(tokenID not in resolver.source_edges_dict): return normalized_phrases
    for edge_dep, edge_targets_list in resolver.source_edges_dict[tokenID].items():
        for target_tokenID in edge_targets_list:
            if(target_tokenID == ignored_tokenID): continue
            # Negated phrases are discarded entirely.
            if(check_negation(sentence, edge_dep, target_tokenID)): return []
            if(settings.get_is_enhanced()):
                if(next((True for normalized_dict in normalized_phrases #cycle checking
                         if target_tokenID in normalized_dict), False)):
                    continue
                if(not edge_dep.startswith("conj")):
                    target_dep_dict = resolver.target_edges_dict[target_tokenID]
                    if(next((True for target_dep in target_dep_dict #multiple roots checking
                             if target_dep.startswith('conj')), False)):
                        continue
            if(edge_dep == "amod" or edge_dep == "advmod" or edge_dep == "compound" or (edge_dep == "nummod" and not normalize)):
                # Modifiers merge into every current phrase variant.
                new_dicts = __get_normalized_phrases_dicts(sentence, target_tokenID, resolver, False)
                normalized_phrases = [{**current_dict, **new_dict}
                                      for current_dict in normalized_phrases
                                      for new_dict in new_dicts]
            elif(edge_dep.startswith("nmod")):
                #"such as" checking
                if((settings.get_is_enhanced() and edge_dep == "nmod:such_as") or
                   (target_tokenID in resolver.source_edges_dict and check_dep_lemma(sentence, resolver.source_edges_dict[target_tokenID], "case", "such"))): continue
                new_dicts = __get_normalized_phrases_dicts(sentence, target_tokenID, resolver, False)
                if(edge_dep == "nmod:poss"):
                    target_token = sentence.token[target_tokenID]
                    target_pos = target_token.pos
                    is_possessive = target_pos == "PRP$"
                    # Replace a (possessive) pronoun by each of its coref phrases.
                    if((target_pos == "PRP" or is_possessive) and target_token.corefClusterID != 0):
                        resolved_corefs = resolver.doc_coref_dict.get(target_token.corefClusterID, None)
                        if(resolved_corefs is not None):
                            resolved_new_dicts = []
                            for new_dict in new_dicts:
                                for coref_phrase in resolved_corefs:
                                    resolved_new_dict = dict(new_dict)
                                    resolved_new_dict[target_tokenID] = coref_phrase
                                    # Fractional key keeps "'s" ordered right after the owner.
                                    if(is_possessive): resolved_new_dict[target_tokenID + 0.5] = "'s"
                                    resolved_new_dicts.append(resolved_new_dict)
                            new_dicts = resolved_new_dicts
                normalized_phrases = [{**current_dict, **new_dict}
                                      for current_dict in normalized_phrases
                                      for new_dict in new_dicts]
                # Re-attach the case marker ("of", "in", ...) as its lemma.
                target_source_dict = resolver.source_edges_dict.get(target_tokenID, None)
                if(target_source_dict is not None and "case" in target_source_dict):
                    for case_tokenID in target_source_dict["case"]:
                        for current_dict in normalized_phrases:
                            current_dict[case_tokenID] = sentence.token[case_tokenID].lemma
            elif(edge_dep.startswith("conj")):
                # Conjuncts yield additional, independent phrase variants.
                normalized_phrases.extend(__get_normalized_phrases_dicts(sentence, target_tokenID, resolver, normalize))
    return normalized_phrases
def resolve_phrases(sentence, tokenID, resolver: PhrasesResolver, ignored_tokenID = -1):
    """Build phrase strings for tokenID by joining each phrase dict's words in position order."""
    phrase_dicts = __get_normalized_phrases_dicts(sentence, tokenID, resolver, ignored_tokenID=ignored_tokenID)
    phrases = []
    for phrase_dict in phrase_dicts:
        ordered_words = [phrase_dict[position] for position in sorted(phrase_dict)]
        phrases.append(' '.join(ordered_words))
    return phrases
def resolve_phrases_from_coref_dict(sentence, tokenID, coref_dict, ignored_tokenID = -1):
    """Convenience wrapper: build a fresh resolver for the sentence and resolve tokenID."""
    phrase_resolver = PhrasesResolver(coref_dict)
    phrase_resolver.fill_dicts(sentence)
    return resolve_phrases(sentence, tokenID, phrase_resolver, ignored_tokenID)
|
import math
import sys
# Frequency-calibration script: read one signed integer per line from stdin,
# then repeatedly apply the deltas until a running total repeats; print the
# totals along the way and report the first repeated total.
seen = dict()
count = 0
first = 0
nums = []
for line in sys.stdin:
    # NOTE(review): int() already parses a leading '-', so this branch is
    # redundant (though harmless).
    if(line[0] == '-'):
        a = int(line[1:])
        nums.append(-a)
    else:
        nums.append(int(line))
while True:
    for n in nums:
        count += n
        if count in seen:
            first = count
            print("seen " + str(first) )
            exit(1)
        seen[count] = 1
        print(count)
# NOTE(review): unreachable -- the loop above only terminates via exit(1).
print(first)
import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class BaseModel(models.Model):
    """Abstract base model providing a UUID primary key and audit timestamps."""
    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    # NOTE(review): field name is `update_at` (missing 'd'); renaming to
    # `updated_at` would need a schema migration, so it is flagged, not changed.
    update_at = models.DateTimeField(auto_now=True, verbose_name=_("Updated date"))
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("Created date"))

    class Meta:
        abstract = True
|
import numpy as np
from numpy.polynomial import polynomial as P
import matplotlib.pyplot as plt
# Sample cos(x) and its analytic derivative on [0, 8*pi].
x = np.linspace(0, 8 * np.pi, 50)
y1 = np.cos(x)
y2 = np.sin(x - np.pi)   # equals -sin(x) = d/dx cos(x)
# Fitting data
coeff, stats = P.polyfit(x, y1, 20, full=True)
# NOTE(review): polyroots of a degree-20 fit returns complex roots; np.real
# keeps only the real parts, so some plotted "roots" can be spurious.
roots = np.real(P.polyroots(coeff))
fit = P.Polynomial(coeff)
yf1 = fit(x)
# Differentiating fitted polynomial
new_coeff = P.polyder(coeff)
dfit = P.Polynomial(new_coeff)
yf2 = dfit(x)
# Plotting results for illustration
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.set_title('Data and fitting')
ax2.set_title('Differentiation')
ax1.set_xlim(0, x.max())
ax2.set_xlim(0, x.max())
ax1.set_ylim(-1.25, 1.25)
ax2.set_ylim(-1.25, 1.25)
ax1.plot(x, yf1, label='Fitted', color='gray')
ax1.scatter(x, y1, label='Original', edgecolor='none', facecolor='red')
# Hollow circles mark the (real parts of the) fitted polynomial's roots.
ax1.scatter(roots, np.zeros(roots.shape), facecolor='none', edgecolor='black', s=50)
ax2.plot(x, yf2, label='Differentiated', color='gray')
ax2.scatter(x, y2, label='Original', edgecolor='none', facecolor='red')
fig.savefig('polynomials.png')
|
# https://projecteuler.net/problem=10
# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
# Find the sum of all the primes below two million.
def check_primness(prime_candidate):
    """Return True when prime_candidate is prime.

    Fix: trial division now runs only up to sqrt(n) -- O(sqrt n) instead of
    the original O(n) countdown from n-1 -- and values below 2 are correctly
    reported as non-prime (the original returned True for 1).
    Written with while/range arithmetic so it runs on both Python 2 and 3.
    """
    if prime_candidate < 2:
        return False
    i = 2
    while i * i <= prime_candidate:
        if prime_candidate % i == 0:
            return False
        i += 1
    return True
def run_test():
    """Collect all primes up to two million (Python 2: xrange / print statement).

    NOTE(review): printing every prime and using trial division makes this
    extremely slow for this range; a sieve would be the practical choice.
    """
    primes = []
    range_of_test = 2000000
    for i in xrange(2, range_of_test + 1):
        if check_primness(i) is True:
            print i
            primes.append(i)
    return primes
def sum_primes():
    """Return the sum of all primes below two million (Project Euler 10).

    Fix: the original computed `summation` but never returned (or printed)
    it, so the answer was silently discarded.
    """
    primes = run_test()
    summation = 0
    for i in primes:
        summation += i
    return summation
# I used a lot of the code for 7. 10001th prime here. Quite sure it will work
# properly but also that it will take a heck of a long time
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
# Load the demo image and keep a scratch copy for drawing marker dots on.
road=cv2.imread(r'/home/abhilash/Coding/computervision/objectDetectionWithOpenCVandPython/DATA/road_image.jpg')
roadCopy=np.copy(road)
# Watershed seed labels (int32, one integer label per marker) and the
# colour overlay showing the resulting segments.
markerImage=np.zeros(road.shape[:2],dtype=np.int32)
segments=np.zeros(road.shape,dtype=np.uint8)
def create_rgb(i):
    """Map tab10 colour index i to a 0-255 colour tuple for OpenCV drawing."""
    fractional_rgb = np.array(cm.tab10(i)[:3])
    return tuple(fractional_rgb * 255)
colors=[]
# Pre-compute ten distinct marker colours from the tab10 colormap.
for i in range(10):
    colors.append(create_rgb(i))
###
# GLOBAL VARIABLES
# COLOR CHOICE
nMarkers=10
currentMarker=1     # marker label drawn on the next click (chosen with keys 0-9)
marksUpdated=False  # set by the mouse callback when new seeds were drawn
# DEF Mouse Callback function
def mouse_callback(event,x,y,flags,param):
    """Paint a watershed seed at the clicked point.

    Draws the current marker id into markerImage (the label input for
    cv2.watershed) and a matching colored dot on the displayed road copy.
    """
    # fix: the original declared `global marks_updated` - a name that does
    # not exist - so `marksUpdated=True` below created a function-local
    # variable and the main loop never learned that new seeds were drawn.
    global marksUpdated
    if event ==cv2.EVENT_LBUTTONDOWN:
        cv2.circle(markerImage,(x,y),10,(currentMarker),-1)
        cv2.circle(roadCopy,(x,y),10,colors[currentMarker],-1)
        marksUpdated=True
# WHILE TRUE
cv2.namedWindow('Road Image')
cv2.setMouseCallback('Road Image',mouse_callback)
# Event loop: show the seeded image and the current segmentation until Esc.
while True:
    cv2.imshow('Watershed Segments',segments)
    cv2.imshow('Road Image',roadCopy)
    # CLOSE ALL WINDOWS
    k=cv2.waitKey(1)
    if k==27:  # Esc quits
        break
    # CLEARING ALL COLORS C KEY
    elif k==ord('c'):
        roadCopy=road.copy()
        markerImage=np.zeros(road.shape[:2],dtype=np.int32)
        segments=np.zeros(road.shape,dtype=np.uint8)
    # Update the color choice
    elif k>0 and chr(k).isdigit():
        currentMarker=int(chr(k))
    # NOTE(review): marksUpdated is never reset, so after the first seed the
    # watershed is recomputed every frame - confirm whether that is intended.
    if marksUpdated:
        markerImageCopy=markerImage.copy()
        cv2.watershed(road,markerImageCopy)
        segments=np.zeros(road.shape,dtype=np.uint8)
        # fix: the original wrote `for colorInd in nMarkers:` which raises
        # TypeError because nMarkers is an int; iterate the marker ids instead
        for colorInd in range(nMarkers):
            segments[markerImageCopy==(colorInd)]=colors[colorInd]
cv2.destroyAllWindows() |
# Automate the Daily Bing Search
#
#
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time
# NOTE(review): FirefoxBinary is deprecated in modern Selenium; the hard-coded
# Windows path also makes this machine-specific.
binary = FirefoxBinary('C:\Program Files (x86)\Mozilla Firefox\Firefox.exe')
browser = webdriver.Firefox(firefox_binary=binary)
# Open the Microsoft live.com sign-in page that redirects back to Bing.
browser.get("https://login.live.com/login.srf?wa=wsignin1.0&rpsnv=13&ct=1480117030&rver=6.7.6631.0&wp=MBI&wreply=https%3a%2f%2fwww.bing.com%2fsecure%2fPassport.aspx%3frequrl%3dhttp%253a%252f%252fwww.bing.com%252f%253fwlexpsignin%253d1&lc=1033&id=264960")
usr = input("enter username")
pwd = input("enter password")
def login_entry(username, password):
    """Fill in the two-step Microsoft login form and submit it."""
    browser.find_element_by_id('i0116').send_keys(username)
    browser.find_element_by_id('idSIButton9').click()
    # give the password page time to load
    time.sleep(3)
    browser.find_element_by_id('i0118').send_keys(password)
    browser.find_element_by_id('idSIButton9').click()
# Log in with the credentials typed at the prompts above.
login_entry(usr, pwd)
def brute_search(count):
    """Drive Bing searches (for daily reward points).

    NOTE(review): most of the intended loop body is commented out (see the
    xpath "ERROR HERE" note below); as written this performs a single search
    of yesterday's day-of-year number, opens the Images tab, then merely
    sleeps through `count`+1 loop iterations without further searches.
    """
    starting_number = int(time.strftime("%j"))  # today's day of year (1-366)
    search_input = browser.find_element_by_id('sb_form')
    search_enter = browser.find_element_by_id('sb_form_go')
    time.sleep(2)
    search_input.send_keys(str(starting_number-1))
    search_enter.click()
    browser.find_element_by_xpath("//*[contains(text(), 'Images')]").click()
    for i in range(starting_number, starting_number+count+1):
        time.sleep(3)
        # ERROR HERE:
        #browser.find_element_by_xpath("//*[contains(title(), 'Search for:')]").click()
        #print(i)
        time.sleep(3)
# Run the (partially disabled) search loop for 30 iterations.
brute_search(30)
|
import random

# Mental-arithmetic quiz: show "x op y = r" where r may be off by a small
# random error; the player answers whether the shown result is correct.
while True:
    x = random.randint(0,20)
    y = random.randint(0,20)
    error = random.randint(-2,2)
    operator = ["+","-","*","/"]
    op = random.choice(operator)
    if op == "+":
        r = x + y + error
    elif op == "-":
        r = x - y + error
    elif op == "*":
        r = x * y + error
    else:
        if y == 0:
            # fix: x / 0 crashed with ZeroDivisionError; skip to a new question
            continue
        r = x / y + error
    # fix: the original printed `result`, which is undefined on the first
    # pass (NameError) and stale on later passes; the proposed value is r
    print(x,op,y,"=",r)
    # fix: the prompt says "Y/N" but only lowercase answers were matched
    answer = input("Y/N:").lower()
    result = None
    if answer == "y":
        result = "Yay" if error == 0 else "Nay"
    if answer == "n":
        result = "Yay" if error != 0 else "Nay"
print(result) |
# Question Link : https://www.hackerrank.com/challenges/non-divisible-subset/problem
from collections import Counter, defaultdict
def nonDivisibleSubset(k, S):
    """Return the size of the largest subset of S in which no pair of
    elements sums to a multiple of k.

    Two elements conflict exactly when their remainders mod k add up to k
    (or both are 0, or both are k/2 for even k), so it suffices to count
    elements per remainder and keep the better side of each remainder pair.
    """
    counts = Counter(value % k for value in S)
    total = 0
    for rem in range(k // 2 + 1):
        mate = k - rem
        if rem == 0 or rem == mate:
            # remainder pairs with itself: at most one such element is safe
            if counts[rem] > 0:
                total += 1
        else:
            # keep whichever of the two complementary remainder classes is larger
            total += max(counts[rem], counts[mate])
    return total
# Method to Solve : Hash
# Time Complexity : O(n)
# Space Complexity : O(n)
# Restraint : 0 <= n <= 100 |
# Generate a Verilog sin table module.
import math

SAMPLES = 256
OUTMAX = 255

# Emit the module header: 8-bit index in, 8-bit sample out.
print("module sinTable(")
print("input [7:0] in,")
print("output [7:0] out);")
print("")
print("assign out = ")
# One conditional-operator arm per sample. Index 0..SAMPLES-1 maps onto
# -90..+89.3 degrees, and the sine value is rescaled from [-1, 1] to
# [0, OUTMAX].
for sample in range(SAMPLES):
    degrees = (sample * 180) / SAMPLES -90
    sine = math.sin(math.radians(degrees))
    level = int(round(((sine + 1) * OUTMAX ) / 2.0))
    print("\t(in == %d) ? %d : " % (sample, level))
# default arm terminating the conditional chain
print("\t0;")
print("endmodule") |
import boto3
import botocore
import paramiko
import sounddevice
import numpy as np
import time
import gc
from botocore.client import Config
# Forever: record a short audio clip, upload it to S3, run a classifier on
# an EC2 host over SSH, and read back its verdict.
duration = 0.01 # seconds of audio captured per iteration
sample_rate=44100
# SECURITY: hard-coded AWS credentials committed to source. Revoke these and
# load credentials from the environment or an IAM role instead.
ACCESS_KEY_ID = 'AKIAI3GG45ZOXLW5C2XA'
ACCESS_SECRET_KEY = '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'
BUCKET_NAME = 'prova-bucket2'
while(1):
    # capture `duration` seconds of mono audio, then play it back
    X = sounddevice.rec(int(duration * sample_rate), samplerate=sample_rate, channels=1)
    sounddevice.wait()
    print("Ascolta")
    sounddevice.play(X, sample_rate)
    # persist the samples as text and upload them to S3
    np.savetxt('miofile.txt', X)
    data = open('miofile.txt', 'rb')  # NOTE(review): never closed - leaks one fd per iteration
    s3 = boto3.resource(
        's3',
        aws_access_key_id=ACCESS_KEY_ID,
        aws_secret_access_key=ACCESS_SECRET_KEY,
        config=Config(signature_version='s3v4')
    )
    s3.Bucket(BUCKET_NAME).put_object(Key='miofile.txt', Body=data)
    print ("Done - Caricato. Adesso eseguo il comando su EC2")
    # fresh SSH session per iteration; run the classifier remotely
    cert = paramiko.RSAKey.from_private_key_file("random.pem")
    c = paramiko.SSHClient()
    # NOTE(review): AutoAddPolicy disables host-key verification (MITM risk)
    c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    print ("connecting...")
    c.connect( hostname = "ec2-54-200-218-31.us-west-2.compute.amazonaws.com",
            username = "ec2-user", pkey = cert)
    print ("connected!!!")
    comando_2= 'python ClassiPi.py'
    # (X was already saved to a file above)
    stdin_2, stdout_2, stderr_2 = c.exec_command(comando_2)
    #print ('Output del programma: ',stdout_2.readlines())
    # use readline() to read the first line
    testsite_array = []
    testsite_array = stdout_2.readlines()
    print(testsite_array)
    # NOTE(review): assumes the remote script prints at least 4 lines -
    # IndexError otherwise; confirm ClassiPi.py's output format
    val = testsite_array[3]
    print("VAL: ",type(val),len(val))
    print("Primo valore: ",val[0])
    if "0" in val:
        print("trovato")
c.close() |
# -*- coding: utf-8 -*-
import datetime
from flask import g
from portfelo.contrib.models import RootDocument
class User(RootDocument):
    """MongoDB user-account document (Python 2 codebase: fields use `unicode`)."""
    collection_name = u'users'
    # Field schema consumed by the RootDocument base class (MongoKit-style
    # declaration - confirm against portfelo.contrib.models).
    structure = {'email': unicode,
                 'first_name': unicode,
                 'last_name': unicode,
                 'password': unicode,  # NOTE(review): ensure a hash is stored here, never plaintext
                 'added': datetime.datetime,
                 'modified': datetime.datetime,
                 'last_login': datetime.datetime,
                 'is_active': bool,
                 'is_superuser': bool,
                 'sessions': list}
    required_fields = ['email', 'password', 'added', 'modified', \
                       'is_active', 'is_superuser']
    # Callables: evaluated at document-creation time to stamp UTC timestamps.
    default_values = {'added': datetime.datetime.utcnow,
                      'modified': datetime.datetime.utcnow}
    def __repr__(self):
        return '<User %s >' % (self.email,)
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for BaseModelValidator."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import unittest
from core.platform import models
from jobs import base_model_validator
from jobs import base_model_validator_errors as errors
import apache_beam as beam
from apache_beam.runners.direct import direct_runner
from apache_beam.testing import test_pipeline as pipeline
from apache_beam.testing import util as beam_testing_util
# Resolve the model classes and datastore services through the platform
# registry (keeps the beam jobs platform-agnostic).
(base_models, user_models) = models.Registry.import_models(
    [models.NAMES.base_model, models.NAMES.user])
datastore_services = models.Registry.import_datastore_services()
class MockModel(base_models.BaseModel):
    """Bare BaseModel subclass used as the fixture for the validator tests."""
    pass
class BaseModelValidatorTests(unittest.TestCase):
    """End-to-end check of the combined BaseModelValidator PTransform,
    plus the time fixtures shared by the subclasses below."""

    def setUp(self):
        # Reference times: a year in the past / future around "now".
        self.now = datetime.datetime.utcnow()
        self.year_ago = self.now - datetime.timedelta(weeks=52)
        self.year_later = self.now + datetime.timedelta(weeks=52)

    def test_base_model_validator_ptransform(self):
        """Each bad model yields exactly one error; the valid one yields none."""
        with pipeline.TestPipeline(runner=direct_runner.DirectRunner()) as p:
            invalid_id = MockModel(
                id='123@?!*',
                deleted=False,
                created_on=self.year_ago,
                last_updated=self.now)
            # last_updated in the future => mutated while the job was running
            invalid_timestamp = MockModel(
                id='124',
                deleted=False,
                created_on=self.now,
                last_updated=self.year_later)
            # deleted a year ago => flagged as expired
            expired_model = MockModel(
                id='125',
                deleted=True,
                created_on=self.year_ago,
                last_updated=self.year_ago)
            valid_model = MockModel(
                id='126',
                deleted=False,
                created_on=self.year_ago,
                last_updated=self.now)
            pcoll = (
                p
                | beam.Create([
                    invalid_id, invalid_timestamp, expired_model, valid_model
                ]))
            output = pcoll | base_model_validator.BaseModelValidator()
            beam_testing_util.assert_that(
                output,
                beam_testing_util.equal_to([
                    errors.ModelInvalidIdError(invalid_id),
                    errors.ModelMutatedDuringJobError(invalid_timestamp),
                    errors.ModelExpiredError(expired_model)
                ]))
class ValidateDeletedTests(BaseModelValidatorTests):
    """Unit test for the ValidateDeleted DoFn in isolation."""

    def test_process_reports_error_for_old_deleted_model(self):
        stale_model = MockModel(
            id='123',
            deleted=True,
            created_on=self.year_ago,
            last_updated=self.year_ago)
        with pipeline.TestPipeline(runner=direct_runner.DirectRunner()) as p:
            error_pcoll = (
                p
                | beam.Create([stale_model])
                | beam.ParDo(base_model_validator.ValidateDeleted()))
            beam_testing_util.assert_that(
                error_pcoll,
                beam_testing_util.equal_to(
                    [errors.ModelExpiredError(stale_model)]))
class ValidateModelTimeFieldTests(BaseModelValidatorTests):
    """Unit tests for the ValidateModelTimeFields DoFn in isolation."""

    def test_process_reports_model_timestamp_relationship_error(self):
        # created_on after last_updated is internally inconsistent
        with pipeline.TestPipeline(runner=direct_runner.DirectRunner()) as p:
            invalid_timestamp = MockModel(
                id='123',
                created_on=self.now,
                last_updated=self.year_ago)
            pcoll = p | beam.Create([invalid_timestamp])
            output = (
                pcoll
                | beam.ParDo(
                    base_model_validator.ValidateModelTimeFields()))
            beam_testing_util.assert_that(
                output,
                beam_testing_util.equal_to([
                    errors.ModelTimestampRelationshipError(invalid_timestamp)
                ]))

    def test_process_reports_model_mutated_during_job_error(self):
        # last_updated in the future means the model changed mid-job
        with pipeline.TestPipeline(runner=direct_runner.DirectRunner()) as p:
            invalid_timestamp = MockModel(
                id='124',
                created_on=self.now,
                last_updated=self.year_later)
            pcoll = p | beam.Create([invalid_timestamp])
            output = (
                pcoll
                | beam.ParDo(
                    base_model_validator.ValidateModelTimeFields()))
            beam_testing_util.assert_that(
                output,
                beam_testing_util.equal_to([
                    errors.ModelMutatedDuringJobError(invalid_timestamp)
                ]))
class ValidateModelIdTests(BaseModelValidatorTests):
    """Unit test for the ValidateModelIdWithRegex DoFn in isolation."""

    def test_validate_model_id(self):
        bad_id_model = MockModel(
            id='123@?!*',
            created_on=self.year_ago,
            last_updated=self.now)
        id_regex = '^[A-Za-z0-9-_]{1,%s}$' % base_models.ID_LENGTH
        with pipeline.TestPipeline(runner=direct_runner.DirectRunner()) as p:
            error_pcoll = (
                p
                | beam.Create([bad_id_model])
                | beam.ParDo(
                    base_model_validator.ValidateModelIdWithRegex(), id_regex))
            beam_testing_util.assert_that(
                error_pcoll,
                beam_testing_util.equal_to(
                    [errors.ModelInvalidIdError(bad_id_model)]))
|
from unittest import TestCase
from core.models import User, connection, Question
from .base import DatabaseTestCaseMixin
class ModelsTestCase(TestCase, DatabaseTestCaseMixin):
    """Tests for Question.check_answer fuzzy matching."""

    def setUp(self):
        self.db = connection.test
        super(ModelsTestCase, self).setUp()
        self.setup_connection()

    def tearDown(self):
        self.teardown_connection()

    def test_question_check_answer(self):
        question = Question()
        question['correct'] = u'Yes'
        # case-insensitive exact matching
        self.assertTrue(question.check_answer('yes'))
        self.assertFalse(question.check_answer('no'))
        # alternatives count only when explicitly allowed
        question['alternatives'] = [u'Maybe', u'Perhaps']
        self.assertTrue(question.check_answer('yes', alternatives_are_correct=True))
        self.assertTrue(question.check_answer('maybe', alternatives_are_correct=True))
        self.assertTrue(question.check_answer('perhaps', alternatives_are_correct=True))
        self.assertFalse(question.check_answer('inte', alternatives_are_correct=True))
        # edit distance: not applied to answers that are too short...
        self.assertFalse(question.check_answer('yeah'))
        # ...but small typos are tolerated in sufficiently long answers
        question['correct'] = u'Correct'
        self.assertTrue(question.check_answer('corect'))
        self.assertFalse(question.check_answer('korecct'))
        question['correct'] = u'Tupac Shakur'
        self.assertTrue(question.check_answer('tupak Shacur'))
        self.assertFalse(question.check_answer('Toopack shakure'))  # too wrong

    def test_question_check_answer_with_numbers(self):
        question = Question()
        question['correct'] = u'90,000'
        self.assertTrue(question.check_answer('90,000'))
        self.assertFalse(question.check_answer('60,000'))
        question['correct'] = u'Terminator 2'
        self.assertTrue(question.check_answer('Terminatur 2'))
|
#!/usr/bin/env python
# coding=utf-8
# author:jingjian@datagrand.com
# datetime:2019/4/19 下午2:43
import os, sys, re, json, traceback
from uuid import uuid4 as uuid
import requests
from conf import conf
from pydocx import PyDocX
def get_file_names(file_dir, fileType):
    '''
    Recursively collect the names (without directory parts) of all files
    under a directory that carry a given extension.
    :param file_dir: directory to walk
    :param fileType: extension without the leading dot, e.g. "txt"
    :return: list of matching file names
    '''
    wanted_suffix = "." + fileType
    matches = []
    for root, dirs, files in os.walk(file_dir):
        matches.extend(name for name in files
                       if os.path.splitext(name)[1] == wanted_suffix)
    return matches
def get_folder_names(file_dir):
    '''
    Recursively collect the names of all sub-directories under a directory.
    :param file_dir: directory to walk
    :return: list of directory names (not full paths)
    '''
    names = []
    for _root, dirs, _files in os.walk(file_dir):
        names.extend(dirs)
    return names
def get_file_names_without_type(file_dir):
    '''
    Recursively collect the names of all files under a directory,
    regardless of extension.
    :param file_dir: directory to walk
    :return: list of file names (not full paths)
    '''
    names = []
    for _root, _dirs, files in os.walk(file_dir):
        names.extend(files)
    return names
def get_uuid():
    '''
    Return a random uuid4 as a 32-character lowercase hex string
    (equivalent to str(uuid4()) with the dashes removed).
    '''
    return uuid().hex
def file_extension(path):
    '''
    Return the extension of a file path, including the leading dot
    ("" when the path has no extension).
    '''
    _root, ext = os.path.splitext(path)
    return ext
def doc2docx(file_path):
    '''
    Convert a .doc file to .docx through the unoconv HTTP service.
    :param file_path: path of the .doc file to convert
    :return: the converted .docx content (bytes)
    '''
    # fix: open the upload inside a context manager so the file handle is
    # closed after the request completes (it previously leaked)
    with open(file_path, 'rb') as doc_file:
        response = requests.post(
            "http://" + conf.unoconv_host + ":3000/unoconv/docx",
            files={'file': doc_file})
    return response.content
def docx2html(file_path):
    '''
    Convert a .docx file to an HTML string via PyDocX.
    :param file_path: path of the .docx file
    :return: HTML markup produced by PyDocX.to_html
    '''
    return PyDocX.to_html(file_path)
def get_file_name_from_path(path):
    """Return the final component (file name) of a path; "" for a
    path that ends with a separator."""
    _head, tail = os.path.split(path)
    return tail
class TreeNode(object):
    """Simple n-ary tree node, presumably used to mirror a directory
    hierarchy (only constructed here; traversal code lives elsewhere)."""
    def __init__(self, name):
        self.parent = None    # parent TreeNode, or None for the root
        self.children = []    # child TreeNode list
        self.name = name      # node name
        self.level = 0        # depth from the root - presumably set during build
        self.path = ""        # accumulated path - presumably set during build
        self.files = []       # file names attached to this node
# Ad-hoc manual test entry point: prints one fresh uuid.
if __name__ == "__main__":
    # for a in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', '', '1', '1', '1',]
    print(get_uuid())
    # a = get_file_names("/Users/jingjian/datagrand/2019_2/guojianengyuanshenhua/20190506/download", "")
    # print(len(a))
    # for each in a:
    #     os.system("mv /Users/jingjian/datagrand/2019_2/guojianengyuanshenhua/2019-5-6/00/{0} /Users/jingjian/datagrand/2019_2/guojianengyuanshenhua/2019-5-6/00/{1}.txt".format(each,each))
|
from os import mkdir
from os.path import join
from shutil import copy
from libw3g.util import get_replay_id
from libdota.tests.util import get_replay_path
def add(replay_file):
    """Copy replay_file into the replay store under its replay id."""
    target_fn = get_replay_path(get_replay_id(replay_file).read())
    copy(replay_file, target_fn)
    # fix: parenthesized print works on both Python 2 and 3; the original
    # bare print statement is a SyntaxError under Python 3
    print('added replay %s' % target_fn)
if __name__=="__main__":
    import sys
    # treat every command-line argument as a replay file to import
    for f in sys.argv[1:]:
        add(f)
|
#!/usr/bin/python
#!/apps/python/3.4.3/bin/python3
# Toshiyuki Gogami
# Dec 20, 2019
import sys
import time, os.path
from subprocess import call
#import concurrent.futures
#from logging import StreamHandler, Formatter, INFO, getLogger
#from concurrent.futures import ThreadPoolExecutor
#from concurrent.futures.process import ProcessPoolExecutor
#import numpy as np
# Input list: one job per line; the first whitespace-separated token on each
# line is passed to ./mkroot.
datafile="data.dat"
def main():
    """Run ./mkroot once for every first token listed in data.dat."""
    # fix: context manager closes the input file (the handle was leaked)
    with open(datafile,"r") as inputfile:
        lines = inputfile.readlines()
    for line in lines:
        data = line.split()
        if not data:
            continue  # fix: a blank line previously raised IndexError
        com = "./mkroot " + data[0]
        # NOTE(review): shell=True with file-derived text - acceptable for a
        # trusted local data file, unsafe if data.dat may contain shell
        # metacharacters
        call(com,shell=True)
# Time the whole batch and report elapsed wall-clock seconds.
stime = time.time()
main()
print("\n Jobs were done in %.0f sec \n" % float(time.time()-stime))
|
import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import matplotlib.pyplot as plt
from numpy.random import seed
from tensorflow import set_random_seed
# Usage
def printUsage():
    """Print the command-line usage string and exit with status 2."""
    print("Usage: KerasPredictor.py white|red")
    sys.exit(2)
# Select the wine variety from argv; any problem (missing arg, wrong value)
# funnels into printUsage(), which exits.
try:
    wine = sys.argv[1]
    if wine != "red" and wine != "white":
        printUsage()
except:
    # NOTE(review): bare except also intercepts the SystemExit raised by
    # printUsage() above, then calls it again - harmless here but fragile
    printUsage()
# Load data
df = pd.read_csv("Data/winequality-"+wine+".csv", sep=";")
# Preprocessing should go here
# Divide data in training and testing
dfTrain, dfTest = train_test_split(df, test_size=0.2, shuffle=False)
# Divide target from data
# NOTE(review): DataFrame.ix was removed in pandas 1.0 - use .iloc on
# modern pandas; columns 0..10 are features, column 11 is the quality label
X = np.array(dfTrain.ix[:,:11])
Y = np.array(dfTrain.ix[:,11])
xTest = np.array(dfTest.ix[:,:11])
yTest = np.array(dfTest.ix[:,11])
# Scale data
# NOTE(review): fitting a second scaler on the test set leaks test-set
# statistics; conventional practice is scaler1.transform(xTest)
scaler1 = MinMaxScaler()
scaledX = scaler1.fit_transform(X)
scaler2 = MinMaxScaler()
scaledTest = scaler2.fit_transform(xTest)
# Define seed
seed(1)
set_random_seed(1)  # TF1-era API (tf.random.set_seed in TF2)
# Define and compile model
model = Sequential()
# deeper net for white wine (larger dataset), shallower for red
if wine=="white":
    model.add(Dense(11, input_dim=11, activation="relu"))
    model.add(Dense(15, activation="relu"))
    model.add(Dense(8, activation="relu"))
else:
    model.add(Dense(10, input_dim=11, activation="relu"))
    model.add(Dense(8, activation="relu"))
model.add(Dense(1))  # single linear output: regression on the quality score
adam = Adam(lr=0.01)
model.compile(loss='mean_absolute_percentage_error', optimizer=adam)
# Fit the model
# full-batch gradient descent (batch_size = whole training set)
model.fit(scaledX, Y, batch_size=scaledX.shape[0], epochs=410, validation_data=(scaledTest, yTest))
trainPrediction = model.predict(scaledX)
testPrediction = model.predict(scaledTest)
# Write "predicted actual" pairs for the test set
f = open('results-'+wine+'.txt', 'w')
for i in range(len(yTest)):
    f.write(str(int(round(testPrediction[i][0])))+" "+str(yTest[i])+"\n")
f.close() |
import argparse
import gym
import gym_gvgai
from player import Player
import pdb
import numpy as np
# Command-line configuration for training/evaluating a DQN player on GVGAI
# games; every flag except -train_mode has a default.
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('-trial_num', default = 1, required = False)
parser.add_argument('-batch_size', default = 32, required = False)
parser.add_argument('-lr', default = .00025, type = float, required = False)
parser.add_argument('-gamma', default = .99, required = False)
parser.add_argument('-eps_start', default = 1, required = False)
parser.add_argument('-eps_end', default = .1, required = False)
parser.add_argument('-eps_decay', default = 200., required = False)
parser.add_argument('-target_update', default = 10, required = False)
parser.add_argument('-img_size', default = 64, required = False)
parser.add_argument('-num_episodes', default = 20000, type = int, required = False)
parser.add_argument('-max_steps', default = 5e6, required = False)
parser.add_argument('-max_mem', default = 50000, required = False)
parser.add_argument('-model_name', default = 'DQN', required = False)
parser.add_argument('-model_weight_path', default = 'gvgai-aliens_episode157_trial1_levelswitch.pt', required = False)
parser.add_argument('-test_mode', default = 0, type = int, required = False)
parser.add_argument('-pretrain', default = 0, type = int, required = False)
parser.add_argument('-cuda', default = 1, required = False)
parser.add_argument('-doubleq', default = 1, type = int, required = False)
parser.add_argument('-level_switch', default = 'random', type = str, required = False)
parser.add_argument('-steps_to_restart', default = 1000, type = int, required = False)
parser.add_argument('-game_name', default = 'aliens', required = False)
parser.add_argument('-level_to_test', default = 3, type = int, required = False)
parser.add_argument('-train_mode', default = 'all_levels', type = str, required = True)
# pdb.set_trace()
# python main.py -game_name gvgai-aliens-lvl0-v0
config = parser.parse_args();
# Collect every registered GVGAI env id, then keep the levels belonging to
# the chosen game.
all_gvgai_games = [env.id for env in gym.envs.registry.all() if env.id.startswith('gvgai')]
config.all_level_names = [game for game in all_gvgai_games if config.game_name in game]
config.level_names = config.all_level_names
# config.level_names = [game for game in config.all_level_names if 'lvl0' in game or 'lvl1' in game or 'lvl2' in game]
# config.level_names = [game for game in config.all_level_names if 'lvl3' in game or 'lvl4' in game]
# for game_name in all_gvgai_games:
#     print(game_name)
#     print("_"*10)
# pdb.set_trace()
# print(config.game_name)
# gvgai-missilecommand-lvl0-v0
# module load jdk/1.8.0_45-fasrc01
print("Trial {}".format(config.trial_num))
print("Game: {}".format(config.game_name))
game_player = Player(config)
# game_player.env.reset()
# pdb.set_trace()
game_player.train_model()
# game_player.test_model()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import rospy
from geometry_msgs.msg import Pose2D, Twist
from tms_msg_rc.srv import rc_robot_control, rc_robot_controlResponse
from tms_msg_db.srv import TmsdbGetData, TmsdbGetDataRequest
import datetime
import pymongo
from math import sin, cos, atan2, pi, radians, degrees, sqrt
BURGER_MAX_LIN_VEL = 0.22 #turtlebot3 model = burger (max linear speed, m/s)
BURGER_MAX_ANG_VEL = 2.84  # max angular speed, rad/s
#pub = rospy.Publisher("turtlebot3/cmd_vel",Twist,queue_size = 10)
pub = rospy.Publisher("cmd_vel",Twist,queue_size = 10)
# State shared between the service callback and the control loop in main():
GOAL = None      # Pose2D target, or None when idle
pose = Pose2D()  # latest robot pose read from the TMS database
cmd = 10         # control mode: 10=stop, 1=turn, 2=straight, else go-to-point
def main():
    """Follow goal poses from the service callback with a P controller.

    Runs at 30 Hz: refreshes the robot pose from the TMS database, then
    publishes a Twist whose content depends on the current mode (`cmd`).
    """
    global GOAL
    global cmd
    global pose
    print "Turtlebot3_voronoi_follower"
    rospy.init_node ('turtlebot3_voronoi_follower')
    rospy.wait_for_service('/tms_db_reader')
    service = rospy.Service(
        "turtlebot3_goal_pose" , rc_robot_control, goalPoseCallBack)
    r = rospy.Rate(30)
    while not rospy.is_shutdown():
        pose = getCurrentPose()
        if None == GOAL:
            continue
        # proportional gains (the KD* derivative terms are declared but unused)
        KPang = 0.5#1
        KDang = 0
        KPdist = 0.5#1.0
        KDdist = 0
        ARV_DIST = 0.2
        twist = Twist()
        errorT = normalizeAng(GOAL.theta - pose.theta)
        if cmd == 10:
            # stop: zero velocity and drop the goal
            twist.angular.z = 0
            twist.linear.x = 0
            GOAL = None
        elif cmd == 1:
            # rotate in place toward the goal heading
            tmp_turn = limit(KPang * errorT,1,-1)
            #rospy.loginfo("turn:{0}".format(tmp_turn))
            twist.angular.z = tmp_turn
            twist.linear.x = 0
        elif cmd == 2:
            # drive straight: project the goal offset onto the robot x axis
            errorX = GOAL.x - pose.x
            errorY = GOAL.y - pose.y
            errorNX = errorX * cos(-pose.theta) - errorY * sin(-pose.theta)
            tmp_spd = limit(KPdist * errorNX, BURGER_MAX_LIN_VEL, -BURGER_MAX_LIN_VEL)
            twist.angular.z = 0
            twist.linear.x = tmp_spd
            #print("pose : {0}, {1}, {2}".format(pose.x, pose.y, pose.theta))
        else:
            # go-to-point: steer toward the goal while approaching it
            errorX = GOAL.x - pose.x
            errorY = GOAL.y - pose.y
            targetT = atan2(errorY, errorX)
            errorNX = errorX * cos(-pose.theta) - errorY * sin(-pose.theta)
            errorNT = normalizeAng(targetT - pose.theta)
            tmp_spd = limit(KPdist * errorNX, 1, -1)
            tmp_turn = limit(KPang * errorNT, 1, -1)
            # NOTE(review): this local shadows the module-level distance() helper
            distance = sqrt(errorX ** 2 + errorY **2)
            #rospy.loginfo("dist:{0}".format(distance))
            #rospy.loginfo("spd:{0}" "turn:{1}".format(tmp_spd, tmp_turn))
            twist.angular.z = tmp_turn
            twist.linear.x = tmp_spd
        pub.publish(twist)
        r.sleep()
def constrain(input, low, high):
    """Clamp `input` into the inclusive range [low, high].

    The parameter name shadows the builtin input(); kept as-is so keyword
    callers are unaffected.
    """
    if input < low:
        return low
    if input > high:
        return high
    return input
def goalPoseCallBack(req):
    """Service handler: accept a goal pose and block until it is reached.

    req.cmd selects the mode (10=stop now, 1=turn, 2=straight, else go-to);
    req.arg = [x, y, theta, distance_remaining_to_final_goal].
    NOTE(review): the `while True` below busy-waits with no sleep until the
    main loop's pose updates satisfy the arrival condition - it pins a CPU
    core and relies on `pose`/`cmd` being updated from the other thread.
    """
    global pose
    global cmd
    if req.cmd == 10:
        cmd = req.cmd
        return rc_robot_controlResponse()
    elif req.cmd == 2:
        # refuse a straight-drive request when the heading is already off
        # by more than ~0.1 rad
        rad = diffRadian(pose.theta, req.arg[2])
        if rad >= 0.1 or rad <= -0.1:
            return rc_robot_controlResponse()
        else:
            cmd = req.cmd
    else:
        cmd = req.cmd
    global GOAL
    GOAL = Pose2D()
    GOAL.x = req.arg[0]
    GOAL.y = req.arg[1]
    GOAL.theta = req.arg[2]
    goal_dis = req.arg[3]
    rospy.loginfo("SubGOAL : {0}".format(GOAL))
    print("cmd : {0}".format(cmd))
    #*******************************************
    # conditions for moving next operation
    while True:
        if cmd == 1:
            # turning finished when within +/-0.02 rad of the goal heading
            rad = diffRadian(pose.theta, GOAL.theta)
            if rad > -0.02 and rad < 0.02:
                if goal_dis <= 0.25:
                    print("goal_arrived")
                else:
                    print("turned")
                cmd = 10
                return rc_robot_controlResponse()
        elif cmd == 2:
            # straight drive ends when close enough or the heading drifts
            dis = distance(pose.x, pose.y, GOAL.x, GOAL.y)
            rad = diffRadian(pose.theta, GOAL.theta)
            if dis <= 0.25 or rad >= 0.1 or rad <= -0.1:
                cmd = 10
                print("pose : {0}, {1}, {2}".format(pose.x, pose.y, pose.theta))
                return rc_robot_controlResponse()
    # unreachable: the loop above can only exit via the returns inside it
    return rc_robot_controlResponse()
def distance(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return sqrt(dx * dx + dy * dy)
def diffRadian(rad1, rad2):
    """Shortest signed angular difference rad2 - rad1, wrapped to [-pi, pi]."""
    delta = normalizeAng(rad2) - normalizeAng(rad1)
    return normalizeAng(delta)
def getCurrentPose():
    """Fetch the robot's current pose from the TMS database service.

    Returns a zeroed Pose2D when the lookup fails or returns no rows.
    """
    pose = Pose2D()
    db_req = TmsdbGetDataRequest()
    db_req.tmsdb.id = 2013      # robot id in the TMS database - TODO confirm
    db_req.tmsdb.sensor = 3001  # tracking sensor id - TODO confirm
    try:
        srv_client = rospy.ServiceProxy("/tms_db_reader", TmsdbGetData)
        res = srv_client(db_req)
        if 0 == len(res.tmsdb):
            return pose
        pose.x = res.tmsdb[0].x
        pose.y = res.tmsdb[0].y
        pose.theta = res.tmsdb[0].ry
    except rospy.ServiceException as e:
        print "Service call failed: %s" %e
    return pose
def normalizeAng(rad):
    """Wrap an angle in radians into the range [-pi, pi]."""
    full_turn = 2 * pi
    while rad > pi:
        rad -= full_turn
    while rad < -pi:
        rad += full_turn
    return rad
def limit(val, maxn, minn):
    """Clamp val to [minn, maxn]. Argument order is (value, upper, lower)."""
    capped = min(maxn, val)
    return minn if capped < minn else capped
if __name__ == '__main__':
    # Run the follower node; ROS shutdown (Ctrl-C) exits quietly.
    try:
        main()
    except rospy.ROSInterruptException:
        pass
|
# Convert a distance given in meters into whole kilometers plus leftover
# meters. Fixes: the prompt asked for kilometers while the arithmetic
# treats the value as meters, and the output labeled the meters remainder
# "cmeter"; divmod also avoids the float round-off the original
# (a / 1000 - b) * 1000 remainder could introduce.
x = input("\nEnter the distance in meters : ")
a = int(x)
b, c = divmod(a, 1000)
print("\nDistance is : " + str(b) + " Km and " + str(c) + " meter")
#!/usr/bin/env python3
"""Initial script."""
from brain_games.gameplay import play_game
from brain_games.games import prime
def main():
    """Launch the prime-number recognition game."""
    play_game(prime)


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import csv
import re
import os
import tweepy
import time
import random
import datetime
import pyautogui
import cv2
# NOTE(review): placeholder assignments - these four lines reference names
# that are never defined, so this script raises NameError on import until
# real values are substituted.
# SECURITY: load credentials from environment variables; never hard-code
# them in source.
consumer_key = consumer_key
consumer_secret = CONSUMER_SECRET_KEY
access_key = ACCESS_KEY
access_secret = ACCESS_SECRET_KEY
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
updated_status= False  # ensures the "Device woke" tweet is posted only once
def get_tweets(username):
    """Poll `username`'s latest tweet and execute it as a remote command.

    SECURITY: this is effectively a remote-control backdoor - tweet text
    reaches os.system() unsanitized, so anyone able to post to (or spoof)
    the watched timeline gets arbitrary shell access on this machine.
    Recognized keywords: "snapshot" (screen capture), "webcam" (camera
    capture); any other text is run verbatim in the shell. Tweets that
    contain "READY FOR NEXT COMMAND" (this bot's own replies) are skipped.
    """
    try:
        global updated_status
        number_of_tweets = 1
        for tweet in tweepy.Cursor(api.user_timeline, screen_name = username).items(number_of_tweets):
            # announce wake-up exactly once per process
            if (updated_status == False) :
                tweet_status = "Device woke at " + str(datetime.datetime.now())
                status = api.update_status(status=tweet_status)
                updated_status = True
            # extract the text field by regexing the object's repr
            # NOTE(review): fragile - tweet.text is the supported API
            tweet = str(tweet)
            tweetdata = re.search("'text': '(.*)', 'truncated'", tweet)
            tweetdata = tweetdata.group(1)
            if (tweetdata.find("snapshot") != -1 and tweetdata.find("READY FOR NEXT COMMAND") == -1):
                myScreenshot = pyautogui.screenshot()
                myScreenshot.save('screenshot.png')
                tweet_status = "SNAPSHOT Taken at " + str(datetime.datetime.now())
                media = api.media_upload("screenshot.png")
                post_result = api.update_status(status=tweet_status, media_ids=[media.media_id])
                tweet_text= " SUCESSFULLY Posted snapshot at " + str(datetime.datetime.now())+ "\nREADY FOR NEXT COMMAND"
                status = api.update_status(status=tweet_text)
                #os.remove('screenshot.png')
            elif (tweetdata.find("webcam") != -1 and tweetdata.find("READY FOR NEXT COMMAND") == -1):
                # grab a single frame from the default camera
                videoCaptureObject = cv2.VideoCapture(0)
                result = True
                while(result):
                    ret,frame = videoCaptureObject.read()
                    cv2.imwrite("Web.jpg",frame)
                    result = False
                videoCaptureObject.release()
                cv2.destroyAllWindows()
                tweet_text = "WEBCAM shot Taken at " + str(datetime.datetime.now())
                media = api.media_upload("Web.jpg")
                post_result = api.update_status(status=tweet_text, media_ids=[media.media_id])
                tweet_text= " SUCESSFULLY Posted webcomshot at " + str(datetime.datetime.now())+ "\nREADY FOR NEXT COMMAND"
                status = api.update_status(status=tweet_text)
                #os.remove('Web.jpg')
            elif (tweetdata.find("READY FOR NEXT COMMAND") == -1):
                # SECURITY: arbitrary shell execution of the tweet text
                os.system(tweetdata)
                try:
                    tweet_status = "SUCESSFULLY COMPLETED COMMAND: "+ tweetdata + "\nREADY FOR NEXT COMMAND"
                    status = api.update_status(status=tweet_status)
                except:
                    # retry with a random suffix - presumably to dodge
                    # Twitter's duplicate-status rejection
                    tweet_status = "SUCESSFULLY COMPLETED COMMAND: "+ tweetdata + "\nREADY FOR NEXT COMMAND " + str(random.randint(1,99999))
                    status = api.update_status(status=tweet_status)
            else:
                time.sleep(10)
    except:
        # NOTE(review): bare except silently swallows every error, including
        # the AttributeError raised when the regex above fails to match
        pass
# Poll forever. NOTE(review): USER_ID is never defined in this file - the
# watched account's screen name must be supplied.
while(1):
    get_tweets(USER_ID)
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-style regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.keras import regularizers
from tensor2tensor.utils import test_utils
import tensorflow as tf
import tensorflow_probability as tfp
ed = tfp.edward2  # shorthand for the Edward2 probabilistic-programming API
tf.compat.v1.enable_eager_execution()
class RegularizersTest(tf.test.TestCase):
    """Checks the half-Cauchy KL regularizer on a LogNormal posterior."""

    @test_utils.run_in_graph_and_eager_modes
    def testHalfCauchyKLDivergence(self):
        shape = (3,)
        regularizer = regularizers.get('half_cauchy_kl_divergence')
        # independent LogNormal(0, 1) over a length-3 event
        variational_posterior = ed.Independent(
            ed.LogNormal(loc=tf.zeros(shape), scale=1.).distribution,
            reinterpreted_batch_ndims=1)
        kl = regularizer(variational_posterior)
        kl_value = self.evaluate(kl)
        # a KL divergence is non-negative by definition
        self.assertGreaterEqual(kl_value, 0.)
# Run under TensorFlow's test runner when executed directly.
if __name__ == '__main__':
  tf.test.main()
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..one_drive_object_base import OneDriveObjectBase
class EducationTeacher(OneDriveObjectBase):
    """Graph API educationTeacher resource (generated-style wrapper around
    a property dictionary in the shape returned by the service)."""

    def __init__(self, prop_dict=None):
        # fix: the original default `prop_dict={}` is a mutable default
        # argument - every instance built without arguments shared (and
        # mutated) the same dict via the property setters below. If this
        # file is regenerated, port the fix to the code generator too.
        self._prop_dict = {} if prop_dict is None else prop_dict

    @property
    def teacher_number(self):
        """Gets and sets the teacherNumber

        Returns:
            str:
                The teacherNumber
        """
        if "teacherNumber" in self._prop_dict:
            return self._prop_dict["teacherNumber"]
        else:
            return None

    @teacher_number.setter
    def teacher_number(self, val):
        self._prop_dict["teacherNumber"] = val

    @property
    def external_id(self):
        """Gets and sets the externalId

        Returns:
            str:
                The externalId
        """
        if "externalId" in self._prop_dict:
            return self._prop_dict["externalId"]
        else:
            return None

    @external_id.setter
    def external_id(self, val):
        self._prop_dict["externalId"] = val
|
import json
import bs4
import requests
import urllib.request
from pprint import pprint
# game_id -> passing stats accumulated across every scraped game
stats_passing = {}
def schedule_url(year, stype, week):
    """
    Build the NFL.com XML schedule URL and scrape its games. `year` should
    be an integer, `stype` one of the strings `PRE`, `REG` or `POST`, and
    `week` a value in the range `[0, 17]`. (Docstring corrected: the
    parameter is named `week`, not `gsis_week`, and nothing is returned -
    the games are handed to get_game_id.)
    """
    xmlurl = 'http://www.nfl.com/ajax/scorestrip?'
    if stype == 'POST':
        week += 17  # postseason weeks continue after the 17 regular weeks
        if week == 21: # NFL.com you so silly
            week += 1
    url = '%sseason=%d&seasonType=%s&week=%d' % (xmlurl, year, stype, week)
    get_game_id(url)
def get_game_id(url):
    """Fetch the scorestrip XML and process each game's id (its 'eid' attr)."""
    handler = requests.get(url)
    soup = bs4.BeautifulSoup(handler.text, "lxml")
    # Get game id from nfl.com
    for message in soup.find_all('g'):
        msg_attrs = dict(message.attrs)
        game_id = msg_attrs['eid']
        get_game_data(game_id)
def get_game_data(game_id):
    """Download the game-center JSON for game_id and collect its passers."""
    json_url = 'http://www.nfl.com/liveupdate/game-center/'
    url = '%s%s/%s_gtd.json' % (json_url, game_id, game_id)
    resp = urllib.request.urlopen(url)
    get_players(resp, game_id)
def get_players(resp, game_id):
    """Collect the passing players for the game.

    NOTE(review): the loop rebinds stats_passing[game_id] on every player,
    so only the LAST passer of each game is retained; keying by player
    name (or storing passing_stats whole) looks like the actual intent.
    """
    data = json.loads(resp.read().decode())
    # merge away-team and home-team passers into one dict
    passing_stats = {**data[game_id]['away']['stats']['passing'],
                     **data[game_id]['home']['stats']['passing']}
    for name, item in passing_stats.items():
        stats_passing[game_id] = item
def save_players(filename):
    """Append the collected passing stats to `filename` as one JSON object.

    NOTE: 'a' append mode means repeated runs produce concatenated JSON
    objects in the same file.
    """
    payload = json.dumps({'games': stats_passing})
    with open(filename, 'a') as out:
        out.write(payload)
def player_loop(start_year, end_year, start_week, end_week, game_type):
    """Scrape passing stats over the year/week ranges (inclusive) and
    append the results to a hard-coded output file."""
    # display_categories()
    for year in range(start_year, (end_year + 1)):
        for week in range(start_week, (end_week + 1)):
            schedule_url(year, game_type, week)
    # NOTE(review): .py extension for a JSON payload; path is hard-coded
    filename = 'player_lists/reg_passing_stats_17.py'
    save_players(filename)
# Start Year, End Year, Start, Week, End Week, Game Type('PRE', "REG',
# or 'POST')
# Scrape week 2 of the 2017 regular season.
player_loop(2017, 2017, 2, 2, 'REG')
##############################################################################
#
# def display_players(passers):
# """Print the formatted passing players to the console."""
# with open(passers, 'r') as ViewFileOpen:
# data = ViewFileOpen.read()
# stuff = json.loads(data)
# pprint(stuff)
# #
# # pprint(data)
#
#
# display_players('player_lists/reg_passing_stats_17') |
import asyncio
import re
import time
from collections import OrderedDict
from contextlib import suppress
from typing import Any, AsyncIterator, Iterable, Mapping, Optional, Tuple
from cashews._typing import Key, Value
from cashews.serialize import SerializerMixin
from cashews.utils import Bitarray, get_obj_size
from .interface import NOT_EXIST, UNLIMITED, Backend
__all__ = ["Memory"]
# Sentinel distinguishing "key absent" from a stored None value.
_missed = object()
class _Memory(Backend):
    """
    Inmemory backend lru with ttl

    ``store`` maps key -> (expire_at or None, value). The OrderedDict's
    insertion order doubles as the LRU order: most recently used entries
    sit at the right end and the leftmost entry is evicted when ``size``
    is exceeded. Expired entries are removed lazily on access and by a
    periodic background sweeper started in init().
    """
    __slots__ = ["store", "_check_interval", "size", "__is_init", "__remove_expired_stop", "__remove_expired_task"]
    def __init__(self, size: int = 1000, check_interval: float = 1, **kwargs):
        # size: max number of entries before LRU eviction.
        # check_interval: seconds between expiry sweeps; falsy disables the sweeper.
        self.store: OrderedDict = OrderedDict()
        self._check_interval = check_interval
        self.size = size
        self.__is_init = False
        self.__remove_expired_stop = asyncio.Event()
        self.__remove_expired_task = None
        super().__init__(**kwargs)
    async def init(self):
        """Mark the backend ready and start the background expiry sweeper."""
        self.__is_init = True
        if self._check_interval:
            self.__remove_expired_stop = asyncio.Event()
            self.__remove_expired_task = asyncio.create_task(self._remove_expired())
    @property
    def is_init(self) -> bool:
        # True once init() has run (and close() has not reset it).
        return self.__is_init
    async def _remove_expired(self):
        """Periodically touch every key; get() drops entries whose TTL passed."""
        while not self.__remove_expired_stop.is_set():
            # Iterate a snapshot so deletion during iteration is safe.
            for key in dict(self.store):
                await self.get(key)
            # Sleep until the next sweep, waking early if close() is called.
            with suppress(asyncio.TimeoutError, TimeoutError):
                await asyncio.wait_for(self.__remove_expired_stop.wait(), self._check_interval)
    async def clear(self):
        """Drop every stored entry."""
        self.store = OrderedDict()
    async def set(
        self,
        key: Key,
        value: Value,
        expire: Optional[float] = None,
        exist: Optional[bool] = None,
    ) -> bool:
        """Store *value*; with exist=True only replace, with exist=False only add.

        Returns False when the exist precondition is not met.
        """
        if exist is not None:
            # Require the key's current presence to match the requested flag.
            if not (key in self.store) is exist:
                return False
        self._set(key, value, expire)
        return True
    async def set_raw(self, key: Key, value: Value, **kwargs: Any):
        # Bypasses TTL/LRU bookkeeping: stores the bare value, not an
        # (expire, value) tuple — only get_raw() can read it back.
        self.store[key] = value
    async def get(self, key: Key, default: Optional[Value] = None) -> Value:
        """Return the live value for *key*, or *default* if absent/expired."""
        return await self._get(key, default=default)
    async def get_raw(self, key: Key) -> Value:
        # Counterpart of set_raw(); no expiry handling.
        return self.store.get(key)
    async def get_many(self, *keys: Key, default: Optional[Value] = None) -> Tuple[Optional[Value], ...]:
        values = []
        for key in keys:
            val = await self._get(key, default=default)
            if isinstance(val, Bitarray):
                # NOTE(review): bitarray values are skipped entirely, so the
                # result can be SHORTER than *keys* (and positions shift) —
                # confirm this is intended rather than appending default.
                continue
            values.append(val)
        return values
    async def set_many(self, pairs: Mapping[Key, Value], expire: Optional[float] = None):
        """Store every (key, value) pair with the same optional TTL."""
        for key, value in pairs.items():
            self._set(key, value, expire)
    async def scan(self, pattern: str, batch_size: int = 100) -> AsyncIterator[Key]: # type: ignore
        """Yield keys matching a glob-style *pattern* ('*' is the wildcard)."""
        # Translate glob '*' into a regex and require a full-key match.
        pattern = pattern.replace("*", ".*")
        regexp = re.compile(pattern)
        for key in dict(self.store):
            if regexp.fullmatch(key):
                yield key
    async def incr(self, key: Key, value: int = 1, expire: Optional[float] = None) -> int:
        """Increment an integer counter and return the new value.

        *expire* is applied only when the counter is newly created
        (result == 1); later increments keep the existing TTL.
        """
        value += int(await self._get(key, 0)) # type: ignore
        _expire = None if value != 1 else expire
        self._set(key=key, value=value, expire=_expire)
        return value
    async def exists(self, key: Key) -> bool:
        """True when *key* is present and not expired."""
        return await self._key_exist(key)
    async def delete(self, key: Key):
        return await self._delete(key)
    async def _delete(self, key: Key) -> bool:
        """Remove *key*, firing removal callbacks; True when it existed."""
        if key in self.store:
            del self.store[key]
            await self._call_on_remove_callbacks(key)
            return True
        return False
    async def delete_many(self, *keys: Key):
        for key in keys:
            await self._delete(key)
    async def delete_match(self, pattern: Key):
        """Delete every key matching a glob-style pattern."""
        async for key in self.scan(pattern):
            await self._delete(key)
    async def get_match(
        self,
        pattern: str,
        batch_size: int = None,
    ) -> AsyncIterator[Tuple[Key, Value]]: # type: ignore
        """Yield (key, value) pairs matching *pattern*; bitarrays are skipped."""
        async for key in self.scan(pattern):
            value = await self.get(key)
            if not isinstance(value, Bitarray):
                yield key, value
    async def expire(self, key: Key, timeout: float):
        """Reset the TTL of an existing key to *timeout* seconds from now."""
        if not await self._key_exist(key):
            return
        value = await self._get(key, default=_missed)
        if value is _missed:
            return
        self._set(key, value, timeout)
    async def get_expire(self, key: Key) -> int:
        """Return remaining TTL in seconds, UNLIMITED, or NOT_EXIST."""
        if key not in self.store:
            return NOT_EXIST
        expire_at, _ = self.store[key]
        if expire_at is not None:
            return round(expire_at - time.time())
        return UNLIMITED
    async def ping(self, message: Optional[bytes] = None) -> bytes:
        # Mirrors Redis PING semantics: default/PING -> PONG, else echo.
        return b"PONG" if message in (None, b"PING") else message # type: ignore[return-value]
    async def get_bits(self, key: Key, *indexes: int, size: int = 1) -> Tuple[int, ...]:
        """Read *size*-bit fields at the given indexes (0 for missing keys)."""
        array: Bitarray = await self._get(key, default=Bitarray("0")) # type: ignore
        return tuple(array.get(index, size) for index in indexes)
    async def incr_bits(self, key: Key, *indexes: int, size: int = 1, by: int = 1) -> Tuple[int, ...]:
        """Increment *size*-bit fields in-place and return their new values."""
        array: Optional[Bitarray] = await self._get(key)
        if array is None:
            # Create and store the bitarray; later in-place incr() calls
            # mutate the same object already held in the store.
            array = Bitarray("0")
            self._set(key, array)
        result = []
        for index in indexes:
            array.incr(index, size, by)
            result.append(array.get(index, size))
        return tuple(result)
    def _set(self, key: Key, value: Value, expire: Optional[float] = None):
        """Store (expire_at, value), refresh LRU position, evict if over size."""
        expire = time.time() + expire if expire else None
        if expire is None and key in self.store:
            # No new TTL given: inherit the key's existing expiry.
            expire, _ = self.store[key]
        self.store[key] = (expire, value)
        self.store.move_to_end(key)
        if len(self.store) > self.size:
            # Evict the least recently used entry (leftmost).
            self.store.popitem(last=False)
    async def _get(self, key: Key, default: Optional[Value] = None) -> Optional[Value]:
        """Return the live value for *key*, lazily deleting it when expired."""
        if key not in self.store:
            return default
        # Touch the key so LRU order reflects this access.
        self.store.move_to_end(key)
        expire_at, value = self.store[key]
        if expire_at and expire_at < time.time():
            await self._delete(key)
            return default
        return value
    async def _key_exist(self, key: Key) -> bool:
        # Uses the sentinel so a stored None still counts as "exists".
        return (await self._get(key, default=_missed)) is not _missed
    async def is_locked(self, key: Key, wait: Optional[float] = None, step: float = 0.1) -> bool:
        """Check a lock key, optionally polling up to *wait* seconds for release."""
        if wait is None:
            return await self._key_exist(key)
        while wait > 0:
            if not await self._key_exist(key):
                return False
            wait -= step
            await asyncio.sleep(step)
        return await self._key_exist(key)
    async def unlock(self, key: Key, value: Value) -> bool:
        # NOTE(review): *value* (the lock owner token) is ignored, so any
        # caller can release any lock — confirm owner checking is not needed.
        return await self._delete(key)
    async def get_size(self, key: Key) -> int:
        """Approximate in-memory size of the stored entry, in bytes."""
        if key in self.store:
            return get_obj_size(self.store[key])
        return 0
    async def slice_incr(self, key: Key, start: int, end: int, maxvalue: int, expire: Optional[float] = None) -> int:
        """Sliding-window counter: keep stored values inside [start, end).

        Counts surviving values; when still below *maxvalue*, records a new
        event at *end*. Returns the resulting count (rate-limiter primitive).
        """
        val_list = await self._get(key)
        count = 0
        new_val = []
        if val_list:
            for val in val_list:
                if start <= val < end:
                    count += 1
                    new_val.append(val)
        if count < maxvalue:
            count += 1
            new_val.append(end)
        self._set(key, new_val, expire=expire)
        return count
    async def set_add(self, key: Key, *values: str, expire: Optional[float] = None):
        """Add *values* to the set stored at *key*, creating it if missing."""
        val = await self._get(key, default=set())
        val.update(values)
        self._set(key, val, expire=expire)
    async def set_remove(self, key: Key, *values: str):
        """Remove *values* from the set stored at *key* (no-op when absent)."""
        val = await self._get(key, default=set())
        val.difference_update(values)
        self._set(key, val)
    async def set_pop(self, key: Key, count: int = 100) -> Iterable[str]:
        """Pop up to *count* arbitrary members from the set and return them."""
        values = await self._get(key, default=set())
        _values = []
        for _ in range(count):
            if not values:
                break
            _values.append(values.pop())
        self._set(key, values)
        return _values
    async def get_keys_count(self) -> int:
        """Total number of stored entries (including not-yet-swept expired ones)."""
        return len(self.store)
    async def close(self):
        """Stop the expiry sweeper and mark the backend uninitialized."""
        self.__remove_expired_stop.set()
        if self.__remove_expired_task:
            await self.__remove_expired_task
            self.__remove_expired_task = None
        self.__is_init = False
class Memory(SerializerMixin, _Memory):
    """Public in-memory backend: _Memory with value (de)serialization mixed in."""
    pass
|
from ecs.entity import Entity
from ecs.time import Time
from ecs.world_clocks import WorldClocks
from core.inventory import Inventory
# World clock ticking in one-minute steps; no systems registered yet.
clocks = WorldClocks(
    Time(minutes=1),
    systems=[
    ]
)
# The player entity starts with a single, empty Inventory component.
player = Entity(
    Inventory()
)
|
from datetime import datetime
from django.db import models
# Create your models here.
# 课程对应多个章节、每个章节对应多节课、每节课对应多个视频 每个课程对应多个课程资源 如前端代码
# 课程信息表
class Course(models.Model):
    """A course: basic info, difficulty, engagement counters and cover image."""
    # Difficulty codes: cj=beginner, zj=intermediate, gj=advanced.
    DEGREE_CHOICES = (
        ("cj", "初级"),
        ("zj", "中级"),
        ("gj", "高级")
    )
    name = models.CharField(max_length=50, verbose_name="课程名")
    desc = models.CharField(max_length=300, verbose_name="课程描述")
    # TextField has no length limit. Plain text for now; planned to be
    # upgraded to a rich-text field later.
    detail = models.TextField(verbose_name="课程详情")
    # Course difficulty, restricted to DEGREE_CHOICES codes.
    degree = models.CharField(choices=DEGREE_CHOICES, max_length=2, verbose_name="难度")
    # Stored in minutes (smallest unit); the front end converts for display.
    learn_times = models.IntegerField(default=0, verbose_name="学习时长(分钟数)")
    # Learner count: incremented only when "start learning" is clicked.
    students = models.IntegerField(default=0, verbose_name="学习人数")
    fav_nums = models.IntegerField(default=0, verbose_name="收藏人数")
    image = models.ImageField(
        upload_to="courses/%Y/%m",
        verbose_name="封面图",
        max_length=100)
    # Page views: incremented on every visit to the course page.
    click_nums = models.IntegerField(default=0, verbose_name="点击数")
    # datetime.now is passed uncalled so it is evaluated per row creation.
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
    class Meta:
        verbose_name = "课程"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
# 章节
class Lesson(models.Model):
    """A chapter of a course (one course has many lessons)."""
    # FK so every lesson knows which course it belongs to.
    # on_delete=CASCADE: deleting the course deletes its lessons too.
    course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name="课程")
    name = models.CharField(max_length=100, verbose_name="章节名")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
    class Meta:
        verbose_name = "章节"
        verbose_name_plural = verbose_name
    def __str__(self):
        return '《{0}》课程的章节 >> {1}'.format(self.course, self.name)
# 每章视频
class Video(models.Model):
    """A video inside a lesson (one lesson has many videos)."""
    # FK so every video knows which lesson it belongs to;
    # deleting the lesson cascades to its videos.
    lesson = models.ForeignKey(Lesson, on_delete=models.CASCADE, verbose_name="章节")
    name = models.CharField(max_length=100, verbose_name="视频名")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
    class Meta:
        verbose_name = "视频"
        verbose_name_plural = verbose_name
    def __str__(self):
        return '{0}章节的视频 >> {1}'.format(self.lesson, self.name)
# 课程资源
class CourseResource(models.Model):
    """A downloadable resource attached to a course (one course has many)."""
    # FK so every resource knows which course it belongs to;
    # deleting the course cascades to its resources.
    course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name="课程")
    name = models.CharField(max_length=100, verbose_name="名称")
    # FileField renders an upload widget in the admin. It is stored as a
    # string (the file path), so max_length is required.
    download = models.FileField(
        upload_to="course/resource/%Y/%m",
        verbose_name="资源文件",
        max_length=100)
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
    class Meta:
        verbose_name = "课程资源"
        verbose_name_plural = verbose_name
    def __str__(self):
        return '《{0}》课程的资源: {1}'.format(self.course, self.name)
|
"""
coding:utf8
@Time : 2020/8/1 17:21
@Author : cjr
@File : get_ip.py
"""
from . import errors
import requests
def get_ips():
    """Fetch a list of free proxies and return them as 'protocol://ip:port' strings.

    Source: the public proxy pool at https://github.com/jiangxianli/ProxyIpLib.git
    The API response looks like:
        {
            "code": 0,
            "msg": "成功",
            "data": {
                "current_page": 1,
                "data": [
                    {
                        "ip": "119.167.153.50",
                        "port": "8118",
                        "protocol": "http",
                        ...
                    },
                    ...
                ],
                ...
            }
        }

    :return: list of proxy URL strings
    :raises errors.GetIpFailRequest: when the request fails or the list is empty
    """
    try:
        ips_info = requests.request('GET', 'https://ip.jiangxianli.com/api/proxy_ips').json()
    except Exception as e:
        # BUG FIX: the original only printed the exception and fell through
        # to use the unbound name `ips_info`, crashing with a confusing
        # NameError. Raise the domain error instead, chaining the cause.
        raise errors.GetIpFailRequest('IP列表获取失败') from e
    ip_list = ips_info['data']['data']
    if not ip_list:
        raise errors.GetIpFailRequest('IP列表获取失败')
    res = []
    for ip_info in ip_list:
        ip = ip_info['ip']
        protocol = ip_info['protocol']
        port = ip_info['port']
        res.append(f'{protocol}://{ip}:{port}')
    return res
|
import os
from nose.tools import assert_true, assert_false
from nerds.util.file import mkdir, rmdir
def test_mkdir():
    """mkdir should create both a parent and a nested directory."""
    parent, nested = "data", "data/foo"
    mkdir(parent)
    mkdir(nested)
    assert_true(os.path.exists(parent))
    assert_true(os.path.exists(nested))
def test_rmdir():
    """rmdir should remove nested then parent directories completely."""
    parent, nested = "data", "data/foo"
    mkdir(parent)
    mkdir(nested)
    # Remove innermost first: rmdir on a non-empty parent would fail.
    rmdir(nested)
    rmdir(parent)
    assert_false(os.path.exists(parent))
    assert_false(os.path.exists(nested))
|
#!/usr/bin/env python
# -*- coding: gb2312 -*-
#
# pi_function.py
#
# Copyright 2017 Administrator <Administrator@WIN-84KOMAOFRMQ>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import math
def make_random_list(m, a, c, s0):
    """Generate a pseudo-random sequence with a linear congruential generator.

    Returns m + 1 values: the seed s0 followed by m iterates of
    s[i+1] = (a * s[i] + c) mod m.
    """
    seq = [s0]
    for _ in range(m):
        seq.append((seq[-1] * a + c) % m)
    return seq
def make_pair(sx, sy, m):
    """Combine two digit streams into one of higher precision: high*m + low."""
    # Indexing follows len(sx), matching the original's behavior when the
    # two sequences differ in length.
    return [sx[i] * m + sy[i] for i in range(len(sx))]
def compute_pi(sx, sy, m):
    """Monte-Carlo estimate of pi from two pseudo-random coordinate streams.

    Samples every other index pair (step 2) in [1, m), counts points whose
    Euclidean norm is below the radius m*m, and scales the hit ratio by 4
    (quarter circle) and by step*step (sub-sampling compensation).
    """
    step = 2
    hits = 0
    for i in range(1, m, step):
        for j in range(1, m, step):
            if math.sqrt(sx[i] * sx[i] + sy[j] * sy[j]) < m * m:
                hits += 1
    return float(4 * hits / (m * m) * step * step)
if __name__ == "__main__":
    pi_sum = 0.0
    # Candidate multipliers k; the LCG uses a = 4k + 1 (with c odd and a
    # power-of-two modulus this yields a full-period generator).
    k_list = [3,5,7,11,13,17,19,23,31,37]
    total_times = 1
    m = 65536
    c = 5
    s0 = 1
    i = 0
    # Average over several runs; currently only one run is executed.
    while i < total_times:
        k1 = k_list[i]
        k2 = k_list[i+1]
        k3 = k_list[i+2]
        k4 = k_list[i+3]
        j = 0
        while j < 1:
            # Four independent streams with different multipliers/seeds...
            sx = make_random_list(m,4*k1+1,c,s0+1)
            sy = make_random_list(m,4*k2+1,c,s0+500)
            sk = make_random_list(m,4*k3+1,c,s0+1000)
            sz = make_random_list(m,4*k4+1,c,s0+2000)
            # ...paired up to double the precision of each coordinate.
            ssx = make_pair(sx, sy, m)
            ssy = make_pair(sk, sz, m)
            pi = compute_pi(ssx, ssy, m)
            print(pi)
            j += 1
        pi_sum += pi
        i += 1
    print(pi_sum/total_times)
|
import pandas as pd
from sec.sql import create_connection
from sec.tickers import Tickers
from sec.cluster import StringCluster
CONN = create_connection('sec.db')
TICKERS = Tickers()
# Pull all holdings reported for the 2020 year-end period.
data = pd.read_sql(
    sql="select * from holdings where report_pd == '12-31-2020';",
    con=CONN
)
# Normalize names, aggregate duplicates, keep the top 25k rows by value.
data['company'] = data['company'].str.upper()
data = data.groupby('company').sum().reset_index().sort_values(by='value', ascending=False).iloc[:25000, :]
data = data[data['company'] != ''] # remove blanks
data.reset_index(drop=True, inplace=True)
# Cluster near-identical company-name strings onto canonical ticker names.
cluster = StringCluster()
labels = cluster.fit_transform(data['company'], TICKERS.names)
data['company'] = labels.values
# Re-aggregate under the canonical names and map them to ticker symbols.
top_25k = data.groupby('company').sum().sort_values(by='value', ascending=False).reset_index()
top_25k['ticker'] = top_25k['company']
top_25k['ticker'].replace(TICKERS.tickers, inplace=True)
top_25k.to_sql('top_holdings', CONN, if_exists='replace', index=False)
CONN.close()
# test = pd.DataFrame({'in': data['company'], 'out': labels})
|
# A simple random arc function by @bmheades
# You may not use it commerically
import amulet
from amulet.api.selection import SelectionGroup
from amulet.api.data_types import Dimension
from amulet.api.level import BaseLevel
from amulet.api.level import World
from amulet.api.block import Block
from amulet_nbt import (
TAG_String,
TAG_Int,
TAG_Byte,
)
import random
import math
# function to calculate the average
def midpoint(p1, p2):
    """Return the midpoint of two coordinates, rounded to the nearest int."""
    total = p1 + p2
    return round(total / 2)
def colorful_arcs(
    world: BaseLevel, dimension: Dimension, selection: SelectionGroup, options: dict
):
    """Amulet operation: draw randomly colored wool arcs around the
    selection's center on a single Y level.

    options supplies "Arcs Density" (arcs per radius step, capped at 10)
    and "Inner Radius" (first arc's radius in blocks).
    """
    print("Colorful Arcs Started...")
    block_platform = "bedrock" # the platform the blocks below are defined in
    block_version = (1, 17, 0) # the version the blocks below are defined in
    # Add your blocks here
    # NOTE(review): bedrock wool color ids are usually underscore-separated
    # ("light_blue"); confirm "light blue" with a space resolves correctly.
    blockPalette = [
        Block("minecraft", "wool", {"color": TAG_String("white")}),
        Block("minecraft", "wool", {"color": TAG_String("orange")}),
        Block("minecraft", "wool", {"color": TAG_String("light blue")}),
        Block("minecraft", "wool", {"color": TAG_String("yellow")}),
        Block("minecraft", "wool", {"color": TAG_String("pink")}),
        Block("minecraft", "wool", {"color": TAG_String("cyan")}),
        Block("minecraft", "wool", {"color": TAG_String("blue")}),
        Block("minecraft", "wool", {"color": TAG_String("black")})
    ]
    # Test blocks
    air = Block("minecraft", "air")
    stone = Block("minecraft", "stone", {})
    # Finding the center and size of the selection
    # (with multiple boxes, the last box's values win)
    for box in selection:
        selectionCenterX = midpoint(box.max_x, box.min_x)
        selectionCenterY = midpoint(box.max_y, box.min_y)
        selectionCenterZ = midpoint(box.max_z, box.min_z)
        selectionSizeX = box.max_x - box.min_x
        selectionSizeY = box.max_y - box.min_y
        selectionSizeZ = box.max_z - box.min_z
    # P5.js inspired Arc Function
    def arc(x1, y1, z1, r, angleStart, angleEnd, block):
        # Plot one block per whole degree; 3.142 approximates pi for the
        # degree-to-radian conversion.
        while(angleStart < angleEnd):
            pointX = round(x1 + r * math.cos(angleStart * 3.142 / 180))
            pointZ = round(z1 + r * math.sin(angleStart * 3.142 / 180))
            world.set_version_block(
                pointX, y1, pointZ, dimension, (block_platform, block_version), block
            )
            angleStart += 1
    # Deciding how big the arcs should be depending on the selection size
    arcsSize = round(midpoint(selectionSizeX, selectionSizeZ) / 2.5)
    # Arcs density and inner radius
    density = options["Arcs Density"]
    if density > 10: # Primitive fail safe
        density = 10
    innerRadius = options["Inner Radius"]
    # Here come the crammed randoms
    # One radius step per iteration; each step draws `density` arcs of a
    # single random color with random start angle and shrinking span.
    for i in range(arcsSize):
        block1 = random.choice(blockPalette)
        for z in range(density):
            angleStart = random.randint(-180, 180)
            angleEnd = angleStart + random.randint(45 - (i*2), 90)
            arc(selectionCenterX, selectionCenterY, selectionCenterZ, innerRadius + i, angleStart, angleEnd, block1)
    print("Colorful Arcs Ended...")
# Options shown in Amulet's operation UI: name -> [type, default].
operation_options = {
    "Arcs Density": [
        "int",
        2,
    ],
    "Inner Radius": [
        "int",
        8,
    ]
}
# Plugin registration: menu name, operation callback, and its options.
export = {
    "name": "Colorful Arcs",
    "operation": colorful_arcs,
    "options": operation_options,
}
|
#!/usr/bin/python3
""" Reviews """
from api.v1.views import app_views
import json
import models
from flask import jsonify, request
from models.place import Place
from models.review import Review
from models.user import User
@app_views.route(
    '/places/<place_id>/reviews',
    methods=['GET'],
    strict_slashes=False)
def reviews(place_id):
    """
    return (JSON)
    array of all reviews of the place, or a 404 error object
    """
    for place in models.storage.all(Place).values():
        if place.id == place_id:
            return jsonify([rev.to_dict() for rev in place.reviews])
    return jsonify(error='Not found'), 404
@app_views.route(
    '/reviews/<review_id>',
    methods=['GET'],
    strict_slashes=False)
def review(review_id):
    """
    return (JSON)
    dict of the review matching review_id, or a 404 error object
    """
    match = next(
        (rev for rev in models.storage.all(Review).values()
         if rev.id == review_id),
        None)
    if match is not None:
        return jsonify(match.to_dict())
    return jsonify(error='Not found'), 404
@app_views.route(
    '/reviews/<review_id>',
    methods=['DELETE'],
    strict_slashes=False)
def reviews_delete(review_id):
    """
    return (JSON)
    empty dict with 200 after deleting review_id, or a 404 error object
    """
    for rev in models.storage.all(Review).values():
        if rev.id == review_id:
            models.storage.delete(rev)
            models.storage.save()
            return jsonify({}), 200
    return jsonify(error='Not found'), 404
@app_views.route(
    '/places/<place_id>/reviews',
    methods=['POST'],
    strict_slashes=False)
def reviews_post(place_id):
    """
    create a new review
    header value: {text = value, place_id = value, user_id=value}
    return (JSON)
    """
    # BUG FIX: check is_json BEFORE calling get_json(); on a non-JSON body
    # get_json() raises a framework-level 400 before our own
    # 'Not a JSON' response could ever be returned.
    if request.is_json is False:
        return jsonify(error='Not a JSON'), 400
    content = request.get_json()
    objects_places = models.storage.all(Place)
    for k, v in objects_places.items():
        if v.id == place_id:
            if 'user_id' not in content:
                return jsonify(error='Missing user_id'), 400
            # The referenced user must exist before creating the review.
            objects_users = models.storage.all(User)
            tmp = 'User.' + content['user_id']
            if tmp not in objects_users:
                return jsonify(error='Not found'), 404
            if 'text' not in content:
                return jsonify(error='Missing text'), 400
            review = Review(
                user_id=content['user_id'],
                place_id=place_id,
                text=content['text'])
            review.save()
            return jsonify(review.to_dict()), 201
    return jsonify(error='Not found'), 404
@app_views.route(
    '/reviews/<review_id>',
    methods=['PUT'],
    strict_slashes=False)
def reviews_put(review_id):
    """
    update a review
    header value: {text=value}
    return (JSON)
    """
    # BUG FIX: check is_json BEFORE calling get_json(); on a non-JSON body
    # get_json() raises a framework-level 400 before our handler can reply.
    if request.is_json is False:
        return jsonify(error='Not a JSON'), 400
    content = request.get_json()
    objects_reviews = models.storage.all(Review)
    for k, v in objects_reviews.items():
        if v.id == review_id:
            # BUG FIX: a JSON body without 'text' used to raise KeyError
            # (HTTP 500); now the field is simply left unchanged.
            if 'text' in content:
                v.text = content['text']
            v.save()
            return jsonify(v.to_dict()), 200
    return jsonify(error='Not Found'), 404
|
from django.urls import path,include
from .views import *
# Routes for the student-management views; the `name=` values are used by
# reverse()/{% url %} lookups in templates.
urlpatterns = [
    # path('student/', student),
    # path('signup/', signup),
    path('login/', login),
    path('logout/', logout),
    path('adminsite/', adminsite,name="admin site"),
    path('addstudent/', studentRegister,name="studentRegister"),
    path('active/', activeStudent,name="activeStudent"),
    path('inactive/', inactiveStudent,name="inactiveStudent"),
    # Toggles/reads a single student's status by primary key.
    path('status/<int:id>', studentStatus,name="studentStatus"),
    path('studentDetails/', studentDetails,name="studentDetails"),
    path('updatedetails/', updateStudent,name="updateStudent"),
]
import sys
import entityx
class UpdateTest(entityx.Entity):
    """Entity whose update() records that it ran; used to verify update dispatch."""
    # Flipped to True once update() has been invoked.
    updated = False
    def update(self, dt):
        # Should only be called with one entity.
        assert self.entity.id.index == 0
        assert self.entity.id.version == 1
        self.updated = True
|
try:
    from setuptools import setup
except ImportError:
    # Fall back to the legacy distutils when setuptools is unavailable.
    from distutils.core import setup

# BUG FIX: the original wrapped 'key': value pairs in LIST brackets, which
# is a SyntaxError — setup(**config) needs a dict. Also corrected the
# misspelled setup() keyword names:
#   'auter_email'     -> 'author_email'
#   'install_require' -> 'install_requires'
#   'download'        -> 'download_url'
config = {
    'description': 'My project',
    'author': 'MY',
    'url': 'where to get',
    'download_url': 'Where to download',
    'author_email': 'My email',
    'version': '0.1',
    'install_requires': ['nose'],
    'packages': ['NAME'],
    'scripts': [],
    'name': 'projectname',
}

setup(**config)
|
#
# @lc app=leetcode.cn id=126 lang=python3
#
# [126] 单词接龙 II
#
# @lc code=start
from collections import defaultdict,deque
class Solution:
    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        """Return all SHORTEST transformation sequences from beginWord to endWord.

        Level-by-level BFS over one-letter neighbours. `dic` maps wildcard
        patterns ('h*t') to the words matching them, so neighbours are found
        in O(word length) per word instead of scanning the whole list.
        Each queue entry carries (word, path-so-far).
        """
        res,dic = [],defaultdict(list)
        queue1,queue2 = deque([(beginWord,[beginWord])]),deque()
        visited = set()
        # Index every dictionary word under each of its wildcard patterns.
        for word in wordList:
            for i in range(len(word)):
                string = word[:i] + '*' + word[i+1:]
                dic[string].append(word)
        while queue1:
            # Drain exactly one BFS level; queue2 collects the next level.
            while queue1:
                word,path = queue1.popleft()
                if word == endWord:
                    res.append(path)
                # Marking visited only when popped lets several paths of the
                # SAME level reach a word — required to return ALL shortest
                # sequences, not just one.
                visited.add(word)
                for i in range(len(word)):
                    string = word[:i] + '*' + word[i+1:]
                    for w in dic[string]:
                        if w not in visited:
                            queue2.append((w,path+[w]))
            # Any result found at this level is of minimal length: stop here.
            if res:
                return res
            queue1,queue2 = queue2,queue1
        return []
# @lc code=end
|
# -*- coding: utf-8 -*-
from schedulingsystem import db
from schedulingsystem.errors.schedulingexception import SchedulingException
from schedulingsystem.meetingroom.models import MeetingRoom
import schedulingsystem.meetingroom.repository as meeting_room_rep
import schedulingsystem.scheduling.repository as scheduling_repository
def get_by_id(id):
    """Return the meeting room with *id*; raise a 404 SchedulingException when missing."""
    room = meeting_room_rep.get_by_id(id)
    if not room:
        raise SchedulingException('Sala de reunião não encontrada.', 404)
    return room
def get_all():
    """Return every registered meeting room."""
    return meeting_room_rep.get_all()
def create(name, description):
    """Create a meeting room after validating its fields and name uniqueness."""
    room = MeetingRoom(name, description)
    validate(room)
    if meeting_room_rep.get_by_name(room.name) is not None:
        raise SchedulingException('Já existe uma sala de reunião com esse nome.')
    db.session.add(room)
    db.session.commit()
def edit(id, meeting_room):
    """Update an existing meeting room with the data in *meeting_room*.

    Raises SchedulingException when the payload is None/invalid, the room
    does not exist (404 via get_by_id), or the new name is already taken
    by another room.
    """
    # FIX: reject a None payload before touching the database, and run
    # field validation BEFORE the duplicate-name lookup so the error order
    # matches create().
    if meeting_room is None:
        raise SchedulingException("Dados da sala de reunião inválidos")
    edited_meeting_room = get_by_id(id)
    validate(meeting_room)
    existing_meeting_room = meeting_room_rep.get_by_name(meeting_room.name)
    if existing_meeting_room is not None and existing_meeting_room.id != id:
        raise SchedulingException('Já existe uma sala de reunião com esse nome.')
    edited_meeting_room.name = meeting_room.name
    edited_meeting_room.description = meeting_room.description
    db.session.commit()
def delete(id):
    """Remove a meeting room, refusing when it still has schedulings."""
    # Guard clause instead of if/else: fail fast when deletion is blocked.
    if not can_be_deleted(id):
        raise SchedulingException('Sala de reunião não pode ser deletada pois possuí agendamentos.')
    room = get_by_id(id)
    db.session.delete(room)
    db.session.commit()
def can_be_deleted(id):
    """Return True when the room has no schedulings attached to it."""
    return len(scheduling_repository.get_by_meeting_room_id(id)) == 0
def validate(meeting_room):
    """Raise SchedulingException when name/description violate length rules.

    The name is required and capped at 100 chars; the optional description
    is capped at 255 chars.
    """
    name_ok = meeting_room.name and len(meeting_room.name) <= 100
    if not name_ok:
        raise SchedulingException('Nome da sala de reunião inválido. Não deve ser vazio, e deve conter no máximo 100 caracteres.')
    if meeting_room.description and len(meeting_room.description) > 255:
        raise SchedulingException('Descrição da sala de reunião deve conter no máximo 255 caracteres.')
|
import torch
import yaml
import config
import numpy as np
from . import keys,crann
from src.basemodel.basemodel import BaseModel
from .quadObject import quadObject
from .opImage import genRotateAroundMatrix, rotateImageByMatrix
import base64
import cv2
import json
from skimage import io
import logging
class CrannRecModel(BaseModel):
    """CRANN text-recognition model wrapper.

    Loads CRANN weights from disk and crops/deskews quadrilateral text
    regions out of an image so they can be fed to the recognizer.
    """
    def __init__(self, modelpath, config_yaml):
        # modelpath: checkpoint file with a 'state_dict' entry.
        # config_yaml: YAML file with CRANN hyper-parameters.
        super(CrannRecModel, self).__init__()
        self.alphabet = keys.alphabet
        f = open(config_yaml)
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and unsafe on untrusted input — consider safe_load.
        opt = yaml.load(f)
        # Force single-GPU, non-parallel inference regardless of the config.
        opt['N_GPU'] = 1
        opt['RNN']['multi_gpu'] = False
        # print(opt)
        self.model = crann.CRANN(opt, len(self.alphabet)+1)
        if(config.USE_GPU):
            self.model.cuda()
            # self.model.half()
        self.model.load_state_dict(torch.load(modelpath)['state_dict'])
        # Half precision only after the weights are loaded (GPU path only).
        if(config.USE_GPU):
            self.model.half()
    def cutimage(self,dat):
        """Cut the text regions described by a JSON payload out of an image.

        *dat* is a JSON string with 'imgurl' (image URL) and 'bboxes'
        (list of 4-point quadrilaterals). Each box is cropped to its
        axis-aligned bounds, rotated upright around its centre line, and
        returned inverted in grayscale. Returns the list of crops.
        """
        dat = json.loads(dat)
        img = io.imread(dat['imgurl'])
        logging.critical('img:'+str(img.shape))
        img = cv2.cvtColor(img,cv2.COLOR_RGBA2BGR)
        # logging.critical(img.shape)
        bboxes = dat['bboxes']
        num = 0
        showimg = img.copy()
        imglist = []
        # for box in bboxes:
        #     box = np.array(box)
        #     for i in range(4):
        #         cv2.line(showimg,tuple(box[i]),tuple(box[(i+1)%4]),(0,0,255),3)
        #     box_obj = quadObject(box)
        #     logging.critical('angle:'+str(box_obj.angle_of_center_line))
        #     M = box_obj.genRotateAroundMatrix(box_obj.center_point, box_obj.angle_of_center_line)
        #     box_obj1 = box_obj.rotateByMatrix(M)
        #     new_box = box_obj1.quad
        #     x = new_box[:, 0]
        #     y = new_box[:, 1]
        #     rotated_img = rotateImageByMatrix(img, M)
        #     left = np.min(new_box[:, 0])
        #     right = np.max(new_box[:, 0])
        #     bottom = np.min(new_box[:, 1])
        #     top = np.max(new_box[:, 1])
        #     part_img = rotated_img[bottom:top, left:right]
        #     if 0 in part_img.shape:
        #         part_img = np.ones((32,32,3), dtype=np.uint8) * 255
        #     cv2.imwrite('cut_img'+str(num)+'.jpg',part_img)
        #     num+=1
        for box in bboxes:
            box = np.array(box)
            # for i in range(4):
            #     cv2.line(showimg,tuple(box[i]),tuple(box[(i+1)%4]),(0,0,255),3)
            # Axis-aligned bounding box of the quadrilateral.
            x0 = np.min(box[:, 0])
            y0 = np.min(box[:, 1])
            x1 = np.max(box[:, 0])
            y1 = np.max(box[:, 1])
            # Shift box coordinates into the cropped sub-image's frame.
            box = np.stack([box[:, 0]-x0, box[:, 1]-y0], axis=1)
            sub_img = img[y0:y1+1, x0:x1+1].copy()
            if np.min(sub_img.shape) == 0:
                # Degenerate box: substitute a blank white patch.
                part_img = np.ones((32,32,3), dtype=np.uint8) * 255
            else:
                # Rotate the crop so the box's centre line becomes horizontal.
                box_obj = quadObject(box)
                M = box_obj.genRotateAroundMatrix(box_obj.center_point, box_obj.angle_of_center_line)
                box_obj1 = box_obj.rotateByMatrix(M)
                new_box = box_obj1.quad
                # x = new_box[:, 0]
                # y = new_box[:, 1]
                # logging.critical(M.shape)
                # logging.critical(sub_img.shape)
                rotated_img = rotateImageByMatrix(sub_img, M)
                left = np.min(new_box[:, 0])
                right = np.max(new_box[:, 0])
                bottom = np.min(new_box[:, 1])
                top = np.max(new_box[:, 1])
                # logging.critical(str(top+1-bottom)+'/'+str(right+1-left))
                # logging.critical(rotated_img.shape)
                part_img = rotated_img[bottom:top+1, left:right+1]
                if 0 in part_img.shape:
                    part_img = np.ones((32,32,3), dtype=np.uint8) * 255
                # logging.critical(part_img.shape)
            # cv2.imwrite('cut_img'+str(num)+'.jpg',part_img)
            num+=1
            # Invert + grayscale: recognizer expects white text on black.
            imglist.append(255 - cv2.cvtColor(part_img, cv2.COLOR_BGR2GRAY))
        # cv2.imwrite('img.jpg',showimg)
        return imglist
'''
goetia@sentineldr.com
check log for TOR relays in past connections.
'''
import urllib.request
import sys
import re
def main(args):
    """Check each line of a log file for connections from TOR exit relays.

    args is an argv-style list: [script, logfile, ip_column, date_column]
    (columns are 1-based). Each line's IP and date are queried against the
    Tor Project's ExoneraTor service; matching lines are flagged.
    """
    usage = "Usage: extor.py logfile IP_column# date_column#"
    if '-h' in args or '--help' in args:
        print("extor.py: check log for TOR relays in past connections")
        print(usage)
    elif len(args) < 4:
        print("Invalid number of arguments provided")
        print(usage)
    else:
        # FIX: the original opened the file once just to test it (leaking
        # that handle, later "closed" by a stray f.close() after the with
        # block) and used a bare except. Read inside one managed handle.
        try:
            with open(args[1]) as f:
                log = f.readlines()
        except OSError:
            print('Could not open file ' + args[1])
            print(usage)
            sys.exit(1)
        # FIX: compile the patterns once; they were rebuilt for every line.
        ipregx = re.compile(r'([0-9]{1,3}\.){3}[0-9]{1,3}')
        dateregx = re.compile(r'([0-9]{1,4}(\/|\-|\.)){2}[0-9]{1,4}')
        if log:
            for line in log:
                try:
                    ip = line.split()[int(args[2])-1]
                    date = line.split()[int(args[3])-1]
                except (IndexError, ValueError):
                    print("Please specify correct column number for IP and date")
                    print(usage)
                    sys.exit(1)
                ip = ipregx.match(ip)
                date = dateregx.match(date)
                if (date is not None) and (ip is not None):
                    date = re.split(r'\/|-|\.', date[0])
                    if len(date[2]) == 4:
                        # Reorder DD/MM/YYYY into ISO YYYY-MM-DD.
                        date = date[2] + '-' + date[1] + '-' + date[0]
                    else:
                        date = '-'.join(date)
                    try:
                        # BUG FIX: the original URL had a garbled
                        # '&timestamp' parameter and a stray ')' appended
                        # after '&lang=en', breaking every lookup.
                        req = urllib.request.urlopen(
                            'https://metrics.torproject.org/exonerator.html?ip='
                            + ip[0] + '&timestamp=' + date + '&lang=en')
                    except OSError:
                        print("Could not connect to API")
                        sys.exit(1)
                else:
                    print("Could not find IP and/or date in specified column")
                    sys.exit(1)
                torp = req.read().decode(req.headers.get_content_charset())
                if "Result is positive" in torp:
                    print(line.rstrip('\n') + " <--- TOR relay")
                else:
                    print(line.rstrip('\n'))
        else:
            print("Could not read any data from " + args[1])
if __name__ == '__main__':
    main(sys.argv)
|
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
# read the data file (tab-separated: country name column + one column per year)
data = pd.read_csv('pubs.txt', sep='\t')
data2 = data.iloc[:-1,1:] #Remove names of countries and "world"
'''
Calculate differences between consecutive years (columns) as in:
difference_matrix = data2.diff(axis=1)
'''
# Year-over-year deltas, built column by column (keeps explicit names and
# avoids the all-NaN first column that data2.diff(axis=1) would produce).
df = data2['2001'] - data2['2000']
df2 = pd.DataFrame(df, columns=['diff1'])
df2['diff2'] = data2['2002'] - data2['2001']
df2['diff3'] = data2['2003'] - data2['2002']
df2['diff4'] = data2['2004'] - data2['2003']
df2['diff5'] = data2['2005'] - data2['2004']
df2['diff6'] = data2['2006'] - data2['2005']
df2['diff7'] = data2['2007'] - data2['2006']
df2['diff8'] = data2['2008'] - data2['2007']
df2['diff9'] = data2['2009'] - data2['2008']
df2['diff10'] = data2['2010'] - data2['2009']
df2['diff11'] = data2['2011'] - data2['2010']
df2['diff12'] = data2['2012'] - data2['2011']
df2['diff13'] = data2['2013'] - data2['2012']
df2['diff14'] = data2['2014'] - data2['2013']
df2['diff15'] = data2['2015'] - data2['2014']
df2['diff16'] = data2['2016'] - data2['2015']
df2['diff17'] = data2['2017'] - data2['2016']
df2['diff18'] = data2['2018'] - data2['2017']
df2['diff19'] = data2['2019'] - data2['2018']
# Normalizing data to [0, 1] per feature before dimensionality reduction
data3 = MinMaxScaler().fit_transform(df2)
# Dim reduction to 2D for visualization and clustering
tsne = TSNE(n_components=2, random_state=1, perplexity=50, learning_rate=100).fit_transform(data3)
'''
# Plot data
plt.scatter(tsne[:,0], tsne[:,1])
plt.show()
plt.close()
'''
# Determine a reasonable number of clusters in tsne space (elbow method)
Sum_of_squared_distances = []
K = range(1,10)
for k in K:
    km = KMeans(n_clusters=k).fit(tsne)
    Sum_of_squared_distances.append(km.inertia_)
plt.plot(K, Sum_of_squared_distances, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
# NOTE(review): bare expression — has no effect outside an interactive
# session (km here is the last fit, k=9).
km.labels_
# Silhouette scores as a second criterion for choosing k
for k in [2,3,4,5,6,7,8,9,10]:
    clusterer = KMeans(n_clusters=k)
    preds = clusterer.fit_predict(tsne)
    centers = clusterer.cluster_centers_
    score = silhouette_score(tsne, preds)
    # NOTE(review): the trailing ')' inside this format string is a typo in
    # the printed output (left untouched here).
    print("For n_clusters = {}, silhouette score is {})".format(k, score))
# Cluster data with the chosen k=4 and plot the labelled embedding
kmeans = KMeans(n_clusters=4).fit(tsne)
labels_tsne_scale = kmeans.labels_
tsne_df_scale = pd.DataFrame(tsne, columns=['tsne1', 'tsne2'])
clusters_tsne_scale = pd.concat([tsne_df_scale, pd.DataFrame({'tsne_clusters':labels_tsne_scale})], axis=1)
plt.scatter(clusters_tsne_scale.iloc[:,0], clusters_tsne_scale.iloc[:,1], c=labels_tsne_scale, cmap='Set1', s=100, alpha=0.6)
plt.show()
# NOTE(review): bare expression — only useful interactively.
kmeans.cluster_centers_
# Random forest classifier and Y-randomization: can the original features
# predict the t-SNE cluster labels?
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
import numpy as np
# repeated stratified 10-fold validation (10 repeats with different splits)
scores = []
for i in range(10):
    kf = StratifiedKFold(10, shuffle=True, random_state=i)
    rf = RandomForestClassifier(n_estimators=100, random_state=1)
    cv = cross_validate(rf, data3, labels_tsne_scale, cv=kf, scoring='accuracy', n_jobs=5)
    scores += [np.sum(cv['test_score'])/10]
np.mean(scores) # 0.9076060606060606
np.std(scores) # 0.009726181798963642
# repeated adversarial control: shuffle the labels each repeat — accuracy
# should collapse if the real model learned genuine structure
scores_adv = []
for i in range(10):
    kf = StratifiedKFold(10, shuffle=True, random_state=i)
    rf = RandomForestClassifier(n_estimators=100, random_state=1)
    labels_tsne_scale2 = np.random.permutation(labels_tsne_scale)
    cv = cross_validate(rf, data3, labels_tsne_scale2, cv=kf, scoring='accuracy', n_jobs=5)
    scores_adv += [np.sum(cv['test_score'])/10]
np.mean(scores_adv) # 0.3995707070707071
np.std(scores_adv) # 0.018067785643892987
|
from unittest import TestCase, main
import pickle, json
from datetime import datetime
from from_email import parse_message_parts, Post
from curate_video import poster_filename_for
import sitebuilder
from main import is_sender_whitelisted, filter_whitelist
class ParseEmailTests(TestCase):
    """Tests for from_email.parse_message_parts against pickled mail fixtures.

    NOTE(review): pickle fixtures are opened in text mode ("r") -- fine on
    Python 2, would need "rb" on Python 3.
    """
    def test_parse_subject(self):
        # Fixture: a text-only message carrying a subject header.
        with open("test_data/test_mail_subject.pickle", "r") as f:
            message_parts = pickle.load(f)
        post = parse_message_parts(message_parts)
        self.assertEqual(post._subject, "The sample title")
        self.assertEqual(post._body[:10], "This is a ")
        self.assertEqual(len(post._attachments), 0)
        self.assertEqual(post._date.strftime("%Y %m %d"),
                         "2015 03 29")
    def test_parse_image(self):
        # Fixture: a message with no subject and one JPEG attachment.
        with open("test_data/test_mail_image.pickle", "r") as f:
            message_parts = pickle.load(f)
        post = parse_message_parts(message_parts)
        self.assertEqual(post._subject, "")
        self.assertEqual(len(post._body), 48)
        self.assertEqual(len(post._attachments), 1)
        self.assertTrue("20150329_155012.jpeg" in post._attachments)
    def test_parse_video(self):
        # Fixture: a message with one MP4 attachment.
        with open("test_data/test_mail_video.pickle", "r") as f:
            message_parts = pickle.load(f)
        post = parse_message_parts(message_parts)
        self.assertEqual(post._subject, "")
        self.assertEqual(len(post._attachments), 1)
        self.assertTrue("20150328_100104_001.mp4" in post._attachments)
    def test_to_markdown(self):
        # Rendered markdown layout: date, title, media list, blank line, body.
        with open("test_data/test_mail_subject.pickle", "r") as f:
            message_parts = pickle.load(f)
        post = parse_message_parts(message_parts)
        markdown = post.to_markdown().splitlines()
        self.assertEqual(markdown[0], "date: 2015-03-29 17:46:43")
        self.assertEqual(markdown[1], "title: The sample title")
        self.assertEqual(markdown[2], "media: []")
        self.assertEqual(markdown[3], "")
        self.assertEqual(markdown[4][:15], "This is a test ")
class PostTests(TestCase):
    """Unit tests for the Post class's accessors and date fix-up behaviour."""
    def test_add_subject(self):
        # add_subject(None) is a no-op; a real subject replaces the default.
        p = Post()
        self.assertEqual(p._subject, "")
        p.add_subject(None)
        self.assertEqual(p._subject, "")
        p.add_subject("a sub")
        self.assertEqual(p._subject, "a sub")
    def test_basic_post(self):
        # A fresh Post renders a date header, an empty title, and no media.
        p = Post()
        md_lines = p.to_markdown().splitlines()
        self.assertEqual(md_lines[0][:5], "date:")
        self.assertEqual(md_lines[1], "title: ")
        self.assertEqual(md_lines[2], "media: []")
    def test_fixup_dates(self):
        # fixup_date() adopts a leading ISO date in the body (an
        # "anachronism") as the post date and strips it from the body.
        now = datetime.now()
        p = Post()
        p.add_body("This is a regular post")
        p.add_date(now)
        p.fixup_date()
        self.assertEqual(p._date, now)
        new_date = datetime(2015, 6, 17)
        p.add_body("{} This is an anachronism".format(
            new_date.strftime("%Y-%m-%d")))
        self.assertEqual(p._date, now)
        p.fixup_date()
        self.assertEqual(p._date, new_date)
        self.assertEqual(p._body, "This is an anachronism")
class SiteBuilderTests(TestCase):
    """Tests for the sitebuilder Flask app and its HTML/media helpers."""
    def setUp(self):
        # Minimal config object standing in for the real settings.
        class TestConfig(object):
            POSTS_PATH="test_posts"
            BUILD_PATH="test_build"
            MEDIA_PATH="test_media"
            POSTS_PER_PAGE=5
        self.app = sitebuilder.create_app(TestConfig())
        self.test_client = self.app.test_client()
    def test_html_for_image(self):
        # Image HTML must reference the file under the media/ path.
        filename = "boo.png"
        with self.app.test_request_context("/"):
            html = sitebuilder.html_for_image(filename)
        self.assertIn("src=\"media/" + filename, html)
    def test_html_for_media(self):
        # Video markup should offer both webm and mp4 sources.
        with self.app.test_request_context("/"):
            html = sitebuilder.html_for_media("test.mp4")
        self.assertIn("<video ", html)
        self.assertIn("test.webm", html)
        self.assertIn("test.mp4", html)
    def test_get_media_type(self):
        # Extension detection is case-insensitive.
        for fname in ("doo.jpeg", "doo.JPEG", "doo.jpg", "doo.JPG"):
            self.assertEqual(sitebuilder.get_media_type(fname),
                             sitebuilder.MediaType.image)
        for fname in ("doo.mp4", "doo.webm", "doo.3pg", "doo.mov",
                      "doo.MOV"):
            self.assertEqual(sitebuilder.get_media_type(fname),
                             sitebuilder.MediaType.video)
        # NOTE(review): .png is expected to be "other" here although
        # html_for_image above uses a .png -- confirm this is intentional.
        for fname in ("doo.txt", "foo.png", "wok.tar.gz"):
            self.assertEqual(sitebuilder.get_media_type(fname),
                             sitebuilder.MediaType.other)
    def test_compute_media_html(self):
        """ TODO: figure out how to test; this
        looks up image width and height using PIL..
        """
        pass
    def test_index(self):
        # The built index page must be served and contain the site title text.
        rv = self.test_client.get("/")
        self.assertEqual(rv.status_code, 200)
        self.assertIn("Bowman", rv.data)
class CurateVideoTests(TestCase):
    """Tests for curate_video helper functions."""
    def test_poster_filename(self):
        # A media filename maps to a matching "-poster.jpg" companion name.
        result = poster_filename_for("boopie.png")
        self.assertEqual(result, "boopie-poster.jpg")
class MainTests(TestCase):
    """Tests for the sender whitelist check in main."""
    def test_is_whitelisted(self):
        # A sender matches when it corresponds to a whitelist token
        # (matching rules are defined by is_sender_whitelisted).
        whitelist = ["abc", "123"]
        # The positive loop covers "a.bc@gmail.com"; the original had a
        # redundant standalone assertion for that same sender.
        for sender in ("a.bc@gmail.com", "glad_abc", "jimmy123@hotmail"):
            self.assertTrue(is_sender_whitelisted(sender, whitelist))
        for sender in ("a@bc.com", "124@hotmail.com"):
            self.assertFalse(is_sender_whitelisted(sender, whitelist))
if __name__ == '__main__':
    # Run every TestCase in this module under the unittest runner.
    main()
|
from kivymd.app import MDApp
from kivy.lang.builder import Builder
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager,Screen
#Window.size=(720,1280)
# KV layout for the three screens (login, signup, dashboard).
# Bug fix: the login e-mail field used a quoted id ('login_email'); kv ids
# must be bare names (compare login_password below) or kv raises on load.
# NOTE(review): the Login button declares on_press twice; kv keeps only the
# last handler, so the screen switch may never run -- consider merging both
# statements into one on_press. Left unchanged here.
home_page_helper="""
ScreenManager:
    LoginScreen:
    SignupScreen:
    DashboardScreen:
<LoginScreen>:
    name:'login_page'
    Image :
        source : "logo.png"
        pos_hint:{'center_x':.5,"center_y":.7}
        size_hint:(None,None)
        width:400
        height:400
    MDTextField :
        id:login_email
        hint_text:"Email or Phone Number"
        pos_hint:{'center_x':.5,"center_y":.5}
        size_hint:(None,None)
        helper_text:"example@email.com or 821850****"
        helper_text_mode:"on_focus" #persistent
        width:550
        height:300
        icon_right:"account"
        icon_right_color:app.theme_cls.primary_color
    MDTextField :
        id:login_password
        hint_text:"Password"
        pos_hint:{'center_x':.5,"center_y":.4}
        size_hint:(None,None)
        helper_text:"Don't Know Click Forget Password"
        helper_text_mode:"on_focus" #persistent
        width:550
        height:300
        icon_right:"shield-key"
        password:True
        icon_right_color:app.theme_cls.primary_color
    MDRectangleFlatButton :
        text:' Login '
        pos_hint:{'center_x':0.5,"center_y":0.3}
        size_hint:(None,None)
        width:300
        height:250
        on_press:root.manager.current='dashboard'
        on_press:app.login()
    MDRectangleFlatButton :
        text:'Sign Up'
        pos_hint:{'center_x':0.5,"center_y":0.2}
        size_hint:(None,None)
        width:300
        height:250
        on_press:root.manager.current='signup_page'
<SignupScreen>:
    name:'signup_page'
    MDTextField :
        hint_text:"Enter Your First Name"
        pos_hint:{'center_x':.5,"center_y":.9}
        size_hint:(None,None)
        width:550
        height:300
        icon_right:"account"
        icon_right_color:app.theme_cls.primary_color
    MDTextField :
        hint_text:"Enter Your Last Name"
        pos_hint:{'center_x':.5,"center_y":.8}
        size_hint:(None,None)
        width:550
        height:300
        icon_right:"account"
        icon_right_color:app.theme_cls.primary_color
    MDTextField :
        hint_text:"Enter Your Phone Number"
        pos_hint:{'center_x':.5,"center_y":.7}
        size_hint:(None,None)
        width:550
        height:300
        icon_right:"phone"
        icon_right_color:app.theme_cls.primary_color
    MDTextField :
        hint_text:"Enter Your Email"
        pos_hint:{'center_x':.5,"center_y":.6}
        size_hint:(None,None)
        width:550
        height:300
        icon_right:"mail"
        icon_right_color:app.theme_cls.primary_color
    MDTextField :
        hint_text:"Enter Your Password"
        pos_hint:{'center_x':.5,"center_y":.5}
        size_hint:(None,None)
        width:550
        height:300
        icon_right:"shield-key"
        icon_right_color:app.theme_cls.primary_color
        password:True
    MDTextField :
        hint_text:"Security Question"
        pos_hint:{'center_x':.5,"center_y":.4}
        size_hint:(None,None)
        width:550
        height:300
        icon_right:"account-question"
        icon_right_color:app.theme_cls.primary_color
    MDTextField :
        hint_text:"Security Answer"
        pos_hint:{'center_x':.5,"center_y":.3}
        size_hint:(None,None)
        width:550
        height:300
        icon_right:"animation-play"
        icon_right_color:app.theme_cls.primary_color
    MDRectangleFlatButton :
        text:'sign up'
        pos_hint:{'center_x':0.5,"center_y":0.2}
        size_hint:(None,None)
        width:300
        height:250
        on_press:root.manager.current='login_page'
<DashboardScreen>:
    name:'dashboard'
    BoxLayout:
        orientation:"vertical"
        MDToolbar:
            title:'Mohit'
            #left_action_items:[["menu",lambda x:camera.play]]
        MDLabel:
            text:'I Am Your Assistant Rem Rem'
            halign:'center'
        MDBottomAppBar:
            MDToolbar:
                #title: "Titl"
                icon: "microphone"
                type: "bottom"
                left_action_items: [["abjad-arabic", lambda x: x]]
                right_action_items: [["abjad-hebrew", lambda x: x]]
"""
class LoginScreen(Screen):
    """Login screen; layout and behaviour come from the <LoginScreen> KV rule."""
    pass
class SignupScreen(Screen):
    """Signup screen; layout comes from the <SignupScreen> KV rule."""
    pass
class DashboardScreen(Screen):
    """Dashboard screen; layout comes from the <DashboardScreen> KV rule."""
    pass
# NOTE(review): this module-level ScreenManager appears unused -- MainApp
# returns the ScreenManager built from the KV string instead.  Confirm it is
# dead code before removing.
sm=ScreenManager()
sm.add_widget(LoginScreen(name='login_page'))
sm.add_widget(SignupScreen(name='signup_page'))
sm.add_widget(DashboardScreen(name='dashboard'))
class MainApp(MDApp):
    """Application entry point: applies theming and loads the KV layout."""
    def build(self):
        """Configure the dark DeepOrange theme and return the root widget."""
        self.theme_cls.primary_palette = "DeepOrange"
        self.theme_cls.theme_style = 'Dark'
        self.screen = Builder.load_string(home_page_helper)
        return self.screen
    def login(self):
        """Placeholder for the login handler referenced from the KV file."""
        pass
# Start the Kivy event loop with the application instance.
MainApp().run()
from django.shortcuts import render, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.core.mail import send_mail, EmailMultiAlternatives, EmailMessage
from django.contrib.sites.shortcuts import get_current_site
from django.conf import settings
from django.template.loader import get_template
from django.template import Context
from django.utils.html import strip_tags
import json
from django.contrib.auth import get_user_model
User = get_user_model()
from customers.forms import RecipientAddressForm
from customers.models import Recipient, CreditCard
from catalog.models import Subscription
from cart.models import CartItem, Cart
from orders.models import Order, Record
from django.contrib import messages
# Create your views here.
def add_existing_recipient(request):
    """Assign an already-saved recipient to an order and/or cart item.

    POST parameters:
      recipOption -- id of the Recipient chosen from the dropdown.
      rec -- Order id whose main_recipient should be set ("None" to skip).
      itm -- CartItem id (a subscription item) to attach the recipient to
             ("None" when the item is a single issue).
      next -- unused here; original redirect target.

    NOTE(review): request.POST.get() never raises, so the try/excepts below
    are dead; the code also compares against the literal string "None"
    because the template passes it through -- confirm before tidying.
    NOTE(review): non-POST requests fall through and return None, which is
    not a valid view response.
    """
    if request.method == 'POST':
        print "adding an existing recipient"
        print request.POST
        id = request.POST.get("recipOption", "")
        #get the recipient object
        recipient = Recipient.objects.get(id=id)
        print recipient
        try:
            next = request.POST.get("next")
        except:
            next = None
        print "next is %s" % next
        try:
            rec = request.POST.get("rec")
        except:
            rec = None
        print "rec is %s" % rec
        try:
            itm = request.POST.get("itm")
        except:
            # item is a single, not a subbie
            itm = None
        print "itm is %s" % itm
        if rec != "None":
            # single, order has main recip; assign.
            print "were getting order with a main recipient-a single"
            order = Order.objects.get(id=rec)
            order.main_recipient = recipient
            order.save()
        if itm != "None":
            # if itm exists, this item is a subbie, so get it and its cart
            print "we are getting cart item with itm = %s" % itm
            ci = CartItem.objects.get(id=itm)
            cart = Cart.objects.get(id=ci.cart.id)
        # items = CartItem.objects.filter(cart=cart)
        # print "here follows a list of items in the cart"
        # print items
        # for item in items:
        # print "item recipient is %s" % item.recipient
        # print "item subscritpion is %s" % item.subscription
        # # for each item, check if it has same recipient and if it is a subbie
        # # get starting issue from subscription
        # # subtract one to get last active issue
        # starting_issue = item.subscription.first_issue
        # last_issue = starting_issue -1
        # print "starting issue is %s" % starting_issue
        # print "last issue is %s" % last_issue
        # try:
        # # look for records with this recip and starting issue
        # record = Record.objects.get(recipient=recipient, issue=starting_issue)
        # print "we have a record, it is %s" % record
        # except:
        # record = None
        # print "record retrieve excpetion"
        # if recipient == item.recipient and item.subscription:
        # print "changed address to active recipient!"
        # messages.error(request,
        # "This recipient has an active subscription. \
        # This subscription will be added after the current subscription ends",
        # extra_tags='safe')
        # return HttpResponseRedirect(reverse("checkout"))
        # elif record != None and item.subscription:
        # print "added an active recipient to a fresh unassigned subbie"
        # # need to turn into a renewal
        # # maybe bump to add_renewal url?
        # renewal = Subscription.objects.get(slug="one-year-renewal")
        # ci.subscription = renewal
        # ci.save()
        # messages.error(request,
        # "This recipient has an active subscription. \
        # New issues will be added after the current subscription ends",
        # extra_tags='safe')
        # return HttpResponseRedirect(reverse("checkout"))
        # else:
        # print "did not detect recipient as active"
        # return HttpResponseRedirect(reverse("checkout"))
        # # check if recipient is one of the users
        # # if recipient.user != request.user:
        # # bye
        # # return HttpResponseRedirect(reverse("checkout")
        # # else:
        # this should go in sequence above when i figure it out
        if itm != "None":
            # this item is a subbie, assign new recip to it
            print "were getting cart item with itm = %s" % itm
            ci = CartItem.objects.get(id=itm)
            ci.recipient = recipient
            ci.save()
            return HttpResponseRedirect(reverse("checkout"))
        else:
            return HttpResponseRedirect(reverse("checkout"))
def is_this_my_recipient(recipient_id, user_id):
    """Return True when the recipient with recipient_id belongs to user_id.

    Keeps the original lookups (so a missing recipient or user still raises
    DoesNotExist), but tests ownership with exists() instead of building a
    queryset and counting every matching row.
    """
    recipient = Recipient.objects.get(id=recipient_id)
    user = User.objects.get(id=user_id)
    # exists() lets the database stop at the first match.
    return Recipient.objects.filter(user=user, pk=recipient.pk).exists()
def edit_recipient(request):
    """Render and process the edit form for one of the user's recipients.

    GET parameters:
      rec -- id of the Recipient to edit.
      next -- url name to redirect to after a successful save.

    NOTE(review): next defaults to "" here, so `if next is not None` is
    always true; a missing ?next= leads to reverse("") -- confirm callers
    always supply it.
    """
    next = request.GET.get("next", "")
    rec = request.GET.get("rec", "")
    submit_btn = "Save"
    form_title = "Edit Recipient"
    recipient = Recipient.objects.get(id=rec)
    #safety first: only the owner may edit this recipient
    if is_this_my_recipient(recipient.id, request.user.id):
        pass # yep, this is my recipient
    else:
        return HttpResponseRedirect(reverse("dashboard")) #gtfo
    recipient_form = RecipientAddressForm(instance=recipient)
    if request.method == "POST":
        # Re-bind the form with the submitted data against the same instance.
        recipient_form = RecipientAddressForm(request.POST, instance=recipient)
        print "edit form posted"
        print request.POST
        if recipient_form.is_valid():
            print "edit form valid"
            #new_address = recipient_form.save(commit=False)
            #new_address.user = request.user
            #new_address.recipient = str(cart_item)
            #new_address.save()
            recipient_form.save()
            print "did it save?"
            if next is not None:
                return HttpResponseRedirect(reverse(str(next)))
        else:
            print recipient_form.errors #To see the form errors in the console.
    context = {"recipient_form": recipient_form,
               "submit_btn": submit_btn,
               "form_title": form_title,
               "rec": rec,
               "next": next,
               }
    template = "customers/add_recipient.html"
    return render(request, template, context)
def add_recipient(request):
    """Render and process the "add new recipient" form.

    GET parameters:
      next -- url name to redirect to after a successful save.
      rec -- Order id whose main_recipient should be the new address.
      itm -- CartItem id (subscription item) to attach the new address to.

    For subscription items, also builds exclusion lists so the "existing
    recipient" dropdown omits recipients already in the cart or already
    receiving the item's first issue.
    """
    next = request.GET.get("next", None)
    rec = request.GET.get("rec", None)
    itm = request.GET.get("itm", None)
    print next, rec, itm
    user = request.user
    if itm:
        print "we have subbie item"
        ci = CartItem.objects.get(id=itm)
        cart = ci.cart
        all_cartitems = cart.cartitem_set.all()
        # Recipients already attached to a subscription item in this cart.
        incart_recipients = []
        for c in all_cartitems:
            if c.subscription and c.recipient:
                incart_recipients.append(c.recipient.id)
            else:
                pass
        first_issue = ci.subscription.first_issue if ci.subscription else None
        print "cart item is %s" % ci
        print "subscription is %s" % ci.subscription
        print "first issue is %s" % first_issue
        # Recipients already recorded as receiving this item's first issue.
        record_set = Record.objects.filter(issue=first_issue)
        print "record set %s" % record_set
        active_recipients = []
        for record in record_set:
            if record.recipient:
                active_recipients.append(record.recipient.id)
            else:
                pass
        print active_recipients
    else:
        print "we don't have subbie item"
        itm = None
        incart_recipients = None
        active_recipients = None
    # add context of existing recipients in cart
    if user.is_authenticated():
        recip_list = Recipient.objects.filter(user=user)
        if incart_recipients:
            # incart are recips that are already in the cart (renewal or new)
            recip_list = recip_list.exclude(id__in=incart_recipients)
        if active_recipients:
            # active are recips that already getting an issue (via record)
            recip_list = recip_list.exclude(id__in=active_recipients)
    else:
        recip_list = None
    recipient_form = RecipientAddressForm(request.POST or None)
    if request.method == "POST":
        if recipient_form.is_valid():
            new_address = recipient_form.save(commit=False)
            #new_address.user = request.user
            #new_address.recipient = str(cart_item)
            new_address.save()
            if itm is not None:
                # Attach the new address to the subscription cart item.
                ci = CartItem.objects.get(id=itm)
                cart = ci.cart
                print "the cart is"
                print cart
                print ci
                ci.recipient = new_address
                print ci.recipient
                ci.save()
                if request.user.is_authenticated():
                    new_address.user = request.user
                    new_address.save()
                else:
                    pass
            if rec is not None:
                # Make the new address the order's main recipient.
                print rec
                order = Order.objects.get(id=rec)
                order.main_recipient = new_address
                order.save()
                print order
                if request.user.is_authenticated():
                    new_address.user = request.user
                    new_address.save()
                else:
                    pass
            # is_default = form.cleaned_data["default"]
            # if is_default:
            #     default_address, created = UserDefaultAddress.objects.get_or_create(user=request.user)
            #     default_address.shipping = new_address
            #     default_address.save()
            if next is not None:
                return HttpResponseRedirect(reverse(str(next)))
    submit_btn = "Add this Recipient"
    form_title = "Add New Address"
    context = {"recipient_form": recipient_form,
               "submit_btn": submit_btn,
               "form_title": form_title,
               "recip_list": recip_list,
               "itm": itm,
               "rec": rec,
               "next": next,
               }
    template = "customers/add_recipient.html"
    return render(request, template, context)
def delete_recipient(request, pk):
    """Delete the recipient with the given pk and return to the dashboard."""
    Recipient.objects.get(pk=pk).delete()
    messages.success(request, "Successfully Removed Client.")
    return HttpResponseRedirect(reverse("dashboard"))
def notify_html(email_context, recipient, template, subject):
c = Context(email_context)
sender = settings.DEFAULT_FROM_EMAIL
admin_cc = settings.DEFAULT_ADMIN_CC
recipients = [recipient, admin_cc, ]
template = get_template(template)
headers = {
"X-SMTPAPI": json.dumps({
"unique_args": {
"campaign_id": 999
},
"category": "notice"
})
}
html_part = template.render(c)
text_part = strip_tags(html_part)
msg = EmailMultiAlternatives(subject, text_part, sender, recipients, headers=headers)
msg.attach_alternative(html_part, "text/html")
print "purchase nofication sent"
return msg.send(True)
def purchase_notify(email_context, recipient):
c = Context(email_context)
sender = settings.DEFAULT_FROM_EMAIL
recipients = [recipient, ]
template = get_template('orders/purchase_notify_email.html')
headers = {
"X-SMTPAPI": json.dumps({
"unique_args": {
"campaign_id": 999
},
"category": "notice"
})
}
subject = "Your Receipt"
html_part = template.render(c)
text_part = strip_tags(html_part)
msg = EmailMultiAlternatives(subject, text_part, sender, recipients, headers=headers)
msg.attach_alternative(html_part, "text/html")
print "purchase nofication sent"
return msg.send(True)
def delete_creditcard(request, pk):
    """Delete a stored credit card, keeping a valid default card if any remain.

    After deletion, if no single card is flagged default (none, or several so
    that get() fails), the most recently added card is promoted instead.
    """
    cc = CreditCard.objects.get(pk=pk)
    cc.delete()
    try:
        cc_default = CreditCard.objects.get(default=True)
    except Exception:
        # No unique default left; fall back to the newest remaining card.
        # last() returns None when no cards remain at all -- the original
        # .latest() call raised DoesNotExist in that case.
        cc_default = CreditCard.objects.order_by('timestamp').last()
    if cc_default is not None:
        cc_default.default = True
        cc_default.save()
    messages.success(request, "Successfully Removed Credit Card.")
    return HttpResponseRedirect(reverse("dashboard"))
|
from .base import *  # noqa
# Production overrides: debug disabled, database taken from the environment.
DEBUG = False
# NOTE(review): os.getenv returns None when DATABASE_URL is unset, so str()
# would yield the literal string "None" -- confirm the env var is mandatory.
DATABASE_URL = str(os.getenv("DATABASE_URL"))
DATABASES = {
    'url': dj_database_url.parse(DATABASE_URL, conn_max_age=600),
}
# Django reads connections from the 'default' alias; alias the parsed config.
DATABASES['default'] = DATABASES['url']
|
# std imports
from typing import Set, Dict, Type, Mapping, TypeVar, Iterable, Optional, OrderedDict
# local
from .terminal import Terminal
# Type variable so __new__ is typed to return the instantiating subclass.
_T = TypeVar("_T")
class Keystroke(str):
    # Type stub: a str subclass representing a single keystroke, possibly a
    # multi-character escape sequence.  Bodies are intentionally `...`.
    def __new__(
        cls: Type[_T],
        ucs: str = ...,
        code: Optional[int] = ...,
        name: Optional[str] = ...,
    ) -> _T: ...
    @property
    def is_sequence(self) -> bool: ...
    @property
    def name(self) -> Optional[str]: ...
    @property
    def code(self) -> Optional[int]: ...
# Mapping of keycode -> key name.
def get_keyboard_codes() -> Dict[int, str]: ...
# Terminal-specific mapping of input sequence -> keycode.
def get_keyboard_sequences(term: Terminal) -> OrderedDict[str, int]: ...
# Set of prefixes of the given sequences (presumably for partial-read
# matching -- stub only; confirm against the implementation).
def get_leading_prefixes(sequences: Iterable[str]) -> Set[str]: ...
# Resolve the text's leading sequence to a Keystroke via the given mappings.
def resolve_sequence(
    text: str, mapper: Mapping[str, int], codes: Mapping[int, str]
) -> Keystroke: ...
|
'''
You are given a node that is the beginning of a linked list. The list always
consists of a non-looping "tail" section followed by a loop.
Your objective is to determine the length of the loop.
For example, in a list whose tail contains 3 nodes and whose loop contains
11 nodes, the answer is 11.
'''
def loop_size(node):
temp = {}
n = 0
while True:
temp[node] = n
node = node.next
n += 1
if node in temp:
return len(temp) - temp[node]
break
|
from main.models import (
Neighborhood,
Zipcode,
BlockGroup,
Listing,
Amenity,
Crime
)
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
from rest_framework import mixins
from django.views.decorators.csrf import csrf_exempt
from django.contrib.gis.db.models.functions import AsGeoJSON
from api.serializers import NeighborhoodSerializer, ListingListSerializer, ListingDetailSerializer, AmenitySerializer
from api.filters import get_filter_query, random_sample
from api.stats import get_stats
from api.predict import predict_price
from django.core.cache import cache
class FilterableViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """
    A base viewset that provides 'retrieve' and 'list' actions.
    Supports filtering on 'list' actions via POST or GET methods.
    Also supports dual serializer classes for list and detail views.
    To use it, override the class and set the queryset, list_serializer_class, and detail_serializer_class
    attributes to match the model.
    """
    def get_serializer_class(self):
        """Use the lightweight serializer for lists, the rich one otherwise."""
        if self.action == 'list':
            return self.list_serializer_class
        else:
            return self.detail_serializer_class
    def get_queryset(self):
        """Mirror get_serializer_class: separate querysets for list/detail."""
        if self.action == 'list':
            return self.list_queryset
        else:
            return self.detail_queryset
    def _filtered_queryset(self, request, allow_sampling):
        """Apply the request's optional 'filters' payload to the queryset.

        When allow_sampling is True and the payload contains
        'max_sample_size', the result is randomly down-sampled.  Previously
        this logic was duplicated in filter() and stats(); also replaces
        the `in request.data.keys()` anti-idiom with `in request.data`.
        """
        if 'filters' not in request.data:
            return self.get_queryset()
        filters = request.data['filters']
        queryset = self.get_queryset().filter(get_filter_query(filters))
        if allow_sampling and 'max_sample_size' in filters:
            queryset = random_sample(queryset, filters['max_sample_size'])
        return queryset
    @list_route(methods=['get', 'post'])
    @csrf_exempt
    def filter(self, request):
        """Return serialized objects matching the posted filters."""
        queryset = self._filtered_queryset(request, allow_sampling=True)
        serializer = self.get_serializer_class()(queryset, many=True)
        return Response(serializer.data)
    @list_route(methods=['get', 'post'])
    @csrf_exempt
    def stats(self, request):
        """Return aggregate statistics over the filtered queryset."""
        queryset = self._filtered_queryset(request, allow_sampling=False)
        return Response(get_stats(queryset))
class ListingViewSet(FilterableViewSet):
    """Listings endpoint: cached GeoJSON-annotated list, plain detail."""
    # List responses reuse a cached queryset annotated with the point as
    # GeoJSON; timeout None means it is cached until explicitly invalidated.
    list_queryset = cache.get_or_set(
        'listings_with_geojson',
        Listing.objects.annotate(geometry=AsGeoJSON('point')),
        None)
    list_serializer_class = ListingListSerializer
    # Detail views hit the database directly, without annotations.
    detail_queryset = Listing.objects.all()
    detail_serializer_class = ListingDetailSerializer
class NeighborhoodViewSet(FilterableViewSet):
    """Neighborhoods endpoint: one cached queryset serves list and detail."""
    # Cached queryset carrying both the polygon and its centroid as GeoJSON;
    # timeout None means it is cached until explicitly invalidated.
    list_queryset = cache.get_or_set(
        'neighborhoods_with_geojson',
        Neighborhood.objects.annotate(geometry=AsGeoJSON('mpoly'),
                                      center=AsGeoJSON('centroid')),
        None)
    detail_queryset = list_queryset
    list_serializer_class = NeighborhoodSerializer
    detail_serializer_class = NeighborhoodSerializer
class AmenityViewSet(viewsets.ReadOnlyModelViewSet):
    """Plain read-only list/detail endpoints for amenities (no filtering)."""
    queryset = Amenity.objects.all()
    serializer_class = AmenitySerializer
class PredictPriceView(APIView):
    """
    View to get price prediction.
    """
    @csrf_exempt
    def post(self, request, format=None):
        # Expects a JSON body with a 'listing_attrs' mapping of listing fields.
        if 'listing_attrs' in request.data:
            return Response(predict_price(request.data['listing_attrs']))
        else:
            # NOTE(review): returns HTTP 200 with an error string; a 400
            # status would be more conventional -- confirm client behaviour.
            return Response('Error: No listing attributes were submitted.')
# Enter your code here. Read input from STDIN. Print output to STDOUT
import calendar
# Input: a single line "MM DD YYYY".
ip = input().split()
m = int(ip[0])
d = int(ip[1])
y = int(ip[2])
# calendar.weekday() -> 0 (Monday) .. 6 (Sunday); day_name maps the index
# to the weekday name.  Bug fix: the original wrapped the weekday in a
# one-element list and immediately indexed it ([...][0]), a pointless no-op.
wk = calendar.weekday(y, m, d)
print(calendar.day_name[wk].upper())
|
# Debug utility: load an SDFG from the path given in argv[1], propagate its
# memlet labels, and render the graph to a file.
import dace
import dace.graph.labeling
import sys
import time
print(time.time(), 'loading')
a = dace.SDFG.from_file(sys.argv[1])
print(time.time(), 'propagating')
dace.graph.labeling.propagate_labels_sdfg(a)
print(time.time(), 'drawing')
a.draw_to_file()
exit()
# NOTE(review): everything below is unreachable because of the exit() above;
# it appears to be a temporarily disabled strict-transformation pass.
a.apply_strict_transformations()
a.apply_strict_transformations()
a.draw_to_file()
|
#!/usr/bin/python3
'''This module contains a LockedClass class'''
class LockedClass:
    '''A class restricted to a single attribute, first_name.'''

    # __slots__ suppresses the per-instance __dict__, so assigning any
    # attribute other than first_name raises AttributeError.
    __slots__ = ['first_name']

    def __init__(self, value=""):
        '''Store value as the instance's first name (empty by default).'''
        self.first_name = value
|
from django.conf.urls import url
from board import views
# Board CRUD routes; the positional regex group ([0-9]+) is the board id.
urlpatterns = [
    url(r'^$', views.mainIndex, name ='index'),
    # NOTE(review): only the index route is named; the others cannot be
    # reverse()d by name -- consider adding names if templates need them.
    url(r'^([0-9]+)/$', views.boardDetail),
    url(r'^create/$', views.boardCreate),
    url(r'^update/([0-9]+)/$', views.boardUpdate),
    url(r'^delete/([0-9]+)/$', views.boardDelete),
]
# -*- coding: utf-8 -*-
import requests
import re
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class spider(object):
def __init__(self):
print 'Spider Starts...'
def getsource(self,url):
html = requests.get(url)
return html.text
def changepage(self,url,total_page):
now_page = int(re.search('page=(\d+)',url,re.S).group(1))
page_group = []
for i in range(now_page, total_page+1):
link = re.sub('page=\d+','page=%s'%i,url,re.S)
page_group.append(link)
return page_group
def geteveryclass(self,source):
everyclass = re.findall('<li class="course-one">(.*?)</li>',source,re.S)
return everyclass
def getinfo(self,eachclass):
info = {}
info['title'] = re.search('<span>(.*?)</span>',eachclass,re.S).group(1)
info['content'] = re.search('"text-ellipsis">(.*?)</p>',eachclass,re.S).group(1)
timeandlevel = re.search('"time-label">(.*?)</span>',eachclass,re.S).group(1).split("|")
info['classtime'] = timeandlevel[0]
info['classlevel'] = timeandlevel[1]
learnnum_full = re.search('<span class="l ml20">(.*?)</span>',eachclass,re.S).group(1)
info['learnnum'] = re.findall(r'\d+', learnnum_full, re.S)[0]
return info
def saveinfo(self,classinfo):
f = open('info.txt','a')
for each in classinfo:
f.writelines('title:' + each['title'] + '\n')
f.writelines('content:' + each['content'] + '\n')
f.writelines('classtime:' + each['classtime'].strip(' \t\n\r') + '\n')
f.writelines('classlevel:' + each['classlevel'].strip(' \t\n\r') + '\n')
f.writelines('learnnum:' + each['learnnum'].strip(' \t\n\r') + '\n\n')
f.close()
if __name__ == '__main__':
    # Crawl pages 1..23 of the course list, scrape every course's details,
    # then persist them all in one pass.
    classinfo = []
    url = 'http://www.imooc.com/course/list?page=1'
    coursespider = spider()
    all_links = coursespider.changepage(url,23)
    for link in all_links:
        print 'loading: ' + link
        html = coursespider.getsource(link)
        everyclass = coursespider.geteveryclass(html)
        for each in everyclass:
            info = coursespider.getinfo(each)
            classinfo.append(info)
    coursespider.saveinfo(classinfo)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Convolutional Dictionary Learning
=================================
This example demonstrates the use of :class:`.dictlrn.DictLearn` to construct a dictionary learning algorithm with the flexibility of choosing the sparse coding and dictionary update classes. In this case they are :class:`.cbpdn.ConvBPDNJoint` and :class:`.ccmod.ConvCnstrMOD` respectively, so the resulting dictionary learning algorithm is not equivalent to :class:`.dictlrn.cbpdndl.ConvBPDNDictLearn`. The example uses colour input images and a greyscale dictionary :cite:`wohlberg-2016-convolutional`.
"""
from __future__ import division
from __future__ import print_function
from builtins import input
from builtins import range
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco.admm import cbpdn
from sporco.admm import ccmod
from sporco.dictlrn import dictlrn
from sporco import cnvrep
from sporco import util
from sporco import plot
"""
Load training images.
"""
# Load three example colour images, cropped to a common 512x512 size, and
# stack them along a new axis so S has shape (H, W, C, K) with K=3 images.
exim = util.ExampleImages(scaled=True, zoom=0.5)
img1 = exim.image('barbara.png', idxexp=np.s_[10:522, 100:612])
img2 = exim.image('kodim23.png', idxexp=np.s_[:, 60:572])
img3 = exim.image('monarch.png', idxexp=np.s_[:, 160:672])
S = np.stack((img1, img2, img3), axis=3)
"""
Highpass filter training images.
"""
# Tikhonov highpass split: sl is the lowpass residual, sh the highpass
# component actually used for training.
npd = 16
fltlmbd = 5
sl, sh = util.tikhonov_filter(S, fltlmbd, npd)
"""
Construct initial dictionary.
"""
# Fixed seed keeps the random initial dictionary reproducible.
np.random.seed(12345)
D0 = np.random.randn(8, 8, 64)
"""
Construct object representing problem dimensions.
"""
cri = cnvrep.CDU_ConvRepIndexing(D0.shape, sh)
"""
Define X and D update options.
"""
lmbda = 0.2
mu = 0.1
optx = cbpdn.ConvBPDNJoint.Options({'Verbose': False, 'MaxMainIter': 1,
            'rho': 50.0*lmbda + 0.5, 'AutoRho': {'Period': 10,
            'AutoScaling': False, 'RsdlRatio': 10.0, 'Scaling': 2.0,
            'RsdlTarget': 1.0}})
optd = ccmod.ConvCnstrMODOptions({'Verbose': False, 'MaxMainIter': 1,
            'rho': 10.0*cri.K, 'AutoRho': {'Period': 10, 'AutoScaling': False,
            'RsdlRatio': 10.0, 'Scaling': 2.0, 'RsdlTarget': 1.0}},
            method='ism')
"""
Normalise dictionary according to dictionary Y update options.
"""
D0n = cnvrep.Pcn(D0, D0.shape, cri.Nv, dimN=2, dimC=0, crp=True,
                 zm=optd['ZeroMean'])
"""
Update D update options to include initial values for Y and U.
"""
optd.update({'Y0': cnvrep.zpad(cnvrep.stdformD(D0n, cri.Cd, cri.M), cri.Nv),
             'U0': np.zeros(cri.shpD)})
"""
Create X update object.
"""
xstep = cbpdn.ConvBPDNJoint(D0n, sh, lmbda, mu, optx)
"""
Create D update object.
"""
dstep = ccmod.ConvCnstrMOD(None, sh, D0.shape, optd, method='ism')
"""
Create DictLearn object and solve.
"""
opt = dictlrn.DictLearn.Options({'Verbose': True, 'MaxMainIter': 100})
d = dictlrn.DictLearn(xstep, dstep, opt)
D1 = d.solve()
print("DictLearn solve time: %.2fs" % d.timer.elapsed('solve'), "\n")
"""
Display dictionaries.
"""
# Show the initial and learned dictionaries side by side.
D1 = D1.squeeze()
fig = plot.figure(figsize=(14, 7))
plot.subplot(1, 2, 1)
plot.imview(util.tiledict(D0), title='D0', fig=fig)
plot.subplot(1, 2, 2)
plot.imview(util.tiledict(D1), title='D1', fig=fig)
fig.show()
"""
Plot functional value and residuals.
"""
# Per-iteration statistics from both solver stages: objective, ADMM primal
# and dual residuals, and the penalty parameters.
itsx = xstep.getitstat()
itsd = dstep.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(itsx.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
plot.plot(np.vstack((itsx.PrimalRsdl, itsx.DualRsdl, itsd.PrimalRsdl,
          itsd.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations',
          ylbl='Residual', lgnd=['X Primal', 'X Dual', 'D Primal', 'D Dual'],
          fig=fig)
plot.subplot(1, 3, 3)
plot.plot(np.vstack((itsx.Rho, itsd.Rho)).T, xlbl='Iterations',
          ylbl='Penalty Parameter', ptyp='semilogy', lgnd=['Rho', 'Sigma'],
          fig=fig)
fig.show()
# Wait for enter on keyboard
input()
|
import os
import random
import tempfile
from controller.helpers import get_player_colour
from data import consts
from data.enums import ResponseFlags as rF
from data.interpreter import Interpreter
from state import helpers
from state.context import context
# Proximity thresholds for deciding whether another player is "in frame"
# relative to me -- units presumably match get_player_loc coordinates;
# TODO confirm.
PROX_LIMIT_X = 5
PROX_LIMIT_Y = 3
class Callbacks:
    def __init__(self, game_state):  # Untyped due to circular reference
        """
        Initialises the callback set for the Among Us parser.

        :param game_state: A GameState object; kept as the single source of
            truth that every callback reads from and updates.
        """
        self._game = game_state
    @property
    def cb(self):
        """Map parser event names to their bound handler methods."""
        return {
            'Event': self.event_callback,
            'StartMeeting': self.start_meeting_callback,
            'StartGame': self.start_game_callback,
            'Chat': self.chat_callback,
            'RemovePlayer': self.remove_player_callback,
            'PlayerMovement': self.player_movement_callback,
        }
@property
def root_dir(self):
return tempfile.gettempdir() + r'\among_bots'
    def event_callback(self, _):
        """Housekeeping run on every event: ensure the state directory
        exists, prune stale state files, and persist the current state."""
        if not os.path.exists(self.root_dir):
            os.makedirs(self.root_dir)
        helpers.cleanup_states(self.root_dir)
        context.update_state(self._game, self.root_dir)
        self._game.update_state(self.root_dir)
        if self._game.game_id is not None:
            # Remember the active lobby id -- presumably reused across
            # events within the same lobby; confirm against GameState.
            self._game.curr_lobby = self._game.game_id
def start_meeting_callback(self, _):
context.chat_log_reset()
context.response_flags_reset()
if len({rF.BODY_FOUND_OTHER, rF.BODY_FOUND_ME}.intersection(self._game.get_response_flags())) > 0:
context.response_flags_append(rF.BODY_NOT_LOCATED)
imp_list = self._game.impostor_list
prev_player_len = len(context.trust_map)
context.trust_map_players_set(self._game.get_players_colour(include_me=True))
context.trust_map_score_scale(0.5)
if prev_player_len == 0 and self._game.me is not None and self._game.me.alive:
me = self._game.me_colour
players = [x for x in self._game.get_players_colour() if x != me]
if len(players) > 0:
context.trust_map_score_set(me, players[random.randint(0, len(players) - 1)], -0.5)
if imp_list is not None:
for i in imp_list:
context.trust_map_score_set(me, i, 1)
self._game.set_player_loc()
@staticmethod
def start_game_callback(_):
context.trust_map_players_reset()
context.last_seen_reset()
def chat_callback(self, event):
interpreter = Interpreter(self._game, event['player'], event['message'].decode("utf-8"))
interpret = interpreter.interpret()
if interpret is not None:
if consts.debug_chat:
print("Trust map:")
for x in context.trust_map:
print(x, "\t:", context.trust_map[x])
print("Aggregate:", {k: f"{v:0.2f}" for k, v in context.trust_map_score_get().items()})
print()
def remove_player_callback(self, event):
if event['player'] is not None and self._game.game_started:
player = get_player_colour(event['player'])
context.trust_map_player_remove(player)
players_in_frame = context.last_seen
if player in players_in_frame:
context.last_seen_remove(player)
def player_movement_callback(self, event):
me = self._game.me
if me is None or self._game.meeting_reason is not False:
return
me_id = me.playerId
pl_id = event["player"].playerId
if me_id == pl_id:
me_x, me_y = self._game.get_player_loc(me_id)
# print(me_x, me_y)
player = self._game.get_player_from_id(pl_id)
if player is None:
return
player_colour = get_player_colour(player)
in_frame = self._in_frame(me_id, pl_id)
players_in_frame = context.last_seen
if not player.alive:
if player_colour in players_in_frame:
context.last_seen_remove(player_colour)
return
if me_id == pl_id:
players = self._game.get_players()
if players is None:
return
new_pl = \
[get_player_colour(p) for p in players
if p.playerId != me_id
and p.color is not False and p.alive
and self._in_frame(me_id, p.playerId)]
for p in players_in_frame.copy():
if p not in new_pl:
context.last_seen_remove(p)
for p in new_pl.copy():
if p in players_in_frame:
new_pl.remove(p)
for p in new_pl:
context.last_seen_append(p)
elif in_frame and player_colour not in players_in_frame:
context.last_seen_append(player_colour)
elif not in_frame and player_colour in players_in_frame:
context.last_seen_remove(player_colour)
else:
return
def _in_frame(self, me_id: int, pl_id: int) -> bool:
me_x, me_y = self._game.get_player_loc(me_id)
pl_loc = self._game.get_player_loc(pl_id)
if pl_loc is None:
return False
pl_x, pl_y = pl_loc
dist_x, dist_y = abs(me_x - pl_x), abs(me_y - pl_y)
return dist_x < PROX_LIMIT_X and dist_y < PROX_LIMIT_Y
|
# Python Program to Find the Largest Number in a List
# Read `n` numbers from the user into `li`.
li = []
n = int(input("How many numbers to be added in the list = "))
for i in range(n):
    li.append(float(input("enter number ")))
print(li)
def check_largest_no(numbers=None):
    """Return the largest value in *numbers* (defaults to the module list ``li``).

    Improvements over the original: iterates values directly instead of
    ``range(len(...))``, and the list is now an optional parameter so the
    function is reusable; calling with no argument behaves exactly as before.

    :param numbers: sequence of numbers; falls back to the global ``li``.
    :raises IndexError: if the sequence is empty (same as the original).
    """
    if numbers is None:
        numbers = li
    largest = numbers[0]
    for value in numbers[1:]:
        if value > largest:
            largest = value
    return largest
# Report the maximum of the numbers entered above.
largest = check_largest_no()
print("largest number in the list is %s" %largest)
|
#!/usr/bin/python
import pymongo
# Import Python modules
import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
pygeo_dir = os.path.join(current_dir, "../../")
sys.path.append(pygeo_dir)
import pygeo
from pygeo.time.standardtime import attrib_to_converters
class mongo_import:
    """Imports NetCDF file metadata into a MongoDB database (Python 2 / legacy pymongo)."""
    def __init__(self, server, database):
        # NOTE(review): pymongo.Connection is the legacy (pre-3.x) client class.
        self._connection = pymongo.Connection(server)
        self._db = self._connection[database]
    def import_file(self, collection, filename, private=True):
        """Import metadata from a filename into the database
        This method reads a filename (fullpath) for its metadata
        and stores it into the specified collection of the database.
        :param collection: Name of the collection to look for basename.
        :type collection: str.
        :param filename: Name of the file with fullpath (eg. /home/clt.nc).
        :type filename: str.
        :param private: Should the entry be marked as private.
        :type private: bool
        """
        if (not (os.path.isfile(filename) and os.path.exists(filename))):
            raise Exception("File " + filename + " does not exist")
        # @note Assuming that getting mongo collection everytime
        # is not going to cause much performance penalty
        coll = self._db[collection]
        print 'Begin importing %s into database' % filename
        variables = []
        basename = os.path.basename(filename)
        filenamesplitted = os.path.splitext(basename)
        fileprefix = filenamesplitted[0]
        filesuffix = filenamesplitted[1]
        # NOTE(review): is_exists is passed the full path here although its
        # parameter is called "basename" — consistent with the "basename"
        # field stored below, which also holds the full path.
        if self.is_exists(collection, filename):
            print 'Data %s already exists' % filename
            return
        if filesuffix == ".nc":
            # VTK is required
            import vtk
            # Read the NetCDF file as a (non-spherical) image volume.
            reader = vtk.vtkNetCDFCFReader()
            reader.SphericalCoordinatesOff()
            reader.SetOutputTypeToImage()
            reader.ReplaceFillValueWithNanOn()
            reader.SetFileName(filename)
            reader.Update()
            data = reader.GetOutput()
            # Obtain spatial information
            bounds = data.GetBounds()
            # Obtain temporal information
            timeInfo = {}
            times = reader.GetOutputInformation(0).Get(vtk.vtkStreamingDemandDrivenPipeline.TIME_STEPS())
            timeInfo['rawTimes'] = times #time steps in raw format
            tunits = reader.GetTimeUnits()
            timeInfo['units'] = tunits #calendar info needed to interpret/convert times
            converters = attrib_to_converters(tunits)
            if converters and times:
                # Derive native and standardized (integer) time descriptions.
                timeInfo['numSteps'] = len(times)
                nativeStart = converters[3]
                timeInfo['nativeStart'] = nativeStart
                stepUnits = converters[2]
                timeInfo['nativeUnits'] = stepUnits
                stepSize = 0
                if len(times) > 1:
                    stepSize = times[1]-times[0]
                timeInfo['nativeDelta'] = stepSize
                stdTimeRange = (converters[0](times[0]), converters[0](times[-1]))
                timeInfo['nativeRange'] = (times[0], times[-1])
                stdTimeDelta = 0
                if len(times) > 1:
                    stdTimeDelta = converters[0](times[1]) - converters[0](times[0])
                timeInfo['stdDelta'] = stdTimeDelta
                stdTimeRange = (converters[0](times[0]), converters[0](times[-1]))
                timeInfo['stdTimeRange'] = stdTimeRange #first and last time as normalized integers
                dateRange = (converters[1](stdTimeRange[0]), converters[1](stdTimeRange[1]))
                timeInfo['dateRange'] = dateRange #first and last time in Y,M,D format
            # Obtain array information
            pds = data.GetPointData()
            pdscount = pds.GetNumberOfArrays()
            if times == None:
                times = [0]
            # Go through all timesteps to accumulate global min and max values
            for t in times:
                firstTStep = t==times[0]
                arrayindex = 0
                # Go through all arrays
                for i in range(0, pdscount):
                    pdarray = pds.GetArray(i)
                    if not pdarray:
                        # Got an abstract array
                        continue
                    if firstTStep:
                        # Create new record for this array
                        variable = {}
                    else:
                        # Extend existing record
                        variable = variables[arrayindex]
                    # Tell reader to read data so that we can get info about this time step
                    sddp = reader.GetExecutive()
                    sddp.SetUpdateTimeStep(0,t)
                    sddp.Update()
                    arrayindex = arrayindex + 1
                    if firstTStep:
                        # Record unchanging meta information
                        variable["name"] = pdarray.GetName()
                        variable["dim"] = []
                        variable["tags"] = []
                        variable["units"] = reader.QueryArrayUnits(pdarray.GetName())
                    # Find min and max for each component of this array at this timestep
                    componentCount = pdarray.GetNumberOfComponents()
                    minmax = []
                    for j in range(0, componentCount):
                        minmaxJ = [0,-1]
                        pdarray.GetRange(minmaxJ, j)
                        minmax.append(minmaxJ[0])
                        minmax.append(minmaxJ[1])
                    if firstTStep:
                        # Remember what we learned about this new array
                        variable["range"] = minmax
                        variables.append(variable)
                    else:
                        # Extend range if necessary from this timesteps range
                        for j in range(0, componentCount):
                            if minmax[j*2+0] < variable["range"][j*2+0]:
                                variable["range"][j*2+0] = minmax[j*2+0]
                            if minmax[j*2+1] > variable["range"][j*2+1]:
                                variable["range"][j*2+1] = minmax[j*2+1]
        # Record what we've learned in the data base
        # NOTE(review): for non-.nc files, timeInfo/bounds are undefined here
        # and this insert would raise NameError — confirm only .nc is expected.
        insertId = coll.insert({"name":fileprefix, "basename":filename,
                                "variables":variables,
                                "timeInfo":timeInfo,
                                "spatialInfo":bounds,
                                "private":private})
        print 'Done importing %s into database' % filename
    def import_directory(self, collection, directory, drop_existing=False):
        """Import metadata from files of a directory into the database
        This method reads all of the files that belong to a directory
        for its metadata and stores it into the specified collection of
        the database.
        :param collection: Name of the collection to look for basename.
        :type collection: str.
        :param directory: Full path to the directory.
        :type directory: str.
        """
        if (not (os.path.isdir(directory) and os.path.exists(directory))):
            raise Exception("Directory " + directory + " does not exist")
        # Gather all files in the directory
        from os import listdir
        from os.path import isfile, join
        files = [f for f in listdir(directory) if isfile(join(directory,f))]
        # Check if requested to drop existing collection
        if drop_existing:
            self._db.drop_collection(collection)
        # Add files to the database
        for filename in files:
            self.import_file(collection, os.path.join(directory, filename), False)
    def is_exists(self, collection, basename):
        """Check if a basename exists in the given collection of the database.
        :param collection: Name of the collection to look for basename.
        :type collection: str.
        :param basename: Name of the file (eg. clt.nc).
        :type basename: str.
        :returns: bool -- True if exists False otherwise.
        """
        if self._connection and self._db:
            coll = self._db[collection]
            if (coll.find({"basename": basename}).count() > 0):
                return True
        else:
            print 'Invalid connection'
        return False
# CLI entry point: import every file of a directory, dropping any existing
# collection first (Python 2 script).
if __name__ == "__main__":
    import sys
    print sys.argv
    if (len(sys.argv) < 5):
        print "usage: import_data server database collection directory"
        sys.exit(1)
    server = sys.argv[1]
    database = sys.argv[2]
    coll = sys.argv[3]
    directory = sys.argv[4]
    ins = mongo_import(server, database)
    ins.import_directory(coll, directory, True)
|
import maya.cmds as mc
import pcCreateRig00AUtilities
from pcCreateRig00AUtilities import pcCreateRigUtilities as CRU
reload(pcCreateRig00AUtilities)
# Duplicate the selected l_/r_ mesh under the mirrored name and offset it.
#
# Fixes over the original:
#  * `sels[0]` was read before the single-selection check, raising IndexError
#    on an empty selection;
#  * a name starting with neither "l_" nor "r_" left `toReplace` undefined
#    and raised NameError at the replace step — now reported to the user;
#  * `replace(..., 1)` swaps only the first occurrence (the prefix), not
#    every "l_"/"r_" inside the name.
sels = mc.ls(sl=True)
if len(sels) == 1:
    sel = sels[0]
    if CRU.checkObjectType(sel) == "mesh":
        if sel[:2] == "l_":
            toReplace, replaceWith = "l_", "r_"
        elif sel[:2] == "r_":
            toReplace, replaceWith = "r_", "l_"
        else:
            toReplace = replaceWith = None
        if toReplace is None:
            print("Please select a geometry whose name starts with 'l_' or 'r_'")
        else:
            replaceName = sel.replace(toReplace, replaceWith, 1)
            dupMesh = mc.duplicate(sel, n=replaceName, rc=True)[0]
            mc.move(0, 0, -50, replaceName, r=True)
    else:
        print("Please select a geometry")
else:
    print("Please select a single object")
|
# -*- coding: utf-8 -*-
import click
import time
import pyodbc as odbc
from flask import Flask, request, jsonify
from flask_cors import CORS
from db.initDB import initDB, initIndex
from handler import *
import processbar
app = Flask(__name__)
CORS(app, supports_credentials=True)  # allow cross-origin requests with credentials
# Single shared pyodbc connection; each request handler opens its own cursor.
connection = odbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=TDLTE')
@app.cli.command()
def initdb():
    """Flask CLI command: create the database schema, then its indexes."""
    initDB()
    initIndex()
    click.echo('\nInitialized database.\n')
@app.cli.command()
def dbg():
    """Flask CLI command: time `handle_enodeb` before and after index creation.

    Fix: ``time.clock()`` was deprecated since 3.3 and removed in Python 3.8;
    ``time.perf_counter()`` is its documented replacement for benchmarking.
    """
    start1 = time.perf_counter()
    handle_enodeb(enodeb_id='273991')
    end1 = time.perf_counter()
    initIndex()
    start2 = time.perf_counter()
    handle_enodeb(enodeb_id='273991')
    end2 = time.perf_counter()
    print("无索引:", end1 - start1, "有索引:", end2 - start2)
    click.echo('\nOK.\n')
@app.route('/register', methods=['POST'])
def register():
    """Create an account; status 0 on success, 1 when it already exists."""
    payload = request.json
    cursor = connection.cursor()
    created = handle_register(payload['account'], payload['authentication'], cursor)
    cursor.close()
    return jsonify(status=0 if created else 1, message='this account has existed')
@app.route('/login', methods=['POST'])
def login():
    """Authenticate an account; returns the handler's status code and message."""
    body = request.json
    cursor = connection.cursor()
    status_code, message = handle_login(body['account'], body['authentication'], cursor)
    cursor.close()
    return jsonify(status=status_code, message=message)
@app.route('/config', methods=['POST'])
def config():
    """Import a configuration file (given by path) into the database."""
    cursor = connection.cursor()
    status_code, message = handle_config(request.json['path'], cursor)
    cursor.close()
    return jsonify(status=status_code, message=message)
@app.route('/kpi', methods=['POST'])
def kpi():
    """Import a KPI data file (given by path) into the database."""
    cursor = connection.cursor()
    status_code, message = handle_kpi(request.json['path'], cursor)
    cursor.close()
    return jsonify(status=status_code, message=message)
@app.route('/prb', methods=['POST'])
def prb():
    """Import a PRB data file (given by path) into the database."""
    cursor = connection.cursor()
    status_code, message = handle_prb(request.json['path'], cursor)
    cursor.close()
    return jsonify(status=status_code, message=message)
@app.route('/mro', methods=['POST'])
def mro():
    """Import an MRO data file (given by path) into the database."""
    cursor = connection.cursor()
    status_code, message = handle_mro(request.json['path'], cursor)
    cursor.close()
    return jsonify(status=status_code, message=message)
@app.route('/processbar', methods=['GET'])
def percent():
    """Report import progress as a percentage of the known row count per table.

    An unknown/absent ``table`` leaves the raw row counter unscaled,
    matching the original fall-through behaviour.
    """
    # Total row counts of each import file, used as the 100% mark.
    totals = {'config': 5505, 'kpi': 970, 'prb': 93025, 'mro': 875605}
    table = request.args.get('table')
    p = processbar.row
    if table in totals:
        p /= totals[table]
    return jsonify(percent=p * 100)
@app.route('/cell', methods=['GET'])
def cell_query():
    """Query cell information by id or name.

    Fix: the original opened a pyodbc cursor that was never passed to
    ``handle_cell`` (and leaked if the handler raised); the dead cursor
    has been removed. Response shape is unchanged.
    """
    cell_id = request.args.get('cell_id')
    cell_name = request.args.get('cell_name')
    code, msg, info = handle_cell(cell_id, cell_name)
    return jsonify(
        status=code,
        message=msg,
        result=info,
    )
@app.route('/enodeb', methods=['GET'])
def enodeb_query():
    """Query eNodeB information by id or name."""
    args = request.args
    status_code, message, info = handle_enodeb(args.get('enodeb_id'),
                                               args.get('enodeb_name'))
    return jsonify(status=status_code, message=message, result=info)
@app.route('/kpi', methods=['GET'])
def kpi_query():
    """Query one KPI property for a sector over a time range."""
    args = request.args
    status_code, message, info = handle_kpi_query(
        args.get('sector_name'), args.get('from'), args.get('to'),
        args.get('prop'),
    )
    return jsonify(status=status_code, message=message, result=info)
@app.route('/prb_stat', methods=['POST'])
def prb_stat():
    """Run the PRB statistics job, writing results to the given destination path."""
    status_code, message = handle_prb_stat(request.json['dst_path'])
    return jsonify(status=status_code, message=message)
@app.route('/prb_query', methods=['GET'])
def prb_query():
    """Query one PRB property for a sector over a time range at a granularity."""
    args = request.args
    status_code, message, info = handle_prb_query(
        args.get('sector_name'), args.get('from'), args.get('to'),
        args.get('granularity'), args.get('prop'),
    )
    return jsonify(status=status_code, message=message, result=info)
@app.route('/export', methods=['POST'])
def export():
    """Export a table to a file in the requested format at the given path."""
    body = request.json
    status_code, message = handle_export(body['tb_name'], body['format'], body['path'])
    return jsonify(status=status_code, message=message)
@app.route('/c2i_analysis', methods=['GET'])
def c2i_analysis():
    """Run the C2I interference analysis and return its result set."""
    status_code, message, info = handle_c2i_analysis()
    return jsonify(status=status_code, message=message, result=info)
@app.route('/overlay_analysis', methods=['POST'])
def overlay_analysis():
    """Run the overlay analysis for the posted ``x`` value."""
    status_code, message, triple = handle_overlay_analysis(request.json['x'])
    return jsonify(status=status_code, message=message, result=triple)
result = [12, 46, 23, 12, 56, 78]
print(result)


def compute_rating(values):
    """Return 1-based descending ranks of *values*; ties break by original index.

    The original while/for version destructively zeroed out maxima and only
    handled values occurring once or twice — a value occurring three or more
    times made the loop spin forever, and ``max`` was recomputed per element
    (O(n^2)). Sorting index positions by ``(-value, index)`` reproduces the
    same ranks for the original inputs in one O(n log n) pass without
    mutating the input.
    """
    ranks = [0] * len(values)
    order = sorted(range(len(values)), key=lambda i: (-values[i], i))
    for place, idx in enumerate(order, start=1):
        ranks[idx] = place
    return ranks


rating = compute_rating(result)
print(rating)
import discord
from discord.ext import commands
import datetime
import typing
from functools import wraps
from config import Config
if typing.TYPE_CHECKING:
from bot import CodinGameBot
def setup(bot: "CodinGameBot"):
    """Extension entry point: register the Moderation cog on the bot."""
    bot.add_cog(Moderation(bot=bot))
def moderation(func: typing.Callable = None):
    """Decorator restricting a cog command to the configured guild.

    Fix: the commands below apply this as ``@moderation()`` (with
    parentheses), but the original was a plain one-argument decorator, so
    ``moderation()`` raised ``TypeError`` at import time.  It is now a
    factory that supports both ``@moderation`` and ``@moderation()``.
    """
    def decorator(wrapped: typing.Callable):
        @wraps(wrapped)
        async def wrapper(self, ctx, *args):
            # Silently ignore invocations outside the configured guild.
            if ctx.guild is None or ctx.guild.id != Config.GUILD:
                return
            await wrapped(self, ctx, *args)
        return wrapper
    if func is None:
        return decorator
    return decorator(func)
class Moderation(commands.Cog):
    """Guild moderation commands (purge/kick/ban/unban) with mod-log embeds."""
    def __init__(self, bot):
        self.bot: "CodinGameBot" = bot
        self.logger = self.bot.logger.getChild("moderation")
    # --------------------------------------------------------------------------
    # Class methods
    @property
    def log_channel(self) -> discord.TextChannel:
        """The channel that receives moderation-log embeds."""
        return self.bot.get_channel(Config.MOD_LOG_CHANNEL)
    def log_embed(
        self,
        action: str,
        user: discord.User,
        moderator: discord.User,
        reason: str,
        duration: datetime.timedelta = None,
    ) -> discord.Embed:
        """Build the mod-log embed for *action*, colour-coded per action."""
        colors = {
            "warn": discord.Colour.gold(),
            "kick": discord.Colour.orange(),
            "mute": 0x000000,
            "unmute": discord.Colour.green(),
            "ban": discord.Colour.red(),
            "unban": discord.Colour.green(),
        }
        embed = self.bot.embed(
            title=f"**{action.title()}**",
            description=user.mention,
            color=colors[action],
            footer=f"ID: {user.id}",
        )
        embed.add_field(name="User", value=user)
        embed.add_field(name="Moderator", value=moderator.mention)
        embed.add_field(name="Reason", value=reason)
        if duration:
            embed.add_field(name="Duration", value=duration)
        embed.set_author(name=user, icon_url=user.avatar_url)
        return embed
    def success_embed(
        self,
        action: str,
        user: discord.User,
    ) -> discord.Embed:
        """Build the short green confirmation embed shown in the invoking channel."""
        verbs = {
            "warn": "warned",
            "kick": "kicked",
            "mute": "muted",
            "unmute": "unmuted",
            "ban": "banned",
            "unban": "unbanned",
        }
        embed = self.bot.embed(
            title=f"**{user}** was {verbs[action]}.",
            colour=discord.Colour.green(),
        )
        return embed
    async def cog_check(self, ctx) -> bool:
        # All commands of this cog are guild-only.
        return ctx.guild is not None
    # --------------------------------------------------------------------------
    # Commands
    @commands.command("purge")
    @commands.has_guild_permissions(manage_messages=True)
    @moderation()
    async def purge(self, ctx: commands.Context, number_of_messages: int):
        """Delete a number of messages (limit: 1000)"""
        await ctx.channel.purge(limit=number_of_messages, before=ctx.message)
        await ctx.message.delete()
        self.logger.info(
            f"channel `{ctx.channel}` purged of `{number_of_messages}` messages by `{ctx.author}`"
        )
    @commands.command("kick")
    @commands.has_guild_permissions(kick_members=True)
    @moderation()
    async def kick(
        self, ctx: commands.Context, user: discord.Member, *, reason: str = None
    ):
        """Kick a member with an optional reason"""
        # Checks
        if user == self.bot.user:
            return await ctx.send("I can't kick myself")
        if user == ctx.author:
            return await ctx.send("You can't kick yourself")
        if user.top_role.position >= ctx.author.top_role.position:
            return await ctx.send(
                "You can't kick a user who has a higher role than you"
            )
        # Kick
        await ctx.guild.kick(user, reason=reason)
        await ctx.message.delete()
        # DM the user
        try:
            await user.send(
                f"You were kicked from {ctx.guild.name} for reason: {reason}"
            )
        except discord.Forbidden:
            # User has DMs disabled — log and carry on.
            self.logger.info(
                f"user `{user}` kicked from guild `{ctx.guild}` "
                f"for reason `{reason}` (couldn't DM them)"
            )
        else:
            self.logger.info(
                f"user `{user}` kicked from guild `{ctx.guild}` for reason `{reason}`"
            )
        # Success embed
        success_embed = self.success_embed("kick", user)
        await ctx.send(embed=success_embed)
        # Modlog embed
        log_embed = self.log_embed("kick", user, ctx.author, reason)
        await self.log_channel.send(embed=log_embed)
    @commands.command("ban")
    @commands.has_guild_permissions(ban_members=True)
    @moderation()
    async def ban(
        self,
        ctx: commands.Context,
        user: discord.User,
        delete_message_days: str = "1",
        *,
        reason: str = None,
    ):
        """Ban a member with an optional reason"""
        # Compute the `delete_message_days` and the `reason`
        # The second positional is ambiguous: if it isn't a digit it is the
        # first word of the reason, so fold it back in and default to 1 day.
        if delete_message_days.isdigit():
            delete_message_days = int(delete_message_days)
        else:
            reason = reason or ""
            # NOTE(review): this concatenation drops the space between the
            # first reason word and the rest ("beingrude") — confirm intent.
            reason = delete_message_days + reason
            delete_message_days = 1
        # Checks
        if user == self.bot.user:
            return await ctx.send("I can't ban myself")
        if user == ctx.author:
            return await ctx.send("You can't ban yourself")
        member: discord.Member = ctx.guild.get_member(user.id)
        if member and member.top_role.position >= ctx.author.top_role.position:
            return await ctx.send(
                "You can't ban a user who has a higher role than you"
            )
        bans = await ctx.guild.bans()
        if user.id in {ban.user.id for ban in bans}:
            return await ctx.send("User is already banned")
        # Ban
        await ctx.guild.ban(
            user, reason=reason, delete_message_days=delete_message_days
        )
        await ctx.message.delete()
        # DM the user
        try:
            await user.send(
                f"You were banned from {ctx.guild.name} for reason: {reason}"
            )
        except discord.Forbidden:
            # User has DMs disabled — log and carry on.
            self.logger.info(
                f"user `{user}` banned from guild `{ctx.guild}` "
                f"for reason `{reason}` (couldn't DM them)"
            )
        else:
            self.logger.info(
                f"user `{user}` banned from guild `{ctx.guild}` for reason `{reason}`"
            )
        # Success embed
        success_embed = self.success_embed("ban", user)
        await ctx.send(embed=success_embed)
        # Modlog embed
        log_embed = self.log_embed("ban", user, ctx.author, reason)
        await self.log_channel.send(embed=log_embed)
    @commands.command("unban")
    @commands.has_guild_permissions(ban_members=True)
    @moderation()
    async def unban(
        self,
        ctx: commands.Context,
        user: discord.User,
        *,
        reason: str = None,
    ):
        """Unban a member with an optional reason"""
        # Unban
        await ctx.guild.unban(user, reason=reason)
        await ctx.message.delete()
        # DM the user
        try:
            await user.send(
                f"You were unbanned from {ctx.guild.name} for reason: {reason}"
            )
        except discord.Forbidden:
            # User has DMs disabled — log and carry on.
            self.logger.info(
                f"user `{user}` unbanned from guild `{ctx.guild}` "
                f"for reason `{reason}` (couldn't DM them)"
            )
        else:
            self.logger.info(
                f"user `{user}` unbannned from guild `{ctx.guild}` for reason `{reason}`"
            )
        # Success embed
        success_embed = self.success_embed("unban", user)
        await ctx.send(embed=success_embed)
        # Modlog embed
        log_embed = self.log_embed("unban", user, ctx.author, reason)
        await self.log_channel.send(embed=log_embed)
    # ---------------------------------------------------------------------------------------------
    # Command errors
    @kick.error
    async def kick_error(self, ctx: commands.Context, error):
        """Error handler for `kick`."""
        error = getattr(error, "original", error)
        self.logger.warning(
            f"command `{ctx.command.name}` raised exception: {error}"
        )
        if isinstance(error, commands.errors.MissingRequiredArgument):
            return await ctx.send_help("kick")
        elif isinstance(error, commands.errors.MemberNotFound):
            return await ctx.send("User not found")
        elif isinstance(error, discord.Forbidden):
            return await ctx.send("User is higher than the bot")
        else:
            await self.bot.handle_error(error, ctx=ctx)
    @ban.error
    async def ban_error(self, ctx: commands.Context, error):
        """Error handler for `ban`; retries by id when the converter fails."""
        error = getattr(error, "original", error)
        self.logger.warning(
            f"command `{ctx.command.name}` raised exception: {error}"
        )
        # Missing argument
        if isinstance(error, commands.errors.MissingRequiredArgument):
            return await ctx.send_help("ban")
        # User not found
        elif isinstance(error, commands.errors.UserNotFound):
            try:
                id = int(error.argument)
                user = await self.bot.fetch_user(id)
                assert user is not None
            except (ValueError, AssertionError):
                return await ctx.send("User not found, you should use their id")
            else:
                # Reinvoke if the user is found
                # Reconstruct everything after the failing argument, then
                # split off the (possible) delete_message_days word.
                delete_message_days, *reason = (
                    error.argument.join(
                        ctx.message.content.split(error.argument)[1:]
                    )
                    .lstrip()
                    .split()
                )
                reason = " ".join(reason)
                await ctx.invoke(
                    ctx.command, user, delete_message_days, reason=reason
                )
        # Can't ban
        elif isinstance(error, discord.Forbidden):
            return await ctx.send("User is higher than the bot")
        else:
            await self.bot.handle_error(error, ctx=ctx)
    @unban.error
    async def unban_error(self, ctx: commands.Context, error):
        """Error handler for `unban`; retries by id when the converter fails."""
        error = getattr(error, "original", error)
        self.logger.warning(
            f"command `{ctx.command.name}` raised exception: {error}"
        )
        # Missing argument
        if isinstance(error, commands.errors.MissingRequiredArgument):
            return await ctx.send_help("unban")
        # User not found
        elif isinstance(error, commands.errors.UserNotFound):
            try:
                id = int(error.argument)
                user = await self.bot.fetch_user(id)
                assert user is not None
            except (ValueError, AssertionError):
                return await ctx.send("User not found, you should use their id")
            else:
                # Reinvoke if the user is found
                # Everything after the failing argument becomes the reason.
                reason = error.argument.join(
                    ctx.message.content.split(error.argument)[1:]
                ).lstrip()
                await ctx.invoke(ctx.command, user, reason=reason)
        # User not banned
        elif isinstance(error, discord.errors.NotFound):
            return await ctx.send("User isn't banned")
        else:
            await self.bot.handle_error(error, ctx=ctx)
|
# Convert a length in meters to centimeters and millimeters.
meters = float(input('Digite seu valor em metros(m): '))
centimeters = meters * 100
millimeters = meters * 1000
print(f'Sua conversão para centímetros é {centimeters:.1f}cm e sua conversão para milímetros é {millimeters:.1f}mm')
import os
import test_main
from os import path
import pytest
def check_file(name):
    """Return True when *name* exists on the filesystem (file or directory)."""
    return os.path.exists(name)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# FileName : router_to_req.py
# Author : wuqingfeng@
import time
import random
from threading import Thread
import zmq
import zhelpers
# Number of REQ worker threads started by the ROUTER server below.
NBR_WORKERS = 10
# LRU (least-recently-used) worker pattern: workers announce readiness via REQ.
def worker_thread(context=None):
    """REQ worker: announce readiness and process workloads until b"END"."""
    context = context or zmq.Context()
    worker = context.socket(zmq.REQ)
    zhelpers.set_id(worker)  # give the socket a printable identity
    worker.connect("tcp://127.0.0.1:5671")
    total = 0
    while True:
        # Signal readiness; the ROUTER replies with the next workload.
        worker.send(b"ready")
        workload = worker.recv()
        finished = workload == b"END"
        if finished:
            print("Process: %d tasks" % total)
            break
        total += 1
        time.sleep(0.1 * random.random())  # simulate variable task duration
if __name__ == '__main__':
    context = zmq.Context()
    server = context.socket(zmq.ROUTER)
    server.bind("tcp://0.0.0.0:5671")
    # Start the worker pool.
    for _ in range(NBR_WORKERS):
        Thread(target=worker_thread).start()
    # Hand out 10 tasks per worker; ROUTER frames are [address, empty, body].
    for _ in range(NBR_WORKERS * 10):
        address, empty, ready = server.recv_multipart()
        # print "address is reach: %s" % address
        server.send_multipart([
            address,
            b'',
            b'This is the workload',
        ])
    # Tell each worker to shut down.
    for _ in range(NBR_WORKERS):
        address, empty, ready = server.recv_multipart()
        server.send_multipart([
            address,
            b'',
            b'END'
        ])
class HouseholdEvolutionSpecification(object):
    """Top-level bundle of specifications driving a household-evolution run."""

    def __init__(self, idSpec, agentType, hhldAttribs=None,
                 personAttribs=None, evolutionAttribs=None):
        # Required identification spec and agent type.
        self.idSpec, self.agentType = idSpec, agentType
        # Optional attribute specifications (may remain None).
        self.hhldAttribs, self.personAttribs = hhldAttribs, personAttribs
        self.evolutionAttribs = evolutionAttribs
        # Populated later by the caller.
        self.choices = self.coefficients = None
class IdSpecification(object):
    """Column names of the household and person identifiers."""

    def __init__(self, hidName, pidName):
        self.hidName, self.pidName = hidName, pidName
class HouseholdAttributesSpecification(object):
    """Column-name mapping for household attributes."""

    def __init__(self, bldgszName, hhtName, hincName,
                 nocName, personsName, unittypeName,
                 vehiclName, wifName, yrMovedName):
        # Store each column name under the attribute of the same name;
        # the explicit zip keeps the pairing order-safe.
        values = (bldgszName, hhtName, hincName, nocName, personsName,
                  unittypeName, vehiclName, wifName, yrMovedName)
        attributes = ('bldgszName', 'hhtName', 'hincName', 'nocName',
                      'personsName', 'unittypeName', 'vehiclName',
                      'wifName', 'yrMovedName')
        for attribute, value in zip(attributes, values):
            setattr(self, attribute, value)
class PersonAttributesSpecification(object):
    """Column-name mapping for person attributes."""

    def __init__(self, ageName, clwkrName, educName,
                 enrollName, esrName, indnaicsName,
                 occcen5Name, race1Name, relateName,
                 sexName, marstatName, hoursName,
                 gradeName, hispanName):
        # Store each column name under the attribute of the same name;
        # the explicit zip keeps the pairing order-safe.
        values = (ageName, clwkrName, educName, enrollName, esrName,
                  indnaicsName, occcen5Name, race1Name, relateName,
                  sexName, marstatName, hoursName, gradeName, hispanName)
        attributes = ('ageName', 'clwkrName', 'educName', 'enrollName',
                      'esrName', 'indnaicsName', 'occcen5Name', 'race1Name',
                      'relateName', 'sexName', 'marstatName', 'hoursName',
                      'gradeName', 'hispanName')
        for attribute, value in zip(attributes, values):
            setattr(self, attribute, value)
class EvolutionAttributesSpecification(object):
    """Names of the rate/probability files used to evolve households.

    Fix: the constructor stored the mortality file name only under the
    misspelled attribute ``morality_fName``.  The correctly spelled
    ``mortality_fName`` is now set as well; the misspelled attribute is kept
    (same value) as a backward-compatible alias for existing callers.
    """

    def __init__(self, mortality_fName, birth_fName,
                 age_fName,
                 enrollment_fName,
                 grade_fName, educ_fName, educInYears_fName,
                 residenceType_fName, laborParticipation_fName,
                 occupation_fName, income_fName,
                 marriageDecision_fName, divorceDecision_fName):
        self.mortality_fName = mortality_fName
        # Backward-compatible alias for the historical typo.
        self.morality_fName = mortality_fName
        self.birth_fName = birth_fName
        self.age_fName = age_fName
        self.enrollment_fName = enrollment_fName
        self.grade_fName = grade_fName
        self.educ_fName = educ_fName
        self.educInYears_fName = educInYears_fName
        self.residenceType_fName = residenceType_fName
        self.laborParticipation_fName = laborParticipation_fName
        self.occupation_fName = occupation_fName
        self.income_fName = income_fName
        self.marriageDecision_fName = marriageDecision_fName
        self.divorceDecision_fName = divorceDecision_fName
|
import brownie
import pytest
def test_harvest(strategy, ethFundManager, admin, eth_whale):
    """Deposit ETH into the fund manager, give the strategy a profit, harvest.

    Asserts that after ``harvest`` the strategy's assets cover its prior debt
    to the fund manager, and the fund manager's ETH balance did not decrease.
    Fixtures (contracts/accounts) come from the brownie/pytest conftest.
    """
    whale = eth_whale
    fundManager = ethFundManager
    # deposit into fund manager
    deposit_amount = 10 ** 18
    eth_whale.transfer(fundManager, deposit_amount)
    # transfer to strategy
    strategy.deposit(2 ** 256 - 1, deposit_amount, {"from": admin})
    def snapshot():
        # Balances and accounting captured before/after the harvest call.
        return {
            "eth": {
                "strategy": strategy.balance(),
                "fundManager": fundManager.balance(),
            },
            "strategy": {"totalAssets": strategy.totalAssets()},
            "fundManager": {"debt": fundManager.getDebt(strategy)},
        }
    # create profit
    min_profit = 10 ** 18
    eth_whale.transfer(strategy, min_profit)
    before = snapshot()
    tx = strategy.harvest(1, 0, 2 ** 256 - 1, {"from": admin})
    after = snapshot()
    # print(before)
    # print(after)
    # for e in tx.events:
    #     print(e)
    assert after["strategy"]["totalAssets"] >= before["fundManager"]["debt"]
    assert after["eth"]["fundManager"] >= before["eth"]["fundManager"]
|
__all__ = ('command_upgrade',)
from hata import Embed
from hata.ext.slash import P, abort
from scarletio import copy_docs
from sqlalchemy.sql import select
from ...core.constants import (
STAT_NAME_FULL_BEDROOM, STAT_NAME_FULL_CHARM, STAT_NAME_FULL_CUTENESS, STAT_NAME_FULL_HOUSEWIFE,
STAT_NAME_FULL_LOYALTY, STAT_NAME_SHORT_BEDROOM, STAT_NAME_SHORT_CHARM, STAT_NAME_SHORT_CUTENESS,
STAT_NAME_SHORT_HOUSEWIFE, STAT_NAME_SHORT_LOYALTY
)
from ...core.helpers import calculate_stat_upgrade_cost, get_user_chart_color
from .....bot_utils.bind_types import WaifuStats
from .....bot_utils.constants import EMOJI__HEART_CURRENCY, WAIFU_COST_DEFAULT
from .....bot_utils.models import DB_ENGINE, USER_COMMON_TABLE, user_common_model
# (full stat name, WaifuStats field descriptor) pairs in display order.
STAT_NAMES_AND_SLOTS = (
    (STAT_NAME_FULL_HOUSEWIFE, WaifuStats.stat_housewife),
    (STAT_NAME_FULL_CUTENESS, WaifuStats.stat_cuteness),
    (STAT_NAME_FULL_BEDROOM, WaifuStats.stat_bedroom),
    (STAT_NAME_FULL_CHARM, WaifuStats.stat_charm),
    (STAT_NAME_FULL_LOYALTY, WaifuStats.stat_loyalty),
)
# (casefolded short name, full name, field descriptor) triples used for
# case-insensitive prefix matching of user input.
STAT_NAME_IDENTIFYING = (
    (STAT_NAME_SHORT_HOUSEWIFE.casefold(), STAT_NAME_FULL_HOUSEWIFE, WaifuStats.stat_housewife),
    (STAT_NAME_SHORT_CUTENESS.casefold(), STAT_NAME_FULL_CUTENESS, WaifuStats.stat_cuteness),
    (STAT_NAME_SHORT_BEDROOM.casefold(), STAT_NAME_FULL_BEDROOM, WaifuStats.stat_bedroom),
    (STAT_NAME_SHORT_CHARM.casefold(), STAT_NAME_FULL_CHARM, WaifuStats.stat_charm),
    (STAT_NAME_SHORT_LOYALTY.casefold(), STAT_NAME_FULL_LOYALTY, WaifuStats.stat_loyalty),
)
def try_identify_stat(value):
    """
    Tries to identify the stat by the given value.

    Parameters
    ----------
    value : `str`
        The received value.

    Returns
    -------
    stat_name_and_slot : `None`, `tuple` of (`str`, ``FieldDescriptor``)
        The first entry whose short name is a case-insensitive prefix of
        *value*, or `None` when nothing matches.
    """
    folded = value.casefold()
    return next(
        (
            (name_full, slot)
            for name_to_match, name_full, slot in STAT_NAME_IDENTIFYING
            if folded.startswith(name_to_match)
        ),
        None,
    )
def try_identify_stats(value):
    """
    Tries to identify the stats by the given value.

    This function is an iterable generator.

    Parameters
    ----------
    value : `str`
        The received value.

    Yields
    ------
    stat_name_and_slot : `tuple` of (`str`, ``FieldDescriptor``)
        Every entry whose short name is a case-insensitive prefix of *value*.
    """
    folded = value.casefold()
    yield from (
        (name_full, slot)
        for name_to_match, name_full, slot in STAT_NAME_IDENTIFYING
        if folded.startswith(name_to_match)
    )
async def try_upgrade_stat(waifu_stats, slot):
    """
    Gets how much hearts the user have.
    This function is a coroutine.
    Parameters
    ----------
    waifu_stats : ``WaifuStats``
        The user's waifu stats.
    slot : ``FieldDescriptor``
        The stat slot to upgrade.
    Returns
    -------
    success : `bool`
        Whether upgrading the stat was successful.
    old_hearts : `int`
        The user's old hearts.
    cost : `int`
        Upgrade cost.
    next_point : `int`
        The new amount the stat has been upgraded to.
    """
    async with DB_ENGINE.connect() as connector:
        # Fetch the user's heart economy; missing row means a fresh user.
        response = await connector.execute(
            select(
                [
                    user_common_model.total_love,
                    user_common_model.total_allocated,
                    user_common_model.waifu_cost,
                ]
            ).where(
                user_common_model.user_id == waifu_stats.user_id,
            )
        )
        result = await response.fetchone()
        if result is None:
            total_love = 0
            available_love = 0
            waifu_cost = WAIFU_COST_DEFAULT
        else:
            total_love, total_allocated, waifu_cost = result
            # Hearts already committed elsewhere cannot be spent here.
            available_love = total_love - total_allocated
            if not waifu_cost:
                waifu_cost = WAIFU_COST_DEFAULT
        # The cost depends on the sum of all current stat points.
        total_points = (
            waifu_stats.stat_housewife +
            waifu_stats.stat_cuteness +
            waifu_stats.stat_bedroom +
            waifu_stats.stat_charm +
            waifu_stats.stat_loyalty
        )
        next_point = slot.__get__(waifu_stats, WaifuStats) + 1
        cost = calculate_stat_upgrade_cost(total_points, next_point)
        # NOTE(review): strictly-greater comparison — having exactly `cost`
        # hearts is not enough; confirm that is intended.
        if available_love > cost:
            success = True
        else:
            success = False
        if success:
            # Charge the hearts and raise the waifu's market cost by 1% of it.
            await connector.execute(
                USER_COMMON_TABLE.update(
                    user_common_model.user_id == waifu_stats.user_id,
                ).values(
                    total_love = user_common_model.total_love - cost,
                    waifu_cost = waifu_cost + cost // 100,
                )
            )
            slot.__set__(waifu_stats, next_point)
            waifu_stats.save()
    # Returned total_love is the pre-charge ("old") heart amount.
    return success, total_love, cost, next_point
# Locally we have no db, so we replace the function with a variant that
# pretends the user always has 50000 hearts and skips persistence.
if DB_ENGINE is None:
    @copy_docs(try_upgrade_stat)
    async def try_upgrade_stat(waifu_stats, slot):
        available_love = total_love = 50000
        total_points = (
            waifu_stats.stat_housewife +
            waifu_stats.stat_cuteness +
            waifu_stats.stat_bedroom +
            waifu_stats.stat_charm +
            waifu_stats.stat_loyalty
        )
        next_point = slot.__get__(waifu_stats, WaifuStats) + 1
        cost = calculate_stat_upgrade_cost(total_points, next_point)
        if available_love > cost:
            success = True
            # Apply the upgrade in memory only (no .save() without a db).
            slot.__set__(waifu_stats, next_point)
        else:
            success = False
        return success, total_love, cost, next_point
async def autocomplete_upgrade_stat(event, value):
    """
    Auto completes the stats parameter of the `upgrade` command.

    This function is a coroutine.

    Parameters
    ----------
    event : ``InteractionEvent``
        The received interaction event.
    value : `None`, `str`
        The partial stat name typed so far, if any.

    Returns
    -------
    choices : `list` of `str`
        One `'name -> next for cost'` entry per matching stat.
    """
    waifu_stats = await event.user.waifu_stats
    # With no partial input offer every stat, otherwise only the matches.
    if value is None:
        candidates = iter(STAT_NAMES_AND_SLOTS)
    else:
        candidates = try_identify_stats(value)
    # The upgrade price depends on the sum of all five stats.
    total_points = sum((
        waifu_stats.stat_housewife,
        waifu_stats.stat_cuteness,
        waifu_stats.stat_bedroom,
        waifu_stats.stat_charm,
        waifu_stats.stat_loyalty,
    ))
    choices = []
    for stat_name, stat_slot in candidates:
        upgraded_point = stat_slot.__get__(waifu_stats, WaifuStats) + 1
        upgrade_cost = calculate_stat_upgrade_cost(total_points, upgraded_point)
        choices.append(f'{stat_name} -> {upgraded_point} for {upgrade_cost}')
    return choices
async def command_upgrade(event, stat: P(str, 'Select a stat', autocomplete = autocomplete_upgrade_stat)):
    """
    Upgrades the given stat.

    This function is a coroutine generator.

    Parameters
    ----------
    event : ``InteractionEvent``
        The received interaction event.
    stat : `str`
        The received stat name.

    Yields
    ------
    response : `None`, ``Embed``
    """
    identified_stat = try_identify_stat(stat)
    if identified_stat is None:
        # Unrecognised stat name -> notify the user and stop.
        abort('Unknown stat given.')
        return
    # Acknowledge the interaction early; the database work below may be slow.
    yield
    stat_name, slot = identified_stat
    waifu_stats = await event.user.waifu_stats
    success, total_love, cost, next_point = await try_upgrade_stat(waifu_stats, slot)
    # Embed color reflects the user's current stat spread.
    embed = Embed(
        f'Upgrading {stat_name} -> {next_point} for {cost} {EMOJI__HEART_CURRENCY}',
        color = get_user_chart_color(
            event.user.id,
            waifu_stats.stat_housewife,
            waifu_stats.stat_cuteness,
            waifu_stats.stat_bedroom,
            waifu_stats.stat_charm,
            waifu_stats.stat_loyalty,
        )
    ).add_thumbnail(
        event.user.avatar_url,
    )
    if success:
        # Show the balance change caused by the upgrade.
        embed.description = 'Was successful.'
        embed.add_field(
            f'Your {EMOJI__HEART_CURRENCY}',
            (
                f'```\n'
                f'{total_love} -> {total_love - cost}\n'
                f'```'
            )
        )
    else:
        # Not enough hearts; show the unchanged balance.
        embed.description = 'You have insufficient amount of hearts.'
        embed.add_field(
            f'Your {EMOJI__HEART_CURRENCY}',
            (
                f'```\n'
                f'{total_love}\n'
                f'```'
            ),
        )
    yield embed
|
from pricehunters.store import get_stores
def test_get_targets():
    # Smoke test: the store registry must expose at least one store entry.
    store_payload = get_stores()
    assert len(store_payload['stores']) > 0
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.forms import ModelForm

from .models import *
class RegistrationForm(UserCreationForm):
    """User registration form with uniqueness checks for email and username.

    Bug fixes: `forms.ValidationError` was a NameError (the `forms` module was
    never imported), and both clean methods overwrote the cleaned string with
    the matched ``User`` object, corrupting the error message.
    """

    class Meta:
        model = User
        fields = "__all__"

    def clean_email(self):
        # NOTE(review): reads the 'mail' key — confirm the field really is
        # named 'mail' rather than 'email' on the User model.
        email = self.cleaned_data['mail'].lower()
        try:
            # Keep the address in its own name; we only care whether a match exists.
            User.objects.get(email=email)
        except User.DoesNotExist:
            return email
        raise forms.ValidationError(f"El mail: {email} ya esta en uso")

    def clean_user(self):
        # NOTE(review): Django only auto-invokes clean_<fieldname>() hooks, so
        # unless a field is literally named 'user' this method is never called
        # by form validation — confirm whether clean_erabiltzailea was meant.
        username = self.cleaned_data['erabiltzailea'].lower()
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(f"El usuario: {username} ya esta en uso")
|
import requests
import colorama
from requests.adapters import HTTPAdapter
from tools.base import config
from tools.base import matcher
from tools.base import util
LOG = util.get_logger('router')
def get_lobby_route(feature=None, insecure=False):
    """Ask the game router service for a lobby route.

    Parameters
    ----------
    feature : str, optional
        Feature tag to route by. Falls back to ``config.FEATURE`` when omitted.
        (Bug fix: the original accepted this argument but never used it.)
    insecure : bool
        Request an insecure address when True (or when forced by config).

    Returns
    -------
    dict
        Parsed JSON response from the router.
    """
    payload = {'appId': config.APP_ID}
    # Bug fix: honor the explicit `feature` argument; config is only a fallback,
    # so calls without it behave exactly as before.
    if feature is None:
        feature = config.FEATURE
    if feature is not None:
        payload['feature'] = feature
    if insecure or config.FORCE_USE_INSECURE_ADDR:
        payload['insecure'] = True
    auth_url = "%s/route" % config.ROUTER_URL
    LOG.info(colorama.Fore.YELLOW + "request game router at %s" % auth_url)
    if not auth_url.startswith("http"):
        auth_url = "https://" + auth_url
    with requests.Session() as session:
        # Retry transient failures up to 3 times.
        session.mount(auth_url, HTTPAdapter(max_retries=3))
        resp = session.get(auth_url, params=payload, timeout=5)
        body = resp.json()  # parse once; the original parsed the body twice
        LOG.info(colorama.Fore.YELLOW +
                 "get router link got response %s" % body)
        return body
if __name__ == "__main__":
    # Manual smoke test: load the 'q0' environment config and print a route.
    config.init_config('q0')
    print(get_lobby_route())
|
def solve(input, iterations=50000000):
    """Spinlock puzzle: return the value sitting immediately after 0.

    Parameters
    ----------
    input : str or int
        Step count per insertion. (Name kept for backward compatibility even
        though it shadows the builtin.)
    iterations : int
        How many values to insert (default matches the original 50,000,000).

    Returns
    -------
    int or None
        The value right after the leading 0, or None when iterations == 0.

    The original simulated the whole buffer with ``list.insert`` — O(n) per
    insertion, so O(n^2) overall, plus gigabytes of list. Since insertions
    always happen at index >= 1, the 0 never moves from index 0; it suffices
    to track the value most recently inserted at index 1. This is O(n) time
    and O(1) memory with identical results. The progress print was removed.
    """
    steps = int(input)
    position = 0        # current cursor, as in the original simulation
    length = 1          # len(items) the original list would have
    after_zero = None   # items[1]: last value inserted at index 1
    for value in range(iterations):
        if value > 0:
            # The original advanced one step at a time, wrapping at len(items);
            # that is exactly addition modulo the current length.
            position = (position + steps) % length
        position += 1
        if position == 1:
            after_zero = value
        length += 1
    return after_zero
# NOTE(review): runs the full 50M-iteration puzzle at import time — consider
# guarding with `if __name__ == "__main__":`.
print(solve("355"))
|
from django.urls import path
from . import views
# URL namespace: reverse these routes as 'Match:match' / 'Match:matching'.
app_name = 'Match'
urlpatterns = [
    # /Match/
    path('match/', views.match, name='match'),
    path('matching/', views.matching, name='matching'),
]
class Queue(object):
    """FIFO queue backed by a list: enqueue at the list front, dequeue from
    the list back, so the oldest item always leaves first."""

    def __init__(self):
        self.items = []

    def enqueue(self, item):
        """Add an item to the back of the queue (front of the list)."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the oldest item; raises IndexError when empty."""
        return self.items.pop()

    def is_empty(self):
        """Return True when the queue holds no items."""
        return self.items == []

    def size(self):
        """Return the number of queued items."""
        # Bug fix: the original called the undefined name `size`; use len().
        return len(self.items)

    def display(self):
        """Print the backing list (newest item first)."""
        # print() with one argument works under both Python 2 and 3
        # (the original used the Python 2 print statement).
        print(self.items)
"""
q = Queue()
q.enqueue(3)
q.enqueue('hello')
q.enqueue('dog')
q.dequeue()
q.display()
"""
q = Queue()
import timeit #import timeit module
from timeit import Timer #import timer class
t1 = Timer("Queue.enqueue(3)", "from __main__ import Queue.enqueue")
print "q.enqueue(3", t1.timeit(number = 1000), " milliseconds"
|
import pymongo, csv
from bson.objectid import ObjectId
import datetime
# SECURITY NOTE(review): database credentials are hard-coded in the URI —
# rotate them and load from the environment/config instead.
client = pymongo.MongoClient('mongodb://trendr01:letmeconnect@ds047988-a0.mongolab.com:47988/weareplayingit')
db = client.weareplayingit
#This is the genres to search, in case you want to make this a callable function
genres = ["FPS","Shooter"]
#The dates we're searching
start_time = datetime.datetime(2013, 12, 11)
end_time = datetime.datetime(2013,12,24)
#Just containers to use below
findtags = []
traffic_results = []
fieldnames = ["ID","Title","Max/Mean"]
# Build an $or query matching any of the requested genres.
for genre in genres:
    findtags.append({"genre":genre})
# game scaffolding
gameCollection = db.games
gameHist = db.game_history
games = gameCollection.find({"$or":findtags})
#generate an output header (one column per day). CSV cruft, but also useful for axis labels
tempdate = start_time
while not(tempdate >= end_time):
    fieldnames.append(tempdate.isoformat())
    tempdate = tempdate + datetime.timedelta(days=1)
print fieldnames
#order is id, Title, Max/Mean, Value for each date
# ObjectIds embed their creation time, so date-range filtering can be done on _id.
end_id = ObjectId.from_datetime(end_time)
start_id = ObjectId.from_datetime(start_time)
for game in games:
    print "----"
    print game.get("title")
    print game
    game_id = game.get("_id")
    game_title = game.get("title")
    # Two output rows per game: daily maxima and daily means.
    thisgamemax = [game_id,game_title,"Max"]
    thisgamemean = [game_id,game_title,"Mean"]
    days = gameHist.find({"$and":[{"game":game_id},{"_id":{"$lte":end_id}},{"_id":{"$gte":start_id}}]}).sort("_id", pymongo.ASCENDING)
    tempdate2 = start_time
    for day in days:
        # NOTE(review): tempdate2 advances but days with no record still emit
        # a value into the next column — confirm gaps are handled as intended.
        if day.get("_id") >= ObjectId.from_datetime(tempdate2):
            tempdate2 = tempdate2 + datetime.timedelta(days=1)
        print day.get("day_max")
        day_max = day.get("day_max")
        day_mean = day.get("day_mean")
        thisgamemax.append(day_max)
        thisgamemean.append(day_mean)
    print thisgamemax
    traffic_results.append(thisgamemax)
    traffic_results.append(thisgamemean)
#csv cruft. Replace if you want to push to a graphing service or another db
# NOTE(review): 'wb' is the Python 2 csv convention; the file is never closed
# explicitly — a `with open(...)` block would be safer.
csvout = open("history_out.csv",'wb')
csvwriter = csv.writer(csvout)
csvwriter.writerow(fieldnames)
csvwriter.writerows(traffic_results)
#for tag in tags:
#    print tag
from django.db import models
class Network(models.Model):
    """A protected (VPN) network, identified by its number."""
    # Network number, stored as free-form text.
    number = models.CharField(
        max_length=35,
        verbose_name=u'Номер сети')
    class Meta:
        verbose_name = 'Защищенная сеть'
        verbose_name_plural = 'Защищенные сети'
    def __str__(self):
        return f'{self.number}'
class HardwarePlatform(models.Model):
    """Hardware platform a coordinator can run on."""
    name = models.CharField(
        max_length=35,
        verbose_name=u'Аппаратная платформа')
    class Meta:
        verbose_name = 'Аппаратная платформа'
        verbose_name_plural = 'Аппаратные платформы'
        # Newest-name-first default ordering.
        ordering = ['-name']
    def __str__(self):
        return f'{self.name}'
class Modification(models.Model):
    """Device build/modification variant."""
    name = models.CharField(
        max_length=35,
        verbose_name=u'Модификация исполненеия')
    class Meta:
        verbose_name = 'Модификация исполненеия'
        verbose_name_plural = 'Модификации исполненеий'
        ordering = ['-name']
    def __str__(self):
        return f'{self.name}'
class Coordinator(models.Model):
    """A coordinator node: its network, hardware, and accounting numbers.

    All references use SET_NULL with blank/null allowed, so deleting a
    related Network/Modification/HardwarePlatform keeps the coordinator row.
    """
    # Protected network this node belongs to.
    network = models.ForeignKey(
        'Network',
        on_delete=models.SET_NULL,
        related_name='coord',
        verbose_name=u'Защищенная сеть',
        blank=True,
        null=True,
    )
    # Node name (required).
    name = models.CharField(
        max_length=50,
        verbose_name=u'Название узла',)
    # Date the node was put into service.
    date = models.DateField(
        verbose_name=u'Дата введения в эксплуатацию',
        blank=True,
        null=True,)
    # Physical location.
    address = models.CharField(
        max_length=50,
        verbose_name=u'Местоположение',
        blank=True,
        null=True,
    )
    # In-network identifier.
    vipnet_id = models.CharField(
        max_length=10,
        verbose_name=u'Идентификатор в сети',
        blank=True,
        null=True,
    )
    # Build/modification variant of the device.
    modification = models.ForeignKey(
        'Modification',
        on_delete=models.SET_NULL,
        verbose_name=u'Модификация исполненеия',
        blank=True,
        null=True,
    )
    hardware_platform = models.ForeignKey(
        'HardwarePlatform',
        on_delete=models.SET_NULL,
        verbose_name=u'Аппаратная платформа',
        blank=True,
        null=True,
    )
    serial_number = models.CharField(
        max_length=25,
        verbose_name=u'Серийный номер',
        blank=True,
        null=True,
    )
    # Accounting numbers in the SKZI / FSTEC registries.
    account_number_skzi = models.CharField(
        max_length=25,
        verbose_name=u'Учетный номер СКЗИ',
        blank=True,
        null=True,
    )
    account_number_fstec = models.CharField(
        max_length=25,
        verbose_name=u'Учетный номер ФСТЭК',
        blank=True,
        null=True,
    )
    class Meta:
        verbose_name = 'Координатор'
        verbose_name_plural = 'Координаторы'
    def __str__(self):
        return f'{self.name}'
|
from random import randint
import matplotlib.pyplot as plt
# Module-level accumulators: atomsMove() appends to these so the plot below
# can read them (final x per atom, final y per atom, squared displacement).
stepsList = []
xnList = []
listOfR = []


def atomsMove(xn, L):
    """Random-walk `L` atoms for `xn - 1` lattice steps each.

    Parameters
    ----------
    xn : int
        Step budget per atom (the original iterated range(1, xn), i.e.
        xn - 1 moves; that count is preserved).
    L : int
        Number of atoms to walk.

    Returns
    -------
    list of int
        The module-level `listOfR`, with R = x**2 + y**2 appended per atom.

    Bug fixes: `x^2 + y^2` used `^` (bitwise XOR, which can even be
    negative), not exponentiation; and `randint(0, 4)` drew five outcomes
    while only 0, 1, 3, 4 were handled, leaving 2 as a silent "no move" —
    assumed unintended, now four equally likely directions.
    """
    for _atom in range(0, L):
        x = 0
        y = 0
        for _step in range(1, xn):
            direction = randint(0, 3)  # 0..3: +x, -x, +y, -y
            if direction == 0:
                x = x + 1
            elif direction == 1:
                x = x - 1
            elif direction == 2:
                y = y + 1
            else:
                y = y - 1
        stepsList.append(x)
        xnList.append(y)
        # Squared displacement from the origin.
        R = x**2 + y**2
        listOfR.append(R)
    return listOfR
# NOTE(review): both calls append to the same module-level lists, so the plot
# below mixes the C = 0.1 and C = 0.5 runs together — confirm intended.
C01 = atomsMove(1000, 100) # C = 0.1
C05 = atomsMove(5000, 100) # C = 0.5
# Plot of final (y, x) positions accumulated across both runs.
plt.plot(xnList, stepsList)
plt.show()
# C = n/L^2
# C = 0.1 when 10/100, 100/1000: n = 1000, L = 100
# C = 0.5 when 50 and 100, e.g. n = 5000, L = 10000
|
#!/usr/bin/env python
"""
Usage:
qc-spikecount <nifti> <output> <bval>
nifti -- input 4D nifti image (fMRI or DTI)
output -- output .csv file
bval -- bval file (for DTI)
Calculates the mean and standard deviation of each axial slice over all TRs.
Counts the number of outliers across all slices, and prints this number to
the text file 'output'.
If bval is supplied (for DTI), we remove all time points that are B0.
"""
import os, sys
from copy import copy
import numpy as np
import nibabel as nib
import logging
logging.basicConfig(level=logging.WARN, format="[%(name)s] %(levelname)s: %(message)s")
logger = logging.getLogger(os.path.basename(__file__))
def reorient_4d_image(image):
    """
    Reorients the data to radiological convention.

    Each TR's volume is transposed (x, y, z) -> (z, x, y) and rotated 180
    degrees in-plane — exactly what the original did one TR at a time, but
    vectorized over the time axis instead of repeatedly concatenating
    (which was quadratic in memory traffic).

    Bug fix: the original returned a 3D array when image.shape[3] == 1
    (the concatenation branches never ran); this version always returns 4D.

    Parameters
    ----------
    image : np.ndarray
        4D array shaped (x, y, z, t).

    Returns
    -------
    np.ndarray
        Reoriented 4D array shaped (z, x, y, t).
    """
    # Move the slice axis first for every TR at once: (x, y, z, t) -> (z, x, y, t).
    reoriented = np.transpose(image, (2, 0, 1, 3))
    # 180-degree rotation in the (axis 0, axis 1) plane, applied to all TRs.
    # Copy so the caller gets an independent array, matching the original.
    return np.rot90(reoriented, 2, axes=(0, 1)).copy()
def main(nifti, output, bval=None):
    """
    Count per-slice intensity spikes in a 4D nifti and write the count.

    Parameters
    ----------
    nifti : str
        Path to the input 4D image (fMRI or DTI).
    output : str
        Path of the text file that receives the spike count.
    bval : str, optional
        Path to a bval file; when given, B0 volumes (bval == 0) are dropped.

    For each axial slice, the mean intensity of every TR is computed; TRs
    whose slice mean deviates from the slice's grand mean by more than the
    mean per-TR standard deviation are counted as spikes.
    """
    logger.info('Starting')  # fix: use the module logger, not the root logger
    # NOTE(review): nibabel's get_data() is deprecated in newer releases;
    # get_fdata() would change the returned dtype, so it is kept as-is.
    data = reorient_4d_image(nib.load(nifti).get_data())
    if bval:
        # DTI input: keep only diffusion-weighted (non-B0) volumes.
        bval = np.genfromtxt(bval)
        idx = np.where(bval != 0)[0]
        data = data[:, :, :, idx]
    z = data.shape[0]  # axial slices sit on axis 0 after reorientation
    spikecount = 0
    for i in np.arange(z):
        # Per-TR mean and SD of this slice, vectorized over the time axis
        # (replaces the original per-TR loop with incremental np.hstack).
        v_mean = data[i, :, :, :].mean(axis=(0, 1))
        v_sd = data[i, :, :, :].std(axis=(0, 1))
        above = len(np.where(v_mean > np.mean(v_mean) + np.mean(v_sd))[0])
        below = len(np.where(v_mean < np.mean(v_mean) - np.mean(v_sd))[0])
        spikecount = spikecount + above + below
    with open(output, 'w') as f:
        f.write(str(spikecount) + '\n')
if __name__ == '__main__':
    # CLI dispatch: <nifti> <output> for fMRI, plus <bval> for DTI.
    if len(sys.argv) == 3:
        main(sys.argv[1], sys.argv[2])
    elif len(sys.argv) == 4:
        main(sys.argv[1], sys.argv[2], bval=sys.argv[3])
    else:
        # Wrong argument count: show the usage docstring.
        print(__doc__)
|
"""
input returns dictionary mapping word to it's occurence value
"olly olly in come free"
returns { olly:2, in:1, come:1, free:1 }
"""
def dictionary(sentence):
    """Map each space-separated word of `sentence` to its occurrence count.

    Note: like the original, splitting "" yields [''], so an empty input
    returns {'': 1}.
    """
    # Single pass instead of calling list.count() per word (was O(n^2)).
    store = {}
    for word in sentence.split(" "):
        store[word] = store.get(word, 0) + 1
    return store
# Python 2 entry point: reads one line from stdin and prints the word counts.
print dictionary(raw_input())
|
import requests
import time
import json
# SECURITY NOTE(review): hard-coded VK API token committed to source —
# rotate this credential and load it from the environment instead.
TOKEN_VK_api = '958eb5d439726565e9333aa30e50e0f937ee432e927f0dbd541c541887d919a7c56f95c04217915c32008'
class Backup_Data:
    """Backs up a VK user's profile photos to Yandex.Disk.

    Workflow: upload_photos() fetches photo metadata from VK,
    create_folder() makes a per-user folder on Yandex.Disk, and
    load_photos() asks Yandex.Disk to fetch each photo by URL.
    """
    def __init__(self, id, token, num):
        # VK user id whose profile photos are backed up.
        self.id = id
        # Yandex.Disk REST API OAuth token.
        self.token = token
        self.header = {'Content-Type': 'application/json',
                       'Accept': 'application/json',
                       'Authorization': f'OAuth {self.token}'}
        # Number of photos to request from VK.
        self.num = num
    def upload_photos(self):
        # Fetch profile-photo metadata from VK and index it by like count.
        http_request = 'https://api.vk.com/method/photos.get?'
        params = {'owner_id': self.id,
                  'album_id': 'profile',
                  'extended': 1,
                  'access_token': TOKEN_VK_api,
                  'v': 5.77,
                  'count': self.num}
        response = requests.get(http_request, params=params)
        if response.status_code == 200:
            print('Photos uploaded successfully!')
        else:
            print('An error occurred while uploading photos.')
        # NOTE(review): the body is parsed even after a failed request; a
        # non-200 reply will likely raise on the 'response' key below.
        response_json = response.json()
        self.photos = {}
        for item in response_json['response']['items']:
            # Key photos by like count; keep only the largest available size.
            num_likes = str(item['likes']['count'])
            max_size = max(item['sizes'], key=lambda elem: elem['height'])
            if num_likes not in self.photos.keys():
                self.photos[num_likes] = [{'name': num_likes,
                                           'date': time.strftime('%d-%m-%Y', time.localtime(item['date'])),
                                           'url': max_size['url'],
                                           'size': max_size['type']}]
            else:
                self.photos[num_likes].append({'name': num_likes,
                                               'date': time.strftime('%d-%m-%Y', time.localtime(item['date'])),
                                               'url': max_size['url'],
                                               'size': max_size['type']})
    def create_folder(self):
        # Create a folder named after the VK user id on Yandex.Disk.
        url_request = "https://cloud-api.yandex.net:443/v1/disk/resources?path=%2F" + str(self.id)
        response = requests.put(url_request, headers=self.header)
        if response.status_code == 201:
            print('The folder is created!')
        else:
            print('An error occurred while creating the folder.')
    def load_photos(self):
        # Ask Yandex.Disk to fetch each photo by URL into the user's folder.
        path = f'/{str(self.id)}/'
        data = []
        for key, value in self.photos.items():
            for elem in value:
                # Disambiguate same-like-count photos by appending the date.
                if len(value) > 1:
                    name = elem['name'] + f' {elem["date"]}.jpg'
                else:
                    name = elem['name'] + '.jpg'
                params = {'url': elem['url'], 'path': path + name}
                http_request = 'https://cloud-api.yandex.net/v1/disk/resources/upload'
                response = requests.post(http_request, headers=self.header, params=params)
                # 202 Accepted: Disk queued the download.
                if response.status_code == 202:
                    data.append({"file_name": name,
                                 "size": elem['size']})
                    print(f'"{name}" successfully uploaded!')
        self.writing_info(data)
    def writing_info(self, data):
        # Persist the per-run upload results.
        # NOTE(review): append mode means repeated runs produce concatenated
        # JSON documents, which is not valid JSON as a whole — confirm intended.
        try:
            with open('data.json', 'a') as data_file:
                json.dump(data, data_file)
            print('The information with download results was successfully saved to a file "data.json".')
        except Exception as E:
            print('An error occurred while writing the download results.')
if __name__ == '__main__':
    # Interactive entry point (prompts are in Russian): asks for the VK user
    # id, a Yandex.Disk token, and a photo count (0 selects the default of 5).
    user_id = input('Введите id пользователя:\n')
    token = input('Введите токен для REST API Я.Диска:\n')
    num = int(input('Введите количество загружаемых фотографий(Введите 0 для значения по умолчанию):\n'))
    if num == 0:
        num = 5
    bd = Backup_Data(user_id, token, num)
    bd.upload_photos()
    bd.create_folder()
    bd.load_photos()
class ElectronicDeviec:
    # NOTE(review): class name misspells "Device"; renaming would break callers.
    # Class-level attribute (singular `feature`) inherited by all subclasses.
    feature = ["High Performance","Non-Portable"]
class PocketGadget(ElectronicDeviec):
    # NOTE(review): plural `features` does not override the inherited singular
    # `feature`, so ft() below reads the parent's list — confirm whether this
    # name mismatch is intentional.
    features = ["Low Performance","Portable"]
    def ft(self):
        # Formats the first two entries of the inherited `feature` list.
        return f"{self.feature[0]} and {self.feature[1]}"
class Phone(PocketGadget):
    # Middle tier between the parents' performance extremes.
    AverageFeature = ["Medium Performance","Portable"]
# Demo instances.
Big = ElectronicDeviec()
Pocket = PocketGadget()
# NOTE(review): rebinding `Phone` to an instance shadows the Phone class.
Phone = Phone()
print(Pocket.ft())
from django import forms
from gifts.models import GiftSubscription
class GiftSubscriptionForm(forms.Form):
    """Collects the two e-mail addresses needed for a gift subscription."""
    gifter = forms.EmailField(label="Your e-mail")
    giftee = forms.EmailField(label="Recipient's e-mail")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.